Diffstat (limited to 'drivers')
-rw-r--r--  drivers/ata/libahci.c | 2
-rw-r--r--  drivers/base/platform.c | 2
-rw-r--r--  drivers/connector/connector.c | 1
-rw-r--r--  drivers/crypto/caam/caamalg.c | 6
-rw-r--r--  drivers/firmware/google/Kconfig | 1
-rw-r--r--  drivers/gpio/tps65910-gpio.c | 2
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 16
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 34
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 35
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 42
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 80
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 305
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/nid.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 6
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 5
-rw-r--r--  drivers/hid/hid-core.c | 1
-rw-r--r--  drivers/hid/hid-ids.h | 1
-rw-r--r--  drivers/hid/hid-multitouch.c | 12
-rw-r--r--  drivers/hwmon/Kconfig | 2
-rw-r--r--  drivers/hwmon/adm1275.c | 16
-rw-r--r--  drivers/hwmon/emc6w201.c | 58
-rw-r--r--  drivers/hwmon/f71882fg.c | 19
-rw-r--r--  drivers/hwmon/hwmon-vid.c | 2
-rw-r--r--  drivers/hwmon/pmbus.c | 10
-rw-r--r--  drivers/hwmon/pmbus_core.c | 11
-rw-r--r--  drivers/hwmon/sch5627.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-taos-evm.c | 8
-rw-r--r--  drivers/i2c/muxes/pca954x.c | 7
-rw-r--r--  drivers/infiniband/core/cm.c | 3
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 3
-rw-r--r--  drivers/input/keyboard/pmic8xxx-keypad.c | 3
-rw-r--r--  drivers/input/misc/pmic8xxx-pwrkey.c | 3
-rw-r--r--  drivers/leds/leds-lp5521.c | 4
-rw-r--r--  drivers/leds/leds-lp5523.c | 4
-rw-r--r--  drivers/md/md.c | 1
-rw-r--r--  drivers/media/rc/fintek-cir.c | 5
-rw-r--r--  drivers/media/rc/imon.c | 19
-rw-r--r--  drivers/media/rc/ir-raw.c | 4
-rw-r--r--  drivers/media/rc/ite-cir.c | 12
-rw-r--r--  drivers/media/rc/ite-cir.h | 3
-rw-r--r--  drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c | 58
-rw-r--r--  drivers/media/rc/lirc_dev.c | 37
-rw-r--r--  drivers/media/rc/mceusb.c | 80
-rw-r--r--  drivers/media/rc/nuvoton-cir.c | 2
-rw-r--r--  drivers/media/rc/nuvoton-cir.h | 1
-rw-r--r--  drivers/media/rc/rc-main.c | 48
-rw-r--r--  drivers/media/video/m5mols/m5mols.h | 57
-rw-r--r--  drivers/media/video/m5mols/m5mols_capture.c | 22
-rw-r--r--  drivers/media/video/m5mols/m5mols_controls.c | 6
-rw-r--r--  drivers/media/video/m5mols/m5mols_core.c | 144
-rw-r--r--  drivers/media/video/m5mols/m5mols_reg.h | 21
-rw-r--r--  drivers/media/video/mx1_camera.c | 10
-rw-r--r--  drivers/media/video/omap/omap_vout.c | 18
-rw-r--r--  drivers/media/video/omap/omap_voutlib.c | 6
-rw-r--r--  drivers/media/video/omap3isp/isp.c | 2
-rw-r--r--  drivers/media/video/pwc/pwc-ctrl.c | 2
-rw-r--r--  drivers/media/video/pwc/pwc-if.c | 152
-rw-r--r--  drivers/media/video/pwc/pwc.h | 4
-rw-r--r--  drivers/media/video/s5p-fimc/fimc-capture.c | 21
-rw-r--r--  drivers/media/video/s5p-fimc/fimc-core.c | 28
-rw-r--r--  drivers/media/video/s5p-fimc/fimc-core.h | 29
-rw-r--r--  drivers/media/video/saa7134/saa7134-input.c | 2
-rw-r--r--  drivers/media/video/uvc/uvc_entity.c | 34
-rw-r--r--  drivers/media/video/uvc/uvc_queue.c | 2
-rw-r--r--  drivers/media/video/uvc/uvc_video.c | 4
-rw-r--r--  drivers/media/video/v4l2-dev.c | 39
-rw-r--r--  drivers/media/video/videobuf2-core.c | 14
-rw-r--r--  drivers/media/video/videobuf2-dma-sg.c | 2
-rw-r--r--  drivers/mfd/Kconfig | 3
-rw-r--r--  drivers/mfd/Makefile | 1
-rw-r--r--  drivers/mfd/omap-usb-host.c | 131
-rw-r--r--  drivers/mfd/tps65911-comparator.c | 2
-rw-r--r--  drivers/misc/cb710/sgbuf2.c | 2
-rw-r--r--  drivers/misc/ioc4.c | 2
-rw-r--r--  drivers/misc/lkdtm.c | 8
-rw-r--r--  drivers/misc/pti.c | 11
-rw-r--r--  drivers/misc/ti-st/st_core.c | 2
-rw-r--r--  drivers/misc/ti-st/st_kim.c | 8
-rw-r--r--  drivers/mmc/card/block.c | 5
-rw-r--r--  drivers/mmc/card/queue.c | 15
-rw-r--r--  drivers/mmc/card/queue.h | 3
-rw-r--r--  drivers/mmc/core/core.c | 2
-rw-r--r--  drivers/mmc/core/sdio.c | 39
-rw-r--r--  drivers/mmc/core/sdio_bus.c | 2
-rw-r--r--  drivers/mmc/host/of_mmc_spi.c | 5
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c | 6
-rw-r--r--  drivers/mmc/host/sh_mobile_sdhi.c | 5
-rw-r--r--  drivers/mmc/host/tmio_mmc_pio.c | 4
-rw-r--r--  drivers/mmc/host/vub300.c | 11
-rw-r--r--  drivers/mtd/nand/fsl_elbc_nand.c | 6
-rw-r--r--  drivers/net/8139too.c | 1
-rw-r--r--  drivers/net/Kconfig | 3
-rw-r--r--  drivers/net/bna/bnad.c | 7
-rw-r--r--  drivers/net/bnx2x/bnx2x_main.c | 6
-rw-r--r--  drivers/net/can/Kconfig | 4
-rw-r--r--  drivers/net/cxgb3/sge.c | 4
-rw-r--r--  drivers/net/greth.c | 7
-rw-r--r--  drivers/net/hamradio/6pack.c | 4
-rw-r--r--  drivers/net/hamradio/mkiss.c | 4
-rw-r--r--  drivers/net/natsemi.c | 3
-rw-r--r--  drivers/net/ppp_deflate.c | 5
-rw-r--r--  drivers/net/qlge/qlge.h | 3
-rw-r--r--  drivers/net/qlge/qlge_main.c | 40
-rw-r--r--  drivers/net/r8169.c | 2
-rw-r--r--  drivers/net/rionet.c | 28
-rw-r--r--  drivers/net/sh_eth.c | 6
-rw-r--r--  drivers/net/usb/kalmia.c | 42
-rw-r--r--  drivers/net/usb/zaurus.c | 10
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 138
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h | 5
-rw-r--r--  drivers/net/wireless/ath/ath5k/eeprom.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath9k/pci.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-1000.c | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-2000.c | 7
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000.c | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-6000.c | 9
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-tx.c | 25
-rw-r--r--  drivers/net/wireless/rtlwifi/pci.c | 13
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/sw.c | 20
-rw-r--r--  drivers/pci/quirks.c | 2
-rw-r--r--  drivers/rtc/rtc-ds1307.c | 1
-rw-r--r--  drivers/scsi/Kconfig | 13
-rw-r--r--  drivers/scsi/Makefile | 1
-rw-r--r--  drivers/scsi/hpsa.c | 16
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 4
-rw-r--r--  drivers/scsi/isci/Makefile | 8
-rw-r--r--  drivers/scsi/isci/firmware/Makefile | 19
-rw-r--r--  drivers/scsi/isci/firmware/README | 36
-rw-r--r--  drivers/scsi/isci/firmware/create_fw.c | 99
-rw-r--r--  drivers/scsi/isci/firmware/create_fw.h | 77
-rw-r--r--  drivers/scsi/isci/host.c | 2751
-rw-r--r--  drivers/scsi/isci/host.h | 542
-rw-r--r--  drivers/scsi/isci/init.c | 565
-rw-r--r--  drivers/scsi/isci/isci.h | 538
-rw-r--r--  drivers/scsi/isci/phy.c | 1312
-rw-r--r--  drivers/scsi/isci/phy.h | 504
-rw-r--r--  drivers/scsi/isci/port.c | 1757
-rw-r--r--  drivers/scsi/isci/port.h | 306
-rw-r--r--  drivers/scsi/isci/port_config.c | 754
-rw-r--r--  drivers/scsi/isci/probe_roms.c | 243
-rw-r--r--  drivers/scsi/isci/probe_roms.h | 249
-rw-r--r--  drivers/scsi/isci/registers.h | 1934
-rw-r--r--  drivers/scsi/isci/remote_device.c | 1501
-rw-r--r--  drivers/scsi/isci/remote_device.h | 352
-rw-r--r--  drivers/scsi/isci/remote_node_context.c | 627
-rw-r--r--  drivers/scsi/isci/remote_node_context.h | 224
-rw-r--r--  drivers/scsi/isci/remote_node_table.c | 598
-rw-r--r--  drivers/scsi/isci/remote_node_table.h | 188
-rw-r--r--  drivers/scsi/isci/request.c | 3391
-rw-r--r--  drivers/scsi/isci/request.h | 448
-rw-r--r--  drivers/scsi/isci/sas.h | 219
-rw-r--r--  drivers/scsi/isci/scu_completion_codes.h | 283
-rw-r--r--  drivers/scsi/isci/scu_event_codes.h | 336
-rw-r--r--  drivers/scsi/isci/scu_remote_node_context.h | 229
-rw-r--r--  drivers/scsi/isci/scu_task_context.h | 942
-rw-r--r--  drivers/scsi/isci/task.c | 1676
-rw-r--r--  drivers/scsi/isci/task.h | 367
-rw-r--r--  drivers/scsi/isci/unsolicited_frame_control.c | 225
-rw-r--r--  drivers/scsi/isci/unsolicited_frame_control.h | 278
-rw-r--r--  drivers/staging/brcm80211/Kconfig | 2
-rw-r--r--  drivers/staging/comedi/Kconfig | 22
-rw-r--r--  drivers/staging/iio/Kconfig | 2
-rw-r--r--  drivers/staging/iio/accel/adis16204.h | 2
-rw-r--r--  drivers/staging/iio/accel/adis16209.h | 4
-rw-r--r--  drivers/staging/iio/gyro/adis16260.h | 2
-rw-r--r--  drivers/staging/iio/imu/adis16400.h | 2
-rw-r--r--  drivers/staging/lirc/lirc_imon.c | 10
-rw-r--r--  drivers/staging/lirc/lirc_serial.c | 44
-rw-r--r--  drivers/staging/lirc/lirc_sir.c | 11
-rw-r--r--  drivers/staging/lirc/lirc_zilog.c | 4
-rw-r--r--  drivers/staging/mei/init.c | 2
-rw-r--r--  drivers/staging/mei/wd.c | 13
-rw-r--r--  drivers/tty/n_gsm.c | 26
-rw-r--r--  drivers/tty/n_tty.c | 1
-rw-r--r--  drivers/tty/serial/8250.c | 1
-rw-r--r--  drivers/tty/serial/8250_pci.c | 61
-rw-r--r--  drivers/tty/serial/amba-pl011.c | 123
-rw-r--r--  drivers/tty/serial/atmel_serial.c | 3
-rw-r--r--  drivers/tty/serial/bcm63xx_uart.c | 18
-rw-r--r--  drivers/tty/serial/jsm/jsm_driver.c | 2
-rw-r--r--  drivers/tty/serial/s5pv210.c | 4
-rw-r--r--  drivers/tty/tty_ldisc.c | 4
-rw-r--r--  drivers/usb/core/driver.c | 11
-rw-r--r--  drivers/usb/core/hub.c | 16
-rw-r--r--  drivers/usb/core/message.c | 17
-rw-r--r--  drivers/usb/gadget/fsl_udc_core.c | 36
-rw-r--r--  drivers/usb/host/ehci-ath79.c | 10
-rw-r--r--  drivers/usb/host/ehci-hcd.c | 4
-rw-r--r--  drivers/usb/host/isp1760-hcd.c | 2
-rw-r--r--  drivers/usb/host/ohci-hcd.c | 4
-rw-r--r--  drivers/usb/host/r8a66597-hcd.c | 1
-rw-r--r--  drivers/usb/host/xhci-mem.c | 2
-rw-r--r--  drivers/usb/host/xhci-pci.c | 8
-rw-r--r--  drivers/usb/host/xhci-ring.c | 30
-rw-r--r--  drivers/usb/host/xhci.c | 39
-rw-r--r--  drivers/usb/host/xhci.h | 3
-rw-r--r--  drivers/usb/musb/musb_gadget.c | 6
-rw-r--r--  drivers/usb/musb/musb_host.c | 2
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 19
-rw-r--r--  drivers/usb/serial/ftdi_sio.h | 3
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 1
-rw-r--r--  drivers/usb/serial/ti_usb_3410_5052.c | 1
-rw-r--r--  drivers/video/amba-clcd.c | 2
-rw-r--r--  drivers/video/fsl-diu-fb.c | 16
-rw-r--r--  drivers/video/geode/gx1fb_core.c | 14
-rw-r--r--  drivers/video/hecubafb.c | 3
-rw-r--r--  drivers/video/sh_mobile_meram.c | 2
-rw-r--r--  drivers/video/sm501fb.c | 2
-rw-r--r--  drivers/video/udlfb.c | 8
-rw-r--r--  drivers/video/vesafb.c | 1
-rw-r--r--  drivers/watchdog/Kconfig | 3
-rw-r--r--  drivers/watchdog/at32ap700x_wdt.c | 2
-rw-r--r--  drivers/watchdog/gef_wdt.c | 2
-rw-r--r--  drivers/watchdog/intel_scu_watchdog.c | 1
-rw-r--r--  drivers/watchdog/mtx-1_wdt.c | 29
-rw-r--r--  drivers/watchdog/wm831x_wdt.c | 5
224 files changed, 25526 insertions(+), 1102 deletions(-)
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index d38c40fe4ddb..41223c7f0206 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -452,7 +452,7 @@ void ahci_save_initial_config(struct device *dev,
 	}
 
 	if (mask_port_map) {
-		dev_printk(KERN_ERR, dev, "masking port_map 0x%x -> 0x%x\n",
+		dev_printk(KERN_WARNING, dev, "masking port_map 0x%x -> 0x%x\n",
 			   port_map,
 			   port_map & mask_port_map);
 		port_map &= mask_port_map;
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 1c291af637b3..6040717b62bb 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -367,7 +367,7 @@ EXPORT_SYMBOL_GPL(platform_device_unregister);
  *
  * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
  */
-struct platform_device *__init_or_module platform_device_register_resndata(
+struct platform_device *platform_device_register_resndata(
 		struct device *parent,
 		const char *name, int id,
 		const struct resource *res, unsigned int num,
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 219d88a0eeae..dde6a0fad408 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -139,6 +139,7 @@ static int cn_call_callback(struct sk_buff *skb)
 	spin_unlock_bh(&dev->cbdev->queue_lock);
 
 	if (cbq != NULL) {
+		err = 0;
 		cbq->callback(msg, nsp);
 		kfree_skb(skb);
 		cn_queue_release_callback(cbq);
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index d0e65d6ddc77..676d957c22b0 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -238,9 +238,9 @@ static int build_sh_desc_ipsec(struct caam_ctx *ctx)
 
 	/* build shared descriptor for this session */
 	sh_desc = kmalloc(CAAM_CMD_SZ * DESC_AEAD_SHARED_TEXT_LEN +
-			  keys_fit_inline ?
+			  (keys_fit_inline ?
 			  ctx->split_key_pad_len + ctx->enckeylen :
-			  CAAM_PTR_SZ * 2, GFP_DMA | GFP_KERNEL);
+			  CAAM_PTR_SZ * 2), GFP_DMA | GFP_KERNEL);
 	if (!sh_desc) {
 		dev_err(jrdev, "could not allocate shared descriptor\n");
 		return -ENOMEM;
diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig
index 87096b6ca5c9..2f21b0bfe653 100644
--- a/drivers/firmware/google/Kconfig
+++ b/drivers/firmware/google/Kconfig
@@ -13,6 +13,7 @@ menu "Google Firmware Drivers"
 config GOOGLE_SMI
 	tristate "SMI interface for Google platforms"
 	depends on ACPI && DMI
+	select EFI
 	select EFI_VARS
 	help
 	   Say Y here if you want to enable SMI callbacks for Google
diff --git a/drivers/gpio/tps65910-gpio.c b/drivers/gpio/tps65910-gpio.c
index 8d1ddfdd63eb..15097ca616d6 100644
--- a/drivers/gpio/tps65910-gpio.c
+++ b/drivers/gpio/tps65910-gpio.c
@@ -81,8 +81,10 @@ void tps65910_gpio_init(struct tps65910 *tps65910, int gpio_base)
 	switch(tps65910_chip_id(tps65910)) {
 	case TPS65910:
 		tps65910->gpio.ngpio = 6;
+		break;
 	case TPS65911:
 		tps65910->gpio.ngpio = 9;
+		break;
 	default:
 		return;
 	}
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 74e4ff578017..4012fe423460 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -34,6 +34,7 @@
 #include <linux/module.h>
 #include <linux/mman.h>
 #include <linux/pagemap.h>
+#include <linux/shmem_fs.h>
 #include "drmP.h"
 
 /** @file drm_gem.c
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 4d46441cbe2d..0a893f7400fa 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1207,13 +1207,17 @@ static int i915_context_status(struct seq_file *m, void *unused)
 	if (ret)
 		return ret;
 
-	seq_printf(m, "power context ");
-	describe_obj(m, dev_priv->pwrctx);
-	seq_printf(m, "\n");
+	if (dev_priv->pwrctx) {
+		seq_printf(m, "power context ");
+		describe_obj(m, dev_priv->pwrctx);
+		seq_printf(m, "\n");
+	}
 
-	seq_printf(m, "render context ");
-	describe_obj(m, dev_priv->renderctx);
-	seq_printf(m, "\n");
+	if (dev_priv->renderctx) {
+		seq_printf(m, "render context ");
+		describe_obj(m, dev_priv->renderctx);
+		seq_printf(m, "\n");
+	}
 
 	mutex_unlock(&dev->mode_config.mutex);
 
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 0239e9974bf2..e1787022d6c8 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1266,30 +1266,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 	intel_modeset_gem_init(dev);
 
-	if (IS_IVYBRIDGE(dev)) {
-		/* Share pre & uninstall handlers with ILK/SNB */
-		dev->driver->irq_handler = ivybridge_irq_handler;
-		dev->driver->irq_preinstall = ironlake_irq_preinstall;
-		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
-		dev->driver->irq_uninstall = ironlake_irq_uninstall;
-		dev->driver->enable_vblank = ivybridge_enable_vblank;
-		dev->driver->disable_vblank = ivybridge_disable_vblank;
-	} else if (HAS_PCH_SPLIT(dev)) {
-		dev->driver->irq_handler = ironlake_irq_handler;
-		dev->driver->irq_preinstall = ironlake_irq_preinstall;
-		dev->driver->irq_postinstall = ironlake_irq_postinstall;
-		dev->driver->irq_uninstall = ironlake_irq_uninstall;
-		dev->driver->enable_vblank = ironlake_enable_vblank;
-		dev->driver->disable_vblank = ironlake_disable_vblank;
-	} else {
-		dev->driver->irq_preinstall = i915_driver_irq_preinstall;
-		dev->driver->irq_postinstall = i915_driver_irq_postinstall;
-		dev->driver->irq_uninstall = i915_driver_irq_uninstall;
-		dev->driver->irq_handler = i915_driver_irq_handler;
-		dev->driver->enable_vblank = i915_enable_vblank;
-		dev->driver->disable_vblank = i915_disable_vblank;
-	}
-
 	ret = drm_irq_install(dev);
 	if (ret)
 		goto cleanup_gem;
@@ -2017,12 +1993,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	/* enable GEM by default */
 	dev_priv->has_gem = 1;
 
-	dev->driver->get_vblank_counter = i915_get_vblank_counter;
-	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
-	if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
-		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
-		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
-	}
+	intel_irq_init(dev);
 
 	/* Try to make sure MCHBAR is enabled before poking at it */
 	intel_setup_mchbar(dev);
@@ -2182,9 +2153,8 @@ int i915_driver_unload(struct drm_device *dev)
 	/* Flush any outstanding unpin_work. */
 	flush_workqueue(dev_priv->wq);
 
-	i915_gem_free_all_phys_object(dev);
-
 	mutex_lock(&dev->struct_mutex);
+	i915_gem_free_all_phys_object(dev);
 	i915_gem_cleanup_ringbuffer(dev);
 	mutex_unlock(&dev->struct_mutex);
 	if (I915_HAS_FBC(dev) && i915_powersave)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 0defd4270594..013d304455b9 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -579,6 +579,9 @@ int i915_reset(struct drm_device *dev, u8 flags)
 	} else switch (INTEL_INFO(dev)->gen) {
 	case 6:
 		ret = gen6_do_reset(dev, flags);
+		/* If reset with a user forcewake, try to restore */
+		if (atomic_read(&dev_priv->forcewake_count))
+			__gen6_gt_force_wake_get(dev_priv);
 		break;
 	case 5:
 		ret = ironlake_do_reset(dev, flags);
@@ -762,14 +765,6 @@ static struct drm_driver driver = {
 	.resume = i915_resume,
 
 	.device_is_agp = i915_driver_device_is_agp,
-	.enable_vblank = i915_enable_vblank,
-	.disable_vblank = i915_disable_vblank,
-	.get_vblank_timestamp = i915_get_vblank_timestamp,
-	.get_scanout_position = i915_get_crtc_scanoutpos,
-	.irq_preinstall = i915_driver_irq_preinstall,
-	.irq_postinstall = i915_driver_irq_postinstall,
-	.irq_uninstall = i915_driver_irq_uninstall,
-	.irq_handler = i915_driver_irq_handler,
 	.reclaim_buffers = drm_core_reclaim_buffers,
 	.master_create = i915_master_create,
 	.master_destroy = i915_master_destroy,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f63ee162f124..f245c588ae95 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -211,6 +211,9 @@ struct drm_i915_display_funcs {
 	void (*fdi_link_train)(struct drm_crtc *crtc);
 	void (*init_clock_gating)(struct drm_device *dev);
 	void (*init_pch_clock_gating)(struct drm_device *dev);
+	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
+			  struct drm_framebuffer *fb,
+			  struct drm_i915_gem_object *obj);
 	/* clock updates for mode set */
 	/* cursor updates */
 	/* render clock increase/decrease */
@@ -994,8 +997,6 @@ extern unsigned int i915_enable_fbc;
 
 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
 extern int i915_resume(struct drm_device *dev);
-extern void i915_save_display(struct drm_device *dev);
-extern void i915_restore_display(struct drm_device *dev);
 extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
 extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
 
@@ -1030,33 +1031,12 @@ extern int i915_irq_emit(struct drm_device *dev, void *data,
 extern int i915_irq_wait(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
 
-extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
-extern void i915_driver_irq_preinstall(struct drm_device * dev);
-extern int i915_driver_irq_postinstall(struct drm_device *dev);
-extern void i915_driver_irq_uninstall(struct drm_device * dev);
-
-extern irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS);
-extern void ironlake_irq_preinstall(struct drm_device *dev);
-extern int ironlake_irq_postinstall(struct drm_device *dev);
-extern void ironlake_irq_uninstall(struct drm_device *dev);
-
-extern irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS);
-extern void ivybridge_irq_preinstall(struct drm_device *dev);
-extern int ivybridge_irq_postinstall(struct drm_device *dev);
-extern void ivybridge_irq_uninstall(struct drm_device *dev);
+extern void intel_irq_init(struct drm_device *dev);
 
 extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
 extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
-extern int i915_enable_vblank(struct drm_device *dev, int crtc);
-extern void i915_disable_vblank(struct drm_device *dev, int crtc);
-extern int ironlake_enable_vblank(struct drm_device *dev, int crtc);
-extern void ironlake_disable_vblank(struct drm_device *dev, int crtc);
-extern int ivybridge_enable_vblank(struct drm_device *dev, int crtc);
-extern void ivybridge_disable_vblank(struct drm_device *dev, int crtc);
-extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
-extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
 extern int i915_vblank_swap(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv);
@@ -1067,13 +1047,6 @@ void
 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 
 void intel_enable_asle (struct drm_device *dev);
-int i915_get_vblank_timestamp(struct drm_device *dev, int crtc,
-			      int *max_error,
-			      struct timeval *vblank_time,
-			      unsigned flags);
-
-int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
-			     int *vpos, int *hpos);
 
 #ifdef CONFIG_DEBUG_FS
 extern void i915_destroy_error_state(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c6389de53161..5c0d1247f453 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -31,6 +31,7 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
+#include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/pci.h>
@@ -359,8 +360,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev,
 		if ((page_offset + remain) > PAGE_SIZE)
 			page_length = PAGE_SIZE - page_offset;
 
-		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
 		if (IS_ERR(page))
 			return PTR_ERR(page);
 
@@ -463,8 +463,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
 		if ((data_page_offset + page_length) > PAGE_SIZE)
 			page_length = PAGE_SIZE - data_page_offset;
 
-		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
 		if (IS_ERR(page)) {
 			ret = PTR_ERR(page);
 			goto out;
@@ -797,8 +796,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev,
 		if ((page_offset + remain) > PAGE_SIZE)
 			page_length = PAGE_SIZE - page_offset;
 
-		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
 		if (IS_ERR(page))
 			return PTR_ERR(page);
 
@@ -907,8 +905,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
 		if ((data_page_offset + page_length) > PAGE_SIZE)
 			page_length = PAGE_SIZE - data_page_offset;
 
-		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
 		if (IS_ERR(page)) {
 			ret = PTR_ERR(page);
 			goto out;
@@ -1558,12 +1555,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
 
 	inode = obj->base.filp->f_path.dentry->d_inode;
 	mapping = inode->i_mapping;
+	gfpmask |= mapping_gfp_mask(mapping);
+
 	for (i = 0; i < page_count; i++) {
-		page = read_cache_page_gfp(mapping, i,
-					   GFP_HIGHUSER |
-					   __GFP_COLD |
-					   __GFP_RECLAIMABLE |
-					   gfpmask);
+		page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
 		if (IS_ERR(page))
 			goto err_pages;
 
@@ -1701,13 +1696,10 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
 	/* Our goal here is to return as much of the memory as
 	 * is possible back to the system as we are called from OOM.
 	 * To do this we must instruct the shmfs to drop all of its
-	 * backing pages, *now*. Here we mirror the actions taken
-	 * when by shmem_delete_inode() to release the backing store.
+	 * backing pages, *now*.
 	 */
 	inode = obj->base.filp->f_path.dentry->d_inode;
-	truncate_inode_pages(inode->i_mapping, 0);
-	if (inode->i_op->truncate_range)
-		inode->i_op->truncate_range(inode, 0, (loff_t)-1);
+	shmem_truncate_range(inode, 0, (loff_t)-1);
 
 	obj->madv = __I915_MADV_PURGED;
 }
@@ -2080,8 +2072,8 @@ i915_wait_request(struct intel_ring_buffer *ring,
 	if (!ier) {
 		DRM_ERROR("something (likely vbetool) disabled "
 			  "interrupts, re-enabling\n");
-		i915_driver_irq_preinstall(ring->dev);
-		i915_driver_irq_postinstall(ring->dev);
+		ring->dev->driver->irq_preinstall(ring->dev);
+		ring->dev->driver->irq_postinstall(ring->dev);
 	}
 
 	trace_i915_gem_request_wait_begin(ring, seqno);
@@ -3565,6 +3557,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
+	struct address_space *mapping;
 
 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 	if (obj == NULL)
@@ -3575,6 +3568,9 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 		return NULL;
 	}
 
+	mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
+
 	i915_gem_info_add_obj(dev_priv, size);
 
 	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
@@ -3950,8 +3946,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
 
 	page_count = obj->base.size / PAGE_SIZE;
 	for (i = 0; i < page_count; i++) {
-		struct page *page = read_cache_page_gfp(mapping, i,
-							GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		struct page *page = shmem_read_mapping_page(mapping, i);
 		if (!IS_ERR(page)) {
 			char *dst = kmap_atomic(page);
 			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
@@ -4012,8 +4007,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
 		struct page *page;
 		char *dst, *src;
 
-		page = read_cache_page_gfp(mapping, i,
-					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		page = shmem_read_mapping_page(mapping, i);
 		if (IS_ERR(page))
 			return PTR_ERR(page);
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index ae2b49969b99..3b03f85ea627 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -152,7 +152,7 @@ i915_pipe_enabled(struct drm_device *dev, int pipe)
 /* Called from drm generic code, passed a 'crtc', which
  * we use as a pipe index
  */
-u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
+static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long high_frame;
@@ -184,7 +184,7 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
 	return (high1 << 8) | low;
 }
 
-u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
+static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	int reg = PIPE_FRMCOUNT_GM45(pipe);
@@ -198,7 +198,7 @@ u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
 	return I915_READ(reg);
 }
 
-int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
+static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
 			     int *vpos, int *hpos)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -264,7 +264,7 @@ int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
 	return ret;
 }
 
-int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
+static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
 			      int *max_error,
 			      struct timeval *vblank_time,
 			      unsigned flags)
@@ -462,7 +462,7 @@ static void pch_irq_handler(struct drm_device *dev)
 		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
 }
 
-irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -550,7 +550,7 @@ done:
 	return ret;
 }
 
-irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1209,7 +1209,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
 	}
 }
 
-irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1454,7 +1454,7 @@ int i915_irq_wait(struct drm_device *dev, void *data,
 /* Called from drm generic code, passed 'crtc' which
  * we use as a pipe index
  */
-int i915_enable_vblank(struct drm_device *dev, int pipe)
+static int i915_enable_vblank(struct drm_device *dev, int pipe)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
@@ -1478,7 +1478,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
 	return 0;
 }
 
-int ironlake_enable_vblank(struct drm_device *dev, int pipe)
+static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
@@ -1494,7 +1494,7 @@ int ironlake_enable_vblank(struct drm_device *dev, int pipe)
 	return 0;
 }
 
-int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
+static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
@@ -1513,7 +1513,7 @@ int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
 /* Called from drm generic code, passed 'crtc' which
  * we use as a pipe index
  */
-void i915_disable_vblank(struct drm_device *dev, int pipe)
+static void i915_disable_vblank(struct drm_device *dev, int pipe)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
@@ -1529,7 +1529,7 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
-void ironlake_disable_vblank(struct drm_device *dev, int pipe)
+static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
@@ -1540,7 +1540,7 @@ void ironlake_disable_vblank(struct drm_device *dev, int pipe)
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
-void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
+static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
@@ -1728,7 +1728,7 @@ repeat:
 
 /* drm_dma.h hooks
 */
-void ironlake_irq_preinstall(struct drm_device *dev)
+static void ironlake_irq_preinstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
@@ -1740,7 +1740,7 @@ void ironlake_irq_preinstall(struct drm_device *dev)
 	INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
 
 	I915_WRITE(HWSTAM, 0xeffe);
-	if (IS_GEN6(dev)) {
+	if (IS_GEN6(dev) || IS_GEN7(dev)) {
 		/* Workaround stalls observed on Sandy Bridge GPUs by
 		 * making the blitter command streamer generate a
 		 * write to the Hardware Status Page for
@@ -1769,7 +1769,7 @@ void ironlake_irq_preinstall(struct drm_device *dev)
 	POSTING_READ(SDEIER);
 }
 
-int ironlake_irq_postinstall(struct drm_device *dev)
+static int ironlake_irq_postinstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	/* enable kind of interrupts always enabled */
@@ -1841,7 +1841,7 @@ int ironlake_irq_postinstall(struct drm_device *dev)
 	return 0;
 }
 
-int ivybridge_irq_postinstall(struct drm_device *dev)
+static int ivybridge_irq_postinstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	/* enable kind of interrupts always enabled */
@@ -1891,7 +1891,7 @@ int ivybridge_irq_postinstall(struct drm_device *dev)
 	return 0;
 }
 
-void i915_driver_irq_preinstall(struct drm_device * dev)
+static void i915_driver_irq_preinstall(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	int pipe;
@@ -1918,7 +1918,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
  * Must be called after intel_modeset_init or hotplug interrupts won't be
  * enabled correctly.
  */
-int i915_driver_irq_postinstall(struct drm_device *dev)
+static int i915_driver_irq_postinstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
@@ -1994,7 +1994,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
 	return 0;
 }
 
-void ironlake_irq_uninstall(struct drm_device *dev)
+static void ironlake_irq_uninstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
@@ -2014,7 +2014,7 @@ void ironlake_irq_uninstall(struct drm_device *dev)
 	I915_WRITE(GTIIR, I915_READ(GTIIR));
 }
 
-void i915_driver_irq_uninstall(struct drm_device * dev)
+static void i915_driver_irq_uninstall(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	int pipe;
@@ -2040,3 +2040,41 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
 		  I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
 	I915_WRITE(IIR, I915_READ(IIR));
 }
+
+void intel_irq_init(struct drm_device *dev)
+{
+	dev->driver->get_vblank_counter = i915_get_vblank_counter;
+	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+	if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
+		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
+		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
+	}
+
+
+	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
+	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
+
+	if (IS_IVYBRIDGE(dev)) {
+		/* Share pre & uninstall handlers with ILK/SNB */
+		dev->driver->irq_handler = ivybridge_irq_handler;
+		dev->driver->irq_preinstall = ironlake_irq_preinstall;
+		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
+		dev->driver->irq_uninstall = ironlake_irq_uninstall;
+		dev->driver->enable_vblank = ivybridge_enable_vblank;
+		dev->driver->disable_vblank = ivybridge_disable_vblank;
+	} else if (HAS_PCH_SPLIT(dev)) {
+		dev->driver->irq_handler = ironlake_irq_handler;
+		dev->driver->irq_preinstall = ironlake_irq_preinstall;
+		dev->driver->irq_postinstall = ironlake_irq_postinstall;
+		dev->driver->irq_uninstall = ironlake_irq_uninstall;
+		dev->driver->enable_vblank = ironlake_enable_vblank;
+		dev->driver->disable_vblank = ironlake_disable_vblank;
+	} else {
+		dev->driver->irq_preinstall = i915_driver_irq_preinstall;
+		dev->driver->irq_postinstall = i915_driver_irq_postinstall;
+		dev->driver->irq_uninstall = i915_driver_irq_uninstall;
+		dev->driver->irq_handler = i915_driver_irq_handler;
+		dev->driver->enable_vblank = i915_enable_vblank;
+		dev->driver->disable_vblank = i915_disable_vblank;
+	}
+}
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index e8152d23d5b6..5257cfc34c35 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -597,7 +597,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
 	return;
 }
 
-void i915_save_display(struct drm_device *dev)
+static void i915_save_display(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -678,7 +678,6 @@ void i915_save_display(struct drm_device *dev)
 	}
 
 	/* VGA state */
-	mutex_lock(&dev->struct_mutex);
 	dev_priv->saveVGA0 = I915_READ(VGA0);
 	dev_priv->saveVGA1 = I915_READ(VGA1);
 	dev_priv->saveVGA_PD = I915_READ(VGA_PD);
@@ -688,10 +687,9 @@ void i915_save_display(struct drm_device *dev)
 	dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
 
 	i915_save_vga(dev);
-	mutex_unlock(&dev->struct_mutex);
 }
 
-void i915_restore_display(struct drm_device *dev)
+static void i915_restore_display(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -783,7 +781,6 @@ void i915_restore_display(struct drm_device *dev)
 	else
 		I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
 
-	mutex_lock(&dev->struct_mutex);
 	I915_WRITE(VGA0, dev_priv->saveVGA0);
 	I915_WRITE(VGA1, dev_priv->saveVGA1);
 	I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
@@ -791,7 +788,6 @@ void i915_restore_display(struct drm_device *dev)
 	udelay(150);
 
 	i915_restore_vga(dev);
-	mutex_unlock(&dev->struct_mutex);
 }
 
 int i915_save_state(struct drm_device *dev)
@@ -801,6 +797,8 @@ int i915_save_state(struct drm_device *dev)
 
 	pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
 
+	mutex_lock(&dev->struct_mutex);
+
 	/* Hardware status page */
 	dev_priv->saveHWS = I915_READ(HWS_PGA);
 
@@ -840,6 +838,8 @@ int i915_save_state(struct drm_device *dev)
 	for (i = 0; i < 3; i++)
 		dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
 
+	mutex_unlock(&dev->struct_mutex);
+
 	return 0;
 }
 
@@ -850,6 +850,8 @@ int i915_restore_state(struct drm_device *dev)
 
 	pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
 
+	mutex_lock(&dev->struct_mutex);
+
 	/* Hardware status page */
 	I915_WRITE(HWS_PGA, dev_priv->saveHWS);
 
@@ -867,6 +869,7 @@ int i915_restore_state(struct drm_device *dev)
 		I915_WRITE(IER, dev_priv->saveIER);
 		I915_WRITE(IMR, dev_priv->saveIMR);
 	}
+	mutex_unlock(&dev->struct_mutex);
 
 	intel_init_clock_gating(dev);
 
@@ -878,6 +881,8 @@ int i915_restore_state(struct drm_device *dev)
 	if (IS_GEN6(dev))
 		gen6_enable_rps(dev_priv);
 
+	mutex_lock(&dev->struct_mutex);
+
 	/* Cache mode state */
 	I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
 
@@ -891,6 +896,8 @@ int i915_restore_state(struct drm_device *dev)
 	for (i = 0; i < 3; i++)
 		I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
 
+	mutex_unlock(&dev->struct_mutex);
+
 	intel_i2c_reset(dev);
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index aa43e7be6053..21b6f93fe919 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -6261,6 +6261,197 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
+static int intel_gen2_queue_flip(struct drm_device *dev,
+				 struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	unsigned long offset;
+	u32 flip_mask;
+	int ret;
+
+	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+	if (ret)
+		goto out;
+
+	/* Offset into the new buffer for cases of shared fbs between CRTCs */
+	offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
+
+	ret = BEGIN_LP_RING(6);
+	if (ret)
+		goto out;
+
+	/* Can't queue multiple flips, so wait for the previous
+	 * one to finish before executing the next.
+	 */
+	if (intel_crtc->plane)
+		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+	else
+		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+	OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
+	OUT_RING(MI_NOOP);
+	OUT_RING(MI_DISPLAY_FLIP |
+		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+	OUT_RING(fb->pitch);
+	OUT_RING(obj->gtt_offset + offset);
+	OUT_RING(MI_NOOP);
+	ADVANCE_LP_RING();
+out:
+	return ret;
+}
+
+static int intel_gen3_queue_flip(struct drm_device *dev,
+				 struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	unsigned long offset;
+	u32 flip_mask;
+	int ret;
+
+	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+	if (ret)
+		goto out;
+
+	/* Offset into the new buffer for cases of shared fbs between CRTCs */
+	offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
+
+	ret = BEGIN_LP_RING(6);
+	if (ret)
+		goto out;
+
+	if (intel_crtc->plane)
+		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+	else
+		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+	OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
+	OUT_RING(MI_NOOP);
+	OUT_RING(MI_DISPLAY_FLIP_I915 |
+		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+	OUT_RING(fb->pitch);
+	OUT_RING(obj->gtt_offset + offset);
+	OUT_RING(MI_NOOP);
+
+	ADVANCE_LP_RING();
+out:
+	return ret;
+}
+
+static int intel_gen4_queue_flip(struct drm_device *dev,
+				 struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	uint32_t pf, pipesrc;
+	int ret;
+
+	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+	if (ret)
+		goto out;
+
+	ret = BEGIN_LP_RING(4);
+	if (ret)
+		goto out;
+
+	/* i965+ uses the linear or tiled offsets from the
+	 * Display Registers (which do not change across a page-flip)
+	 * so we need only reprogram the base address.
+	 */
+	OUT_RING(MI_DISPLAY_FLIP |
+		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+	OUT_RING(fb->pitch);
+	OUT_RING(obj->gtt_offset | obj->tiling_mode);
+
+	/* XXX Enabling the panel-fitter across page-flip is so far
+	 * untested on non-native modes, so ignore it for now.
+	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
+	 */
+	pf = 0;
+	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+	OUT_RING(pf | pipesrc);
+	ADVANCE_LP_RING();
+out:
+	return ret;
+}
+
+static int intel_gen6_queue_flip(struct drm_device *dev,
+				 struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	uint32_t pf, pipesrc;
+	int ret;
+
+	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+	if (ret)
+		goto out;
+
+	ret = BEGIN_LP_RING(4);
+	if (ret)
+		goto out;
+
+	OUT_RING(MI_DISPLAY_FLIP |
+		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+	OUT_RING(fb->pitch | obj->tiling_mode);
+	OUT_RING(obj->gtt_offset);
+
+	pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
+	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+	OUT_RING(pf | pipesrc);
+	ADVANCE_LP_RING();
+out:
+	return ret;
+}
+
+/*
+ * On gen7 we currently use the blit ring because (in early silicon at least)
+ * the render ring doesn't give us interrupts for page flip completion, which
+ * means clients will hang after the first flip is queued. Fortunately the
+ * blit ring generates interrupts properly, so use it instead.
+ */
+static int intel_gen7_queue_flip(struct drm_device *dev,
+				 struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+	int ret;
+
+	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
+	if (ret)
+		goto out;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		goto out;
+
+	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
+	intel_ring_emit(ring, (fb->pitch | obj->tiling_mode));
+	intel_ring_emit(ring, (obj->gtt_offset));
+	intel_ring_emit(ring, (MI_NOOP));
+	intel_ring_advance(ring);
+out:
+	return ret;
+}
+
+static int intel_default_queue_flip(struct drm_device *dev,
+				    struct drm_crtc *crtc,
+				    struct drm_framebuffer *fb,
+				    struct drm_i915_gem_object *obj)
+{
+	return -ENODEV;
+}
+
 static int intel_crtc_page_flip(struct drm_crtc *crtc,
 				struct drm_framebuffer *fb,
 				struct drm_pending_vblank_event *event)
@@ -6271,9 +6462,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	struct drm_i915_gem_object *obj;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_unpin_work *work;
-	unsigned long flags, offset;
-	int pipe = intel_crtc->pipe;
-	u32 pf, pipesrc;
+	unsigned long flags;
 	int ret;
 
 	work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -6302,9 +6491,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
6302 obj = intel_fb->obj; 6491 obj = intel_fb->obj;
6303 6492
6304 mutex_lock(&dev->struct_mutex); 6493 mutex_lock(&dev->struct_mutex);
6305 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
6306 if (ret)
6307 goto cleanup_work;
6308 6494
6309 /* Reference the objects for the scheduled work. */ 6495 /* Reference the objects for the scheduled work. */
6310 drm_gem_object_reference(&work->old_fb_obj->base); 6496 drm_gem_object_reference(&work->old_fb_obj->base);
@@ -6316,91 +6502,18 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
6316 if (ret) 6502 if (ret)
6317 goto cleanup_objs; 6503 goto cleanup_objs;
6318 6504
6319 if (IS_GEN3(dev) || IS_GEN2(dev)) {
6320 u32 flip_mask;
6321
6322 /* Can't queue multiple flips, so wait for the previous
6323 * one to finish before executing the next.
6324 */
6325 ret = BEGIN_LP_RING(2);
6326 if (ret)
6327 goto cleanup_objs;
6328
6329 if (intel_crtc->plane)
6330 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
6331 else
6332 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
6333 OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
6334 OUT_RING(MI_NOOP);
6335 ADVANCE_LP_RING();
6336 }
6337
6338 work->pending_flip_obj = obj; 6505 work->pending_flip_obj = obj;
6339 6506
6340 work->enable_stall_check = true; 6507 work->enable_stall_check = true;
6341 6508
6342 /* Offset into the new buffer for cases of shared fbs between CRTCs */
6343 offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
6344
6345 ret = BEGIN_LP_RING(4);
6346 if (ret)
6347 goto cleanup_objs;
6348
6349 /* Block clients from rendering to the new back buffer until 6509 /* Block clients from rendering to the new back buffer until
6350 * the flip occurs and the object is no longer visible. 6510 * the flip occurs and the object is no longer visible.
6351 */ 6511 */
6352 atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); 6512 atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
6353 6513
6354 switch (INTEL_INFO(dev)->gen) { 6514 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
6355 case 2: 6515 if (ret)
6356 OUT_RING(MI_DISPLAY_FLIP | 6516 goto cleanup_pending;
6357 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6358 OUT_RING(fb->pitch);
6359 OUT_RING(obj->gtt_offset + offset);
6360 OUT_RING(MI_NOOP);
6361 break;
6362
6363 case 3:
6364 OUT_RING(MI_DISPLAY_FLIP_I915 |
6365 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6366 OUT_RING(fb->pitch);
6367 OUT_RING(obj->gtt_offset + offset);
6368 OUT_RING(MI_NOOP);
6369 break;
6370
6371 case 4:
6372 case 5:
6373 /* i965+ uses the linear or tiled offsets from the
6374 * Display Registers (which do not change across a page-flip)
6375 * so we need only reprogram the base address.
6376 */
6377 OUT_RING(MI_DISPLAY_FLIP |
6378 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6379 OUT_RING(fb->pitch);
6380 OUT_RING(obj->gtt_offset | obj->tiling_mode);
6381
6382 /* XXX Enabling the panel-fitter across page-flip is so far
6383 * untested on non-native modes, so ignore it for now.
6384 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
6385 */
6386 pf = 0;
6387 pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
6388 OUT_RING(pf | pipesrc);
6389 break;
6390
6391 case 6:
6392 case 7:
6393 OUT_RING(MI_DISPLAY_FLIP |
6394 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6395 OUT_RING(fb->pitch | obj->tiling_mode);
6396 OUT_RING(obj->gtt_offset);
6397
6398 pf = I915_READ(PF_CTL(pipe)) & PF_ENABLE;
6399 pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
6400 OUT_RING(pf | pipesrc);
6401 break;
6402 }
6403 ADVANCE_LP_RING();
6404 6517
6405 mutex_unlock(&dev->struct_mutex); 6518 mutex_unlock(&dev->struct_mutex);
6406 6519
@@ -6408,10 +6521,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
6408 6521
6409 return 0; 6522 return 0;
6410 6523
6524cleanup_pending:
6525 atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
6411cleanup_objs: 6526cleanup_objs:
6412 drm_gem_object_unreference(&work->old_fb_obj->base); 6527 drm_gem_object_unreference(&work->old_fb_obj->base);
6413 drm_gem_object_unreference(&obj->base); 6528 drm_gem_object_unreference(&obj->base);
6414cleanup_work:
6415 mutex_unlock(&dev->struct_mutex); 6529 mutex_unlock(&dev->struct_mutex);
6416 6530
6417 spin_lock_irqsave(&dev->event_lock, flags); 6531 spin_lock_irqsave(&dev->event_lock, flags);
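The new cleanup_pending label lets a failed queue_flip() undo the atomic_add() on pending_flip before the usual unwind continues. A hedged sketch of that balanced do/undo shape, simplified and not the driver's actual state:

#include <stdlib.h>

/* Each error label undoes exactly the steps that succeeded before the
 * failure point, in reverse order. */
static int do_flip(int queue_ok)
{
	int pending = 0;
	char *work = malloc(16);
	if (!work)
		return -1;

	pending += 1;			/* mirrors atomic_add(1 << plane, ...) */

	if (!queue_ok)
		goto cleanup_pending;	/* queue_flip() failed */

	free(work);
	return 0;

cleanup_pending:
	pending -= 1;			/* mirrors atomic_sub(1 << plane, ...) */
	free(work);
	return pending ? -2 : -1;	/* pending is balanced back to zero */
}

int main(void) { return do_flip(1); }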
@@ -7656,6 +7770,31 @@ static void intel_init_display(struct drm_device *dev)
 		else
 			dev_priv->display.get_fifo_size = i830_get_fifo_size;
 	}
+
+	/* Default just returns -ENODEV to indicate unsupported */
+	dev_priv->display.queue_flip = intel_default_queue_flip;
+
+	switch (INTEL_INFO(dev)->gen) {
+	case 2:
+		dev_priv->display.queue_flip = intel_gen2_queue_flip;
+		break;
+
+	case 3:
+		dev_priv->display.queue_flip = intel_gen3_queue_flip;
+		break;
+
+	case 4:
+	case 5:
+		dev_priv->display.queue_flip = intel_gen4_queue_flip;
+		break;
+
+	case 6:
+		dev_priv->display.queue_flip = intel_gen6_queue_flip;
+		break;
+	case 7:
+		dev_priv->display.queue_flip = intel_gen7_queue_flip;
+		break;
+	}
 }
 
 /*
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a670c006982e..9e2959bc91cd 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1409,6 +1409,11 @@ void intel_setup_overlay(struct drm_device *dev)
 	overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
 	if (!overlay)
 		return;
+
+	mutex_lock(&dev->struct_mutex);
+	if (WARN_ON(dev_priv->overlay))
+		goto out_free;
+
 	overlay->dev = dev;
 
 	reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
@@ -1448,7 +1453,7 @@ void intel_setup_overlay(struct drm_device *dev)
 
 	regs = intel_overlay_map_regs(overlay);
 	if (!regs)
-		goto out_free_bo;
+		goto out_unpin_bo;
 
 	memset(regs, 0, sizeof(struct overlay_registers));
 	update_polyphase_filter(regs);
@@ -1457,14 +1462,17 @@ void intel_setup_overlay(struct drm_device *dev)
 	intel_overlay_unmap_regs(overlay, regs);
 
 	dev_priv->overlay = overlay;
+	mutex_unlock(&dev->struct_mutex);
 	DRM_INFO("initialized overlay support\n");
 	return;
 
 out_unpin_bo:
-	i915_gem_object_unpin(reg_bo);
+	if (!OVERLAY_NEEDS_PHYSICAL(dev))
+		i915_gem_object_unpin(reg_bo);
 out_free_bo:
 	drm_gem_object_unreference(&reg_bo->base);
 out_free:
+	mutex_unlock(&dev->struct_mutex);
 	kfree(overlay);
 	return;
 }
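The overlay hunks bracket setup with struct_mutex and reject double initialization with WARN_ON; every exit path, success or error, must drop the lock. A small pthread sketch of the same shape, with illustrative names:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *global_overlay;	/* stands in for dev_priv->overlay */

static void setup_overlay(void)
{
	void *overlay = malloc(32);
	if (!overlay)
		return;

	pthread_mutex_lock(&lock);
	if (global_overlay) {		/* mirrors WARN_ON(dev_priv->overlay) */
		fprintf(stderr, "overlay already initialized\n");
		goto out_free;
	}

	global_overlay = overlay;
	pthread_mutex_unlock(&lock);	/* the success path unlocks too */
	return;

out_free:
	pthread_mutex_unlock(&lock);	/* every error path releases the lock */
	free(overlay);
}

int main(void)
{
	setup_overlay();
	setup_overlay();	/* second call trips the double-init check */
	return 0;
}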
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 144f79a350ae..731acea865b5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -371,7 +371,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->vram.flags_valid = nv50_vram_flags_valid;
 		break;
 	case 0xC0:
-	case 0xD0:
 		engine->instmem.init = nvc0_instmem_init;
 		engine->instmem.takedown = nvc0_instmem_takedown;
 		engine->instmem.suspend = nvc0_instmem_suspend;
@@ -923,7 +922,6 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
 		dev_priv->card_type = NV_50;
 		break;
 	case 0xc0:
-	case 0xd0:
 		dev_priv->card_type = NV_C0;
 		break;
 	default:
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 12d2fdc52414..e8a5ffb0124d 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2248,7 +2248,10 @@ int evergreen_mc_init(struct radeon_device *rdev)
 
 	/* Get VRAM information */
 	rdev->mc.vram_is_ddr = true;
-	tmp = RREG32(MC_ARB_RAMCFG);
+	if (rdev->flags & RADEON_IS_IGP)
+		tmp = RREG32(FUS_MC_ARB_RAMCFG);
+	else
+		tmp = RREG32(MC_ARB_RAMCFG);
 	if (tmp & CHANSIZE_OVERRIDE) {
 		chansize = 16;
 	} else if (tmp & CHANSIZE_MASK) {
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 9736746da2d6..4672869cdb26 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -320,7 +320,7 @@
 #define CGTS_USER_TCC_DISABLE		0x914C
 #define		TCC_DISABLE_MASK	0xFFFF0000
 #define		TCC_DISABLE_SHIFT	16
-#define CGTS_SM_CTRL_REG		0x915C
+#define CGTS_SM_CTRL_REG		0x9150
 #define		OVERRIDE		(1 << 21)
 
 #define TA_CNTL_AUX			0x9508
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 1aba85cad1a8..3fc5fa1aefd0 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -104,7 +104,7 @@ static bool radeon_read_bios(struct radeon_device *rdev)
 static bool radeon_atrm_get_bios(struct radeon_device *rdev)
 {
 	int ret;
-	int size = 64 * 1024;
+	int size = 256 * 1024;
 	int i;
 
 	if (!radeon_atrm_supported(rdev->pdev))
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 6f508ffd1035..8bb347d23ca6 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -575,6 +575,12 @@ static void rv770_program_channel_remap(struct radeon_device *rdev)
 	else
 		tcp_chan_steer = 0x00fac688;
 
+	/* RV770 CE has special chremap setup */
+	if (rdev->pdev->device == 0x944e) {
+		tcp_chan_steer = 0x00b08b08;
+		mc_shared_chremap = 0x00b08b08;
+	}
+
 	WREG32(TCP_CHAN_STEER, tcp_chan_steer);
 	WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
 }
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 90e23e0bfadb..58c271ebc0f7 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -31,6 +31,7 @@
 #include <linux/sched.h>
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
+#include <linux/shmem_fs.h>
 #include <linux/file.h>
 #include <linux/swap.h>
 #include <linux/slab.h>
@@ -484,7 +485,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
 	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
 
 	for (i = 0; i < ttm->num_pages; ++i) {
-		from_page = read_mapping_page(swap_space, i, NULL);
+		from_page = shmem_read_mapping_page(swap_space, i);
 		if (IS_ERR(from_page)) {
 			ret = PTR_ERR(from_page);
 			goto out_err;
@@ -557,7 +558,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
 		from_page = ttm->pages[i];
 		if (unlikely(from_page == NULL))
 			continue;
-		to_page = read_mapping_page(swap_space, i, NULL);
+		to_page = shmem_read_mapping_page(swap_space, i);
 		if (unlikely(IS_ERR(to_page))) {
 			ret = PTR_ERR(to_page);
 			goto out_err;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index f7440e8ce3e7..6f3289a57888 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1423,6 +1423,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LUMIO, USB_DEVICE_ID_CRYSTALTOUCH) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_LUMIO, USB_DEVICE_ID_CRYSTALTOUCH_DUAL) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index aecb5a4b8d6d..a756ee6c7df5 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -449,6 +449,7 @@
 
 #define USB_VENDOR_ID_LUMIO		0x202e
 #define USB_DEVICE_ID_CRYSTALTOUCH	0x0006
+#define USB_DEVICE_ID_CRYSTALTOUCH_DUAL	0x0007
 
 #define USB_VENDOR_ID_MCC		0x09db
 #define USB_DEVICE_ID_MCC_PMD1024LS	0x0076
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 0b2dcd0ee591..62cac4dc3b62 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -271,6 +271,8 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
 			}
 			return 1;
 		case HID_DG_CONTACTID:
+			if (!td->maxcontacts)
+				td->maxcontacts = MT_DEFAULT_MAXCONTACT;
 			input_mt_init_slots(hi->input, td->maxcontacts);
 			td->last_slot_field = usage->hid;
 			td->last_field_index = field->index;
@@ -547,9 +549,6 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
 	if (ret)
 		goto fail;
 
-	if (!td->maxcontacts)
-		td->maxcontacts = MT_DEFAULT_MAXCONTACT;
-
 	td->slots = kzalloc(td->maxcontacts * sizeof(struct mt_slot),
 			GFP_KERNEL);
 	if (!td->slots) {
@@ -677,6 +676,9 @@ static const struct hid_device_id mt_devices[] = {
 	{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
 		HID_USB_DEVICE(USB_VENDOR_ID_LUMIO,
 			USB_DEVICE_ID_CRYSTALTOUCH) },
+	{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
+		HID_USB_DEVICE(USB_VENDOR_ID_LUMIO,
+			USB_DEVICE_ID_CRYSTALTOUCH_DUAL) },
 
 	/* MosArt panels */
 	{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
@@ -707,10 +709,10 @@ static const struct hid_device_id mt_devices[] = {
 		HID_USB_DEVICE(USB_VENDOR_ID_STANTUM,
 			USB_DEVICE_ID_MTP)},
 	{ .driver_data = MT_CLS_CONFIDENCE,
-		HID_USB_DEVICE(USB_VENDOR_ID_STANTUM,
+		HID_USB_DEVICE(USB_VENDOR_ID_STANTUM_STM,
 			USB_DEVICE_ID_MTP_STM)},
 	{ .driver_data = MT_CLS_CONFIDENCE,
-		HID_USB_DEVICE(USB_VENDOR_ID_STANTUM,
+		HID_USB_DEVICE(USB_VENDOR_ID_STANTUM_SITRONIX,
 			USB_DEVICE_ID_MTP_SITRONIX)},
 
 	/* Touch International panels */
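The fallback moves because input_mt_init_slots() consumes maxcontacts at mapping time, so a default applied later in mt_probe() came too late for devices that report no maximum. A tiny sketch of the ordering hazard; the constant is made up for illustration:

#include <stdio.h>

#define DEFAULT_MAX_CONTACTS 10	/* stands in for MT_DEFAULT_MAXCONTACT */

static int slots_inited_with;

static void init_slots(int maxcontacts)
{
	slots_inited_with = maxcontacts;	/* consumes the value immediately */
}

int main(void)
{
	int maxcontacts = 0;	/* the report descriptor gave no maximum */

	/* Correct order: apply the fallback before the first consumer. */
	if (!maxcontacts)
		maxcontacts = DEFAULT_MAX_CONTACTS;
	init_slots(maxcontacts);

	printf("slots initialized for %d contacts\n", slots_inited_with);
	return slots_inited_with == DEFAULT_MAX_CONTACTS ? 0 : 1;
}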
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 16db83c83c8b..5f888f7e7dcb 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -333,7 +333,7 @@ config SENSORS_F71882FG
 		    F71858FG
 		    F71862FG
 		    F71863FG
-		    F71869F/E
+		    F71869F/E/A
 		    F71882FG
 		    F71883FG
 		    F71889FG/ED/A
diff --git a/drivers/hwmon/adm1275.c b/drivers/hwmon/adm1275.c
index c2ee2048ab91..b9b7caf4a1d2 100644
--- a/drivers/hwmon/adm1275.c
+++ b/drivers/hwmon/adm1275.c
@@ -32,6 +32,7 @@ static int adm1275_probe(struct i2c_client *client,
 			 const struct i2c_device_id *id)
 {
 	int config;
+	int ret;
 	struct pmbus_driver_info *info;
 
 	if (!i2c_check_functionality(client->adapter,
@@ -43,8 +44,10 @@ static int adm1275_probe(struct i2c_client *client,
 		return -ENOMEM;
 
 	config = i2c_smbus_read_byte_data(client, ADM1275_PMON_CONFIG);
-	if (config < 0)
-		return config;
+	if (config < 0) {
+		ret = config;
+		goto err_mem;
+	}
 
 	info->pages = 1;
 	info->direct[PSC_VOLTAGE_IN] = true;
@@ -76,7 +79,14 @@ static int adm1275_probe(struct i2c_client *client,
 	else
 		info->func[0] |= PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT;
 
-	return pmbus_do_probe(client, id, info);
+	ret = pmbus_do_probe(client, id, info);
+	if (ret)
+		goto err_mem;
+	return 0;
+
+err_mem:
+	kfree(info);
+	return ret;
 }
 
 static int adm1275_remove(struct i2c_client *client)
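Once info is allocated, both a failed config read and a pmbus_do_probe() failure must free it; the single err_mem label keeps that in one place. A hedged sketch of the same funnel, with arbitrary error codes:

#include <stdlib.h>

/* All failures after the allocation funnel through one label that
 * frees it; on success, ownership passes on and nothing is freed here. */
static int probe(int config_ok, int core_ok)
{
	int ret;
	char *info = calloc(1, 64);
	if (!info)
		return -1;

	if (!config_ok) {
		ret = -5;	/* config register read failed */
		goto err_mem;
	}

	if (!core_ok) {
		ret = -19;	/* core probe failed */
		goto err_mem;
	}

	return 0;	/* info now belongs to the core */

err_mem:
	free(info);
	return ret;
}

int main(void) { return probe(1, 1); }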
diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c
index e0ef32378ac6..0064432f361f 100644
--- a/drivers/hwmon/emc6w201.c
+++ b/drivers/hwmon/emc6w201.c
@@ -78,8 +78,9 @@ static u16 emc6w201_read16(struct i2c_client *client, u8 reg)
 
 	lsb = i2c_smbus_read_byte_data(client, reg);
 	msb = i2c_smbus_read_byte_data(client, reg + 1);
-	if (lsb < 0 || msb < 0) {
-		dev_err(&client->dev, "16-bit read failed at 0x%02x\n", reg);
+	if (unlikely(lsb < 0 || msb < 0)) {
+		dev_err(&client->dev, "%d-bit %s failed at 0x%02x\n",
+			16, "read", reg);
 		return 0xFFFF;	/* Arbitrary value */
 	}
 
@@ -95,10 +96,39 @@ static int emc6w201_write16(struct i2c_client *client, u8 reg, u16 val)
 	int err;
 
 	err = i2c_smbus_write_byte_data(client, reg, val & 0xff);
-	if (!err)
+	if (likely(!err))
 		err = i2c_smbus_write_byte_data(client, reg + 1, val >> 8);
-	if (err < 0)
-		dev_err(&client->dev, "16-bit write failed at 0x%02x\n", reg);
+	if (unlikely(err < 0))
+		dev_err(&client->dev, "%d-bit %s failed at 0x%02x\n",
+			16, "write", reg);
+
+	return err;
+}
+
+/* Read 8-bit value from register */
+static u8 emc6w201_read8(struct i2c_client *client, u8 reg)
+{
+	int val;
+
+	val = i2c_smbus_read_byte_data(client, reg);
+	if (unlikely(val < 0)) {
+		dev_err(&client->dev, "%d-bit %s failed at 0x%02x\n",
+			8, "read", reg);
+		return 0x00;	/* Arbitrary value */
+	}
+
+	return val;
+}
+
+/* Write 8-bit value to register */
+static int emc6w201_write8(struct i2c_client *client, u8 reg, u8 val)
+{
+	int err;
+
+	err = i2c_smbus_write_byte_data(client, reg, val);
+	if (unlikely(err < 0))
+		dev_err(&client->dev, "%d-bit %s failed at 0x%02x\n",
+			8, "write", reg);
 
 	return err;
 }
@@ -114,25 +144,25 @@ static struct emc6w201_data *emc6w201_update_device(struct device *dev)
 	if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
 		for (nr = 0; nr < 6; nr++) {
 			data->in[input][nr] =
-				i2c_smbus_read_byte_data(client,
+				emc6w201_read8(client,
 						EMC6W201_REG_IN(nr));
 			data->in[min][nr] =
-				i2c_smbus_read_byte_data(client,
+				emc6w201_read8(client,
 						EMC6W201_REG_IN_LOW(nr));
 			data->in[max][nr] =
-				i2c_smbus_read_byte_data(client,
+				emc6w201_read8(client,
 						EMC6W201_REG_IN_HIGH(nr));
 		}
 
 		for (nr = 0; nr < 6; nr++) {
 			data->temp[input][nr] =
-				i2c_smbus_read_byte_data(client,
+				emc6w201_read8(client,
 						EMC6W201_REG_TEMP(nr));
 			data->temp[min][nr] =
-				i2c_smbus_read_byte_data(client,
+				emc6w201_read8(client,
 						EMC6W201_REG_TEMP_LOW(nr));
 			data->temp[max][nr] =
-				i2c_smbus_read_byte_data(client,
+				emc6w201_read8(client,
 						EMC6W201_REG_TEMP_HIGH(nr));
 		}
 
@@ -192,7 +222,7 @@ static ssize_t set_in(struct device *dev, struct device_attribute *devattr,
 
 	mutex_lock(&data->update_lock);
 	data->in[sf][nr] = SENSORS_LIMIT(val, 0, 255);
-	err = i2c_smbus_write_byte_data(client, reg, data->in[sf][nr]);
+	err = emc6w201_write8(client, reg, data->in[sf][nr]);
 	mutex_unlock(&data->update_lock);
 
 	return err < 0 ? err : count;
@@ -229,7 +259,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
 
 	mutex_lock(&data->update_lock);
 	data->temp[sf][nr] = SENSORS_LIMIT(val, -127, 128);
-	err = i2c_smbus_write_byte_data(client, reg, data->temp[sf][nr]);
+	err = emc6w201_write8(client, reg, data->temp[sf][nr]);
 	mutex_unlock(&data->update_lock);
 
 	return err < 0 ? err : count;
@@ -444,7 +474,7 @@ static int emc6w201_detect(struct i2c_client *client,
 
 	/* Check configuration */
 	config = i2c_smbus_read_byte_data(client, EMC6W201_REG_CONFIG);
-	if ((config & 0xF4) != 0x04)
+	if (config < 0 || (config & 0xF4) != 0x04)
 		return -ENODEV;
 	if (!(config & 0x01)) {
 		dev_err(&client->dev, "Monitoring not enabled\n");
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index a4a94a096c90..2d96ed2bf8ed 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -52,6 +52,7 @@
 #define SIO_F71858_ID		0x0507	/* Chipset ID */
 #define SIO_F71862_ID		0x0601	/* Chipset ID */
 #define SIO_F71869_ID		0x0814	/* Chipset ID */
+#define SIO_F71869A_ID		0x1007	/* Chipset ID */
 #define SIO_F71882_ID		0x0541	/* Chipset ID */
 #define SIO_F71889_ID		0x0723	/* Chipset ID */
 #define SIO_F71889E_ID		0x0909	/* Chipset ID */
@@ -108,8 +109,8 @@ static unsigned short force_id;
 module_param(force_id, ushort, 0);
 MODULE_PARM_DESC(force_id, "Override the detected device ID");
 
-enum chips { f71808e, f71808a, f71858fg, f71862fg, f71869, f71882fg, f71889fg,
-	f71889ed, f71889a, f8000, f81865f };
+enum chips { f71808e, f71808a, f71858fg, f71862fg, f71869, f71869a, f71882fg,
+	f71889fg, f71889ed, f71889a, f8000, f81865f };
 
 static const char *f71882fg_names[] = {
 	"f71808e",
@@ -117,6 +118,7 @@ static const char *f71882fg_names[] = {
 	"f71858fg",
 	"f71862fg",
 	"f71869", /* Both f71869f and f71869e, reg. compatible and same id */
+	"f71869a",
 	"f71882fg",
 	"f71889fg", /* f81801u too, same id */
 	"f71889ed",
@@ -131,6 +133,7 @@ static const char f71882fg_has_in[][F71882FG_MAX_INS] = {
 	[f71858fg]	= { 1, 1, 1, 0, 0, 0, 0, 0, 0 },
 	[f71862fg]	= { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
 	[f71869]	= { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
+	[f71869a]	= { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
 	[f71882fg]	= { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
 	[f71889fg]	= { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
 	[f71889ed]	= { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
@@ -145,6 +148,7 @@ static const char f71882fg_has_in1_alarm[] = {
 	[f71858fg]	= 0,
 	[f71862fg]	= 0,
 	[f71869]	= 0,
+	[f71869a]	= 0,
 	[f71882fg]	= 1,
 	[f71889fg]	= 1,
 	[f71889ed]	= 1,
@@ -159,6 +163,7 @@ static const char f71882fg_fan_has_beep[] = {
 	[f71858fg]	= 0,
 	[f71862fg]	= 1,
 	[f71869]	= 1,
+	[f71869a]	= 1,
 	[f71882fg]	= 1,
 	[f71889fg]	= 1,
 	[f71889ed]	= 1,
@@ -173,6 +178,7 @@ static const char f71882fg_nr_fans[] = {
 	[f71858fg]	= 3,
 	[f71862fg]	= 3,
 	[f71869]	= 3,
+	[f71869a]	= 3,
 	[f71882fg]	= 4,
 	[f71889fg]	= 3,
 	[f71889ed]	= 3,
@@ -187,6 +193,7 @@ static const char f71882fg_temp_has_beep[] = {
 	[f71858fg]	= 0,
 	[f71862fg]	= 1,
 	[f71869]	= 1,
+	[f71869a]	= 1,
 	[f71882fg]	= 1,
 	[f71889fg]	= 1,
 	[f71889ed]	= 1,
@@ -201,6 +208,7 @@ static const char f71882fg_nr_temps[] = {
 	[f71858fg]	= 3,
 	[f71862fg]	= 3,
 	[f71869]	= 3,
+	[f71869a]	= 3,
 	[f71882fg]	= 3,
 	[f71889fg]	= 3,
 	[f71889ed]	= 3,
@@ -2243,6 +2251,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
 	case f71808e:
 	case f71808a:
 	case f71869:
+	case f71869a:
 		/* These always have signed auto point temps */
 		data->auto_point_temp_signed = 1;
 		/* Fall through to select correct fan/pwm reg bank! */
@@ -2305,6 +2314,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
 	case f71808e:
 	case f71808a:
 	case f71869:
+	case f71869a:
 	case f71889fg:
 	case f71889ed:
 	case f71889a:
@@ -2528,6 +2538,9 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
 	case SIO_F71869_ID:
 		sio_data->type = f71869;
 		break;
+	case SIO_F71869A_ID:
+		sio_data->type = f71869a;
+		break;
 	case SIO_F71882_ID:
 		sio_data->type = f71882fg;
 		break;
@@ -2662,7 +2675,7 @@ static void __exit f71882fg_exit(void)
 }
 
 MODULE_DESCRIPTION("F71882FG Hardware Monitoring Driver");
-MODULE_AUTHOR("Hans Edgington, Hans de Goede (hdegoede@redhat.com)");
+MODULE_AUTHOR("Hans Edgington, Hans de Goede <hdegoede@redhat.com>");
MODULE_LICENSE("GPL");
 
 module_init(f71882fg_init);
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
index 2582bfef6ccb..c8195a077da3 100644
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -202,7 +202,7 @@ static struct vrm_model vrm_models[] = {
 
 	{X86_VENDOR_CENTAUR, 0x6, 0x7, ANY, 85},	/* Eden ESP/Ezra */
 	{X86_VENDOR_CENTAUR, 0x6, 0x8, 0x7, 85},	/* Ezra T */
-	{X86_VENDOR_CENTAUR, 0x6, 0x9, 0x7, 85},	/* Nemiah */
+	{X86_VENDOR_CENTAUR, 0x6, 0x9, 0x7, 85},	/* Nehemiah */
 	{X86_VENDOR_CENTAUR, 0x6, 0x9, ANY, 17},	/* C3-M, Eden-N */
 	{X86_VENDOR_CENTAUR, 0x6, 0xA, 0x7, 0},		/* No information */
 	{X86_VENDOR_CENTAUR, 0x6, 0xA, ANY, 13},	/* C7, Esther */
diff --git a/drivers/hwmon/pmbus.c b/drivers/hwmon/pmbus.c
index 98e2e28899e2..931d940923ae 100644
--- a/drivers/hwmon/pmbus.c
+++ b/drivers/hwmon/pmbus.c
@@ -47,12 +47,14 @@ static void pmbus_find_sensor_groups(struct i2c_client *client,
 	if (info->func[0]
 	    && pmbus_check_byte_register(client, 0, PMBUS_STATUS_INPUT))
 		info->func[0] |= PMBUS_HAVE_STATUS_INPUT;
-	if (pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_1)) {
+	if (pmbus_check_byte_register(client, 0, PMBUS_FAN_CONFIG_12) &&
+	    pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_1)) {
 		info->func[0] |= PMBUS_HAVE_FAN12;
 		if (pmbus_check_byte_register(client, 0, PMBUS_STATUS_FAN_12))
 			info->func[0] |= PMBUS_HAVE_STATUS_FAN12;
 	}
-	if (pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_3)) {
+	if (pmbus_check_byte_register(client, 0, PMBUS_FAN_CONFIG_34) &&
+	    pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_3)) {
 		info->func[0] |= PMBUS_HAVE_FAN34;
 		if (pmbus_check_byte_register(client, 0, PMBUS_STATUS_FAN_34))
 			info->func[0] |= PMBUS_HAVE_STATUS_FAN34;
@@ -63,6 +65,10 @@ static void pmbus_find_sensor_groups(struct i2c_client *client,
 					     PMBUS_STATUS_TEMPERATURE))
 			info->func[0] |= PMBUS_HAVE_STATUS_TEMP;
 	}
+	if (pmbus_check_word_register(client, 0, PMBUS_READ_TEMPERATURE_2))
+		info->func[0] |= PMBUS_HAVE_TEMP2;
+	if (pmbus_check_word_register(client, 0, PMBUS_READ_TEMPERATURE_3))
+		info->func[0] |= PMBUS_HAVE_TEMP3;
 
 	/* Sensors detected on all pages */
 	for (page = 0; page < info->pages; page++) {
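A fan group is now claimed only when both its configuration byte register and its speed word register respond, which cuts false positives during auto-detection. A sketch of that two-register gate; the probe functions are stubs, not real SMBus access:

#include <stdbool.h>
#include <stdio.h>

#define HAVE_FAN12 (1 << 0)	/* illustrative flag value */

/* Stub register probes; a real driver would issue SMBus transfers. */
static bool check_byte_reg(int reg) { return reg == 0x3a; }
static bool check_word_reg(int reg) { return reg == 0x90; }

int main(void)
{
	unsigned int func = 0;

	/* Require both registers before claiming the feature, as the
	 * patch above does for FAN12 and FAN34. */
	if (check_byte_reg(0x3a) && check_word_reg(0x90))
		func |= HAVE_FAN12;

	printf("func = %#x\n", func);
	return 0;
}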
diff --git a/drivers/hwmon/pmbus_core.c b/drivers/hwmon/pmbus_core.c
index 354770ed3186..744672c1f26d 100644
--- a/drivers/hwmon/pmbus_core.c
+++ b/drivers/hwmon/pmbus_core.c
@@ -1430,14 +1430,9 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
 	i2c_set_clientdata(client, data);
 	mutex_init(&data->update_lock);
 
-	/*
-	 * Bail out if status register or PMBus revision register
-	 * does not exist.
-	 */
-	if (i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE) < 0
-	    || i2c_smbus_read_byte_data(client, PMBUS_REVISION) < 0) {
-		dev_err(&client->dev,
-			"Status or revision register not found\n");
+	/* Bail out if PMBus status register does not exist. */
+	if (i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE) < 0) {
+		dev_err(&client->dev, "PMBus status register not found\n");
 		ret = -ENODEV;
 		goto out_data;
 	}
diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
index 020c87273ea1..3494a4cce414 100644
--- a/drivers/hwmon/sch5627.c
+++ b/drivers/hwmon/sch5627.c
@@ -887,7 +887,7 @@ static void __exit sch5627_exit(void)
 }
 
 MODULE_DESCRIPTION("SMSC SCH5627 Hardware Monitoring Driver");
-MODULE_AUTHOR("Hans de Goede (hdegoede@redhat.com)");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
 MODULE_LICENSE("GPL");
 
 module_init(sch5627_init);
diff --git a/drivers/i2c/busses/i2c-taos-evm.c b/drivers/i2c/busses/i2c-taos-evm.c
index dd39c1eb03ed..26c352a09298 100644
--- a/drivers/i2c/busses/i2c-taos-evm.c
+++ b/drivers/i2c/busses/i2c-taos-evm.c
@@ -234,7 +234,7 @@ static int taos_connect(struct serio *serio, struct serio_driver *drv)
 
 	if (taos->state != TAOS_STATE_IDLE) {
 		err = -ENODEV;
-		dev_dbg(&serio->dev, "TAOS EVM reset failed (state=%d, "
+		dev_err(&serio->dev, "TAOS EVM reset failed (state=%d, "
 			"pos=%d)\n", taos->state, taos->pos);
 		goto exit_close;
 	}
@@ -255,7 +255,7 @@ static int taos_connect(struct serio *serio, struct serio_driver *drv)
 			msecs_to_jiffies(250));
 	if (taos->state != TAOS_STATE_IDLE) {
 		err = -ENODEV;
-		dev_err(&adapter->dev, "Echo off failed "
+		dev_err(&serio->dev, "TAOS EVM echo off failed "
 			"(state=%d)\n", taos->state);
 		goto exit_close;
 	}
@@ -263,7 +263,7 @@ static int taos_connect(struct serio *serio, struct serio_driver *drv)
 	err = i2c_add_adapter(adapter);
 	if (err)
 		goto exit_close;
-	dev_dbg(&serio->dev, "Connected to TAOS EVM\n");
+	dev_info(&serio->dev, "Connected to TAOS EVM\n");
 
 	taos->client = taos_instantiate_device(adapter);
 	return 0;
@@ -288,7 +288,7 @@ static void taos_disconnect(struct serio *serio)
 	serio_set_drvdata(serio, NULL);
 	kfree(taos);
 
-	dev_dbg(&serio->dev, "Disconnected from TAOS EVM\n");
+	dev_info(&serio->dev, "Disconnected from TAOS EVM\n");
 }
 
 static struct serio_device_id taos_serio_ids[] = {
diff --git a/drivers/i2c/muxes/pca954x.c b/drivers/i2c/muxes/pca954x.c
index 54e1ce73534b..6f8953664636 100644
--- a/drivers/i2c/muxes/pca954x.c
+++ b/drivers/i2c/muxes/pca954x.c
@@ -201,10 +201,11 @@ static int pca954x_probe(struct i2c_client *client,
 
 	i2c_set_clientdata(client, data);
 
-	/* Read the mux register at addr to verify
-	 * that the mux is in fact present.
+	/* Write the mux register at addr to verify
+	 * that the mux is in fact present. This also
+	 * initializes the mux to disconnected state.
 	 */
-	if (i2c_smbus_read_byte(client) < 0) {
+	if (i2c_smbus_write_byte(client, 0) < 0) {
 		dev_warn(&client->dev, "probe failed\n");
 		goto exit_free;
 	}
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index f62f52fb9ece..fc0f2bd9ca82 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3641,7 +3641,8 @@ static struct kobj_type cm_port_obj_type = {
 
 static char *cm_devnode(struct device *dev, mode_t *mode)
 {
-	*mode = 0666;
+	if (mode)
+		*mode = 0666;
 	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
 }
 
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index e49a85f8a44d..56898b6578a4 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -826,7 +826,8 @@ static void ib_uverbs_remove_one(struct ib_device *device)
 
 static char *uverbs_devnode(struct device *dev, mode_t *mode)
 {
-	*mode = 0666;
+	if (mode)
+		*mode = 0666;
 	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
 }
 
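Both devnode callbacks gain the same guard because the caller may pass a NULL mode pointer when it only wants the node name. A minimal userspace sketch of the guarded callback:

#include <stdio.h>
#include <sys/types.h>

/* The mode argument is optional: callers that only want the name pass
 * NULL, so the dereference must be guarded. */
static const char *devnode(const char *name, mode_t *mode)
{
	if (mode)
		*mode = 0666;
	return name;
}

int main(void)
{
	mode_t m = 0;

	printf("%s (mode %o)\n", devnode("infiniband/uverbs0", &m), m);
	printf("%s (name only)\n", devnode("infiniband/uverbs0", NULL));
	return 0;
}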
diff --git a/drivers/input/keyboard/pmic8xxx-keypad.c b/drivers/input/keyboard/pmic8xxx-keypad.c
index 40b02ae96f86..6229c3e8e78b 100644
--- a/drivers/input/keyboard/pmic8xxx-keypad.c
+++ b/drivers/input/keyboard/pmic8xxx-keypad.c
@@ -520,7 +520,8 @@ static void pmic8xxx_kp_close(struct input_dev *dev)
  */
 static int __devinit pmic8xxx_kp_probe(struct platform_device *pdev)
 {
-	const struct pm8xxx_keypad_platform_data *pdata = mfd_get_data(pdev);
+	const struct pm8xxx_keypad_platform_data *pdata =
+					dev_get_platdata(&pdev->dev);
 	const struct matrix_keymap_data *keymap_data;
 	struct pmic8xxx_kp *kp;
 	int rc;
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
index 97e07e786e41..b3cfb9c71e66 100644
--- a/drivers/input/misc/pmic8xxx-pwrkey.c
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -90,7 +90,8 @@ static int __devinit pmic8xxx_pwrkey_probe(struct platform_device *pdev)
 	unsigned int delay;
 	u8 pon_cntl;
 	struct pmic8xxx_pwrkey *pwrkey;
-	const struct pm8xxx_pwrkey_platform_data *pdata = mfd_get_data(pdev);
+	const struct pm8xxx_pwrkey_platform_data *pdata =
+					dev_get_platdata(&pdev->dev);
 
 	if (!pdata) {
 		dev_err(&pdev->dev, "power key platform data not supplied\n");
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index c0cff64a1ae6..cc1dc4817fac 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -593,7 +593,7 @@ static void lp5521_unregister_sysfs(struct i2c_client *client)
 			&lp5521_led_attribute_group);
 }
 
-static int __init lp5521_init_led(struct lp5521_led *led,
+static int __devinit lp5521_init_led(struct lp5521_led *led,
 				struct i2c_client *client,
 				int chan, struct lp5521_platform_data *pdata)
 {
@@ -637,7 +637,7 @@ static int __init lp5521_init_led(struct lp5521_led *led,
 	return 0;
 }
 
-static int lp5521_probe(struct i2c_client *client,
+static int __devinit lp5521_probe(struct i2c_client *client,
 			const struct i2c_device_id *id)
 {
 	struct lp5521_chip *chip;
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index e19fed25f137..5971e309b234 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -826,7 +826,7 @@ static int __init lp5523_init_engine(struct lp5523_engine *engine, int id)
 	return 0;
 }
 
-static int __init lp5523_init_led(struct lp5523_led *led, struct device *dev,
+static int __devinit lp5523_init_led(struct lp5523_led *led, struct device *dev,
 			   int chan, struct lp5523_platform_data *pdata)
 {
 	char name[32];
@@ -872,7 +872,7 @@ static int __init lp5523_init_led(struct lp5523_led *led, struct device *dev,
 
 static struct i2c_driver lp5523_driver;
 
-static int lp5523_probe(struct i2c_client *client,
+static int __devinit lp5523_probe(struct i2c_client *client,
 			const struct i2c_device_id *id)
 {
 	struct lp5523_chip *chip;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4332fc2f25d4..91e31e260b4a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -7088,6 +7088,7 @@ static int remove_and_add_spares(mddev_t *mddev)
 		list_for_each_entry(rdev, &mddev->disks, same_set) {
 			if (rdev->raid_disk >= 0 &&
 			    !test_bit(In_sync, &rdev->flags) &&
+			    !test_bit(Faulty, &rdev->flags) &&
 			    !test_bit(Blocked, &rdev->flags))
 				spares++;
 			if (rdev->raid_disk < 0
diff --git a/drivers/media/rc/fintek-cir.c b/drivers/media/rc/fintek-cir.c
index 8fa539dde1b4..7f7079b12f23 100644
--- a/drivers/media/rc/fintek-cir.c
+++ b/drivers/media/rc/fintek-cir.c
@@ -597,12 +597,17 @@ static void __devexit fintek_remove(struct pnp_dev *pdev)
 static int fintek_suspend(struct pnp_dev *pdev, pm_message_t state)
 {
 	struct fintek_dev *fintek = pnp_get_drvdata(pdev);
+	unsigned long flags;
 
 	fit_dbg("%s called", __func__);
 
+	spin_lock_irqsave(&fintek->fintek_lock, flags);
+
 	/* disable all CIR interrupts */
 	fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
 
+	spin_unlock_irqrestore(&fintek->fintek_lock, flags);
+
 	fintek_config_mode_enable(fintek);
 
 	/* disable cir logical dev */
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 3f3c70716268..6bc35eeb653b 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -307,6 +307,14 @@ static const struct {
 	/* 0xffdc iMON MCE VFD */
 	{ 0x00010000ffffffeell, KEY_VOLUMEUP },
 	{ 0x01000000ffffffeell, KEY_VOLUMEDOWN },
+	{ 0x00000001ffffffeell, KEY_MUTE },
+	{ 0x0000000fffffffeell, KEY_MEDIA },
+	{ 0x00000012ffffffeell, KEY_UP },
+	{ 0x00000013ffffffeell, KEY_DOWN },
+	{ 0x00000014ffffffeell, KEY_LEFT },
+	{ 0x00000015ffffffeell, KEY_RIGHT },
+	{ 0x00000016ffffffeell, KEY_ENTER },
+	{ 0x00000017ffffffeell, KEY_ESC },
 	/* iMON Knob values */
 	{ 0x000100ffffffffeell, KEY_VOLUMEUP },
 	{ 0x010000ffffffffeell, KEY_VOLUMEDOWN },
@@ -1582,16 +1590,16 @@ static void imon_incoming_packet(struct imon_context *ictx,
 	/* Only panel type events left to process now */
 	spin_lock_irqsave(&ictx->kc_lock, flags);
 
+	do_gettimeofday(&t);
 	/* KEY_MUTE repeats from knob need to be suppressed */
 	if (ictx->kc == KEY_MUTE && ictx->kc == ictx->last_keycode) {
-		do_gettimeofday(&t);
 		msec = tv2int(&t, &prev_time);
-		prev_time = t;
 		if (msec < ictx->idev->rep[REP_DELAY]) {
 			spin_unlock_irqrestore(&ictx->kc_lock, flags);
 			return;
 		}
 	}
+	prev_time = t;
 	kc = ictx->kc;
 
 	spin_unlock_irqrestore(&ictx->kc_lock, flags);
@@ -1603,7 +1611,9 @@ static void imon_incoming_packet(struct imon_context *ictx,
 	input_report_key(ictx->idev, kc, 0);
 	input_sync(ictx->idev);
 
+	spin_lock_irqsave(&ictx->kc_lock, flags);
 	ictx->last_keycode = kc;
+	spin_unlock_irqrestore(&ictx->kc_lock, flags);
 
 	return;
 
@@ -1740,6 +1750,8 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
 		detected_display_type = IMON_DISPLAY_TYPE_VFD;
 		break;
 	/* iMON VFD, MCE IR */
+	case 0x46:
+	case 0x7e:
 	case 0x9e:
 		dev_info(ictx->dev, "0xffdc iMON VFD, MCE IR");
 		detected_display_type = IMON_DISPLAY_TYPE_VFD;
@@ -1755,6 +1767,9 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
 		dev_info(ictx->dev, "Unknown 0xffdc device, "
 			 "defaulting to VFD and iMON IR");
 		detected_display_type = IMON_DISPLAY_TYPE_VFD;
+		/* We don't know which one it is, allow user to set the
+		 * RC6 one from userspace if OTHER wasn't correct. */
+		allowed_protos |= RC_TYPE_RC6;
 		break;
 	}
 
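The timestamp is now taken for every panel event and prev_time is updated after the suppression check, so repeat spacing is measured from the last accepted event; last_keycode is likewise updated under kc_lock. A hedged sketch of the repeat-suppression timing, with an illustrative delay:

#include <stdio.h>

#define REP_DELAY_MS 500	/* illustrative repeat delay */

static long prev_ms;

/* Returns 1 if the event should be dropped as a too-fast repeat. */
static int suppress_repeat(int key, int last_key, long now_ms)
{
	long elapsed = now_ms - prev_ms;
	int suppress = (key == last_key) && (elapsed < REP_DELAY_MS);

	/* Only accepted events advance the reference timestamp. */
	if (!suppress)
		prev_ms = now_ms;
	return suppress;
}

int main(void)
{
	printf("%d\n", suppress_repeat(1, 1, 100));	/* 1: too soon */
	printf("%d\n", suppress_repeat(1, 1, 700));	/* 0: past the delay */
	return 0;
}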
diff --git a/drivers/media/rc/ir-raw.c b/drivers/media/rc/ir-raw.c
index 11c19d8d0ee0..423ed45d6c55 100644
--- a/drivers/media/rc/ir-raw.c
+++ b/drivers/media/rc/ir-raw.c
@@ -114,18 +114,20 @@ int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type)
 	s64			delta; /* ns */
 	DEFINE_IR_RAW_EVENT(ev);
 	int			rc = 0;
+	int			delay;
 
 	if (!dev->raw)
 		return -EINVAL;
 
 	now = ktime_get();
 	delta = ktime_to_ns(ktime_sub(now, dev->raw->last_event));
+	delay = MS_TO_NS(dev->input_dev->rep[REP_DELAY]);
 
 	/* Check for a long duration since last event or if we're
 	 * being called for the first time, note that delta can't
 	 * possibly be negative.
 	 */
-	if (delta > IR_MAX_DURATION || !dev->raw->last_type)
+	if (delta > delay || !dev->raw->last_type)
 		type |= IR_START_EVENT;
 	else
 		ev.duration = delta;
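The stream-reset threshold becomes the input layer's repeat delay converted to nanoseconds rather than the fixed IR_MAX_DURATION. A quick arithmetic sketch of the comparison; MS_TO_NS is written out locally here:

#include <stdint.h>
#include <stdio.h>

/* Local helper; the kernel macro performs the same multiplication. */
#define MS_TO_NS(ms) ((int64_t)(ms) * 1000000LL)

int main(void)
{
	int64_t delta = 600000000LL;	/* 600 ms since the last edge */
	int rep_delay_ms = 500;		/* dev->input_dev->rep[REP_DELAY] */
	int64_t delay = MS_TO_NS(rep_delay_ms);

	/* A gap longer than the repeat delay starts a new event stream. */
	if (delta > delay)
		printf("IR_START_EVENT (gap %lld ns > %lld ns)\n",
		       (long long)delta, (long long)delay);
	else
		printf("continue previous stream\n");
	return 0;
}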
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index e716b931cf7e..ecd3d0280768 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -1347,6 +1347,7 @@ static const struct ite_dev_params ite_dev_descs[] = {
 	{	/* 0: ITE8704 */
 		.model = "ITE8704 CIR transceiver",
 		.io_region_size = IT87_IOREG_LENGTH,
+		.io_rsrc_no = 0,
 		.hw_tx_capable = true,
 		.sample_period = (u32) (1000000000ULL / 115200),
 		.tx_carrier_freq = 38000,
@@ -1371,6 +1372,7 @@ static const struct ite_dev_params ite_dev_descs[] = {
 	{	/* 1: ITE8713 */
 		.model = "ITE8713 CIR transceiver",
 		.io_region_size = IT87_IOREG_LENGTH,
+		.io_rsrc_no = 0,
 		.hw_tx_capable = true,
 		.sample_period = (u32) (1000000000ULL / 115200),
 		.tx_carrier_freq = 38000,
@@ -1395,6 +1397,7 @@ static const struct ite_dev_params ite_dev_descs[] = {
 	{	/* 2: ITE8708 */
 		.model = "ITE8708 CIR transceiver",
 		.io_region_size = IT8708_IOREG_LENGTH,
+		.io_rsrc_no = 0,
 		.hw_tx_capable = true,
 		.sample_period = (u32) (1000000000ULL / 115200),
 		.tx_carrier_freq = 38000,
@@ -1420,6 +1423,7 @@ static const struct ite_dev_params ite_dev_descs[] = {
 	{	/* 3: ITE8709 */
 		.model = "ITE8709 CIR transceiver",
 		.io_region_size = IT8709_IOREG_LENGTH,
+		.io_rsrc_no = 2,
 		.hw_tx_capable = true,
 		.sample_period = (u32) (1000000000ULL / 115200),
 		.tx_carrier_freq = 38000,
@@ -1461,6 +1465,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
 	struct rc_dev *rdev = NULL;
 	int ret = -ENOMEM;
 	int model_no;
+	int io_rsrc_no;
 
 	ite_dbg("%s called", __func__);
 
@@ -1490,10 +1495,11 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
 
 	/* get the description for the device */
 	dev_desc = &ite_dev_descs[model_no];
+	io_rsrc_no = dev_desc->io_rsrc_no;
 
 	/* validate pnp resources */
-	if (!pnp_port_valid(pdev, 0) ||
-	    pnp_port_len(pdev, 0) != dev_desc->io_region_size) {
+	if (!pnp_port_valid(pdev, io_rsrc_no) ||
+	    pnp_port_len(pdev, io_rsrc_no) != dev_desc->io_region_size) {
 		dev_err(&pdev->dev, "IR PNP Port not valid!\n");
 		goto failure;
 	}
@@ -1504,7 +1510,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
 	}
 
 	/* store resource values */
-	itdev->cir_addr = pnp_port_start(pdev, 0);
+	itdev->cir_addr = pnp_port_start(pdev, io_rsrc_no);
 	itdev->cir_irq = pnp_irq(pdev, 0);
 
 	/* initialize spinlocks */
diff --git a/drivers/media/rc/ite-cir.h b/drivers/media/rc/ite-cir.h
index 16a19f5fd718..aa899a0b9750 100644
--- a/drivers/media/rc/ite-cir.h
+++ b/drivers/media/rc/ite-cir.h
@@ -57,6 +57,9 @@ struct ite_dev_params {
 	/* size of the I/O region */
 	int io_region_size;
 
+	/* IR pnp I/O resource number */
+	int io_rsrc_no;
+
 	/* true if the hardware supports transmission */
 	bool hw_tx_capable;
 
diff --git a/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c b/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c
index bb10ffe086b4..8d558ae63456 100644
--- a/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c
+++ b/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c
@@ -15,43 +15,39 @@
 /* Pinnacle PCTV HD 800i mini remote */
 
 static struct rc_map_table pinnacle_pctv_hd[] = {
-
-	{ 0x0f, KEY_1 },
-	{ 0x15, KEY_2 },
-	{ 0x10, KEY_3 },
-	{ 0x18, KEY_4 },
-	{ 0x1b, KEY_5 },
-	{ 0x1e, KEY_6 },
-	{ 0x11, KEY_7 },
-	{ 0x21, KEY_8 },
-	{ 0x12, KEY_9 },
-	{ 0x27, KEY_0 },
-
-	{ 0x24, KEY_ZOOM },
-	{ 0x2a, KEY_SUBTITLE },
-
-	{ 0x00, KEY_MUTE },
-	{ 0x01, KEY_ENTER },	/* Pinnacle Logo */
-	{ 0x39, KEY_POWER },
-
-	{ 0x03, KEY_VOLUMEUP },
-	{ 0x09, KEY_VOLUMEDOWN },
-	{ 0x06, KEY_CHANNELUP },
-	{ 0x0c, KEY_CHANNELDOWN },
-
-	{ 0x2d, KEY_REWIND },
-	{ 0x30, KEY_PLAYPAUSE },
-	{ 0x33, KEY_FASTFORWARD },
-	{ 0x3c, KEY_STOP },
-	{ 0x36, KEY_RECORD },
-	{ 0x3f, KEY_EPG },	/* Labeled "?" */
+	/* Key codes for the tiny Pinnacle remote*/
+	{ 0x0700, KEY_MUTE },
+	{ 0x0701, KEY_MENU }, /* Pinnacle logo */
+	{ 0x0739, KEY_POWER },
+	{ 0x0703, KEY_VOLUMEUP },
+	{ 0x0709, KEY_VOLUMEDOWN },
+	{ 0x0706, KEY_CHANNELUP },
+	{ 0x070c, KEY_CHANNELDOWN },
+	{ 0x070f, KEY_1 },
+	{ 0x0715, KEY_2 },
+	{ 0x0710, KEY_3 },
+	{ 0x0718, KEY_4 },
+	{ 0x071b, KEY_5 },
+	{ 0x071e, KEY_6 },
+	{ 0x0711, KEY_7 },
+	{ 0x0721, KEY_8 },
+	{ 0x0712, KEY_9 },
+	{ 0x0727, KEY_0 },
+	{ 0x0724, KEY_ZOOM }, /* 'Square' key */
+	{ 0x072a, KEY_SUBTITLE }, /* 'T' key */
+	{ 0x072d, KEY_REWIND },
+	{ 0x0730, KEY_PLAYPAUSE },
+	{ 0x0733, KEY_FASTFORWARD },
+	{ 0x0736, KEY_RECORD },
+	{ 0x073c, KEY_STOP },
+	{ 0x073f, KEY_HELP }, /* '?' key */
 };
 
 static struct rc_map_list pinnacle_pctv_hd_map = {
 	.map = {
 		.scan    = pinnacle_pctv_hd,
 		.size    = ARRAY_SIZE(pinnacle_pctv_hd),
-		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
+		.rc_type = RC_TYPE_RC5,
 		.name    = RC_MAP_PINNACLE_PCTV_HD,
 	}
 };
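The rewritten keymap widens the scancodes to full RC5 codes, folding the remote's address byte 0x07 into the high byte, and declares RC_TYPE_RC5 so the core can match complete codes. A small sketch of that composition, assuming the usual address << 8 | command layout:

#include <stdio.h>

/* Compose a full RC5 scancode from address and command bytes, matching
 * the rewritten table: address 0x07 + command 0x0f gives 0x070f (KEY_1). */
static unsigned int rc5_scancode(unsigned char addr, unsigned char cmd)
{
	return ((unsigned int)addr << 8) | cmd;
}

int main(void)
{
	printf("KEY_1 scancode: 0x%04x\n", rc5_scancode(0x07, 0x0f));
	printf("KEY_HELP scancode: 0x%04x\n", rc5_scancode(0x07, 0x3f));
	return 0;
}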
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index fd237ab120bb..27997a9ceb0d 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -55,6 +55,8 @@ struct irctl {
 	struct lirc_buffer *buf;
 	unsigned int chunk_size;

+	struct cdev *cdev;
+
 	struct task_struct *task;
 	long jiffies_to_wait;
 };
@@ -62,7 +64,6 @@ struct irctl {
 static DEFINE_MUTEX(lirc_dev_lock);

 static struct irctl *irctls[MAX_IRCTL_DEVICES];
-static struct cdev cdevs[MAX_IRCTL_DEVICES];

 /* Only used for sysfs but defined to void otherwise */
 static struct class *lirc_class;
@@ -167,9 +168,13 @@ static struct file_operations lirc_dev_fops = {

 static int lirc_cdev_add(struct irctl *ir)
 {
-	int retval;
+	int retval = -ENOMEM;
 	struct lirc_driver *d = &ir->d;
-	struct cdev *cdev = &cdevs[d->minor];
+	struct cdev *cdev;
+
+	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+	if (!cdev)
+		goto err_out;

 	if (d->fops) {
 		cdev_init(cdev, d->fops);
@@ -180,12 +185,20 @@ static int lirc_cdev_add(struct irctl *ir)
 	}
 	retval = kobject_set_name(&cdev->kobj, "lirc%d", d->minor);
 	if (retval)
-		return retval;
+		goto err_out;

 	retval = cdev_add(cdev, MKDEV(MAJOR(lirc_base_dev), d->minor), 1);
-	if (retval)
+	if (retval) {
 		kobject_put(&cdev->kobj);
+		goto err_out;
+	}
+
+	ir->cdev = cdev;
+
+	return 0;

+err_out:
+	kfree(cdev);
 	return retval;
 }

@@ -214,7 +227,7 @@ int lirc_register_driver(struct lirc_driver *d)
 	if (MAX_IRCTL_DEVICES <= d->minor) {
 		dev_err(d->dev, "lirc_dev: lirc_register_driver: "
 			"\"minor\" must be between 0 and %d (%d)!\n",
-			MAX_IRCTL_DEVICES-1, d->minor);
+			MAX_IRCTL_DEVICES - 1, d->minor);
 		err = -EBADRQC;
 		goto out;
 	}
@@ -369,7 +382,7 @@ int lirc_unregister_driver(int minor)

 	if (minor < 0 || minor >= MAX_IRCTL_DEVICES) {
 		printk(KERN_ERR "lirc_dev: %s: minor (%d) must be between "
-		       "0 and %d!\n", __func__, minor, MAX_IRCTL_DEVICES-1);
+		       "0 and %d!\n", __func__, minor, MAX_IRCTL_DEVICES - 1);
 		return -EBADRQC;
 	}

@@ -380,7 +393,7 @@ int lirc_unregister_driver(int minor)
 		return -ENOENT;
 	}

-	cdev = &cdevs[minor];
+	cdev = ir->cdev;

 	mutex_lock(&lirc_dev_lock);

@@ -410,6 +423,7 @@ int lirc_unregister_driver(int minor)
 	} else {
 		lirc_irctl_cleanup(ir);
 		cdev_del(cdev);
+		kfree(cdev);
 		kfree(ir);
 		irctls[minor] = NULL;
 	}
@@ -453,7 +467,7 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file)
 		goto error;
 	}

-	cdev = &cdevs[iminor(inode)];
+	cdev = ir->cdev;
 	if (try_module_get(cdev->owner)) {
 		ir->open++;
 		retval = ir->d.set_use_inc(ir->d.data);
@@ -484,13 +498,15 @@ EXPORT_SYMBOL(lirc_dev_fop_open);
 int lirc_dev_fop_close(struct inode *inode, struct file *file)
 {
 	struct irctl *ir = irctls[iminor(inode)];
-	struct cdev *cdev = &cdevs[iminor(inode)];
+	struct cdev *cdev;

 	if (!ir) {
 		printk(KERN_ERR "%s: called with invalid irctl\n", __func__);
 		return -EINVAL;
 	}

+	cdev = ir->cdev;
+
 	dev_dbg(ir->d.dev, LOGHEAD "close called\n", ir->d.name, ir->d.minor);

 	WARN_ON(mutex_lock_killable(&lirc_dev_lock));
@@ -503,6 +519,7 @@ int lirc_dev_fop_close(struct inode *inode, struct file *file)
 		lirc_irctl_cleanup(ir);
 		cdev_del(cdev);
 		irctls[ir->d.minor] = NULL;
+		kfree(cdev);
 		kfree(ir);
 	}

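The fix above replaces the static cdevs[] array with one heap-allocated cdev per device, added with cdev_add() at registration and freed only after cdev_del() at teardown, so a stale kobject from a previous user of the same minor can never be reused. The lifetime pattern in isolation, under a hypothetical my_dev wrapper:

/* Illustrative sketch of the per-device cdev lifetime used above. */
struct my_dev {
	struct cdev *cdev;
};

static int my_dev_register(struct my_dev *d, dev_t devt,
			   const struct file_operations *fops)
{
	int ret;

	d->cdev = kzalloc(sizeof(*d->cdev), GFP_KERNEL);
	if (!d->cdev)
		return -ENOMEM;

	cdev_init(d->cdev, fops);
	ret = cdev_add(d->cdev, devt, 1);
	if (ret)
		kfree(d->cdev);
	return ret;
}

static void my_dev_unregister(struct my_dev *d)
{
	cdev_del(d->cdev);	/* no new opens can reach the device */
	kfree(d->cdev);
}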
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index ad927fcaa020..06dfe0957b5e 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -108,6 +108,12 @@ static int debug = 1;
 static int debug;
 #endif

+#define mce_dbg(dev, fmt, ...)					\
+	do {							\
+		if (debug)					\
+			dev_info(dev, fmt, ## __VA_ARGS__);	\
+	} while (0)
+
 /* general constants */
 #define SEND_FLAG_IN_PROGRESS	1
 #define SEND_FLAG_COMPLETE	2
@@ -246,6 +252,9 @@ static struct usb_device_id mceusb_dev_table[] = {
 	  .driver_info = MCE_GEN2_TX_INV },
 	/* SMK eHome Infrared Transceiver */
 	{ USB_DEVICE(VENDOR_SMK, 0x0338) },
+	/* SMK/I-O Data GV-MC7/RCKIT Receiver */
+	{ USB_DEVICE(VENDOR_SMK, 0x0353),
+	  .driver_info = MCE_GEN2_NO_TX },
 	/* Tatung eHome Infrared Transceiver */
 	{ USB_DEVICE(VENDOR_TATUNG, 0x9150) },
 	/* Shuttle eHome Infrared Transceiver */
@@ -606,12 +615,15 @@ static void mce_async_callback(struct urb *urb, struct pt_regs *regs)
 	if (ir) {
 		len = urb->actual_length;

-		dev_dbg(ir->dev, "callback called (status=%d len=%d)\n",
+		mce_dbg(ir->dev, "callback called (status=%d len=%d)\n",
 			urb->status, len);

 		mceusb_dev_printdata(ir, urb->transfer_buffer, 0, len, true);
 	}

+	/* the transfer buffer and urb were allocated in mce_request_packet */
+	kfree(urb->transfer_buffer);
+	usb_free_urb(urb);
 }

 /* request incoming or send outgoing usb packet - used to initialize remote */
@@ -655,17 +667,17 @@ static void mce_request_packet(struct mceusb_dev *ir, unsigned char *data,
 		return;
 	}

-	dev_dbg(dev, "receive request called (size=%#x)\n", size);
+	mce_dbg(dev, "receive request called (size=%#x)\n", size);

 	async_urb->transfer_buffer_length = size;
 	async_urb->dev = ir->usbdev;

 	res = usb_submit_urb(async_urb, GFP_ATOMIC);
 	if (res) {
-		dev_dbg(dev, "receive request FAILED! (res=%d)\n", res);
+		mce_dbg(dev, "receive request FAILED! (res=%d)\n", res);
 		return;
 	}
-	dev_dbg(dev, "receive request complete (res=%d)\n", res);
+	mce_dbg(dev, "receive request complete (res=%d)\n", res);
 }

 static void mce_async_out(struct mceusb_dev *ir, unsigned char *data, int size)
@@ -673,9 +685,9 @@ static void mce_async_out(struct mceusb_dev *ir, unsigned char *data, int size)
 	mce_request_packet(ir, data, size, MCEUSB_TX);
 }

-static void mce_sync_in(struct mceusb_dev *ir, unsigned char *data, int size)
+static void mce_flush_rx_buffer(struct mceusb_dev *ir, int size)
 {
-	mce_request_packet(ir, data, size, MCEUSB_RX);
+	mce_request_packet(ir, NULL, size, MCEUSB_RX);
 }

 /* Send data out the IR blaster port(s) */
@@ -794,7 +806,7 @@ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier)
 			ir->carrier = carrier;
 			cmdbuf[2] = MCE_CMD_SIG_END;
 			cmdbuf[3] = MCE_IRDATA_TRAILER;
-			dev_dbg(ir->dev, "%s: disabling carrier "
+			mce_dbg(ir->dev, "%s: disabling carrier "
 				"modulation\n", __func__);
 			mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
 			return carrier;
@@ -806,7 +818,7 @@ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier)
 			ir->carrier = carrier;
 			cmdbuf[2] = prescaler;
 			cmdbuf[3] = divisor;
-			dev_dbg(ir->dev, "%s: requesting %u HZ "
+			mce_dbg(ir->dev, "%s: requesting %u HZ "
 				"carrier\n", __func__, carrier);

 			/* Transmit new carrier to mce device */
@@ -879,7 +891,7 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
 			rawir.duration = (ir->buf_in[i] & MCE_PULSE_MASK)
 					 * US_TO_NS(MCE_TIME_UNIT);

-			dev_dbg(ir->dev, "Storing %s with duration %d\n",
+			mce_dbg(ir->dev, "Storing %s with duration %d\n",
 				rawir.pulse ? "pulse" : "space",
 				rawir.duration);

@@ -911,7 +923,7 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
 		if (ir->parser_state != CMD_HEADER && !ir->rem)
 			ir->parser_state = CMD_HEADER;
 	}
-	dev_dbg(ir->dev, "processed IR data, calling ir_raw_event_handle\n");
+	mce_dbg(ir->dev, "processed IR data, calling ir_raw_event_handle\n");
 	ir_raw_event_handle(ir->rc);
 }

@@ -933,7 +945,7 @@ static void mceusb_dev_recv(struct urb *urb, struct pt_regs *regs)

 	if (ir->send_flags == RECV_FLAG_IN_PROGRESS) {
 		ir->send_flags = SEND_FLAG_COMPLETE;
-		dev_dbg(ir->dev, "setup answer received %d bytes\n",
+		mce_dbg(ir->dev, "setup answer received %d bytes\n",
 			buf_len);
 	}

@@ -951,7 +963,7 @@ static void mceusb_dev_recv(struct urb *urb, struct pt_regs *regs)

 	case -EPIPE:
 	default:
-		dev_dbg(ir->dev, "Error: urb status = %d\n", urb->status);
+		mce_dbg(ir->dev, "Error: urb status = %d\n", urb->status);
 		break;
 	}

@@ -961,7 +973,6 @@ static void mceusb_dev_recv(struct urb *urb, struct pt_regs *regs)
 static void mceusb_gen1_init(struct mceusb_dev *ir)
 {
 	int ret;
-	int maxp = ir->len_in;
 	struct device *dev = ir->dev;
 	char *data;

@@ -978,8 +989,8 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
 	ret = usb_control_msg(ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0),
 			      USB_REQ_SET_ADDRESS, USB_TYPE_VENDOR, 0, 0,
 			      data, USB_CTRL_MSG_SZ, HZ * 3);
-	dev_dbg(dev, "%s - ret = %d\n", __func__, ret);
-	dev_dbg(dev, "%s - data[0] = %d, data[1] = %d\n",
+	mce_dbg(dev, "%s - ret = %d\n", __func__, ret);
+	mce_dbg(dev, "%s - data[0] = %d, data[1] = %d\n",
 		__func__, data[0], data[1]);

 	/* set feature: bit rate 38400 bps */
@@ -987,71 +998,56 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
 			      USB_REQ_SET_FEATURE, USB_TYPE_VENDOR,
 			      0xc04e, 0x0000, NULL, 0, HZ * 3);

-	dev_dbg(dev, "%s - ret = %d\n", __func__, ret);
+	mce_dbg(dev, "%s - ret = %d\n", __func__, ret);

 	/* bRequest 4: set char length to 8 bits */
 	ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
 			      4, USB_TYPE_VENDOR,
 			      0x0808, 0x0000, NULL, 0, HZ * 3);
-	dev_dbg(dev, "%s - retB = %d\n", __func__, ret);
+	mce_dbg(dev, "%s - retB = %d\n", __func__, ret);

 	/* bRequest 2: set handshaking to use DTR/DSR */
 	ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
 			      2, USB_TYPE_VENDOR,
 			      0x0000, 0x0100, NULL, 0, HZ * 3);
-	dev_dbg(dev, "%s - retC = %d\n", __func__, ret);
+	mce_dbg(dev, "%s - retC = %d\n", __func__, ret);

 	/* device reset */
 	mce_async_out(ir, DEVICE_RESET, sizeof(DEVICE_RESET));
-	mce_sync_in(ir, NULL, maxp);

 	/* get hw/sw revision? */
 	mce_async_out(ir, GET_REVISION, sizeof(GET_REVISION));
-	mce_sync_in(ir, NULL, maxp);

 	kfree(data);
 };

 static void mceusb_gen2_init(struct mceusb_dev *ir)
 {
-	int maxp = ir->len_in;
-
 	/* device reset */
 	mce_async_out(ir, DEVICE_RESET, sizeof(DEVICE_RESET));
-	mce_sync_in(ir, NULL, maxp);

 	/* get hw/sw revision? */
 	mce_async_out(ir, GET_REVISION, sizeof(GET_REVISION));
-	mce_sync_in(ir, NULL, maxp);

 	/* unknown what the next two actually return... */
 	mce_async_out(ir, GET_UNKNOWN, sizeof(GET_UNKNOWN));
-	mce_sync_in(ir, NULL, maxp);
 	mce_async_out(ir, GET_UNKNOWN2, sizeof(GET_UNKNOWN2));
-	mce_sync_in(ir, NULL, maxp);
 }

 static void mceusb_get_parameters(struct mceusb_dev *ir)
 {
-	int maxp = ir->len_in;
-
 	/* get the carrier and frequency */
 	mce_async_out(ir, GET_CARRIER_FREQ, sizeof(GET_CARRIER_FREQ));
-	mce_sync_in(ir, NULL, maxp);

-	if (!ir->flags.no_tx) {
+	if (!ir->flags.no_tx)
 		/* get the transmitter bitmask */
 		mce_async_out(ir, GET_TX_BITMASK, sizeof(GET_TX_BITMASK));
-		mce_sync_in(ir, NULL, maxp);
-	}

 	/* get receiver timeout value */
 	mce_async_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
-	mce_sync_in(ir, NULL, maxp);

 	/* get receiver sensor setting */
 	mce_async_out(ir, GET_RX_SENSOR, sizeof(GET_RX_SENSOR));
-	mce_sync_in(ir, NULL, maxp);
 }

 static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir)
@@ -1122,7 +1118,7 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
 	bool tx_mask_normal;
 	int ir_intfnum;

-	dev_dbg(&intf->dev, "%s called\n", __func__);
+	mce_dbg(&intf->dev, "%s called\n", __func__);

 	idesc = intf->cur_altsetting;

@@ -1150,7 +1146,7 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
 			ep_in = ep;
 			ep_in->bmAttributes = USB_ENDPOINT_XFER_INT;
 			ep_in->bInterval = 1;
-			dev_dbg(&intf->dev, "acceptable inbound endpoint "
+			mce_dbg(&intf->dev, "acceptable inbound endpoint "
 				"found\n");
 		}

@@ -1165,12 +1161,12 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
 			ep_out = ep;
 			ep_out->bmAttributes = USB_ENDPOINT_XFER_INT;
 			ep_out->bInterval = 1;
-			dev_dbg(&intf->dev, "acceptable outbound endpoint "
+			mce_dbg(&intf->dev, "acceptable outbound endpoint "
 				"found\n");
 		}
 	}
 	if (ep_in == NULL) {
-		dev_dbg(&intf->dev, "inbound and/or endpoint not found\n");
+		mce_dbg(&intf->dev, "inbound and/or endpoint not found\n");
 		return -ENODEV;
 	}

@@ -1215,16 +1211,16 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
 	if (!ir->rc)
 		goto rc_dev_fail;

-	/* flush buffers on the device */
-	mce_sync_in(ir, NULL, maxp);
-	mce_sync_in(ir, NULL, maxp);
-
 	/* wire up inbound data handler */
 	usb_fill_int_urb(ir->urb_in, dev, pipe, ir->buf_in,
 		maxp, (usb_complete_t) mceusb_dev_recv, ir, ep_in->bInterval);
 	ir->urb_in->transfer_dma = ir->dma_in;
 	ir->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

+	/* flush buffers on the device */
+	mce_dbg(&intf->dev, "Flushing receive buffers\n");
+	mce_flush_rx_buffer(ir, maxp);
+
 	/* initialize device */
 	if (ir->flags.microsoft_gen1)
 		mceusb_gen1_init(ir);
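Most of this patch is a mechanical dev_dbg -> mce_dbg conversion; the macro keeps verbose tracing available in production builds (dev_dbg compiles away without DEBUG) and gates it on the driver's debug module parameter instead. The pattern generalizes; a minimal sketch with illustrative names:

/* Illustrative sketch: runtime-gated debug output via a module parameter. */
static int my_debug;
module_param(my_debug, int, 0644);
MODULE_PARM_DESC(my_debug, "Enable verbose debug messages");

#define my_dbg(dev, fmt, ...)					\
	do {							\
		if (my_debug)					\
			dev_info(dev, fmt, ## __VA_ARGS__);	\
	} while (0)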
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index bf3060ea6107..565f24c20d77 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -991,7 +991,6 @@ static int nvt_open(struct rc_dev *dev)
 	unsigned long flags;

 	spin_lock_irqsave(&nvt->nvt_lock, flags);
-	nvt->in_use = true;
 	nvt_enable_cir(nvt);
 	spin_unlock_irqrestore(&nvt->nvt_lock, flags);

@@ -1004,7 +1003,6 @@ static void nvt_close(struct rc_dev *dev)
 	unsigned long flags;

 	spin_lock_irqsave(&nvt->nvt_lock, flags);
-	nvt->in_use = false;
 	nvt_disable_cir(nvt);
 	spin_unlock_irqrestore(&nvt->nvt_lock, flags);
 }
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index 379795d61ea7..1241fc89a36c 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -70,7 +70,6 @@ struct nvt_dev {
 	struct ir_raw_event rawir;

 	spinlock_t nvt_lock;
-	bool in_use;

 	/* for rx */
 	u8 buf[RX_BUF_LEN];
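Dropping in_use is safe because rc-core already serializes open/close and counts users, so the driver callbacks only have to switch the hardware on and off. A minimal sketch of such callbacks (the my_* names are hypothetical):

/* Illustrative sketch: open/close without a driver-private in_use flag. */
static int my_open(struct rc_dev *dev)
{
	struct my_priv *priv = dev->priv;

	my_hw_enable(priv);	/* rc-core guarantees balanced open/close */
	return 0;
}

static void my_close(struct rc_dev *dev)
{
	struct my_priv *priv = dev->priv;

	my_hw_disable(priv);
}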
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index f57cd5677ac2..3186ac7c2c10 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -522,18 +522,20 @@ EXPORT_SYMBOL_GPL(rc_g_keycode_from_table);
 /**
  * ir_do_keyup() - internal function to signal the release of a keypress
  * @dev:	the struct rc_dev descriptor of the device
+ * @sync:	whether or not to call input_sync
  *
  * This function is used internally to release a keypress, it must be
  * called with keylock held.
  */
-static void ir_do_keyup(struct rc_dev *dev)
+static void ir_do_keyup(struct rc_dev *dev, bool sync)
 {
 	if (!dev->keypressed)
 		return;

 	IR_dprintk(1, "keyup key 0x%04x\n", dev->last_keycode);
 	input_report_key(dev->input_dev, dev->last_keycode, 0);
-	input_sync(dev->input_dev);
+	if (sync)
+		input_sync(dev->input_dev);
 	dev->keypressed = false;
 }

@@ -549,7 +551,7 @@ void rc_keyup(struct rc_dev *dev)
 	unsigned long flags;

 	spin_lock_irqsave(&dev->keylock, flags);
-	ir_do_keyup(dev);
+	ir_do_keyup(dev, true);
 	spin_unlock_irqrestore(&dev->keylock, flags);
 }
 EXPORT_SYMBOL_GPL(rc_keyup);
@@ -578,7 +580,7 @@ static void ir_timer_keyup(unsigned long cookie)
 	 */
 	spin_lock_irqsave(&dev->keylock, flags);
 	if (time_is_before_eq_jiffies(dev->keyup_jiffies))
-		ir_do_keyup(dev);
+		ir_do_keyup(dev, true);
 	spin_unlock_irqrestore(&dev->keylock, flags);
 }

@@ -597,6 +599,7 @@ void rc_repeat(struct rc_dev *dev)
 	spin_lock_irqsave(&dev->keylock, flags);

 	input_event(dev->input_dev, EV_MSC, MSC_SCAN, dev->last_scancode);
+	input_sync(dev->input_dev);

 	if (!dev->keypressed)
 		goto out;
@@ -622,29 +625,28 @@ EXPORT_SYMBOL_GPL(rc_repeat);
 static void ir_do_keydown(struct rc_dev *dev, int scancode,
 			  u32 keycode, u8 toggle)
 {
-	input_event(dev->input_dev, EV_MSC, MSC_SCAN, scancode);
-
-	/* Repeat event? */
-	if (dev->keypressed &&
-	    dev->last_scancode == scancode &&
-	    dev->last_toggle == toggle)
-		return;
-
-	/* Release old keypress */
-	ir_do_keyup(dev);
-
-	dev->last_scancode = scancode;
-	dev->last_toggle = toggle;
-	dev->last_keycode = keycode;
-
-	if (keycode == KEY_RESERVED)
-		return;
+	bool new_event = !dev->keypressed ||
+			 dev->last_scancode != scancode ||
+			 dev->last_toggle != toggle;
+
+	if (new_event && dev->keypressed)
+		ir_do_keyup(dev, false);
+
+	input_event(dev->input_dev, EV_MSC, MSC_SCAN, scancode);
+
+	if (new_event && keycode != KEY_RESERVED) {
+		/* Register a keypress */
+		dev->keypressed = true;
+		dev->last_scancode = scancode;
+		dev->last_toggle = toggle;
+		dev->last_keycode = keycode;
+
+		IR_dprintk(1, "%s: key down event, "
+			   "key 0x%04x, scancode 0x%04x\n",
+			   dev->input_name, keycode, scancode);
+		input_report_key(dev->input_dev, keycode, 1);
+	}

-	/* Register a keypress */
-	dev->keypressed = true;
-	IR_dprintk(1, "%s: key down event, key 0x%04x, scancode 0x%04x\n",
-		   dev->input_name, keycode, scancode);
-	input_report_key(dev->input_dev, dev->last_keycode, 1);
 	input_sync(dev->input_dev);
 }

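The sync parameter exists so that a key change arriving as one IR event is also delivered to userspace as one input event frame: the implicit release is queued without input_sync(), and the following press flushes both together. The batching idea in isolation, assuming an already-registered input_dev:

/* Illustrative sketch: batch a release and a press into one event frame. */
static void my_report_key_change(struct input_dev *input,
				 u32 old_key, u32 new_key)
{
	input_report_key(input, old_key, 0);	/* queued, no sync yet */
	input_report_key(input, new_key, 1);
	input_sync(input);			/* one frame carries both */
}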
diff --git a/drivers/media/video/m5mols/m5mols.h b/drivers/media/video/m5mols/m5mols.h
index 10b55c854487..89d09a8914f8 100644
--- a/drivers/media/video/m5mols/m5mols.h
+++ b/drivers/media/video/m5mols/m5mols.h
@@ -2,10 +2,10 @@
  * Header for M-5MOLS 8M Pixel camera sensor with ISP
  *
  * Copyright (C) 2011 Samsung Electronics Co., Ltd.
- * Author: HeungJun Kim, riverful.kim@samsung.com
+ * Author: HeungJun Kim <riverful.kim@samsung.com>
  *
  * Copyright (C) 2009 Samsung Electronics Co., Ltd.
- * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ * Author: Dongsoo Nathaniel Kim <dongsoo45.kim@samsung.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -106,23 +106,23 @@ struct m5mols_capture {
  * The each value according to each scenemode is recommended in the documents.
  */
 struct m5mols_scenemode {
-	u32 metering;
-	u32 ev_bias;
-	u32 wb_mode;
-	u32 wb_preset;
-	u32 chroma_en;
-	u32 chroma_lvl;
-	u32 edge_en;
-	u32 edge_lvl;
-	u32 af_range;
-	u32 fd_mode;
-	u32 mcc;
-	u32 light;
-	u32 flash;
-	u32 tone;
-	u32 iso;
-	u32 capt_mode;
-	u32 wdr;
+	u8 metering;
+	u8 ev_bias;
+	u8 wb_mode;
+	u8 wb_preset;
+	u8 chroma_en;
+	u8 chroma_lvl;
+	u8 edge_en;
+	u8 edge_lvl;
+	u8 af_range;
+	u8 fd_mode;
+	u8 mcc;
+	u8 light;
+	u8 flash;
+	u8 tone;
+	u8 iso;
+	u8 capt_mode;
+	u8 wdr;
 };

 /**
@@ -154,7 +154,6 @@ struct m5mols_version {
 	u8 str[VERSION_STRING_SIZE];
 	u8 af;
 };
-#define VERSION_SIZE sizeof(struct m5mols_version)

 /**
  * struct m5mols_info - M-5MOLS driver data structure
@@ -216,9 +215,9 @@ struct m5mols_info {
 	bool lock_ae;
 	bool lock_awb;
 	u8 resolution;
-	u32 interrupt;
-	u32 mode;
-	u32 mode_save;
+	u8 interrupt;
+	u8 mode;
+	u8 mode_save;
 	int (*set_power)(struct device *dev, int on);
 };

@@ -256,9 +255,11 @@ struct m5mols_info {
  * +-------+---+----------+-----+------+------+------+------+
  * - d[0..3]: according to size1
  */
-int m5mols_read(struct v4l2_subdev *sd, u32 reg_comb, u32 *val);
+int m5mols_read_u8(struct v4l2_subdev *sd, u32 reg_comb, u8 *val);
+int m5mols_read_u16(struct v4l2_subdev *sd, u32 reg_comb, u16 *val);
+int m5mols_read_u32(struct v4l2_subdev *sd, u32 reg_comb, u32 *val);
 int m5mols_write(struct v4l2_subdev *sd, u32 reg_comb, u32 val);
-int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u32 value);
+int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u8 value);

 /*
  * Mode operation of the M-5MOLS
@@ -280,12 +281,12 @@ int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u32 value);
  * The available executing order between each modes are as follows:
  *   PARAMETER <---> MONITOR <---> CAPTURE
  */
-int m5mols_mode(struct m5mols_info *info, u32 mode);
+int m5mols_mode(struct m5mols_info *info, u8 mode);

-int m5mols_enable_interrupt(struct v4l2_subdev *sd, u32 reg);
+int m5mols_enable_interrupt(struct v4l2_subdev *sd, u8 reg);
 int m5mols_sync_controls(struct m5mols_info *info);
 int m5mols_start_capture(struct m5mols_info *info);
-int m5mols_do_scenemode(struct m5mols_info *info, u32 mode);
+int m5mols_do_scenemode(struct m5mols_info *info, u8 mode);
 int m5mols_lock_3a(struct m5mols_info *info, bool lock);
 int m5mols_set_ctrl(struct v4l2_ctrl *ctrl);

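The typed accessors make the transfer width explicit at every call site and let the core compare it against the size encoded in the register definition, which is what caught mismatches such as CAPC_IMAGE_SIZE being declared as 1 byte while being read into a u32. A minimal usage sketch, reusing the declarations above:

/* Illustrative sketch: width-checked register read. */
static int my_query_sysmode(struct v4l2_subdev *sd)
{
	u8 mode;
	int ret;

	/* SYSTEM_SYSMODE is defined with size 1, so only read_u8 succeeds */
	ret = m5mols_read_u8(sd, SYSTEM_SYSMODE, &mode);
	if (ret)
		return ret;

	v4l2_info(sd, "system mode: 0x%02x\n", mode);
	return 0;
}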
diff --git a/drivers/media/video/m5mols/m5mols_capture.c b/drivers/media/video/m5mols/m5mols_capture.c
index d71a3903b60f..d9471928369d 100644
--- a/drivers/media/video/m5mols/m5mols_capture.c
+++ b/drivers/media/video/m5mols/m5mols_capture.c
@@ -2,10 +2,10 @@
  * The Capture code for Fujitsu M-5MOLS ISP
  *
  * Copyright (C) 2011 Samsung Electronics Co., Ltd.
- * Author: HeungJun Kim, riverful.kim@samsung.com
+ * Author: HeungJun Kim <riverful.kim@samsung.com>
  *
  * Copyright (C) 2009 Samsung Electronics Co., Ltd.
- * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ * Author: Dongsoo Nathaniel Kim <dongsoo45.kim@samsung.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -58,9 +58,9 @@ static int m5mols_read_rational(struct v4l2_subdev *sd, u32 addr_num,
 {
 	u32 num, den;

-	int ret = m5mols_read(sd, addr_num, &num);
+	int ret = m5mols_read_u32(sd, addr_num, &num);
 	if (!ret)
-		ret = m5mols_read(sd, addr_den, &den);
+		ret = m5mols_read_u32(sd, addr_den, &den);
 	if (ret)
 		return ret;
 	*val = den == 0 ? 0 : num / den;
@@ -99,20 +99,20 @@ static int m5mols_capture_info(struct m5mols_info *info)
 	if (ret)
 		return ret;

-	ret = m5mols_read(sd, EXIF_INFO_ISO, (u32 *)&exif->iso_speed);
+	ret = m5mols_read_u16(sd, EXIF_INFO_ISO, &exif->iso_speed);
 	if (!ret)
-		ret = m5mols_read(sd, EXIF_INFO_FLASH, (u32 *)&exif->flash);
+		ret = m5mols_read_u16(sd, EXIF_INFO_FLASH, &exif->flash);
 	if (!ret)
-		ret = m5mols_read(sd, EXIF_INFO_SDR, (u32 *)&exif->sdr);
+		ret = m5mols_read_u16(sd, EXIF_INFO_SDR, &exif->sdr);
 	if (!ret)
-		ret = m5mols_read(sd, EXIF_INFO_QVAL, (u32 *)&exif->qval);
+		ret = m5mols_read_u16(sd, EXIF_INFO_QVAL, &exif->qval);
 	if (ret)
 		return ret;

 	if (!ret)
-		ret = m5mols_read(sd, CAPC_IMAGE_SIZE, &info->cap.main);
+		ret = m5mols_read_u32(sd, CAPC_IMAGE_SIZE, &info->cap.main);
 	if (!ret)
-		ret = m5mols_read(sd, CAPC_THUMB_SIZE, &info->cap.thumb);
+		ret = m5mols_read_u32(sd, CAPC_THUMB_SIZE, &info->cap.thumb);
 	if (!ret)
 		info->cap.total = info->cap.main + info->cap.thumb;

@@ -122,7 +122,7 @@ static int m5mols_capture_info(struct m5mols_info *info)
 int m5mols_start_capture(struct m5mols_info *info)
 {
 	struct v4l2_subdev *sd = &info->sd;
-	u32 resolution = info->resolution;
+	u8 resolution = info->resolution;
 	int timeout;
 	int ret;

diff --git a/drivers/media/video/m5mols/m5mols_controls.c b/drivers/media/video/m5mols/m5mols_controls.c
index 817c16fec368..d135d20d09cf 100644
--- a/drivers/media/video/m5mols/m5mols_controls.c
+++ b/drivers/media/video/m5mols/m5mols_controls.c
@@ -2,10 +2,10 @@
  * Controls for M-5MOLS 8M Pixel camera sensor with ISP
  *
  * Copyright (C) 2011 Samsung Electronics Co., Ltd.
- * Author: HeungJun Kim, riverful.kim@samsung.com
+ * Author: HeungJun Kim <riverful.kim@samsung.com>
  *
  * Copyright (C) 2009 Samsung Electronics Co., Ltd.
- * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ * Author: Dongsoo Nathaniel Kim <dongsoo45.kim@samsung.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -130,7 +130,7 @@ static struct m5mols_scenemode m5mols_default_scenemode[] = {
  *
  * WARNING: The execution order is important. Do not change the order.
  */
-int m5mols_do_scenemode(struct m5mols_info *info, u32 mode)
+int m5mols_do_scenemode(struct m5mols_info *info, u8 mode)
 {
 	struct v4l2_subdev *sd = &info->sd;
 	struct m5mols_scenemode scenemode = m5mols_default_scenemode[mode];
diff --git a/drivers/media/video/m5mols/m5mols_core.c b/drivers/media/video/m5mols/m5mols_core.c
index 76eac26e84ae..43c68f51c5ce 100644
--- a/drivers/media/video/m5mols/m5mols_core.c
+++ b/drivers/media/video/m5mols/m5mols_core.c
@@ -2,10 +2,10 @@
  * Driver for M-5MOLS 8M Pixel camera sensor with ISP
  *
  * Copyright (C) 2011 Samsung Electronics Co., Ltd.
- * Author: HeungJun Kim, riverful.kim@samsung.com
+ * Author: HeungJun Kim <riverful.kim@samsung.com>
  *
  * Copyright (C) 2009 Samsung Electronics Co., Ltd.
- * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ * Author: Dongsoo Nathaniel Kim <dongsoo45.kim@samsung.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -133,13 +133,13 @@ static u32 m5mols_swap_byte(u8 *data, u8 length)
 /**
  * m5mols_read - I2C read function
  * @reg: combination of size, category and command for the I2C packet
+ * @size: desired size of I2C packet
  * @val: read value
  */
-int m5mols_read(struct v4l2_subdev *sd, u32 reg, u32 *val)
+static int m5mols_read(struct v4l2_subdev *sd, u32 size, u32 reg, u32 *val)
 {
 	struct i2c_client *client = v4l2_get_subdevdata(sd);
 	u8 rbuf[M5MOLS_I2C_MAX_SIZE + 1];
-	u8 size = I2C_SIZE(reg);
 	u8 category = I2C_CATEGORY(reg);
 	u8 cmd = I2C_COMMAND(reg);
 	struct i2c_msg msg[2];
@@ -149,11 +149,6 @@ int m5mols_read(struct v4l2_subdev *sd, u32 reg, u32 *val)
 	if (!client->adapter)
 		return -ENODEV;

-	if (size != 1 && size != 2 && size != 4) {
-		v4l2_err(sd, "Wrong data size\n");
-		return -EINVAL;
-	}
-
 	msg[0].addr = client->addr;
 	msg[0].flags = 0;
 	msg[0].len = 5;
@@ -184,6 +179,52 @@ int m5mols_read(struct v4l2_subdev *sd, u32 reg, u32 *val)
 	return 0;
 }

+int m5mols_read_u8(struct v4l2_subdev *sd, u32 reg, u8 *val)
+{
+	u32 val_32;
+	int ret;
+
+	if (I2C_SIZE(reg) != 1) {
+		v4l2_err(sd, "Wrong data size\n");
+		return -EINVAL;
+	}
+
+	ret = m5mols_read(sd, I2C_SIZE(reg), reg, &val_32);
+	if (ret)
+		return ret;
+
+	*val = (u8)val_32;
+	return ret;
+}
+
+int m5mols_read_u16(struct v4l2_subdev *sd, u32 reg, u16 *val)
+{
+	u32 val_32;
+	int ret;
+
+	if (I2C_SIZE(reg) != 2) {
+		v4l2_err(sd, "Wrong data size\n");
+		return -EINVAL;
+	}
+
+	ret = m5mols_read(sd, I2C_SIZE(reg), reg, &val_32);
+	if (ret)
+		return ret;
+
+	*val = (u16)val_32;
+	return ret;
+}
+
+int m5mols_read_u32(struct v4l2_subdev *sd, u32 reg, u32 *val)
+{
+	if (I2C_SIZE(reg) != 4) {
+		v4l2_err(sd, "Wrong data size\n");
+		return -EINVAL;
+	}
+
+	return m5mols_read(sd, I2C_SIZE(reg), reg, val);
+}
+
 /**
  * m5mols_write - I2C command write function
  * @reg: combination of size, category and command for the I2C packet
@@ -231,13 +272,14 @@ int m5mols_write(struct v4l2_subdev *sd, u32 reg, u32 val)
 	return 0;
 }

-int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u32 mask)
+int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u8 mask)
 {
-	u32 busy, i;
+	u8 busy;
+	int i;
 	int ret;

 	for (i = 0; i < M5MOLS_I2C_CHECK_RETRY; i++) {
-		ret = m5mols_read(sd, I2C_REG(category, cmd, 1), &busy);
+		ret = m5mols_read_u8(sd, I2C_REG(category, cmd, 1), &busy);
 		if (ret < 0)
 			return ret;
 		if ((busy & mask) == mask)
@@ -252,14 +294,14 @@ int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u32 mask)
  * Before writing desired interrupt value the INT_FACTOR register should
  * be read to clear pending interrupts.
  */
-int m5mols_enable_interrupt(struct v4l2_subdev *sd, u32 reg)
+int m5mols_enable_interrupt(struct v4l2_subdev *sd, u8 reg)
 {
 	struct m5mols_info *info = to_m5mols(sd);
-	u32 mask = is_available_af(info) ? REG_INT_AF : 0;
-	u32 dummy;
+	u8 mask = is_available_af(info) ? REG_INT_AF : 0;
+	u8 dummy;
 	int ret;

-	ret = m5mols_read(sd, SYSTEM_INT_FACTOR, &dummy);
+	ret = m5mols_read_u8(sd, SYSTEM_INT_FACTOR, &dummy);
 	if (!ret)
 		ret = m5mols_write(sd, SYSTEM_INT_ENABLE, reg & ~mask);
 	return ret;
@@ -271,7 +313,7 @@ int m5mols_enable_interrupt(struct v4l2_subdev *sd, u32 reg)
  * It always accompanies a little delay changing the M-5MOLS mode, so it is
  * needed checking current busy status to guarantee right mode.
  */
-static int m5mols_reg_mode(struct v4l2_subdev *sd, u32 mode)
+static int m5mols_reg_mode(struct v4l2_subdev *sd, u8 mode)
 {
 	int ret = m5mols_write(sd, SYSTEM_SYSMODE, mode);

@@ -286,16 +328,16 @@ static int m5mols_reg_mode(struct v4l2_subdev *sd, u32 mode)
  * can be guaranteed only when the sensor is operating in mode which which
  * a command belongs to.
  */
-int m5mols_mode(struct m5mols_info *info, u32 mode)
+int m5mols_mode(struct m5mols_info *info, u8 mode)
 {
 	struct v4l2_subdev *sd = &info->sd;
 	int ret = -EINVAL;
-	u32 reg;
+	u8 reg;

 	if (mode < REG_PARAMETER && mode > REG_CAPTURE)
 		return ret;

-	ret = m5mols_read(sd, SYSTEM_SYSMODE, &reg);
+	ret = m5mols_read_u8(sd, SYSTEM_SYSMODE, &reg);
 	if ((!ret && reg == mode) || ret)
 		return ret;

@@ -344,41 +386,37 @@ int m5mols_mode(struct m5mols_info *info, u32 mode)
 static int m5mols_get_version(struct v4l2_subdev *sd)
 {
 	struct m5mols_info *info = to_m5mols(sd);
-	union {
-		struct m5mols_version ver;
-		u8 bytes[VERSION_SIZE];
-	} version;
-	u32 *value;
-	u8 cmd = CAT0_VER_CUSTOMER;
+	struct m5mols_version *ver = &info->ver;
+	u8 *str = ver->str;
+	int i;
 	int ret;

-	do {
-		value = (u32 *)&version.bytes[cmd];
-		ret = m5mols_read(sd, SYSTEM_CMD(cmd), value);
-		if (ret)
-			return ret;
-	} while (cmd++ != CAT0_VER_AWB);
+	ret = m5mols_read_u8(sd, SYSTEM_VER_CUSTOMER, &ver->customer);
+	if (!ret)
+		ret = m5mols_read_u8(sd, SYSTEM_VER_PROJECT, &ver->project);
+	if (!ret)
+		ret = m5mols_read_u16(sd, SYSTEM_VER_FIRMWARE, &ver->fw);
+	if (!ret)
+		ret = m5mols_read_u16(sd, SYSTEM_VER_HARDWARE, &ver->hw);
+	if (!ret)
+		ret = m5mols_read_u16(sd, SYSTEM_VER_PARAMETER, &ver->param);
+	if (!ret)
+		ret = m5mols_read_u16(sd, SYSTEM_VER_AWB, &ver->awb);
+	if (!ret)
+		ret = m5mols_read_u8(sd, AF_VERSION, &ver->af);
+	if (ret)
+		return ret;

-	do {
-		value = (u32 *)&version.bytes[cmd];
-		ret = m5mols_read(sd, SYSTEM_VER_STRING, value);
-		if (ret)
-			return ret;
-		if (cmd >= VERSION_SIZE - 1)
-			return -EINVAL;
-	} while (version.bytes[cmd++]);
-
-	value = (u32 *)&version.bytes[cmd];
-	ret = m5mols_read(sd, AF_VERSION, value);
-	if (ret)
-		return ret;
+	for (i = 0; i < VERSION_STRING_SIZE; i++) {
+		ret = m5mols_read_u8(sd, SYSTEM_VER_STRING, &str[i]);
+		if (ret)
+			return ret;
+	}

-	/* store version information swapped for being readable */
-	info->ver = version.ver;
-	info->ver.fw = be16_to_cpu(info->ver.fw);
-	info->ver.hw = be16_to_cpu(info->ver.hw);
-	info->ver.param = be16_to_cpu(info->ver.param);
-	info->ver.awb = be16_to_cpu(info->ver.awb);
+	ver->fw = be16_to_cpu(ver->fw);
+	ver->hw = be16_to_cpu(ver->hw);
+	ver->param = be16_to_cpu(ver->param);
+	ver->awb = be16_to_cpu(ver->awb);

 	v4l2_info(sd, "Manufacturer\t[%s]\n",
 		  is_manufacturer(info, REG_SAMSUNG_ELECTRO) ?
@@ -722,7 +760,7 @@ static int m5mols_init_controls(struct m5mols_info *info)
 	int ret;

 	/* Determine value's range & step of controls for various FW version */
-	ret = m5mols_read(sd, AE_MAX_GAIN_MON, (u32 *)&max_exposure);
+	ret = m5mols_read_u16(sd, AE_MAX_GAIN_MON, &max_exposure);
 	if (!ret)
 		step_zoom = is_manufacturer(info, REG_SAMSUNG_OPTICS) ? 31 : 1;
 	if (ret)
@@ -842,18 +880,18 @@ static void m5mols_irq_work(struct work_struct *work)
 	struct m5mols_info *info =
 		container_of(work, struct m5mols_info, work_irq);
 	struct v4l2_subdev *sd = &info->sd;
-	u32 reg;
+	u8 reg;
 	int ret;

 	if (!is_powered(info) ||
-	    m5mols_read(sd, SYSTEM_INT_FACTOR, &info->interrupt))
+	    m5mols_read_u8(sd, SYSTEM_INT_FACTOR, &info->interrupt))
 		return;

 	switch (info->interrupt & REG_INT_MASK) {
 	case REG_INT_AF:
 		if (!is_available_af(info))
 			break;
-		ret = m5mols_read(sd, AF_STATUS, &reg);
+		ret = m5mols_read_u8(sd, AF_STATUS, &reg);
 		v4l2_dbg(2, m5mols_debug, sd, "AF %s\n",
 			 reg == REG_AF_FAIL ? "Failed" :
 			 reg == REG_AF_SUCCESS ? "Success" :
diff --git a/drivers/media/video/m5mols/m5mols_reg.h b/drivers/media/video/m5mols/m5mols_reg.h
index b83e36fc6ac6..c755bd6edfe9 100644
--- a/drivers/media/video/m5mols/m5mols_reg.h
+++ b/drivers/media/video/m5mols/m5mols_reg.h
@@ -2,10 +2,10 @@
  * Register map for M-5MOLS 8M Pixel camera sensor with ISP
  *
  * Copyright (C) 2011 Samsung Electronics Co., Ltd.
- * Author: HeungJun Kim, riverful.kim@samsung.com
+ * Author: HeungJun Kim <riverful.kim@samsung.com>
  *
  * Copyright (C) 2009 Samsung Electronics Co., Ltd.
- * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ * Author: Dongsoo Nathaniel Kim <dongsoo45.kim@samsung.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -56,13 +56,24 @@
  * more specific contents, see definition if file m5mols.h.
  */
 #define CAT0_VER_CUSTOMER	0x00	/* customer version */
-#define CAT0_VER_AWB		0x09	/* Auto WB version */
+#define CAT0_VER_PROJECT	0x01	/* project version */
+#define CAT0_VER_FIRMWARE	0x02	/* Firmware version */
+#define CAT0_VER_HARDWARE	0x04	/* Hardware version */
+#define CAT0_VER_PARAMETER	0x06	/* Parameter version */
+#define CAT0_VER_AWB		0x08	/* Auto WB version */
 #define CAT0_VER_STRING		0x0a	/* string including M-5MOLS */
 #define CAT0_SYSMODE		0x0b	/* SYSTEM mode register */
 #define CAT0_STATUS		0x0c	/* SYSTEM mode status register */
 #define CAT0_INT_FACTOR		0x10	/* interrupt pending register */
 #define CAT0_INT_ENABLE		0x11	/* interrupt enable register */

+#define SYSTEM_VER_CUSTOMER	I2C_REG(CAT_SYSTEM, CAT0_VER_CUSTOMER, 1)
+#define SYSTEM_VER_PROJECT	I2C_REG(CAT_SYSTEM, CAT0_VER_PROJECT, 1)
+#define SYSTEM_VER_FIRMWARE	I2C_REG(CAT_SYSTEM, CAT0_VER_FIRMWARE, 2)
+#define SYSTEM_VER_HARDWARE	I2C_REG(CAT_SYSTEM, CAT0_VER_HARDWARE, 2)
+#define SYSTEM_VER_PARAMETER	I2C_REG(CAT_SYSTEM, CAT0_VER_PARAMETER, 2)
+#define SYSTEM_VER_AWB		I2C_REG(CAT_SYSTEM, CAT0_VER_AWB, 2)
+
 #define SYSTEM_SYSMODE		I2C_REG(CAT_SYSTEM, CAT0_SYSMODE, 1)
 #define REG_SYSINIT		0x00	/* SYSTEM mode */
 #define REG_PARAMETER		0x01	/* PARAMETER mode */
@@ -382,8 +393,8 @@
 #define REG_CAP_START_MAIN	0x01
 #define REG_CAP_START_THUMB	0x03

-#define CAPC_IMAGE_SIZE	I2C_REG(CAT_CAPT_CTRL, CATC_CAP_IMAGE_SIZE, 1)
-#define CAPC_THUMB_SIZE	I2C_REG(CAT_CAPT_CTRL, CATC_CAP_THUMB_SIZE, 1)
+#define CAPC_IMAGE_SIZE	I2C_REG(CAT_CAPT_CTRL, CATC_CAP_IMAGE_SIZE, 4)
+#define CAPC_THUMB_SIZE	I2C_REG(CAT_CAPT_CTRL, CATC_CAP_THUMB_SIZE, 4)

 /*
  * Category F - Flash
diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c
index bc0c23a1009c..63f8a0cc33d8 100644
--- a/drivers/media/video/mx1_camera.c
+++ b/drivers/media/video/mx1_camera.c
@@ -444,12 +444,9 @@ static int mx1_camera_add_device(struct soc_camera_device *icd)
 {
 	struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
 	struct mx1_camera_dev *pcdev = ici->priv;
-	int ret;

-	if (pcdev->icd) {
-		ret = -EBUSY;
-		goto ebusy;
-	}
+	if (pcdev->icd)
+		return -EBUSY;

 	dev_info(icd->dev.parent, "MX1 Camera driver attached to camera %d\n",
 		 icd->devnum);
@@ -458,8 +455,7 @@ static int mx1_camera_add_device(struct soc_camera_device *icd)

 	pcdev->icd = icd;

-ebusy:
-	return ret;
+	return 0;
 }

 static void mx1_camera_remove_device(struct soc_camera_device *icd)
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
index 4ada9be1d430..4d07c5844402 100644
--- a/drivers/media/video/omap/omap_vout.c
+++ b/drivers/media/video/omap/omap_vout.c
@@ -982,6 +982,14 @@ static int omap_vout_buffer_setup(struct videobuf_queue *q, unsigned int *count,
 	startindex = (vout->vid == OMAP_VIDEO1) ?
 		video1_numbuffers : video2_numbuffers;

+	/* Check the size of the buffer */
+	if (*size > vout->buffer_size) {
+		v4l2_err(&vout->vid_dev->v4l2_dev,
+			 "buffer allocation mismatch [%u] [%u]\n",
+			 *size, vout->buffer_size);
+		return -ENOMEM;
+	}
+
 	for (i = startindex; i < *count; i++) {
 		vout->buffer_size = *size;

@@ -1228,6 +1236,14 @@ static int omap_vout_mmap(struct file *file, struct vm_area_struct *vma)
 			(vma->vm_pgoff << PAGE_SHIFT));
 		return -EINVAL;
 	}
+	/* Check the size of the buffer */
+	if (size > vout->buffer_size) {
+		v4l2_err(&vout->vid_dev->v4l2_dev,
+			 "insufficient memory [%lu] [%u]\n",
+			 size, vout->buffer_size);
+		return -ENOMEM;
+	}
+
 	q->bufs[i]->baddr = vma->vm_start;

 	vma->vm_flags |= VM_RESERVED;
@@ -2391,7 +2407,7 @@ static int __init omap_vout_create_video_devices(struct platform_device *pdev)
 		/* Register the Video device with V4L2
 		 */
 		vfd = vout->vfd;
-		if (video_register_device(vfd, VFL_TYPE_GRABBER, k + 1) < 0) {
+		if (video_register_device(vfd, VFL_TYPE_GRABBER, -1) < 0) {
 			dev_err(&pdev->dev, ": Could not register "
 					"Video for Linux device\n");
 			vfd->minor = -1;
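Both added checks enforce the same invariant: no request may exceed the buffer size recorded at allocation time; for mmap the requested span comes from the VMA itself. The validation in isolation (names hypothetical):

/* Illustrative sketch: validate an mmap request against the allocation. */
static int my_check_mmap_size(struct vm_area_struct *vma,
			      unsigned int buffer_size)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	if (size > buffer_size)
		return -ENOMEM;	/* request exceeds what was allocated */
	return 0;
}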
diff --git a/drivers/media/video/omap/omap_voutlib.c b/drivers/media/video/omap/omap_voutlib.c
index 2aa6a76c5e59..8ae74817a110 100644
--- a/drivers/media/video/omap/omap_voutlib.c
+++ b/drivers/media/video/omap/omap_voutlib.c
@@ -193,7 +193,7 @@ int omap_vout_new_crop(struct v4l2_pix_format *pix,
 		return -EINVAL;

 	if (cpu_is_omap24xx()) {
-		if (crop->height != win->w.height) {
+		if (try_crop.height != win->w.height) {
 			/* If we're resizing vertically, we can't support a
 			 * crop width wider than 768 pixels.
 			 */
@@ -202,7 +202,7 @@ int omap_vout_new_crop(struct v4l2_pix_format *pix,
 		}
 	}
 	/* vertical resizing */
-	vresize = (1024 * crop->height) / win->w.height;
+	vresize = (1024 * try_crop.height) / win->w.height;
 	if (cpu_is_omap24xx() && (vresize > 2048))
 		vresize = 2048;
 	else if (cpu_is_omap34xx() && (vresize > 4096))
@@ -221,7 +221,7 @@ int omap_vout_new_crop(struct v4l2_pix_format *pix,
 			try_crop.height = 2;
 	}
 	/* horizontal resizing */
-	hresize = (1024 * crop->width) / win->w.width;
+	hresize = (1024 * try_crop.width) / win->w.width;
 	if (cpu_is_omap24xx() && (hresize > 2048))
 		hresize = 2048;
 	else if (cpu_is_omap34xx() && (hresize > 4096))
diff --git a/drivers/media/video/omap3isp/isp.c b/drivers/media/video/omap3isp/isp.c
index c9fd04ee70a8..94b6ed89e195 100644
--- a/drivers/media/video/omap3isp/isp.c
+++ b/drivers/media/video/omap3isp/isp.c
@@ -1748,7 +1748,7 @@ static int isp_register_entities(struct isp_device *isp)
 		goto done;

 	/* Register external entities */
-	for (subdevs = pdata->subdevs; subdevs->subdevs; ++subdevs) {
+	for (subdevs = pdata->subdevs; subdevs && subdevs->subdevs; ++subdevs) {
 		struct v4l2_subdev *sensor;
 		struct media_entity *input;
 		unsigned int flags;
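The added subdevs check lets board code legitimately leave the external-subdevice table NULL; the loop now degrades to a no-op instead of dereferencing a NULL pointer. The guard pattern on its own (types and names hypothetical):

/* Illustrative sketch: tolerate a NULL subdevice table in platform data. */
static void my_register_external(struct my_platform_data *pdata)
{
	struct my_subdev_group *group;

	for (group = pdata->subdevs; group && group->subdevs; ++group)
		my_register_group(group);	/* skipped entirely when NULL */
}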
diff --git a/drivers/media/video/pwc/pwc-ctrl.c b/drivers/media/video/pwc/pwc-ctrl.c
index 1593f8deb810..760b4de13adf 100644
--- a/drivers/media/video/pwc/pwc-ctrl.c
+++ b/drivers/media/video/pwc/pwc-ctrl.c
@@ -1414,7 +1414,7 @@ long pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg)
 	{
 		ARG_DEF(struct pwc_probe, probe)

-		strcpy(ARGR(probe).name, pdev->vdev->name);
+		strcpy(ARGR(probe).name, pdev->vdev.name);
 		ARGR(probe).type = pdev->type;
 		ARG_OUT(probe)
 		break;
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index 356cd42b593b..b0bde5a87c8a 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -40,7 +40,7 @@
40 Oh yes, convention: to disctinguish between all the various pointers to 40 Oh yes, convention: to disctinguish between all the various pointers to
41 device-structures, I use these names for the pointer variables: 41 device-structures, I use these names for the pointer variables:
42 udev: struct usb_device * 42 udev: struct usb_device *
43 vdev: struct video_device * 43 vdev: struct video_device (member of pwc_dev)
 44 pdev: struct pwc_device * 44 pdev: struct pwc_device *
45*/ 45*/
46 46
@@ -152,6 +152,7 @@ static ssize_t pwc_video_read(struct file *file, char __user *buf,
152 size_t count, loff_t *ppos); 152 size_t count, loff_t *ppos);
153static unsigned int pwc_video_poll(struct file *file, poll_table *wait); 153static unsigned int pwc_video_poll(struct file *file, poll_table *wait);
154static int pwc_video_mmap(struct file *file, struct vm_area_struct *vma); 154static int pwc_video_mmap(struct file *file, struct vm_area_struct *vma);
155static void pwc_video_release(struct video_device *vfd);
155 156
156static const struct v4l2_file_operations pwc_fops = { 157static const struct v4l2_file_operations pwc_fops = {
157 .owner = THIS_MODULE, 158 .owner = THIS_MODULE,
@@ -164,42 +165,12 @@ static const struct v4l2_file_operations pwc_fops = {
164}; 165};
165static struct video_device pwc_template = { 166static struct video_device pwc_template = {
166 .name = "Philips Webcam", /* Filled in later */ 167 .name = "Philips Webcam", /* Filled in later */
167 .release = video_device_release, 168 .release = pwc_video_release,
168 .fops = &pwc_fops, 169 .fops = &pwc_fops,
170 .ioctl_ops = &pwc_ioctl_ops,
169}; 171};
170 172
171/***************************************************************************/ 173/***************************************************************************/
172
173/* Okay, this is some magic that I worked out and the reasoning behind it...
174
175 The biggest problem with any USB device is of course: "what to do
176 when the user unplugs the device while it is in use by an application?"
177 We have several options:
178 1) Curse them with the 7 plagues when they do (requires divine intervention)
179 2) Tell them not to (won't work: they'll do it anyway)
180 3) Oops the kernel (this will have a negative effect on a user's uptime)
181 4) Do something sensible.
182
183 Of course, we go for option 4.
184
185 It happens that this device will be linked to two times, once from
186 usb_device and once from the video_device in their respective 'private'
187 pointers. This is done when the device is probed() and all initialization
188 succeeded. The pwc_device struct links back to both structures.
189
190 When a device is unplugged while in use it will be removed from the
191 list of known USB devices; I also de-register it as a V4L device, but
192 unfortunately I can't free the memory since the struct is still in use
193 by the file descriptor. This free-ing is then deferend until the first
194 opportunity. Crude, but it works.
195
196 A small 'advantage' is that if a user unplugs the cam and plugs it back
197 in, it should get assigned the same video device minor, but unfortunately
198 it's non-trivial to re-link the cam back to the video device... (that
199 would surely be magic! :))
200*/
201
202/***************************************************************************/
203/* Private functions */ 174/* Private functions */
204 175
205/* Here we want the physical address of the memory. 176/* Here we want the physical address of the memory.
@@ -1016,16 +987,15 @@ static ssize_t show_snapshot_button_status(struct device *class_dev,
1016static DEVICE_ATTR(button, S_IRUGO | S_IWUSR, show_snapshot_button_status, 987static DEVICE_ATTR(button, S_IRUGO | S_IWUSR, show_snapshot_button_status,
1017 NULL); 988 NULL);
1018 989
1019static int pwc_create_sysfs_files(struct video_device *vdev) 990static int pwc_create_sysfs_files(struct pwc_device *pdev)
1020{ 991{
1021 struct pwc_device *pdev = video_get_drvdata(vdev);
1022 int rc; 992 int rc;
1023 993
1024 rc = device_create_file(&vdev->dev, &dev_attr_button); 994 rc = device_create_file(&pdev->vdev.dev, &dev_attr_button);
1025 if (rc) 995 if (rc)
1026 goto err; 996 goto err;
1027 if (pdev->features & FEATURE_MOTOR_PANTILT) { 997 if (pdev->features & FEATURE_MOTOR_PANTILT) {
1028 rc = device_create_file(&vdev->dev, &dev_attr_pan_tilt); 998 rc = device_create_file(&pdev->vdev.dev, &dev_attr_pan_tilt);
1029 if (rc) 999 if (rc)
1030 goto err_button; 1000 goto err_button;
1031 } 1001 }
@@ -1033,19 +1003,17 @@ static int pwc_create_sysfs_files(struct video_device *vdev)
1033 return 0; 1003 return 0;
1034 1004
1035err_button: 1005err_button:
1036 device_remove_file(&vdev->dev, &dev_attr_button); 1006 device_remove_file(&pdev->vdev.dev, &dev_attr_button);
1037err: 1007err:
1038 PWC_ERROR("Could not create sysfs files.\n"); 1008 PWC_ERROR("Could not create sysfs files.\n");
1039 return rc; 1009 return rc;
1040} 1010}
1041 1011
1042static void pwc_remove_sysfs_files(struct video_device *vdev) 1012static void pwc_remove_sysfs_files(struct pwc_device *pdev)
1043{ 1013{
1044 struct pwc_device *pdev = video_get_drvdata(vdev);
1045
1046 if (pdev->features & FEATURE_MOTOR_PANTILT) 1014 if (pdev->features & FEATURE_MOTOR_PANTILT)
1047 device_remove_file(&vdev->dev, &dev_attr_pan_tilt); 1015 device_remove_file(&pdev->vdev.dev, &dev_attr_pan_tilt);
1048 device_remove_file(&vdev->dev, &dev_attr_button); 1016 device_remove_file(&pdev->vdev.dev, &dev_attr_button);
1049} 1017}
1050 1018
1051#ifdef CONFIG_USB_PWC_DEBUG 1019#ifdef CONFIG_USB_PWC_DEBUG
@@ -1106,7 +1074,7 @@ static int pwc_video_open(struct file *file)
1106 if (ret >= 0) 1074 if (ret >= 0)
1107 { 1075 {
1108 PWC_DEBUG_OPEN("This %s camera is equipped with a %s (%d).\n", 1076 PWC_DEBUG_OPEN("This %s camera is equipped with a %s (%d).\n",
1109 pdev->vdev->name, 1077 pdev->vdev.name,
1110 pwc_sensor_type_to_string(i), i); 1078 pwc_sensor_type_to_string(i), i);
1111 } 1079 }
1112 } 1080 }
@@ -1180,16 +1148,15 @@ static int pwc_video_open(struct file *file)
1180 return 0; 1148 return 0;
1181} 1149}
1182 1150
1183 1151static void pwc_video_release(struct video_device *vfd)
1184static void pwc_cleanup(struct pwc_device *pdev)
1185{ 1152{
1186 pwc_remove_sysfs_files(pdev->vdev); 1153 struct pwc_device *pdev = container_of(vfd, struct pwc_device, vdev);
1187 video_unregister_device(pdev->vdev); 1154 int hint;
1188 1155
1189#ifdef CONFIG_USB_PWC_INPUT_EVDEV 1156 /* search device_hint[] table if we occupy a slot, by any chance */
1190 if (pdev->button_dev) 1157 for (hint = 0; hint < MAX_DEV_HINTS; hint++)
1191 input_unregister_device(pdev->button_dev); 1158 if (device_hint[hint].pdev == pdev)
1192#endif 1159 device_hint[hint].pdev = NULL;
1193 1160
1194 kfree(pdev); 1161 kfree(pdev);
1195} 1162}
@@ -1199,7 +1166,7 @@ static int pwc_video_close(struct file *file)
1199{ 1166{
1200 struct video_device *vdev = file->private_data; 1167 struct video_device *vdev = file->private_data;
1201 struct pwc_device *pdev; 1168 struct pwc_device *pdev;
1202 int i, hint; 1169 int i;
1203 1170
1204 PWC_DEBUG_OPEN(">> video_close called(vdev = 0x%p).\n", vdev); 1171 PWC_DEBUG_OPEN(">> video_close called(vdev = 0x%p).\n", vdev);
1205 1172
@@ -1234,12 +1201,6 @@ static int pwc_video_close(struct file *file)
1234 } 1201 }
1235 pdev->vopen--; 1202 pdev->vopen--;
1236 PWC_DEBUG_OPEN("<< video_close() vopen=%d\n", pdev->vopen); 1203 PWC_DEBUG_OPEN("<< video_close() vopen=%d\n", pdev->vopen);
1237 } else {
1238 pwc_cleanup(pdev);
1239 /* search device_hint[] table if we occupy a slot, by any chance */
1240 for (hint = 0; hint < MAX_DEV_HINTS; hint++)
1241 if (device_hint[hint].pdev == pdev)
1242 device_hint[hint].pdev = NULL;
1243 } 1204 }
1244 1205
1245 return 0; 1206 return 0;
@@ -1715,19 +1676,12 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
1715 init_waitqueue_head(&pdev->frameq); 1676 init_waitqueue_head(&pdev->frameq);
1716 pdev->vcompression = pwc_preferred_compression; 1677 pdev->vcompression = pwc_preferred_compression;
1717 1678
1718 /* Allocate video_device structure */ 1679 /* Init video_device structure */
1719 pdev->vdev = video_device_alloc(); 1680 memcpy(&pdev->vdev, &pwc_template, sizeof(pwc_template));
1720 if (!pdev->vdev) { 1681 pdev->vdev.parent = &intf->dev;
1721 PWC_ERROR("Err, cannot allocate video_device struture. Failing probe."); 1682 pdev->vdev.lock = &pdev->modlock;
1722 rc = -ENOMEM; 1683 strcpy(pdev->vdev.name, name);
1723 goto err_free_mem; 1684 video_set_drvdata(&pdev->vdev, pdev);
1724 }
1725 memcpy(pdev->vdev, &pwc_template, sizeof(pwc_template));
1726 pdev->vdev->parent = &intf->dev;
1727 pdev->vdev->lock = &pdev->modlock;
1728 pdev->vdev->ioctl_ops = &pwc_ioctl_ops;
1729 strcpy(pdev->vdev->name, name);
1730 video_set_drvdata(pdev->vdev, pdev);
1731 1685
1732 pdev->release = le16_to_cpu(udev->descriptor.bcdDevice); 1686 pdev->release = le16_to_cpu(udev->descriptor.bcdDevice);
1733 PWC_DEBUG_PROBE("Release: %04x\n", pdev->release); 1687 PWC_DEBUG_PROBE("Release: %04x\n", pdev->release);
@@ -1746,8 +1700,6 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
1746 } 1700 }
1747 } 1701 }
1748 1702
1749 pdev->vdev->release = video_device_release;
1750
1751 /* occupy slot */ 1703 /* occupy slot */
1752 if (hint < MAX_DEV_HINTS) 1704 if (hint < MAX_DEV_HINTS)
1753 device_hint[hint].pdev = pdev; 1705 device_hint[hint].pdev = pdev;
@@ -1759,16 +1711,16 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
1759 pwc_set_leds(pdev, 0, 0); 1711 pwc_set_leds(pdev, 0, 0);
1760 pwc_camera_power(pdev, 0); 1712 pwc_camera_power(pdev, 0);
1761 1713
1762 rc = video_register_device(pdev->vdev, VFL_TYPE_GRABBER, video_nr); 1714 rc = video_register_device(&pdev->vdev, VFL_TYPE_GRABBER, video_nr);
1763 if (rc < 0) { 1715 if (rc < 0) {
1764 PWC_ERROR("Failed to register as video device (%d).\n", rc); 1716 PWC_ERROR("Failed to register as video device (%d).\n", rc);
1765 goto err_video_release; 1717 goto err_free_mem;
1766 } 1718 }
1767 rc = pwc_create_sysfs_files(pdev->vdev); 1719 rc = pwc_create_sysfs_files(pdev);
1768 if (rc) 1720 if (rc)
1769 goto err_video_unreg; 1721 goto err_video_unreg;
1770 1722
1771 PWC_INFO("Registered as %s.\n", video_device_node_name(pdev->vdev)); 1723 PWC_INFO("Registered as %s.\n", video_device_node_name(&pdev->vdev));
1772 1724
1773#ifdef CONFIG_USB_PWC_INPUT_EVDEV 1725#ifdef CONFIG_USB_PWC_INPUT_EVDEV
1774 /* register webcam snapshot button input device */ 1726 /* register webcam snapshot button input device */
@@ -1776,7 +1728,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
1776 if (!pdev->button_dev) { 1728 if (!pdev->button_dev) {
1777 PWC_ERROR("Err, insufficient memory for webcam snapshot button device."); 1729 PWC_ERROR("Err, insufficient memory for webcam snapshot button device.");
1778 rc = -ENOMEM; 1730 rc = -ENOMEM;
1779 pwc_remove_sysfs_files(pdev->vdev); 1731 pwc_remove_sysfs_files(pdev);
1780 goto err_video_unreg; 1732 goto err_video_unreg;
1781 } 1733 }
1782 1734
@@ -1794,7 +1746,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
1794 if (rc) { 1746 if (rc) {
1795 input_free_device(pdev->button_dev); 1747 input_free_device(pdev->button_dev);
1796 pdev->button_dev = NULL; 1748 pdev->button_dev = NULL;
1797 pwc_remove_sysfs_files(pdev->vdev); 1749 pwc_remove_sysfs_files(pdev);
1798 goto err_video_unreg; 1750 goto err_video_unreg;
1799 } 1751 }
1800#endif 1752#endif
@@ -1804,10 +1756,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
1804err_video_unreg: 1756err_video_unreg:
1805 if (hint < MAX_DEV_HINTS) 1757 if (hint < MAX_DEV_HINTS)
1806 device_hint[hint].pdev = NULL; 1758 device_hint[hint].pdev = NULL;
1807 video_unregister_device(pdev->vdev); 1759 video_unregister_device(&pdev->vdev);
1808 pdev->vdev = NULL; /* So we don't try to release it below */
1809err_video_release:
1810 video_device_release(pdev->vdev);
1811err_free_mem: 1760err_free_mem:
1812 kfree(pdev); 1761 kfree(pdev);
1813 return rc; 1762 return rc;
@@ -1816,10 +1765,8 @@ err_free_mem:
1816/* The user yanked out the cable... */ 1765/* The user yanked out the cable... */
1817static void usb_pwc_disconnect(struct usb_interface *intf) 1766static void usb_pwc_disconnect(struct usb_interface *intf)
1818{ 1767{
1819 struct pwc_device *pdev; 1768 struct pwc_device *pdev = usb_get_intfdata(intf);
1820 int hint;
1821 1769
1822 pdev = usb_get_intfdata (intf);
1823 mutex_lock(&pdev->modlock); 1770 mutex_lock(&pdev->modlock);
1824 usb_set_intfdata (intf, NULL); 1771 usb_set_intfdata (intf, NULL);
1825 if (pdev == NULL) { 1772 if (pdev == NULL) {
@@ -1836,30 +1783,25 @@ static void usb_pwc_disconnect(struct usb_interface *intf)
1836 } 1783 }
1837 1784
1838 /* We got unplugged; this is signalled by an EPIPE error code */ 1785 /* We got unplugged; this is signalled by an EPIPE error code */
1839 if (pdev->vopen) { 1786 pdev->error_status = EPIPE;
1840 PWC_INFO("Disconnected while webcam is in use!\n"); 1787 pdev->unplugged = 1;
1841 pdev->error_status = EPIPE;
1842 }
1843 1788
1844 /* Alert waiting processes */ 1789 /* Alert waiting processes */
1845 wake_up_interruptible(&pdev->frameq); 1790 wake_up_interruptible(&pdev->frameq);
1846 /* Wait until device is closed */
1847 if (pdev->vopen) {
1848 pdev->unplugged = 1;
1849 pwc_iso_stop(pdev);
1850 } else {
1851 /* Device is closed, so we can safely unregister it */
1852 PWC_DEBUG_PROBE("Unregistering video device in disconnect().\n");
1853 1791
1854disconnect_out: 1792 /* No need to keep the urbs around after disconnection */
1855 /* search device_hint[] table if we occupy a slot, by any chance */ 1793 pwc_isoc_cleanup(pdev);
1856 for (hint = 0; hint < MAX_DEV_HINTS; hint++)
1857 if (device_hint[hint].pdev == pdev)
1858 device_hint[hint].pdev = NULL;
1859 }
1860 1794
1795disconnect_out:
1861 mutex_unlock(&pdev->modlock); 1796 mutex_unlock(&pdev->modlock);
1862 pwc_cleanup(pdev); 1797
1798 pwc_remove_sysfs_files(pdev);
1799 video_unregister_device(&pdev->vdev);
1800
1801#ifdef CONFIG_USB_PWC_INPUT_EVDEV
1802 if (pdev->button_dev)
1803 input_unregister_device(pdev->button_dev);
1804#endif
1863} 1805}
1864 1806
1865 1807
diff --git a/drivers/media/video/pwc/pwc.h b/drivers/media/video/pwc/pwc.h
index e947766337d6..083f8b15df73 100644
--- a/drivers/media/video/pwc/pwc.h
+++ b/drivers/media/video/pwc/pwc.h
@@ -162,9 +162,9 @@ struct pwc_imgbuf
162 162
163struct pwc_device 163struct pwc_device
164{ 164{
165 struct video_device *vdev; 165 struct video_device vdev;
166 166
167 /* Pointer to our usb_device */ 167 /* Pointer to our usb_device, may be NULL after unplug */
168 struct usb_device *udev; 168 struct usb_device *udev;
169 169
170 int type; /* type of cam (645, 646, 675, 680, 690, 720, 730, 740, 750) */ 170 int type; /* type of cam (645, 646, 675, 680, 690, 720, 730, 740, 750) */
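
The thread running through all the pwc hunks is this last one: the separately allocated video_device becomes a member embedded in pwc_device, the V4L2 core calls pwc_video_release() when the last reference drops, and container_of() recovers the owning structure so everything is freed in one place, replacing the old deferred pwc_cleanup() dance. A userspace sketch of that pattern with stand-in types (not the driver's real ones):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct video_dev { void (*release)(struct video_dev *); };

    struct cam_dev {                /* stands in for struct pwc_device */
        int id;
        struct video_dev vdev;      /* embedded, not a pointer */
    };

    static void cam_release(struct video_dev *vfd)
    {
        /* recover the parent from the embedded member, free it whole */
        struct cam_dev *cam = container_of(vfd, struct cam_dev, vdev);
        printf("releasing camera %d\n", cam->id);
        free(cam);
    }

    int main(void)
    {
        struct cam_dev *cam = malloc(sizeof(*cam));
        cam->id = 0;
        cam->vdev.release = cam_release;
        cam->vdev.release(&cam->vdev);  /* the core calls this at last put */
        return 0;
    }
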
diff --git a/drivers/media/video/s5p-fimc/fimc-capture.c b/drivers/media/video/s5p-fimc/fimc-capture.c
index d142b40ea64e..81b4a826ee5e 100644
--- a/drivers/media/video/s5p-fimc/fimc-capture.c
+++ b/drivers/media/video/s5p-fimc/fimc-capture.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Samsung S5P SoC series camera interface (camera capture) driver 2 * Samsung S5P/EXYNOS4 SoC series camera interface (camera capture) driver
3 * 3 *
4 * Copyright (c) 2010 Samsung Electronics Co., Ltd 4 * Copyright (C) 2010 - 2011 Samsung Electronics Co., Ltd.
5 * Author: Sylwester Nawrocki, <s.nawrocki@samsung.com> 5 * Author: Sylwester Nawrocki, <s.nawrocki@samsung.com>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
@@ -262,12 +262,7 @@ static unsigned int get_plane_size(struct fimc_frame *fr, unsigned int plane)
262{ 262{
263 if (!fr || plane >= fr->fmt->memplanes) 263 if (!fr || plane >= fr->fmt->memplanes)
264 return 0; 264 return 0;
265
266 dbg("%s: w: %d. h: %d. depth[%d]: %d",
267 __func__, fr->width, fr->height, plane, fr->fmt->depth[plane]);
268
269 return fr->f_width * fr->f_height * fr->fmt->depth[plane] / 8; 265 return fr->f_width * fr->f_height * fr->fmt->depth[plane] / 8;
270
271} 266}
272 267
273static int queue_setup(struct vb2_queue *vq, unsigned int *num_buffers, 268static int queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
@@ -283,24 +278,14 @@ static int queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
283 278
284 *num_planes = fmt->memplanes; 279 *num_planes = fmt->memplanes;
285 280
286 dbg("%s, buffer count=%d, plane count=%d",
287 __func__, *num_buffers, *num_planes);
288
289 for (i = 0; i < fmt->memplanes; i++) { 281 for (i = 0; i < fmt->memplanes; i++) {
290 sizes[i] = get_plane_size(&ctx->d_frame, i); 282 sizes[i] = get_plane_size(&ctx->d_frame, i);
291 dbg("plane: %u, plane_size: %lu", i, sizes[i]);
292 allocators[i] = ctx->fimc_dev->alloc_ctx; 283 allocators[i] = ctx->fimc_dev->alloc_ctx;
293 } 284 }
294 285
295 return 0; 286 return 0;
296} 287}
297 288
298static int buffer_init(struct vb2_buffer *vb)
299{
300 /* TODO: */
301 return 0;
302}
303
304static int buffer_prepare(struct vb2_buffer *vb) 289static int buffer_prepare(struct vb2_buffer *vb)
305{ 290{
306 struct vb2_queue *vq = vb->vb2_queue; 291 struct vb2_queue *vq = vb->vb2_queue;
@@ -380,7 +365,6 @@ static struct vb2_ops fimc_capture_qops = {
380 .queue_setup = queue_setup, 365 .queue_setup = queue_setup,
381 .buf_prepare = buffer_prepare, 366 .buf_prepare = buffer_prepare,
382 .buf_queue = buffer_queue, 367 .buf_queue = buffer_queue,
383 .buf_init = buffer_init,
384 .wait_prepare = fimc_unlock, 368 .wait_prepare = fimc_unlock,
385 .wait_finish = fimc_lock, 369 .wait_finish = fimc_lock,
386 .start_streaming = start_streaming, 370 .start_streaming = start_streaming,
@@ -903,6 +887,7 @@ err_vd_reg:
903err_v4l2_reg: 887err_v4l2_reg:
904 v4l2_device_unregister(v4l2_dev); 888 v4l2_device_unregister(v4l2_dev);
905err_info: 889err_info:
890 kfree(ctx);
906 dev_err(&fimc->pdev->dev, "failed to install\n"); 891 dev_err(&fimc->pdev->dev, "failed to install\n");
907 return ret; 892 return ret;
908} 893}
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
index dc91a8511af6..bdf19ada9172 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.c
+++ b/drivers/media/video/s5p-fimc/fimc-core.c
@@ -1,9 +1,8 @@
1/* 1/*
2 * S5P camera interface (video postprocessor) driver 2 * Samsung S5P/EXYNOS4 SoC series camera interface (video postprocessor) driver
3 * 3 *
4 * Copyright (c) 2010 Samsung Electronics Co., Ltd 4 * Copyright (C) 2010-2011 Samsung Electronics Co., Ltd.
5 * 5 * Contact: Sylwester Nawrocki, <s.nawrocki@samsung.com>
6 * Sylwester Nawrocki, <s.nawrocki@samsung.com>
7 * 6 *
8 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published 8 * it under the terms of the GNU General Public License as published
@@ -42,7 +41,6 @@ static struct fimc_fmt fimc_formats[] = {
42 .color = S5P_FIMC_RGB565, 41 .color = S5P_FIMC_RGB565,
43 .memplanes = 1, 42 .memplanes = 1,
44 .colplanes = 1, 43 .colplanes = 1,
45 .mbus_code = V4L2_MBUS_FMT_RGB565_2X8_BE,
46 .flags = FMT_FLAGS_M2M, 44 .flags = FMT_FLAGS_M2M,
47 }, { 45 }, {
48 .name = "BGR666", 46 .name = "BGR666",
@@ -232,11 +230,7 @@ static int fimc_get_scaler_factor(u32 src, u32 tar, u32 *ratio, u32 *shift)
232 return 0; 230 return 0;
233 } 231 }
234 } 232 }
235
236 *shift = 0, *ratio = 1; 233 *shift = 0, *ratio = 1;
237
238 dbg("s: %d, t: %d, shift: %d, ratio: %d",
239 src, tar, *shift, *ratio);
240 return 0; 234 return 0;
241} 235}
242 236
@@ -268,10 +262,8 @@ int fimc_set_scaler_info(struct fimc_ctx *ctx)
268 err("invalid source size: %d x %d", sx, sy); 262 err("invalid source size: %d x %d", sx, sy);
269 return -EINVAL; 263 return -EINVAL;
270 } 264 }
271
272 sc->real_width = sx; 265 sc->real_width = sx;
273 sc->real_height = sy; 266 sc->real_height = sy;
274 dbg("sx= %d, sy= %d, tx= %d, ty= %d", sx, sy, tx, ty);
275 267
276 ret = fimc_get_scaler_factor(sx, tx, &sc->pre_hratio, &sc->hfactor); 268 ret = fimc_get_scaler_factor(sx, tx, &sc->pre_hratio, &sc->hfactor);
277 if (ret) 269 if (ret)
@@ -711,22 +703,18 @@ static int fimc_queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
711 f = ctx_get_frame(ctx, vq->type); 703 f = ctx_get_frame(ctx, vq->type);
712 if (IS_ERR(f)) 704 if (IS_ERR(f))
713 return PTR_ERR(f); 705 return PTR_ERR(f);
714
715 /* 706 /*
716 * Return number of non-contigous planes (plane buffers) 707 * Return number of non-contigous planes (plane buffers)
717 * depending on the configured color format. 708 * depending on the configured color format.
718 */ 709 */
719 if (f->fmt) 710 if (!f->fmt)
720 *num_planes = f->fmt->memplanes; 711 return -EINVAL;
721 712
713 *num_planes = f->fmt->memplanes;
722 for (i = 0; i < f->fmt->memplanes; i++) { 714 for (i = 0; i < f->fmt->memplanes; i++) {
723 sizes[i] = (f->width * f->height * f->fmt->depth[i]) >> 3; 715 sizes[i] = (f->f_width * f->f_height * f->fmt->depth[i]) / 8;
724 allocators[i] = ctx->fimc_dev->alloc_ctx; 716 allocators[i] = ctx->fimc_dev->alloc_ctx;
725 } 717 }
726
727 if (*num_buffers == 0)
728 *num_buffers = 1;
729
730 return 0; 718 return 0;
731} 719}
732 720
@@ -852,7 +840,7 @@ struct fimc_fmt *find_format(struct v4l2_format *f, unsigned int mask)
852 840
853 for (i = 0; i < ARRAY_SIZE(fimc_formats); ++i) { 841 for (i = 0; i < ARRAY_SIZE(fimc_formats); ++i) {
854 fmt = &fimc_formats[i]; 842 fmt = &fimc_formats[i];
855 if (fmt->fourcc == f->fmt.pix.pixelformat && 843 if (fmt->fourcc == f->fmt.pix_mp.pixelformat &&
856 (fmt->flags & mask)) 844 (fmt->flags & mask))
857 break; 845 break;
858 } 846 }
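
Two substantive fixes hide in the queue_setup() hunk above: a frame with no format set is now rejected with -EINVAL instead of leaving *num_planes stale, and each plane is sized from the full frame (f_width x f_height) rather than the cropped window, with the bit depth divided by 8. A sketch of the per-plane size arithmetic; the NV12-style depths are illustrative:

    #include <stdio.h>

    struct fmt { int memplanes; int depth[3]; };  /* bits/pixel per plane */

    /* Buffer bytes for one plane: full frame area times bit depth / 8. */
    static unsigned long plane_size(unsigned int f_width, unsigned int f_height,
                                    const struct fmt *f, int plane)
    {
        return (unsigned long)f_width * f_height * f->depth[plane] / 8;
    }

    int main(void)
    {
        /* NV12-like: 8 bpp luma plane + 4 bpp interleaved chroma plane */
        struct fmt nv12 = { .memplanes = 2, .depth = { 8, 4 } };
        for (int i = 0; i < nv12.memplanes; i++)
            printf("plane %d: %lu bytes\n", i,
                   plane_size(1280, 720, &nv12, i));   /* 921600, 460800 */
        return 0;
    }
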
diff --git a/drivers/media/video/s5p-fimc/fimc-core.h b/drivers/media/video/s5p-fimc/fimc-core.h
index 3beb1e5320ce..1f70772daaf0 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.h
+++ b/drivers/media/video/s5p-fimc/fimc-core.h
@@ -1,7 +1,5 @@
1/* 1/*
2 * Copyright (c) 2010 Samsung Electronics 2 * Copyright (C) 2010 - 2011 Samsung Electronics Co., Ltd.
3 *
4 * Sylwester Nawrocki, <s.nawrocki@samsung.com>
5 * 3 *
6 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
@@ -135,9 +133,10 @@ enum fimc_color_fmt {
135 * @name: format description 133 * @name: format description
136 * @fourcc: the fourcc code for this format, 0 if not applicable 134 * @fourcc: the fourcc code for this format, 0 if not applicable
137 * @color: the corresponding fimc_color_fmt 135 * @color: the corresponding fimc_color_fmt
138 * @depth: per plane driver's private 'number of bits per pixel'
139 * @memplanes: number of physically non-contiguous data planes 136 * @memplanes: number of physically non-contiguous data planes
140 * @colplanes: number of physically contiguous data planes 137 * @colplanes: number of physically contiguous data planes
138 * @depth: per plane driver's private 'number of bits per pixel'
139 * @flags: flags indicating which operation mode format applies to
141 */ 140 */
142struct fimc_fmt { 141struct fimc_fmt {
143 enum v4l2_mbus_pixelcode mbus_code; 142 enum v4l2_mbus_pixelcode mbus_code;
@@ -171,7 +170,7 @@ struct fimc_dma_offset {
171}; 170};
172 171
173/** 172/**
174 * struct fimc_effect - the configuration data for the "Arbitrary" image effect 173 * struct fimc_effect - color effect information
175 * @type: effect type 174 * @type: effect type
 176 * @pat_cb: cb value when type is "arbitrary" 175 * @pat_cb: cb value when type is "arbitrary"
177 * @pat_cr: cr value when type is "arbitrary" 176 * @pat_cr: cr value when type is "arbitrary"
@@ -184,7 +183,6 @@ struct fimc_effect {
184 183
185/** 184/**
 186 * struct fimc_scaler - the configuration data for FIMC internal scaler 185 * struct fimc_scaler - the configuration data for FIMC internal scaler
187 *
188 * @scaleup_h: flag indicating scaling up horizontally 186 * @scaleup_h: flag indicating scaling up horizontally
189 * @scaleup_v: flag indicating scaling up vertically 187 * @scaleup_v: flag indicating scaling up vertically
190 * @copy_mode: flag indicating transparent DMA transfer (no scaling 188 * @copy_mode: flag indicating transparent DMA transfer (no scaling
@@ -220,7 +218,6 @@ struct fimc_scaler {
220 218
221/** 219/**
222 * struct fimc_addr - the FIMC physical address set for DMA 220 * struct fimc_addr - the FIMC physical address set for DMA
223 *
224 * @y: luminance plane physical address 221 * @y: luminance plane physical address
225 * @cb: Cb plane physical address 222 * @cb: Cb plane physical address
226 * @cr: Cr plane physical address 223 * @cr: Cr plane physical address
@@ -234,6 +231,7 @@ struct fimc_addr {
234/** 231/**
235 * struct fimc_vid_buffer - the driver's video buffer 232 * struct fimc_vid_buffer - the driver's video buffer
236 * @vb: v4l videobuf buffer 233 * @vb: v4l videobuf buffer
234 * @list: linked list structure for buffer queue
237 * @paddr: precalculated physical address set 235 * @paddr: precalculated physical address set
238 * @index: buffer index for the output DMA engine 236 * @index: buffer index for the output DMA engine
239 */ 237 */
@@ -254,11 +252,10 @@ struct fimc_vid_buffer {
254 * @offs_v: image vertical pixel offset 252 * @offs_v: image vertical pixel offset
255 * @width: image pixel width 253 * @width: image pixel width
 256 * @height: image pixel height 254 * @height: image pixel height
257 * @paddr: image frame buffer physical addresses
258 * @buf_cnt: number of buffers depending on a color format
259 * @payload: image size in bytes (w x h x bpp) 255 * @payload: image size in bytes (w x h x bpp)
260 * @color: color format 256 * @paddr: image frame buffer physical addresses
261 * @dma_offset: DMA offset in bytes 257 * @dma_offset: DMA offset in bytes
258 * @fmt: fimc color format pointer
262 */ 259 */
263struct fimc_frame { 260struct fimc_frame {
264 u32 f_width; 261 u32 f_width;
@@ -390,21 +387,22 @@ struct fimc_ctx;
390 387
391/** 388/**
392 * struct fimc_dev - abstraction for FIMC entity 389 * struct fimc_dev - abstraction for FIMC entity
393 *
394 * @slock: the spinlock protecting this data structure 390 * @slock: the spinlock protecting this data structure
395 * @lock: the mutex protecting this data structure 391 * @lock: the mutex protecting this data structure
396 * @pdev: pointer to the FIMC platform device 392 * @pdev: pointer to the FIMC platform device
397 * @pdata: pointer to the device platform data 393 * @pdata: pointer to the device platform data
394 * @variant: the IP variant information
398 * @id: FIMC device index (0..FIMC_MAX_DEVS) 395 * @id: FIMC device index (0..FIMC_MAX_DEVS)
399 * @num_clocks: the number of clocks managed by this device instance 396 * @num_clocks: the number of clocks managed by this device instance
400 * @clock[]: the clocks required for FIMC operation 397 * @clock: clocks required for FIMC operation
401 * @regs: the mapped hardware registers 398 * @regs: the mapped hardware registers
402 * @regs_res: the resource claimed for IO registers 399 * @regs_res: the resource claimed for IO registers
403 * @irq: interrupt number of the FIMC subdevice 400 * @irq: FIMC interrupt number
404 * @irq_queue: 401 * @irq_queue: interrupt handler waitqueue
405 * @m2m: memory-to-memory V4L2 device information 402 * @m2m: memory-to-memory V4L2 device information
406 * @vid_cap: camera capture device information 403 * @vid_cap: camera capture device information
407 * @state: flags used to synchronize m2m and capture mode operation 404 * @state: flags used to synchronize m2m and capture mode operation
405 * @alloc_ctx: videobuf2 memory allocator context
408 */ 406 */
409struct fimc_dev { 407struct fimc_dev {
410 spinlock_t slock; 408 spinlock_t slock;
@@ -427,8 +425,7 @@ struct fimc_dev {
427 425
428/** 426/**
429 * fimc_ctx - the device context data 427 * fimc_ctx - the device context data
430 * 428 * @slock: spinlock protecting this data structure
431 * @lock: mutex protecting this data structure
432 * @s_frame: source frame properties 429 * @s_frame: source frame properties
433 * @d_frame: destination frame properties 430 * @d_frame: destination frame properties
434 * @out_order_1p: output 1-plane YCBCR order 431 * @out_order_1p: output 1-plane YCBCR order
diff --git a/drivers/media/video/saa7134/saa7134-input.c b/drivers/media/video/saa7134/saa7134-input.c
index ff6c0e97563e..d4ee24bf6928 100644
--- a/drivers/media/video/saa7134/saa7134-input.c
+++ b/drivers/media/video/saa7134/saa7134-input.c
@@ -963,7 +963,7 @@ static int saa7134_raw_decode_irq(struct saa7134_dev *dev)
963 * to work with other protocols. 963 * to work with other protocols.
964 */ 964 */
965 if (!ir->active) { 965 if (!ir->active) {
966 timeout = jiffies + jiffies_to_msecs(15); 966 timeout = jiffies + msecs_to_jiffies(15);
967 mod_timer(&ir->timer, timeout); 967 mod_timer(&ir->timer, timeout);
968 ir->active = true; 968 ir->active = true;
969 } 969 }
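
The one-liner above is a classic conversion-direction bug: jiffies_to_msecs(15) turns 15 ticks into milliseconds, yet the result was added to the jiffies counter as if it were ticks, so the intended 15 ms timer fired after an HZ-dependent interval. A compact illustration with HZ pinned to 100; the real kernel macros are more careful about rounding and overflow:

    #include <stdio.h>

    #define HZ 100  /* ticks per second; kernel configs vary */

    static unsigned long msecs_to_jiffies(unsigned long ms)
    {
        return (ms * HZ + 999) / 1000;   /* ms -> ticks, rounded up */
    }

    static unsigned long jiffies_to_msecs(unsigned long j)
    {
        return j * 1000 / HZ;            /* ticks -> ms */
    }

    int main(void)
    {
        /* Intended: arm a timer 15 ms out. */
        printf("correct: +%lu ticks\n", msecs_to_jiffies(15));  /* 2 */
        /* The bug: this result is milliseconds, but it was added to a
         * tick counter, arming the timer 150 ticks = 1.5 s out. */
        printf("buggy:   +%lu ticks\n", jiffies_to_msecs(15));  /* 150 */
        return 0;
    }
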
diff --git a/drivers/media/video/uvc/uvc_entity.c b/drivers/media/video/uvc/uvc_entity.c
index c3ab0c813be2..48fea373c25a 100644
--- a/drivers/media/video/uvc/uvc_entity.c
+++ b/drivers/media/video/uvc/uvc_entity.c
@@ -27,14 +27,20 @@ static int uvc_mc_register_entity(struct uvc_video_chain *chain,
27 struct uvc_entity *entity) 27 struct uvc_entity *entity)
28{ 28{
29 const u32 flags = MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE; 29 const u32 flags = MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE;
30 struct uvc_entity *remote; 30 struct media_entity *sink;
31 unsigned int i; 31 unsigned int i;
32 u8 remote_pad; 32 int ret;
33 int ret = 0; 33
34 sink = (UVC_ENTITY_TYPE(entity) == UVC_TT_STREAMING)
35 ? (entity->vdev ? &entity->vdev->entity : NULL)
36 : &entity->subdev.entity;
37 if (sink == NULL)
38 return 0;
34 39
35 for (i = 0; i < entity->num_pads; ++i) { 40 for (i = 0; i < entity->num_pads; ++i) {
36 struct media_entity *source; 41 struct media_entity *source;
37 struct media_entity *sink; 42 struct uvc_entity *remote;
43 u8 remote_pad;
38 44
39 if (!(entity->pads[i].flags & MEDIA_PAD_FL_SINK)) 45 if (!(entity->pads[i].flags & MEDIA_PAD_FL_SINK))
40 continue; 46 continue;
@@ -43,10 +49,11 @@ static int uvc_mc_register_entity(struct uvc_video_chain *chain,
43 if (remote == NULL) 49 if (remote == NULL)
44 return -EINVAL; 50 return -EINVAL;
45 51
46 source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING) 52 source = (UVC_ENTITY_TYPE(remote) != UVC_TT_STREAMING)
47 ? &remote->vdev->entity : &remote->subdev.entity; 53 ? (remote->vdev ? &remote->vdev->entity : NULL)
48 sink = (UVC_ENTITY_TYPE(entity) == UVC_TT_STREAMING) 54 : &remote->subdev.entity;
49 ? &entity->vdev->entity : &entity->subdev.entity; 55 if (source == NULL)
56 continue;
50 57
51 remote_pad = remote->num_pads - 1; 58 remote_pad = remote->num_pads - 1;
52 ret = media_entity_create_link(source, remote_pad, 59 ret = media_entity_create_link(source, remote_pad,
@@ -55,11 +62,10 @@ static int uvc_mc_register_entity(struct uvc_video_chain *chain,
55 return ret; 62 return ret;
56 } 63 }
57 64
58 if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING) 65 if (UVC_ENTITY_TYPE(entity) == UVC_TT_STREAMING)
59 ret = v4l2_device_register_subdev(&chain->dev->vdev, 66 return 0;
60 &entity->subdev);
61 67
62 return ret; 68 return v4l2_device_register_subdev(&chain->dev->vdev, &entity->subdev);
63} 69}
64 70
65static struct v4l2_subdev_ops uvc_subdev_ops = { 71static struct v4l2_subdev_ops uvc_subdev_ops = {
@@ -84,9 +90,11 @@ static int uvc_mc_init_entity(struct uvc_entity *entity)
84 90
85 ret = media_entity_init(&entity->subdev.entity, 91 ret = media_entity_init(&entity->subdev.entity,
86 entity->num_pads, entity->pads, 0); 92 entity->num_pads, entity->pads, 0);
87 } else 93 } else if (entity->vdev != NULL) {
88 ret = media_entity_init(&entity->vdev->entity, 94 ret = media_entity_init(&entity->vdev->entity,
89 entity->num_pads, entity->pads, 0); 95 entity->num_pads, entity->pads, 0);
96 } else
97 ret = 0;
90 98
91 return ret; 99 return ret;
92} 100}
diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c
index 109a06384a8f..f90ce9fce539 100644
--- a/drivers/media/video/uvc/uvc_queue.c
+++ b/drivers/media/video/uvc/uvc_queue.c
@@ -104,6 +104,8 @@ static int __uvc_free_buffers(struct uvc_video_queue *queue)
104 } 104 }
105 105
106 if (queue->count) { 106 if (queue->count) {
107 uvc_queue_cancel(queue, 0);
108 INIT_LIST_HEAD(&queue->mainqueue);
107 vfree(queue->mem); 109 vfree(queue->mem);
108 queue->count = 0; 110 queue->count = 0;
109 } 111 }
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
index fc766b9f24c5..49994793cc77 100644
--- a/drivers/media/video/uvc/uvc_video.c
+++ b/drivers/media/video/uvc/uvc_video.c
@@ -1255,8 +1255,10 @@ int uvc_video_enable(struct uvc_streaming *stream, int enable)
1255 1255
1256 /* Commit the streaming parameters. */ 1256 /* Commit the streaming parameters. */
1257 ret = uvc_commit_video(stream, &stream->ctrl); 1257 ret = uvc_commit_video(stream, &stream->ctrl);
1258 if (ret < 0) 1258 if (ret < 0) {
1259 uvc_queue_enable(&stream->queue, 0);
1259 return ret; 1260 return ret;
1261 }
1260 1262
1261 return uvc_init_video(stream, GFP_KERNEL); 1263 return uvc_init_video(stream, GFP_KERNEL);
1262} 1264}
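
The uvc_video.c hunk adds the missing unwind on the error path: if committing the streaming parameters fails after the queue was already enabled, the queue is disabled again before returning, rather than being left enabled with no stream behind it. The general shape, sketched with dummy steps:

    #include <stdio.h>

    static int step_a_enabled;

    static int  step_a(void)        { step_a_enabled = 1; return 0; }
    static void undo_a(void)        { step_a_enabled = 0; }
    static int  step_b(int fail)    { return fail ? -1 : 0; }

    /* Each failing step must undo what the earlier steps did, otherwise
     * state leaks (here: a queue left enabled with no active stream). */
    static int enable(int fail_b)
    {
        int ret = step_a();
        if (ret < 0)
            return ret;
        ret = step_b(fail_b);
        if (ret < 0) {
            undo_a();               /* the line the patch adds */
            return ret;
        }
        return 0;
    }

    int main(void)
    {
        printf("enable(fail)=%d, leaked=%d\n", enable(1), step_a_enabled);
        return 0;
    }
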
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index 19d5ae293780..06f14008b346 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -167,6 +167,12 @@ static void v4l2_device_release(struct device *cd)
167 167
168 mutex_unlock(&videodev_lock); 168 mutex_unlock(&videodev_lock);
169 169
170#if defined(CONFIG_MEDIA_CONTROLLER)
171 if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
172 vdev->vfl_type != VFL_TYPE_SUBDEV)
173 media_device_unregister_entity(&vdev->entity);
174#endif
175
170 /* Release video_device and perform other 176 /* Release video_device and perform other
171 cleanups as needed. */ 177 cleanups as needed. */
172 vdev->release(vdev); 178 vdev->release(vdev);
@@ -389,9 +395,6 @@ static int v4l2_mmap(struct file *filp, struct vm_area_struct *vm)
389static int v4l2_open(struct inode *inode, struct file *filp) 395static int v4l2_open(struct inode *inode, struct file *filp)
390{ 396{
391 struct video_device *vdev; 397 struct video_device *vdev;
392#if defined(CONFIG_MEDIA_CONTROLLER)
393 struct media_entity *entity = NULL;
394#endif
395 int ret = 0; 398 int ret = 0;
396 399
397 /* Check if the video device is available */ 400 /* Check if the video device is available */
@@ -405,17 +408,6 @@ static int v4l2_open(struct inode *inode, struct file *filp)
405 /* and increase the device refcount */ 408 /* and increase the device refcount */
406 video_get(vdev); 409 video_get(vdev);
407 mutex_unlock(&videodev_lock); 410 mutex_unlock(&videodev_lock);
408#if defined(CONFIG_MEDIA_CONTROLLER)
409 if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
410 vdev->vfl_type != VFL_TYPE_SUBDEV) {
411 entity = media_entity_get(&vdev->entity);
412 if (!entity) {
413 ret = -EBUSY;
414 video_put(vdev);
415 return ret;
416 }
417 }
418#endif
419 if (vdev->fops->open) { 411 if (vdev->fops->open) {
420 if (vdev->lock && mutex_lock_interruptible(vdev->lock)) { 412 if (vdev->lock && mutex_lock_interruptible(vdev->lock)) {
421 ret = -ERESTARTSYS; 413 ret = -ERESTARTSYS;
@@ -431,14 +423,8 @@ static int v4l2_open(struct inode *inode, struct file *filp)
431 423
432err: 424err:
433 /* decrease the refcount in case of an error */ 425 /* decrease the refcount in case of an error */
434 if (ret) { 426 if (ret)
435#if defined(CONFIG_MEDIA_CONTROLLER)
436 if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
437 vdev->vfl_type != VFL_TYPE_SUBDEV)
438 media_entity_put(entity);
439#endif
440 video_put(vdev); 427 video_put(vdev);
441 }
442 return ret; 428 return ret;
443} 429}
444 430
@@ -455,11 +441,6 @@ static int v4l2_release(struct inode *inode, struct file *filp)
455 if (vdev->lock) 441 if (vdev->lock)
456 mutex_unlock(vdev->lock); 442 mutex_unlock(vdev->lock);
457 } 443 }
458#if defined(CONFIG_MEDIA_CONTROLLER)
459 if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
460 vdev->vfl_type != VFL_TYPE_SUBDEV)
461 media_entity_put(&vdev->entity);
462#endif
463 /* decrease the refcount unconditionally since the release() 444 /* decrease the refcount unconditionally since the release()
464 return value is ignored. */ 445 return value is ignored. */
465 video_put(vdev); 446 video_put(vdev);
@@ -754,12 +735,6 @@ void video_unregister_device(struct video_device *vdev)
754 if (!vdev || !video_is_registered(vdev)) 735 if (!vdev || !video_is_registered(vdev))
755 return; 736 return;
756 737
757#if defined(CONFIG_MEDIA_CONTROLLER)
758 if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
759 vdev->vfl_type != VFL_TYPE_SUBDEV)
760 media_device_unregister_entity(&vdev->entity);
761#endif
762
763 mutex_lock(&videodev_lock); 738 mutex_lock(&videodev_lock);
764 /* This must be in a critical section to prevent a race with v4l2_open. 739 /* This must be in a critical section to prevent a race with v4l2_open.
765 * Once this bit has been cleared video_get may never be called again. 740 * Once this bit has been cleared video_get may never be called again.
diff --git a/drivers/media/video/videobuf2-core.c b/drivers/media/video/videobuf2-core.c
index 6ba1461d51ef..3015e6000946 100644
--- a/drivers/media/video/videobuf2-core.c
+++ b/drivers/media/video/videobuf2-core.c
@@ -492,13 +492,6 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
492 return -EINVAL; 492 return -EINVAL;
493 } 493 }
494 494
495 /*
496 * If the same number of buffers and memory access method is requested
497 * then return immediately.
498 */
499 if (q->memory == req->memory && req->count == q->num_buffers)
500 return 0;
501
502 if (req->count == 0 || q->num_buffers != 0 || q->memory != req->memory) { 495 if (req->count == 0 || q->num_buffers != 0 || q->memory != req->memory) {
503 /* 496 /*
504 * We already have buffers allocated, so first check if they 497 * We already have buffers allocated, so first check if they
@@ -539,9 +532,9 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
539 /* Finally, allocate buffers and video memory */ 532 /* Finally, allocate buffers and video memory */
540 ret = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes, 533 ret = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes,
541 plane_sizes); 534 plane_sizes);
542 if (ret < 0) { 535 if (ret == 0) {
543 dprintk(1, "Memory allocation failed with error: %d\n", ret); 536 dprintk(1, "Memory allocation failed\n");
544 return ret; 537 return -ENOMEM;
545 } 538 }
546 539
547 /* 540 /*
@@ -1196,6 +1189,7 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
1196 * has not already dequeued before initiating cancel. 1189 * has not already dequeued before initiating cancel.
1197 */ 1190 */
1198 INIT_LIST_HEAD(&q->done_list); 1191 INIT_LIST_HEAD(&q->done_list);
1192 atomic_set(&q->queued_count, 0);
1199 wake_up_all(&q->done_wq); 1193 wake_up_all(&q->done_wq);
1200 1194
1201 /* 1195 /*
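
Note the inverted error check in the vb2_reqbufs() hunk: __vb2_queue_alloc() is evidently treated here as returning the number of buffers it managed to allocate, so zero, not a negative errno, is the failure case, and the caller maps it to -ENOMEM itself. A sketch of that count-returning convention with hypothetical names:

    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>

    /* Allocate up to 'want' buffers; return how many succeeded (may be 0).
     * Hypothetical stand-in for __vb2_queue_alloc(). */
    static int alloc_buffers(void **bufs, int want, size_t size)
    {
        int i;
        for (i = 0; i < want; i++) {
            bufs[i] = malloc(size);
            if (!bufs[i])
                break;              /* partial success is still success */
        }
        return i;
    }

    int main(void)
    {
        void *bufs[4];
        int got = alloc_buffers(bufs, 4, 4096);
        if (got == 0) {             /* caller turns "none" into an errno */
            fprintf(stderr, "allocation failed\n");
            return ENOMEM;
        }
        printf("allocated %d of 4 buffers\n", got);
        while (got--)
            free(bufs[got]);
        return 0;
    }
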
diff --git a/drivers/media/video/videobuf2-dma-sg.c b/drivers/media/video/videobuf2-dma-sg.c
index b2d9485aac75..10a20d9509d9 100644
--- a/drivers/media/video/videobuf2-dma-sg.c
+++ b/drivers/media/video/videobuf2-dma-sg.c
@@ -62,7 +62,7 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size)
62 goto fail_pages_array_alloc; 62 goto fail_pages_array_alloc;
63 63
64 for (i = 0; i < buf->sg_desc.num_pages; ++i) { 64 for (i = 0; i < buf->sg_desc.num_pages; ++i) {
65 buf->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO); 65 buf->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
66 if (NULL == buf->pages[i]) 66 if (NULL == buf->pages[i])
67 goto fail_pages_alloc; 67 goto fail_pages_alloc;
68 sg_set_page(&buf->sg_desc.sglist[i], 68 sg_set_page(&buf->sg_desc.sglist[i],
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 0f09c057e796..6ca938a6bf94 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -728,6 +728,9 @@ config MFD_TPS65910
728 if you say yes here you get support for the TPS65910 series of 728 if you say yes here you get support for the TPS65910 series of
729 Power Management chips. 729 Power Management chips.
730 730
731config TPS65911_COMPARATOR
732 tristate
733
731endif # MFD_SUPPORT 734endif # MFD_SUPPORT
732 735
733menu "Multimedia Capabilities Port drivers" 736menu "Multimedia Capabilities Port drivers"
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index efe3cc33ed92..d7d47d2a4c76 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -94,3 +94,4 @@ obj-$(CONFIG_MFD_OMAP_USB_HOST) += omap-usb-host.o
94obj-$(CONFIG_MFD_PM8921_CORE) += pm8921-core.o 94obj-$(CONFIG_MFD_PM8921_CORE) += pm8921-core.o
95obj-$(CONFIG_MFD_PM8XXX_IRQ) += pm8xxx-irq.o 95obj-$(CONFIG_MFD_PM8XXX_IRQ) += pm8xxx-irq.o
96obj-$(CONFIG_MFD_TPS65910) += tps65910.o tps65910-irq.o 96obj-$(CONFIG_MFD_TPS65910) += tps65910.o tps65910-irq.o
97obj-$(CONFIG_TPS65911_COMPARATOR) += tps65911-comparator.o
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 855219526ccb..1717144fe7f4 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -26,7 +26,6 @@
26#include <linux/spinlock.h> 26#include <linux/spinlock.h>
27#include <linux/gpio.h> 27#include <linux/gpio.h>
28#include <plat/usb.h> 28#include <plat/usb.h>
29#include <linux/pm_runtime.h>
30 29
31#define USBHS_DRIVER_NAME "usbhs-omap" 30#define USBHS_DRIVER_NAME "usbhs-omap"
32#define OMAP_EHCI_DEVICE "ehci-omap" 31#define OMAP_EHCI_DEVICE "ehci-omap"
@@ -147,6 +146,9 @@
147 146
148 147
149struct usbhs_hcd_omap { 148struct usbhs_hcd_omap {
149 struct clk *usbhost_ick;
150 struct clk *usbhost_hs_fck;
151 struct clk *usbhost_fs_fck;
150 struct clk *xclk60mhsp1_ck; 152 struct clk *xclk60mhsp1_ck;
151 struct clk *xclk60mhsp2_ck; 153 struct clk *xclk60mhsp2_ck;
152 struct clk *utmi_p1_fck; 154 struct clk *utmi_p1_fck;
@@ -156,6 +158,8 @@ struct usbhs_hcd_omap {
156 struct clk *usbhost_p2_fck; 158 struct clk *usbhost_p2_fck;
157 struct clk *usbtll_p2_fck; 159 struct clk *usbtll_p2_fck;
158 struct clk *init_60m_fclk; 160 struct clk *init_60m_fclk;
161 struct clk *usbtll_fck;
162 struct clk *usbtll_ick;
159 163
160 void __iomem *uhh_base; 164 void __iomem *uhh_base;
161 void __iomem *tll_base; 165 void __iomem *tll_base;
@@ -349,13 +353,46 @@ static int __devinit usbhs_omap_probe(struct platform_device *pdev)
349 omap->platdata.ehci_data = pdata->ehci_data; 353 omap->platdata.ehci_data = pdata->ehci_data;
350 omap->platdata.ohci_data = pdata->ohci_data; 354 omap->platdata.ohci_data = pdata->ohci_data;
351 355
352 pm_runtime_enable(&pdev->dev); 356 omap->usbhost_ick = clk_get(dev, "usbhost_ick");
357 if (IS_ERR(omap->usbhost_ick)) {
358 ret = PTR_ERR(omap->usbhost_ick);
359 dev_err(dev, "usbhost_ick failed error:%d\n", ret);
360 goto err_end;
361 }
362
363 omap->usbhost_hs_fck = clk_get(dev, "hs_fck");
364 if (IS_ERR(omap->usbhost_hs_fck)) {
365 ret = PTR_ERR(omap->usbhost_hs_fck);
366 dev_err(dev, "usbhost_hs_fck failed error:%d\n", ret);
367 goto err_usbhost_ick;
368 }
369
370 omap->usbhost_fs_fck = clk_get(dev, "fs_fck");
371 if (IS_ERR(omap->usbhost_fs_fck)) {
372 ret = PTR_ERR(omap->usbhost_fs_fck);
373 dev_err(dev, "usbhost_fs_fck failed error:%d\n", ret);
374 goto err_usbhost_hs_fck;
375 }
376
377 omap->usbtll_fck = clk_get(dev, "usbtll_fck");
378 if (IS_ERR(omap->usbtll_fck)) {
379 ret = PTR_ERR(omap->usbtll_fck);
380 dev_err(dev, "usbtll_fck failed error:%d\n", ret);
381 goto err_usbhost_fs_fck;
382 }
383
384 omap->usbtll_ick = clk_get(dev, "usbtll_ick");
385 if (IS_ERR(omap->usbtll_ick)) {
386 ret = PTR_ERR(omap->usbtll_ick);
387 dev_err(dev, "usbtll_ick failed error:%d\n", ret);
388 goto err_usbtll_fck;
389 }
353 390
354 omap->utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk"); 391 omap->utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk");
355 if (IS_ERR(omap->utmi_p1_fck)) { 392 if (IS_ERR(omap->utmi_p1_fck)) {
356 ret = PTR_ERR(omap->utmi_p1_fck); 393 ret = PTR_ERR(omap->utmi_p1_fck);
357 dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret); 394 dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret);
358 goto err_end; 395 goto err_usbtll_ick;
359 } 396 }
360 397
361 omap->xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck"); 398 omap->xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck");
@@ -485,8 +522,22 @@ err_xclk60mhsp1_ck:
485err_utmi_p1_fck: 522err_utmi_p1_fck:
486 clk_put(omap->utmi_p1_fck); 523 clk_put(omap->utmi_p1_fck);
487 524
525err_usbtll_ick:
526 clk_put(omap->usbtll_ick);
527
528err_usbtll_fck:
529 clk_put(omap->usbtll_fck);
530
531err_usbhost_fs_fck:
532 clk_put(omap->usbhost_fs_fck);
533
534err_usbhost_hs_fck:
535 clk_put(omap->usbhost_hs_fck);
536
537err_usbhost_ick:
538 clk_put(omap->usbhost_ick);
539
488err_end: 540err_end:
489 pm_runtime_disable(&pdev->dev);
490 kfree(omap); 541 kfree(omap);
491 542
492end_probe: 543end_probe:
@@ -520,7 +571,11 @@ static int __devexit usbhs_omap_remove(struct platform_device *pdev)
520 clk_put(omap->utmi_p2_fck); 571 clk_put(omap->utmi_p2_fck);
521 clk_put(omap->xclk60mhsp1_ck); 572 clk_put(omap->xclk60mhsp1_ck);
522 clk_put(omap->utmi_p1_fck); 573 clk_put(omap->utmi_p1_fck);
523 pm_runtime_disable(&pdev->dev); 574 clk_put(omap->usbtll_ick);
575 clk_put(omap->usbtll_fck);
576 clk_put(omap->usbhost_fs_fck);
577 clk_put(omap->usbhost_hs_fck);
578 clk_put(omap->usbhost_ick);
524 kfree(omap); 579 kfree(omap);
525 580
526 return 0; 581 return 0;
@@ -640,6 +695,7 @@ static int usbhs_enable(struct device *dev)
640 struct usbhs_omap_platform_data *pdata = &omap->platdata; 695 struct usbhs_omap_platform_data *pdata = &omap->platdata;
641 unsigned long flags = 0; 696 unsigned long flags = 0;
642 int ret = 0; 697 int ret = 0;
698 unsigned long timeout;
643 unsigned reg; 699 unsigned reg;
644 700
645 dev_dbg(dev, "starting TI HSUSB Controller\n"); 701 dev_dbg(dev, "starting TI HSUSB Controller\n");
@@ -652,7 +708,11 @@ static int usbhs_enable(struct device *dev)
652 if (omap->count > 0) 708 if (omap->count > 0)
653 goto end_count; 709 goto end_count;
654 710
655 pm_runtime_get_sync(dev); 711 clk_enable(omap->usbhost_ick);
712 clk_enable(omap->usbhost_hs_fck);
713 clk_enable(omap->usbhost_fs_fck);
714 clk_enable(omap->usbtll_fck);
715 clk_enable(omap->usbtll_ick);
656 716
657 if (pdata->ehci_data->phy_reset) { 717 if (pdata->ehci_data->phy_reset) {
658 if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) { 718 if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) {
@@ -676,6 +736,50 @@ static int usbhs_enable(struct device *dev)
676 omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION); 736 omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION);
677 dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev); 737 dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev);
678 738
739 /* perform TLL soft reset, and wait until reset is complete */
740 usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
741 OMAP_USBTLL_SYSCONFIG_SOFTRESET);
742
743 /* Wait for TLL reset to complete */
744 timeout = jiffies + msecs_to_jiffies(1000);
745 while (!(usbhs_read(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
746 & OMAP_USBTLL_SYSSTATUS_RESETDONE)) {
747 cpu_relax();
748
749 if (time_after(jiffies, timeout)) {
750 dev_dbg(dev, "operation timed out\n");
751 ret = -EINVAL;
752 goto err_tll;
753 }
754 }
755
756 dev_dbg(dev, "TLL RESET DONE\n");
757
758 /* (1<<3) = no idle mode only for initial debugging */
759 usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
760 OMAP_USBTLL_SYSCONFIG_ENAWAKEUP |
761 OMAP_USBTLL_SYSCONFIG_SIDLEMODE |
762 OMAP_USBTLL_SYSCONFIG_AUTOIDLE);
763
764 /* Put UHH in NoIdle/NoStandby mode */
765 reg = usbhs_read(omap->uhh_base, OMAP_UHH_SYSCONFIG);
766 if (is_omap_usbhs_rev1(omap)) {
767 reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP
768 | OMAP_UHH_SYSCONFIG_SIDLEMODE
769 | OMAP_UHH_SYSCONFIG_CACTIVITY
770 | OMAP_UHH_SYSCONFIG_MIDLEMODE);
771 reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE;
772
773
774 } else if (is_omap_usbhs_rev2(omap)) {
775 reg &= ~OMAP4_UHH_SYSCONFIG_IDLEMODE_CLEAR;
776 reg |= OMAP4_UHH_SYSCONFIG_NOIDLE;
777 reg &= ~OMAP4_UHH_SYSCONFIG_STDBYMODE_CLEAR;
778 reg |= OMAP4_UHH_SYSCONFIG_NOSTDBY;
779 }
780
781 usbhs_write(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg);
782
679 reg = usbhs_read(omap->uhh_base, OMAP_UHH_HOSTCONFIG); 783 reg = usbhs_read(omap->uhh_base, OMAP_UHH_HOSTCONFIG);
680 /* setup ULPI bypass and burst configurations */ 784 /* setup ULPI bypass and burst configurations */
681 reg |= (OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN 785 reg |= (OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN
@@ -815,8 +919,6 @@ end_count:
815 return 0; 919 return 0;
816 920
817err_tll: 921err_tll:
818 pm_runtime_put_sync(dev);
819 spin_unlock_irqrestore(&omap->lock, flags);
820 if (pdata->ehci_data->phy_reset) { 922 if (pdata->ehci_data->phy_reset) {
821 if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) 923 if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
822 gpio_free(pdata->ehci_data->reset_gpio_port[0]); 924 gpio_free(pdata->ehci_data->reset_gpio_port[0]);
@@ -824,6 +926,13 @@ err_tll:
824 if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) 926 if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
825 gpio_free(pdata->ehci_data->reset_gpio_port[1]); 927 gpio_free(pdata->ehci_data->reset_gpio_port[1]);
826 } 928 }
929
930 clk_disable(omap->usbtll_ick);
931 clk_disable(omap->usbtll_fck);
932 clk_disable(omap->usbhost_fs_fck);
933 clk_disable(omap->usbhost_hs_fck);
934 clk_disable(omap->usbhost_ick);
935 spin_unlock_irqrestore(&omap->lock, flags);
827 return ret; 936 return ret;
828} 937}
829 938
@@ -896,7 +1005,11 @@ static void usbhs_disable(struct device *dev)
896 clk_disable(omap->utmi_p1_fck); 1005 clk_disable(omap->utmi_p1_fck);
897 } 1006 }
898 1007
899 pm_runtime_put_sync(dev); 1008 clk_disable(omap->usbtll_ick);
1009 clk_disable(omap->usbtll_fck);
1010 clk_disable(omap->usbhost_fs_fck);
1011 clk_disable(omap->usbhost_hs_fck);
1012 clk_disable(omap->usbhost_ick);
900 1013
 901 /* The gpio_free might sleep; so unlock the spinlock */ 1014 /* The gpio_free might sleep; so unlock the spinlock */
902 spin_unlock_irqrestore(&omap->lock, flags); 1015 spin_unlock_irqrestore(&omap->lock, flags);
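
This driver trades pm_runtime for explicit management of five clocks, and gains a bounded busy-wait for the TLL soft reset: take a deadline, poll the status bit, give up with an error once time_after() says the deadline passed. A userspace approximation of that polling shape using a monotonic clock (the kernel loop uses jiffies and cpu_relax()):

    #include <stdio.h>
    #include <time.h>

    static long now_ms(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
    }

    /* Poll until 'done' flips or the deadline passes; mirrors the
     * jiffies/time_after() loop in the hunk above. */
    static int wait_for(volatile int *done, long timeout_ms)
    {
        long deadline = now_ms() + timeout_ms;
        while (!*done) {
            if (now_ms() - deadline > 0)  /* time_after(jiffies, timeout) */
                return -1;                /* timed out */
        }
        return 0;
    }

    int main(void)
    {
        volatile int done = 1;            /* pretend reset completed at once */
        printf("wait_for: %d\n", wait_for(&done, 1000));
        return 0;
    }
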
diff --git a/drivers/mfd/tps65911-comparator.c b/drivers/mfd/tps65911-comparator.c
index 3d2dc56a3d40..283ac6759757 100644
--- a/drivers/mfd/tps65911-comparator.c
+++ b/drivers/mfd/tps65911-comparator.c
@@ -125,7 +125,7 @@ static DEVICE_ATTR(comp2_threshold, S_IRUGO, comp_threshold_show, NULL);
125static __devinit int tps65911_comparator_probe(struct platform_device *pdev) 125static __devinit int tps65911_comparator_probe(struct platform_device *pdev)
126{ 126{
127 struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent); 127 struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
128 struct tps65910_platform_data *pdata = dev_get_platdata(tps65910->dev); 128 struct tps65910_board *pdata = dev_get_platdata(tps65910->dev);
129 int ret; 129 int ret;
130 130
131 ret = comp_threshold_set(tps65910, COMP1, pdata->vmbch_threshold); 131 ret = comp_threshold_set(tps65910, COMP1, pdata->vmbch_threshold);
diff --git a/drivers/misc/cb710/sgbuf2.c b/drivers/misc/cb710/sgbuf2.c
index d019746551f3..2a40d0efdff5 100644
--- a/drivers/misc/cb710/sgbuf2.c
+++ b/drivers/misc/cb710/sgbuf2.c
@@ -47,7 +47,7 @@ static uint32_t sg_dwiter_read_buffer(struct sg_mapping_iter *miter)
47 47
48static inline bool needs_unaligned_copy(const void *ptr) 48static inline bool needs_unaligned_copy(const void *ptr)
49{ 49{
50#ifdef HAVE_EFFICIENT_UNALIGNED_ACCESS 50#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
51 return false; 51 return false;
52#else 52#else
53 return ((ptr - NULL) & 3) != 0; 53 return ((ptr - NULL) & 3) != 0;
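
The sgbuf2.c change is a one-token bug worth remembering: Kconfig symbols reach C code only with a CONFIG_ prefix (via the generated autoconf.h), so testing the bare name is constant-false and the efficient-unaligned-access fast path was never compiled in. A tiny demonstration with a made-up symbol; build once plainly and once with -DCONFIG_FAST_PATH:

    #include <stdio.h>

    int main(void)
    {
    /* Wrong: Kconfig never defines the bare symbol, so this branch
     * is dead no matter what the .config says. */
    #ifdef FAST_PATH
        puts("bare symbol: fast path");
    #endif
    /* Right: the generated autoconf.h defines CONFIG_FAST_PATH. */
    #ifdef CONFIG_FAST_PATH
        puts("CONFIG_ symbol: fast path");
    #else
        puts("CONFIG_ symbol: fallback path");
    #endif
        return 0;
    }
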
diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c
index 668d41e594a9..df03dd3bd0e2 100644
--- a/drivers/misc/ioc4.c
+++ b/drivers/misc/ioc4.c
@@ -270,7 +270,7 @@ ioc4_variant(struct ioc4_driver_data *idd)
270 return IOC4_VARIANT_PCI_RT; 270 return IOC4_VARIANT_PCI_RT;
271} 271}
272 272
273static void __devinit 273static void
274ioc4_load_modules(struct work_struct *work) 274ioc4_load_modules(struct work_struct *work)
275{ 275{
276 request_module("sgiioc4"); 276 request_module("sgiioc4");
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 81d7fa4ec0db..150cd7061b80 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -120,6 +120,7 @@ static int recur_count = REC_NUM_DEFAULT;
120static enum cname cpoint = CN_INVALID; 120static enum cname cpoint = CN_INVALID;
121static enum ctype cptype = CT_NONE; 121static enum ctype cptype = CT_NONE;
122static int count = DEFAULT_COUNT; 122static int count = DEFAULT_COUNT;
123static DEFINE_SPINLOCK(count_lock);
123 124
124module_param(recur_count, int, 0644); 125module_param(recur_count, int, 0644);
125MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test, "\ 126MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test, "\
@@ -230,11 +231,14 @@ static const char *cp_name_to_str(enum cname name)
230static int lkdtm_parse_commandline(void) 231static int lkdtm_parse_commandline(void)
231{ 232{
232 int i; 233 int i;
234 unsigned long flags;
233 235
234 if (cpoint_count < 1 || recur_count < 1) 236 if (cpoint_count < 1 || recur_count < 1)
235 return -EINVAL; 237 return -EINVAL;
236 238
239 spin_lock_irqsave(&count_lock, flags);
237 count = cpoint_count; 240 count = cpoint_count;
241 spin_unlock_irqrestore(&count_lock, flags);
238 242
239 /* No special parameters */ 243 /* No special parameters */
240 if (!cpoint_type && !cpoint_name) 244 if (!cpoint_type && !cpoint_name)
@@ -349,6 +353,9 @@ static void lkdtm_do_action(enum ctype which)
349 353
350static void lkdtm_handler(void) 354static void lkdtm_handler(void)
351{ 355{
356 unsigned long flags;
357
358 spin_lock_irqsave(&count_lock, flags);
352 count--; 359 count--;
353 printk(KERN_INFO "lkdtm: Crash point %s of type %s hit, trigger in %d rounds\n", 360 printk(KERN_INFO "lkdtm: Crash point %s of type %s hit, trigger in %d rounds\n",
354 cp_name_to_str(cpoint), cp_type_to_str(cptype), count); 361 cp_name_to_str(cpoint), cp_type_to_str(cptype), count);
@@ -357,6 +364,7 @@ static void lkdtm_handler(void)
357 lkdtm_do_action(cptype); 364 lkdtm_do_action(cptype);
358 count = cpoint_count; 365 count = cpoint_count;
359 } 366 }
367 spin_unlock_irqrestore(&count_lock, flags);
360} 368}
361 369
362static int lkdtm_register_cpoint(enum cname which) 370static int lkdtm_register_cpoint(enum cname which)
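
The lkdtm hunks put every read-modify-write of the module-global count under a spinlock with interrupts saved, since lkdtm_handler() can run from interrupt context and the decrement/compare/reload sequence is not atomic. A userspace analogue of the same discipline using a pthread spinlock:

    #include <stdio.h>
    #include <pthread.h>

    static int count = 10;
    static pthread_spinlock_t count_lock;

    /* Decrement-and-maybe-reload as one critical section: without the
     * lock, a concurrent handler could interleave between the read of
     * 'count' and the write back, losing updates. */
    static void handler(void)
    {
        pthread_spin_lock(&count_lock);
        count--;
        if (count == 0)
            count = 10;     /* trigger and reload, still under the lock */
        pthread_spin_unlock(&count_lock);
    }

    int main(void)
    {
        pthread_spin_init(&count_lock, PTHREAD_PROCESS_PRIVATE);
        for (int i = 0; i < 25; i++)
            handler();
        printf("count = %d\n", count);   /* 25 decrements from 10: 5 */
        pthread_spin_destroy(&count_lock);
        return 0;
    }
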
diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c
index bb6f9255c17c..374dfcfccd07 100644
--- a/drivers/misc/pti.c
+++ b/drivers/misc/pti.c
@@ -317,7 +317,8 @@ EXPORT_SYMBOL_GPL(pti_request_masterchannel);
317 * a master, channel ID address 317 * a master, channel ID address
318 * used to write to PTI HW. 318 * used to write to PTI HW.
319 * 319 *
 320 * @mc: master, channel aperture ID address to be released. 320 * @mc: master, channel aperture ID address to be released. This
321 * will de-allocate the structure via kfree().
321 */ 322 */
322void pti_release_masterchannel(struct pti_masterchannel *mc) 323void pti_release_masterchannel(struct pti_masterchannel *mc)
323{ 324{
@@ -475,8 +476,10 @@ static int pti_tty_install(struct tty_driver *driver, struct tty_struct *tty)
475 else 476 else
476 pti_tty_data->mc = pti_request_masterchannel(2); 477 pti_tty_data->mc = pti_request_masterchannel(2);
477 478
478 if (pti_tty_data->mc == NULL) 479 if (pti_tty_data->mc == NULL) {
480 kfree(pti_tty_data);
479 return -ENXIO; 481 return -ENXIO;
482 }
480 tty->driver_data = pti_tty_data; 483 tty->driver_data = pti_tty_data;
481 } 484 }
482 485
@@ -495,7 +498,7 @@ static void pti_tty_cleanup(struct tty_struct *tty)
495 if (pti_tty_data == NULL) 498 if (pti_tty_data == NULL)
496 return; 499 return;
497 pti_release_masterchannel(pti_tty_data->mc); 500 pti_release_masterchannel(pti_tty_data->mc);
498 kfree(tty->driver_data); 501 kfree(pti_tty_data);
499 tty->driver_data = NULL; 502 tty->driver_data = NULL;
500} 503}
501 504
@@ -581,7 +584,7 @@ static int pti_char_open(struct inode *inode, struct file *filp)
581static int pti_char_release(struct inode *inode, struct file *filp) 584static int pti_char_release(struct inode *inode, struct file *filp)
582{ 585{
583 pti_release_masterchannel(filp->private_data); 586 pti_release_masterchannel(filp->private_data);
584 kfree(filp->private_data); 587 filp->private_data = NULL;
585 return 0; 588 return 0;
586} 589}
587 590
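
Taken together, the pti.c hunks fix one leak and redraw an ownership line: pti_tty_install() now frees its half-initialized state on the -ENXIO path, and pti_release_masterchannel() owns the final kfree() (as the updated kernel-doc says), so callers such as pti_char_release() only drop their pointer rather than freeing it a second time. A sketch of that release-owns-the-free convention:

    #include <stdio.h>
    #include <stdlib.h>

    struct channel { int id; };

    /* Per the new contract, release() performs the final free. */
    static void release_channel(struct channel *ch)
    {
        printf("releasing channel %d\n", ch->id);
        free(ch);
    }

    int main(void)
    {
        struct channel *ch = malloc(sizeof(*ch));
        ch->id = 3;

        release_channel(ch);
        ch = NULL;      /* caller drops its reference; no second free() */
        return 0;
    }
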
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index f91f82eabda7..54c91ffe4a91 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -605,7 +605,7 @@ long st_unregister(struct st_proto_s *proto)
605 pr_debug("%s: %d ", __func__, proto->chnl_id); 605 pr_debug("%s: %d ", __func__, proto->chnl_id);
606 606
607 st_kim_ref(&st_gdata, 0); 607 st_kim_ref(&st_gdata, 0);
608 if (proto->chnl_id >= ST_MAX_CHANNELS) { 608 if (!st_gdata || proto->chnl_id >= ST_MAX_CHANNELS) {
609 pr_err(" chnl_id %d not supported", proto->chnl_id); 609 pr_err(" chnl_id %d not supported", proto->chnl_id);
610 return -EPROTONOSUPPORT; 610 return -EPROTONOSUPPORT;
611 } 611 }
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 5da93ee6f6be..38fd2f04c07e 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -245,9 +245,9 @@ void skip_change_remote_baud(unsigned char **ptr, long *len)
245 pr_err("invalid action after change remote baud command"); 245 pr_err("invalid action after change remote baud command");
246 } else { 246 } else {
247 *ptr = *ptr + sizeof(struct bts_action) + 247 *ptr = *ptr + sizeof(struct bts_action) +
248 ((struct bts_action *)nxt_action)->size; 248 ((struct bts_action *)cur_action)->size;
249 *len = *len - (sizeof(struct bts_action) + 249 *len = *len - (sizeof(struct bts_action) +
250 ((struct bts_action *)nxt_action)->size); 250 ((struct bts_action *)cur_action)->size);
251 /* warn user on not commenting these in firmware */ 251 /* warn user on not commenting these in firmware */
252 pr_warn("skipping the wait event of change remote baud"); 252 pr_warn("skipping the wait event of change remote baud");
253 } 253 }
@@ -604,6 +604,10 @@ void st_kim_ref(struct st_data_s **core_data, int id)
604 struct kim_data_s *kim_gdata; 604 struct kim_data_s *kim_gdata;
605 /* get kim_gdata reference from platform device */ 605 /* get kim_gdata reference from platform device */
606 pdev = st_get_plat_device(id); 606 pdev = st_get_plat_device(id);
607 if (!pdev) {
608 *core_data = NULL;
609 return;
610 }
607 kim_gdata = dev_get_drvdata(&pdev->dev); 611 kim_gdata = dev_get_drvdata(&pdev->dev);
608 *core_data = kim_gdata->core_data; 612 *core_data = kim_gdata->core_data;
609} 613}
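Together, the st_core.c and st_kim.c hunks define an out-parameter contract: st_kim_ref() must leave *core_data as NULL on any failure, and st_unregister() must test for NULL before dereferencing. A generic sketch of the contract (illustrative types, not the driver's actual symbols):

    struct core;

    extern struct core *lookup(int id);     /* may return NULL */

    static void get_ref(struct core **out, int id)
    {
            struct core *c = lookup(id);

            if (!c) {
                    *out = NULL;            /* never leave *out unset */
                    return;
            }
            *out = c;
    }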
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 71da5641e258..f85e42224559 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1024,7 +1024,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
1024 INIT_LIST_HEAD(&md->part); 1024 INIT_LIST_HEAD(&md->part);
1025 md->usage = 1; 1025 md->usage = 1;
1026 1026
1027 ret = mmc_init_queue(&md->queue, card, &md->lock); 1027 ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
1028 if (ret) 1028 if (ret)
1029 goto err_putdisk; 1029 goto err_putdisk;
1030 1030
@@ -1297,6 +1297,9 @@ static void mmc_blk_remove(struct mmc_card *card)
1297 struct mmc_blk_data *md = mmc_get_drvdata(card); 1297 struct mmc_blk_data *md = mmc_get_drvdata(card);
1298 1298
1299 mmc_blk_remove_parts(card, md); 1299 mmc_blk_remove_parts(card, md);
1300 mmc_claim_host(card->host);
1301 mmc_blk_part_switch(card, md);
1302 mmc_release_host(card->host);
1300 mmc_blk_remove_req(md); 1303 mmc_blk_remove_req(md);
1301 mmc_set_drvdata(card, NULL); 1304 mmc_set_drvdata(card, NULL);
1302} 1305}
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index c07322c2658c..6413afa318d2 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -106,10 +106,12 @@ static void mmc_request(struct request_queue *q)
106 * @mq: mmc queue 106 * @mq: mmc queue
107 * @card: mmc card to attach this queue 107 * @card: mmc card to attach this queue
108 * @lock: queue lock 108 * @lock: queue lock
109 * @subname: partition subname
109 * 110 *
 110 * Initialise an MMC card request queue. 111 * Initialise an MMC card request queue.
111 */ 112 */
112int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock) 113int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
114 spinlock_t *lock, const char *subname)
113{ 115{
114 struct mmc_host *host = card->host; 116 struct mmc_host *host = card->host;
115 u64 limit = BLK_BOUNCE_HIGH; 117 u64 limit = BLK_BOUNCE_HIGH;
@@ -133,12 +135,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
133 mq->queue->limits.max_discard_sectors = UINT_MAX; 135 mq->queue->limits.max_discard_sectors = UINT_MAX;
134 if (card->erased_byte == 0) 136 if (card->erased_byte == 0)
135 mq->queue->limits.discard_zeroes_data = 1; 137 mq->queue->limits.discard_zeroes_data = 1;
136 if (!mmc_can_trim(card) && is_power_of_2(card->erase_size)) { 138 mq->queue->limits.discard_granularity = card->pref_erase << 9;
137 mq->queue->limits.discard_granularity =
138 card->erase_size << 9;
139 mq->queue->limits.discard_alignment =
140 card->erase_size << 9;
141 }
142 if (mmc_can_secure_erase_trim(card)) 139 if (mmc_can_secure_erase_trim(card))
143 queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, 140 queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
144 mq->queue); 141 mq->queue);
@@ -209,8 +206,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
209 206
210 sema_init(&mq->thread_sem, 1); 207 sema_init(&mq->thread_sem, 1);
211 208
212 mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d", 209 mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
213 host->index); 210 host->index, subname ? subname : "");
214 211
215 if (IS_ERR(mq->thread)) { 212 if (IS_ERR(mq->thread)) {
216 ret = PTR_ERR(mq->thread); 213 ret = PTR_ERR(mq->thread);
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 64e66e0d4994..6223ef8dc9cd 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -19,7 +19,8 @@ struct mmc_queue {
19 unsigned int bounce_sg_len; 19 unsigned int bounce_sg_len;
20}; 20};
21 21
22extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *); 22extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
23 const char *);
23extern void mmc_cleanup_queue(struct mmc_queue *); 24extern void mmc_cleanup_queue(struct mmc_queue *);
24extern void mmc_queue_suspend(struct mmc_queue *); 25extern void mmc_queue_suspend(struct mmc_queue *);
25extern void mmc_queue_resume(struct mmc_queue *); 26extern void mmc_queue_resume(struct mmc_queue *);
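With subname plumbed through to kthread_run(), each partition's queue thread gets a distinguishable name. A runnable userspace sketch of the "%d%s" format used above; the host index 0 and subname "boot0" are made-up example values:

    #include <stdio.h>

    int main(void)
    {
            const char *subname = "boot0";  /* e.g. an eMMC boot partition */
            char name[32];

            snprintf(name, sizeof(name), "mmcqd/%d%s",
                     0, subname ? subname : "");
            printf("%s\n", name);           /* -> mmcqd/0boot0 */
            return 0;
    }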
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 68091dda3f31..7843efe22359 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1245,7 +1245,7 @@ static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
1245 */ 1245 */
1246 timeout_clks <<= 1; 1246 timeout_clks <<= 1;
1247 timeout_us += (timeout_clks * 1000) / 1247 timeout_us += (timeout_clks * 1000) /
1248 (card->host->ios.clock / 1000); 1248 (mmc_host_clk_rate(card->host) / 1000);
1249 1249
1250 erase_timeout = timeout_us / 1000; 1250 erase_timeout = timeout_us / 1000;
1251 1251
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 4d0c15bfa514..262fff019177 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -691,15 +691,54 @@ static int mmc_sdio_resume(struct mmc_host *host)
691static int mmc_sdio_power_restore(struct mmc_host *host) 691static int mmc_sdio_power_restore(struct mmc_host *host)
692{ 692{
693 int ret; 693 int ret;
694 u32 ocr;
694 695
695 BUG_ON(!host); 696 BUG_ON(!host);
696 BUG_ON(!host->card); 697 BUG_ON(!host->card);
697 698
698 mmc_claim_host(host); 699 mmc_claim_host(host);
700
701 /*
702 * Reset the card by performing the same steps that are taken by
703 * mmc_rescan_try_freq() and mmc_attach_sdio() during a "normal" probe.
704 *
705 * sdio_reset() is technically not needed. Having just powered up the
706 * hardware, it should already be in reset state. However, some
707 * platforms (such as SD8686 on OLPC) do not instantly cut power,
708 * meaning that a reset is required when restoring power soon after
709 * powering off. It is harmless in other cases.
710 *
711 * The CMD5 reset (mmc_send_io_op_cond()), according to the SDIO spec,
712 * is not necessary for non-removable cards. However, it is required
713 * for OLPC SD8686 (which expects a [CMD5,5,3,7] init sequence), and
714 * harmless in other situations.
715 *
716 * With these steps taken, mmc_select_voltage() is also required to
717 * restore the correct voltage setting of the card.
718 */
719 sdio_reset(host);
720 mmc_go_idle(host);
721 mmc_send_if_cond(host, host->ocr_avail);
722
723 ret = mmc_send_io_op_cond(host, 0, &ocr);
724 if (ret)
725 goto out;
726
727 if (host->ocr_avail_sdio)
728 host->ocr_avail = host->ocr_avail_sdio;
729
730 host->ocr = mmc_select_voltage(host, ocr & ~0x7F);
731 if (!host->ocr) {
732 ret = -EINVAL;
733 goto out;
734 }
735
699 ret = mmc_sdio_init_card(host, host->ocr, host->card, 736 ret = mmc_sdio_init_card(host, host->ocr, host->card,
700 mmc_card_keep_power(host)); 737 mmc_card_keep_power(host));
701 if (!ret && host->sdio_irqs) 738 if (!ret && host->sdio_irqs)
702 mmc_signal_sdio_irq(host); 739 mmc_signal_sdio_irq(host);
740
741out:
703 mmc_release_host(host); 742 mmc_release_host(host);
704 743
705 return ret; 744 return ret;
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index d29b9c36919a..d2565df8a7fb 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -189,7 +189,7 @@ static int sdio_bus_remove(struct device *dev)
189 189
190 /* Then undo the runtime PM settings in sdio_bus_probe() */ 190 /* Then undo the runtime PM settings in sdio_bus_probe() */
191 if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) 191 if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
192 pm_runtime_put_noidle(dev); 192 pm_runtime_put_sync(dev);
193 193
194out: 194out:
195 return ret; 195 return ret;
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index e2aecb7f1d5c..ab66f2454dc4 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -25,6 +25,11 @@
25#include <linux/mmc/core.h> 25#include <linux/mmc/core.h>
26#include <linux/mmc/host.h> 26#include <linux/mmc/host.h>
27 27
28/* For archs that don't support NO_IRQ (such as mips), provide a dummy value */
29#ifndef NO_IRQ
30#define NO_IRQ 0
31#endif
32
28MODULE_LICENSE("GPL"); 33MODULE_LICENSE("GPL");
29 34
30enum { 35enum {
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 5b2e2155b413..dedf3dab8a3b 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -429,7 +429,6 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
429 return -EINVAL; 429 return -EINVAL;
430 } 430 }
431 } 431 }
432 mmc_slot(host).ocr_mask = mmc_regulator_get_ocrmask(reg);
433 432
434 /* Allow an aux regulator */ 433 /* Allow an aux regulator */
435 reg = regulator_get(host->dev, "vmmc_aux"); 434 reg = regulator_get(host->dev, "vmmc_aux");
@@ -962,7 +961,8 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
962 spin_unlock(&host->irq_lock); 961 spin_unlock(&host->irq_lock);
963 962
964 if (host->use_dma && dma_ch != -1) { 963 if (host->use_dma && dma_ch != -1) {
965 dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len, 964 dma_unmap_sg(mmc_dev(host->mmc), host->data->sg,
965 host->data->sg_len,
966 omap_hsmmc_get_dma_dir(host, host->data)); 966 omap_hsmmc_get_dma_dir(host, host->data));
967 omap_free_dma(dma_ch); 967 omap_free_dma(dma_ch);
968 } 968 }
@@ -1346,7 +1346,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
1346 return; 1346 return;
1347 } 1347 }
1348 1348
1349 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len, 1349 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
1350 omap_hsmmc_get_dma_dir(host, data)); 1350 omap_hsmmc_get_dma_dir(host, data));
1351 1351
1352 req_in_progress = host->req_in_progress; 1352 req_in_progress = host->req_in_progress;
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index b3654293017b..ce500f03df85 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -92,7 +92,7 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
92 mmc_data->ocr_mask = p->tmio_ocr_mask; 92 mmc_data->ocr_mask = p->tmio_ocr_mask;
93 mmc_data->capabilities |= p->tmio_caps; 93 mmc_data->capabilities |= p->tmio_caps;
94 94
95 if (p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) { 95 if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0) {
96 priv->param_tx.slave_id = p->dma_slave_tx; 96 priv->param_tx.slave_id = p->dma_slave_tx;
97 priv->param_rx.slave_id = p->dma_slave_rx; 97 priv->param_rx.slave_id = p->dma_slave_rx;
98 priv->dma_priv.chan_priv_tx = &priv->param_tx; 98 priv->dma_priv.chan_priv_tx = &priv->param_tx;
@@ -165,13 +165,14 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev)
165 165
166 p->pdata = NULL; 166 p->pdata = NULL;
167 167
168 tmio_mmc_host_remove(host);
169
168 for (i = 0; i < 3; i++) { 170 for (i = 0; i < 3; i++) {
169 irq = platform_get_irq(pdev, i); 171 irq = platform_get_irq(pdev, i);
170 if (irq >= 0) 172 if (irq >= 0)
171 free_irq(irq, host); 173 free_irq(irq, host);
172 } 174 }
173 175
174 tmio_mmc_host_remove(host);
175 clk_disable(priv->clk); 176 clk_disable(priv->clk);
176 clk_put(priv->clk); 177 clk_put(priv->clk);
177 kfree(priv); 178 kfree(priv);
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index ad6347bb02dd..0b09e8239aa0 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -824,8 +824,8 @@ static int tmio_mmc_get_ro(struct mmc_host *mmc)
824 struct tmio_mmc_host *host = mmc_priv(mmc); 824 struct tmio_mmc_host *host = mmc_priv(mmc);
825 struct tmio_mmc_data *pdata = host->pdata; 825 struct tmio_mmc_data *pdata = host->pdata;
826 826
827 return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) || 827 return !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
828 !(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)); 828 (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
829} 829}
830 830
831static int tmio_mmc_get_cd(struct mmc_host *mmc) 831static int tmio_mmc_get_cd(struct mmc_host *mmc)
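The tmio_mmc_get_ro() fix inverts the whole predicate: get_ro() must return nonzero only when the card is read-only. Assuming the WRPROTECT status bit reads 1 while the card is writable, the corrected logic behaves as in this sketch (illustrative macro values):

    #define WRPROTECT_DISABLE 0x1   /* platform opted out of WP reporting */
    #define STAT_WRPROTECT    0x2   /* controller: card currently writable */

    /* disable  stat  ->  get_ro()
     *    1       x         0      reporting disabled, treat as writable
     *    0       1         0      card writable
     *    0       0         1      card read-only
     */
    static int get_ro_sketch(unsigned int flags, unsigned int status)
    {
            return !((flags & WRPROTECT_DISABLE) ||
                     (status & STAT_WRPROTECT));
    }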
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index cbb03305b77b..d4455ffbefd8 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -2096,7 +2096,7 @@ static struct mmc_host_ops vub300_mmc_ops = {
2096static int vub300_probe(struct usb_interface *interface, 2096static int vub300_probe(struct usb_interface *interface,
2097 const struct usb_device_id *id) 2097 const struct usb_device_id *id)
2098{ /* NOT irq */ 2098{ /* NOT irq */
2099 struct vub300_mmc_host *vub300 = NULL; 2099 struct vub300_mmc_host *vub300;
2100 struct usb_host_interface *iface_desc; 2100 struct usb_host_interface *iface_desc;
2101 struct usb_device *udev = usb_get_dev(interface_to_usbdev(interface)); 2101 struct usb_device *udev = usb_get_dev(interface_to_usbdev(interface));
2102 int i; 2102 int i;
@@ -2118,23 +2118,20 @@ static int vub300_probe(struct usb_interface *interface,
2118 command_out_urb = usb_alloc_urb(0, GFP_KERNEL); 2118 command_out_urb = usb_alloc_urb(0, GFP_KERNEL);
2119 if (!command_out_urb) { 2119 if (!command_out_urb) {
2120 retval = -ENOMEM; 2120 retval = -ENOMEM;
2121 dev_err(&vub300->udev->dev, 2121 dev_err(&udev->dev, "not enough memory for command_out_urb\n");
2122 "not enough memory for the command_out_urb\n");
2123 goto error0; 2122 goto error0;
2124 } 2123 }
2125 command_res_urb = usb_alloc_urb(0, GFP_KERNEL); 2124 command_res_urb = usb_alloc_urb(0, GFP_KERNEL);
2126 if (!command_res_urb) { 2125 if (!command_res_urb) {
2127 retval = -ENOMEM; 2126 retval = -ENOMEM;
2128 dev_err(&vub300->udev->dev, 2127 dev_err(&udev->dev, "not enough memory for command_res_urb\n");
2129 "not enough memory for the command_res_urb\n");
2130 goto error1; 2128 goto error1;
2131 } 2129 }
2132 /* this also allocates memory for our VUB300 mmc host device */ 2130 /* this also allocates memory for our VUB300 mmc host device */
2133 mmc = mmc_alloc_host(sizeof(struct vub300_mmc_host), &udev->dev); 2131 mmc = mmc_alloc_host(sizeof(struct vub300_mmc_host), &udev->dev);
2134 if (!mmc) { 2132 if (!mmc) {
2135 retval = -ENOMEM; 2133 retval = -ENOMEM;
2136 dev_err(&vub300->udev->dev, 2134 dev_err(&udev->dev, "not enough memory for the mmc_host\n");
2137 "not enough memory for the mmc_host\n");
2138 goto error4; 2135 goto error4;
2139 } 2136 }
2140 /* MMC core transfer sizes tunable parameters */ 2137 /* MMC core transfer sizes tunable parameters */
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 0bb254c7d2b1..33d8aad8bba5 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -339,9 +339,9 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
339 (FIR_OP_UA << FIR_OP1_SHIFT) | 339 (FIR_OP_UA << FIR_OP1_SHIFT) |
340 (FIR_OP_RBW << FIR_OP2_SHIFT)); 340 (FIR_OP_RBW << FIR_OP2_SHIFT));
341 out_be32(&lbc->fcr, NAND_CMD_READID << FCR_CMD0_SHIFT); 341 out_be32(&lbc->fcr, NAND_CMD_READID << FCR_CMD0_SHIFT);
 342 /* 5 bytes for manuf, device and exts */ 342 /* nand_get_flash_type() reads the entire 8-byte ID string */
343 out_be32(&lbc->fbcr, 5); 343 out_be32(&lbc->fbcr, 8);
344 elbc_fcm_ctrl->read_bytes = 5; 344 elbc_fcm_ctrl->read_bytes = 8;
345 elbc_fcm_ctrl->use_mdr = 1; 345 elbc_fcm_ctrl->use_mdr = 1;
346 elbc_fcm_ctrl->mdr = 0; 346 elbc_fcm_ctrl->mdr = 0;
347 347
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 98517a373473..e3bad8247fd1 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -992,6 +992,7 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
992 * features 992 * features
993 */ 993 */
994 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA; 994 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA;
995 dev->vlan_features = dev->features;
995 996
996 dev->irq = pdev->irq; 997 dev->irq = pdev->irq;
997 998
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 19f04a34783a..93359fab240e 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -3416,7 +3416,8 @@ config NETCONSOLE
3416 3416
3417config NETCONSOLE_DYNAMIC 3417config NETCONSOLE_DYNAMIC
3418 bool "Dynamic reconfiguration of logging targets" 3418 bool "Dynamic reconfiguration of logging targets"
3419 depends on NETCONSOLE && SYSFS && CONFIGFS_FS 3419 depends on NETCONSOLE && SYSFS && CONFIGFS_FS && \
3420 !(NETCONSOLE=y && CONFIGFS_FS=m)
3420 help 3421 help
3421 This option enables the ability to dynamically reconfigure target 3422 This option enables the ability to dynamically reconfigure target
3422 parameters (interface, IP addresses, port numbers, MAC addresses) 3423 parameters (interface, IP addresses, port numbers, MAC addresses)
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
index 7d25a97d33f6..44e219c910da 100644
--- a/drivers/net/bna/bnad.c
+++ b/drivers/net/bna/bnad.c
@@ -1111,7 +1111,7 @@ bnad_mbox_irq_alloc(struct bnad *bnad,
1111 struct bna_intr_info *intr_info) 1111 struct bna_intr_info *intr_info)
1112{ 1112{
1113 int err = 0; 1113 int err = 0;
1114 unsigned long flags; 1114 unsigned long irq_flags = 0, flags;
1115 u32 irq; 1115 u32 irq;
1116 irq_handler_t irq_handler; 1116 irq_handler_t irq_handler;
1117 1117
@@ -1125,18 +1125,17 @@ bnad_mbox_irq_alloc(struct bnad *bnad,
1125 if (bnad->cfg_flags & BNAD_CF_MSIX) { 1125 if (bnad->cfg_flags & BNAD_CF_MSIX) {
1126 irq_handler = (irq_handler_t)bnad_msix_mbox_handler; 1126 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1127 irq = bnad->msix_table[bnad->msix_num - 1].vector; 1127 irq = bnad->msix_table[bnad->msix_num - 1].vector;
1128 flags = 0;
1129 intr_info->intr_type = BNA_INTR_T_MSIX; 1128 intr_info->intr_type = BNA_INTR_T_MSIX;
1130 intr_info->idl[0].vector = bnad->msix_num - 1; 1129 intr_info->idl[0].vector = bnad->msix_num - 1;
1131 } else { 1130 } else {
1132 irq_handler = (irq_handler_t)bnad_isr; 1131 irq_handler = (irq_handler_t)bnad_isr;
1133 irq = bnad->pcidev->irq; 1132 irq = bnad->pcidev->irq;
1134 flags = IRQF_SHARED; 1133 irq_flags = IRQF_SHARED;
1135 intr_info->intr_type = BNA_INTR_T_INTX; 1134 intr_info->intr_type = BNA_INTR_T_INTX;
1136 /* intr_info->idl.vector = 0 ? */ 1135 /* intr_info->idl.vector = 0 ? */
1137 } 1136 }
1138 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1137 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1139 1138 flags = irq_flags;
1140 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME); 1139 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1141 1140
1142 /* 1141 /*
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 4b70311a11ef..74be989f51c5 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -49,6 +49,7 @@
49#include <linux/zlib.h> 49#include <linux/zlib.h>
50#include <linux/io.h> 50#include <linux/io.h>
51#include <linux/stringify.h> 51#include <linux/stringify.h>
52#include <linux/vmalloc.h>
52 53
53#define BNX2X_MAIN 54#define BNX2X_MAIN
54#include "bnx2x.h" 55#include "bnx2x.h"
@@ -4537,8 +4538,7 @@ static int bnx2x_gunzip_init(struct bnx2x *bp)
4537 if (bp->strm == NULL) 4538 if (bp->strm == NULL)
4538 goto gunzip_nomem2; 4539 goto gunzip_nomem2;
4539 4540
4540 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), 4541 bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
4541 GFP_KERNEL);
4542 if (bp->strm->workspace == NULL) 4542 if (bp->strm->workspace == NULL)
4543 goto gunzip_nomem3; 4543 goto gunzip_nomem3;
4544 4544
@@ -4562,7 +4562,7 @@ gunzip_nomem1:
4562static void bnx2x_gunzip_end(struct bnx2x *bp) 4562static void bnx2x_gunzip_end(struct bnx2x *bp)
4563{ 4563{
4564 if (bp->strm) { 4564 if (bp->strm) {
4565 kfree(bp->strm->workspace); 4565 vfree(bp->strm->workspace);
4566 kfree(bp->strm); 4566 kfree(bp->strm);
4567 bp->strm = NULL; 4567 bp->strm = NULL;
4568 } 4568 }
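zlib_inflate_workspacesize() is tens of kilobytes, a risky order for kmalloc() once memory fragments; nothing here needs physically contiguous pages, so vmalloc()/vfree() is the better fit. A minimal sketch of the pairing (the ppp_deflate hunk below makes the same switch):

    #include <linux/errno.h>
    #include <linux/vmalloc.h>
    #include <linux/zlib.h>

    static int inflate_setup(z_stream *strm)
    {
            strm->workspace = vmalloc(zlib_inflate_workspacesize());
            if (!strm->workspace)
                    return -ENOMEM;
            return zlib_inflateInit(strm) == Z_OK ? 0 : -EINVAL;
    }

    static void inflate_teardown(z_stream *strm)
    {
            zlib_inflateEnd(strm);
            vfree(strm->workspace);
    }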
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 1d699e3df547..754df5ef1729 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -36,7 +36,7 @@ config CAN_SLCAN
36config CAN_DEV 36config CAN_DEV
37 tristate "Platform CAN drivers with Netlink support" 37 tristate "Platform CAN drivers with Netlink support"
38 depends on CAN 38 depends on CAN
39 default Y 39 default y
40 ---help--- 40 ---help---
41 Enables the common framework for platform CAN drivers with Netlink 41 Enables the common framework for platform CAN drivers with Netlink
42 support. This is the standard library for CAN drivers. 42 support. This is the standard library for CAN drivers.
@@ -45,7 +45,7 @@ config CAN_DEV
45config CAN_CALC_BITTIMING 45config CAN_CALC_BITTIMING
46 bool "CAN bit-timing calculation" 46 bool "CAN bit-timing calculation"
47 depends on CAN_DEV 47 depends on CAN_DEV
48 default Y 48 default y
49 ---help--- 49 ---help---
50 If enabled, CAN bit-timing parameters will be calculated for the 50 If enabled, CAN bit-timing parameters will be calculated for the
51 bit-rate specified via Netlink argument "bitrate" when the device 51 bit-rate specified via Netlink argument "bitrate" when the device
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 3f562ba2f0c9..76bf5892b962 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -2026,7 +2026,7 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
2026 skb->ip_summed = CHECKSUM_UNNECESSARY; 2026 skb->ip_summed = CHECKSUM_UNNECESSARY;
2027 } else 2027 } else
2028 skb_checksum_none_assert(skb); 2028 skb_checksum_none_assert(skb);
2029 skb_record_rx_queue(skb, qs - &adap->sge.qs[0]); 2029 skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
2030 2030
2031 if (unlikely(p->vlan_valid)) { 2031 if (unlikely(p->vlan_valid)) {
2032 struct vlan_group *grp = pi->vlan_grp; 2032 struct vlan_group *grp = pi->vlan_grp;
@@ -2145,7 +2145,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2145 if (!complete) 2145 if (!complete)
2146 return; 2146 return;
2147 2147
2148 skb_record_rx_queue(skb, qs - &adap->sge.qs[0]); 2148 skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
2149 2149
2150 if (unlikely(cpl->vlan_valid)) { 2150 if (unlikely(cpl->vlan_valid)) {
2151 struct vlan_group *grp = pi->vlan_grp; 2151 struct vlan_group *grp = pi->vlan_grp;
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index f181304a7ab6..672f096fe090 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -1015,11 +1015,10 @@ static int greth_set_mac_add(struct net_device *dev, void *p)
1015 return -EINVAL; 1015 return -EINVAL;
1016 1016
1017 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 1017 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1018 GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
1019 GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
1020 dev->dev_addr[4] << 8 | dev->dev_addr[5]);
1018 1021
1019 GRETH_REGSAVE(regs->esa_msb, addr->sa_data[0] << 8 | addr->sa_data[1]);
1020 GRETH_REGSAVE(regs->esa_lsb,
1021 addr->sa_data[2] << 24 | addr->
1022 sa_data[3] << 16 | addr->sa_data[4] << 8 | addr->sa_data[5]);
1023 return 0; 1022 return 0;
1024} 1023}
1025 1024
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 3e5d0b6b6516..0d283781bc5e 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -692,10 +692,10 @@ static void sixpack_close(struct tty_struct *tty)
692{ 692{
693 struct sixpack *sp; 693 struct sixpack *sp;
694 694
695 write_lock(&disc_data_lock); 695 write_lock_bh(&disc_data_lock);
696 sp = tty->disc_data; 696 sp = tty->disc_data;
697 tty->disc_data = NULL; 697 tty->disc_data = NULL;
698 write_unlock(&disc_data_lock); 698 write_unlock_bh(&disc_data_lock);
699 if (!sp) 699 if (!sp)
700 return; 700 return;
701 701
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 4c628393c8b1..bc02968cee16 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -813,10 +813,10 @@ static void mkiss_close(struct tty_struct *tty)
813{ 813{
814 struct mkiss *ax; 814 struct mkiss *ax;
815 815
816 write_lock(&disc_data_lock); 816 write_lock_bh(&disc_data_lock);
817 ax = tty->disc_data; 817 ax = tty->disc_data;
818 tty->disc_data = NULL; 818 tty->disc_data = NULL;
819 write_unlock(&disc_data_lock); 819 write_unlock_bh(&disc_data_lock);
820 820
821 if (!ax) 821 if (!ax)
822 return; 822 return;
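Both hamradio hunks are the same fix: the disc_data readers can run in bottom-half context, so a writer using plain write_lock() could be interrupted on its own CPU by a softirq that then spins forever on read_lock(). write_lock_bh() masks bottom halves for the critical section. Sketch:

    #include <linux/spinlock.h>
    #include <linux/tty.h>

    static DEFINE_RWLOCK(disc_data_lock);

    static void detach_disc_data(struct tty_struct *tty)
    {
            write_lock_bh(&disc_data_lock); /* no softirq preempts us here */
            tty->disc_data = NULL;
            write_unlock_bh(&disc_data_lock);
    }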
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index b78be088c4ad..8f8b65af9ed5 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -2360,7 +2360,8 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
2360 PCI_DMA_FROMDEVICE); 2360 PCI_DMA_FROMDEVICE);
2361 } else { 2361 } else {
2362 pci_unmap_single(np->pci_dev, np->rx_dma[entry], 2362 pci_unmap_single(np->pci_dev, np->rx_dma[entry],
2363 buflen, PCI_DMA_FROMDEVICE); 2363 buflen + NATSEMI_PADDING,
2364 PCI_DMA_FROMDEVICE);
2364 skb_put(skb = np->rx_skbuff[entry], pkt_len); 2365 skb_put(skb = np->rx_skbuff[entry], pkt_len);
2365 np->rx_skbuff[entry] = NULL; 2366 np->rx_skbuff[entry] = NULL;
2366 } 2367 }
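The natsemi fix restores a hard rule of the streaming DMA API: the size handed to pci_unmap_single() must equal the size used in the matching pci_map_single() call. Fragment sketch (PADDING stands in for NATSEMI_PADDING):

    dma_addr_t dma = pci_map_single(pdev, buf, buflen + PADDING,
                                    PCI_DMA_FROMDEVICE);
    /* ... device fills the buffer ... */
    pci_unmap_single(pdev, dma, buflen + PADDING, PCI_DMA_FROMDEVICE);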
diff --git a/drivers/net/ppp_deflate.c b/drivers/net/ppp_deflate.c
index 31e9407a0739..1dbdf82a6dfd 100644
--- a/drivers/net/ppp_deflate.c
+++ b/drivers/net/ppp_deflate.c
@@ -305,7 +305,7 @@ static void z_decomp_free(void *arg)
305 305
306 if (state) { 306 if (state) {
307 zlib_inflateEnd(&state->strm); 307 zlib_inflateEnd(&state->strm);
308 kfree(state->strm.workspace); 308 vfree(state->strm.workspace);
309 kfree(state); 309 kfree(state);
310 } 310 }
311} 311}
@@ -345,8 +345,7 @@ static void *z_decomp_alloc(unsigned char *options, int opt_len)
345 345
346 state->w_size = w_size; 346 state->w_size = w_size;
347 state->strm.next_out = NULL; 347 state->strm.next_out = NULL;
348 state->strm.workspace = kmalloc(zlib_inflate_workspacesize(), 348 state->strm.workspace = vmalloc(zlib_inflate_workspacesize());
349 GFP_KERNEL|__GFP_REPEAT);
350 if (state->strm.workspace == NULL) 349 if (state->strm.workspace == NULL)
351 goto out_free; 350 goto out_free;
352 351
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index d32850715f5c..ca306fd5f588 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -16,7 +16,7 @@
16 */ 16 */
17#define DRV_NAME "qlge" 17#define DRV_NAME "qlge"
18#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " 18#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
19#define DRV_VERSION "v1.00.00.27.00.00-01" 19#define DRV_VERSION "v1.00.00.29.00.00-01"
20 20
21#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ 21#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
22 22
@@ -1996,6 +1996,7 @@ enum {
1996 QL_LB_LINK_UP = 10, 1996 QL_LB_LINK_UP = 10,
1997 QL_FRC_COREDUMP = 11, 1997 QL_FRC_COREDUMP = 11,
1998 QL_EEH_FATAL = 12, 1998 QL_EEH_FATAL = 12,
 1999 QL_ASIC_RECOVERY = 14, /* We are in asic recovery. */
1999}; 2000};
2000 2001
2001/* link_status bit definitions */ 2002/* link_status bit definitions */
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 930ae45457bb..6b4ff970972b 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -2152,6 +2152,10 @@ void ql_queue_asic_error(struct ql_adapter *qdev)
2152 * thread 2152 * thread
2153 */ 2153 */
2154 clear_bit(QL_ADAPTER_UP, &qdev->flags); 2154 clear_bit(QL_ADAPTER_UP, &qdev->flags);
 2155 /* Set the asic recovery bit to tell the reset process that we
 2156 * are in fatal error recovery rather than a normal close
2157 */
2158 set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2155 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0); 2159 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2156} 2160}
2157 2161
@@ -2166,23 +2170,20 @@ static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2166 return; 2170 return;
2167 2171
2168 case CAM_LOOKUP_ERR_EVENT: 2172 case CAM_LOOKUP_ERR_EVENT:
2169 netif_err(qdev, link, qdev->ndev, 2173 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2170 "Multiple CAM hits lookup occurred.\n"); 2174 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2171 netif_err(qdev, drv, qdev->ndev,
2172 "This event shouldn't occur.\n");
2173 ql_queue_asic_error(qdev); 2175 ql_queue_asic_error(qdev);
2174 return; 2176 return;
2175 2177
2176 case SOFT_ECC_ERROR_EVENT: 2178 case SOFT_ECC_ERROR_EVENT:
2177 netif_err(qdev, rx_err, qdev->ndev, 2179 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2178 "Soft ECC error detected.\n");
2179 ql_queue_asic_error(qdev); 2180 ql_queue_asic_error(qdev);
2180 break; 2181 break;
2181 2182
2182 case PCI_ERR_ANON_BUF_RD: 2183 case PCI_ERR_ANON_BUF_RD:
2183 netif_err(qdev, rx_err, qdev->ndev, 2184 netdev_err(qdev->ndev, "PCI error occurred when reading "
2184 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n", 2185 "anonymous buffers from rx_ring %d.\n",
2185 ib_ae_rsp->q_id); 2186 ib_ae_rsp->q_id);
2186 ql_queue_asic_error(qdev); 2187 ql_queue_asic_error(qdev);
2187 break; 2188 break;
2188 2189
@@ -2437,11 +2438,10 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
2437 */ 2438 */
2438 if (var & STS_FE) { 2439 if (var & STS_FE) {
2439 ql_queue_asic_error(qdev); 2440 ql_queue_asic_error(qdev);
2440 netif_err(qdev, intr, qdev->ndev, 2441 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2441 "Got fatal error, STS = %x.\n", var);
2442 var = ql_read32(qdev, ERR_STS); 2442 var = ql_read32(qdev, ERR_STS);
2443 netif_err(qdev, intr, qdev->ndev, 2443 netdev_err(qdev->ndev, "Resetting chip. "
2444 "Resetting chip. Error Status Register = 0x%x\n", var); 2444 "Error Status Register = 0x%x\n", var);
2445 return IRQ_HANDLED; 2445 return IRQ_HANDLED;
2446 } 2446 }
2447 2447
@@ -3818,11 +3818,17 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
3818 end_jiffies = jiffies + 3818 end_jiffies = jiffies +
3819 max((unsigned long)1, usecs_to_jiffies(30)); 3819 max((unsigned long)1, usecs_to_jiffies(30));
3820 3820
 3821 /* Stop management traffic. */ 3821 /* If the recovery bit is set, skip the mailbox command and
 3822 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP); 3822 * clear the bit; otherwise this is a normal reset.
3823 */
3824 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3825 /* Stop management traffic. */
3826 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3823 3827
3824 /* Wait for the NIC and MGMNT FIFOs to empty. */ 3828 /* Wait for the NIC and MGMNT FIFOs to empty. */
3825 ql_wait_fifo_empty(qdev); 3829 ql_wait_fifo_empty(qdev);
3830 } else
3831 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3826 3832
3827 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR); 3833 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3828 3834
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 05d81780d1fd..5990621fb5cd 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -742,7 +742,7 @@ static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
742 msleep(2); 742 msleep(2);
743 for (i = 0; i < 5; i++) { 743 for (i = 0; i < 5; i++) {
744 udelay(100); 744 udelay(100);
745 if (!(RTL_R32(ERIDR) & ERIAR_FLAG)) 745 if (!(RTL_R32(ERIAR) & ERIAR_FLAG))
746 break; 746 break;
747 } 747 }
748 748
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 77c5092a6a40..5d3436d47edd 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -378,7 +378,7 @@ static int rionet_close(struct net_device *ndev)
378 378
379static void rionet_remove(struct rio_dev *rdev) 379static void rionet_remove(struct rio_dev *rdev)
380{ 380{
381 struct net_device *ndev = NULL; 381 struct net_device *ndev = rio_get_drvdata(rdev);
382 struct rionet_peer *peer, *tmp; 382 struct rionet_peer *peer, *tmp;
383 383
384 free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ? 384 free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ?
@@ -433,22 +433,12 @@ static const struct net_device_ops rionet_netdev_ops = {
433 .ndo_set_mac_address = eth_mac_addr, 433 .ndo_set_mac_address = eth_mac_addr,
434}; 434};
435 435
436static int rionet_setup_netdev(struct rio_mport *mport) 436static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
437{ 437{
438 int rc = 0; 438 int rc = 0;
439 struct net_device *ndev = NULL;
440 struct rionet_private *rnet; 439 struct rionet_private *rnet;
441 u16 device_id; 440 u16 device_id;
442 441
443 /* Allocate our net_device structure */
444 ndev = alloc_etherdev(sizeof(struct rionet_private));
445 if (ndev == NULL) {
446 printk(KERN_INFO "%s: could not allocate ethernet device.\n",
447 DRV_NAME);
448 rc = -ENOMEM;
449 goto out;
450 }
451
452 rionet_active = (struct rio_dev **)__get_free_pages(GFP_KERNEL, 442 rionet_active = (struct rio_dev **)__get_free_pages(GFP_KERNEL,
453 mport->sys_size ? __fls(sizeof(void *)) + 4 : 0); 443 mport->sys_size ? __fls(sizeof(void *)) + 4 : 0);
454 if (!rionet_active) { 444 if (!rionet_active) {
@@ -504,11 +494,21 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
504 int rc = -ENODEV; 494 int rc = -ENODEV;
505 u32 lpef, lsrc_ops, ldst_ops; 495 u32 lpef, lsrc_ops, ldst_ops;
506 struct rionet_peer *peer; 496 struct rionet_peer *peer;
497 struct net_device *ndev = NULL;
507 498
508 /* If local device is not rionet capable, give up quickly */ 499 /* If local device is not rionet capable, give up quickly */
509 if (!rionet_capable) 500 if (!rionet_capable)
510 goto out; 501 goto out;
511 502
503 /* Allocate our net_device structure */
504 ndev = alloc_etherdev(sizeof(struct rionet_private));
505 if (ndev == NULL) {
506 printk(KERN_INFO "%s: could not allocate ethernet device.\n",
507 DRV_NAME);
508 rc = -ENOMEM;
509 goto out;
510 }
511
512 /* 512 /*
513 * First time through, make sure local device is rionet 513 * First time through, make sure local device is rionet
514 * capable, setup netdev, and set flags so this is skipped 514 * capable, setup netdev, and set flags so this is skipped
@@ -529,7 +529,7 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
529 goto out; 529 goto out;
530 } 530 }
531 531
532 rc = rionet_setup_netdev(rdev->net->hport); 532 rc = rionet_setup_netdev(rdev->net->hport, ndev);
533 rionet_check = 1; 533 rionet_check = 1;
534 } 534 }
535 535
@@ -546,6 +546,8 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
546 list_add_tail(&peer->node, &rionet_peers); 546 list_add_tail(&peer->node, &rionet_peers);
547 } 547 }
548 548
549 rio_set_drvdata(rdev, ndev);
550
549 out: 551 out:
550 return rc; 552 return rc;
551} 553}
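The rionet fix is the standard driver-data pairing: probe stores the net_device with rio_set_drvdata() so remove can retrieve it with rio_get_drvdata() instead of operating on the NULL pointer it started with before. Condensed sketch of the pairing (error handling elided):

    static int probe_sketch(struct rio_dev *rdev,
                            const struct rio_device_id *id)
    {
            struct net_device *ndev =
                    alloc_etherdev(sizeof(struct rionet_private));

            if (!ndev)
                    return -ENOMEM;
            rio_set_drvdata(rdev, ndev);    /* stash for remove() */
            return 0;
    }

    static void remove_sketch(struct rio_dev *rdev)
    {
            struct net_device *ndev = rio_get_drvdata(rdev);

            free_netdev(ndev);
    }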
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 8a72a979ee71..1f3f7b4dd638 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -140,6 +140,8 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
140 .tpauser = 1, 140 .tpauser = 1,
141 .hw_swap = 1, 141 .hw_swap = 1,
142 .no_ade = 1, 142 .no_ade = 1,
143 .rpadir = 1,
144 .rpadir_value = 2 << 16,
143}; 145};
144 146
145#define SH_GIGA_ETH_BASE 0xfee00000 147#define SH_GIGA_ETH_BASE 0xfee00000
@@ -1184,8 +1186,8 @@ static void sh_eth_adjust_link(struct net_device *ndev)
1184 mdp->cd->set_rate(ndev); 1186 mdp->cd->set_rate(ndev);
1185 } 1187 }
1186 if (mdp->link == PHY_DOWN) { 1188 if (mdp->link == PHY_DOWN) {
1187 sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_TXF) 1189 sh_eth_write(ndev,
1188 | ECMR_DM, ECMR); 1190 (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
1189 new_state = 1; 1191 new_state = 1;
1190 mdp->link = phydev->link; 1192 mdp->link = phydev->link;
1191 } 1193 }
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
index d965fb1e013e..a9b6c63d54e4 100644
--- a/drivers/net/usb/kalmia.c
+++ b/drivers/net/usb/kalmia.c
@@ -100,34 +100,42 @@ kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len,
100static int 100static int
101kalmia_init_and_get_ethernet_addr(struct usbnet *dev, u8 *ethernet_addr) 101kalmia_init_and_get_ethernet_addr(struct usbnet *dev, u8 *ethernet_addr)
102{ 102{
103 char init_msg_1[] = 103 const static char init_msg_1[] =
104 { 0x57, 0x50, 0x04, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 104 { 0x57, 0x50, 0x04, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00,
105 0x00, 0x00 }; 105 0x00, 0x00 };
106 char init_msg_2[] = 106 const static char init_msg_2[] =
107 { 0x57, 0x50, 0x04, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0xf4, 107 { 0x57, 0x50, 0x04, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0xf4,
108 0x00, 0x00 }; 108 0x00, 0x00 };
109 char receive_buf[28]; 109 const static int buflen = 28;
110 char *usb_buf;
110 int status; 111 int status;
111 112
112 status = kalmia_send_init_packet(dev, init_msg_1, sizeof(init_msg_1) 113 usb_buf = kmalloc(buflen, GFP_DMA | GFP_KERNEL);
113 / sizeof(init_msg_1[0]), receive_buf, 24); 114 if (!usb_buf)
115 return -ENOMEM;
116
117 memcpy(usb_buf, init_msg_1, 12);
118 status = kalmia_send_init_packet(dev, usb_buf, sizeof(init_msg_1)
119 / sizeof(init_msg_1[0]), usb_buf, 24);
114 if (status != 0) 120 if (status != 0)
115 return status; 121 return status;
116 122
117 status = kalmia_send_init_packet(dev, init_msg_2, sizeof(init_msg_2) 123 memcpy(usb_buf, init_msg_2, 12);
118 / sizeof(init_msg_2[0]), receive_buf, 28); 124 status = kalmia_send_init_packet(dev, usb_buf, sizeof(init_msg_2)
125 / sizeof(init_msg_2[0]), usb_buf, 28);
119 if (status != 0) 126 if (status != 0)
120 return status; 127 return status;
121 128
122 memcpy(ethernet_addr, receive_buf + 10, ETH_ALEN); 129 memcpy(ethernet_addr, usb_buf + 10, ETH_ALEN);
123 130
131 kfree(usb_buf);
124 return status; 132 return status;
125} 133}
126 134
127static int 135static int
128kalmia_bind(struct usbnet *dev, struct usb_interface *intf) 136kalmia_bind(struct usbnet *dev, struct usb_interface *intf)
129{ 137{
130 u8 status; 138 int status;
131 u8 ethernet_addr[ETH_ALEN]; 139 u8 ethernet_addr[ETH_ALEN];
132 140
133 /* Don't bind to AT command interface */ 141 /* Don't bind to AT command interface */
@@ -190,7 +198,8 @@ kalmia_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
190 dev_kfree_skb_any(skb); 198 dev_kfree_skb_any(skb);
191 skb = skb2; 199 skb = skb2;
192 200
193 done: header_start = skb_push(skb, KALMIA_HEADER_LENGTH); 201done:
202 header_start = skb_push(skb, KALMIA_HEADER_LENGTH);
194 ether_type_1 = header_start[KALMIA_HEADER_LENGTH + 12]; 203 ether_type_1 = header_start[KALMIA_HEADER_LENGTH + 12];
195 ether_type_2 = header_start[KALMIA_HEADER_LENGTH + 13]; 204 ether_type_2 = header_start[KALMIA_HEADER_LENGTH + 13];
196 205
@@ -201,9 +210,8 @@ kalmia_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
201 header_start[0] = 0x57; 210 header_start[0] = 0x57;
202 header_start[1] = 0x44; 211 header_start[1] = 0x44;
203 content_len = skb->len - KALMIA_HEADER_LENGTH; 212 content_len = skb->len - KALMIA_HEADER_LENGTH;
204 header_start[2] = (content_len & 0xff); /* low byte */
205 header_start[3] = (content_len >> 8); /* high byte */
206 213
214 put_unaligned_le16(content_len, &header_start[2]);
207 header_start[4] = ether_type_1; 215 header_start[4] = ether_type_1;
208 header_start[5] = ether_type_2; 216 header_start[5] = ether_type_2;
209 217
@@ -231,13 +239,13 @@ kalmia_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
231 * Our task here is to strip off framing, leaving skb with one 239 * Our task here is to strip off framing, leaving skb with one
232 * data frame for the usbnet framework code to process. 240 * data frame for the usbnet framework code to process.
233 */ 241 */
234 const u8 HEADER_END_OF_USB_PACKET[] = 242 const static u8 HEADER_END_OF_USB_PACKET[] =
235 { 0x57, 0x5a, 0x00, 0x00, 0x08, 0x00 }; 243 { 0x57, 0x5a, 0x00, 0x00, 0x08, 0x00 };
236 const u8 EXPECTED_UNKNOWN_HEADER_1[] = 244 const static u8 EXPECTED_UNKNOWN_HEADER_1[] =
237 { 0x57, 0x43, 0x1e, 0x00, 0x15, 0x02 }; 245 { 0x57, 0x43, 0x1e, 0x00, 0x15, 0x02 };
238 const u8 EXPECTED_UNKNOWN_HEADER_2[] = 246 const static u8 EXPECTED_UNKNOWN_HEADER_2[] =
239 { 0x57, 0x50, 0x0e, 0x00, 0x00, 0x00 }; 247 { 0x57, 0x50, 0x0e, 0x00, 0x00, 0x00 };
240 u8 i = 0; 248 int i = 0;
241 249
242 /* incomplete header? */ 250 /* incomplete header? */
243 if (skb->len < KALMIA_HEADER_LENGTH) 251 if (skb->len < KALMIA_HEADER_LENGTH)
@@ -285,7 +293,7 @@ kalmia_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
285 293
286 /* subtract start header and end header */ 294 /* subtract start header and end header */
287 usb_packet_length = skb->len - (2 * KALMIA_HEADER_LENGTH); 295 usb_packet_length = skb->len - (2 * KALMIA_HEADER_LENGTH);
288 ether_packet_length = header_start[2] + (header_start[3] << 8); 296 ether_packet_length = get_unaligned_le16(&header_start[2]);
289 skb_pull(skb, KALMIA_HEADER_LENGTH); 297 skb_pull(skb, KALMIA_HEADER_LENGTH);
290 298
 291 /* Some small packets miss the end marker */ 299 /* Some small packets miss the end marker */
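Besides moving the USB transfer buffer off the stack and into kmalloc()ed (DMA-able) memory, the kalmia patch replaces hand-rolled byte shifts with the unaligned little-endian accessors, which are safe on architectures that fault on unaligned loads. Sketch of the two helpers used above:

    #include <linux/types.h>
    #include <asm/unaligned.h>

    static void write_hdr_len(u8 *hdr, u16 content_len)
    {
            put_unaligned_le16(content_len, &hdr[2]);  /* low byte first */
    }

    static u16 read_hdr_len(const u8 *hdr)
    {
            return get_unaligned_le16(&hdr[2]);
    }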
diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c
index 241756e0e86f..1a2234c20514 100644
--- a/drivers/net/usb/zaurus.c
+++ b/drivers/net/usb/zaurus.c
@@ -331,17 +331,7 @@ static const struct usb_device_id products [] = {
331 ZAURUS_MASTER_INTERFACE, 331 ZAURUS_MASTER_INTERFACE,
332 .driver_info = ZAURUS_PXA_INFO, 332 .driver_info = ZAURUS_PXA_INFO,
333}, 333},
334
335
336/* At least some of the newest PXA units have very different lies about
337 * their standards support: they claim to be cell phones offering
338 * direct access to their radios! (No, they don't conform to CDC MDLM.)
339 */
340{ 334{
341 USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MDLM,
342 USB_CDC_PROTO_NONE),
343 .driver_info = (unsigned long) &bogus_mdlm_info,
344}, {
345 /* Motorola MOTOMAGX phones */ 335 /* Motorola MOTOMAGX phones */
346 USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x6425, USB_CLASS_COMM, 336 USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x6425, USB_CLASS_COMM,
347 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 337 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index fa6e2ac7475a..67402350d0df 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -575,7 +575,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
575 struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx]; 575 struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
576 u32 val; 576 u32 val;
577 577
578 while (num_allocated < num_to_alloc) { 578 while (num_allocated <= num_to_alloc) {
579 struct vmxnet3_rx_buf_info *rbi; 579 struct vmxnet3_rx_buf_info *rbi;
580 union Vmxnet3_GenericDesc *gd; 580 union Vmxnet3_GenericDesc *gd;
581 581
@@ -621,9 +621,15 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
621 621
622 BUG_ON(rbi->dma_addr == 0); 622 BUG_ON(rbi->dma_addr == 0);
623 gd->rxd.addr = cpu_to_le64(rbi->dma_addr); 623 gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
624 gd->dword[2] = cpu_to_le32((ring->gen << VMXNET3_RXD_GEN_SHIFT) 624 gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
625 | val | rbi->len); 625 | val | rbi->len);
626 626
 627 /* Fill the last buffer but don't mark it ready, or else the
628 * device will think that the queue is full */
629 if (num_allocated == num_to_alloc)
630 break;
631
632 gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
627 num_allocated++; 633 num_allocated++;
628 vmxnet3_cmd_ring_adv_next2fill(ring); 634 vmxnet3_cmd_ring_adv_next2fill(ring);
629 } 635 }
@@ -1140,6 +1146,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1140 VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2 1146 VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
1141 }; 1147 };
1142 u32 num_rxd = 0; 1148 u32 num_rxd = 0;
1149 bool skip_page_frags = false;
1143 struct Vmxnet3_RxCompDesc *rcd; 1150 struct Vmxnet3_RxCompDesc *rcd;
1144 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx; 1151 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
1145#ifdef __BIG_ENDIAN_BITFIELD 1152#ifdef __BIG_ENDIAN_BITFIELD
@@ -1150,11 +1157,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1150 &rxComp); 1157 &rxComp);
1151 while (rcd->gen == rq->comp_ring.gen) { 1158 while (rcd->gen == rq->comp_ring.gen) {
1152 struct vmxnet3_rx_buf_info *rbi; 1159 struct vmxnet3_rx_buf_info *rbi;
1153 struct sk_buff *skb; 1160 struct sk_buff *skb, *new_skb = NULL;
1161 struct page *new_page = NULL;
1154 int num_to_alloc; 1162 int num_to_alloc;
1155 struct Vmxnet3_RxDesc *rxd; 1163 struct Vmxnet3_RxDesc *rxd;
1156 u32 idx, ring_idx; 1164 u32 idx, ring_idx;
1157 1165 struct vmxnet3_cmd_ring *ring = NULL;
1158 if (num_rxd >= quota) { 1166 if (num_rxd >= quota) {
1159 /* we may stop even before we see the EOP desc of 1167 /* we may stop even before we see the EOP desc of
1160 * the current pkt 1168 * the current pkt
@@ -1165,6 +1173,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1165 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2); 1173 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
1166 idx = rcd->rxdIdx; 1174 idx = rcd->rxdIdx;
1167 ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1; 1175 ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
1176 ring = rq->rx_ring + ring_idx;
1168 vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd, 1177 vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
1169 &rxCmdDesc); 1178 &rxCmdDesc);
1170 rbi = rq->buf_info[ring_idx] + idx; 1179 rbi = rq->buf_info[ring_idx] + idx;
@@ -1193,37 +1202,80 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1193 goto rcd_done; 1202 goto rcd_done;
1194 } 1203 }
1195 1204
1205 skip_page_frags = false;
1196 ctx->skb = rbi->skb; 1206 ctx->skb = rbi->skb;
1197 rbi->skb = NULL; 1207 new_skb = dev_alloc_skb(rbi->len + NET_IP_ALIGN);
1208 if (new_skb == NULL) {
 1209 /* Skb allocation failed; do not hand this skb over to
 1210 * the stack. Reuse the buffer and drop the existing pkt.
1211 */
1212 rq->stats.rx_buf_alloc_failure++;
1213 ctx->skb = NULL;
1214 rq->stats.drop_total++;
1215 skip_page_frags = true;
1216 goto rcd_done;
1217 }
1198 1218
1199 pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len, 1219 pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
1200 PCI_DMA_FROMDEVICE); 1220 PCI_DMA_FROMDEVICE);
1201 1221
1202 skb_put(ctx->skb, rcd->len); 1222 skb_put(ctx->skb, rcd->len);
1223
1224 /* Immediate refill */
1225 new_skb->dev = adapter->netdev;
1226 skb_reserve(new_skb, NET_IP_ALIGN);
1227 rbi->skb = new_skb;
1228 rbi->dma_addr = pci_map_single(adapter->pdev,
1229 rbi->skb->data, rbi->len,
1230 PCI_DMA_FROMDEVICE);
1231 rxd->addr = cpu_to_le64(rbi->dma_addr);
1232 rxd->len = rbi->len;
1233
1203 } else { 1234 } else {
1204 BUG_ON(ctx->skb == NULL); 1235 BUG_ON(ctx->skb == NULL && !skip_page_frags);
1236
1205 /* non SOP buffer must be type 1 in most cases */ 1237 /* non SOP buffer must be type 1 in most cases */
1206 if (rbi->buf_type == VMXNET3_RX_BUF_PAGE) { 1238 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
1207 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY); 1239 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
1208 1240
1209 if (rcd->len) { 1241 /* If an sop buffer was dropped, skip all
1210 pci_unmap_page(adapter->pdev, 1242 * following non-sop fragments. They will be reused.
1211 rbi->dma_addr, rbi->len, 1243 */
1212 PCI_DMA_FROMDEVICE); 1244 if (skip_page_frags)
1245 goto rcd_done;
1213 1246
1214 vmxnet3_append_frag(ctx->skb, rcd, rbi); 1247 new_page = alloc_page(GFP_ATOMIC);
1215 rbi->page = NULL; 1248 if (unlikely(new_page == NULL)) {
1216 } 1249 /* Replacement page frag could not be allocated.
1217 } else { 1250 * Reuse this page. Drop the pkt and free the
1218 /* 1251 * skb which contained this page as a frag. Skip
1219 * The only time a non-SOP buffer is type 0 is 1252 * processing all the following non-sop frags.
1220 * when it's EOP and error flag is raised, which
1221 * has already been handled.
1222 */ 1253 */
1223 BUG_ON(true); 1254 rq->stats.rx_buf_alloc_failure++;
1255 dev_kfree_skb(ctx->skb);
1256 ctx->skb = NULL;
1257 skip_page_frags = true;
1258 goto rcd_done;
1259 }
1260
1261 if (rcd->len) {
1262 pci_unmap_page(adapter->pdev,
1263 rbi->dma_addr, rbi->len,
1264 PCI_DMA_FROMDEVICE);
1265
1266 vmxnet3_append_frag(ctx->skb, rcd, rbi);
1224 } 1267 }
1268
1269 /* Immediate refill */
1270 rbi->page = new_page;
1271 rbi->dma_addr = pci_map_page(adapter->pdev, rbi->page,
1272 0, PAGE_SIZE,
1273 PCI_DMA_FROMDEVICE);
1274 rxd->addr = cpu_to_le64(rbi->dma_addr);
1275 rxd->len = rbi->len;
1225 } 1276 }
1226 1277
1278
1227 skb = ctx->skb; 1279 skb = ctx->skb;
1228 if (rcd->eop) { 1280 if (rcd->eop) {
1229 skb->len += skb->data_len; 1281 skb->len += skb->data_len;
@@ -1244,26 +1296,27 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1244 } 1296 }
1245 1297
1246rcd_done: 1298rcd_done:
1247 /* device may skip some rx descs */ 1299 /* device may have skipped some rx descs */
1248 rq->rx_ring[ring_idx].next2comp = idx; 1300 ring->next2comp = idx;
1249 VMXNET3_INC_RING_IDX_ONLY(rq->rx_ring[ring_idx].next2comp, 1301 num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
1250 rq->rx_ring[ring_idx].size); 1302 ring = rq->rx_ring + ring_idx;
1251 1303 while (num_to_alloc) {
1252 /* refill rx buffers frequently to avoid starving the h/w */ 1304 vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
1253 num_to_alloc = vmxnet3_cmd_ring_desc_avail(rq->rx_ring + 1305 &rxCmdDesc);
1254 ring_idx); 1306 BUG_ON(!rxd->addr);
1255 if (unlikely(num_to_alloc > VMXNET3_RX_ALLOC_THRESHOLD(rq, 1307
1256 ring_idx, adapter))) { 1308 /* Recv desc is ready to be used by the device */
1257 vmxnet3_rq_alloc_rx_buf(rq, ring_idx, num_to_alloc, 1309 rxd->gen = ring->gen;
1258 adapter); 1310 vmxnet3_cmd_ring_adv_next2fill(ring);
1259 1311 num_to_alloc--;
1260 /* if needed, update the register */ 1312 }
1261 if (unlikely(rq->shared->updateRxProd)) { 1313
1262 VMXNET3_WRITE_BAR0_REG(adapter, 1314 /* if needed, update the register */
1263 rxprod_reg[ring_idx] + rq->qid * 8, 1315 if (unlikely(rq->shared->updateRxProd)) {
1264 rq->rx_ring[ring_idx].next2fill); 1316 VMXNET3_WRITE_BAR0_REG(adapter,
1265 rq->uncommitted[ring_idx] = 0; 1317 rxprod_reg[ring_idx] + rq->qid * 8,
1266 } 1318 ring->next2fill);
1319 rq->uncommitted[ring_idx] = 0;
1267 } 1320 }
1268 1321
1269 vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring); 1322 vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
@@ -2894,6 +2947,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2894 else 2947 else
2895#endif 2948#endif
2896 num_rx_queues = 1; 2949 num_rx_queues = 1;
2950 num_rx_queues = rounddown_pow_of_two(num_rx_queues);
2897 2951
2898 if (enable_mq) 2952 if (enable_mq)
2899 num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES, 2953 num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
@@ -2901,6 +2955,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2901 else 2955 else
2902 num_tx_queues = 1; 2956 num_tx_queues = 1;
2903 2957
2958 num_tx_queues = rounddown_pow_of_two(num_tx_queues);
2904 netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter), 2959 netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
2905 max(num_tx_queues, num_rx_queues)); 2960 max(num_tx_queues, num_rx_queues));
2906 printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n", 2961 printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n",
@@ -3085,6 +3140,7 @@ vmxnet3_remove_device(struct pci_dev *pdev)
3085 else 3140 else
3086#endif 3141#endif
3087 num_rx_queues = 1; 3142 num_rx_queues = 1;
3143 num_rx_queues = rounddown_pow_of_two(num_rx_queues);
3088 3144
3089 cancel_work_sync(&adapter->work); 3145 cancel_work_sync(&adapter->work);
3090 3146
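The vmxnet3 rx rework follows an allocate-before-handover rule: a replacement skb or page is allocated first, and only then is the filled buffer given to the stack; if allocation fails, the old buffer stays in the ring and just that packet is dropped, so the ring never leaks a slot. A sketch with illustrative types:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    struct rx_slot { struct sk_buff *skb; unsigned int len; };
    struct rx_stats { u64 rx_buf_alloc_failure, drop_total; };

    static void rx_one_sketch(struct rx_slot *s, struct rx_stats *st)
    {
            struct sk_buff *new_skb = dev_alloc_skb(s->len + NET_IP_ALIGN);

            if (!new_skb) {
                    st->rx_buf_alloc_failure++;
                    st->drop_total++;
                    return;                 /* reuse s->skb in place */
            }
            netif_receive_skb(s->skb);      /* hand the filled skb up */
            s->skb = new_skb;               /* immediate refill */
    }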
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index f50d36fdf405..e08d75e3f170 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -55,6 +55,7 @@
55#include <linux/if_vlan.h> 55#include <linux/if_vlan.h>
56#include <linux/if_arp.h> 56#include <linux/if_arp.h>
57#include <linux/inetdevice.h> 57#include <linux/inetdevice.h>
58#include <linux/log2.h>
58 59
59#include "vmxnet3_defs.h" 60#include "vmxnet3_defs.h"
60 61
@@ -68,10 +69,10 @@
68/* 69/*
69 * Version numbers 70 * Version numbers
70 */ 71 */
71#define VMXNET3_DRIVER_VERSION_STRING "1.1.9.0-k" 72#define VMXNET3_DRIVER_VERSION_STRING "1.1.18.0-k"
72 73
 73/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */ 74/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
74#define VMXNET3_DRIVER_VERSION_NUM 0x01010900 75#define VMXNET3_DRIVER_VERSION_NUM 0x01011200
75 76
76#if defined(CONFIG_PCI_MSI) 77#if defined(CONFIG_PCI_MSI)
77 /* RSS only makes sense if MSI-X is supported. */ 78 /* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 1fef84f87c78..392771f93759 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -691,14 +691,12 @@ ath5k_eeprom_free_pcal_info(struct ath5k_hw *ah, int mode)
691 if (!chinfo[pier].pd_curves) 691 if (!chinfo[pier].pd_curves)
692 continue; 692 continue;
693 693
694 for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) { 694 for (pdg = 0; pdg < AR5K_EEPROM_N_PD_CURVES; pdg++) {
695 struct ath5k_pdgain_info *pd = 695 struct ath5k_pdgain_info *pd =
696 &chinfo[pier].pd_curves[pdg]; 696 &chinfo[pier].pd_curves[pdg];
697 697
698 if (pd != NULL) { 698 kfree(pd->pd_step);
699 kfree(pd->pd_step); 699 kfree(pd->pd_pwr);
700 kfree(pd->pd_pwr);
701 }
702 } 700 }
703 701
704 kfree(chinfo[pier].pd_curves); 702 kfree(chinfo[pier].pd_curves);
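The ath5k cleanup leans on two guarantees: kfree(NULL) is a defined no-op, so the per-member pd_step/pd_pwr frees need no guard, and the address of an element of a valid array is never NULL, so the deleted pd != NULL test could not fail. Freeing all AR5K_EEPROM_N_PD_CURVES entries also matches how the curves array was sized at allocation. Fragment:

    for (pdg = 0; pdg < AR5K_EEPROM_N_PD_CURVES; pdg++) {
            kfree(chinfo[pier].pd_curves[pdg].pd_step); /* NULL-safe */
            kfree(chinfo[pier].pd_curves[pdg].pd_pwr);
    }
    kfree(chinfo[pier].pd_curves);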
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index b8cbfc707213..3bad0b2cf9a3 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -278,6 +278,12 @@ static int ath_pci_suspend(struct device *device)
278 278
279 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1); 279 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
280 280
281 /* The device has to be moved to FULLSLEEP forcibly.
 282 * Otherwise the chip never moves to full sleep
 283 * when no interface is up.
284 */
285 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
286
281 return 0; 287 return 0;
282} 288}
283 289
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 61d4a11f566b..2a88e73bb39c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -36,6 +36,7 @@
36#include <net/mac80211.h> 36#include <net/mac80211.h>
37#include <linux/etherdevice.h> 37#include <linux/etherdevice.h>
38#include <asm/unaligned.h> 38#include <asm/unaligned.h>
39#include <linux/stringify.h>
39 40
40#include "iwl-eeprom.h" 41#include "iwl-eeprom.h"
41#include "iwl-dev.h" 42#include "iwl-dev.h"
@@ -55,10 +56,10 @@
55#define IWL100_UCODE_API_MIN 5 56#define IWL100_UCODE_API_MIN 5
56 57
57#define IWL1000_FW_PRE "iwlwifi-1000-" 58#define IWL1000_FW_PRE "iwlwifi-1000-"
58#define IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE #api ".ucode" 59#define IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE __stringify(api) ".ucode"
59 60
60#define IWL100_FW_PRE "iwlwifi-100-" 61#define IWL100_FW_PRE "iwlwifi-100-"
61#define IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE #api ".ucode" 62#define IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE __stringify(api) ".ucode"
62 63
63 64
64/* 65/*
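This and the following iwlwifi hunks fix the same preprocessor pitfall: the # operator stringizes its argument exactly as written, without expanding it, so "IWL1000_FW_PRE #api" built from a macro constant would embed the macro's name, not its value. __stringify() adds the extra expansion pass. A runnable userspace demo:

    #include <stdio.h>

    #define API_MAX 5
    #define STR_DIRECT(x)    #x               /* stringizes the token as given */
    #define __stringify_1(x) #x
    #define __stringify(x)   __stringify_1(x) /* expands x first, as in the kernel */

    int main(void)
    {
            printf("%s\n", STR_DIRECT(API_MAX));  /* prints: API_MAX */
            printf("%s\n", __stringify(API_MAX)); /* prints: 5 */
            return 0;
    }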
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 2282279cffc4..3df76f53a41b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -36,6 +36,7 @@
36#include <net/mac80211.h> 36#include <net/mac80211.h>
37#include <linux/etherdevice.h> 37#include <linux/etherdevice.h>
38#include <asm/unaligned.h> 38#include <asm/unaligned.h>
39#include <linux/stringify.h>
39 40
40#include "iwl-eeprom.h" 41#include "iwl-eeprom.h"
41#include "iwl-dev.h" 42#include "iwl-dev.h"
@@ -58,13 +59,13 @@
58#define IWL105_UCODE_API_MIN 5 59#define IWL105_UCODE_API_MIN 5
59 60
60#define IWL2030_FW_PRE "iwlwifi-2030-" 61#define IWL2030_FW_PRE "iwlwifi-2030-"
61#define IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE #api ".ucode" 62#define IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE __stringify(api) ".ucode"
62 63
63#define IWL2000_FW_PRE "iwlwifi-2000-" 64#define IWL2000_FW_PRE "iwlwifi-2000-"
64#define IWL2000_MODULE_FIRMWARE(api) IWL2000_FW_PRE #api ".ucode" 65#define IWL2000_MODULE_FIRMWARE(api) IWL2000_FW_PRE __stringify(api) ".ucode"
65 66
66#define IWL105_FW_PRE "iwlwifi-105-" 67#define IWL105_FW_PRE "iwlwifi-105-"
67#define IWL105_MODULE_FIRMWARE(api) IWL105_FW_PRE #api ".ucode" 68#define IWL105_MODULE_FIRMWARE(api) IWL105_FW_PRE __stringify(api) ".ucode"
68 69
69static void iwl2000_set_ct_threshold(struct iwl_priv *priv) 70static void iwl2000_set_ct_threshold(struct iwl_priv *priv)
70{ 71{
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index f99f9c193352..e816c27db794 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -37,6 +37,7 @@
37#include <net/mac80211.h> 37#include <net/mac80211.h>
38#include <linux/etherdevice.h> 38#include <linux/etherdevice.h>
39#include <asm/unaligned.h> 39#include <asm/unaligned.h>
40#include <linux/stringify.h>
40 41
41#include "iwl-eeprom.h" 42#include "iwl-eeprom.h"
42#include "iwl-dev.h" 43#include "iwl-dev.h"
@@ -57,10 +58,10 @@
57#define IWL5150_UCODE_API_MIN 1 58#define IWL5150_UCODE_API_MIN 1
58 59
59#define IWL5000_FW_PRE "iwlwifi-5000-" 60#define IWL5000_FW_PRE "iwlwifi-5000-"
60#define IWL5000_MODULE_FIRMWARE(api) IWL5000_FW_PRE #api ".ucode" 61#define IWL5000_MODULE_FIRMWARE(api) IWL5000_FW_PRE __stringify(api) ".ucode"
61 62
62#define IWL5150_FW_PRE "iwlwifi-5150-" 63#define IWL5150_FW_PRE "iwlwifi-5150-"
63#define IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE #api ".ucode" 64#define IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE __stringify(api) ".ucode"
64 65
65/* NIC configuration for 5000 series */ 66/* NIC configuration for 5000 series */
66static void iwl5000_nic_config(struct iwl_priv *priv) 67static void iwl5000_nic_config(struct iwl_priv *priv)
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index fbe565c816e3..5b150bc70b06 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -36,6 +36,7 @@
36#include <net/mac80211.h> 36#include <net/mac80211.h>
37#include <linux/etherdevice.h> 37#include <linux/etherdevice.h>
38#include <asm/unaligned.h> 38#include <asm/unaligned.h>
39#include <linux/stringify.h>
39 40
40#include "iwl-eeprom.h" 41#include "iwl-eeprom.h"
41#include "iwl-dev.h" 42#include "iwl-dev.h"
@@ -58,16 +59,16 @@
58#define IWL6000G2_UCODE_API_MIN 4 59#define IWL6000G2_UCODE_API_MIN 4
59 60
60#define IWL6000_FW_PRE "iwlwifi-6000-" 61#define IWL6000_FW_PRE "iwlwifi-6000-"
61#define IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode" 62#define IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE __stringify(api) ".ucode"
62 63
63#define IWL6050_FW_PRE "iwlwifi-6050-" 64#define IWL6050_FW_PRE "iwlwifi-6050-"
64#define IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE #api ".ucode" 65#define IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE __stringify(api) ".ucode"
65 66
66#define IWL6005_FW_PRE "iwlwifi-6000g2a-" 67#define IWL6005_FW_PRE "iwlwifi-6000g2a-"
67#define IWL6005_MODULE_FIRMWARE(api) IWL6005_FW_PRE #api ".ucode" 68#define IWL6005_MODULE_FIRMWARE(api) IWL6005_FW_PRE __stringify(api) ".ucode"
68 69
69#define IWL6030_FW_PRE "iwlwifi-6000g2b-" 70#define IWL6030_FW_PRE "iwlwifi-6000g2b-"
70#define IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE #api ".ucode" 71#define IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE __stringify(api) ".ucode"
71 72
72static void iwl6000_set_ct_threshold(struct iwl_priv *priv) 73static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
73{ 74{
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 213c80c6a668..45cc51c9c93e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -1763,6 +1763,7 @@ int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1763 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); 1763 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
1764 struct iwl_rxon_context *bss_ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 1764 struct iwl_rxon_context *bss_ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1765 struct iwl_rxon_context *tmp; 1765 struct iwl_rxon_context *tmp;
1766 enum nl80211_iftype newviftype = newtype;
1766 u32 interface_modes; 1767 u32 interface_modes;
1767 int err; 1768 int err;
1768 1769
@@ -1818,7 +1819,7 @@ int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1818 1819
1819 /* success */ 1820 /* success */
1820 iwl_teardown_interface(priv, vif, true); 1821 iwl_teardown_interface(priv, vif, true);
1821 vif->type = newtype; 1822 vif->type = newviftype;
1822 vif->p2p = newp2p; 1823 vif->p2p = newp2p;
1823 err = iwl_setup_interface(priv, ctx); 1824 err = iwl_setup_interface(priv, ctx);
1824 WARN_ON(err); 1825 WARN_ON(err);
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 686e176b5ebd..137dba95b1ad 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -126,7 +126,7 @@ static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
126} 126}
127 127
128static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta, 128static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
129 struct iwl_tfd *tfd) 129 struct iwl_tfd *tfd, int dma_dir)
130{ 130{
131 struct pci_dev *dev = priv->pci_dev; 131 struct pci_dev *dev = priv->pci_dev;
132 int i; 132 int i;
@@ -151,7 +151,7 @@ static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
151 /* Unmap chunks, if any. */ 151 /* Unmap chunks, if any. */
152 for (i = 1; i < num_tbs; i++) 152 for (i = 1; i < num_tbs; i++)
153 pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i), 153 pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
154 iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE); 154 iwl_tfd_tb_get_len(tfd, i), dma_dir);
155} 155}
156 156
157/** 157/**
@@ -167,7 +167,8 @@ void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
167 struct iwl_tfd *tfd_tmp = txq->tfds; 167 struct iwl_tfd *tfd_tmp = txq->tfds;
168 int index = txq->q.read_ptr; 168 int index = txq->q.read_ptr;
169 169
170 iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index]); 170 iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index],
171 PCI_DMA_TODEVICE);
171 172
172 /* free SKB */ 173 /* free SKB */
173 if (txq->txb) { 174 if (txq->txb) {
@@ -310,9 +311,7 @@ void iwl_cmd_queue_unmap(struct iwl_priv *priv)
310 i = get_cmd_index(q, q->read_ptr); 311 i = get_cmd_index(q, q->read_ptr);
311 312
312 if (txq->meta[i].flags & CMD_MAPPED) { 313 if (txq->meta[i].flags & CMD_MAPPED) {
313 pci_unmap_single(priv->pci_dev, 314 iwlagn_unmap_tfd(priv, &txq->meta[i], &txq->tfds[i],
314 dma_unmap_addr(&txq->meta[i], mapping),
315 dma_unmap_len(&txq->meta[i], len),
316 PCI_DMA_BIDIRECTIONAL); 315 PCI_DMA_BIDIRECTIONAL);
317 txq->meta[i].flags = 0; 316 txq->meta[i].flags = 0;
318 } 317 }
@@ -535,12 +534,7 @@ out_free_arrays:
535void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, 534void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
536 int slots_num, u32 txq_id) 535 int slots_num, u32 txq_id)
537{ 536{
538 int actual_slots = slots_num; 537 memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * slots_num);
539
540 if (txq_id == priv->cmd_queue)
541 actual_slots++;
542
543 memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
544 538
545 txq->need_update = 0; 539 txq->need_update = 0;
546 540
@@ -700,10 +694,11 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
700 if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)) 694 if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
701 continue; 695 continue;
702 phys_addr = pci_map_single(priv->pci_dev, (void *)cmd->data[i], 696 phys_addr = pci_map_single(priv->pci_dev, (void *)cmd->data[i],
703 cmd->len[i], PCI_DMA_TODEVICE); 697 cmd->len[i], PCI_DMA_BIDIRECTIONAL);
704 if (pci_dma_mapping_error(priv->pci_dev, phys_addr)) { 698 if (pci_dma_mapping_error(priv->pci_dev, phys_addr)) {
705 iwlagn_unmap_tfd(priv, out_meta, 699 iwlagn_unmap_tfd(priv, out_meta,
706 &txq->tfds[q->write_ptr]); 700 &txq->tfds[q->write_ptr],
701 PCI_DMA_BIDIRECTIONAL);
707 idx = -ENOMEM; 702 idx = -ENOMEM;
708 goto out; 703 goto out;
709 } 704 }
@@ -807,7 +802,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
807 cmd = txq->cmd[cmd_index]; 802 cmd = txq->cmd[cmd_index];
808 meta = &txq->meta[cmd_index]; 803 meta = &txq->meta[cmd_index];
809 804
810 iwlagn_unmap_tfd(priv, meta, &txq->tfds[index]); 805 iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], PCI_DMA_BIDIRECTIONAL);
811 806
812 /* Input error checking is done when commands are added to queue. */ 807 /* Input error checking is done when commands are added to queue. */
813 if (meta->flags & CMD_WANT_SKB) { 808 if (meta->flags & CMD_WANT_SKB) {
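[The iwl-tx.c hunks thread a dma_dir argument through iwlagn_unmap_tfd() because the streaming-DMA API requires the unmap direction to match the one used at map time; host-command buffers are now mapped PCI_DMA_BIDIRECTIONAL since the device may write status back into them. A minimal sketch of the invariant, using a hypothetical helper rather than driver code:]

	static int demo_dma_roundtrip(struct pci_dev *pdev, void *buf, size_t len)
	{
		dma_addr_t addr;

		addr = pci_map_single(pdev, buf, len, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(pdev, addr))
			return -ENOMEM;
		/* ... device reads the command and may write status back ... */
		pci_unmap_single(pdev, addr, len,
				 PCI_DMA_BIDIRECTIONAL); /* same direction as the map */
		return 0;
	}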
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 9f8ccae93317..254b64ba4bf6 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -1624,6 +1624,16 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
1624 pci_read_config_byte(pdev, 0x8, &revisionid); 1624 pci_read_config_byte(pdev, 0x8, &revisionid);
1625 pci_read_config_word(pdev, 0x3C, &irqline); 1625 pci_read_config_word(pdev, 0x3C, &irqline);
1626 1626
1627 /* PCI ID 0x10ec:0x8192 occurs for both RTL8192E, which uses
1628 * r8192e_pci, and RTL8192SE, which uses this driver. If the
1629 * revision ID is RTL_PCI_REVISION_ID_8192PCIE (0x01), then
1630 * the correct driver is r8192e_pci, thus this routine should
1631 * return false.
1632 */
1633 if (deviceid == RTL_PCI_8192SE_DID &&
1634 revisionid == RTL_PCI_REVISION_ID_8192PCIE)
1635 return false;
1636
1627 if (deviceid == RTL_PCI_8192_DID || 1637 if (deviceid == RTL_PCI_8192_DID ||
1628 deviceid == RTL_PCI_0044_DID || 1638 deviceid == RTL_PCI_0044_DID ||
1629 deviceid == RTL_PCI_0047_DID || 1639 deviceid == RTL_PCI_0047_DID ||
@@ -1856,7 +1866,8 @@ int __devinit rtl_pci_probe(struct pci_dev *pdev,
1856 pci_write_config_byte(pdev, 0x04, 0x07); 1866 pci_write_config_byte(pdev, 0x04, 0x07);
1857 1867
1858 /* find adapter */ 1868 /* find adapter */
1859 _rtl_pci_find_adapter(pdev, hw); 1869 if (!_rtl_pci_find_adapter(pdev, hw))
1870 goto fail3;
1860 1871
1861 /* Init IO handler */ 1872 /* Init IO handler */
1862 _rtl_pci_io_handler_init(&pdev->dev, hw); 1873 _rtl_pci_io_handler_init(&pdev->dev, hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index bee7c1480f63..092e342c19df 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -53,6 +53,8 @@ MODULE_FIRMWARE("rtlwifi/rtl8192cufw.bin");
53static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw) 53static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
54{ 54{
55 struct rtl_priv *rtlpriv = rtl_priv(hw); 55 struct rtl_priv *rtlpriv = rtl_priv(hw);
56 const struct firmware *firmware;
57 int err;
56 58
57 rtlpriv->dm.dm_initialgain_enable = 1; 59 rtlpriv->dm.dm_initialgain_enable = 1;
58 rtlpriv->dm.dm_flag = 0; 60 rtlpriv->dm.dm_flag = 0;
@@ -64,6 +66,24 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
64 ("Can't alloc buffer for fw.\n")); 66 ("Can't alloc buffer for fw.\n"));
65 return 1; 67 return 1;
66 } 68 }
69 /* request fw */
70 err = request_firmware(&firmware, rtlpriv->cfg->fw_name,
71 rtlpriv->io.dev);
72 if (err) {
73 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
74 ("Failed to request firmware!\n"));
75 return 1;
76 }
77 if (firmware->size > 0x4000) {
78 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
79 ("Firmware is too big!\n"));
80 release_firmware(firmware);
81 return 1;
82 }
83 memcpy(rtlpriv->rtlhal.pfirmware, firmware->data, firmware->size);
84 rtlpriv->rtlhal.fwsize = firmware->size;
85 release_firmware(firmware);
86
67 return 0; 87 return 0;
68} 88}
69 89
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index e8a140669f90..02145e9697a9 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2761,6 +2761,8 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
2761} 2761}
2762DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); 2762DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
2763DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); 2763DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
2764DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
2765DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
2764#endif /*CONFIG_MMC_RICOH_MMC*/ 2766#endif /*CONFIG_MMC_RICOH_MMC*/
2765 2767
2766#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP) 2768#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP)
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 4724ba3acf1a..b2005b44e4f7 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -149,6 +149,7 @@ static const struct i2c_device_id ds1307_id[] = {
149 { "ds1340", ds_1340 }, 149 { "ds1340", ds_1340 },
150 { "ds3231", ds_3231 }, 150 { "ds3231", ds_3231 },
151 { "m41t00", m41t00 }, 151 { "m41t00", m41t00 },
152 { "pt7c4338", ds_1307 },
152 { "rx8025", rx_8025 }, 153 { "rx8025", rx_8025 },
153 { } 154 { }
154}; 155};
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 4a1f029c4fe9..8d9dae89f065 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -830,6 +830,19 @@ config SCSI_GDTH
830 To compile this driver as a module, choose M here: the 830 To compile this driver as a module, choose M here: the
831 module will be called gdth. 831 module will be called gdth.
832 832
833config SCSI_ISCI
834 tristate "Intel(R) C600 Series Chipset SAS Controller"
835 depends on PCI && SCSI
836 depends on X86
837 # (temporary): known alpha quality driver
838 depends on EXPERIMENTAL
839 select SCSI_SAS_LIBSAS
840 ---help---
841 This driver supports the 6Gb/s SAS capabilities of the storage
842 control unit found in the Intel(R) C600 series chipset.
843
 844 The experimental tag will be removed after the driver exits alpha.
845
833config SCSI_GENERIC_NCR5380 846config SCSI_GENERIC_NCR5380
834 tristate "Generic NCR5380/53c400 SCSI PIO support" 847 tristate "Generic NCR5380/53c400 SCSI PIO support"
835 depends on ISA && SCSI 848 depends on ISA && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 7ad0b8a79ae8..3c08f5352b2d 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -73,6 +73,7 @@ obj-$(CONFIG_SCSI_AACRAID) += aacraid/
73obj-$(CONFIG_SCSI_AIC7XXX_OLD) += aic7xxx_old.o 73obj-$(CONFIG_SCSI_AIC7XXX_OLD) += aic7xxx_old.o
74obj-$(CONFIG_SCSI_AIC94XX) += aic94xx/ 74obj-$(CONFIG_SCSI_AIC94XX) += aic94xx/
75obj-$(CONFIG_SCSI_PM8001) += pm8001/ 75obj-$(CONFIG_SCSI_PM8001) += pm8001/
76obj-$(CONFIG_SCSI_ISCI) += isci/
76obj-$(CONFIG_SCSI_IPS) += ips.o 77obj-$(CONFIG_SCSI_IPS) += ips.o
77obj-$(CONFIG_SCSI_FD_MCS) += fd_mcs.o 78obj-$(CONFIG_SCSI_FD_MCS) += fd_mcs.o
78obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o 79obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index c6c0434d8034..6bba23a26303 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -1037,6 +1037,7 @@ static void complete_scsi_command(struct CommandList *cp)
1037 unsigned char sense_key; 1037 unsigned char sense_key;
1038 unsigned char asc; /* additional sense code */ 1038 unsigned char asc; /* additional sense code */
1039 unsigned char ascq; /* additional sense code qualifier */ 1039 unsigned char ascq; /* additional sense code qualifier */
1040 unsigned long sense_data_size;
1040 1041
1041 ei = cp->err_info; 1042 ei = cp->err_info;
1042 cmd = (struct scsi_cmnd *) cp->scsi_cmd; 1043 cmd = (struct scsi_cmnd *) cp->scsi_cmd;
@@ -1051,10 +1052,14 @@ static void complete_scsi_command(struct CommandList *cp)
1051 cmd->result |= ei->ScsiStatus; 1052 cmd->result |= ei->ScsiStatus;
1052 1053
1053 /* copy the sense data whether we need to or not. */ 1054 /* copy the sense data whether we need to or not. */
1054 memcpy(cmd->sense_buffer, ei->SenseInfo, 1055 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
1055 ei->SenseLen > SCSI_SENSE_BUFFERSIZE ? 1056 sense_data_size = SCSI_SENSE_BUFFERSIZE;
1056 SCSI_SENSE_BUFFERSIZE : 1057 else
1057 ei->SenseLen); 1058 sense_data_size = sizeof(ei->SenseInfo);
1059 if (ei->SenseLen < sense_data_size)
1060 sense_data_size = ei->SenseLen;
1061
1062 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
1058 scsi_set_resid(cmd, ei->ResidualCnt); 1063 scsi_set_resid(cmd, ei->ResidualCnt);
1059 1064
1060 if (ei->CommandStatus == 0) { 1065 if (ei->CommandStatus == 0) {
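[The rewritten copy clamps the length to the smallest of the destination buffer, the source field, and the length the controller reported, so memcpy() can overrun neither side. An equivalent formulation (not what the patch uses) with min3() from <linux/kernel.h>:]

	sense_data_size = min3((unsigned long)SCSI_SENSE_BUFFERSIZE,
			       (unsigned long)sizeof(ei->SenseInfo),
			       (unsigned long)ei->SenseLen);
	memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);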
@@ -2580,7 +2585,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
2580 c->SG[0].Ext = 0; /* we are not chaining*/ 2585 c->SG[0].Ext = 0; /* we are not chaining*/
2581 } 2586 }
2582 hpsa_scsi_do_simple_cmd_core(h, c); 2587 hpsa_scsi_do_simple_cmd_core(h, c);
2583 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); 2588 if (iocommand.buf_size > 0)
2589 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
2584 check_ioctl_unit_attention(h, c); 2590 check_ioctl_unit_attention(h, c);
2585 2591
2586 /* Copy the error information out */ 2592 /* Copy the error information out */
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index b7650613b8c2..bdfa223a7dbb 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -4306,8 +4306,8 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
4306 spin_lock_irqsave(vhost->host->host_lock, flags); 4306 spin_lock_irqsave(vhost->host->host_lock, flags);
4307 if (rc == H_CLOSED) 4307 if (rc == H_CLOSED)
4308 vio_enable_interrupts(to_vio_dev(vhost->dev)); 4308 vio_enable_interrupts(to_vio_dev(vhost->dev));
4309 else if (rc || (rc = ibmvfc_send_crq_init(vhost)) || 4309 if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
4310 (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) { 4310 (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
4311 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); 4311 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4312 dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc); 4312 dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
4313 } 4313 }
diff --git a/drivers/scsi/isci/Makefile b/drivers/scsi/isci/Makefile
new file mode 100644
index 000000000000..3359e10e0d8f
--- /dev/null
+++ b/drivers/scsi/isci/Makefile
@@ -0,0 +1,8 @@
1obj-$(CONFIG_SCSI_ISCI) += isci.o
2isci-objs := init.o phy.o request.o \
3 remote_device.o port.o \
4 host.o task.o probe_roms.o \
5 remote_node_context.o \
6 remote_node_table.o \
7 unsolicited_frame_control.o \
 8 port_config.o
diff --git a/drivers/scsi/isci/firmware/Makefile b/drivers/scsi/isci/firmware/Makefile
new file mode 100644
index 000000000000..5f54461cabc5
--- /dev/null
+++ b/drivers/scsi/isci/firmware/Makefile
@@ -0,0 +1,19 @@
1# Makefile for create_fw
2#
3CC=gcc
4CFLAGS=-c -Wall -O2 -g
5LDFLAGS=
6SOURCES=create_fw.c
 7OBJECTS=$(SOURCES:.c=.o)
8EXECUTABLE=create_fw
9
10all: $(SOURCES) $(EXECUTABLE)
11
12$(EXECUTABLE): $(OBJECTS)
13 $(CC) $(LDFLAGS) $(OBJECTS) -o $@
14
15.c.o:
 16 $(CC) $(CFLAGS) $< -o $@
17
18clean:
19 rm -f *.o $(EXECUTABLE)
diff --git a/drivers/scsi/isci/firmware/README b/drivers/scsi/isci/firmware/README
new file mode 100644
index 000000000000..8056d2bd233b
--- /dev/null
+++ b/drivers/scsi/isci/firmware/README
@@ -0,0 +1,36 @@
 1This defines the temporary binary blob we are to pass to the SCU
2driver to emulate the binary firmware that we will eventually be
3able to access via NVRAM on the SCU controller.
4
 5The current size of the binary blob is expected to be 149 bytes or larger.
6
7Header Types:
80x1: Phy Masks
90x2: Phy Gens
100x3: SAS Addrs
110xff: End of Data
12
13ID string - u8[12]: "#SCU MAGIC#\0"
14Version - u8: 1
15SubVersion - u8: 0
16
17Header Type - u8: 0x1
18Size - u8: 8
19Phy Mask - u32[8]
20
21Header Type - u8: 0x2
22Size - u8: 8
23Phy Gen - u32[8]
24
25Header Type - u8: 0x3
26Size - u8: 8
27Sas Addr - u64[8]
28
 29Header Type - u8: 0xff
30
31
32==============================================================================
33
34Place isci_firmware.bin in /lib/firmware
35Be sure to recreate the initramfs image to include the firmware.
36
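[The layout described above is a simple tag-length-value stream. A hypothetical packed C rendering of it — field names are illustrative; the driver's actual parsing lives in probe_roms.c:]

	#include <stdint.h>

	struct fw_blob_hdr {
		uint8_t id[12];		/* "#SCU MAGIC#\0" */
		uint8_t version;	/* 1 */
		uint8_t sub_version;	/* 0 */
	} __attribute__((packed));

	struct fw_section_hdr {
		uint8_t type;	/* 0x1 phy masks, 0x2 phy gens,
				 * 0x3 SAS addrs, 0xff end of data */
		uint8_t size;	/* element count; 8 for each section above */
		/* payload follows: u32[size] for types 1-2, u64[size] for type 3 */
	} __attribute__((packed));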
diff --git a/drivers/scsi/isci/firmware/create_fw.c b/drivers/scsi/isci/firmware/create_fw.c
new file mode 100644
index 000000000000..c7a2887a7e95
--- /dev/null
+++ b/drivers/scsi/isci/firmware/create_fw.c
@@ -0,0 +1,99 @@
1#include <stdio.h>
2#include <stdlib.h>
3#include <unistd.h>
4#include <sys/types.h>
5#include <sys/stat.h>
6#include <fcntl.h>
7#include <string.h>
8#include <errno.h>
9#include <asm/types.h>
10#include <strings.h>
11#include <stdint.h>
12
13#include "create_fw.h"
14#include "../probe_roms.h"
15
16int write_blob(struct isci_orom *isci_orom)
17{
18 FILE *fd;
19 int err;
20 size_t count;
21
22 fd = fopen(blob_name, "w+");
23 if (!fd) {
24 perror("Open file for write failed");
 25 /* nothing to fclose(): fopen() failed and fd is NULL */
 26 return -EIO;
27 }
28
29 count = fwrite(isci_orom, sizeof(struct isci_orom), 1, fd);
30 if (count != 1) {
31 perror("Write data failed");
32 fclose(fd);
33 return -EIO;
34 }
35
36 fclose(fd);
37
38 return 0;
39}
40
41void set_binary_values(struct isci_orom *isci_orom)
42{
43 int ctrl_idx, phy_idx, port_idx;
44
45 /* setting OROM signature */
46 strncpy(isci_orom->hdr.signature, sig, strlen(sig));
47 isci_orom->hdr.version = version;
48 isci_orom->hdr.total_block_length = sizeof(struct isci_orom);
49 isci_orom->hdr.hdr_length = sizeof(struct sci_bios_oem_param_block_hdr);
50 isci_orom->hdr.num_elements = num_elements;
51
52 for (ctrl_idx = 0; ctrl_idx < 2; ctrl_idx++) {
53 isci_orom->ctrl[ctrl_idx].controller.mode_type = mode_type;
54 isci_orom->ctrl[ctrl_idx].controller.max_concurrent_dev_spin_up =
55 max_num_concurrent_dev_spin_up;
56 isci_orom->ctrl[ctrl_idx].controller.do_enable_ssc =
57 enable_ssc;
58
59 for (port_idx = 0; port_idx < 4; port_idx++)
60 isci_orom->ctrl[ctrl_idx].ports[port_idx].phy_mask =
61 phy_mask[ctrl_idx][port_idx];
62
63 for (phy_idx = 0; phy_idx < 4; phy_idx++) {
64 isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.high =
65 (__u32)(sas_addr[ctrl_idx][phy_idx] >> 32);
66 isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.low =
67 (__u32)(sas_addr[ctrl_idx][phy_idx]);
68
69 isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control0 =
70 afe_tx_amp_control0;
71 isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control1 =
72 afe_tx_amp_control1;
73 isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control2 =
74 afe_tx_amp_control2;
75 isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control3 =
76 afe_tx_amp_control3;
77 }
78 }
79}
80
81int main(void)
82{
83 int err;
84 struct isci_orom *isci_orom;
85
 86 isci_orom = calloc(1, sizeof(struct isci_orom)); /* zeroed allocation */
 87 if (!isci_orom) return -ENOMEM; /* guard against allocation failure */
88
89 set_binary_values(isci_orom);
90
91 err = write_blob(isci_orom);
92 if (err < 0) {
93 free(isci_orom);
94 return err;
95 }
96
97 free(isci_orom);
98 return 0;
99}
diff --git a/drivers/scsi/isci/firmware/create_fw.h b/drivers/scsi/isci/firmware/create_fw.h
new file mode 100644
index 000000000000..5f298828d22e
--- /dev/null
+++ b/drivers/scsi/isci/firmware/create_fw.h
@@ -0,0 +1,77 @@
1#ifndef _CREATE_FW_H_
2#define _CREATE_FW_H_
3#include "../probe_roms.h"
4
5
6/* we are configuring for 2 SCUs */
7static const int num_elements = 2;
8
9/*
10 * For all defined arrays:
11 * elements 0-3 are for SCU0, ports 0-3
12 * elements 4-7 are for SCU1, ports 0-3
13 *
14 * valid configurations for one SCU are:
15 * P0 P1 P2 P3
16 * ----------------
17 * 0xF,0x0,0x0,0x0 # 1 x4 port
18 * 0x3,0x0,0x4,0x8 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are each x1
19 * # ports
20 * 0x1,0x2,0xC,0x0 # Phys 0 and 1 are each x1 ports, phy 2 and phy 3 are a x2
21 * # port
22 * 0x3,0x0,0xC,0x0 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are a x2 port
23 * 0x1,0x2,0x4,0x8 # Each phy is a x1 port (this is the default configuration)
24 *
25 * if there is a port/phy on which you do not wish to override the default
26 * values, use the value assigned to UNINIT_PARAM (255).
27 */
28
 29/* discovery mode type (port auto config mode by default) */
30
31/*
32 * if there is a port/phy on which you do not wish to override the default
 33 * values, use the value "0000000000000000". A SAS address of all zeros is
34 * considered invalid and will not be used.
35 */
36#ifdef MPC
37static const int mode_type = SCIC_PORT_MANUAL_CONFIGURATION_MODE;
38static const __u8 phy_mask[2][4] = { {1, 2, 4, 8},
39 {1, 2, 4, 8} };
40static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFFF0000001ULL,
41 0x5FCFFFFFF0000002ULL,
42 0x5FCFFFFFF0000003ULL,
43 0x5FCFFFFFF0000004ULL },
44 { 0x5FCFFFFFF0000005ULL,
45 0x5FCFFFFFF0000006ULL,
46 0x5FCFFFFFF0000007ULL,
47 0x5FCFFFFFF0000008ULL } };
48#else /* APC (default) */
49static const int mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
50static const __u8 phy_mask[2][4];
51static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFF00000001ULL,
52 0x5FCFFFFF00000001ULL,
53 0x5FCFFFFF00000001ULL,
54 0x5FCFFFFF00000001ULL },
55 { 0x5FCFFFFF00000002ULL,
56 0x5FCFFFFF00000002ULL,
57 0x5FCFFFFF00000002ULL,
58 0x5FCFFFFF00000002ULL } };
59#endif
60
61/* Maximum number of concurrent device spin up */
62static const int max_num_concurrent_dev_spin_up = 1;
63
 64/* enable SSC operation */
65static const int enable_ssc;
66
67/* AFE_TX_AMP_CONTROL */
68static const unsigned int afe_tx_amp_control0 = 0x000bdd08;
69static const unsigned int afe_tx_amp_control1 = 0x000ffc00;
70static const unsigned int afe_tx_amp_control2 = 0x000b7c09;
71static const unsigned int afe_tx_amp_control3 = 0x000afc6e;
72
73static const char blob_name[] = "isci_firmware.bin";
74static const char sig[] = "ISCUOEMB";
75static const unsigned char version = 0x10;
76
77#endif
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
new file mode 100644
index 000000000000..26072f1e9852
--- /dev/null
+++ b/drivers/scsi/isci/host.c
@@ -0,0 +1,2751 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55#include <linux/circ_buf.h>
56#include <linux/device.h>
57#include <scsi/sas.h>
58#include "host.h"
59#include "isci.h"
60#include "port.h"
61#include "host.h"
62#include "probe_roms.h"
63#include "remote_device.h"
64#include "request.h"
65#include "scu_completion_codes.h"
66#include "scu_event_codes.h"
67#include "registers.h"
68#include "scu_remote_node_context.h"
69#include "scu_task_context.h"
70
71#define SCU_CONTEXT_RAM_INIT_STALL_TIME 200
72
73#define smu_max_ports(dcc_value) \
74 (\
75 (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
76 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \
77 )
78
79#define smu_max_task_contexts(dcc_value) \
80 (\
81 (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
82 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \
83 )
84
85#define smu_max_rncs(dcc_value) \
86 (\
87 (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
88 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \
89 )
90
91#define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT 100
92
93/**
94 *
95 *
96 * The number of milliseconds to wait while a given phy is consuming power
97 * before allowing another set of phys to consume power. Ultimately, this will
98 * be specified by OEM parameter.
99 */
100#define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500
101
102/**
103 * NORMALIZE_PUT_POINTER() -
104 *
105 * This macro will normalize the completion queue put pointer so its value can
 106 * be used as an array index.
107 */
108#define NORMALIZE_PUT_POINTER(x) \
109 ((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)
110
111
112/**
113 * NORMALIZE_EVENT_POINTER() -
114 *
115 * This macro will normalize the completion queue event entry so its value can
116 * be used as an index.
117 */
118#define NORMALIZE_EVENT_POINTER(x) \
119 (\
120 ((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \
121 >> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \
122 )
123
124/**
125 * NORMALIZE_GET_POINTER() -
126 *
127 * This macro will normalize the completion queue get pointer so its value can
128 * be used as an index into an array
129 */
130#define NORMALIZE_GET_POINTER(x) \
131 ((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)
132
133/**
134 * NORMALIZE_GET_POINTER_CYCLE_BIT() -
135 *
136 * This macro will normalize the completion queue cycle pointer so it matches
137 * the completion queue cycle bit
138 */
139#define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
140 ((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))
141
142/**
143 * COMPLETION_QUEUE_CYCLE_BIT() -
144 *
145 * This macro will return the cycle bit of the completion queue entry
146 */
147#define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000)
148
149/* Init the state machine and call the state entry function (if any) */
150void sci_init_sm(struct sci_base_state_machine *sm,
151 const struct sci_base_state *state_table, u32 initial_state)
152{
153 sci_state_transition_t handler;
154
155 sm->initial_state_id = initial_state;
156 sm->previous_state_id = initial_state;
157 sm->current_state_id = initial_state;
158 sm->state_table = state_table;
159
160 handler = sm->state_table[initial_state].enter_state;
161 if (handler)
162 handler(sm);
163}
164
165/* Call the state exit fn, update the current state, call the state entry fn */
166void sci_change_state(struct sci_base_state_machine *sm, u32 next_state)
167{
168 sci_state_transition_t handler;
169
170 handler = sm->state_table[sm->current_state_id].exit_state;
171 if (handler)
172 handler(sm);
173
174 sm->previous_state_id = sm->current_state_id;
175 sm->current_state_id = next_state;
176
177 handler = sm->state_table[sm->current_state_id].enter_state;
178 if (handler)
179 handler(sm);
180}
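/*
 * Illustrative, standalone analogue (not driver code): sci_init_sm() and
 * sci_change_state() above implement the classic table-driven state
 * machine, where each state owns optional enter/exit hooks.  Hypothetical
 * names throughout:
 */
#if 0
enum demo_state { DEMO_OFF, DEMO_ON, DEMO_STATE_COUNT };

struct demo_state_ops {
	void (*enter_state)(void);
	void (*exit_state)(void);
};

static void demo_on_enter(void) { /* e.g. power up the hardware */ }

static const struct demo_state_ops demo_table[DEMO_STATE_COUNT] = {
	[DEMO_ON] = { .enter_state = demo_on_enter },
};

static enum demo_state demo_current = DEMO_OFF;

static void demo_change_state(enum demo_state next)
{
	if (demo_table[demo_current].exit_state)
		demo_table[demo_current].exit_state();
	demo_current = next;
	if (demo_table[demo_current].enter_state)	/* hooks are optional */
		demo_table[demo_current].enter_state();
}
#endif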
181
182static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost)
183{
184 u32 get_value = ihost->completion_queue_get;
185 u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK;
186
187 if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) ==
188 COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index]))
189 return true;
190
191 return false;
192}
193
194static bool sci_controller_isr(struct isci_host *ihost)
195{
196 if (sci_controller_completion_queue_has_entries(ihost)) {
197 return true;
198 } else {
199 /*
 200 * we have a spurious interrupt; it could be that we have already
201 * emptied the completion queue from a previous interrupt */
202 writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
203
204 /*
205 * There is a race in the hardware that could cause us not to be notified
206 * of an interrupt completion if we do not take this step. We will mask
207 * then unmask the interrupts so if there is another interrupt pending
 208 * after the clearing of the interrupt source we get the next interrupt message. */
209 writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
210 writel(0, &ihost->smu_registers->interrupt_mask);
211 }
212
213 return false;
214}
215
216irqreturn_t isci_msix_isr(int vec, void *data)
217{
218 struct isci_host *ihost = data;
219
220 if (sci_controller_isr(ihost))
221 tasklet_schedule(&ihost->completion_tasklet);
222
223 return IRQ_HANDLED;
224}
225
226static bool sci_controller_error_isr(struct isci_host *ihost)
227{
228 u32 interrupt_status;
229
230 interrupt_status =
231 readl(&ihost->smu_registers->interrupt_status);
232 interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND);
233
234 if (interrupt_status != 0) {
235 /*
236 * There is an error interrupt pending so let it through and handle
 237 * it in the callback */
238 return true;
239 }
240
241 /*
242 * There is a race in the hardware that could cause us not to be notified
243 * of an interrupt completion if we do not take this step. We will mask
244 * then unmask the error interrupts so if there was another interrupt
245 * pending we will be notified.
246 * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? */
247 writel(0xff, &ihost->smu_registers->interrupt_mask);
248 writel(0, &ihost->smu_registers->interrupt_mask);
249
250 return false;
251}
252
253static void sci_controller_task_completion(struct isci_host *ihost, u32 ent)
254{
255 u32 index = SCU_GET_COMPLETION_INDEX(ent);
256 struct isci_request *ireq = ihost->reqs[index];
257
258 /* Make sure that we really want to process this IO request */
259 if (test_bit(IREQ_ACTIVE, &ireq->flags) &&
260 ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG &&
261 ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index])
262 /* Yep this is a valid io request pass it along to the
263 * io request handler
264 */
265 sci_io_request_tc_completion(ireq, ent);
266}
267
268static void sci_controller_sdma_completion(struct isci_host *ihost, u32 ent)
269{
270 u32 index;
271 struct isci_request *ireq;
272 struct isci_remote_device *idev;
273
274 index = SCU_GET_COMPLETION_INDEX(ent);
275
276 switch (scu_get_command_request_type(ent)) {
277 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
278 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
279 ireq = ihost->reqs[index];
280 dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n",
281 __func__, ent, ireq);
282 /* @todo For a post TC operation we need to fail the IO
283 * request
284 */
285 break;
286 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
287 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
288 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
289 idev = ihost->device_table[index];
290 dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n",
291 __func__, ent, idev);
292 /* @todo For a port RNC operation we need to fail the
293 * device
294 */
295 break;
296 default:
297 dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n",
298 __func__, ent);
299 break;
300 }
301}
302
303static void sci_controller_unsolicited_frame(struct isci_host *ihost, u32 ent)
304{
305 u32 index;
306 u32 frame_index;
307
308 struct scu_unsolicited_frame_header *frame_header;
309 struct isci_phy *iphy;
310 struct isci_remote_device *idev;
311
312 enum sci_status result = SCI_FAILURE;
313
314 frame_index = SCU_GET_FRAME_INDEX(ent);
315
316 frame_header = ihost->uf_control.buffers.array[frame_index].header;
317 ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE;
318
319 if (SCU_GET_FRAME_ERROR(ent)) {
320 /*
 321 * @todo If the IAF frame or SIGNATURE FIS frame has an error, will
 322 * this cause a problem? We expect the phy initialization will
 323 * fail if there is an error in the frame. */
324 sci_controller_release_frame(ihost, frame_index);
325 return;
326 }
327
328 if (frame_header->is_address_frame) {
329 index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
330 iphy = &ihost->phys[index];
331 result = sci_phy_frame_handler(iphy, frame_index);
332 } else {
333
334 index = SCU_GET_COMPLETION_INDEX(ent);
335
336 if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
337 /*
338 * This is a signature fis or a frame from a direct attached SATA
 339 * device that has not yet been created. In either case forward
340 * the frame to the PE and let it take care of the frame data. */
341 index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
342 iphy = &ihost->phys[index];
343 result = sci_phy_frame_handler(iphy, frame_index);
344 } else {
345 if (index < ihost->remote_node_entries)
346 idev = ihost->device_table[index];
347 else
348 idev = NULL;
349
350 if (idev != NULL)
351 result = sci_remote_device_frame_handler(idev, frame_index);
352 else
353 sci_controller_release_frame(ihost, frame_index);
354 }
355 }
356
357 if (result != SCI_SUCCESS) {
358 /*
 359 * @todo Is there any reason to report some additional error message
 360 * when we get this failure notification? */
361 }
362}
363
364static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
365{
366 struct isci_remote_device *idev;
367 struct isci_request *ireq;
368 struct isci_phy *iphy;
369 u32 index;
370
371 index = SCU_GET_COMPLETION_INDEX(ent);
372
373 switch (scu_get_event_type(ent)) {
374 case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
 375 /* @todo The driver did something wrong and we need to fix the condition. */
376 dev_err(&ihost->pdev->dev,
377 "%s: SCIC Controller 0x%p received SMU command error "
378 "0x%x\n",
379 __func__,
380 ihost,
381 ent);
382 break;
383
384 case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
385 case SCU_EVENT_TYPE_SMU_ERROR:
386 case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR:
387 /*
 388 * @todo This is a hardware failure and it's likely that we want to
 389 * reset the controller. */
390 dev_err(&ihost->pdev->dev,
391 "%s: SCIC Controller 0x%p received fatal controller "
392 "event 0x%x\n",
393 __func__,
394 ihost,
395 ent);
396 break;
397
398 case SCU_EVENT_TYPE_TRANSPORT_ERROR:
399 ireq = ihost->reqs[index];
400 sci_io_request_event_handler(ireq, ent);
401 break;
402
403 case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
404 switch (scu_get_event_specifier(ent)) {
405 case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
406 case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
407 ireq = ihost->reqs[index];
408 if (ireq != NULL)
409 sci_io_request_event_handler(ireq, ent);
410 else
411 dev_warn(&ihost->pdev->dev,
412 "%s: SCIC Controller 0x%p received "
413 "event 0x%x for io request object "
414 "that doesnt exist.\n",
415 __func__,
416 ihost,
417 ent);
418
419 break;
420
421 case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
422 idev = ihost->device_table[index];
423 if (idev != NULL)
424 sci_remote_device_event_handler(idev, ent);
425 else
426 dev_warn(&ihost->pdev->dev,
427 "%s: SCIC Controller 0x%p received "
428 "event 0x%x for remote device object "
429 "that doesnt exist.\n",
430 __func__,
431 ihost,
432 ent);
433
434 break;
435 }
436 break;
437
438 case SCU_EVENT_TYPE_BROADCAST_CHANGE:
439 /*
440 * direct the broadcast change event to the phy first and then let
441 * the phy redirect the broadcast change to the port object */
442 case SCU_EVENT_TYPE_ERR_CNT_EVENT:
443 /*
444 * direct error counter event to the phy object since that is where
445 * we get the event notification. This is a type 4 event. */
446 case SCU_EVENT_TYPE_OSSP_EVENT:
447 index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
448 iphy = &ihost->phys[index];
449 sci_phy_event_handler(iphy, ent);
450 break;
451
452 case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
453 case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
454 case SCU_EVENT_TYPE_RNC_OPS_MISC:
455 if (index < ihost->remote_node_entries) {
456 idev = ihost->device_table[index];
457
458 if (idev != NULL)
459 sci_remote_device_event_handler(idev, ent);
460 } else
461 dev_err(&ihost->pdev->dev,
462 "%s: SCIC Controller 0x%p received event 0x%x "
463 "for remote device object 0x%0x that doesnt "
464 "exist.\n",
465 __func__,
466 ihost,
467 ent,
468 index);
469
470 break;
471
472 default:
473 dev_warn(&ihost->pdev->dev,
474 "%s: SCIC Controller received unknown event code %x\n",
475 __func__,
476 ent);
477 break;
478 }
479}
480
481static void sci_controller_process_completions(struct isci_host *ihost)
482{
483 u32 completion_count = 0;
484 u32 ent;
485 u32 get_index;
486 u32 get_cycle;
487 u32 event_get;
488 u32 event_cycle;
489
490 dev_dbg(&ihost->pdev->dev,
491 "%s: completion queue begining get:0x%08x\n",
492 __func__,
493 ihost->completion_queue_get);
494
495 /* Get the component parts of the completion queue */
496 get_index = NORMALIZE_GET_POINTER(ihost->completion_queue_get);
497 get_cycle = SMU_CQGR_CYCLE_BIT & ihost->completion_queue_get;
498
499 event_get = NORMALIZE_EVENT_POINTER(ihost->completion_queue_get);
500 event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & ihost->completion_queue_get;
501
502 while (
503 NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle)
504 == COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index])
505 ) {
506 completion_count++;
507
508 ent = ihost->completion_queue[get_index];
509
510 /* increment the get pointer and check for rollover to toggle the cycle bit */
511 get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) <<
512 (SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT - SCU_MAX_COMPLETION_QUEUE_SHIFT);
513 get_index = (get_index+1) & (SCU_MAX_COMPLETION_QUEUE_ENTRIES-1);
514
515 dev_dbg(&ihost->pdev->dev,
516 "%s: completion queue entry:0x%08x\n",
517 __func__,
518 ent);
519
520 switch (SCU_GET_COMPLETION_TYPE(ent)) {
521 case SCU_COMPLETION_TYPE_TASK:
522 sci_controller_task_completion(ihost, ent);
523 break;
524
525 case SCU_COMPLETION_TYPE_SDMA:
526 sci_controller_sdma_completion(ihost, ent);
527 break;
528
529 case SCU_COMPLETION_TYPE_UFI:
530 sci_controller_unsolicited_frame(ihost, ent);
531 break;
532
533 case SCU_COMPLETION_TYPE_EVENT:
534 case SCU_COMPLETION_TYPE_NOTIFY: {
535 event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) <<
536 (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT);
537 event_get = (event_get+1) & (SCU_MAX_EVENTS-1);
538
539 sci_controller_event_completion(ihost, ent);
540 break;
541 }
542 default:
543 dev_warn(&ihost->pdev->dev,
544 "%s: SCIC Controller received unknown "
545 "completion type %x\n",
546 __func__,
547 ent);
548 break;
549 }
550 }
551
552 /* Update the get register if we completed one or more entries */
553 if (completion_count > 0) {
554 ihost->completion_queue_get =
555 SMU_CQGR_GEN_BIT(ENABLE) |
556 SMU_CQGR_GEN_BIT(EVENT_ENABLE) |
557 event_cycle |
558 SMU_CQGR_GEN_VAL(EVENT_POINTER, event_get) |
559 get_cycle |
560 SMU_CQGR_GEN_VAL(POINTER, get_index);
561
562 writel(ihost->completion_queue_get,
563 &ihost->smu_registers->completion_queue_get);
564
565 }
566
567 dev_dbg(&ihost->pdev->dev,
568 "%s: completion queue ending get:0x%08x\n",
569 __func__,
570 ihost->completion_queue_get);
571
572}
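/*
 * Illustrative, standalone analogue (not driver code): the loop above
 * detects new entries with a "cycle" bit instead of a shared count.  The
 * producer stamps bit 31 of each entry with its current cycle; the
 * consumer keeps the cycle it expects and flips it whenever the get index
 * wraps, so an entry is ready exactly when the two bits agree.
 */
#if 0
#define RING_ENTRIES 512	/* a power of two, like the SCU queue */

static u32 ring[RING_ENTRIES];	/* pre-filled with 0x80000000 at init, as
				 * sci_controller_initialize_completion_queue()
				 * does below, so nothing looks ready yet */
static u32 ring_get;		/* consumer index */
static u32 expected_cycle;	/* starts at 0: the producer's first lap
				 * writes entries with bit 31 clear */

static bool ring_entry_ready(void)
{
	return (ring[ring_get] >> 31) == expected_cycle;
}

static u32 ring_consume(void)
{
	u32 ent = ring[ring_get];

	ring_get = (ring_get + 1) & (RING_ENTRIES - 1);
	if (ring_get == 0)
		expected_cycle ^= 1;	/* wrapped: flip the expected bit */
	return ent;
}
#endif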
573
574static void sci_controller_error_handler(struct isci_host *ihost)
575{
576 u32 interrupt_status;
577
578 interrupt_status =
579 readl(&ihost->smu_registers->interrupt_status);
580
581 if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
582 sci_controller_completion_queue_has_entries(ihost)) {
583
584 sci_controller_process_completions(ihost);
585 writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status);
586 } else {
587 dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__,
588 interrupt_status);
589
590 sci_change_state(&ihost->sm, SCIC_FAILED);
591
592 return;
593 }
594
 595 /* If we don't process any completions I am not sure that we want to do this.
596 * We are in the middle of a hardware fault and should probably be reset.
597 */
598 writel(0, &ihost->smu_registers->interrupt_mask);
599}
600
601irqreturn_t isci_intx_isr(int vec, void *data)
602{
603 irqreturn_t ret = IRQ_NONE;
604 struct isci_host *ihost = data;
605
606 if (sci_controller_isr(ihost)) {
607 writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
608 tasklet_schedule(&ihost->completion_tasklet);
609 ret = IRQ_HANDLED;
610 } else if (sci_controller_error_isr(ihost)) {
611 spin_lock(&ihost->scic_lock);
612 sci_controller_error_handler(ihost);
613 spin_unlock(&ihost->scic_lock);
614 ret = IRQ_HANDLED;
615 }
616
617 return ret;
618}
619
620irqreturn_t isci_error_isr(int vec, void *data)
621{
622 struct isci_host *ihost = data;
623
624 if (sci_controller_error_isr(ihost))
625 sci_controller_error_handler(ihost);
626
627 return IRQ_HANDLED;
628}
629
630/**
631 * isci_host_start_complete() - This function is called by the core library,
632 * through the ISCI Module, to indicate controller start status.
 633 * @ihost: This parameter specifies the ISCI host object
634 * @completion_status: This parameter specifies the completion status from the
635 * core library.
636 *
637 */
638static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status)
639{
640 if (completion_status != SCI_SUCCESS)
641 dev_info(&ihost->pdev->dev,
642 "controller start timed out, continuing...\n");
643 isci_host_change_state(ihost, isci_ready);
644 clear_bit(IHOST_START_PENDING, &ihost->flags);
645 wake_up(&ihost->eventq);
646}
647
648int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
649{
650 struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
651
652 if (test_bit(IHOST_START_PENDING, &ihost->flags))
653 return 0;
654
655 /* todo: use sas_flush_discovery once it is upstream */
656 scsi_flush_work(shost);
659
660 dev_dbg(&ihost->pdev->dev,
661 "%s: ihost->status = %d, time = %ld\n",
662 __func__, isci_host_get_state(ihost), time);
663
664 return 1;
665
666}
667
668/**
669 * sci_controller_get_suggested_start_timeout() - This method returns the
670 * suggested sci_controller_start() timeout amount. The user is free to
671 * use any timeout value, but this method provides the suggested minimum
672 * start timeout value. The returned value is based upon empirical
673 * information determined as a result of interoperability testing.
 674 * @ihost: the handle to the controller object for which to return the
675 * suggested start timeout.
676 *
677 * This method returns the number of milliseconds for the suggested start
678 * operation timeout.
679 */
680static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost)
681{
682 /* Validate the user supplied parameters. */
683 if (!ihost)
684 return 0;
685
686 /*
687 * The suggested minimum timeout value for a controller start operation:
688 *
689 * Signature FIS Timeout
690 * + Phy Start Timeout
691 * + Number of Phy Spin Up Intervals
692 * ---------------------------------
693 * Number of milliseconds for the controller start operation.
694 *
695 * NOTE: The number of phy spin up intervals will be equivalent
696 * to the number of phys divided by the number phys allowed
697 * per interval - 1 (once OEM parameters are supported).
698 * Currently we assume only 1 phy per interval. */
699
700 return SCIC_SDS_SIGNATURE_FIS_TIMEOUT
701 + SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT
702 + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
703}
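/*
 * Worked example: with the constants defined earlier in this file
 * (PHY_START_TIMEOUT = 100 ms, POWER_CONTROL_INTERVAL = 500 ms) and
 * assuming SCI_MAX_PHYS == 4, the suggested timeout evaluates to
 * SCIC_SDS_SIGNATURE_FIS_TIMEOUT + 100 + 3 * 500, i.e. the signature
 * FIS timeout plus 1600 ms.
 */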
704
705static void sci_controller_enable_interrupts(struct isci_host *ihost)
706{
707 BUG_ON(ihost->smu_registers == NULL);
708 writel(0, &ihost->smu_registers->interrupt_mask);
709}
710
711void sci_controller_disable_interrupts(struct isci_host *ihost)
712{
713 BUG_ON(ihost->smu_registers == NULL);
714 writel(0xffffffff, &ihost->smu_registers->interrupt_mask);
715}
716
717static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost)
718{
719 u32 port_task_scheduler_value;
720
721 port_task_scheduler_value =
722 readl(&ihost->scu_registers->peg0.ptsg.control);
723 port_task_scheduler_value |=
724 (SCU_PTSGCR_GEN_BIT(ETM_ENABLE) |
725 SCU_PTSGCR_GEN_BIT(PTSG_ENABLE));
726 writel(port_task_scheduler_value,
727 &ihost->scu_registers->peg0.ptsg.control);
728}
729
730static void sci_controller_assign_task_entries(struct isci_host *ihost)
731{
732 u32 task_assignment;
733
734 /*
735 * Assign all the TCs to function 0
736 * TODO: Do we actually need to read this register to write it back?
737 */
738
739 task_assignment =
740 readl(&ihost->smu_registers->task_context_assignment[0]);
741
742 task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) |
743 (SMU_TCA_GEN_VAL(ENDING, ihost->task_context_entries - 1)) |
744 (SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE));
745
746 writel(task_assignment,
747 &ihost->smu_registers->task_context_assignment[0]);
748
749}
750
751static void sci_controller_initialize_completion_queue(struct isci_host *ihost)
752{
753 u32 index;
754 u32 completion_queue_control_value;
755 u32 completion_queue_get_value;
756 u32 completion_queue_put_value;
757
758 ihost->completion_queue_get = 0;
759
760 completion_queue_control_value =
761 (SMU_CQC_QUEUE_LIMIT_SET(SCU_MAX_COMPLETION_QUEUE_ENTRIES - 1) |
762 SMU_CQC_EVENT_LIMIT_SET(SCU_MAX_EVENTS - 1));
763
764 writel(completion_queue_control_value,
765 &ihost->smu_registers->completion_queue_control);
766
767
768 /* Set the completion queue get pointer and enable the queue */
769 completion_queue_get_value = (
770 (SMU_CQGR_GEN_VAL(POINTER, 0))
771 | (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0))
772 | (SMU_CQGR_GEN_BIT(ENABLE))
773 | (SMU_CQGR_GEN_BIT(EVENT_ENABLE))
774 );
775
776 writel(completion_queue_get_value,
777 &ihost->smu_registers->completion_queue_get);
778
779 /* Set the completion queue put pointer */
780 completion_queue_put_value = (
781 (SMU_CQPR_GEN_VAL(POINTER, 0))
782 | (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0))
783 );
784
785 writel(completion_queue_put_value,
786 &ihost->smu_registers->completion_queue_put);
787
788 /* Initialize the cycle bit of the completion queue entries */
789 for (index = 0; index < SCU_MAX_COMPLETION_QUEUE_ENTRIES; index++) {
790 /*
791 * If get.cycle_bit != completion_queue.cycle_bit
 792 * it's not a valid completion queue entry,
793 * so at system start all entries are invalid */
794 ihost->completion_queue[index] = 0x80000000;
795 }
796}
797
798static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost)
799{
800 u32 frame_queue_control_value;
801 u32 frame_queue_get_value;
802 u32 frame_queue_put_value;
803
804 /* Write the queue size */
805 frame_queue_control_value =
806 SCU_UFQC_GEN_VAL(QUEUE_SIZE, SCU_MAX_UNSOLICITED_FRAMES);
807
808 writel(frame_queue_control_value,
809 &ihost->scu_registers->sdma.unsolicited_frame_queue_control);
810
811 /* Setup the get pointer for the unsolicited frame queue */
812 frame_queue_get_value = (
813 SCU_UFQGP_GEN_VAL(POINTER, 0)
814 | SCU_UFQGP_GEN_BIT(ENABLE_BIT)
815 );
816
817 writel(frame_queue_get_value,
818 &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
819 /* Setup the put pointer for the unsolicited frame queue */
820 frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0);
821 writel(frame_queue_put_value,
822 &ihost->scu_registers->sdma.unsolicited_frame_put_pointer);
823}
824
825static void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
826{
827 if (ihost->sm.current_state_id == SCIC_STARTING) {
828 /*
829 * We move into the ready state, because some of the phys/ports
830 * may be up and operational.
831 */
832 sci_change_state(&ihost->sm, SCIC_READY);
833
834 isci_host_start_complete(ihost, status);
835 }
836}
837
838static bool is_phy_starting(struct isci_phy *iphy)
839{
840 enum sci_phy_states state;
841
842 state = iphy->sm.current_state_id;
843 switch (state) {
844 case SCI_PHY_STARTING:
845 case SCI_PHY_SUB_INITIAL:
846 case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
847 case SCI_PHY_SUB_AWAIT_IAF_UF:
848 case SCI_PHY_SUB_AWAIT_SAS_POWER:
849 case SCI_PHY_SUB_AWAIT_SATA_POWER:
850 case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
851 case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
852 case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
853 case SCI_PHY_SUB_FINAL:
854 return true;
855 default:
856 return false;
857 }
858}
859
860/**
861 * sci_controller_start_next_phy - start phy
 862 * @ihost: controller
863 *
864 * If all the phys have been started, then attempt to transition the
865 * controller to the READY state and inform the user
866 * (sci_cb_controller_start_complete()).
867 */
868static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
869{
870 struct sci_oem_params *oem = &ihost->oem_parameters;
871 struct isci_phy *iphy;
872 enum sci_status status;
873
874 status = SCI_SUCCESS;
875
876 if (ihost->phy_startup_timer_pending)
877 return status;
878
879 if (ihost->next_phy_to_start >= SCI_MAX_PHYS) {
880 bool is_controller_start_complete = true;
881 u32 state;
882 u8 index;
883
884 for (index = 0; index < SCI_MAX_PHYS; index++) {
885 iphy = &ihost->phys[index];
886 state = iphy->sm.current_state_id;
887
888 if (!phy_get_non_dummy_port(iphy))
889 continue;
890
891 /* The controller start operation is complete iff:
892 * - all links have been given an opportunity to start
893 * - have no indication of a connected device
894 * - have an indication of a connected device and it has
895 * finished the link training process.
896 */
897 if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
898 (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
899 (iphy->is_in_link_training == true && is_phy_starting(iphy))) {
900 is_controller_start_complete = false;
901 break;
902 }
903 }
904
905 /*
906 * The controller has successfully finished the start process.
907 * Inform the SCI Core user and transition to the READY state. */
908 if (is_controller_start_complete == true) {
909 sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
910 sci_del_timer(&ihost->phy_timer);
911 ihost->phy_startup_timer_pending = false;
912 }
913 } else {
914 iphy = &ihost->phys[ihost->next_phy_to_start];
915
916 if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
917 if (phy_get_non_dummy_port(iphy) == NULL) {
918 ihost->next_phy_to_start++;
919
 920 /* Caution: recursion ahead, be forewarned.
 921 *
 922 * The PHY was never added to a PORT in MPC mode,
 923 * so start the next phy in sequence. This phy
 924 * will never go link up and will not draw power;
 925 * the OEM parameters either configured the phy
 926 * incorrectly for the PORT or it was never
 927 * assigned to a PORT.
928 */
929 return sci_controller_start_next_phy(ihost);
930 }
931 }
932
933 status = sci_phy_start(iphy);
934
935 if (status == SCI_SUCCESS) {
936 sci_mod_timer(&ihost->phy_timer,
937 SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT);
938 ihost->phy_startup_timer_pending = true;
939 } else {
940 dev_warn(&ihost->pdev->dev,
941 "%s: Controller start operation failed "
942 "to start phy %d because of status "
943 "%d.\n",
944 __func__,
945 ihost->phys[ihost->next_phy_to_start].phy_index,
946 status);
947 }
948
949 ihost->next_phy_to_start++;
950 }
951
952 return status;
953}
954
955static void phy_startup_timeout(unsigned long data)
956{
957 struct sci_timer *tmr = (struct sci_timer *)data;
958 struct isci_host *ihost = container_of(tmr, typeof(*ihost), phy_timer);
959 unsigned long flags;
960 enum sci_status status;
961
962 spin_lock_irqsave(&ihost->scic_lock, flags);
963
964 if (tmr->cancel)
965 goto done;
966
967 ihost->phy_startup_timer_pending = false;
968
969 do {
970 status = sci_controller_start_next_phy(ihost);
971 } while (status != SCI_SUCCESS);
972
973done:
974 spin_unlock_irqrestore(&ihost->scic_lock, flags);
975}
976
977static u16 isci_tci_active(struct isci_host *ihost)
978{
979 return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
980}
981
982static enum sci_status sci_controller_start(struct isci_host *ihost,
983 u32 timeout)
984{
985 enum sci_status result;
986 u16 index;
987
988 if (ihost->sm.current_state_id != SCIC_INITIALIZED) {
989 dev_warn(&ihost->pdev->dev,
990 "SCIC Controller start operation requested in "
991 "invalid state\n");
992 return SCI_FAILURE_INVALID_STATE;
993 }
994
995 /* Build the TCi free pool */
996 BUILD_BUG_ON(SCI_MAX_IO_REQUESTS > 1 << sizeof(ihost->tci_pool[0]) * 8);
997 ihost->tci_head = 0;
998 ihost->tci_tail = 0;
999 for (index = 0; index < ihost->task_context_entries; index++)
1000 isci_tci_free(ihost, index);
1001
1002 /* Build the RNi free pool */
1003 sci_remote_node_table_initialize(&ihost->available_remote_nodes,
1004 ihost->remote_node_entries);
1005
1006 /*
1007 * Before anything else, let's make sure we will not be
1008 * interrupted by the hardware.
1009 */
1010 sci_controller_disable_interrupts(ihost);
1011
1012 /* Enable the port task scheduler */
1013 sci_controller_enable_port_task_scheduler(ihost);
1014
1015 /* Assign all the task entries to the ihost physical function */
1016 sci_controller_assign_task_entries(ihost);
1017
1018 /* Now initialize the completion queue */
1019 sci_controller_initialize_completion_queue(ihost);
1020
1021 /* Initialize the unsolicited frame queue for use */
1022 sci_controller_initialize_unsolicited_frame_queue(ihost);
1023
1024 /* Start all of the ports on this controller */
1025 for (index = 0; index < ihost->logical_port_entries; index++) {
1026 struct isci_port *iport = &ihost->ports[index];
1027
1028 result = sci_port_start(iport);
1029 if (result)
1030 return result;
1031 }
1032
1033 sci_controller_start_next_phy(ihost);
1034
1035 sci_mod_timer(&ihost->timer, timeout);
1036
1037 sci_change_state(&ihost->sm, SCIC_STARTING);
1038
1039 return SCI_SUCCESS;
1040}
1041
1042void isci_host_scan_start(struct Scsi_Host *shost)
1043{
1044 struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
1045 unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost);
1046
1047 set_bit(IHOST_START_PENDING, &ihost->flags);
1048
1049 spin_lock_irq(&ihost->scic_lock);
1050 sci_controller_start(ihost, tmo);
1051 sci_controller_enable_interrupts(ihost);
1052 spin_unlock_irq(&ihost->scic_lock);
1053}
1054
1055static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status)
1056{
1057 isci_host_change_state(ihost, isci_stopped);
1058 sci_controller_disable_interrupts(ihost);
1059 clear_bit(IHOST_STOP_PENDING, &ihost->flags);
1060 wake_up(&ihost->eventq);
1061}
1062
1063static void sci_controller_completion_handler(struct isci_host *ihost)
1064{
1065 /* Empty out the completion queue */
1066 if (sci_controller_completion_queue_has_entries(ihost))
1067 sci_controller_process_completions(ihost);
1068
1069 /* Clear the interrupt and enable all interrupts again */
1070 writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
1071 /* Could we write the value of SMU_ISR_COMPLETION? */
1072 writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
1073 writel(0, &ihost->smu_registers->interrupt_mask);
1074}
1075
1076/**
1077 * isci_host_completion_routine() - This function is the delayed service
1078 * routine that calls the sci core library's completion handler. It's
1079 * scheduled as a tasklet from the interrupt service routine when interrupts
1080 * are in use, or set as the timeout function in polled mode.
1081 * @data: This parameter specifies the ISCI host object
1082 *
1083 */
1084static void isci_host_completion_routine(unsigned long data)
1085{
1086 struct isci_host *ihost = (struct isci_host *)data;
1087 struct list_head completed_request_list;
1088 struct list_head errored_request_list;
1089 struct list_head *current_position;
1090 struct list_head *next_position;
1091 struct isci_request *request;
1092 struct isci_request *next_request;
1093 struct sas_task *task;
1094
1095 INIT_LIST_HEAD(&completed_request_list);
1096 INIT_LIST_HEAD(&errored_request_list);
1097
1098 spin_lock_irq(&ihost->scic_lock);
1099
1100 sci_controller_completion_handler(ihost);
1101
1102 /* Take the lists of completed I/Os from the host. */
1103
1104 list_splice_init(&ihost->requests_to_complete,
1105 &completed_request_list);
1106
1107 /* Take the list of errored I/Os from the host. */
1108 list_splice_init(&ihost->requests_to_errorback,
1109 &errored_request_list);
1110
1111 spin_unlock_irq(&ihost->scic_lock);
1112
1113 /* Process any completions in the lists. */
1114 list_for_each_safe(current_position, next_position,
1115 &completed_request_list) {
1116
1117 request = list_entry(current_position, struct isci_request,
1118 completed_node);
1119 task = isci_request_access_task(request);
1120
1121 /* Normal notification (task_done) */
1122 dev_dbg(&ihost->pdev->dev,
1123 "%s: Normal - request/task = %p/%p\n",
1124 __func__,
1125 request,
1126 task);
1127
1128 /* Return the task to libsas */
1129 if (task != NULL) {
1130
1131 task->lldd_task = NULL;
1132 if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1133
1134 /* If the task is already in the abort path,
1135 * the task_done callback cannot be called.
1136 */
1137 task->task_done(task);
1138 }
1139 }
1140
1141 spin_lock_irq(&ihost->scic_lock);
1142 isci_free_tag(ihost, request->io_tag);
1143 spin_unlock_irq(&ihost->scic_lock);
1144 }
1145 list_for_each_entry_safe(request, next_request, &errored_request_list,
1146 completed_node) {
1147
1148 task = isci_request_access_task(request);
1149
1150 /* Use sas_task_abort */
1151 dev_warn(&ihost->pdev->dev,
1152 "%s: Error - request/task = %p/%p\n",
1153 __func__,
1154 request,
1155 task);
1156
1157 if (task != NULL) {
1158
1159 /* Put the task into the abort path if it's not there
1160 * already.
1161 */
1162 if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED))
1163 sas_task_abort(task);
1164
1165 } else {
1166 /* This is a case where the request has completed with a
1167 * status such that it needed further target servicing,
1168 * but the sas_task reference has already been removed
1169 * from the request. Since it was errored, it was not
1170 * being aborted, so there is nothing to do except free
1171 * it.
1172 */
1173
1174 spin_lock_irq(&ihost->scic_lock);
1175 /* Remove the request from the remote device's list
1176 * of pending requests.
1177 */
1178 list_del_init(&request->dev_node);
1179 isci_free_tag(ihost, request->io_tag);
1180 spin_unlock_irq(&ihost->scic_lock);
1181 }
1182 }
1183
1184}
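/*
 * For illustration (the interrupt handlers live elsewhere in this driver):
 * an ISR that observes completions typically defers to the routine above
 * with something like
 *
 *	tasklet_schedule(&ihost->completion_tasklet);
 *
 * which keeps the list walking and the task_done() callbacks out of
 * hard-irq context, per the kernel-doc comment above.
 */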
1185
1186/**
1187 * sci_controller_stop() - This method will stop an individual controller
1188 * object. This method will invoke the associated user callback upon
1189 * completion. The completion callback is called when the following
1190 * conditions are met: 1) the method return status is SCI_SUCCESS, and
1191 * 2) the controller has been quiesced. This method will ensure that all
1192 * IO requests are quiesced, phys are stopped, and all additional operation
1193 * by the hardware is halted.
1194 * @ihost: the handle to the controller object to stop.
1195 * @timeout: This parameter specifies the number of milliseconds in which the
1196 * stop operation should complete.
1197 *
1198 * The controller must be in the READY state. Indicate if the controller
1199 * stop method succeeded or failed in some way. SCI_SUCCESS if the stop
1200 * operation successfully began. SCI_FAILURE_INVALID_STATE if the
1201 * controller is not in the READY state (e.g. it has already been stopped
1202 * or was never started).
1203 */
1204static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
1205{
1206 if (ihost->sm.current_state_id != SCIC_READY) {
1207 dev_warn(&ihost->pdev->dev,
1208 "SCIC Controller stop operation requested in "
1209 "invalid state\n");
1210 return SCI_FAILURE_INVALID_STATE;
1211 }
1212
1213 sci_mod_timer(&ihost->timer, timeout);
1214 sci_change_state(&ihost->sm, SCIC_STOPPING);
1215 return SCI_SUCCESS;
1216}
1217
1218/**
1219 * sci_controller_reset() - This method will reset the supplied core
1220 * controller regardless of the state of said controller. This operation is
1221 * considered destructive. In other words, all current operations are wiped
1222 * out. No IO completions for outstanding devices occur. Outstanding IO
1223 * requests are not aborted or completed at the actual remote device.
1224 * @ihost: the handle to the controller object to reset.
1225 *
1226 * Indicate if the controller reset method succeeded or failed in some way.
1227 * SCI_SUCCESS if the reset operation successfully started, or
1228 * SCI_FAILURE_INVALID_STATE if the controller's state does not permit a reset.
1229 */
1230static enum sci_status sci_controller_reset(struct isci_host *ihost)
1231{
1232 switch (ihost->sm.current_state_id) {
1233 case SCIC_RESET:
1234 case SCIC_READY:
1235 case SCIC_STOPPED:
1236 case SCIC_FAILED:
1237 /*
1238 * The reset operation is not a graceful cleanup, just
1239 * perform the state transition.
1240 */
1241 sci_change_state(&ihost->sm, SCIC_RESETTING);
1242 return SCI_SUCCESS;
1243 default:
1244 dev_warn(&ihost->pdev->dev,
1245 "SCIC Controller reset operation requested in "
1246 "invalid state\n");
1247 return SCI_FAILURE_INVALID_STATE;
1248 }
1249}
1250
1251void isci_host_deinit(struct isci_host *ihost)
1252{
1253 int i;
1254
1255 isci_host_change_state(ihost, isci_stopping);
1256 for (i = 0; i < SCI_MAX_PORTS; i++) {
1257 struct isci_port *iport = &ihost->ports[i];
1258 struct isci_remote_device *idev, *d;
1259
1260 list_for_each_entry_safe(idev, d, &iport->remote_dev_list, node) {
1261 if (test_bit(IDEV_ALLOCATED, &idev->flags))
1262 isci_remote_device_stop(ihost, idev);
1263 }
1264 }
1265
1266 set_bit(IHOST_STOP_PENDING, &ihost->flags);
1267
1268 spin_lock_irq(&ihost->scic_lock);
1269 sci_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT);
1270 spin_unlock_irq(&ihost->scic_lock);
1271
1272 wait_for_stop(ihost);
1273 sci_controller_reset(ihost);
1274
1275 /* Cancel any/all outstanding port timers */
1276 for (i = 0; i < ihost->logical_port_entries; i++) {
1277 struct isci_port *iport = &ihost->ports[i];
1278 del_timer_sync(&iport->timer.timer);
1279 }
1280
1281 /* Cancel any/all outstanding phy timers */
1282 for (i = 0; i < SCI_MAX_PHYS; i++) {
1283 struct isci_phy *iphy = &ihost->phys[i];
1284 del_timer_sync(&iphy->sata_timer.timer);
1285 }
1286
1287 del_timer_sync(&ihost->port_agent.timer.timer);
1288
1289 del_timer_sync(&ihost->power_control.timer.timer);
1290
1291 del_timer_sync(&ihost->timer.timer);
1292
1293 del_timer_sync(&ihost->phy_timer.timer);
1294}
1295
1296static void __iomem *scu_base(struct isci_host *isci_host)
1297{
1298 struct pci_dev *pdev = isci_host->pdev;
1299 int id = isci_host->id;
1300
1301 return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id;
1302}
1303
1304static void __iomem *smu_base(struct isci_host *isci_host)
1305{
1306 struct pci_dev *pdev = isci_host->pdev;
1307 int id = isci_host->id;
1308
1309 return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
1310}
1311
1312static void isci_user_parameters_get(struct sci_user_parameters *u)
1313{
1314 int i;
1315
1316 for (i = 0; i < SCI_MAX_PHYS; i++) {
1317 struct sci_phy_user_params *u_phy = &u->phys[i];
1318
1319 u_phy->max_speed_generation = phy_gen;
1320
1321 /* we are not exporting these for now */
1322 u_phy->align_insertion_frequency = 0x7f;
1323 u_phy->in_connection_align_insertion_frequency = 0xff;
1324 u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
1325 }
1326
1327 u->stp_inactivity_timeout = stp_inactive_to;
1328 u->ssp_inactivity_timeout = ssp_inactive_to;
1329 u->stp_max_occupancy_timeout = stp_max_occ_to;
1330 u->ssp_max_occupancy_timeout = ssp_max_occ_to;
1331 u->no_outbound_task_timeout = no_outbound_task_to;
1332 u->max_number_concurrent_device_spin_up = max_concurr_spinup;
1333}
1334
1335static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
1336{
1337 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1338
1339 sci_change_state(&ihost->sm, SCIC_RESET);
1340}
1341
1342static inline void sci_controller_starting_state_exit(struct sci_base_state_machine *sm)
1343{
1344 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1345
1346 sci_del_timer(&ihost->timer);
1347}
1348
1349#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853
1350#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280
1351#define INTERRUPT_COALESCE_TIMEOUT_MAX_US 2700000
1352#define INTERRUPT_COALESCE_NUMBER_MAX 256
1353#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN 7
1354#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX 28
1355
1356/**
1357 * sci_controller_set_interrupt_coalescence() - This method allows the user to
1358 * configure the interrupt coalescence.
1359 * @ihost: This parameter represents the handle to the controller object
1360 * whose interrupt coalesce register is overridden.
1361 * @coalesce_number: Used to control the number of entries in the Completion
1362 * Queue before an interrupt is generated. If the number of entries exceeds
1363 * this number, an interrupt will be generated. The valid range of the input
1364 * is [0, 256]. A setting of 0 results in coalescing being disabled.
1365 * @coalesce_timeout: Timeout value in microseconds. The valid range of the
1366 * input is [0, 2700000]. A setting of 0 is allowed and results in no
1367 * interrupt coalescing timeout.
1368 *
1369 * Indicate if the user successfully set the interrupt coalesce parameters.
1370 * SCI_SUCCESS The user successfully updated the interrupt coalescence.
1371 * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range.
1372 */
1373static enum sci_status
1374sci_controller_set_interrupt_coalescence(struct isci_host *ihost,
1375 u32 coalesce_number,
1376 u32 coalesce_timeout)
1377{
1378 u8 timeout_encode = 0;
1379 u32 min = 0;
1380 u32 max = 0;
1381
1382 /* Check if the input parameters fall in the range. */
1383 if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX)
1384 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
1385
1386 /*
1387 * Defined encoding for interrupt coalescing timeout:
1388 * Value Min Max Units
1389 * ----- --- --- -----
1390 * 0 - - Disabled
1391 * 1 13.3 20.0 ns
1392 * 2 26.7 40.0
1393 * 3 53.3 80.0
1394 * 4 106.7 160.0
1395 * 5 213.3 320.0
1396 * 6 426.7 640.0
1397 * 7 853.3 1280.0
1398 * 8 1.7 2.6 us
1399 * 9 3.4 5.1
1400 * 10 6.8 10.2
1401 * 11 13.7 20.5
1402 * 12 27.3 41.0
1403 * 13 54.6 81.9
1404 * 14 109.2 163.8
1405 * 15 218.5 327.7
1406 * 16 436.9 655.4
1407 * 17 873.8 1310.7
1408 * 18 1.7 2.6 ms
1409 * 19 3.5 5.2
1410 * 20 7.0 10.5
1411 * 21 14.0 21.0
1412 * 22 28.0 41.9
1413 * 23 55.9 83.9
1414 * 24 111.8 167.8
1415 * 25 223.7 335.5
1416 * 26 447.4 671.1
1417 * 27 894.8 1342.2
1418 * 28 1.8 2.7 s
1419 * Others Undefined */
1420
1421 /*
1422 * Use the table above to decide the encode of interrupt coalescing timeout
1423 * value for register writing. */
1424 if (coalesce_timeout == 0)
1425 timeout_encode = 0;
1426 else {
1427 /* make the timeout value in units of 10 ns. */
1428 coalesce_timeout = coalesce_timeout * 100;
1429 min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10;
1430 max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10;
1431
1432 /* get the encode of timeout for register writing. */
1433 for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN;
1434 timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX;
1435 timeout_encode++) {
1436 if (min <= coalesce_timeout && max > coalesce_timeout)
1437 break;
1438 else if (coalesce_timeout >= max && coalesce_timeout < min * 2
1439 && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) {
1440 if ((coalesce_timeout - max) < (2 * min - coalesce_timeout))
1441 break;
1442 else {
1443 timeout_encode++;
1444 break;
1445 }
1446 } else {
1447 max = max * 2;
1448 min = min * 2;
1449 }
1450 }
1451
1452 if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1)
1453 /* the value is out of range. */
1454 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
1455 }
1456
1457 writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) |
1458 SMU_ICC_GEN_VAL(TIMER, timeout_encode),
1459 &ihost->smu_registers->interrupt_coalesce_control);
1460
1461
1462 ihost->interrupt_coalesce_number = (u16)coalesce_number;
1463 ihost->interrupt_coalesce_timeout = coalesce_timeout / 100;
1464
1465 return SCI_SUCCESS;
1466}
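/*
 * For illustration (not driver code): tracing the encoding loop above for
 * the defaults programmed in sci_controller_ready_state_enter() below,
 * coalesce_number = 0x10 and coalesce_timeout = 250 us. The timeout is
 * first scaled to 10 ns units (250 * 100 = 25000). The search starts at
 * encode 7 with min = 85 and max = 128 (10 ns units) and doubles both
 * bounds each iteration; min <= 25000 < max first holds after eight
 * doublings, at min = 21760 and max = 32768, so timeout_encode = 15.
 * That matches the 218.5 us - 327.7 us row of the table above.
 */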
1467
1468
1469static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
1470{
1471 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1472
1473 /* set the default interrupt coalescence number and timeout value. */
1474 sci_controller_set_interrupt_coalescence(ihost, 0x10, 250);
1475}
1476
1477static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
1478{
1479 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1480
1481 /* disable interrupt coalescence. */
1482 sci_controller_set_interrupt_coalescence(ihost, 0, 0);
1483}
1484
1485static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
1486{
1487 u32 index;
1488 enum sci_status status;
1489 enum sci_status phy_status;
1490
1491 status = SCI_SUCCESS;
1492
1493 for (index = 0; index < SCI_MAX_PHYS; index++) {
1494 phy_status = sci_phy_stop(&ihost->phys[index]);
1495
1496 if (phy_status != SCI_SUCCESS &&
1497 phy_status != SCI_FAILURE_INVALID_STATE) {
1498 status = SCI_FAILURE;
1499
1500 dev_warn(&ihost->pdev->dev,
1501 "%s: Controller stop operation failed to stop "
1502 "phy %d because of status %d.\n",
1503 __func__,
1504 ihost->phys[index].phy_index, phy_status);
1505 }
1506 }
1507
1508 return status;
1509}
1510
1511static enum sci_status sci_controller_stop_ports(struct isci_host *ihost)
1512{
1513 u32 index;
1514 enum sci_status port_status;
1515 enum sci_status status = SCI_SUCCESS;
1516
1517 for (index = 0; index < ihost->logical_port_entries; index++) {
1518 struct isci_port *iport = &ihost->ports[index];
1519
1520 port_status = sci_port_stop(iport);
1521
1522 if ((port_status != SCI_SUCCESS) &&
1523 (port_status != SCI_FAILURE_INVALID_STATE)) {
1524 status = SCI_FAILURE;
1525
1526 dev_warn(&ihost->pdev->dev,
1527 "%s: Controller stop operation failed to "
1528 "stop port %d because of status %d.\n",
1529 __func__,
1530 iport->logical_port_index,
1531 port_status);
1532 }
1533 }
1534
1535 return status;
1536}
1537
1538static enum sci_status sci_controller_stop_devices(struct isci_host *ihost)
1539{
1540 u32 index;
1541 enum sci_status status;
1542 enum sci_status device_status;
1543
1544 status = SCI_SUCCESS;
1545
1546 for (index = 0; index < ihost->remote_node_entries; index++) {
1547 if (ihost->device_table[index] != NULL) {
1548 /* @todo What timeout value do we want to provide to this request? */
1549 device_status = sci_remote_device_stop(ihost->device_table[index], 0);
1550
1551 if ((device_status != SCI_SUCCESS) &&
1552 (device_status != SCI_FAILURE_INVALID_STATE)) {
1553 dev_warn(&ihost->pdev->dev,
1554 "%s: Controller stop operation failed "
1555 "to stop device 0x%p because of "
1556 "status %d.\n",
1557 __func__,
1558 ihost->device_table[index], device_status);
1559 }
1560 }
1561 }
1562
1563 return status;
1564}
1565
1566static void sci_controller_stopping_state_enter(struct sci_base_state_machine *sm)
1567{
1568 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1569
1570 /* Stop all of the components for this controller */
1571 sci_controller_stop_phys(ihost);
1572 sci_controller_stop_ports(ihost);
1573 sci_controller_stop_devices(ihost);
1574}
1575
1576static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm)
1577{
1578 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1579
1580 sci_del_timer(&ihost->timer);
1581}
1582
1583static void sci_controller_reset_hardware(struct isci_host *ihost)
1584{
1585 /* Disable interrupts so we don't take any spurious interrupts */
1586 sci_controller_disable_interrupts(ihost);
1587
1588 /* Reset the SCU */
1589 writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control);
1590
1591 /* Delay for 1ms before clearing the CQP and UFQPR. */
1592 udelay(1000);
1593
1594 /* The write to the CQGR clears the CQP */
1595 writel(0x00000000, &ihost->smu_registers->completion_queue_get);
1596
1597 /* The write to the UFQGP clears the UFQPR */
1598 writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
1599}
1600
1601static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm)
1602{
1603 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1604
1605 sci_controller_reset_hardware(ihost);
1606 sci_change_state(&ihost->sm, SCIC_RESET);
1607}
1608
1609static const struct sci_base_state sci_controller_state_table[] = {
1610 [SCIC_INITIAL] = {
1611 .enter_state = sci_controller_initial_state_enter,
1612 },
1613 [SCIC_RESET] = {},
1614 [SCIC_INITIALIZING] = {},
1615 [SCIC_INITIALIZED] = {},
1616 [SCIC_STARTING] = {
1617 .exit_state = sci_controller_starting_state_exit,
1618 },
1619 [SCIC_READY] = {
1620 .enter_state = sci_controller_ready_state_enter,
1621 .exit_state = sci_controller_ready_state_exit,
1622 },
1623 [SCIC_RESETTING] = {
1624 .enter_state = sci_controller_resetting_state_enter,
1625 },
1626 [SCIC_STOPPING] = {
1627 .enter_state = sci_controller_stopping_state_enter,
1628 .exit_state = sci_controller_stopping_state_exit,
1629 },
1630 [SCIC_STOPPED] = {},
1631 [SCIC_FAILED] = {}
1632};
1633
1634static void sci_controller_set_default_config_parameters(struct isci_host *ihost)
1635{
1636 /* these defaults are overridden by the platform / firmware */
1637 u16 index;
1638
1639 /* Default to APC mode. */
1640 ihost->oem_parameters.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
1641
1642 /* Default to one concurrent device spin up. */
1643 ihost->oem_parameters.controller.max_concurrent_dev_spin_up = 1;
1644
1645 /* Default to no SSC operation. */
1646 ihost->oem_parameters.controller.do_enable_ssc = false;
1647
1648 /* Initialize all of the port parameter information to narrow ports. */
1649 for (index = 0; index < SCI_MAX_PORTS; index++) {
1650 ihost->oem_parameters.ports[index].phy_mask = 0;
1651 }
1652
1653 /* Initialize all of the phy parameter information. */
1654 for (index = 0; index < SCI_MAX_PHYS; index++) {
1655 /* Default to 6G (i.e. Gen 3) for now. */
1656 ihost->user_parameters.phys[index].max_speed_generation = 3;
1657
1658 /* the frequencies cannot be 0 */
1659 ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f;
1660 ihost->user_parameters.phys[index].in_connection_align_insertion_frequency = 0xff;
1661 ihost->user_parameters.phys[index].notify_enable_spin_up_insertion_frequency = 0x33;
1662
1663 /*
1664 * Previous Vitesse-based expanders had an arbitration issue that
1665 * is worked around by having the upper 32-bits of the SAS address
1666 * hold a value greater than the Vitesse company identifier.
1667 * Hence the use of 0x5FCFFFFF. */
1668 ihost->oem_parameters.phys[index].sas_address.low = 0x1 + ihost->id;
1669 ihost->oem_parameters.phys[index].sas_address.high = 0x5FCFFFFF;
1670 }
1671
1672 ihost->user_parameters.stp_inactivity_timeout = 5;
1673 ihost->user_parameters.ssp_inactivity_timeout = 5;
1674 ihost->user_parameters.stp_max_occupancy_timeout = 5;
1675 ihost->user_parameters.ssp_max_occupancy_timeout = 20;
1676 ihost->user_parameters.no_outbound_task_timeout = 20;
1677}
1678
1679static void controller_timeout(unsigned long data)
1680{
1681 struct sci_timer *tmr = (struct sci_timer *)data;
1682 struct isci_host *ihost = container_of(tmr, typeof(*ihost), timer);
1683 struct sci_base_state_machine *sm = &ihost->sm;
1684 unsigned long flags;
1685
1686 spin_lock_irqsave(&ihost->scic_lock, flags);
1687
1688 if (tmr->cancel)
1689 goto done;
1690
1691 if (sm->current_state_id == SCIC_STARTING)
1692 sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT);
1693 else if (sm->current_state_id == SCIC_STOPPING) {
1694 sci_change_state(sm, SCIC_FAILED);
1695 isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT);
1696 } else /* @todo Now what do we want to do in this case? */
1697 dev_err(&ihost->pdev->dev,
1698 "%s: Controller timer fired when controller was not "
1699 "in a state being timed.\n",
1700 __func__);
1701
1702done:
1703 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1704}
1705
1706static enum sci_status sci_controller_construct(struct isci_host *ihost,
1707 void __iomem *scu_base,
1708 void __iomem *smu_base)
1709{
1710 u8 i;
1711
1712 sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL);
1713
1714 ihost->scu_registers = scu_base;
1715 ihost->smu_registers = smu_base;
1716
1717 sci_port_configuration_agent_construct(&ihost->port_agent);
1718
1719 /* Construct the ports for this controller */
1720 for (i = 0; i < SCI_MAX_PORTS; i++)
1721 sci_port_construct(&ihost->ports[i], i, ihost);
1722 sci_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost); /* i == SCI_MAX_PORTS: the extra dummy port */
1723
1724 /* Construct the phys for this controller */
1725 for (i = 0; i < SCI_MAX_PHYS; i++) {
1726 /* Add all the PHYs to the dummy port */
1727 sci_phy_construct(&ihost->phys[i],
1728 &ihost->ports[SCI_MAX_PORTS], i);
1729 }
1730
1731 ihost->invalid_phy_mask = 0;
1732
1733 sci_init_timer(&ihost->timer, controller_timeout);
1734
1735 /* Initialize the User and OEM parameters to default values. */
1736 sci_controller_set_default_config_parameters(ihost);
1737
1738 return sci_controller_reset(ihost);
1739}
1740
1741int sci_oem_parameters_validate(struct sci_oem_params *oem)
1742{
1743 int i;
1744
1745 for (i = 0; i < SCI_MAX_PORTS; i++)
1746 if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX)
1747 return -EINVAL;
1748
1749 for (i = 0; i < SCI_MAX_PHYS; i++)
1750 if (oem->phys[i].sas_address.high == 0 &&
1751 oem->phys[i].sas_address.low == 0)
1752 return -EINVAL;
1753
1754 if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) {
1755 for (i = 0; i < SCI_MAX_PHYS; i++)
1756 if (oem->ports[i].phy_mask != 0)
1757 return -EINVAL;
1758 } else if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
1759 u8 phy_mask = 0;
1760
1761 for (i = 0; i < SCI_MAX_PHYS; i++)
1762 phy_mask |= oem->ports[i].phy_mask;
1763
1764 if (phy_mask == 0)
1765 return -EINVAL;
1766 } else
1767 return -EINVAL;
1768
1769 if (oem->controller.max_concurrent_dev_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT)
1770 return -EINVAL;
1771
1772 return 0;
1773}
1774
1775static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
1776{
1777 u32 state = ihost->sm.current_state_id;
1778
1779 if (state == SCIC_RESET ||
1780 state == SCIC_INITIALIZING ||
1781 state == SCIC_INITIALIZED) {
1782
1783 if (sci_oem_parameters_validate(&ihost->oem_parameters))
1784 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
1785
1786 return SCI_SUCCESS;
1787 }
1788
1789 return SCI_FAILURE_INVALID_STATE;
1790}
1791
1792static void power_control_timeout(unsigned long data)
1793{
1794 struct sci_timer *tmr = (struct sci_timer *)data;
1795 struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer);
1796 struct isci_phy *iphy;
1797 unsigned long flags;
1798 u8 i;
1799
1800 spin_lock_irqsave(&ihost->scic_lock, flags);
1801
1802 if (tmr->cancel)
1803 goto done;
1804
1805 ihost->power_control.phys_granted_power = 0;
1806
1807 if (ihost->power_control.phys_waiting == 0) {
1808 ihost->power_control.timer_started = false;
1809 goto done;
1810 }
1811
1812 for (i = 0; i < SCI_MAX_PHYS; i++) {
1813
1814 if (ihost->power_control.phys_waiting == 0)
1815 break;
1816
1817 iphy = ihost->power_control.requesters[i];
1818 if (iphy == NULL)
1819 continue;
1820
1821 if (ihost->power_control.phys_granted_power >=
1822 ihost->oem_parameters.controller.max_concurrent_dev_spin_up)
1823 break;
1824
1825 ihost->power_control.requesters[i] = NULL;
1826 ihost->power_control.phys_waiting--;
1827 ihost->power_control.phys_granted_power++;
1828 sci_phy_consume_power_handler(iphy);
1829 }
1830
1831 /*
1832 * It doesn't matter if the power list is empty, we need to start the
1833 * timer in case another phy becomes ready.
1834 */
1835 sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
1836 ihost->power_control.timer_started = true;
1837
1838done:
1839 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1840}
1841
1842void sci_controller_power_control_queue_insert(struct isci_host *ihost,
1843 struct isci_phy *iphy)
1844{
1845 BUG_ON(iphy == NULL);
1846
1847 if (ihost->power_control.phys_granted_power <
1848 ihost->oem_parameters.controller.max_concurrent_dev_spin_up) {
1849 ihost->power_control.phys_granted_power++;
1850 sci_phy_consume_power_handler(iphy);
1851
1852 /*
1853 * stop and start the power_control timer. When the timer fires,
1854 * phys_granted_power will be reset to 0.
1855 */
1856 if (ihost->power_control.timer_started)
1857 sci_del_timer(&ihost->power_control.timer);
1858
1859 sci_mod_timer(&ihost->power_control.timer,
1860 SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
1861 ihost->power_control.timer_started = true;
1862
1863 } else {
1864 /* Add the phy in the waiting list */
1865 ihost->power_control.requesters[iphy->phy_index] = iphy;
1866 ihost->power_control.phys_waiting++;
1867 }
1868}
1869
1870void sci_controller_power_control_queue_remove(struct isci_host *ihost,
1871 struct isci_phy *iphy)
1872{
1873 BUG_ON(iphy == NULL);
1874
1875 if (ihost->power_control.requesters[iphy->phy_index])
1876 ihost->power_control.phys_waiting--;
1877
1878 ihost->power_control.requesters[iphy->phy_index] = NULL;
1879}
1880
1881#define AFE_REGISTER_WRITE_DELAY 10
1882
1883/* Initialize the AFE for this phy index. We need to read the AFE setup from
1884 * the OEM parameters
1885 */
1886static void sci_controller_afe_initialization(struct isci_host *ihost)
1887{
1888 const struct sci_oem_params *oem = &ihost->oem_parameters;
1889 struct pci_dev *pdev = ihost->pdev;
1890 u32 afe_status;
1891 u32 phy_id;
1892
1893 /* Clear DFX Status registers */
1894 writel(0x0081000f, &ihost->scu_registers->afe.afe_dfx_master_control0);
1895 udelay(AFE_REGISTER_WRITE_DELAY);
1896
1897 if (is_b0(pdev)) {
1898 /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
1899 * Timer, PM Stagger Timer */
1900 writel(0x0007BFFF, &ihost->scu_registers->afe.afe_pmsn_master_control2);
1901 udelay(AFE_REGISTER_WRITE_DELAY);
1902 }
1903
1904 /* Configure bias currents to normal */
1905 if (is_a2(pdev))
1906 writel(0x00005A00, &ihost->scu_registers->afe.afe_bias_control);
1907 else if (is_b0(pdev) || is_c0(pdev))
1908 writel(0x00005F00, &ihost->scu_registers->afe.afe_bias_control);
1909
1910 udelay(AFE_REGISTER_WRITE_DELAY);
1911
1912 /* Enable PLL */
1913 if (is_b0(pdev) || is_c0(pdev))
1914 writel(0x80040A08, &ihost->scu_registers->afe.afe_pll_control0);
1915 else
1916 writel(0x80040908, &ihost->scu_registers->afe.afe_pll_control0);
1917
1918 udelay(AFE_REGISTER_WRITE_DELAY);
1919
1920 /* Wait for the PLL to lock */
1921 do {
1922 afe_status = readl(&ihost->scu_registers->afe.afe_common_block_status);
1923 udelay(AFE_REGISTER_WRITE_DELAY);
1924 } while ((afe_status & 0x00001000) == 0);
1925
1926 if (is_a2(pdev)) {
1927 /* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */
1928 writel(0x7bcc96ad, &ihost->scu_registers->afe.afe_pmsn_master_control0);
1929 udelay(AFE_REGISTER_WRITE_DELAY);
1930 }
1931
1932 for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
1933 const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
1934
1935 if (is_b0(pdev)) {
1936 /* Configure transmitter SSC parameters */
1937 writel(0x00030000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
1938 udelay(AFE_REGISTER_WRITE_DELAY);
1939 } else if (is_c0(pdev)) {
1940 /* Configure transmitter SSC parameters */
1941 writel(0x0003000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
1942 udelay(AFE_REGISTER_WRITE_DELAY);
1943
1944 /*
1945 * All defaults, except the Receive Word Alignment/Comma Detect
1946 * Enable....(0xe800) */
1947 writel(0x00004500, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
1948 udelay(AFE_REGISTER_WRITE_DELAY);
1949 } else {
1950 /*
1951 * All defaults, except the Receive Word Alignment/Comma Detect
1952 * Enable....(0xe800) */
1953 writel(0x00004512, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
1954 udelay(AFE_REGISTER_WRITE_DELAY);
1955
1956 writel(0x0050100F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1);
1957 udelay(AFE_REGISTER_WRITE_DELAY);
1958 }
1959
1960 /*
1961 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
1962 * & increase TX int & ext bias 20%....(0xe85c) */
1963 if (is_a2(pdev))
1964 writel(0x000003F0, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
1965 else if (is_b0(pdev)) {
1966 /* Power down TX and RX (PWRDNTX and PWRDNRX) */
1967 writel(0x000003D7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
1968 udelay(AFE_REGISTER_WRITE_DELAY);
1969
1970 /*
1971 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
1972 * & increase TX int & ext bias 20%....(0xe85c) */
1973 writel(0x000003D4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
1974 } else {
1975 writel(0x000001E7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
1976 udelay(AFE_REGISTER_WRITE_DELAY);
1977
1978 /*
1979 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
1980 * & increase TX int & ext bias 20%....(0xe85c) */
1981 writel(0x000001E4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
1982 }
1983 udelay(AFE_REGISTER_WRITE_DELAY);
1984
1985 if (is_a2(pdev)) {
1986 /* Enable TX equalization (0xe824) */
1987 writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
1988 udelay(AFE_REGISTER_WRITE_DELAY);
1989 }
1990
1991 /*
1992 * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On),
1993 * RDD=0x0(RX Detect Enabled) ....(0xe800) */
1994 writel(0x00004100, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
1995 udelay(AFE_REGISTER_WRITE_DELAY);
1996
1997 /* Leave DFE/FFE on */
1998 if (is_a2(pdev))
1999 writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
2000 else if (is_b0(pdev)) {
2001 writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
2002 udelay(AFE_REGISTER_WRITE_DELAY);
2003 /* Enable TX equalization (0xe824) */
2004 writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
2005 } else {
2006 writel(0x0140DF0F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control1);
2007 udelay(AFE_REGISTER_WRITE_DELAY);
2008
2009 writel(0x3F6F103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
2010 udelay(AFE_REGISTER_WRITE_DELAY);
2011
2012 /* Enable TX equalization (0xe824) */
2013 writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
2014 }
2015
2016 udelay(AFE_REGISTER_WRITE_DELAY);
2017
2018 writel(oem_phy->afe_tx_amp_control0,
2019 &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0);
2020 udelay(AFE_REGISTER_WRITE_DELAY);
2021
2022 writel(oem_phy->afe_tx_amp_control1,
2023 &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1);
2024 udelay(AFE_REGISTER_WRITE_DELAY);
2025
2026 writel(oem_phy->afe_tx_amp_control2,
2027 &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2);
2028 udelay(AFE_REGISTER_WRITE_DELAY);
2029
2030 writel(oem_phy->afe_tx_amp_control3,
2031 &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3);
2032 udelay(AFE_REGISTER_WRITE_DELAY);
2033 }
2034
2035 /* Transfer control to the PEs */
2036 writel(0x00010f00, &ihost->scu_registers->afe.afe_dfx_master_control0);
2037 udelay(AFE_REGISTER_WRITE_DELAY);
2038}
2039
2040static void sci_controller_initialize_power_control(struct isci_host *ihost)
2041{
2042 sci_init_timer(&ihost->power_control.timer, power_control_timeout);
2043
2044 memset(ihost->power_control.requesters, 0,
2045 sizeof(ihost->power_control.requesters));
2046
2047 ihost->power_control.phys_waiting = 0;
2048 ihost->power_control.phys_granted_power = 0;
2049}
2050
2051static enum sci_status sci_controller_initialize(struct isci_host *ihost)
2052{
2053 struct sci_base_state_machine *sm = &ihost->sm;
2054 enum sci_status result = SCI_FAILURE;
2055 unsigned long i, state, val;
2056
2057 if (ihost->sm.current_state_id != SCIC_RESET) {
2058 dev_warn(&ihost->pdev->dev,
2059 "SCIC Controller initialize operation requested "
2060 "in invalid state\n");
2061 return SCI_FAILURE_INVALID_STATE;
2062 }
2063
2064 sci_change_state(sm, SCIC_INITIALIZING);
2065
2066 sci_init_timer(&ihost->phy_timer, phy_startup_timeout);
2067
2068 ihost->next_phy_to_start = 0;
2069 ihost->phy_startup_timer_pending = false;
2070
2071 sci_controller_initialize_power_control(ihost);
2072
2073 /*
2074 * There is nothing to do here for B0 since we do not have to
2075 * program the AFE registers.
2076 * @todo The AFE settings are supposed to be correct for the B0 but
2077 * presently they seem to be wrong. */
2078 sci_controller_afe_initialization(ihost);
2079
2080
2081 /* Take the hardware out of reset */
2082 writel(0, &ihost->smu_registers->soft_reset_control);
2083
2084 /*
2085 * @todo Provide meaningful error code for hardware failure
2086 * result = SCI_FAILURE_CONTROLLER_HARDWARE; */
2087 for (i = 100; i >= 1; i--) {
2088 u32 status;
2089
2090 /* Loop until the hardware reports success */
2091 udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
2092 status = readl(&ihost->smu_registers->control_status);
2093
2094 if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED)
2095 break;
2096 }
2097 if (i == 0)
2098 goto out;
2099
2100 /*
2101 * Determine the actual device capacities that the
2102 * hardware will support */
2103 val = readl(&ihost->smu_registers->device_context_capacity);
2104
2105 /* Record the smaller of the two capacity values */
2106 ihost->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS);
2107 ihost->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS);
2108 ihost->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES);
2109
2110 /*
2111 * Make all PEs that are unassigned match up with the
2112 * logical ports
2113 */
2114 for (i = 0; i < ihost->logical_port_entries; i++) {
2115 struct scu_port_task_scheduler_group_registers __iomem
2116 *ptsg = &ihost->scu_registers->peg0.ptsg;
2117
2118 writel(i, &ptsg->protocol_engine[i]);
2119 }
2120
2121 /* Initialize hardware PCI Relaxed ordering in DMA engines */
2122 val = readl(&ihost->scu_registers->sdma.pdma_configuration);
2123 val |= SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
2124 writel(val, &ihost->scu_registers->sdma.pdma_configuration);
2125
2126 val = readl(&ihost->scu_registers->sdma.cdma_configuration);
2127 val |= SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
2128 writel(val, &ihost->scu_registers->sdma.cdma_configuration);
2129
2130 /*
2131 * Initialize the PHYs before the PORTs because the PHY registers
2132 * are accessed during the port initialization.
2133 */
2134 for (i = 0; i < SCI_MAX_PHYS; i++) {
2135 result = sci_phy_initialize(&ihost->phys[i],
2136 &ihost->scu_registers->peg0.pe[i].tl,
2137 &ihost->scu_registers->peg0.pe[i].ll);
2138 if (result != SCI_SUCCESS)
2139 goto out;
2140 }
2141
2142 for (i = 0; i < ihost->logical_port_entries; i++) {
2143 struct isci_port *iport = &ihost->ports[i];
2144
2145 iport->port_task_scheduler_registers = &ihost->scu_registers->peg0.ptsg.port[i];
2146 iport->port_pe_configuration_register = &ihost->scu_registers->peg0.ptsg.protocol_engine[0];
2147 iport->viit_registers = &ihost->scu_registers->peg0.viit[i];
2148 }
2149
2150 result = sci_port_configuration_agent_initialize(ihost, &ihost->port_agent);
2151
2152 out:
2153 /* Advance the controller state machine */
2154 if (result == SCI_SUCCESS)
2155 state = SCIC_INITIALIZED;
2156 else
2157 state = SCIC_FAILED;
2158 sci_change_state(sm, state);
2159
2160 return result;
2161}
2162
2163static enum sci_status sci_user_parameters_set(struct isci_host *ihost,
2164 struct sci_user_parameters *sci_parms)
2165{
2166 u32 state = ihost->sm.current_state_id;
2167
2168 if (state == SCIC_RESET ||
2169 state == SCIC_INITIALIZING ||
2170 state == SCIC_INITIALIZED) {
2171 u16 index;
2172
2173 /*
2174 * Validate the user parameters. If they are not legal, then
2175 * return a failure.
2176 */
2177 for (index = 0; index < SCI_MAX_PHYS; index++) {
2178 struct sci_phy_user_params *user_phy;
2179
2180 user_phy = &sci_parms->phys[index];
2181
2182 if (!((user_phy->max_speed_generation <=
2183 SCIC_SDS_PARM_MAX_SPEED) &&
2184 (user_phy->max_speed_generation >
2185 SCIC_SDS_PARM_NO_SPEED)))
2186 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2187
2188 if (user_phy->in_connection_align_insertion_frequency <
2189 3)
2190 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2191
2192 if ((user_phy->in_connection_align_insertion_frequency <
2193 3) ||
2194 (user_phy->align_insertion_frequency == 0) ||
2195 (user_phy->
2196 notify_enable_spin_up_insertion_frequency ==
2197 0))
2198 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2199 }
2200
2201 if ((sci_parms->stp_inactivity_timeout == 0) ||
2202 (sci_parms->ssp_inactivity_timeout == 0) ||
2203 (sci_parms->stp_max_occupancy_timeout == 0) ||
2204 (sci_parms->ssp_max_occupancy_timeout == 0) ||
2205 (sci_parms->no_outbound_task_timeout == 0))
2206 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2207
2208 memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms));
2209
2210 return SCI_SUCCESS;
2211 }
2212
2213 return SCI_FAILURE_INVALID_STATE;
2214}
2215
2216static int sci_controller_mem_init(struct isci_host *ihost)
2217{
2218 struct device *dev = &ihost->pdev->dev;
2219 dma_addr_t dma;
2220 size_t size;
2221 int err;
2222
2223 size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32);
2224 ihost->completion_queue = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
2225 if (!ihost->completion_queue)
2226 return -ENOMEM;
2227
2228 writel(lower_32_bits(dma), &ihost->smu_registers->completion_queue_lower);
2229 writel(upper_32_bits(dma), &ihost->smu_registers->completion_queue_upper);
2230
2231 size = ihost->remote_node_entries * sizeof(union scu_remote_node_context);
2232 ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma,
2233 GFP_KERNEL);
2234 if (!ihost->remote_node_context_table)
2235 return -ENOMEM;
2236
2237 writel(lower_32_bits(dma), &ihost->smu_registers->remote_node_context_lower);
2238 writel(upper_32_bits(dma), &ihost->smu_registers->remote_node_context_upper);
2239
2240 size = ihost->task_context_entries * sizeof(struct scu_task_context);
2241 ihost->task_context_table = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
2242 if (!ihost->task_context_table)
2243 return -ENOMEM;
2244
2245 ihost->task_context_dma = dma;
2246 writel(lower_32_bits(dma), &ihost->smu_registers->host_task_table_lower);
2247 writel(upper_32_bits(dma), &ihost->smu_registers->host_task_table_upper);
2248
2249 err = sci_unsolicited_frame_control_construct(ihost);
2250 if (err)
2251 return err;
2252
2253 /*
2254 * Inform the silicon as to the location of the UF headers and
2255 * address table.
2256 */
2257 writel(lower_32_bits(ihost->uf_control.headers.physical_address),
2258 &ihost->scu_registers->sdma.uf_header_base_address_lower);
2259 writel(upper_32_bits(ihost->uf_control.headers.physical_address),
2260 &ihost->scu_registers->sdma.uf_header_base_address_upper);
2261
2262 writel(lower_32_bits(ihost->uf_control.address_table.physical_address),
2263 &ihost->scu_registers->sdma.uf_address_table_lower);
2264 writel(upper_32_bits(ihost->uf_control.address_table.physical_address),
2265 &ihost->scu_registers->sdma.uf_address_table_upper);
2266
2267 return 0;
2268}
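/*
 * Note that the dmam_alloc_coherent() allocations above are device-managed:
 * they are released automatically when the PCI device is unbound, which is
 * why sci_controller_mem_init() has no matching free path.
 */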
2269
2270int isci_host_init(struct isci_host *ihost)
2271{
2272 int err = 0, i;
2273 enum sci_status status;
2274 struct sci_user_parameters sci_user_params;
2275 struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
2276
2277 spin_lock_init(&ihost->state_lock);
2278 spin_lock_init(&ihost->scic_lock);
2279 init_waitqueue_head(&ihost->eventq);
2280
2281 isci_host_change_state(ihost, isci_starting);
2282
2283 status = sci_controller_construct(ihost, scu_base(ihost),
2284 smu_base(ihost));
2285
2286 if (status != SCI_SUCCESS) {
2287 dev_err(&ihost->pdev->dev,
2288 "%s: sci_controller_construct failed - status = %x\n",
2289 __func__,
2290 status);
2291 return -ENODEV;
2292 }
2293
2294 ihost->sas_ha.dev = &ihost->pdev->dev;
2295 ihost->sas_ha.lldd_ha = ihost;
2296
2297 /*
2298 * grab initial values stored in the controller object for OEM and USER
2299 * parameters
2300 */
2301 isci_user_parameters_get(&sci_user_params);
2302 status = sci_user_parameters_set(ihost, &sci_user_params);
2303 if (status != SCI_SUCCESS) {
2304 dev_warn(&ihost->pdev->dev,
2305 "%s: sci_user_parameters_set failed\n",
2306 __func__);
2307 return -ENODEV;
2308 }
2309
2310 /* grab any OEM parameters specified in orom */
2311 if (pci_info->orom) {
2312 status = isci_parse_oem_parameters(&ihost->oem_parameters,
2313 pci_info->orom,
2314 ihost->id);
2315 if (status != SCI_SUCCESS) {
2316 dev_warn(&ihost->pdev->dev,
2317 "parsing firmware oem parameters failed\n");
2318 return -EINVAL;
2319 }
2320 }
2321
2322 status = sci_oem_parameters_set(ihost);
2323 if (status != SCI_SUCCESS) {
2324 dev_warn(&ihost->pdev->dev,
2325 "%s: sci_oem_parameters_set failed\n",
2326 __func__);
2327 return -ENODEV;
2328 }
2329
2330 tasklet_init(&ihost->completion_tasklet,
2331 isci_host_completion_routine, (unsigned long)ihost);
2332
2333 INIT_LIST_HEAD(&ihost->requests_to_complete);
2334 INIT_LIST_HEAD(&ihost->requests_to_errorback);
2335
2336 spin_lock_irq(&ihost->scic_lock);
2337 status = sci_controller_initialize(ihost);
2338 spin_unlock_irq(&ihost->scic_lock);
2339 if (status != SCI_SUCCESS) {
2340 dev_warn(&ihost->pdev->dev,
2341 "%s: sci_controller_initialize failed -"
2342 " status = 0x%x\n",
2343 __func__, status);
2344 return -ENODEV;
2345 }
2346
2347 err = sci_controller_mem_init(ihost);
2348 if (err)
2349 return err;
2350
2351 for (i = 0; i < SCI_MAX_PORTS; i++)
2352 isci_port_init(&ihost->ports[i], ihost, i);
2353
2354 for (i = 0; i < SCI_MAX_PHYS; i++)
2355 isci_phy_init(&ihost->phys[i], ihost, i);
2356
2357 for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
2358 struct isci_remote_device *idev = &ihost->devices[i];
2359
2360 INIT_LIST_HEAD(&idev->reqs_in_process);
2361 INIT_LIST_HEAD(&idev->node);
2362 }
2363
2364 for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
2365 struct isci_request *ireq;
2366 dma_addr_t dma;
2367
2368 ireq = dmam_alloc_coherent(&ihost->pdev->dev,
2369 sizeof(struct isci_request), &dma,
2370 GFP_KERNEL);
2371 if (!ireq)
2372 return -ENOMEM;
2373
2374 ireq->tc = &ihost->task_context_table[i];
2375 ireq->owning_controller = ihost;
2376 spin_lock_init(&ireq->state_lock);
2377 ireq->request_daddr = dma;
2378 ireq->isci_host = ihost;
2379 ihost->reqs[i] = ireq;
2380 }
2381
2382 return 0;
2383}
2384
2385void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport,
2386 struct isci_phy *iphy)
2387{
2388 switch (ihost->sm.current_state_id) {
2389 case SCIC_STARTING:
2390 sci_del_timer(&ihost->phy_timer);
2391 ihost->phy_startup_timer_pending = false;
2392 ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
2393 iport, iphy);
2394 sci_controller_start_next_phy(ihost);
2395 break;
2396 case SCIC_READY:
2397 ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
2398 iport, iphy);
2399 break;
2400 default:
2401 dev_dbg(&ihost->pdev->dev,
2402 "%s: SCIC Controller linkup event from phy %d in "
2403 "unexpected state %d\n", __func__, iphy->phy_index,
2404 ihost->sm.current_state_id);
2405 }
2406}
2407
2408void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
2409 struct isci_phy *iphy)
2410{
2411 switch (ihost->sm.current_state_id) {
2412 case SCIC_STARTING:
2413 case SCIC_READY:
2414 ihost->port_agent.link_down_handler(ihost, &ihost->port_agent,
2415 iport, iphy);
2416 break;
2417 default:
2418 dev_dbg(&ihost->pdev->dev,
2419 "%s: SCIC Controller linkdown event from phy %d in "
2420 "unexpected state %d\n",
2421 __func__,
2422 iphy->phy_index,
2423 ihost->sm.current_state_id);
2424 }
2425}
2426
2427static bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost)
2428{
2429 u32 index;
2430
2431 for (index = 0; index < ihost->remote_node_entries; index++) {
2432 if ((ihost->device_table[index] != NULL) &&
2433 (ihost->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING))
2434 return true;
2435 }
2436
2437 return false;
2438}
2439
2440void sci_controller_remote_device_stopped(struct isci_host *ihost,
2441 struct isci_remote_device *idev)
2442{
2443 if (ihost->sm.current_state_id != SCIC_STOPPING) {
2444 dev_dbg(&ihost->pdev->dev,
2445 "SCIC Controller 0x%p remote device stopped event "
2446 "from device 0x%p in unexpected state %d\n",
2447 ihost, idev,
2448 ihost->sm.current_state_id);
2449 return;
2450 }
2451
2452 if (!sci_controller_has_remote_devices_stopping(ihost))
2453 sci_change_state(&ihost->sm, SCIC_STOPPED);
2454}
2455
2456void sci_controller_post_request(struct isci_host *ihost, u32 request)
2457{
2458 dev_dbg(&ihost->pdev->dev, "%s[%d]: %#x\n",
2459 __func__, ihost->id, request);
2460
2461 writel(request, &ihost->smu_registers->post_context_port);
2462}
2463
2464struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag)
2465{
2466 u16 task_index;
2467 u16 task_sequence;
2468
2469 task_index = ISCI_TAG_TCI(io_tag);
2470
2471 if (task_index < ihost->task_context_entries) {
2472 struct isci_request *ireq = ihost->reqs[task_index];
2473
2474 if (test_bit(IREQ_ACTIVE, &ireq->flags)) {
2475 task_sequence = ISCI_TAG_SEQ(io_tag);
2476
2477 if (task_sequence == ihost->io_request_sequence[task_index])
2478 return ireq;
2479 }
2480 }
2481
2482 return NULL;
2483}
2484
2485/**
2486 * This method allocates a remote node index and reserves the remote node
2487 * context space for use. This method can fail if there are no more remote
2488 * node indexes available.
2489 * @ihost: This is the controller object which contains the set of
2490 * free remote node ids
2491 * @idev: This is the device object which is requesting a remote node
2492 * id
2493 * @node_id: This is the remote node id that is assigned to the device if one
2494 * is available
2495 *
2496 * enum sci_status SCI_FAILURE_INSUFFICIENT_RESOURCES if there are no
2497 * remote node indexes available.
2498 */
2499enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
2500 struct isci_remote_device *idev,
2501 u16 *node_id)
2502{
2503 u16 node_index;
2504 u32 remote_node_count = sci_remote_device_node_count(idev);
2505
2506 node_index = sci_remote_node_table_allocate_remote_node(
2507 &ihost->available_remote_nodes, remote_node_count
2508 );
2509
2510 if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
2511 ihost->device_table[node_index] = idev;
2512
2513 *node_id = node_index;
2514
2515 return SCI_SUCCESS;
2516 }
2517
2518 return SCI_FAILURE_INSUFFICIENT_RESOURCES;
2519}
2520
2521void sci_controller_free_remote_node_context(struct isci_host *ihost,
2522 struct isci_remote_device *idev,
2523 u16 node_id)
2524{
2525 u32 remote_node_count = sci_remote_device_node_count(idev);
2526
2527 if (ihost->device_table[node_id] == idev) {
2528 ihost->device_table[node_id] = NULL;
2529
2530 sci_remote_node_table_release_remote_node_index(
2531 &ihost->available_remote_nodes, remote_node_count, node_id
2532 );
2533 }
2534}
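/*
 * For illustration (not driver code): a hypothetical caller pairs the two
 * helpers above. A reservation may span more than one contiguous index
 * (sci_remote_device_node_count() decides how many), which is why both the
 * allocate and free paths take the same device handle.
 *
 *	u16 node_id;
 *
 *	if (sci_controller_allocate_remote_node_context(ihost, idev,
 *							&node_id) == SCI_SUCCESS) {
 *		// program the remote node context at node_id ...
 *		sci_controller_free_remote_node_context(ihost, idev, node_id);
 *	}
 */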
2535
2536void sci_controller_copy_sata_response(void *response_buffer,
2537 void *frame_header,
2538 void *frame_buffer)
2539{
2540 /* XXX type safety? */
2541 memcpy(response_buffer, frame_header, sizeof(u32));
2542
2543 memcpy(response_buffer + sizeof(u32),
2544 frame_buffer,
2545 sizeof(struct dev_to_host_fis) - sizeof(u32));
2546}
2547
2548void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index)
2549{
2550 if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control, frame_index))
2551 writel(ihost->uf_control.get,
2552 &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
2553}
2554
2555void isci_tci_free(struct isci_host *ihost, u16 tci)
2556{
2557 u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1);
2558
2559 ihost->tci_pool[tail] = tci;
2560 ihost->tci_tail = tail + 1;
2561}
2562
2563static u16 isci_tci_alloc(struct isci_host *ihost)
2564{
2565 u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1);
2566 u16 tci = ihost->tci_pool[head];
2567
2568 ihost->tci_head = head + 1;
2569 return tci;
2570}
2571
2572static u16 isci_tci_space(struct isci_host *ihost)
2573{
2574 return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
2575}
2576
2577u16 isci_alloc_tag(struct isci_host *ihost)
2578{
2579 if (isci_tci_space(ihost)) {
2580 u16 tci = isci_tci_alloc(ihost);
2581 u8 seq = ihost->io_request_sequence[tci];
2582
2583 return ISCI_TAG(seq, tci);
2584 }
2585
2586 return SCI_CONTROLLER_INVALID_IO_TAG;
2587}
2588
2589enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag)
2590{
2591 u16 tci = ISCI_TAG_TCI(io_tag);
2592 u16 seq = ISCI_TAG_SEQ(io_tag);
2593
2594 /* prevent tail from passing head */
2595 if (isci_tci_active(ihost) == 0)
2596 return SCI_FAILURE_INVALID_IO_TAG;
2597
2598 if (seq == ihost->io_request_sequence[tci]) {
2599 ihost->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1);
2600
2601 isci_tci_free(ihost, tci);
2602
2603 return SCI_SUCCESS;
2604 }
2605 return SCI_FAILURE_INVALID_IO_TAG;
2606}
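/*
 * For illustration (not driver code): the typical tag lifecycle, run under
 * ihost->scic_lock, combining the helpers above:
 *
 *	u16 tag = isci_alloc_tag(ihost);
 *
 *	if (tag != SCI_CONTROLLER_INVALID_IO_TAG) {
 *		u16 tci = ISCI_TAG_TCI(tag);	// index into ihost->reqs[]
 *		// ... issue the I/O via ihost->reqs[tci] ...
 *		isci_free_tag(ihost, tag);	// advances io_request_sequence
 *	}
 *
 * Because isci_free_tag() bumps io_request_sequence[tci], a stale tag held
 * past free is rejected by both isci_free_tag() (double free) and
 * sci_request_by_tag(), which check the sequence as well as the TCI.
 */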
2607
2608enum sci_status sci_controller_start_io(struct isci_host *ihost,
2609 struct isci_remote_device *idev,
2610 struct isci_request *ireq)
2611{
2612 enum sci_status status;
2613
2614 if (ihost->sm.current_state_id != SCIC_READY) {
2615 dev_warn(&ihost->pdev->dev, "invalid state to start I/O");
2616 return SCI_FAILURE_INVALID_STATE;
2617 }
2618
2619 status = sci_remote_device_start_io(ihost, idev, ireq);
2620 if (status != SCI_SUCCESS)
2621 return status;
2622
2623 set_bit(IREQ_ACTIVE, &ireq->flags);
2624 sci_controller_post_request(ihost, ireq->post_context);
2625 return SCI_SUCCESS;
2626}
2627
2628enum sci_status sci_controller_terminate_request(struct isci_host *ihost,
2629 struct isci_remote_device *idev,
2630 struct isci_request *ireq)
2631{
2632 /* terminate an ongoing (i.e. started) core IO request. This does not
2633 * abort the IO request at the target, but rather removes the IO
2634 * request from the host controller.
2635 */
2636 enum sci_status status;
2637
2638 if (ihost->sm.current_state_id != SCIC_READY) {
2639 dev_warn(&ihost->pdev->dev,
2640 "invalid state to terminate request\n");
2641 return SCI_FAILURE_INVALID_STATE;
2642 }
2643
2644 status = sci_io_request_terminate(ireq);
2645 if (status != SCI_SUCCESS)
2646 return status;
2647
2648 /*
2649 * Utilize the original post context command and OR in the POST_TC_ABORT
2650 * request sub-type.
2651 */
2652 sci_controller_post_request(ihost,
2653 ireq->post_context | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
2654 return SCI_SUCCESS;
2655}
2656
2657/**
2658 * sci_controller_complete_io() - This method will perform core specific
2659 * completion operations for an IO request. After this method is invoked,
2660 * the user should consider the IO request as invalid until it is properly
2661 * reused (i.e. re-constructed).
2662 * @ihost: The handle to the controller object for which to complete the
2663 * IO request.
2664 * @idev: The handle to the remote device object for which to complete
2665 * the IO request.
2666 * @ireq: the handle to the io request object to complete.
2667 */
2668enum sci_status sci_controller_complete_io(struct isci_host *ihost,
2669 struct isci_remote_device *idev,
2670 struct isci_request *ireq)
2671{
2672 enum sci_status status;
2673 u16 index;
2674
2675 switch (ihost->sm.current_state_id) {
2676 case SCIC_STOPPING:
2677 /* XXX: Implement this function */
2678 return SCI_FAILURE;
2679 case SCIC_READY:
2680 status = sci_remote_device_complete_io(ihost, idev, ireq);
2681 if (status != SCI_SUCCESS)
2682 return status;
2683
2684 index = ISCI_TAG_TCI(ireq->io_tag);
2685 clear_bit(IREQ_ACTIVE, &ireq->flags);
2686 return SCI_SUCCESS;
2687 default:
2688 dev_warn(&ihost->pdev->dev, "invalid state to complete I/O\n");
2689 return SCI_FAILURE_INVALID_STATE;
2690 }
2691
2692}
2693
2694enum sci_status sci_controller_continue_io(struct isci_request *ireq)
2695{
2696 struct isci_host *ihost = ireq->owning_controller;
2697
2698 if (ihost->sm.current_state_id != SCIC_READY) {
2699 dev_warn(&ihost->pdev->dev, "invalid state to continue I/O\n");
2700 return SCI_FAILURE_INVALID_STATE;
2701 }
2702
2703 set_bit(IREQ_ACTIVE, &ireq->flags);
2704 sci_controller_post_request(ihost, ireq->post_context);
2705 return SCI_SUCCESS;
2706}
2707
2708/**
2709 * sci_controller_start_task() - This method is called by the SCIC user to
2710 * send/start a framework task management request.
2711 * @controller: the handle to the controller object for which to start the task
2712 * management request.
2713 * @remote_device: the handle to the remote device object for which to start
2714 * the task management request.
2715 * @task_request: the handle to the task request object to start.
2716 */
2717enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
2718 struct isci_remote_device *idev,
2719 struct isci_request *ireq)
2720{
2721 enum sci_status status;
2722
2723 if (ihost->sm.current_state_id != SCIC_READY) {
2724 dev_warn(&ihost->pdev->dev,
2725 "%s: SCIC Controller starting task from invalid "
2726 "state\n",
2727 __func__);
2728 return SCI_TASK_FAILURE_INVALID_STATE;
2729 }
2730
2731 status = sci_remote_device_start_task(ihost, idev, ireq);
2732 switch (status) {
2733 case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS:
2734 set_bit(IREQ_ACTIVE, &ireq->flags);
2735
2736 /*
2737 * We will let the framework know this task request started
2738 * successfully, although the core is still working on starting
2739 * the request (it will post the TC when the RNC is resumed).
2740 */
2741 return SCI_SUCCESS;
2742 case SCI_SUCCESS:
2743 set_bit(IREQ_ACTIVE, &ireq->flags);
2744 sci_controller_post_request(ihost, ireq->post_context);
2745 break;
2746 default:
2747 break;
2748 }
2749
2750 return status;
2751}
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
new file mode 100644
index 000000000000..062101a39f79
--- /dev/null
+++ b/drivers/scsi/isci/host.h
@@ -0,0 +1,542 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55#ifndef _SCI_HOST_H_
56#define _SCI_HOST_H_
57
58#include "remote_device.h"
59#include "phy.h"
60#include "isci.h"
61#include "remote_node_table.h"
62#include "registers.h"
63#include "unsolicited_frame_control.h"
64#include "probe_roms.h"
65
66struct isci_request;
67struct scu_task_context;
68
69
70/**
71 * struct sci_power_control -
72 *
73 * This structure defines the fields for managing power control for direct
74 * attached disk devices.
75 */
76struct sci_power_control {
77 /**
78 * This field is set when the power control timer is running and cleared when
79 * it is not.
80 */
81 bool timer_started;
82
83 /**
84 * Timer to control when the direct attached disks can consume power.
85 */
86 struct sci_timer timer;
87
88 /**
89 * This field is used to keep track of how many phys are put into the
90 * requesters field.
91 */
92 u8 phys_waiting;
93
94 /**
95 * This field is used to keep track of how many phys have been granted permission to consume power.
96 */
97 u8 phys_granted_power;
98
99 /**
100 * This field is an array of phys that we are waiting on. The phys are
101 * directly mapped into requesters via struct sci_phy.phy_index.
102 */
103 struct isci_phy *requesters[SCI_MAX_PHYS];
104
105};
106
107struct sci_port_configuration_agent;
108typedef void (*port_config_fn)(struct isci_host *,
109 struct sci_port_configuration_agent *,
110 struct isci_port *, struct isci_phy *);
111
112struct sci_port_configuration_agent {
113 u16 phy_configured_mask;
114 u16 phy_ready_mask;
115 struct {
116 u8 min_index;
117 u8 max_index;
118 } phy_valid_port_range[SCI_MAX_PHYS];
119 bool timer_pending;
120 port_config_fn link_up_handler;
121 port_config_fn link_down_handler;
122 struct sci_timer timer;
123};
124
125/**
126 * isci_host - primary host/controller object
127 * @timer: timeout start/stop operations
128 * @device_table: rni (hw remote node index) to remote device lookup table
129 * @available_remote_nodes: rni allocator
130 * @power_control: manage device spin up
131 * @io_request_sequence: generation number for tci's (task contexts)
132 * @task_context_table: hw task context table
133 * @remote_node_context_table: hw remote node context table
134 * @completion_queue: hw-producer driver-consumer communication ring
135 * @completion_queue_get: tracks the driver 'head' of the ring to notify hw
136 * @logical_port_entries: min({driver|silicon}-supported-port-count)
137 * @remote_node_entries: min({driver|silicon}-supported-node-count)
138 * @task_context_entries: min({driver|silicon}-supported-task-count)
139 * @phy_timer: phy startup timer
140 * @invalid_phy_mask: if an invalid_link_up notification is reported a bit for
141 * the phy index is set so further notifications are not
142 * made. Once the phy reports link up and is made part of a
143 * port then this bit is cleared.
144
145 */
146struct isci_host {
147 struct sci_base_state_machine sm;
148 /* XXX can we time this externally */
149 struct sci_timer timer;
150 /* XXX drop reference module params directly */
151 struct sci_user_parameters user_parameters;
152 /* XXX no need to be a union */
153 struct sci_oem_params oem_parameters;
154 struct sci_port_configuration_agent port_agent;
155 struct isci_remote_device *device_table[SCI_MAX_REMOTE_DEVICES];
156 struct sci_remote_node_table available_remote_nodes;
157 struct sci_power_control power_control;
158 u8 io_request_sequence[SCI_MAX_IO_REQUESTS];
159 struct scu_task_context *task_context_table;
160 dma_addr_t task_context_dma;
161 union scu_remote_node_context *remote_node_context_table;
162 u32 *completion_queue;
163 u32 completion_queue_get;
164 u32 logical_port_entries;
165 u32 remote_node_entries;
166 u32 task_context_entries;
167 struct sci_unsolicited_frame_control uf_control;
168
169 /* phy startup */
170 struct sci_timer phy_timer;
171 /* XXX kill */
172 bool phy_startup_timer_pending;
173 u32 next_phy_to_start;
174 /* XXX convert to unsigned long and use bitops */
175 u8 invalid_phy_mask;
176
177 /* TODO attempt dynamic interrupt coalescing scheme */
178 u16 interrupt_coalesce_number;
179 u32 interrupt_coalesce_timeout;
180 struct smu_registers __iomem *smu_registers;
181 struct scu_registers __iomem *scu_registers;
182
183 u16 tci_head;
184 u16 tci_tail;
185 u16 tci_pool[SCI_MAX_IO_REQUESTS];
186
187 int id; /* unique within a given pci device */
188 struct isci_phy phys[SCI_MAX_PHYS];
189 struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */
190 struct sas_ha_struct sas_ha;
191
192 spinlock_t state_lock;
193 struct pci_dev *pdev;
194 enum isci_status status;
195 #define IHOST_START_PENDING 0
196 #define IHOST_STOP_PENDING 1
197 unsigned long flags;
198 wait_queue_head_t eventq;
199 struct Scsi_Host *shost;
200 struct tasklet_struct completion_tasklet;
201 struct list_head requests_to_complete;
202 struct list_head requests_to_errorback;
203 spinlock_t scic_lock;
204 struct isci_request *reqs[SCI_MAX_IO_REQUESTS];
205 struct isci_remote_device devices[SCI_MAX_REMOTE_DEVICES];
206};
207
208/**
209 * enum sci_controller_states - This enumeration depicts all the states
210 * for the common controller state machine.
211 */
212enum sci_controller_states {
213 /**
214 * Simply the initial state for the base controller state machine.
215 */
216 SCIC_INITIAL = 0,
217
218 /**
219 * This state indicates that the controller is reset. The memory for
220 * the controller is in its initial state, but the controller requires
221 * initialization.
222 * This state is entered from the INITIAL state.
223 * This state is entered from the RESETTING state.
224 */
225 SCIC_RESET,
226
227 /**
228 * This state is typically an action state that indicates the controller
229 * is in the process of initialization. In this state no new IO operations
230 * are permitted.
231 * This state is entered from the RESET state.
232 */
233 SCIC_INITIALIZING,
234
235 /**
236 * This state indicates that the controller has been successfully
237 * initialized. In this state no new IO operations are permitted.
238 * This state is entered from the INITIALIZING state.
239 */
240 SCIC_INITIALIZED,
241
242 /**
243 * This state indicates that the controller is in the process of becoming
244 * ready (i.e. starting). In this state no new IO operations are permitted.
245 * This state is entered from the INITIALIZED state.
246 */
247 SCIC_STARTING,
248
249 /**
250 * This state indicates the controller is now ready. Thus, the user
251 * is able to perform IO operations on the controller.
252 * This state is entered from the STARTING state.
253 */
254 SCIC_READY,
255
256 /**
257 * This state is typically an action state that indicates the controller
258 * is in the process of resetting. Thus, the user is unable to perform
259 * IO operations on the controller. A reset is considered destructive in
260 * most cases.
261 * This state is entered from the READY state.
262 * This state is entered from the FAILED state.
263 * This state is entered from the STOPPED state.
264 */
265 SCIC_RESETTING,
266
267 /**
268 * This state indicates that the controller is in the process of stopping.
269 * In this state no new IO operations are permitted, but existing IO
270 * operations are allowed to complete.
271 * This state is entered from the READY state.
272 */
273 SCIC_STOPPING,
274
275 /**
276 * This state indicates that the controller has successfully been stopped.
277 * In this state no new IO operations are permitted.
278 * This state is entered from the STOPPING state.
279 */
280 SCIC_STOPPED,
281
282 /**
283 * This state indicates that the controller could not successfully be
284 * initialized. In this state no new IO operations are permitted.
285 * This state is entered from the INITIALIZING state.
286 * This state is entered from the STARTING state.
287 * This state is entered from the STOPPING state.
288 * This state is entered from the RESETTING state.
289 */
290 SCIC_FAILED,
291};
292
293/**
294 * struct isci_pci_info - This class represents the pci function containing the
295 * controllers. Depending on PCI SKU, there could be up to 2 controllers in
296 * the PCI function.
297 */
298#define SCI_MAX_MSIX_INT (SCI_NUM_MSI_X_INT*SCI_MAX_CONTROLLERS)
299
300struct isci_pci_info {
301 struct msix_entry msix_entries[SCI_MAX_MSIX_INT];
302 struct isci_host *hosts[SCI_MAX_CONTROLLERS];
303 struct isci_orom *orom;
304};
305
306static inline struct isci_pci_info *to_pci_info(struct pci_dev *pdev)
307{
308 return pci_get_drvdata(pdev);
309}
310
311#define for_each_isci_host(id, ihost, pdev) \
312 for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \
313 id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \
314 ihost = to_pci_info(pdev)->hosts[++id])
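/*
 * Illustrative sketch (not part of the driver): for_each_isci_host()
 * walks the one or two controllers behind a single PCI function, e.g.:
 *
 *	struct isci_host *ihost;
 *	int i;
 *
 *	for_each_isci_host(i, ihost, pdev)
 *		dev_dbg(&pdev->dev, "controller %d at %p\n", i, ihost);
 *
 * See isci_pci_remove() in init.c for an in-tree user.
 */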
315
316static inline enum isci_status isci_host_get_state(struct isci_host *isci_host)
317{
318 return isci_host->status;
319}
320
321static inline void isci_host_change_state(struct isci_host *isci_host,
322 enum isci_status status)
323{
324 unsigned long flags;
325
326 dev_dbg(&isci_host->pdev->dev,
327 "%s: isci_host = %p, state = 0x%x",
328 __func__,
329 isci_host,
330 status);
331 spin_lock_irqsave(&isci_host->state_lock, flags);
332 isci_host->status = status;
333 spin_unlock_irqrestore(&isci_host->state_lock, flags);
334
335}
336
337static inline void wait_for_start(struct isci_host *ihost)
338{
339 wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags));
340}
341
342static inline void wait_for_stop(struct isci_host *ihost)
343{
344 wait_event(ihost->eventq, !test_bit(IHOST_STOP_PENDING, &ihost->flags));
345}
346
347static inline void wait_for_device_start(struct isci_host *ihost, struct isci_remote_device *idev)
348{
349 wait_event(ihost->eventq, !test_bit(IDEV_START_PENDING, &idev->flags));
350}
351
352static inline void wait_for_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
353{
354 wait_event(ihost->eventq, !test_bit(IDEV_STOP_PENDING, &idev->flags));
355}
356
357static inline struct isci_host *dev_to_ihost(struct domain_device *dev)
358{
359 return dev->port->ha->lldd_ha;
360}
361
362/* we always use protocol engine group zero */
363#define ISCI_PEG 0
364
365/* see sci_controller_io_tag_allocate|free for how seq and tci are built */
366#define ISCI_TAG(seq, tci) (((u16) (seq)) << 12 | tci)
367
368/* these are returned by the hardware, so sanitize them */
369#define ISCI_TAG_SEQ(tag) (((tag) >> 12) & (SCI_MAX_SEQ-1))
370#define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1))
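/*
 * Illustrative sketch (not part of the driver): with
 * SCI_MAX_IO_REQUESTS == 256 and SCI_MAX_SEQ == 16, a tag packs a 4-bit
 * sequence number into bits 15:12 and an 8-bit TCI into bits 7:0:
 *
 *	u16 tag = ISCI_TAG(0x3, 0x2a);		// == 0x302a
 *	u16 seq = ISCI_TAG_SEQ(tag);		// == 0x3
 *	u16 tci = ISCI_TAG_TCI(tag);		// == 0x2a
 */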
371
372/* expander attached sata devices require 3 rnc slots */
373static inline int sci_remote_device_node_count(struct isci_remote_device *idev)
374{
375 struct domain_device *dev = idev->domain_dev;
376
377 if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) &&
378 !idev->is_direct_attached)
379 return SCU_STP_REMOTE_NODE_COUNT;
380 return SCU_SSP_REMOTE_NODE_COUNT;
381}
382
383/**
384 * sci_controller_clear_invalid_phy() -
385 *
386 * This macro will clear the bit in the invalid phy mask for this controller
387 * object. This is used to control messages reported for invalid link up
388 * notifications.
389 */
390#define sci_controller_clear_invalid_phy(controller, phy) \
391 ((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index))
392
393static inline struct device *sciphy_to_dev(struct isci_phy *iphy)
394{
395
396 if (!iphy || !iphy->isci_port || !iphy->isci_port->isci_host)
397 return NULL;
398
399 return &iphy->isci_port->isci_host->pdev->dev;
400}
401
402static inline struct device *sciport_to_dev(struct isci_port *iport)
403{
404
405 if (!iport || !iport->isci_host)
406 return NULL;
407
408 return &iport->isci_host->pdev->dev;
409}
410
411static inline struct device *scirdev_to_dev(struct isci_remote_device *idev)
412{
413 if (!idev || !idev->isci_port || !idev->isci_port->isci_host)
414 return NULL;
415
416 return &idev->isci_port->isci_host->pdev->dev;
417}
418
419static inline bool is_a2(struct pci_dev *pdev)
420{
421 if (pdev->revision < 4)
422 return true;
423 return false;
424}
425
426static inline bool is_b0(struct pci_dev *pdev)
427{
428 if (pdev->revision == 4)
429 return true;
430 return false;
431}
432
433static inline bool is_c0(struct pci_dev *pdev)
434{
435 if (pdev->revision >= 5)
436 return true;
437 return false;
438}
439
440void sci_controller_post_request(struct isci_host *ihost,
441 u32 request);
442void sci_controller_release_frame(struct isci_host *ihost,
443 u32 frame_index);
444void sci_controller_copy_sata_response(void *response_buffer,
445 void *frame_header,
446 void *frame_buffer);
447enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
448 struct isci_remote_device *idev,
449 u16 *node_id);
450void sci_controller_free_remote_node_context(
451 struct isci_host *ihost,
452 struct isci_remote_device *idev,
453 u16 node_id);
454
455struct isci_request *sci_request_by_tag(struct isci_host *ihost,
456 u16 io_tag);
457
458void sci_controller_power_control_queue_insert(
459 struct isci_host *ihost,
460 struct isci_phy *iphy);
461
462void sci_controller_power_control_queue_remove(
463 struct isci_host *ihost,
464 struct isci_phy *iphy);
465
466void sci_controller_link_up(
467 struct isci_host *ihost,
468 struct isci_port *iport,
469 struct isci_phy *iphy);
470
471void sci_controller_link_down(
472 struct isci_host *ihost,
473 struct isci_port *iport,
474 struct isci_phy *iphy);
475
476void sci_controller_remote_device_stopped(
477 struct isci_host *ihost,
478 struct isci_remote_device *idev);
479
480void sci_controller_copy_task_context(
481 struct isci_host *ihost,
482 struct isci_request *ireq);
483
484void sci_controller_register_setup(struct isci_host *ihost);
485
486enum sci_status sci_controller_continue_io(struct isci_request *ireq);
487int isci_host_scan_finished(struct Scsi_Host *, unsigned long);
488void isci_host_scan_start(struct Scsi_Host *);
489u16 isci_alloc_tag(struct isci_host *ihost);
490enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag);
491void isci_tci_free(struct isci_host *ihost, u16 tci);
492
493int isci_host_init(struct isci_host *);
494
495void isci_host_init_controller_names(
496 struct isci_host *isci_host,
497 unsigned int controller_idx);
498
499void isci_host_deinit(
500 struct isci_host *);
501
502void isci_host_port_link_up(
503 struct isci_host *,
504 struct isci_port *,
505 struct isci_phy *);
506int isci_host_dev_found(struct domain_device *);
507
508void isci_host_remote_device_start_complete(
509 struct isci_host *,
510 struct isci_remote_device *,
511 enum sci_status);
512
513void sci_controller_disable_interrupts(
514 struct isci_host *ihost);
515
516enum sci_status sci_controller_start_io(
517 struct isci_host *ihost,
518 struct isci_remote_device *idev,
519 struct isci_request *ireq);
520
521enum sci_task_status sci_controller_start_task(
522 struct isci_host *ihost,
523 struct isci_remote_device *idev,
524 struct isci_request *ireq);
525
526enum sci_status sci_controller_terminate_request(
527 struct isci_host *ihost,
528 struct isci_remote_device *idev,
529 struct isci_request *ireq);
530
531enum sci_status sci_controller_complete_io(
532 struct isci_host *ihost,
533 struct isci_remote_device *idev,
534 struct isci_request *ireq);
535
536void sci_port_configuration_agent_construct(
537 struct sci_port_configuration_agent *port_agent);
538
539enum sci_status sci_port_configuration_agent_initialize(
540 struct isci_host *ihost,
541 struct sci_port_configuration_agent *port_agent);
542#endif
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
new file mode 100644
index 000000000000..61e0d09e2b57
--- /dev/null
+++ b/drivers/scsi/isci/init.c
@@ -0,0 +1,565 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#include <linux/kernel.h>
57#include <linux/init.h>
58#include <linux/module.h>
59#include <linux/firmware.h>
60#include <linux/efi.h>
61#include <asm/string.h>
62#include "isci.h"
63#include "task.h"
64#include "probe_roms.h"
65
66static struct scsi_transport_template *isci_transport_template;
67
68static DEFINE_PCI_DEVICE_TABLE(isci_id_table) = {
69 { PCI_VDEVICE(INTEL, 0x1D61),},
70 { PCI_VDEVICE(INTEL, 0x1D63),},
71 { PCI_VDEVICE(INTEL, 0x1D65),},
72 { PCI_VDEVICE(INTEL, 0x1D67),},
73 { PCI_VDEVICE(INTEL, 0x1D69),},
74 { PCI_VDEVICE(INTEL, 0x1D6B),},
75 { PCI_VDEVICE(INTEL, 0x1D60),},
76 { PCI_VDEVICE(INTEL, 0x1D62),},
77 { PCI_VDEVICE(INTEL, 0x1D64),},
78 { PCI_VDEVICE(INTEL, 0x1D66),},
79 { PCI_VDEVICE(INTEL, 0x1D68),},
80 { PCI_VDEVICE(INTEL, 0x1D6A),},
81 {}
82};
83
84MODULE_DEVICE_TABLE(pci, isci_id_table);
85
86/* linux isci specific settings */
87
88unsigned char no_outbound_task_to = 20;
89module_param(no_outbound_task_to, byte, 0);
90MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)");
91
92u16 ssp_max_occ_to = 20;
93module_param(ssp_max_occ_to, ushort, 0);
94MODULE_PARM_DESC(ssp_max_occ_to, "SSP Max occupancy timeout (100us incr)");
95
96u16 stp_max_occ_to = 5;
97module_param(stp_max_occ_to, ushort, 0);
98MODULE_PARM_DESC(stp_max_occ_to, "STP Max occupancy timeout (100us incr)");
99
100u16 ssp_inactive_to = 5;
101module_param(ssp_inactive_to, ushort, 0);
102MODULE_PARM_DESC(ssp_inactive_to, "SSP inactivity timeout (100us incr)");
103
104u16 stp_inactive_to = 5;
105module_param(stp_inactive_to, ushort, 0);
106MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)");
107
108unsigned char phy_gen = 3;
109module_param(phy_gen, byte, 0);
110MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)");
111
112unsigned char max_concurr_spinup = 1;
113module_param(max_concurr_spinup, byte, 0);
114MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");
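/*
 * Illustrative sketch (not part of the driver): the knobs above are
 * plain module parameters, so they can be overridden at load time, e.g.:
 *
 *	modprobe isci phy_gen=2 max_concurr_spinup=4
 */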
115
116static struct scsi_host_template isci_sht = {
117
118 .module = THIS_MODULE,
119 .name = DRV_NAME,
120 .proc_name = DRV_NAME,
121 .queuecommand = sas_queuecommand,
122 .target_alloc = sas_target_alloc,
123 .slave_configure = sas_slave_configure,
124 .slave_destroy = sas_slave_destroy,
125 .scan_finished = isci_host_scan_finished,
126 .scan_start = isci_host_scan_start,
127 .change_queue_depth = sas_change_queue_depth,
128 .change_queue_type = sas_change_queue_type,
129 .bios_param = sas_bios_param,
130 .can_queue = ISCI_CAN_QUEUE_VAL,
131 .cmd_per_lun = 1,
132 .this_id = -1,
133 .sg_tablesize = SG_ALL,
134 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
135 .use_clustering = ENABLE_CLUSTERING,
136 .eh_device_reset_handler = sas_eh_device_reset_handler,
137 .eh_bus_reset_handler = isci_bus_reset_handler,
138 .slave_alloc = sas_slave_alloc,
139 .target_destroy = sas_target_destroy,
140 .ioctl = sas_ioctl,
141};
142
143static struct sas_domain_function_template isci_transport_ops = {
144
145 /* The class calls these to notify the LLDD of an event. */
146 .lldd_port_formed = isci_port_formed,
147 .lldd_port_deformed = isci_port_deformed,
148
149 /* The class calls these when a device is found or gone. */
150 .lldd_dev_found = isci_remote_device_found,
151 .lldd_dev_gone = isci_remote_device_gone,
152
153 .lldd_execute_task = isci_task_execute_task,
154 /* Task Management Functions. Must be called from process context. */
155 .lldd_abort_task = isci_task_abort_task,
156 .lldd_abort_task_set = isci_task_abort_task_set,
157 .lldd_clear_aca = isci_task_clear_aca,
158 .lldd_clear_task_set = isci_task_clear_task_set,
159 .lldd_I_T_nexus_reset = isci_task_I_T_nexus_reset,
160 .lldd_lu_reset = isci_task_lu_reset,
161 .lldd_query_task = isci_task_query_task,
162
163 /* Port and Adapter management */
164 .lldd_clear_nexus_port = isci_task_clear_nexus_port,
165 .lldd_clear_nexus_ha = isci_task_clear_nexus_ha,
166
167 /* Phy management */
168 .lldd_control_phy = isci_phy_control,
169};
170
171
172/******************************************************************************
173* P R O T E C T E D M E T H O D S
174******************************************************************************/
175
176
177
178/**
179 * isci_register_sas_ha() - This method initializes various lldd
180 * specific members of the sas_ha struct and calls the libsas
181 * sas_register_ha() function.
182 * @isci_host: This parameter specifies the lldd specific wrapper for the
183 * libsas sas_ha struct.
184 *
185 * This method returns an error code indicating success or failure. The
186 * user should check for a possible memory allocation error return;
187 * otherwise, a zero indicates success.
188 */
189static int isci_register_sas_ha(struct isci_host *isci_host)
190{
191 int i;
192 struct sas_ha_struct *sas_ha = &(isci_host->sas_ha);
193 struct asd_sas_phy **sas_phys;
194 struct asd_sas_port **sas_ports;
195
196 sas_phys = devm_kzalloc(&isci_host->pdev->dev,
197 SCI_MAX_PHYS * sizeof(void *),
198 GFP_KERNEL);
199 if (!sas_phys)
200 return -ENOMEM;
201
202 sas_ports = devm_kzalloc(&isci_host->pdev->dev,
203 SCI_MAX_PORTS * sizeof(void *),
204 GFP_KERNEL);
205 if (!sas_ports)
206 return -ENOMEM;
207
208 /*---------------- Libsas Initialization Stuff ----------------------
209 * Set various fields in the sas_ha struct:
210 */
211
212 sas_ha->sas_ha_name = DRV_NAME;
213 sas_ha->lldd_module = THIS_MODULE;
214 sas_ha->sas_addr = &isci_host->phys[0].sas_addr[0];
215
216 /* set the array of phy and port structs. */
217 for (i = 0; i < SCI_MAX_PHYS; i++) {
218 sas_phys[i] = &isci_host->phys[i].sas_phy;
219 sas_ports[i] = &isci_host->ports[i].sas_port;
220 }
221
222 sas_ha->sas_phy = sas_phys;
223 sas_ha->sas_port = sas_ports;
224 sas_ha->num_phys = SCI_MAX_PHYS;
225
226 sas_ha->lldd_queue_size = ISCI_CAN_QUEUE_VAL;
227 sas_ha->lldd_max_execute_num = 1;
228 sas_ha->strict_wide_ports = 1;
229
230 sas_register_ha(sas_ha);
231
232 return 0;
233}
234
235static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
236{
237 struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
238 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
239 struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha);
240
241 return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id);
242}
243
244static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL);
245
246static void isci_unregister(struct isci_host *isci_host)
247{
248 struct Scsi_Host *shost;
249
250 if (!isci_host)
251 return;
252
253 shost = isci_host->shost;
254 device_remove_file(&shost->shost_dev, &dev_attr_isci_id);
255
256 sas_unregister_ha(&isci_host->sas_ha);
257
258 sas_remove_host(isci_host->shost);
259 scsi_remove_host(isci_host->shost);
260 scsi_host_put(isci_host->shost);
261}
262
263static int __devinit isci_pci_init(struct pci_dev *pdev)
264{
265 int err, bar_num, bar_mask = 0;
266 void __iomem * const *iomap;
267
268 err = pcim_enable_device(pdev);
269 if (err) {
270 dev_err(&pdev->dev,
271 "failed enable PCI device %s!\n",
272 pci_name(pdev));
273 return err;
274 }
275
276 for (bar_num = 0; bar_num < SCI_PCI_BAR_COUNT; bar_num++)
277 bar_mask |= 1 << (bar_num * 2);
278
279 err = pcim_iomap_regions(pdev, bar_mask, DRV_NAME);
280 if (err)
281 return err;
282
283 iomap = pcim_iomap_table(pdev);
284 if (!iomap)
285 return -ENOMEM;
286
287 pci_set_master(pdev);
288
289 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
290 if (err) {
291 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
292 if (err)
293 return err;
294 }
295
296 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
297 if (err) {
298 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
299 if (err)
300 return err;
301 }
302
303 return 0;
304}
305
306static int num_controllers(struct pci_dev *pdev)
307{
308 /* BAR size alone can tell us if we are running with a dual-controller
309 * part; no need to trust revision IDs that might be under broken
310 * firmware control.
311 */
312 resource_size_t scu_bar_size = pci_resource_len(pdev, SCI_SCU_BAR*2);
313 resource_size_t smu_bar_size = pci_resource_len(pdev, SCI_SMU_BAR*2);
314
315 if (scu_bar_size >= SCI_SCU_BAR_SIZE*SCI_MAX_CONTROLLERS &&
316 smu_bar_size >= SCI_SMU_BAR_SIZE*SCI_MAX_CONTROLLERS)
317 return SCI_MAX_CONTROLLERS;
318 else
319 return 1;
320}
321
322static int isci_setup_interrupts(struct pci_dev *pdev)
323{
324 int err, i, num_msix;
325 struct isci_host *ihost;
326 struct isci_pci_info *pci_info = to_pci_info(pdev);
327
328 /*
329 * Determine the number of vectors associated with this
330 * PCI function.
331 */
332 num_msix = num_controllers(pdev) * SCI_NUM_MSI_X_INT;
333
334 for (i = 0; i < num_msix; i++)
335 pci_info->msix_entries[i].entry = i;
336
337 err = pci_enable_msix(pdev, pci_info->msix_entries, num_msix);
338 if (err)
339 goto intx;
340
341 for (i = 0; i < num_msix; i++) {
342 int id = i / SCI_NUM_MSI_X_INT;
343 struct msix_entry *msix = &pci_info->msix_entries[i];
344 irq_handler_t isr;
345
346 ihost = pci_info->hosts[id];
347 /* odd numbered vectors are error interrupts */
348 if (i & 1)
349 isr = isci_error_isr;
350 else
351 isr = isci_msix_isr;
352
353 err = devm_request_irq(&pdev->dev, msix->vector, isr, 0,
354 DRV_NAME"-msix", ihost);
355 if (!err)
356 continue;
357
358 dev_info(&pdev->dev, "msix setup failed, falling back to intx\n");
359 while (i--) {
360 id = i / SCI_NUM_MSI_X_INT;
361 ihost = pci_info->hosts[id];
362 msix = &pci_info->msix_entries[i];
363 devm_free_irq(&pdev->dev, msix->vector, ihost);
364 }
365 pci_disable_msix(pdev);
366 goto intx;
367 }
368 return 0;
369
370 intx:
371 for_each_isci_host(i, ihost, pdev) {
372 err = devm_request_irq(&pdev->dev, pdev->irq, isci_intx_isr,
373 IRQF_SHARED, DRV_NAME"-intx", ihost);
374 if (err)
375 break;
376 }
377 return err;
378}
379
380static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
381{
382 struct isci_host *isci_host;
383 struct Scsi_Host *shost;
384 int err;
385
386 isci_host = devm_kzalloc(&pdev->dev, sizeof(*isci_host), GFP_KERNEL);
387 if (!isci_host)
388 return NULL;
389
390 isci_host->pdev = pdev;
391 isci_host->id = id;
392
393 shost = scsi_host_alloc(&isci_sht, sizeof(void *));
394 if (!shost)
395 return NULL;
396 isci_host->shost = shost;
397
398 err = isci_host_init(isci_host);
399 if (err)
400 goto err_shost;
401
402 SHOST_TO_SAS_HA(shost) = &isci_host->sas_ha;
403 isci_host->sas_ha.core.shost = shost;
404 shost->transportt = isci_transport_template;
405
406 shost->max_id = ~0;
407 shost->max_lun = ~0;
408 shost->max_cmd_len = MAX_COMMAND_SIZE;
409
410 err = scsi_add_host(shost, &pdev->dev);
411 if (err)
412 goto err_shost;
413
414 err = isci_register_sas_ha(isci_host);
415 if (err)
416 goto err_shost_remove;
417
418 err = device_create_file(&shost->shost_dev, &dev_attr_isci_id);
419 if (err)
420 goto err_unregister_ha;
421
422 return isci_host;
423
424 err_unregister_ha:
425 sas_unregister_ha(&(isci_host->sas_ha));
426 err_shost_remove:
427 scsi_remove_host(shost);
428 err_shost:
429 scsi_host_put(shost);
430
431 return NULL;
432}
433
434static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
435{
436 struct isci_pci_info *pci_info;
437 int err, i;
438 struct isci_host *isci_host;
439 const struct firmware *fw = NULL;
440 struct isci_orom *orom = NULL;
441 char *source = "(platform)";
442
443 dev_info(&pdev->dev, "driver configured for rev: %d silicon\n",
444 pdev->revision);
445
446 pci_info = devm_kzalloc(&pdev->dev, sizeof(*pci_info), GFP_KERNEL);
447 if (!pci_info)
448 return -ENOMEM;
449 pci_set_drvdata(pdev, pci_info);
450
451 if (efi_enabled)
452 orom = isci_get_efi_var(pdev);
453
454 if (!orom)
455 orom = isci_request_oprom(pdev);
456
457 for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) {
458 if (sci_oem_parameters_validate(&orom->ctrl[i])) {
459 dev_warn(&pdev->dev,
460 "[%d]: invalid oem parameters detected, falling back to firmware\n", i);
461 devm_kfree(&pdev->dev, orom);
462 orom = NULL;
463 break;
464 }
465 }
466
467 if (!orom) {
468 source = "(firmware)";
469 orom = isci_request_firmware(pdev, fw);
470 if (!orom) {
471 /* TODO convert this to WARN_TAINT_ONCE once the
472 * orom/efi parameter support is widely available
473 */
474 dev_warn(&pdev->dev,
475 "Loading user firmware failed, using default "
476 "values\n");
477 dev_warn(&pdev->dev,
478 "Default OEM configuration being used: 4 "
479 "narrow ports, and default SAS Addresses\n");
480 }
481 }
482
483 if (orom)
484 dev_info(&pdev->dev,
485 "OEM SAS parameters (version: %u.%u) loaded %s\n",
486 (orom->hdr.version & 0xf0) >> 4,
487 (orom->hdr.version & 0xf), source);
488
489 pci_info->orom = orom;
490
491 err = isci_pci_init(pdev);
492 if (err)
493 return err;
494
495 for (i = 0; i < num_controllers(pdev); i++) {
496 struct isci_host *h = isci_host_alloc(pdev, i);
497
498 if (!h) {
499 err = -ENOMEM;
500 goto err_host_alloc;
501 }
502 pci_info->hosts[i] = h;
503 }
504
505 err = isci_setup_interrupts(pdev);
506 if (err)
507 goto err_host_alloc;
508
509 for_each_isci_host(i, isci_host, pdev)
510 scsi_scan_host(isci_host->shost);
511
512 return 0;
513
514 err_host_alloc:
515 for_each_isci_host(i, isci_host, pdev)
516 isci_unregister(isci_host);
517 return err;
518}
519
520static void __devexit isci_pci_remove(struct pci_dev *pdev)
521{
522 struct isci_host *ihost;
523 int i;
524
525 for_each_isci_host(i, ihost, pdev) {
526 isci_unregister(ihost);
527 isci_host_deinit(ihost);
528 sci_controller_disable_interrupts(ihost);
529 }
530}
531
532static struct pci_driver isci_pci_driver = {
533 .name = DRV_NAME,
534 .id_table = isci_id_table,
535 .probe = isci_pci_probe,
536 .remove = __devexit_p(isci_pci_remove),
537};
538
539static __init int isci_init(void)
540{
541 int err;
542
543 pr_info("%s: Intel(R) C600 SAS Controller Driver\n", DRV_NAME);
544
545 isci_transport_template = sas_domain_attach_transport(&isci_transport_ops);
546 if (!isci_transport_template)
547 return -ENOMEM;
548
549 err = pci_register_driver(&isci_pci_driver);
550 if (err)
551 sas_release_transport(isci_transport_template);
552
553 return err;
554}
555
556static __exit void isci_exit(void)
557{
558 pci_unregister_driver(&isci_pci_driver);
559 sas_release_transport(isci_transport_template);
560}
561
562MODULE_LICENSE("Dual BSD/GPL");
563MODULE_FIRMWARE(ISCI_FW_NAME);
564module_init(isci_init);
565module_exit(isci_exit);
diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h
new file mode 100644
index 000000000000..d1de63312e7f
--- /dev/null
+++ b/drivers/scsi/isci/isci.h
@@ -0,0 +1,538 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#ifndef __ISCI_H__
57#define __ISCI_H__
58
59#include <linux/interrupt.h>
60#include <linux/types.h>
61
62#define DRV_NAME "isci"
63#define SCI_PCI_BAR_COUNT 2
64#define SCI_NUM_MSI_X_INT 2
65#define SCI_SMU_BAR 0
66#define SCI_SMU_BAR_SIZE (16*1024)
67#define SCI_SCU_BAR 1
68#define SCI_SCU_BAR_SIZE (4*1024*1024)
69#define SCI_IO_SPACE_BAR0 2
70#define SCI_IO_SPACE_BAR1 3
71#define ISCI_CAN_QUEUE_VAL 250 /* < SCI_MAX_IO_REQUESTS ? */
72#define SCIC_CONTROLLER_STOP_TIMEOUT 5000
73
74#define SCI_CONTROLLER_INVALID_IO_TAG 0xFFFF
75
76#define SCI_MAX_PHYS (4UL)
77#define SCI_MAX_PORTS SCI_MAX_PHYS
78#define SCI_MAX_SMP_PHYS (384) /* not silicon constrained */
79#define SCI_MAX_REMOTE_DEVICES (256UL)
80#define SCI_MAX_IO_REQUESTS (256UL)
81#define SCI_MAX_SEQ (16)
82#define SCI_MAX_MSIX_MESSAGES (2)
83#define SCI_MAX_SCATTER_GATHER_ELEMENTS 130 /* not silicon constrained */
84#define SCI_MAX_CONTROLLERS 2
85#define SCI_MAX_DOMAINS SCI_MAX_PORTS
86
87#define SCU_MAX_CRITICAL_NOTIFICATIONS (384)
88#define SCU_MAX_EVENTS_SHIFT (7)
89#define SCU_MAX_EVENTS (1 << SCU_MAX_EVENTS_SHIFT)
90#define SCU_MAX_UNSOLICITED_FRAMES (128)
91#define SCU_MAX_COMPLETION_QUEUE_SCRATCH (128)
92#define SCU_MAX_COMPLETION_QUEUE_ENTRIES (SCU_MAX_CRITICAL_NOTIFICATIONS \
93 + SCU_MAX_EVENTS \
94 + SCU_MAX_UNSOLICITED_FRAMES \
95 + SCI_MAX_IO_REQUESTS \
96 + SCU_MAX_COMPLETION_QUEUE_SCRATCH)
97#define SCU_MAX_COMPLETION_QUEUE_SHIFT (ilog2(SCU_MAX_COMPLETION_QUEUE_ENTRIES))
98
99#define SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES (4096)
100#define SCU_UNSOLICITED_FRAME_BUFFER_SIZE (1024)
101#define SCU_INVALID_FRAME_INDEX (0xFFFF)
102
103#define SCU_IO_REQUEST_MAX_SGE_SIZE (0x00FFFFFF)
104#define SCU_IO_REQUEST_MAX_TRANSFER_LENGTH (0x00FFFFFF)
105
106static inline void check_sizes(void)
107{
108 BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_EVENTS);
109 BUILD_BUG_ON(SCU_MAX_UNSOLICITED_FRAMES <= 8);
110 BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_UNSOLICITED_FRAMES);
111 BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_COMPLETION_QUEUE_ENTRIES);
112 BUILD_BUG_ON(SCU_MAX_UNSOLICITED_FRAMES > SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES);
113 BUILD_BUG_ON_NOT_POWER_OF_2(SCI_MAX_IO_REQUESTS);
114 BUILD_BUG_ON_NOT_POWER_OF_2(SCI_MAX_SEQ);
115}
116
117/**
118 * enum sci_status - This is the general return status enumeration for non-IO,
119 * non-task management related SCI interface methods.
120 *
121 *
122 */
123enum sci_status {
124 /**
125 * This member indicates successful completion.
126 */
127 SCI_SUCCESS = 0,
128
129 /**
130 * This value indicates that the calling method completed successfully,
131 * but that the IO may have completed before having its start method
132 * invoked. This occurs during SAT translation for requests that do
133 * not require an IO to the target or for any other requests that may
134 * be completed without having to submit IO.
135 */
136 SCI_SUCCESS_IO_COMPLETE_BEFORE_START,
137
138 /**
139 * This value indicates that the SCU hardware returned an early response
140 * because the IO request specified more data than is returned by the
141 * target device (mode pages, inquiry data, etc.). The completion routine
142 * will handle this case to get the actual number of bytes transferred.
143 */
144 SCI_SUCCESS_IO_DONE_EARLY,
145
146 /**
147 * This member indicates that the object for which a state change is
148 * being requested is already in said state.
149 */
150 SCI_WARNING_ALREADY_IN_STATE,
151
152 /**
153 * This member indicates the interrupt coalescence timer may cause SAS
154 * specification compliance issues (i.e. SMP target mode response
155 * frames must be returned within 1.9 milliseconds).
156 */
157 SCI_WARNING_TIMER_CONFLICT,
158
159 /**
160 * This field indicates a sequence of actions is not yet complete. Mostly,
161 * this status is used when multiple ATA commands are needed in a SATI translation.
162 */
163 SCI_WARNING_SEQUENCE_INCOMPLETE,
164
165 /**
166 * This member indicates that there was a general failure.
167 */
168 SCI_FAILURE,
169
170 /**
171 * This member indicates that the SCI implementation is unable to complete
172 * an operation due to a critical flaw that prevents any further operation
173 * (i.e. an invalid pointer).
174 */
175 SCI_FATAL_ERROR,
176
177 /**
178 * This member indicates the calling function failed, because the state
179 * of the controller is in a state that prevents successful completion.
180 */
181 SCI_FAILURE_INVALID_STATE,
182
183 /**
184 * This member indicates the calling function failed, because there is
185 * insufficient resources/memory to complete the request.
186 */
187 SCI_FAILURE_INSUFFICIENT_RESOURCES,
188
189 /**
190 * This member indicates the calling function failed, because the
191 * controller object required for the operation can't be located.
192 */
193 SCI_FAILURE_CONTROLLER_NOT_FOUND,
194
195 /**
196 * This member indicates the calling function failed, because the
197 * discovered controller type is not supported by the library.
198 */
199 SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE,
200
201 /**
202 * This member indicates the calling function failed, because the
203 * requested initialization data version isn't supported.
204 */
205 SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION,
206
207 /**
208 * This member indicates the calling function failed, because the
209 * requested configuration of SAS Phys into SAS Ports is not supported.
210 */
211 SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION,
212
213 /**
214 * This member indicates the calling function failed, because the
215 * requested protocol is not supported by the remote device, port,
216 * or controller.
217 */
218 SCI_FAILURE_UNSUPPORTED_PROTOCOL,
219
220 /**
221 * This member indicates the calling function failed, because the
222 * requested information type is not supported by the SCI implementation.
223 */
224 SCI_FAILURE_UNSUPPORTED_INFORMATION_TYPE,
225
226 /**
227 * This member indicates the calling function failed, because the
228 * device already exists.
229 */
230 SCI_FAILURE_DEVICE_EXISTS,
231
232 /**
233 * This member indicates the calling function failed, because adding
234 * a phy to the object is not possible.
235 */
236 SCI_FAILURE_ADDING_PHY_UNSUPPORTED,
237
238 /**
239 * This member indicates the calling function failed, because the
240 * requested information type is not supported by the SCI implementation.
241 */
242 SCI_FAILURE_UNSUPPORTED_INFORMATION_FIELD,
243
244 /**
245 * This member indicates the calling function failed, because the SCI
246 * implementation does not support the supplied time limit.
247 */
248 SCI_FAILURE_UNSUPPORTED_TIME_LIMIT,
249
250 /**
251 * This member indicates the calling method failed, because the SCI
252 * implementation does not contain the specified Phy.
253 */
254 SCI_FAILURE_INVALID_PHY,
255
256 /**
257 * This member indicates the calling method failed, because the SCI
258 * implementation does not contain the specified Port.
259 */
260 SCI_FAILURE_INVALID_PORT,
261
262 /**
263 * This member indicates the calling method was partly successful.
264 * The port was reset, but not all phys in the port are operational.
265 */
266 SCI_FAILURE_RESET_PORT_PARTIAL_SUCCESS,
267
268 /**
269 * This member indicates that the calling method failed.
270 * The port reset did not complete because none of the phys are operational.
271 */
272 SCI_FAILURE_RESET_PORT_FAILURE,
273
274 /**
275 * This member indicates the calling method failed, because the SCI
276 * implementation does not contain the specified remote device.
277 */
278 SCI_FAILURE_INVALID_REMOTE_DEVICE,
279
280 /**
281 * This member indicates the calling method failed, because the remote
282 * device is in a bad state and requires a reset.
283 */
284 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED,
285
286 /**
287 * This member indicates the calling method failed, because the SCI
288 * implementation does not contain or support the specified IO tag.
289 */
290 SCI_FAILURE_INVALID_IO_TAG,
291
292 /**
293 * This member indicates that the operation failed and the user should
294 * check the response data associated with the IO.
295 */
296 SCI_FAILURE_IO_RESPONSE_VALID,
297
298 /**
299 * This member indicates that the operation failed, the failure is
300 * controller implementation specific, and the response data associated
301 * with the request is not valid. You can query for the controller
302 * specific error information via sci_controller_get_request_status()
303 */
304 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
305
306 /**
307 * This member indicates that the operation failed because the
308 * user requested this IO to be terminated.
309 */
310 SCI_FAILURE_IO_TERMINATED,
311
312 /**
313 * This member indicates that the operation failed and the associated
314 * request requires a SCSI abort task to be sent to the target.
315 */
316 SCI_FAILURE_IO_REQUIRES_SCSI_ABORT,
317
318 /**
319 * This member indicates that the operation failed because the supplied
320 * device could not be located.
321 */
322 SCI_FAILURE_DEVICE_NOT_FOUND,
323
324 /**
325 * This member indicates that the operation failed because the
326 * object's association is required and is not correctly set.
327 */
328 SCI_FAILURE_INVALID_ASSOCIATION,
329
330 /**
331 * This member indicates that the operation failed, because a timeout
332 * occurred.
333 */
334 SCI_FAILURE_TIMEOUT,
335
336 /**
337 * This member indicates that the operation failed, because the user
338 * specified a value that is either invalid or not supported.
339 */
340 SCI_FAILURE_INVALID_PARAMETER_VALUE,
341
342 /**
343 * This value indicates that the operation failed, because the number
344 * of messages (MSI-X) is not supported.
345 */
346 SCI_FAILURE_UNSUPPORTED_MESSAGE_COUNT,
347
348 /**
349 * This value indicates that the method failed due to a lack of
350 * available NCQ tags.
351 */
352 SCI_FAILURE_NO_NCQ_TAG_AVAILABLE,
353
354 /**
355 * This value indicates that a protocol violation has occurred on the
356 * link.
357 */
358 SCI_FAILURE_PROTOCOL_VIOLATION,
359
360 /**
361 * This value indicates a failure condition that a retry may help to clear.
362 */
363 SCI_FAILURE_RETRY_REQUIRED,
364
365 /**
366 * This field indicates the retry limit was reached when a retry was attempted.
367 */
368 SCI_FAILURE_RETRY_LIMIT_REACHED,
369
370 /**
371 * This member indicates the calling method was partly successful.
372 * Mostly, this status is used when a LUN_RESET issued to an
373 * expander-attached STP device in the READY NCQ substate needs to have
374 * its RNC suspended/resumed before posting the TC.
375 */
376 SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS,
377
378 /**
379 * This field indicates an illegal phy connection based on the routing attribute
380 * of both expander phys attached to each other.
381 */
382 SCI_FAILURE_ILLEGAL_ROUTING_ATTRIBUTE_CONFIGURATION,
383
384 /**
385 * This field indicates a CONFIG ROUTE INFO command has a response with function result
386 * INDEX DOES NOT EXIST, which usually means the max route index was exceeded.
387 */
388 SCI_FAILURE_EXCEED_MAX_ROUTE_INDEX,
389
390 /**
391 * This value indicates that an unsupported PCI device ID has been
392 * specified. This indicates that attempts to invoke
393 * sci_library_allocate_controller() will fail.
394 */
395 SCI_FAILURE_UNSUPPORTED_PCI_DEVICE_ID
396
397};
398
399/**
400 * enum sci_io_status - This enumeration depicts all of the possible IO
401 * completion status values. Each value in this enumeration maps directly
402 * to a value in the enum sci_status enumeration. Please refer to that
403 * enumeration for detailed comments concerning what the status represents.
404 *
405 * Add the API to retrieve the SCU status from the core. Check to see that the
406 * following status are properly handled: - SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL
407 * - SCI_IO_FAILURE_INVALID_IO_TAG
408 */
409enum sci_io_status {
410 SCI_IO_SUCCESS = SCI_SUCCESS,
411 SCI_IO_FAILURE = SCI_FAILURE,
412 SCI_IO_SUCCESS_COMPLETE_BEFORE_START = SCI_SUCCESS_IO_COMPLETE_BEFORE_START,
413 SCI_IO_SUCCESS_IO_DONE_EARLY = SCI_SUCCESS_IO_DONE_EARLY,
414 SCI_IO_FAILURE_INVALID_STATE = SCI_FAILURE_INVALID_STATE,
415 SCI_IO_FAILURE_INSUFFICIENT_RESOURCES = SCI_FAILURE_INSUFFICIENT_RESOURCES,
416 SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL = SCI_FAILURE_UNSUPPORTED_PROTOCOL,
417 SCI_IO_FAILURE_RESPONSE_VALID = SCI_FAILURE_IO_RESPONSE_VALID,
418 SCI_IO_FAILURE_CONTROLLER_SPECIFIC_ERR = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
419 SCI_IO_FAILURE_TERMINATED = SCI_FAILURE_IO_TERMINATED,
420 SCI_IO_FAILURE_REQUIRES_SCSI_ABORT = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT,
421 SCI_IO_FAILURE_INVALID_PARAMETER_VALUE = SCI_FAILURE_INVALID_PARAMETER_VALUE,
422 SCI_IO_FAILURE_NO_NCQ_TAG_AVAILABLE = SCI_FAILURE_NO_NCQ_TAG_AVAILABLE,
423 SCI_IO_FAILURE_PROTOCOL_VIOLATION = SCI_FAILURE_PROTOCOL_VIOLATION,
424
425 SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED,
426
427 SCI_IO_FAILURE_RETRY_REQUIRED = SCI_FAILURE_RETRY_REQUIRED,
428 SCI_IO_FAILURE_RETRY_LIMIT_REACHED = SCI_FAILURE_RETRY_LIMIT_REACHED,
429 SCI_IO_FAILURE_INVALID_REMOTE_DEVICE = SCI_FAILURE_INVALID_REMOTE_DEVICE
430};
431
432/**
433 * enum sci_task_status - This enumeration depicts all of the possible task
434 * completion status values. Each value in this enumeration maps directly
435 * to a value in the enum sci_status enumeration. Please refer to that
436 * enumeration for detailed comments concerning what the status represents.
437 *
438 * Check to see that the following status are properly handled:
439 */
440enum sci_task_status {
441 SCI_TASK_SUCCESS = SCI_SUCCESS,
442 SCI_TASK_FAILURE = SCI_FAILURE,
443 SCI_TASK_FAILURE_INVALID_STATE = SCI_FAILURE_INVALID_STATE,
444 SCI_TASK_FAILURE_INSUFFICIENT_RESOURCES = SCI_FAILURE_INSUFFICIENT_RESOURCES,
445 SCI_TASK_FAILURE_UNSUPPORTED_PROTOCOL = SCI_FAILURE_UNSUPPORTED_PROTOCOL,
446 SCI_TASK_FAILURE_INVALID_TAG = SCI_FAILURE_INVALID_IO_TAG,
447 SCI_TASK_FAILURE_RESPONSE_VALID = SCI_FAILURE_IO_RESPONSE_VALID,
448 SCI_TASK_FAILURE_CONTROLLER_SPECIFIC_ERR = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
449 SCI_TASK_FAILURE_TERMINATED = SCI_FAILURE_IO_TERMINATED,
450 SCI_TASK_FAILURE_INVALID_PARAMETER_VALUE = SCI_FAILURE_INVALID_PARAMETER_VALUE,
451
452 SCI_TASK_FAILURE_REMOTE_DEVICE_RESET_REQUIRED = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED,
453 SCI_TASK_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS = SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS
454
455};
456
457/**
458 * sci_swab32_cpy - convert between scsi and scu-hardware byte format
459 * @dest: receive the 4-byte endian swapped version of src
460 * @src: word aligned source buffer
461 *
462 * scu hardware handles SSP/SMP control, response, and unidentified
463 * frames in "big endian dword" order. Regardless of host endianness this
464 * is always a swab32()-per-dword conversion of the standard definition,
465 * i.e. single byte fields swapped and multi-byte fields in little-
466 * endian
467 */
468static inline void sci_swab32_cpy(void *_dest, void *_src, ssize_t word_cnt)
469{
470 u32 *dest = _dest, *src = _src;
471
472 while (--word_cnt >= 0)
473 dest[word_cnt] = swab32(src[word_cnt]);
474}
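/*
 * Illustrative sketch (not part of the driver): copying two dwords of a
 * frame out of hardware order reverses each 4-byte group in memory,
 * independent of host endianness:
 *
 *	u8 hw[8] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 };
 *	u8 cpu[8];
 *
 *	sci_swab32_cpy(cpu, hw, 2);
 *	// cpu == { 0x03, 0x02, 0x01, 0x00, 0x07, 0x06, 0x05, 0x04 }
 */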
475
476extern unsigned char no_outbound_task_to;
477extern u16 ssp_max_occ_to;
478extern u16 stp_max_occ_to;
479extern u16 ssp_inactive_to;
480extern u16 stp_inactive_to;
481extern unsigned char phy_gen;
482extern unsigned char max_concurr_spinup;
483
484irqreturn_t isci_msix_isr(int vec, void *data);
485irqreturn_t isci_intx_isr(int vec, void *data);
486irqreturn_t isci_error_isr(int vec, void *data);
487
488/*
489 * Each timer is associated with a cancellation flag that is set when
490 * del_timer() is called and checked in the timer callback function. This
491 * is needed since del_timer_sync() cannot be called with sci_lock held.
492 * For deinit however, del_timer_sync() is used without holding the lock.
493 */
494struct sci_timer {
495 struct timer_list timer;
496 bool cancel;
497};
498
499static inline
500void sci_init_timer(struct sci_timer *tmr, void (*fn)(unsigned long))
501{
502 tmr->timer.function = fn;
503 tmr->timer.data = (unsigned long) tmr;
504	tmr->cancel = false;
505 init_timer(&tmr->timer);
506}
507
508static inline void sci_mod_timer(struct sci_timer *tmr, unsigned long msec)
509{
510	tmr->cancel = false;
511 mod_timer(&tmr->timer, jiffies + msecs_to_jiffies(msec));
512}
513
514static inline void sci_del_timer(struct sci_timer *tmr)
515{
516	tmr->cancel = true;
517 del_timer(&tmr->timer);
518}
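/* Illustrative sketch (hypothetical callback name): the usage pattern these
 * helpers assume. A deleted timer may still fire once, so the callback
 * checks the cancel flag, under the caller's lock, before doing any work;
 * phy_sata_timeout() in phy.c follows exactly this shape.
 */
static inline void sci_timer_callback_sketch(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;

	/* the real callbacks take scic_lock around this check */
	if (tmr->cancel)
		return;	/* sci_del_timer() raced with expiry; do nothing */

	/* ... timeout handling goes here ... */
}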
519
520struct sci_base_state_machine {
521 const struct sci_base_state *state_table;
522 u32 initial_state_id;
523 u32 current_state_id;
524 u32 previous_state_id;
525};
526
527typedef void (*sci_state_transition_t)(struct sci_base_state_machine *sm);
528
529struct sci_base_state {
530 sci_state_transition_t enter_state; /* Called on state entry */
531 sci_state_transition_t exit_state; /* Called on state exit */
532};
533
534extern void sci_init_sm(struct sci_base_state_machine *sm,
535 const struct sci_base_state *state_table,
536 u32 initial_state);
537extern void sci_change_state(struct sci_base_state_machine *sm, u32 next_state);
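/* Illustrative sketch (hypothetical state ids and handlers) of driving this
 * small framework: state-table entries are indexed by state id and the
 * enter/exit hooks are optional. Compare sci_phy_state_table in phy.c.
 *
 *	static void demo_ready_enter(struct sci_base_state_machine *sm)
 *	{
 *		struct demo *d = container_of(sm, struct demo, sm);
 *		...
 *	}
 *
 *	static const struct sci_base_state demo_table[] = {
 *		[DEMO_INITIAL] = { },
 *		[DEMO_READY]   = { .enter_state = demo_ready_enter },
 *	};
 *
 *	sci_init_sm(&d->sm, demo_table, DEMO_INITIAL);
 *	sci_change_state(&d->sm, DEMO_READY);	(invokes demo_ready_enter)
 */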
538#endif /* __ISCI_H__ */
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
new file mode 100644
index 000000000000..79313a7a2356
--- /dev/null
+++ b/drivers/scsi/isci/phy.c
@@ -0,0 +1,1312 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#include "isci.h"
57#include "host.h"
58#include "phy.h"
59#include "scu_event_codes.h"
60#include "probe_roms.h"
61
62/* Maximum arbitration wait time in microseconds */
63#define SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME (700)
64
65enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy)
66{
67 return iphy->max_negotiated_speed;
68}
69
70static enum sci_status
71sci_phy_transport_layer_initialization(struct isci_phy *iphy,
72 struct scu_transport_layer_registers __iomem *reg)
73{
74 u32 tl_control;
75
76 iphy->transport_layer_registers = reg;
77
78 writel(SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX,
79 &iphy->transport_layer_registers->stp_rni);
80
81 /*
82 * Hardware team recommends that we enable the STP prefetch for all
83 * transports
84 */
85 tl_control = readl(&iphy->transport_layer_registers->control);
86 tl_control |= SCU_TLCR_GEN_BIT(STP_WRITE_DATA_PREFETCH);
87 writel(tl_control, &iphy->transport_layer_registers->control);
88
89 return SCI_SUCCESS;
90}
91
92static enum sci_status
93sci_phy_link_layer_initialization(struct isci_phy *iphy,
94 struct scu_link_layer_registers __iomem *reg)
95{
96 struct isci_host *ihost = iphy->owning_port->owning_controller;
97 int phy_idx = iphy->phy_index;
98 struct sci_phy_user_params *phy_user = &ihost->user_parameters.phys[phy_idx];
99 struct sci_phy_oem_params *phy_oem =
100 &ihost->oem_parameters.phys[phy_idx];
101 u32 phy_configuration;
102 struct sci_phy_cap phy_cap;
103 u32 parity_check = 0;
104 u32 parity_count = 0;
105 u32 llctl, link_rate;
106 u32 clksm_value = 0;
107
108 iphy->link_layer_registers = reg;
109
110 /* Set our IDENTIFY frame data */
111 #define SCI_END_DEVICE 0x01
112
113 writel(SCU_SAS_TIID_GEN_BIT(SMP_INITIATOR) |
114 SCU_SAS_TIID_GEN_BIT(SSP_INITIATOR) |
115 SCU_SAS_TIID_GEN_BIT(STP_INITIATOR) |
116 SCU_SAS_TIID_GEN_BIT(DA_SATA_HOST) |
117 SCU_SAS_TIID_GEN_VAL(DEVICE_TYPE, SCI_END_DEVICE),
118 &iphy->link_layer_registers->transmit_identification);
119
120 /* Write the device SAS Address */
121 writel(0xFEDCBA98,
122 &iphy->link_layer_registers->sas_device_name_high);
123 writel(phy_idx, &iphy->link_layer_registers->sas_device_name_low);
124
125 /* Write the source SAS Address */
126 writel(phy_oem->sas_address.high,
127 &iphy->link_layer_registers->source_sas_address_high);
128 writel(phy_oem->sas_address.low,
129 &iphy->link_layer_registers->source_sas_address_low);
130
131 /* Clear and Set the PHY Identifier */
132 writel(0, &iphy->link_layer_registers->identify_frame_phy_id);
133 writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx),
134 &iphy->link_layer_registers->identify_frame_phy_id);
135
136 /* Change the initial state of the phy configuration register */
137 phy_configuration =
138 readl(&iphy->link_layer_registers->phy_configuration);
139
140 /* Hold OOB state machine in reset */
141 phy_configuration |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
142 writel(phy_configuration,
143 &iphy->link_layer_registers->phy_configuration);
144
145 /* Configure the SNW capabilities */
146 phy_cap.all = 0;
147 phy_cap.start = 1;
148 phy_cap.gen3_no_ssc = 1;
149 phy_cap.gen2_no_ssc = 1;
150 phy_cap.gen1_no_ssc = 1;
151 if (ihost->oem_parameters.controller.do_enable_ssc == true) {
152 phy_cap.gen3_ssc = 1;
153 phy_cap.gen2_ssc = 1;
154 phy_cap.gen1_ssc = 1;
155 }
156
157 /*
158 * The SAS specification indicates that the phy_capabilities that
159 * are transmitted shall have an even parity. Calculate the parity. */
160 parity_check = phy_cap.all;
161 while (parity_check != 0) {
162 if (parity_check & 0x1)
163 parity_count++;
164 parity_check >>= 1;
165 }
166
167 /*
168 * If parity indicates there are an odd number of bits set, then
169 * set the parity bit to 1 in the phy capabilities. */
170 if ((parity_count % 2) != 0)
171 phy_cap.parity = 1;
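	/* An equivalent, more compact form of the parity computation above
	 * would be "phy_cap.parity = hweight32(phy_cap.all) & 1;", where
	 * hweight32() is the kernel's population-count helper; even parity
	 * simply requires that the total number of set bits be even.
	 */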
172
173 writel(phy_cap.all, &iphy->link_layer_registers->phy_capabilities);
174
175 /* Set the enable spinup period but disable the ability to send
176 * notify enable spinup
177 */
178 writel(SCU_ENSPINUP_GEN_VAL(COUNT,
179 phy_user->notify_enable_spin_up_insertion_frequency),
180 &iphy->link_layer_registers->notify_enable_spinup_control);
181
182	/* Write the ALIGN insertion frequency both for the connected state
183	 * and independent of the connected state
184 */
185 clksm_value = SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(CONNECTED,
186 phy_user->in_connection_align_insertion_frequency);
187
188 clksm_value |= SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(GENERAL,
189 phy_user->align_insertion_frequency);
190
191 writel(clksm_value, &iphy->link_layer_registers->clock_skew_management);
192
193 /* @todo Provide a way to write this register correctly */
194 writel(0x02108421,
195 &iphy->link_layer_registers->afe_lookup_table_control);
196
197 llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT,
198 (u8)ihost->user_parameters.no_outbound_task_timeout);
199
200 switch (phy_user->max_speed_generation) {
201 case SCIC_SDS_PARM_GEN3_SPEED:
202 link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN3;
203 break;
204 case SCIC_SDS_PARM_GEN2_SPEED:
205 link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2;
206 break;
207 default:
208 link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1;
209 break;
210 }
211 llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate);
212 writel(llctl, &iphy->link_layer_registers->link_layer_control);
213
214 if (is_a2(ihost->pdev)) {
215 /* Program the max ARB time for the PHY to 700us so we inter-operate with
216 * the PMC expander which shuts down PHYs if the expander PHY generates too
217 * many breaks. This time value will guarantee that the initiator PHY will
218 * generate the break.
219 */
220 writel(SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME,
221 &iphy->link_layer_registers->maximum_arbitration_wait_timer_timeout);
222 }
223
224 /* Disable link layer hang detection, rely on the OS timeout for I/O timeouts. */
225 writel(0, &iphy->link_layer_registers->link_layer_hang_detection_timeout);
226
227 /* We can exit the initial state to the stopped state */
228 sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
229
230 return SCI_SUCCESS;
231}
232
233static void phy_sata_timeout(unsigned long data)
234{
235 struct sci_timer *tmr = (struct sci_timer *)data;
236 struct isci_phy *iphy = container_of(tmr, typeof(*iphy), sata_timer);
237 struct isci_host *ihost = iphy->owning_port->owning_controller;
238 unsigned long flags;
239
240 spin_lock_irqsave(&ihost->scic_lock, flags);
241
242 if (tmr->cancel)
243 goto done;
244
245 dev_dbg(sciphy_to_dev(iphy),
246 "%s: SCIC SDS Phy 0x%p did not receive signature fis before "
247 "timeout.\n",
248 __func__,
249 iphy);
250
251 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
252done:
253 spin_unlock_irqrestore(&ihost->scic_lock, flags);
254}
255
256/**
257 * This method returns the port currently containing this phy. If the phy is
258 * currently contained by the dummy port, then the phy is considered to not
259 * be part of a port.
260 * @iphy: This parameter specifies the phy for which to retrieve the
261 *    containing port.
262 *
263 * This method returns a handle to a port that contains the supplied phy.
264 * NULL This value is returned if the phy is not part of a real
265 * port (i.e. it's contained in the dummy port). !NULL All other
266 * values indicate a handle/pointer to the port containing the phy.
267 */
268struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy)
269{
270 struct isci_port *iport = iphy->owning_port;
271
272 if (iport->physical_port_index == SCIC_SDS_DUMMY_PORT)
273 return NULL;
274
275 return iphy->owning_port;
276}
277
278/**
279 * This method will assign a port to the phy object.
280 * @iphy: This parameter specifies the phy for which to assign a port
281 * object.
282 *
283 *
284 */
285void sci_phy_set_port(
286 struct isci_phy *iphy,
287 struct isci_port *iport)
288{
289 iphy->owning_port = iport;
290
291 if (iphy->bcn_received_while_port_unassigned) {
292 iphy->bcn_received_while_port_unassigned = false;
293 sci_port_broadcast_change_received(iphy->owning_port, iphy);
294 }
295}
296
297enum sci_status sci_phy_initialize(struct isci_phy *iphy,
298 struct scu_transport_layer_registers __iomem *tl,
299 struct scu_link_layer_registers __iomem *ll)
300{
301	/* Perform the initialization of the TL hardware */
302 sci_phy_transport_layer_initialization(iphy, tl);
303
304	/* Perform the initialization of the PE hardware */
305 sci_phy_link_layer_initialization(iphy, ll);
306
307 /* There is nothing that needs to be done in this state just
308 * transition to the stopped state
309 */
310 sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
311
312 return SCI_SUCCESS;
313}
314
315/**
316 * This method assigns the direct attached device ID for this phy.
317 *
318 * @iphy: The phy for which the direct attached device id is to
319 *    be assigned.
320 * @device_id: The direct attached device ID to assign to the phy.
321 * This will either be the RNi for the device or an invalid RNi if there
322 * is no current device assigned to the phy.
323 */
324void sci_phy_setup_transport(struct isci_phy *iphy, u32 device_id)
325{
326 u32 tl_control;
327
328 writel(device_id, &iphy->transport_layer_registers->stp_rni);
329
330 /*
331 * The read should guarantee that the first write gets posted
332 * before the next write
333 */
334 tl_control = readl(&iphy->transport_layer_registers->control);
335 tl_control |= SCU_TLCR_GEN_BIT(CLEAR_TCI_NCQ_MAPPING_TABLE);
336 writel(tl_control, &iphy->transport_layer_registers->control);
337}
338
339static void sci_phy_suspend(struct isci_phy *iphy)
340{
341 u32 scu_sas_pcfg_value;
342
343 scu_sas_pcfg_value =
344 readl(&iphy->link_layer_registers->phy_configuration);
345 scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE);
346 writel(scu_sas_pcfg_value,
347 &iphy->link_layer_registers->phy_configuration);
348
349 sci_phy_setup_transport(iphy, SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
350}
351
352void sci_phy_resume(struct isci_phy *iphy)
353{
354 u32 scu_sas_pcfg_value;
355
356 scu_sas_pcfg_value =
357 readl(&iphy->link_layer_registers->phy_configuration);
358 scu_sas_pcfg_value &= ~SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE);
359 writel(scu_sas_pcfg_value,
360 &iphy->link_layer_registers->phy_configuration);
361}
362
363void sci_phy_get_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas)
364{
365 sas->high = readl(&iphy->link_layer_registers->source_sas_address_high);
366 sas->low = readl(&iphy->link_layer_registers->source_sas_address_low);
367}
368
369void sci_phy_get_attached_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas)
370{
371 struct sas_identify_frame *iaf;
372
373 iaf = &iphy->frame_rcvd.iaf;
374 memcpy(sas, iaf->sas_addr, SAS_ADDR_SIZE);
375}
376
377void sci_phy_get_protocols(struct isci_phy *iphy, struct sci_phy_proto *proto)
378{
379 proto->all = readl(&iphy->link_layer_registers->transmit_identification);
380}
381
382enum sci_status sci_phy_start(struct isci_phy *iphy)
383{
384 enum sci_phy_states state = iphy->sm.current_state_id;
385
386 if (state != SCI_PHY_STOPPED) {
387 dev_dbg(sciphy_to_dev(iphy),
388 "%s: in wrong state: %d\n", __func__, state);
389 return SCI_FAILURE_INVALID_STATE;
390 }
391
392 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
393 return SCI_SUCCESS;
394}
395
396enum sci_status sci_phy_stop(struct isci_phy *iphy)
397{
398 enum sci_phy_states state = iphy->sm.current_state_id;
399
400 switch (state) {
401 case SCI_PHY_SUB_INITIAL:
402 case SCI_PHY_SUB_AWAIT_OSSP_EN:
403 case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
404 case SCI_PHY_SUB_AWAIT_SAS_POWER:
405 case SCI_PHY_SUB_AWAIT_SATA_POWER:
406 case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
407 case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
408 case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
409 case SCI_PHY_SUB_FINAL:
410 case SCI_PHY_READY:
411 break;
412 default:
413 dev_dbg(sciphy_to_dev(iphy),
414 "%s: in wrong state: %d\n", __func__, state);
415 return SCI_FAILURE_INVALID_STATE;
416 }
417
418 sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
419 return SCI_SUCCESS;
420}
421
422enum sci_status sci_phy_reset(struct isci_phy *iphy)
423{
424 enum sci_phy_states state = iphy->sm.current_state_id;
425
426 if (state != SCI_PHY_READY) {
427 dev_dbg(sciphy_to_dev(iphy),
428 "%s: in wrong state: %d\n", __func__, state);
429 return SCI_FAILURE_INVALID_STATE;
430 }
431
432 sci_change_state(&iphy->sm, SCI_PHY_RESETTING);
433 return SCI_SUCCESS;
434}
435
436enum sci_status sci_phy_consume_power_handler(struct isci_phy *iphy)
437{
438 enum sci_phy_states state = iphy->sm.current_state_id;
439
440 switch (state) {
441 case SCI_PHY_SUB_AWAIT_SAS_POWER: {
442 u32 enable_spinup;
443
444 enable_spinup = readl(&iphy->link_layer_registers->notify_enable_spinup_control);
445 enable_spinup |= SCU_ENSPINUP_GEN_BIT(ENABLE);
446 writel(enable_spinup, &iphy->link_layer_registers->notify_enable_spinup_control);
447
448 /* Change state to the final state this substate machine has run to completion */
449 sci_change_state(&iphy->sm, SCI_PHY_SUB_FINAL);
450
451 return SCI_SUCCESS;
452 }
453 case SCI_PHY_SUB_AWAIT_SATA_POWER: {
454 u32 scu_sas_pcfg_value;
455
456 /* Release the spinup hold state and reset the OOB state machine */
457 scu_sas_pcfg_value =
458 readl(&iphy->link_layer_registers->phy_configuration);
459 scu_sas_pcfg_value &=
460 ~(SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD) | SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE));
461 scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
462 writel(scu_sas_pcfg_value,
463 &iphy->link_layer_registers->phy_configuration);
464
465 /* Now restart the OOB operation */
466 scu_sas_pcfg_value &= ~SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
467 scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
468 writel(scu_sas_pcfg_value,
469 &iphy->link_layer_registers->phy_configuration);
470
471 /* Change state to the final state this substate machine has run to completion */
472 sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_PHY_EN);
473
474 return SCI_SUCCESS;
475 }
476 default:
477 dev_dbg(sciphy_to_dev(iphy),
478 "%s: in wrong state: %d\n", __func__, state);
479 return SCI_FAILURE_INVALID_STATE;
480 }
481}
482
483static void sci_phy_start_sas_link_training(struct isci_phy *iphy)
484{
485 /* continue the link training for the phy as if it were a SAS PHY
486 * instead of a SATA PHY. This is done because the completion queue had a SAS
487 * PHY DETECTED event when the state machine was expecting a SATA PHY event.
488 */
489 u32 phy_control;
490
491 phy_control = readl(&iphy->link_layer_registers->phy_configuration);
492 phy_control |= SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD);
493 writel(phy_control,
494 &iphy->link_layer_registers->phy_configuration);
495
496 sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SAS_SPEED_EN);
497
498 iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SAS;
499}
500
501static void sci_phy_start_sata_link_training(struct isci_phy *iphy)
502{
503 /* This method continues the link training for the phy as if it were a SATA PHY
504 * instead of a SAS PHY. This is done because the completion queue had a SATA
505	 * SPINUP HOLD event when the state machine was expecting a SAS PHY event.
506 */
507 sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_POWER);
508
509 iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA;
510}
511
512/**
513 * sci_phy_complete_link_training - perform processing common to
514 * all protocols upon completion of link training.
515 * @sci_phy: This parameter specifies the phy object for which link training
516 * has completed.
517 * @max_link_rate: This parameter specifies the maximum link rate to be
518 * associated with this phy.
519 * @next_state: This parameter specifies the next state for the phy's starting
520 * sub-state machine.
521 *
522 */
523static void sci_phy_complete_link_training(struct isci_phy *iphy,
524 enum sas_linkrate max_link_rate,
525 u32 next_state)
526{
527 iphy->max_negotiated_speed = max_link_rate;
528
529 sci_change_state(&iphy->sm, next_state);
530}
531
532enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
533{
534 enum sci_phy_states state = iphy->sm.current_state_id;
535
536 switch (state) {
537 case SCI_PHY_SUB_AWAIT_OSSP_EN:
538 switch (scu_get_event_code(event_code)) {
539 case SCU_EVENT_SAS_PHY_DETECTED:
540 sci_phy_start_sas_link_training(iphy);
541 iphy->is_in_link_training = true;
542 break;
543 case SCU_EVENT_SATA_SPINUP_HOLD:
544 sci_phy_start_sata_link_training(iphy);
545 iphy->is_in_link_training = true;
546 break;
547 default:
548 dev_dbg(sciphy_to_dev(iphy),
549 "%s: PHY starting substate machine received "
550 "unexpected event_code %x\n",
551 __func__,
552 event_code);
553 return SCI_FAILURE;
554 }
555 return SCI_SUCCESS;
556 case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
557 switch (scu_get_event_code(event_code)) {
558 case SCU_EVENT_SAS_PHY_DETECTED:
559 /*
560 * Why is this being reported again by the controller?
561 * We would re-enter this state so just stay here */
562 break;
563 case SCU_EVENT_SAS_15:
564 case SCU_EVENT_SAS_15_SSC:
565 sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS,
566 SCI_PHY_SUB_AWAIT_IAF_UF);
567 break;
568 case SCU_EVENT_SAS_30:
569 case SCU_EVENT_SAS_30_SSC:
570 sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS,
571 SCI_PHY_SUB_AWAIT_IAF_UF);
572 break;
573 case SCU_EVENT_SAS_60:
574 case SCU_EVENT_SAS_60_SSC:
575 sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS,
576 SCI_PHY_SUB_AWAIT_IAF_UF);
577 break;
578 case SCU_EVENT_SATA_SPINUP_HOLD:
579 /*
580 * We were doing SAS PHY link training and received a SATA PHY event
581 * continue OOB/SN as if this were a SATA PHY */
582 sci_phy_start_sata_link_training(iphy);
583 break;
584 case SCU_EVENT_LINK_FAILURE:
585 /* Link failure change state back to the starting state */
586 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
587 break;
588 default:
589 dev_warn(sciphy_to_dev(iphy),
590 "%s: PHY starting substate machine received "
591 "unexpected event_code %x\n",
592 __func__, event_code);
593
594 return SCI_FAILURE;
596 }
597 return SCI_SUCCESS;
598 case SCI_PHY_SUB_AWAIT_IAF_UF:
599 switch (scu_get_event_code(event_code)) {
600 case SCU_EVENT_SAS_PHY_DETECTED:
601 /* Backup the state machine */
602 sci_phy_start_sas_link_training(iphy);
603 break;
604 case SCU_EVENT_SATA_SPINUP_HOLD:
605 /* We were doing SAS PHY link training and received a
606 * SATA PHY event continue OOB/SN as if this were a
607 * SATA PHY
608 */
609 sci_phy_start_sata_link_training(iphy);
610 break;
611 case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
612 case SCU_EVENT_LINK_FAILURE:
613 case SCU_EVENT_HARD_RESET_RECEIVED:
614 /* Start the oob/sn state machine over again */
615 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
616 break;
617 default:
618 dev_warn(sciphy_to_dev(iphy),
619 "%s: PHY starting substate machine received "
620 "unexpected event_code %x\n",
621 __func__, event_code);
622 return SCI_FAILURE;
623 }
624 return SCI_SUCCESS;
625 case SCI_PHY_SUB_AWAIT_SAS_POWER:
626 switch (scu_get_event_code(event_code)) {
627 case SCU_EVENT_LINK_FAILURE:
628 /* Link failure change state back to the starting state */
629 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
630 break;
631 default:
632 dev_warn(sciphy_to_dev(iphy),
633 "%s: PHY starting substate machine received unexpected "
634 "event_code %x\n",
635 __func__,
636 event_code);
637 return SCI_FAILURE;
638 }
639 return SCI_SUCCESS;
640 case SCI_PHY_SUB_AWAIT_SATA_POWER:
641 switch (scu_get_event_code(event_code)) {
642 case SCU_EVENT_LINK_FAILURE:
643 /* Link failure change state back to the starting state */
644 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
645 break;
646 case SCU_EVENT_SATA_SPINUP_HOLD:
647 /* These events are received every 10ms and are
648 * expected while in this state
649 */
650 break;
651
652 case SCU_EVENT_SAS_PHY_DETECTED:
653 /* There has been a change in the phy type before OOB/SN for the
654			 * SATA device finished; start down the SAS link training path.
655 */
656 sci_phy_start_sas_link_training(iphy);
657 break;
658
659 default:
660 dev_warn(sciphy_to_dev(iphy),
661 "%s: PHY starting substate machine received "
662 "unexpected event_code %x\n",
663 __func__, event_code);
664
665 return SCI_FAILURE;
666 }
667 return SCI_SUCCESS;
668 case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
669 switch (scu_get_event_code(event_code)) {
670 case SCU_EVENT_LINK_FAILURE:
671 /* Link failure change state back to the starting state */
672 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
673 break;
674 case SCU_EVENT_SATA_SPINUP_HOLD:
675			/* These events might be received since we don't know how many may be in
676 * the completion queue while waiting for power
677 */
678 break;
679 case SCU_EVENT_SATA_PHY_DETECTED:
680 iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA;
681
682 /* We have received the SATA PHY notification change state */
683 sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN);
684 break;
685 case SCU_EVENT_SAS_PHY_DETECTED:
686 /* There has been a change in the phy type before OOB/SN for the
687			 * SATA device finished; start down the SAS link training path.
688 */
689 sci_phy_start_sas_link_training(iphy);
690 break;
691 default:
692 dev_warn(sciphy_to_dev(iphy),
693 "%s: PHY starting substate machine received "
694 "unexpected event_code %x\n",
695 __func__,
696 event_code);
697
698			return SCI_FAILURE;
699 }
700 return SCI_SUCCESS;
701 case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
702 switch (scu_get_event_code(event_code)) {
703 case SCU_EVENT_SATA_PHY_DETECTED:
704 /*
705 * The hardware reports multiple SATA PHY detected events
706 * ignore the extras */
707 break;
708 case SCU_EVENT_SATA_15:
709 case SCU_EVENT_SATA_15_SSC:
710 sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS,
711 SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
712 break;
713 case SCU_EVENT_SATA_30:
714 case SCU_EVENT_SATA_30_SSC:
715 sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS,
716 SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
717 break;
718 case SCU_EVENT_SATA_60:
719 case SCU_EVENT_SATA_60_SSC:
720 sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS,
721 SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
722 break;
723 case SCU_EVENT_LINK_FAILURE:
724 /* Link failure change state back to the starting state */
725 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
726 break;
727 case SCU_EVENT_SAS_PHY_DETECTED:
728 /*
729 * There has been a change in the phy type before OOB/SN for the
730			 * SATA device finished; start down the SAS link training path. */
731 sci_phy_start_sas_link_training(iphy);
732 break;
733 default:
734 dev_warn(sciphy_to_dev(iphy),
735 "%s: PHY starting substate machine received "
736 "unexpected event_code %x\n",
737 __func__, event_code);
738
739 return SCI_FAILURE;
740 }
741
742 return SCI_SUCCESS;
743 case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
744 switch (scu_get_event_code(event_code)) {
745 case SCU_EVENT_SATA_PHY_DETECTED:
746 /* Backup the state machine */
747 sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN);
748 break;
749
750 case SCU_EVENT_LINK_FAILURE:
751 /* Link failure change state back to the starting state */
752 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
753 break;
754
755 default:
756 dev_warn(sciphy_to_dev(iphy),
757 "%s: PHY starting substate machine received "
758 "unexpected event_code %x\n",
759 __func__,
760 event_code);
761
762 return SCI_FAILURE;
763 }
764 return SCI_SUCCESS;
765 case SCI_PHY_READY:
766 switch (scu_get_event_code(event_code)) {
767 case SCU_EVENT_LINK_FAILURE:
768 /* Link failure change state back to the starting state */
769 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
770 break;
771 case SCU_EVENT_BROADCAST_CHANGE:
772 /* Broadcast change received. Notify the port. */
773 if (phy_get_non_dummy_port(iphy) != NULL)
774 sci_port_broadcast_change_received(iphy->owning_port, iphy);
775 else
776 iphy->bcn_received_while_port_unassigned = true;
777 break;
778 default:
779 dev_warn(sciphy_to_dev(iphy),
780				 "%s: SCIC PHY 0x%p ready state machine received "
781 "unexpected event_code %x\n",
782 __func__, iphy, event_code);
783 return SCI_FAILURE_INVALID_STATE;
784 }
785 return SCI_SUCCESS;
786 case SCI_PHY_RESETTING:
787 switch (scu_get_event_code(event_code)) {
788 case SCU_EVENT_HARD_RESET_TRANSMITTED:
789 /* Link failure change state back to the starting state */
790 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
791 break;
792 default:
793 dev_warn(sciphy_to_dev(iphy),
794 "%s: SCIC PHY 0x%p resetting state machine received "
795 "unexpected event_code %x\n",
796 __func__, iphy, event_code);
797
798 return SCI_FAILURE_INVALID_STATE;
800 }
801 return SCI_SUCCESS;
802 default:
803 dev_dbg(sciphy_to_dev(iphy),
804 "%s: in wrong state: %d\n", __func__, state);
805 return SCI_FAILURE_INVALID_STATE;
806 }
807}
808
809enum sci_status sci_phy_frame_handler(struct isci_phy *iphy, u32 frame_index)
810{
811 enum sci_phy_states state = iphy->sm.current_state_id;
812 struct isci_host *ihost = iphy->owning_port->owning_controller;
813 enum sci_status result;
814 unsigned long flags;
815
816 switch (state) {
817 case SCI_PHY_SUB_AWAIT_IAF_UF: {
818 u32 *frame_words;
819 struct sas_identify_frame iaf;
820
821 result = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
822 frame_index,
823 (void **)&frame_words);
824
825 if (result != SCI_SUCCESS)
826 return result;
827
828 sci_swab32_cpy(&iaf, frame_words, sizeof(iaf) / sizeof(u32));
829 if (iaf.frame_type == 0) {
830 u32 state;
831
832 spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
833 memcpy(&iphy->frame_rcvd.iaf, &iaf, sizeof(iaf));
834 spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
835 if (iaf.smp_tport) {
836 /* We got the IAF for an expander PHY go to the final
837 * state since there are no power requirements for
838 * expander phys.
839 */
840 state = SCI_PHY_SUB_FINAL;
841 } else {
842 /* We got the IAF we can now go to the await spinup
843 * semaphore state
844 */
845 state = SCI_PHY_SUB_AWAIT_SAS_POWER;
846 }
847 sci_change_state(&iphy->sm, state);
848 result = SCI_SUCCESS;
849 } else
850 dev_warn(sciphy_to_dev(iphy),
851 "%s: PHY starting substate machine received "
852 "unexpected frame id %x\n",
853 __func__, frame_index);
854
855 sci_controller_release_frame(ihost, frame_index);
856 return result;
857 }
858 case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: {
859 struct dev_to_host_fis *frame_header;
860 u32 *fis_frame_data;
861
862 result = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
863 frame_index,
864 (void **)&frame_header);
865
866 if (result != SCI_SUCCESS)
867 return result;
868
869 if ((frame_header->fis_type == FIS_REGD2H) &&
870 !(frame_header->status & ATA_BUSY)) {
871 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
872 frame_index,
873 (void **)&fis_frame_data);
874
875 spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
876 sci_controller_copy_sata_response(&iphy->frame_rcvd.fis,
877 frame_header,
878 fis_frame_data);
879 spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
880
881 /* got IAF we can now go to the await spinup semaphore state */
882 sci_change_state(&iphy->sm, SCI_PHY_SUB_FINAL);
883
884 result = SCI_SUCCESS;
885 } else
886 dev_warn(sciphy_to_dev(iphy),
887 "%s: PHY starting substate machine received "
888 "unexpected frame id %x\n",
889 __func__, frame_index);
890
891		/* Regardless of the result we are done with this frame */
892 sci_controller_release_frame(ihost, frame_index);
893
894 return result;
895 }
896 default:
897 dev_dbg(sciphy_to_dev(iphy),
898 "%s: in wrong state: %d\n", __func__, state);
899 return SCI_FAILURE_INVALID_STATE;
900 }
902}
903
904static void sci_phy_starting_initial_substate_enter(struct sci_base_state_machine *sm)
905{
906 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
907
908	/* This is just a temporary state; go off to the starting state */
909 sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_OSSP_EN);
910}
911
912static void sci_phy_starting_await_sas_power_substate_enter(struct sci_base_state_machine *sm)
913{
914 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
915 struct isci_host *ihost = iphy->owning_port->owning_controller;
916
917 sci_controller_power_control_queue_insert(ihost, iphy);
918}
919
920static void sci_phy_starting_await_sas_power_substate_exit(struct sci_base_state_machine *sm)
921{
922 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
923 struct isci_host *ihost = iphy->owning_port->owning_controller;
924
925 sci_controller_power_control_queue_remove(ihost, iphy);
926}
927
928static void sci_phy_starting_await_sata_power_substate_enter(struct sci_base_state_machine *sm)
929{
930 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
931 struct isci_host *ihost = iphy->owning_port->owning_controller;
932
933 sci_controller_power_control_queue_insert(ihost, iphy);
934}
935
936static void sci_phy_starting_await_sata_power_substate_exit(struct sci_base_state_machine *sm)
937{
938 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
939 struct isci_host *ihost = iphy->owning_port->owning_controller;
940
941 sci_controller_power_control_queue_remove(ihost, iphy);
942}
943
944static void sci_phy_starting_await_sata_phy_substate_enter(struct sci_base_state_machine *sm)
945{
946 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
947
948 sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT);
949}
950
951static void sci_phy_starting_await_sata_phy_substate_exit(struct sci_base_state_machine *sm)
952{
953 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
954
955 sci_del_timer(&iphy->sata_timer);
956}
957
958static void sci_phy_starting_await_sata_speed_substate_enter(struct sci_base_state_machine *sm)
959{
960 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
961
962 sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT);
963}
964
965static void sci_phy_starting_await_sata_speed_substate_exit(struct sci_base_state_machine *sm)
966{
967 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
968
969 sci_del_timer(&iphy->sata_timer);
970}
971
972static void sci_phy_starting_await_sig_fis_uf_substate_enter(struct sci_base_state_machine *sm)
973{
974 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
975
976 if (sci_port_link_detected(iphy->owning_port, iphy)) {
977
978 /*
979 * Clear the PE suspend condition so we can actually
980		 * receive the SIG FIS.
981 * The hardware will not respond to the XRDY until the PE
982 * suspend condition is cleared.
983 */
984 sci_phy_resume(iphy);
985
986 sci_mod_timer(&iphy->sata_timer,
987 SCIC_SDS_SIGNATURE_FIS_TIMEOUT);
988 } else
989 iphy->is_in_link_training = false;
990}
991
992static void sci_phy_starting_await_sig_fis_uf_substate_exit(struct sci_base_state_machine *sm)
993{
994 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
995
996 sci_del_timer(&iphy->sata_timer);
997}
998
999static void sci_phy_starting_final_substate_enter(struct sci_base_state_machine *sm)
1000{
1001 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
1002
1003 /* State machine has run to completion so exit out and change
1004 * the base state machine to the ready state
1005 */
1006 sci_change_state(&iphy->sm, SCI_PHY_READY);
1007}
1008
1009/**
1010 * scu_link_layer_stop_protocol_engine()
1011 * @iphy: This is the struct isci_phy object to stop.
1012 *
1013 * This method will stop the struct isci_phy object. This does not reset
1014 * the protocol engine; it just suspends it and places it in a state where
1015 * it will not cause the end device to power up.
1016 */
1017static void scu_link_layer_stop_protocol_engine(
1018 struct isci_phy *iphy)
1019{
1020 u32 scu_sas_pcfg_value;
1021 u32 enable_spinup_value;
1022
1023 /* Suspend the protocol engine and place it in a sata spinup hold state */
1024 scu_sas_pcfg_value =
1025 readl(&iphy->link_layer_registers->phy_configuration);
1026 scu_sas_pcfg_value |=
1027 (SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
1028 SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE) |
1029 SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD));
1030 writel(scu_sas_pcfg_value,
1031 &iphy->link_layer_registers->phy_configuration);
1032
1033 /* Disable the notify enable spinup primitives */
1034 enable_spinup_value = readl(&iphy->link_layer_registers->notify_enable_spinup_control);
1035 enable_spinup_value &= ~SCU_ENSPINUP_GEN_BIT(ENABLE);
1036 writel(enable_spinup_value, &iphy->link_layer_registers->notify_enable_spinup_control);
1037}
1038
1039/**
1040 * scu_link_layer_start_oob()
1041 * @iphy: the phy for which to start the OOB/SN state machine
1042 * This method will start the OOB/SN state machine for this struct isci_phy object.
1043 */
1044static void scu_link_layer_start_oob(
1045 struct isci_phy *iphy)
1046{
1047 u32 scu_sas_pcfg_value;
1048
1049 scu_sas_pcfg_value =
1050 readl(&iphy->link_layer_registers->phy_configuration);
1051 scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
1052 scu_sas_pcfg_value &=
1053 ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
1054 SCU_SAS_PCFG_GEN_BIT(HARD_RESET));
1055 writel(scu_sas_pcfg_value,
1056 &iphy->link_layer_registers->phy_configuration);
1057}
1058
1059/**
1060 * scu_link_layer_tx_hard_reset()
1061 * @iphy: the phy on which to transmit a hard reset request
1062 * This method will transmit a hard reset request on the specified phy. The SCU
1063 * hardware requires that we reset the OOB state machine and set the hard reset
1064 * bit in the phy configuration register. We then must start OOB over with the
1065 * hard reset bit set.
1066 */
1067static void scu_link_layer_tx_hard_reset(
1068 struct isci_phy *iphy)
1069{
1070 u32 phy_configuration_value;
1071
1072 /*
1073 * SAS Phys must wait for the HARD_RESET_TX event notification to transition
1074 * to the starting state. */
1075 phy_configuration_value =
1076 readl(&iphy->link_layer_registers->phy_configuration);
1077 phy_configuration_value |=
1078 (SCU_SAS_PCFG_GEN_BIT(HARD_RESET) |
1079 SCU_SAS_PCFG_GEN_BIT(OOB_RESET));
1080 writel(phy_configuration_value,
1081 &iphy->link_layer_registers->phy_configuration);
1082
1083 /* Now take the OOB state machine out of reset */
1084 phy_configuration_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
1085 phy_configuration_value &= ~SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
1086 writel(phy_configuration_value,
1087 &iphy->link_layer_registers->phy_configuration);
1088}
1089
1090static void sci_phy_stopped_state_enter(struct sci_base_state_machine *sm)
1091{
1092 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
1093 struct isci_port *iport = iphy->owning_port;
1094 struct isci_host *ihost = iport->owning_controller;
1095
1096 /*
1097 * @todo We need to get to the controller to place this PE in a
1098 * reset state
1099 */
1100 sci_del_timer(&iphy->sata_timer);
1101
1102 scu_link_layer_stop_protocol_engine(iphy);
1103
1104 if (iphy->sm.previous_state_id != SCI_PHY_INITIAL)
1105 sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy);
1106}
1107
1108static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm)
1109{
1110 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
1111 struct isci_port *iport = iphy->owning_port;
1112 struct isci_host *ihost = iport->owning_controller;
1113
1114 scu_link_layer_stop_protocol_engine(iphy);
1115 scu_link_layer_start_oob(iphy);
1116
1117 /* We don't know what kind of phy we are going to be just yet */
1118 iphy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN;
1119 iphy->bcn_received_while_port_unassigned = false;
1120
1121 if (iphy->sm.previous_state_id == SCI_PHY_READY)
1122 sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy);
1123
1124 sci_change_state(&iphy->sm, SCI_PHY_SUB_INITIAL);
1125}
1126
1127static void sci_phy_ready_state_enter(struct sci_base_state_machine *sm)
1128{
1129 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
1130 struct isci_port *iport = iphy->owning_port;
1131 struct isci_host *ihost = iport->owning_controller;
1132
1133 sci_controller_link_up(ihost, phy_get_non_dummy_port(iphy), iphy);
1134}
1135
1136static void sci_phy_ready_state_exit(struct sci_base_state_machine *sm)
1137{
1138 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
1139
1140 sci_phy_suspend(iphy);
1141}
1142
1143static void sci_phy_resetting_state_enter(struct sci_base_state_machine *sm)
1144{
1145 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
1146
1147 /* The phy is being reset, therefore deactivate it from the port. In
1148 * the resetting state we don't notify the user regarding link up and
1149 * link down notifications
1150 */
1151 sci_port_deactivate_phy(iphy->owning_port, iphy, false);
1152
1153 if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
1154 scu_link_layer_tx_hard_reset(iphy);
1155 } else {
1156 /* The SCU does not need to have a discrete reset state so
1157 * just go back to the starting state.
1158 */
1159 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
1160 }
1161}
1162
1163static const struct sci_base_state sci_phy_state_table[] = {
1164 [SCI_PHY_INITIAL] = { },
1165 [SCI_PHY_STOPPED] = {
1166 .enter_state = sci_phy_stopped_state_enter,
1167 },
1168 [SCI_PHY_STARTING] = {
1169 .enter_state = sci_phy_starting_state_enter,
1170 },
1171 [SCI_PHY_SUB_INITIAL] = {
1172 .enter_state = sci_phy_starting_initial_substate_enter,
1173 },
1174 [SCI_PHY_SUB_AWAIT_OSSP_EN] = { },
1175 [SCI_PHY_SUB_AWAIT_SAS_SPEED_EN] = { },
1176 [SCI_PHY_SUB_AWAIT_IAF_UF] = { },
1177 [SCI_PHY_SUB_AWAIT_SAS_POWER] = {
1178 .enter_state = sci_phy_starting_await_sas_power_substate_enter,
1179 .exit_state = sci_phy_starting_await_sas_power_substate_exit,
1180 },
1181 [SCI_PHY_SUB_AWAIT_SATA_POWER] = {
1182 .enter_state = sci_phy_starting_await_sata_power_substate_enter,
1183 .exit_state = sci_phy_starting_await_sata_power_substate_exit
1184 },
1185 [SCI_PHY_SUB_AWAIT_SATA_PHY_EN] = {
1186 .enter_state = sci_phy_starting_await_sata_phy_substate_enter,
1187 .exit_state = sci_phy_starting_await_sata_phy_substate_exit
1188 },
1189 [SCI_PHY_SUB_AWAIT_SATA_SPEED_EN] = {
1190 .enter_state = sci_phy_starting_await_sata_speed_substate_enter,
1191 .exit_state = sci_phy_starting_await_sata_speed_substate_exit
1192 },
1193 [SCI_PHY_SUB_AWAIT_SIG_FIS_UF] = {
1194 .enter_state = sci_phy_starting_await_sig_fis_uf_substate_enter,
1195 .exit_state = sci_phy_starting_await_sig_fis_uf_substate_exit
1196 },
1197 [SCI_PHY_SUB_FINAL] = {
1198 .enter_state = sci_phy_starting_final_substate_enter,
1199 },
1200 [SCI_PHY_READY] = {
1201 .enter_state = sci_phy_ready_state_enter,
1202 .exit_state = sci_phy_ready_state_exit,
1203 },
1204 [SCI_PHY_RESETTING] = {
1205 .enter_state = sci_phy_resetting_state_enter,
1206 },
1207 [SCI_PHY_FINAL] = { },
1208};
1209
1210void sci_phy_construct(struct isci_phy *iphy,
1211 struct isci_port *iport, u8 phy_index)
1212{
1213 sci_init_sm(&iphy->sm, sci_phy_state_table, SCI_PHY_INITIAL);
1214
1215 /* Copy the rest of the input data to our locals */
1216 iphy->owning_port = iport;
1217 iphy->phy_index = phy_index;
1218 iphy->bcn_received_while_port_unassigned = false;
1219 iphy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN;
1220 iphy->link_layer_registers = NULL;
1221 iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
1222
1223 /* Create the SIGNATURE FIS Timeout timer for this phy */
1224 sci_init_timer(&iphy->sata_timer, phy_sata_timeout);
1225}
1226
1227void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index)
1228{
1229 struct sci_oem_params *oem = &ihost->oem_parameters;
1230 u64 sci_sas_addr;
1231 __be64 sas_addr;
1232
1233 sci_sas_addr = oem->phys[index].sas_address.high;
1234 sci_sas_addr <<= 32;
1235 sci_sas_addr |= oem->phys[index].sas_address.low;
1236 sas_addr = cpu_to_be64(sci_sas_addr);
1237 memcpy(iphy->sas_addr, &sas_addr, sizeof(sas_addr));
1238
1239 iphy->isci_port = NULL;
1240 iphy->sas_phy.enabled = 0;
1241 iphy->sas_phy.id = index;
1242 iphy->sas_phy.sas_addr = &iphy->sas_addr[0];
1243 iphy->sas_phy.frame_rcvd = (u8 *)&iphy->frame_rcvd;
1244 iphy->sas_phy.ha = &ihost->sas_ha;
1245 iphy->sas_phy.lldd_phy = iphy;
1246 iphy->sas_phy.enabled = 1;
1247 iphy->sas_phy.class = SAS;
1248 iphy->sas_phy.iproto = SAS_PROTOCOL_ALL;
1249 iphy->sas_phy.tproto = 0;
1250 iphy->sas_phy.type = PHY_TYPE_PHYSICAL;
1251 iphy->sas_phy.role = PHY_ROLE_INITIATOR;
1252 iphy->sas_phy.oob_mode = OOB_NOT_CONNECTED;
1253 iphy->sas_phy.linkrate = SAS_LINK_RATE_UNKNOWN;
1254 memset(&iphy->frame_rcvd, 0, sizeof(iphy->frame_rcvd));
1255}
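/* Worked example with illustrative values: for sas_address.high = 0x5000ABCD
 * and sas_address.low = 0x00000001 the 64-bit address assembled above is
 * 0x5000ABCD00000001. cpu_to_be64() lays it out most-significant byte first,
 * matching the SAS wire format, so iphy->sas_addr[] becomes
 * { 0x50, 0x00, 0xAB, 0xCD, 0x00, 0x00, 0x00, 0x01 }.
 */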
1256
1258/**
1259 * isci_phy_control() - This function is one of the SAS Domain Template
1260 * functions. This is a phy management function.
1261 * @phy: This parameter specifies the sphy being controlled.
1262 * @func: This parameter specifies the phy control function being invoked.
1263 * @buf: This parameter is specific to the phy function being invoked.
1264 *
1265 * status, zero indicates success.
1266 */
1267int isci_phy_control(struct asd_sas_phy *sas_phy,
1268 enum phy_func func,
1269 void *buf)
1270{
1271 int ret = 0;
1272 struct isci_phy *iphy = sas_phy->lldd_phy;
1273 struct isci_port *iport = iphy->isci_port;
1274 struct isci_host *ihost = sas_phy->ha->lldd_ha;
1275 unsigned long flags;
1276
1277 dev_dbg(&ihost->pdev->dev,
1278 "%s: phy %p; func %d; buf %p; isci phy %p, port %p\n",
1279 __func__, sas_phy, func, buf, iphy, iport);
1280
1281 switch (func) {
1282 case PHY_FUNC_DISABLE:
1283 spin_lock_irqsave(&ihost->scic_lock, flags);
1284 sci_phy_stop(iphy);
1285 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1286 break;
1287
1288 case PHY_FUNC_LINK_RESET:
1289 spin_lock_irqsave(&ihost->scic_lock, flags);
1290 sci_phy_stop(iphy);
1291 sci_phy_start(iphy);
1292 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1293 break;
1294
1295 case PHY_FUNC_HARD_RESET:
1296 if (!iport)
1297 return -ENODEV;
1298
1299 /* Perform the port reset. */
1300 ret = isci_port_perform_hard_reset(ihost, iport, iphy);
1301
1302 break;
1303
1304 default:
1305 dev_dbg(&ihost->pdev->dev,
1306 "%s: phy %p; func %d NOT IMPLEMENTED!\n",
1307 __func__, sas_phy, func);
1308 ret = -ENOSYS;
1309 break;
1310 }
1311 return ret;
1312}
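/* Sketch of how this hook is consumed (the registration itself is not part
 * of this file): libsas invokes it through the domain template, roughly
 *
 *	static struct sas_domain_function_template isci_transport_ops = {
 *		...
 *		.lldd_control_phy = isci_phy_control,
 *	};
 *
 * where the member name comes from libsas and the variable name here is
 * illustrative.
 */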
diff --git a/drivers/scsi/isci/phy.h b/drivers/scsi/isci/phy.h
new file mode 100644
index 000000000000..67699c8e321c
--- /dev/null
+++ b/drivers/scsi/isci/phy.h
@@ -0,0 +1,504 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55#ifndef _ISCI_PHY_H_
56#define _ISCI_PHY_H_
57
58#include <scsi/sas.h>
59#include <scsi/libsas.h>
60#include "isci.h"
61#include "sas.h"
62
63/* This is the timeout value for the SATA phy to wait for a SIGNATURE FIS
64 * before restarting the starting state machine. Technically, the old parallel
65 * ATA specification required up to 30 seconds for a device to issue its
66 * signature FIS as a result of a soft reset. Now we see that devices respond
67 * generally within 15 seconds, but we'll use 25 for now.
68 */
69#define SCIC_SDS_SIGNATURE_FIS_TIMEOUT 25000
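/* The value is in milliseconds: sci_mod_timer() feeds it through
 * msecs_to_jiffies(), so 25000 here is the 25 seconds described above.
 */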
70
71/* This is the timeout for the SATA OOB/SN because the hardware does not
72 * recognize a hot plug after the OOB signal but before the SN signals. We
73 * need to make sure that if the speed event notification has not arrived
74 * from the hardware within this timeout we restart the hardware OOB state
75 * machine.
76 */
77#define SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT 250
78
79enum sci_phy_protocol {
80 SCIC_SDS_PHY_PROTOCOL_UNKNOWN,
81 SCIC_SDS_PHY_PROTOCOL_SAS,
82 SCIC_SDS_PHY_PROTOCOL_SATA,
83 SCIC_SDS_MAX_PHY_PROTOCOLS
84};
85
86/**
87 * isci_phy - hba local phy infrastructure
88 * @sm:
89 * @protocol: attached device protocol
90 * @phy_index: physical index relative to the controller (0-3)
91 * @bcn_received_while_port_unassigned: bcn to report after port association
92 * @sata_timer: timeout SATA signature FIS arrival
93 */
94struct isci_phy {
95 struct sci_base_state_machine sm;
96 struct isci_port *owning_port;
97 enum sas_linkrate max_negotiated_speed;
98 enum sci_phy_protocol protocol;
99 u8 phy_index;
100 bool bcn_received_while_port_unassigned;
101 bool is_in_link_training;
102 struct sci_timer sata_timer;
103 struct scu_transport_layer_registers __iomem *transport_layer_registers;
104 struct scu_link_layer_registers __iomem *link_layer_registers;
105 struct asd_sas_phy sas_phy;
106 struct isci_port *isci_port;
107 u8 sas_addr[SAS_ADDR_SIZE];
108 union {
109 struct sas_identify_frame iaf;
110 struct dev_to_host_fis fis;
111 } frame_rcvd;
112};
113
114static inline struct isci_phy *to_iphy(struct asd_sas_phy *sas_phy)
115{
116 struct isci_phy *iphy = container_of(sas_phy, typeof(*iphy), sas_phy);
117
118 return iphy;
119}
120
121struct sci_phy_cap {
122 union {
123 struct {
124 /*
125			 * The SAS specification indicates the start bit shall
126			 * always be set to 1. This implementation will have
127			 * the start bit set to 0 if the PHY CAPABILITIES were
128			 * either not received or speed negotiation failed.
129			 *
130 */
131 u8 start:1;
132 u8 tx_ssc_type:1;
133 u8 res1:2;
134 u8 req_logical_linkrate:4;
135
136 u32 gen1_no_ssc:1;
137 u32 gen1_ssc:1;
138 u32 gen2_no_ssc:1;
139 u32 gen2_ssc:1;
140 u32 gen3_no_ssc:1;
141 u32 gen3_ssc:1;
142 u32 res2:17;
143 u32 parity:1;
144 };
145 u32 all;
146 };
147} __packed;
148
149/* this data structure reflects the link layer transmit identification reg */
150struct sci_phy_proto {
151 union {
152 struct {
153 u16 _r_a:1;
154 u16 smp_iport:1;
155 u16 stp_iport:1;
156 u16 ssp_iport:1;
157 u16 _r_b:4;
158 u16 _r_c:1;
159 u16 smp_tport:1;
160 u16 stp_tport:1;
161 u16 ssp_tport:1;
162 u16 _r_d:4;
163 };
164 u16 all;
165 };
166} __packed;
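/* Illustrative sketch: sci_phy_get_protocols() in phy.c fills this union
 * straight from the transmit identification register, after which the
 * individual role bits can be tested:
 *
 *	struct sci_phy_proto proto;
 *
 *	sci_phy_get_protocols(iphy, &proto);
 *	if (proto.ssp_iport)
 *		...	(the phy advertises the SSP initiator role)
 */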
167
168
169/**
170 * struct sci_phy_properties - This structure defines the properties common to
171 * all phys that can be retrieved.
172 *
173 *
174 */
175struct sci_phy_properties {
176 /**
177 * This field specifies the port that currently contains the
178 * supplied phy. This field may be set to NULL
179 * if the phy is not currently contained in a port.
180 */
181 struct isci_port *iport;
182
183 /**
184 * This field specifies the link rate at which the phy is
185 * currently operating.
186 */
187 enum sas_linkrate negotiated_link_rate;
188
189 /**
190 * This field specifies the index of the phy in relation to other
191 * phys within the controller. This index is zero relative.
192 */
193 u8 index;
194};
195
196/**
197 * struct sci_sas_phy_properties - This structure defines the properties,
198 * specific to a SAS phy, that can be retrieved.
199 *
200 *
201 */
202struct sci_sas_phy_properties {
203 /**
204 * This field delineates the Identify Address Frame received
205 * from the remote end point.
206 */
207 struct sas_identify_frame rcvd_iaf;
208
209 /**
210 * This field delineates the Phy capabilities structure received
211 * from the remote end point.
212 */
213 struct sci_phy_cap rcvd_cap;
214
215};
216
217/**
218 * struct sci_sata_phy_properties - This structure defines the properties,
219 * specific to a SATA phy, that can be retrieved.
220 *
221 *
222 */
223struct sci_sata_phy_properties {
224 /**
225 * This field delineates the signature FIS received from the
226 * attached target.
227 */
228 struct dev_to_host_fis signature_fis;
229
230 /**
231 * This field specifies to the user if a port selector is connected
232 * on the specified phy.
233 */
234 bool is_port_selector_present;
235
236};
237
238/**
239 * enum sci_phy_counter_id - This enumeration depicts the various pieces of
240 * optional information that can be retrieved for a specific phy.
241 *
242 *
243 */
244enum sci_phy_counter_id {
245 /**
246 * This PHY information field tracks the number of frames received.
247 */
248 SCIC_PHY_COUNTER_RECEIVED_FRAME,
249
250 /**
251 * This PHY information field tracks the number of frames transmitted.
252 */
253 SCIC_PHY_COUNTER_TRANSMITTED_FRAME,
254
255 /**
256 * This PHY information field tracks the number of DWORDs received.
257 */
258 SCIC_PHY_COUNTER_RECEIVED_FRAME_WORD,
259
260 /**
261 * This PHY information field tracks the number of DWORDs transmitted.
262 */
263 SCIC_PHY_COUNTER_TRANSMITTED_FRAME_DWORD,
264
265 /**
266 * This PHY information field tracks the number of times DWORD
267 * synchronization was lost.
268 */
269 SCIC_PHY_COUNTER_LOSS_OF_SYNC_ERROR,
270
271 /**
272 * This PHY information field tracks the number of received DWORDs with
273 * running disparity errors.
274 */
275 SCIC_PHY_COUNTER_RECEIVED_DISPARITY_ERROR,
276
277 /**
278 * This PHY information field tracks the number of received frames with a
279 * CRC error (not including short or truncated frames).
280 */
281 SCIC_PHY_COUNTER_RECEIVED_FRAME_CRC_ERROR,
282
283 /**
284 * This PHY information field tracks the number of DONE (ACK/NAK TIMEOUT)
285 * primitives received.
286 */
287 SCIC_PHY_COUNTER_RECEIVED_DONE_ACK_NAK_TIMEOUT,
288
289 /**
290 * This PHY information field tracks the number of DONE (ACK/NAK TIMEOUT)
291 * primitives transmitted.
292 */
293 SCIC_PHY_COUNTER_TRANSMITTED_DONE_ACK_NAK_TIMEOUT,
294
295 /**
296 * This PHY information field tracks the number of times the inactivity
297	 * timer for connections on the phy has expired.
298 */
299 SCIC_PHY_COUNTER_INACTIVITY_TIMER_EXPIRED,
300
301 /**
302 * This PHY information field tracks the number of DONE (CREDIT TIMEOUT)
303 * primitives received.
304 */
305 SCIC_PHY_COUNTER_RECEIVED_DONE_CREDIT_TIMEOUT,
306
307 /**
308 * This PHY information field tracks the number of DONE (CREDIT TIMEOUT)
309 * primitives transmitted.
310 */
311 SCIC_PHY_COUNTER_TRANSMITTED_DONE_CREDIT_TIMEOUT,
312
313 /**
314 * This PHY information field tracks the number of CREDIT BLOCKED
315 * primitives received.
316 * @note Depending on remote device implementation, credit blocks
317 * may occur regularly.
318 */
319 SCIC_PHY_COUNTER_RECEIVED_CREDIT_BLOCKED,
320
321 /**
322 * This PHY information field contains the number of short frames
323 * received. A short frame is simply a frame smaller then what is
324 * allowed by either the SAS or SATA specification.
325 */
326 SCIC_PHY_COUNTER_RECEIVED_SHORT_FRAME,
327
328 /**
329 * This PHY information field contains the number of frames received after
330 * credit has been exhausted.
331 */
332 SCIC_PHY_COUNTER_RECEIVED_FRAME_WITHOUT_CREDIT,
333
334 /**
335 * This PHY information field contains the number of frames received after
336 * a DONE has been received.
337 */
338 SCIC_PHY_COUNTER_RECEIVED_FRAME_AFTER_DONE,
339
340 /**
341 * This PHY information field contains the number of times the phy
342 * failed to achieve DWORD synchronization during speed negotiation.
343 */
344 SCIC_PHY_COUNTER_SN_DWORD_SYNC_ERROR
345};
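/*
 * Illustrative sketch (not part of this interface): the counter IDs above
 * are consecutive enum values, so a diagnostic dump can walk them in
 * declaration order. sci_phy_read_counter() below is hypothetical -- this
 * header defines only the IDs, not an accessor for them.
 */
#if 0
static void sci_phy_dump_counters(struct isci_phy *iphy)
{
	enum sci_phy_counter_id id;

	for (id = SCIC_PHY_COUNTER_RECEIVED_FRAME;
	     id <= SCIC_PHY_COUNTER_SN_DWORD_SYNC_ERROR; id++)
		pr_debug("phy %d counter %d = %u\n", iphy->phy_index, id,
			 sci_phy_read_counter(iphy, id));
}
#endif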
346
347enum sci_phy_states {
348 /**
349 * Simply the initial state for the base domain state machine.
350 */
351 SCI_PHY_INITIAL,
352
353 /**
354 * This state indicates that the phy has successfully been stopped.
355 * In this state no new IO operations are permitted on this phy.
356 * This state is entered from the INITIAL state.
357 * This state is entered from the STARTING state.
358 * This state is entered from the READY state.
359 * This state is entered from the RESETTING state.
360 */
361 SCI_PHY_STOPPED,
362
363 /**
364 * This state indicates that the phy is in the process of becoming
365 * ready. In this state no new IO operations are permitted on this phy.
366 * This state is entered from the STOPPED state.
367 * This state is entered from the READY state.
368 * This state is entered from the RESETTING state.
369 */
370 SCI_PHY_STARTING,
371
372 /**
373 * Initial state
374 */
375 SCI_PHY_SUB_INITIAL,
376
377 /**
378 * Wait state for the hardware OSSP event type notification
379 */
380 SCI_PHY_SUB_AWAIT_OSSP_EN,
381
382 /**
383 * Wait state for the PHY speed notification
384 */
385 SCI_PHY_SUB_AWAIT_SAS_SPEED_EN,
386
387 /**
388 * Wait state for the IAF Unsolicited frame notification
389 */
390 SCI_PHY_SUB_AWAIT_IAF_UF,
391
392 /**
393 * Wait state for the request to consume power
394 */
395 SCI_PHY_SUB_AWAIT_SAS_POWER,
396
397 /**
398 * Wait state for request to consume power
399 */
400 SCI_PHY_SUB_AWAIT_SATA_POWER,
401
402 /**
403 * Wait state for the SATA PHY notification
404 */
405 SCI_PHY_SUB_AWAIT_SATA_PHY_EN,
406
407 /**
408 * Wait for the SATA PHY speed notification
409 */
410 SCI_PHY_SUB_AWAIT_SATA_SPEED_EN,
411
412 /**
413 * Wait state for the SIGNATURE FIS unsolicited frame notification
414 */
415 SCI_PHY_SUB_AWAIT_SIG_FIS_UF,
416
417 /**
418 * Exit state for this state machine
419 */
420 SCI_PHY_SUB_FINAL,
421
422 /**
423 * This state indicates that the phy is now ready. Thus, the user
424 * is able to perform IO operations utilizing this phy as long as it
425 * is currently part of a valid port.
426 * This state is entered from the STARTING state.
427 */
428 SCI_PHY_READY,
429
430 /**
431 * This state indicates that the phy is in the process of being reset.
432 * In this state no new IO operations are permitted on this phy.
433 * This state is entered from the READY state.
434 */
435 SCI_PHY_RESETTING,
436
437 /**
438 * Simply the final state for the base phy state machine.
439 */
440 SCI_PHY_FINAL,
441};
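/*
 * For orientation: a successful link bring-up walks the starting substates
 * above in roughly declaration order. A sketch of the two paths implied by
 * the comments (the authoritative transitions live in phy.c, not shown
 * here):
 *
 *   SAS:  SUB_INITIAL -> SUB_AWAIT_OSSP_EN -> SUB_AWAIT_SAS_SPEED_EN ->
 *         SUB_AWAIT_IAF_UF -> SUB_AWAIT_SAS_POWER -> SUB_FINAL -> READY
 *
 *   SATA: SUB_INITIAL -> SUB_AWAIT_OSSP_EN -> SUB_AWAIT_SATA_POWER ->
 *         SUB_AWAIT_SATA_PHY_EN -> SUB_AWAIT_SATA_SPEED_EN ->
 *         SUB_AWAIT_SIG_FIS_UF -> SUB_FINAL -> READY
 */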
442
443void sci_phy_construct(
444 struct isci_phy *iphy,
445 struct isci_port *iport,
446 u8 phy_index);
447
448struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy);
449
450void sci_phy_set_port(
451 struct isci_phy *iphy,
452 struct isci_port *iport);
453
454enum sci_status sci_phy_initialize(
455 struct isci_phy *iphy,
456 struct scu_transport_layer_registers __iomem *transport_layer_registers,
457 struct scu_link_layer_registers __iomem *link_layer_registers);
458
459enum sci_status sci_phy_start(
460 struct isci_phy *iphy);
461
462enum sci_status sci_phy_stop(
463 struct isci_phy *iphy);
464
465enum sci_status sci_phy_reset(
466 struct isci_phy *iphy);
467
468void sci_phy_resume(
469 struct isci_phy *iphy);
470
471void sci_phy_setup_transport(
472 struct isci_phy *iphy,
473 u32 device_id);
474
475enum sci_status sci_phy_event_handler(
476 struct isci_phy *iphy,
477 u32 event_code);
478
479enum sci_status sci_phy_frame_handler(
480 struct isci_phy *iphy,
481 u32 frame_index);
482
483enum sci_status sci_phy_consume_power_handler(
484 struct isci_phy *iphy);
485
486void sci_phy_get_sas_address(
487 struct isci_phy *iphy,
488 struct sci_sas_address *sas_address);
489
490void sci_phy_get_attached_sas_address(
491 struct isci_phy *iphy,
492 struct sci_sas_address *sas_address);
493
494struct sci_phy_proto;
495void sci_phy_get_protocols(
496 struct isci_phy *iphy,
497 struct sci_phy_proto *protocols);
498enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy);
499
500struct isci_host;
501void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index);
502int isci_phy_control(struct asd_sas_phy *phy, enum phy_func func, void *buf);
503
504#endif /* !defined(_ISCI_PHY_H_) */
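/*
 * Usage sketch (not part of the header): how a caller strings the
 * declarations above together to bring up a single phy. The register
 * pointers are assumed to have been mapped by the caller already, and
 * error handling is reduced to early returns.
 */
#if 0
static enum sci_status example_phy_bring_up(struct isci_phy *iphy,
		struct isci_port *iport,
		struct scu_transport_layer_registers __iomem *tl_regs,
		struct scu_link_layer_registers __iomem *ll_regs)
{
	enum sci_status status;

	sci_phy_construct(iphy, iport, 0);	/* phy index 0 */
	status = sci_phy_initialize(iphy, tl_regs, ll_regs);
	if (status != SCI_SUCCESS)
		return status;
	return sci_phy_start(iphy);
}
#endif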
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
new file mode 100644
index 000000000000..8f6f9b77e41a
--- /dev/null
+++ b/drivers/scsi/isci/port.c
@@ -0,0 +1,1757 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#include "isci.h"
57#include "port.h"
58#include "request.h"
59
60#define SCIC_SDS_PORT_HARD_RESET_TIMEOUT (1000)
61#define SCU_DUMMY_INDEX (0xFFFF)
62
63static void isci_port_change_state(struct isci_port *iport, enum isci_status status)
64{
65 unsigned long flags;
66
67 dev_dbg(&iport->isci_host->pdev->dev,
68 "%s: iport = %p, state = 0x%x\n",
69 __func__, iport, status);
70
71 /* XXX pointless lock */
72 spin_lock_irqsave(&iport->state_lock, flags);
73 iport->status = status;
74 spin_unlock_irqrestore(&iport->state_lock, flags);
75}
76
77static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto)
78{
79 u8 index;
80
81 proto->all = 0;
82 for (index = 0; index < SCI_MAX_PHYS; index++) {
83 struct isci_phy *iphy = iport->phy_table[index];
84
85 if (!iphy)
86 continue;
87 sci_phy_get_protocols(iphy, proto);
88 }
89}
90
91static u32 sci_port_get_phys(struct isci_port *iport)
92{
93 u32 index;
94 u32 mask;
95
96 mask = 0;
97 for (index = 0; index < SCI_MAX_PHYS; index++)
98 if (iport->phy_table[index])
99 mask |= (1 << index);
100
101 return mask;
102}
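/* For example, with phys 0 and 2 populated in phy_table the mask is 0x5. */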
103
104/**
105 * sci_port_get_properties() - This method simply returns the properties
106 * regarding the port, such as the physical index, protocols, and SAS address.
107 * @port: this parameter specifies the port for which to retrieve the
108 * properties.
109 * @properties: This parameter specifies the properties structure into which to
110 * copy the requested information.
111 *
112 * Return: SCI_SUCCESS if the specified port is valid and its properties
113 * were copied. SCI_FAILURE_INVALID_PORT if the specified port is not
114 * valid; when that value is returned, no data is copied to the
115 * properties output parameter.
116 */
117static enum sci_status sci_port_get_properties(struct isci_port *iport,
118 struct sci_port_properties *prop)
119{
120 if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT)
121 return SCI_FAILURE_INVALID_PORT;
122
123 prop->index = iport->logical_port_index;
124 prop->phy_mask = sci_port_get_phys(iport);
125 sci_port_get_sas_address(iport, &prop->local.sas_address);
126 sci_port_get_protocols(iport, &prop->local.protocols);
127 sci_port_get_attached_sas_address(iport, &prop->remote.sas_address);
128
129 return SCI_SUCCESS;
130}
131
132static void sci_port_bcn_enable(struct isci_port *iport)
133{
134 struct isci_phy *iphy;
135 u32 val;
136 int i;
137
138 for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) {
139 iphy = iport->phy_table[i];
140 if (!iphy)
141 continue;
142 val = readl(&iphy->link_layer_registers->link_layer_control);
143 /* clear the bit by writing 1. */
144 writel(val, &iphy->link_layer_registers->link_layer_control);
145 }
146}
147
148/* called under sci_lock to stabilize phy:port associations */
149void isci_port_bcn_enable(struct isci_host *ihost, struct isci_port *iport)
150{
151 int i;
152
153 clear_bit(IPORT_BCN_BLOCKED, &iport->flags);
154 wake_up(&ihost->eventq);
155
156 if (!test_and_clear_bit(IPORT_BCN_PENDING, &iport->flags))
157 return;
158
159 for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) {
160 struct isci_phy *iphy = iport->phy_table[i];
161
162 if (!iphy)
163 continue;
164
165 ihost->sas_ha.notify_port_event(&iphy->sas_phy,
166 PORTE_BROADCAST_RCVD);
167 break;
168 }
169}
170
171static void isci_port_bc_change_received(struct isci_host *ihost,
172 struct isci_port *iport,
173 struct isci_phy *iphy)
174{
175 if (iport && test_bit(IPORT_BCN_BLOCKED, &iport->flags)) {
176 dev_dbg(&ihost->pdev->dev,
177 "%s: disabled BCN; isci_phy = %p, sas_phy = %p\n",
178 __func__, iphy, &iphy->sas_phy);
179 set_bit(IPORT_BCN_PENDING, &iport->flags);
180 atomic_inc(&iport->event);
181 wake_up(&ihost->eventq);
182 } else {
183 dev_dbg(&ihost->pdev->dev,
184 "%s: isci_phy = %p, sas_phy = %p\n",
185 __func__, iphy, &iphy->sas_phy);
186
187 ihost->sas_ha.notify_port_event(&iphy->sas_phy,
188 PORTE_BROADCAST_RCVD);
189 }
190 sci_port_bcn_enable(iport);
191}
192
193static void isci_port_link_up(struct isci_host *isci_host,
194 struct isci_port *iport,
195 struct isci_phy *iphy)
196{
197 unsigned long flags;
198 struct sci_port_properties properties;
199 unsigned long success = true;
200
201 BUG_ON(iphy->isci_port != NULL);
202
203 iphy->isci_port = iport;
204
205 dev_dbg(&isci_host->pdev->dev,
206 "%s: isci_port = %p\n",
207 __func__, iport);
208
209 spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
210
211 isci_port_change_state(iphy->isci_port, isci_starting);
212
213 sci_port_get_properties(iport, &properties);
214
215 if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) {
216 u64 attached_sas_address;
217
218 iphy->sas_phy.oob_mode = SATA_OOB_MODE;
219 iphy->sas_phy.frame_rcvd_size = sizeof(struct dev_to_host_fis);
220
221 /*
222 * For direct-attached SATA devices, the SCI core will
223 * automagically assign a SAS address to the end device
224 * for the purpose of creating a port. This SAS address
225 * will not be the same as assigned to the PHY and needs
226 * to be obtained from the struct sci_port_properties instance.
227 */
228 attached_sas_address = properties.remote.sas_address.high;
229 attached_sas_address <<= 32;
230 attached_sas_address |= properties.remote.sas_address.low;
231 swab64s(&attached_sas_address);
232
233 memcpy(&iphy->sas_phy.attached_sas_addr,
234 &attached_sas_address, sizeof(attached_sas_address));
235 } else if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
236 iphy->sas_phy.oob_mode = SAS_OOB_MODE;
237 iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame);
238
239 /* Copy the attached SAS address from the IAF */
240 memcpy(iphy->sas_phy.attached_sas_addr,
241 iphy->frame_rcvd.iaf.sas_addr, SAS_ADDR_SIZE);
242 } else {
243		dev_err(&isci_host->pdev->dev, "%s: unknown target\n", __func__);
244 success = false;
245 }
246
247 iphy->sas_phy.phy->negotiated_linkrate = sci_phy_linkrate(iphy);
248
249 spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
250
251 /* Notify libsas that we have an address frame, if indeed
252 * we've found an SSP, SMP, or STP target */
253 if (success)
254 isci_host->sas_ha.notify_port_event(&iphy->sas_phy,
255 PORTE_BYTES_DMAED);
256}
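/*
 * Byte-order sketch for the direct-attached SATA branch above (standalone
 * illustration; hi/lo values would come from sci_port_properties). The core
 * reports the fabricated SAS address as two u32 halves, while libsas stores
 * attached_sas_addr as a byte array, hence the 64-bit assembly followed by
 * a byte swap -- swab64s() above performs the same swap in place. Types and
 * helpers (u64, memcpy, swab64) are assumed from the headers already
 * included here.
 */
#if 0
static void example_pack_attached_sas_addr(u32 hi, u32 lo,
					   u8 dst[SAS_ADDR_SIZE])
{
	u64 addr = ((u64)hi << 32) | lo;

	addr = swab64(addr);	/* CPU-endian value -> big-endian byte order */
	memcpy(dst, &addr, sizeof(addr));
}
#endif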
257
258
259/**
260 * isci_port_link_down() - This function is called by the sci core when a link
261 * becomes inactive.
262 * @isci_host: This parameter specifies the isci host object.
263 * @phy: This parameter specifies the isci phy whose link has become inactive.
264 * @port: This parameter specifies the isci port containing that phy.
265 *
266 */
267static void isci_port_link_down(struct isci_host *isci_host,
268 struct isci_phy *isci_phy,
269 struct isci_port *isci_port)
270{
271 struct isci_remote_device *isci_device;
272
273 dev_dbg(&isci_host->pdev->dev,
274 "%s: isci_port = %p\n", __func__, isci_port);
275
276 if (isci_port) {
277
278 /* check to see if this is the last phy on this port. */
279 if (isci_phy->sas_phy.port &&
280 isci_phy->sas_phy.port->num_phys == 1) {
281 atomic_inc(&isci_port->event);
282 isci_port_bcn_enable(isci_host, isci_port);
283
284 /* change the state for all devices on this port. The
285 * next task sent to this device will be returned as
286 * SAS_TASK_UNDELIVERED, and the scsi mid layer will
287 * remove the target
288 */
289 list_for_each_entry(isci_device,
290 &isci_port->remote_dev_list,
291 node) {
292 dev_dbg(&isci_host->pdev->dev,
293 "%s: isci_device = %p\n",
294 __func__, isci_device);
295 set_bit(IDEV_GONE, &isci_device->flags);
296 }
297 }
298 isci_port_change_state(isci_port, isci_stopping);
299 }
300
301	/* Notify libsas of the broken link; this will trigger calls to our
302 * isci_port_deformed and isci_dev_gone functions.
303 */
304 sas_phy_disconnected(&isci_phy->sas_phy);
305 isci_host->sas_ha.notify_phy_event(&isci_phy->sas_phy,
306 PHYE_LOSS_OF_SIGNAL);
307
308 isci_phy->isci_port = NULL;
309
310 dev_dbg(&isci_host->pdev->dev,
311 "%s: isci_port = %p - Done\n", __func__, isci_port);
312}
313
314
315/**
316 * isci_port_ready() - This function is called by the sci core when a link
317 * becomes ready.
318 * @isci_host: This parameter specifies the isci host object.
319 * @port: This parameter specifies the sci port with the active link.
320 *
321 */
322static void isci_port_ready(struct isci_host *isci_host, struct isci_port *isci_port)
323{
324 dev_dbg(&isci_host->pdev->dev,
325 "%s: isci_port = %p\n", __func__, isci_port);
326
327 complete_all(&isci_port->start_complete);
328 isci_port_change_state(isci_port, isci_ready);
329 return;
330}
331
332/**
333 * isci_port_not_ready() - This function is called by the sci core when a link
334 * is not ready. All remote devices on this link will be removed if they are
335 * in the stopping state.
336 * @isci_host: This parameter specifies the isci host object.
337 * @port: This parameter specifies the sci port with the active link.
338 *
339 */
340static void isci_port_not_ready(struct isci_host *isci_host, struct isci_port *isci_port)
341{
342 dev_dbg(&isci_host->pdev->dev,
343 "%s: isci_port = %p\n", __func__, isci_port);
344}
345
346static void isci_port_stop_complete(struct isci_host *ihost,
347 struct isci_port *iport,
348 enum sci_status completion_status)
349{
350 dev_dbg(&ihost->pdev->dev, "Port stop complete\n");
351}
352
353/**
354 * isci_port_hard_reset_complete() - This function is called by the sci core
355 * when the hard reset complete notification has been received.
356 * @port: This parameter specifies the sci port with the active link.
357 * @completion_status: This parameter specifies the core status for the reset
358 * process.
359 *
360 */
361static void isci_port_hard_reset_complete(struct isci_port *isci_port,
362 enum sci_status completion_status)
363{
364 dev_dbg(&isci_port->isci_host->pdev->dev,
365 "%s: isci_port = %p, completion_status=%x\n",
366 __func__, isci_port, completion_status);
367
368 /* Save the status of the hard reset from the port. */
369 isci_port->hard_reset_status = completion_status;
370
371 complete_all(&isci_port->hard_reset_complete);
372}
373
374/* This method will return true if the specified phy can be assigned to
375 * this port. The following phys are allowed for each port:
376 *   Port 0 - phys 0, 1, 2, 3; Port 1 - phy 1;
377 *   Port 2 - phys 2, 3;       Port 3 - phy 3.
378 * This method doesn't preclude all configurations. It merely ensures that
379 * a phy is part of the allowable set of phy identifiers for that port.
380 * For example, one could assign phy 3 to port 0 and no other phys. Please
381 * refer to sci_port_is_phy_mask_valid() for information regarding whether
382 * the phy_mask for a port can be supported.
383 * Return: true if this is a valid phy assignment for the port, false if not.
384 */
385bool sci_port_is_valid_phy_assignment(struct isci_port *iport, u32 phy_index)
386{
387 struct isci_host *ihost = iport->owning_controller;
388 struct sci_user_parameters *user = &ihost->user_parameters;
389
390 /* Initialize to invalid value. */
391 u32 existing_phy_index = SCI_MAX_PHYS;
392 u32 index;
393
394 if ((iport->physical_port_index == 1) && (phy_index != 1))
395 return false;
396
397 if (iport->physical_port_index == 3 && phy_index != 3)
398 return false;
399
400 if (iport->physical_port_index == 2 &&
401 (phy_index == 0 || phy_index == 1))
402 return false;
403
404 for (index = 0; index < SCI_MAX_PHYS; index++)
405 if (iport->phy_table[index] && index != phy_index)
406 existing_phy_index = index;
407
408 /* Ensure that all of the phys in the port are capable of
409 * operating at the same maximum link rate.
410 */
411 if (existing_phy_index < SCI_MAX_PHYS &&
412 user->phys[phy_index].max_speed_generation !=
413 user->phys[existing_phy_index].max_speed_generation)
414 return false;
415
416 return true;
417}
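/*
 * Worked examples for the rule above, restated standalone (index
 * constraints only; the max_speed_generation comparison is omitted):
 */
#if 0
static bool example_index_rule(u32 port, u32 phy)
{
	if (port == 1 && phy != 1)
		return false;
	if (port == 2 && (phy == 0 || phy == 1))
		return false;
	if (port == 3 && phy != 3)
		return false;
	return true;	/* port 0 accepts any phy index */
}
/*
 * example_index_rule(0, 3) == true    example_index_rule(1, 0) == false
 * example_index_rule(2, 1) == false   example_index_rule(3, 3) == true
 */
#endif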
418
419/**
420 * sci_port_is_phy_mask_valid()
421 * @sci_port: This is the port object for which to determine if the phy mask
422 * can be supported.
423 *
424 * This method will return true if the port's phy mask can be supported by
425 * the SCU. The following are the valid PHY mask configurations for each
426 * port, where brackets denote optional wide-port members:
427 *   Port 0 - [[3 2] 1] 0; Port 1 - [1];
428 *   Port 2 - [[3] 2];     Port 3 - [3]
429 * Return: true if the phy mask is valid for the port, false if it is not.
430 */
431static bool sci_port_is_phy_mask_valid(
432 struct isci_port *iport,
433 u32 phy_mask)
434{
435 if (iport->physical_port_index == 0) {
436 if (((phy_mask & 0x0F) == 0x0F)
437 || ((phy_mask & 0x03) == 0x03)
438 || ((phy_mask & 0x01) == 0x01)
439 || (phy_mask == 0))
440 return true;
441 } else if (iport->physical_port_index == 1) {
442 if (((phy_mask & 0x02) == 0x02)
443 || (phy_mask == 0))
444 return true;
445 } else if (iport->physical_port_index == 2) {
446 if (((phy_mask & 0x0C) == 0x0C)
447 || ((phy_mask & 0x04) == 0x04)
448 || (phy_mask == 0))
449 return true;
450 } else if (iport->physical_port_index == 3) {
451 if (((phy_mask & 0x08) == 0x08)
452 || (phy_mask == 0))
453 return true;
454 }
455
456 return false;
457}
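/*
 * Reading the checks above literally: port 0 passes if the mask is empty,
 * or includes phy 0, phys 0-1, or phys 0-3; port 1 passes if the mask is
 * empty or includes phy 1; port 2 if empty or including phy 2 or phys 2-3;
 * port 3 if empty or including phy 3. For example, phy_mask 0x3 (phys 0
 * and 1) is valid for port 0 but not for port 2.
 */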
458
459/*
460 * This method retrieves a currently active (i.e. connected) phy contained in
461 * the port. Currently, the lowest order phy that is connected is returned.
462 * This method returns a pointer to a struct isci_phy object, or NULL if
463 * there are no currently active (i.e. connected to a remote end point)
464 * phys contained in the port. Any non-NULL value is a phy object that is
465 * active in the port.
466 */
467static struct isci_phy *sci_port_get_a_connected_phy(struct isci_port *iport)
468{
469 u32 index;
470 struct isci_phy *iphy;
471
472 for (index = 0; index < SCI_MAX_PHYS; index++) {
473 /* Ensure that the phy is both part of the port and currently
474 * connected to the remote end-point.
475 */
476 iphy = iport->phy_table[index];
477 if (iphy && sci_port_active_phy(iport, iphy))
478 return iphy;
479 }
480
481 return NULL;
482}
483
484static enum sci_status sci_port_set_phy(struct isci_port *iport, struct isci_phy *iphy)
485{
486	/* Check to see if we can add this phy to the port. That means
487	 * the phy is not already part of a port and the port does
488	 * not already have a phy assigned to this phy index.
489 */
490 if (!iport->phy_table[iphy->phy_index] &&
491 !phy_get_non_dummy_port(iphy) &&
492 sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
493 /* Phy is being added in the stopped state so we are in MPC mode
494 * make logical port index = physical port index
495 */
496 iport->logical_port_index = iport->physical_port_index;
497 iport->phy_table[iphy->phy_index] = iphy;
498 sci_phy_set_port(iphy, iport);
499
500 return SCI_SUCCESS;
501 }
502
503 return SCI_FAILURE;
504}
505
506static enum sci_status sci_port_clear_phy(struct isci_port *iport, struct isci_phy *iphy)
507{
508 /* Make sure that this phy is part of this port */
509 if (iport->phy_table[iphy->phy_index] == iphy &&
510 phy_get_non_dummy_port(iphy) == iport) {
511 struct isci_host *ihost = iport->owning_controller;
512
513 /* Yep it is assigned to this port so remove it */
514 sci_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]);
515 iport->phy_table[iphy->phy_index] = NULL;
516 return SCI_SUCCESS;
517 }
518
519 return SCI_FAILURE;
520}
521
522void sci_port_get_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
523{
524 u32 index;
525
526 sas->high = 0;
527 sas->low = 0;
528 for (index = 0; index < SCI_MAX_PHYS; index++)
529 if (iport->phy_table[index])
530 sci_phy_get_sas_address(iport->phy_table[index], sas);
531}
532
533void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
534{
535 struct isci_phy *iphy;
536
537 /*
538 * Ensure that the phy is both part of the port and currently
539 * connected to the remote end-point.
540 */
541 iphy = sci_port_get_a_connected_phy(iport);
542 if (iphy) {
543 if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) {
544 sci_phy_get_attached_sas_address(iphy, sas);
545 } else {
546 sci_phy_get_sas_address(iphy, sas);
547 sas->low += iphy->phy_index;
548 }
549 } else {
550 sas->high = 0;
551 sas->low = 0;
552 }
553}
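/*
 * For example, a direct-attached SATA disk on phy 2 is reported at the
 * phy's own SAS address plus 2 in the low dword, so each direct-attached
 * SATA device ends up with a unique fabricated address.
 */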
554
555/**
556 * sci_port_construct_dummy_rnc() - create dummy rnc for si workaround
557 *
558 * @sci_port: logical port on which we need to create the remote node context
559 * @rni: remote node index for this remote node context.
560 *
561 * This routine will construct a dummy remote node context data structure.
562 * This structure will be posted to the hardware to work around a scheduler
563 * error in the hardware.
564 */
565static void sci_port_construct_dummy_rnc(struct isci_port *iport, u16 rni)
566{
567 union scu_remote_node_context *rnc;
568
569 rnc = &iport->owning_controller->remote_node_context_table[rni];
570
571 memset(rnc, 0, sizeof(union scu_remote_node_context));
572
573 rnc->ssp.remote_sas_address_hi = 0;
574 rnc->ssp.remote_sas_address_lo = 0;
575
576 rnc->ssp.remote_node_index = rni;
577 rnc->ssp.remote_node_port_width = 1;
578 rnc->ssp.logical_port_index = iport->physical_port_index;
579
580 rnc->ssp.nexus_loss_timer_enable = false;
581 rnc->ssp.check_bit = false;
582 rnc->ssp.is_valid = true;
583 rnc->ssp.is_remote_node_context = true;
584 rnc->ssp.function_number = 0;
585 rnc->ssp.arbitration_wait_time = 0;
586}
587
588/*
589 * construct a dummy task context data structure. This
590 * structure will be posted to the hardware to work around a scheduler error
591 * in the hardware.
592 */
593static void sci_port_construct_dummy_task(struct isci_port *iport, u16 tag)
594{
595 struct isci_host *ihost = iport->owning_controller;
596 struct scu_task_context *task_context;
597
598 task_context = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
599 memset(task_context, 0, sizeof(struct scu_task_context));
600
601 task_context->initiator_request = 1;
602 task_context->connection_rate = 1;
603 task_context->logical_port_index = iport->physical_port_index;
604 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
605 task_context->task_index = ISCI_TAG_TCI(tag);
606 task_context->valid = SCU_TASK_CONTEXT_VALID;
607 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
608 task_context->remote_node_index = iport->reserved_rni;
609 task_context->do_not_dma_ssp_good_response = 1;
610 task_context->task_phase = 0x01;
611}
612
613static void sci_port_destroy_dummy_resources(struct isci_port *iport)
614{
615 struct isci_host *ihost = iport->owning_controller;
616
617 if (iport->reserved_tag != SCI_CONTROLLER_INVALID_IO_TAG)
618 isci_free_tag(ihost, iport->reserved_tag);
619
620 if (iport->reserved_rni != SCU_DUMMY_INDEX)
621 sci_remote_node_table_release_remote_node_index(&ihost->available_remote_nodes,
622 1, iport->reserved_rni);
623
624 iport->reserved_rni = SCU_DUMMY_INDEX;
625 iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
626}
627
628void sci_port_setup_transports(struct isci_port *iport, u32 device_id)
629{
630 u8 index;
631
632 for (index = 0; index < SCI_MAX_PHYS; index++) {
633 if (iport->active_phy_mask & (1 << index))
634 sci_phy_setup_transport(iport->phy_table[index], device_id);
635 }
636}
637
638static void sci_port_activate_phy(struct isci_port *iport, struct isci_phy *iphy,
639 bool do_notify_user)
640{
641 struct isci_host *ihost = iport->owning_controller;
642
643 if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA)
644 sci_phy_resume(iphy);
645
646 iport->active_phy_mask |= 1 << iphy->phy_index;
647
648 sci_controller_clear_invalid_phy(ihost, iphy);
649
650 if (do_notify_user == true)
651 isci_port_link_up(ihost, iport, iphy);
652}
653
654void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
655 bool do_notify_user)
656{
657 struct isci_host *ihost = iport->owning_controller;
658
659 iport->active_phy_mask &= ~(1 << iphy->phy_index);
660
661 iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
662
663 /* Re-assign the phy back to the LP as if it were a narrow port */
664 writel(iphy->phy_index,
665 &iport->port_pe_configuration_register[iphy->phy_index]);
666
667 if (do_notify_user == true)
668 isci_port_link_down(ihost, iphy, iport);
669}
670
671static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *iphy)
672{
673 struct isci_host *ihost = iport->owning_controller;
674
675 /*
676	 * Check to see if we have already reported this link as bad and if
677 * not go ahead and tell the SCI_USER that we have discovered an
678 * invalid link.
679 */
680 if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) {
681 ihost->invalid_phy_mask |= 1 << iphy->phy_index;
682 dev_warn(&ihost->pdev->dev, "Invalid link up!\n");
683 }
684}
685
686static bool is_port_ready_state(enum sci_port_states state)
687{
688 switch (state) {
689 case SCI_PORT_READY:
690 case SCI_PORT_SUB_WAITING:
691 case SCI_PORT_SUB_OPERATIONAL:
692 case SCI_PORT_SUB_CONFIGURING:
693 return true;
694 default:
695 return false;
696 }
697}
698
699/* flag dummy rnc handling when exiting a ready state */
700static void port_state_machine_change(struct isci_port *iport,
701 enum sci_port_states state)
702{
703 struct sci_base_state_machine *sm = &iport->sm;
704 enum sci_port_states old_state = sm->current_state_id;
705
706 if (is_port_ready_state(old_state) && !is_port_ready_state(state))
707 iport->ready_exit = true;
708
709 sci_change_state(sm, state);
710 iport->ready_exit = false;
711}
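/*
 * Note that ready_exit is only true for the duration of the
 * sci_change_state() call; the ready-substate exit handlers below test it
 * to tell a genuine ready -> not-ready transition (where the dummy remote
 * node must be invalidated) from a move between ready substates.
 */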
712
713/**
714 * sci_port_general_link_up_handler - phy can be assigned to port?
715 * @sci_port: This is the sci_port object that has a phy that has gone
716 * link up.
717 * @sci_phy: This is the struct isci_phy object that has gone link up.
718 * @do_notify_user: This parameter specifies whether to inform the user (via
719 * sci_port_link_up()) as to the fact that a new phy has become ready.
720 *
721 * Determine if this phy can be assigned to this port. If the phy is not
722 * a valid PHY for this port then the function will notify the user.
723 * A PHY can only be part of a port if its attached SAS ADDRESS is the
724 * same as that of all other PHYs in the same port.
725 */
726static void sci_port_general_link_up_handler(struct isci_port *iport,
727 struct isci_phy *iphy,
728 bool do_notify_user)
729{
730 struct sci_sas_address port_sas_address;
731 struct sci_sas_address phy_sas_address;
732
733 sci_port_get_attached_sas_address(iport, &port_sas_address);
734 sci_phy_get_attached_sas_address(iphy, &phy_sas_address);
735
736 /* If the SAS address of the new phy matches the SAS address of
737 * other phys in the port OR this is the first phy in the port,
738 * then activate the phy and allow it to be used for operations
739 * in this port.
740 */
741 if ((phy_sas_address.high == port_sas_address.high &&
742 phy_sas_address.low == port_sas_address.low) ||
743 iport->active_phy_mask == 0) {
744 struct sci_base_state_machine *sm = &iport->sm;
745
746 sci_port_activate_phy(iport, iphy, do_notify_user);
747 if (sm->current_state_id == SCI_PORT_RESETTING)
748 port_state_machine_change(iport, SCI_PORT_READY);
749 } else
750 sci_port_invalid_link_up(iport, iphy);
751}
752
753
754
755/**
756 * This method returns false if the port has exactly one phy object assigned
757 * to it. If there are no phys, or more than one phy, then the method
758 * returns true.
759 * @sci_port: The port for which the wide port condition is to be checked.
760 *
761 * Return: true if this is a wide port, false if it is a narrow (single-phy)
762 * port.
763 */
764static bool sci_port_is_wide(struct isci_port *iport)
765{
766 u32 index;
767 u32 phy_count = 0;
768
769 for (index = 0; index < SCI_MAX_PHYS; index++) {
770 if (iport->phy_table[index] != NULL) {
771 phy_count++;
772 }
773 }
774
775 return phy_count != 1;
776}
777
778/**
779 * This method is called by the PHY object when the link is detected. If the
780 * port wants the PHY to continue on to the link up state then the port
781 * layer must return true. If the port object returns false the phy object
782 * must halt its attempt to go link up.
783 * @sci_port: The port associated with the phy object.
784 * @sci_phy: The phy object that is trying to go link up.
785 *
786 * Return: true if the phy can continue on to the ready state, false if it
787 * cannot. This notification is in place for wide ports and direct attached
788 * phys. Since there are no wide ported SATA devices, a SATA phy that goes
789 * link up on a wide port would constitute an invalid port configuration,
790 * and is rejected here.
791 */
792bool sci_port_link_detected(
793 struct isci_port *iport,
794 struct isci_phy *iphy)
795{
796 if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
797 (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) &&
798 sci_port_is_wide(iport)) {
799 sci_port_invalid_link_up(iport, iphy);
800
801 return false;
802 }
803
804 return true;
805}
806
807static void port_timeout(unsigned long data)
808{
809 struct sci_timer *tmr = (struct sci_timer *)data;
810 struct isci_port *iport = container_of(tmr, typeof(*iport), timer);
811 struct isci_host *ihost = iport->owning_controller;
812 unsigned long flags;
813 u32 current_state;
814
815 spin_lock_irqsave(&ihost->scic_lock, flags);
816
817 if (tmr->cancel)
818 goto done;
819
820 current_state = iport->sm.current_state_id;
821
822 if (current_state == SCI_PORT_RESETTING) {
823 /* if the port is still in the resetting state then the timeout
824 * fired before the reset completed.
825 */
826 port_state_machine_change(iport, SCI_PORT_FAILED);
827 } else if (current_state == SCI_PORT_STOPPED) {
828		/* if the port is stopped then the start request failed. In this
829 * case stay in the stopped state.
830 */
831 dev_err(sciport_to_dev(iport),
832			"%s: SCIC Port 0x%p failed to start before timeout.\n",
833 __func__,
834 iport);
835 } else if (current_state == SCI_PORT_STOPPING) {
836 /* if the port is still stopping then the stop has not completed */
837 isci_port_stop_complete(iport->owning_controller,
838 iport,
839 SCI_FAILURE_TIMEOUT);
840 } else {
841 /* The port is in the ready state and we have a timer
842		 * reporting a timeout; this should not happen.
843 */
844 dev_err(sciport_to_dev(iport),
845 "%s: SCIC Port 0x%p is processing a timeout operation "
846 "in state %d.\n", __func__, iport, current_state);
847 }
848
849done:
850 spin_unlock_irqrestore(&ihost->scic_lock, flags);
851}
852
853/* --------------------------------------------------------------------------- */
854
855/**
856 * This function updates the hardware's VIIT entry for this port.
857 *
858 *
859 */
860static void sci_port_update_viit_entry(struct isci_port *iport)
861{
862 struct sci_sas_address sas_address;
863
864 sci_port_get_sas_address(iport, &sas_address);
865
866 writel(sas_address.high,
867 &iport->viit_registers->initiator_sas_address_hi);
868 writel(sas_address.low,
869 &iport->viit_registers->initiator_sas_address_lo);
870
871	/* This value gets cleared just in case it's not already cleared */
872 writel(0, &iport->viit_registers->reserved);
873
874 /* We are required to update the status register last */
875 writel(SCU_VIIT_ENTRY_ID_VIIT |
876 SCU_VIIT_IPPT_INITIATOR |
877 ((1 << iport->physical_port_index) << SCU_VIIT_ENTRY_LPVIE_SHIFT) |
878 SCU_VIIT_STATUS_ALL_VALID,
879 &iport->viit_registers->status);
880}
881
882enum sas_linkrate sci_port_get_max_allowed_speed(struct isci_port *iport)
883{
884 u16 index;
885 struct isci_phy *iphy;
886 enum sas_linkrate max_allowed_speed = SAS_LINK_RATE_6_0_GBPS;
887
888	/* Loop through all of the phys in this port and find the phy
889	 * with the lowest maximum link rate.
890	 */
891 for (index = 0; index < SCI_MAX_PHYS; index++) {
892 iphy = iport->phy_table[index];
893 if (iphy && sci_port_active_phy(iport, iphy) &&
894 iphy->max_negotiated_speed < max_allowed_speed)
895 max_allowed_speed = iphy->max_negotiated_speed;
896 }
897
898 return max_allowed_speed;
899}
900
901static void sci_port_suspend_port_task_scheduler(struct isci_port *iport)
902{
903 u32 pts_control_value;
904
905 pts_control_value = readl(&iport->port_task_scheduler_registers->control);
906 pts_control_value |= SCU_PTSxCR_GEN_BIT(SUSPEND);
907 writel(pts_control_value, &iport->port_task_scheduler_registers->control);
908}
909
910/**
911 * sci_port_post_dummy_request() - post dummy/workaround request
912 * @sci_port: port to post task
913 *
914 * Prevent the hardware scheduler from posting new requests to the front
915 * of the scheduler queue, which would cause a starvation problem for
916 * currently ongoing requests.
917 *
918 */
919static void sci_port_post_dummy_request(struct isci_port *iport)
920{
921 struct isci_host *ihost = iport->owning_controller;
922 u16 tag = iport->reserved_tag;
923 struct scu_task_context *tc;
924 u32 command;
925
926 tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
927 tc->abort = 0;
928
929 command = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
930 iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
931 ISCI_TAG_TCI(tag);
932
933 sci_controller_post_request(ihost, command);
934}
935
936/**
937 * This routine will abort the dummy request. This will allow the hardware to
938 * power down parts of the silicon to save power.
939 *
940 * @sci_port: The port on which the task must be aborted.
941 *
942 */
943static void sci_port_abort_dummy_request(struct isci_port *iport)
944{
945 struct isci_host *ihost = iport->owning_controller;
946 u16 tag = iport->reserved_tag;
947 struct scu_task_context *tc;
948 u32 command;
949
950 tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
951 tc->abort = 1;
952
953 command = SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT |
954 iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
955 ISCI_TAG_TCI(tag);
956
957 sci_controller_post_request(ihost, command);
958}
959
960/**
961 *
962 * @sci_port: This is the struct isci_port object to resume.
963 *
964 * This method will resume the port task scheduler for this port object.
965 */
966static void
967sci_port_resume_port_task_scheduler(struct isci_port *iport)
968{
969 u32 pts_control_value;
970
971 pts_control_value = readl(&iport->port_task_scheduler_registers->control);
972 pts_control_value &= ~SCU_PTSxCR_GEN_BIT(SUSPEND);
973 writel(pts_control_value, &iport->port_task_scheduler_registers->control);
974}
975
976static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine *sm)
977{
978 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
979
980 sci_port_suspend_port_task_scheduler(iport);
981
982 iport->not_ready_reason = SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS;
983
984 if (iport->active_phy_mask != 0) {
985 /* At least one of the phys on the port is ready */
986 port_state_machine_change(iport,
987 SCI_PORT_SUB_OPERATIONAL);
988 }
989}
990
991static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm)
992{
993 u32 index;
994 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
995 struct isci_host *ihost = iport->owning_controller;
996
997 isci_port_ready(ihost, iport);
998
999 for (index = 0; index < SCI_MAX_PHYS; index++) {
1000 if (iport->phy_table[index]) {
1001 writel(iport->physical_port_index,
1002 &iport->port_pe_configuration_register[
1003 iport->phy_table[index]->phy_index]);
1004 }
1005 }
1006
1007 sci_port_update_viit_entry(iport);
1008
1009 sci_port_resume_port_task_scheduler(iport);
1010
1011 /*
1012 * Post the dummy task for the port so the hardware can schedule
1013 * io correctly
1014 */
1015 sci_port_post_dummy_request(iport);
1016}
1017
1018static void sci_port_invalidate_dummy_remote_node(struct isci_port *iport)
1019{
1020 struct isci_host *ihost = iport->owning_controller;
1021 u8 phys_index = iport->physical_port_index;
1022 union scu_remote_node_context *rnc;
1023 u16 rni = iport->reserved_rni;
1024 u32 command;
1025
1026 rnc = &ihost->remote_node_context_table[rni];
1027
1028 rnc->ssp.is_valid = false;
1029
1030 /* ensure the preceding tc abort request has reached the
1031 * controller and give it ample time to act before posting the rnc
1032 * invalidate
1033 */
1034 readl(&ihost->smu_registers->interrupt_status); /* flush */
1035 udelay(10);
1036
1037 command = SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE |
1038 phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
1039
1040 sci_controller_post_request(ihost, command);
1041}
1042
1043/**
1044 *
1045 * @object: This is the object which is cast to a struct isci_port object.
1046 *
1047 * This method will perform the actions required by the struct isci_port on
1048 * exiting the SCI_PORT_SUB_OPERATIONAL state. This function reports
1049 * the port not ready and suspends the port task scheduler.
1050 */
1051static void sci_port_ready_substate_operational_exit(struct sci_base_state_machine *sm)
1052{
1053 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1054 struct isci_host *ihost = iport->owning_controller;
1055
1056 /*
1057	 * Kill the dummy task for this port; if it has not yet been posted
1058	 * the hardware will treat this as a NOP and just return abort
1059 * complete.
1060 */
1061 sci_port_abort_dummy_request(iport);
1062
1063 isci_port_not_ready(ihost, iport);
1064
1065 if (iport->ready_exit)
1066 sci_port_invalidate_dummy_remote_node(iport);
1067}
1068
1069static void sci_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm)
1070{
1071 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1072 struct isci_host *ihost = iport->owning_controller;
1073
1074 if (iport->active_phy_mask == 0) {
1075 isci_port_not_ready(ihost, iport);
1076
1077 port_state_machine_change(iport,
1078 SCI_PORT_SUB_WAITING);
1079 } else if (iport->started_request_count == 0)
1080 port_state_machine_change(iport,
1081 SCI_PORT_SUB_OPERATIONAL);
1082}
1083
1084static void sci_port_ready_substate_configuring_exit(struct sci_base_state_machine *sm)
1085{
1086 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1087
1088 sci_port_suspend_port_task_scheduler(iport);
1089 if (iport->ready_exit)
1090 sci_port_invalidate_dummy_remote_node(iport);
1091}
1092
1093enum sci_status sci_port_start(struct isci_port *iport)
1094{
1095 struct isci_host *ihost = iport->owning_controller;
1096 enum sci_status status = SCI_SUCCESS;
1097 enum sci_port_states state;
1098 u32 phy_mask;
1099
1100 state = iport->sm.current_state_id;
1101 if (state != SCI_PORT_STOPPED) {
1102 dev_warn(sciport_to_dev(iport),
1103 "%s: in wrong state: %d\n", __func__, state);
1104 return SCI_FAILURE_INVALID_STATE;
1105 }
1106
1107 if (iport->assigned_device_count > 0) {
1108 /* TODO This is a start failure operation because
1109 * there are still devices assigned to this port.
1110 * There must be no devices assigned to a port on a
1111 * start operation.
1112 */
1113 return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
1114 }
1115
1116 if (iport->reserved_rni == SCU_DUMMY_INDEX) {
1117 u16 rni = sci_remote_node_table_allocate_remote_node(
1118 &ihost->available_remote_nodes, 1);
1119
1120 if (rni != SCU_DUMMY_INDEX)
1121 sci_port_construct_dummy_rnc(iport, rni);
1122 else
1123 status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
1124 iport->reserved_rni = rni;
1125 }
1126
1127 if (iport->reserved_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
1128 u16 tag;
1129
1130 tag = isci_alloc_tag(ihost);
1131 if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
1132 status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
1133 else
1134 sci_port_construct_dummy_task(iport, tag);
1135 iport->reserved_tag = tag;
1136 }
1137
1138 if (status == SCI_SUCCESS) {
1139 phy_mask = sci_port_get_phys(iport);
1140
1141 /*
1142 * There are one or more phys assigned to this port. Make sure
1143 * the port's phy mask is in fact legal and supported by the
1144 * silicon.
1145 */
1146 if (sci_port_is_phy_mask_valid(iport, phy_mask) == true) {
1147 port_state_machine_change(iport,
1148 SCI_PORT_READY);
1149
1150 return SCI_SUCCESS;
1151 }
1152 status = SCI_FAILURE;
1153 }
1154
1155 if (status != SCI_SUCCESS)
1156 sci_port_destroy_dummy_resources(iport);
1157
1158 return status;
1159}
1160
1161enum sci_status sci_port_stop(struct isci_port *iport)
1162{
1163 enum sci_port_states state;
1164
1165 state = iport->sm.current_state_id;
1166 switch (state) {
1167 case SCI_PORT_STOPPED:
1168 return SCI_SUCCESS;
1169 case SCI_PORT_SUB_WAITING:
1170 case SCI_PORT_SUB_OPERATIONAL:
1171 case SCI_PORT_SUB_CONFIGURING:
1172 case SCI_PORT_RESETTING:
1173 port_state_machine_change(iport,
1174 SCI_PORT_STOPPING);
1175 return SCI_SUCCESS;
1176 default:
1177 dev_warn(sciport_to_dev(iport),
1178 "%s: in wrong state: %d\n", __func__, state);
1179 return SCI_FAILURE_INVALID_STATE;
1180 }
1181}
1182
1183static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout)
1184{
1185 enum sci_status status = SCI_FAILURE_INVALID_PHY;
1186 struct isci_phy *iphy = NULL;
1187 enum sci_port_states state;
1188 u32 phy_index;
1189
1190 state = iport->sm.current_state_id;
1191 if (state != SCI_PORT_SUB_OPERATIONAL) {
1192 dev_warn(sciport_to_dev(iport),
1193 "%s: in wrong state: %d\n", __func__, state);
1194 return SCI_FAILURE_INVALID_STATE;
1195 }
1196
1197 /* Select a phy on which we can send the hard reset request. */
1198 for (phy_index = 0; phy_index < SCI_MAX_PHYS && !iphy; phy_index++) {
1199 iphy = iport->phy_table[phy_index];
1200 if (iphy && !sci_port_active_phy(iport, iphy)) {
1201 /*
1202			 * We found a phy but it is not ready; select a
1203			 * different phy.
1204 */
1205 iphy = NULL;
1206 }
1207 }
1208
1209 /* If we have a phy then go ahead and start the reset procedure */
1210 if (!iphy)
1211 return status;
1212 status = sci_phy_reset(iphy);
1213
1214 if (status != SCI_SUCCESS)
1215 return status;
1216
1217 sci_mod_timer(&iport->timer, timeout);
1218 iport->not_ready_reason = SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED;
1219
1220 port_state_machine_change(iport, SCI_PORT_RESETTING);
1221 return SCI_SUCCESS;
1222}
1223
1224/**
1225 * sci_port_add_phy() -
1226 * @sci_port: This parameter specifies the port in which the phy will be added.
1227 * @sci_phy: This parameter is the phy which is to be added to the port.
1228 *
1229 * This method will add a PHY to the selected port. This method returns an
1230 * enum sci_status. SCI_SUCCESS indicates the phy has been added to the port;
1231 * any other status indicates a failure to add the phy to the port.
1232 */
1233enum sci_status sci_port_add_phy(struct isci_port *iport,
1234 struct isci_phy *iphy)
1235{
1236 enum sci_status status;
1237 enum sci_port_states state;
1238
1239 state = iport->sm.current_state_id;
1240 switch (state) {
1241 case SCI_PORT_STOPPED: {
1242 struct sci_sas_address port_sas_address;
1243
1244 /* Read the port assigned SAS Address if there is one */
1245 sci_port_get_sas_address(iport, &port_sas_address);
1246
1247 if (port_sas_address.high != 0 && port_sas_address.low != 0) {
1248 struct sci_sas_address phy_sas_address;
1249
1250 /* Make sure that the PHY SAS Address matches the SAS Address
1251 * for this port
1252 */
1253 sci_phy_get_sas_address(iphy, &phy_sas_address);
1254
1255 if (port_sas_address.high != phy_sas_address.high ||
1256 port_sas_address.low != phy_sas_address.low)
1257 return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
1258 }
1259 return sci_port_set_phy(iport, iphy);
1260 }
1261 case SCI_PORT_SUB_WAITING:
1262 case SCI_PORT_SUB_OPERATIONAL:
1263 status = sci_port_set_phy(iport, iphy);
1264
1265 if (status != SCI_SUCCESS)
1266 return status;
1267
1268 sci_port_general_link_up_handler(iport, iphy, true);
1269 iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
1270 port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING);
1271
1272 return status;
1273 case SCI_PORT_SUB_CONFIGURING:
1274 status = sci_port_set_phy(iport, iphy);
1275
1276 if (status != SCI_SUCCESS)
1277 return status;
1278 sci_port_general_link_up_handler(iport, iphy, true);
1279
1280 /* Re-enter the configuring state since this may be the last phy in
1281 * the port.
1282 */
1283 port_state_machine_change(iport,
1284 SCI_PORT_SUB_CONFIGURING);
1285 return SCI_SUCCESS;
1286 default:
1287 dev_warn(sciport_to_dev(iport),
1288 "%s: in wrong state: %d\n", __func__, state);
1289 return SCI_FAILURE_INVALID_STATE;
1290 }
1291}
1292
1293/**
1294 * sci_port_remove_phy() -
1295 * @sci_port: This parameter specifies the port from which the phy will be removed.
1296 * @sci_phy: This parameter is the phy which is to be removed from the port.
1297 *
1298 * This method will remove the PHY from the selected PORT. This method returns
1299 * an enum sci_status. SCI_SUCCESS indicates the phy has been removed from the
1300 * port; any other status indicates a failure to remove the phy.
1301 */
1302enum sci_status sci_port_remove_phy(struct isci_port *iport,
1303 struct isci_phy *iphy)
1304{
1305 enum sci_status status;
1306 enum sci_port_states state;
1307
1308 state = iport->sm.current_state_id;
1309
1310 switch (state) {
1311 case SCI_PORT_STOPPED:
1312 return sci_port_clear_phy(iport, iphy);
1313 case SCI_PORT_SUB_OPERATIONAL:
1314 status = sci_port_clear_phy(iport, iphy);
1315 if (status != SCI_SUCCESS)
1316 return status;
1317
1318 sci_port_deactivate_phy(iport, iphy, true);
1319 iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
1320 port_state_machine_change(iport,
1321 SCI_PORT_SUB_CONFIGURING);
1322 return SCI_SUCCESS;
1323 case SCI_PORT_SUB_CONFIGURING:
1324 status = sci_port_clear_phy(iport, iphy);
1325
1326 if (status != SCI_SUCCESS)
1327 return status;
1328 sci_port_deactivate_phy(iport, iphy, true);
1329
1330 /* Re-enter the configuring state since this may be the last phy in
1331 * the port
1332 */
1333 port_state_machine_change(iport,
1334 SCI_PORT_SUB_CONFIGURING);
1335 return SCI_SUCCESS;
1336 default:
1337 dev_warn(sciport_to_dev(iport),
1338 "%s: in wrong state: %d\n", __func__, state);
1339 return SCI_FAILURE_INVALID_STATE;
1340 }
1341}
1342
1343enum sci_status sci_port_link_up(struct isci_port *iport,
1344 struct isci_phy *iphy)
1345{
1346 enum sci_port_states state;
1347
1348 state = iport->sm.current_state_id;
1349 switch (state) {
1350 case SCI_PORT_SUB_WAITING:
1351 /* Since this is the first phy going link up for the port we
1352 * can just enable it and continue
1353 */
1354 sci_port_activate_phy(iport, iphy, true);
1355
1356 port_state_machine_change(iport,
1357 SCI_PORT_SUB_OPERATIONAL);
1358 return SCI_SUCCESS;
1359 case SCI_PORT_SUB_OPERATIONAL:
1360 sci_port_general_link_up_handler(iport, iphy, true);
1361 return SCI_SUCCESS;
1362 case SCI_PORT_RESETTING:
1363 /* TODO We should make sure that the phy that has gone
1364 * link up is the same one on which we sent the reset. It is
1365 * possible that the phy on which we sent the reset is not the
1366 * one that has gone link up and we want to make sure that
1367 * phy being reset comes back. Consider the case where a
1368		 * reset is sent, but before the hardware processes the reset, it
1369		 * gets a link up on the port because of a hot plug event.
1370		 * Because of the reset request this phy will go link down
1371		 * almost immediately.
1372 */
1373
1374 /* In the resetting state we don't notify the user regarding
1375 * link up and link down notifications.
1376 */
1377 sci_port_general_link_up_handler(iport, iphy, false);
1378 return SCI_SUCCESS;
1379 default:
1380 dev_warn(sciport_to_dev(iport),
1381 "%s: in wrong state: %d\n", __func__, state);
1382 return SCI_FAILURE_INVALID_STATE;
1383 }
1384}
1385
1386enum sci_status sci_port_link_down(struct isci_port *iport,
1387 struct isci_phy *iphy)
1388{
1389 enum sci_port_states state;
1390
1391 state = iport->sm.current_state_id;
1392 switch (state) {
1393 case SCI_PORT_SUB_OPERATIONAL:
1394 sci_port_deactivate_phy(iport, iphy, true);
1395
1396 /* If there are no active phys left in the port, then
1397 * transition the port to the WAITING state until such time
1398 * as a phy goes link up
1399 */
1400 if (iport->active_phy_mask == 0)
1401 port_state_machine_change(iport,
1402 SCI_PORT_SUB_WAITING);
1403 return SCI_SUCCESS;
1404 case SCI_PORT_RESETTING:
1405 /* In the resetting state we don't notify the user regarding
1406 * link up and link down notifications. */
1407 sci_port_deactivate_phy(iport, iphy, false);
1408 return SCI_SUCCESS;
1409 default:
1410 dev_warn(sciport_to_dev(iport),
1411 "%s: in wrong state: %d\n", __func__, state);
1412 return SCI_FAILURE_INVALID_STATE;
1413 }
1414}
1415
1416enum sci_status sci_port_start_io(struct isci_port *iport,
1417 struct isci_remote_device *idev,
1418 struct isci_request *ireq)
1419{
1420 enum sci_port_states state;
1421
1422 state = iport->sm.current_state_id;
1423 switch (state) {
1424 case SCI_PORT_SUB_WAITING:
1425 return SCI_FAILURE_INVALID_STATE;
1426 case SCI_PORT_SUB_OPERATIONAL:
1427 iport->started_request_count++;
1428 return SCI_SUCCESS;
1429 default:
1430 dev_warn(sciport_to_dev(iport),
1431 "%s: in wrong state: %d\n", __func__, state);
1432 return SCI_FAILURE_INVALID_STATE;
1433 }
1434}
1435
1436enum sci_status sci_port_complete_io(struct isci_port *iport,
1437 struct isci_remote_device *idev,
1438 struct isci_request *ireq)
1439{
1440 enum sci_port_states state;
1441
1442 state = iport->sm.current_state_id;
1443 switch (state) {
1444 case SCI_PORT_STOPPED:
1445 dev_warn(sciport_to_dev(iport),
1446 "%s: in wrong state: %d\n", __func__, state);
1447 return SCI_FAILURE_INVALID_STATE;
1448 case SCI_PORT_STOPPING:
1449 sci_port_decrement_request_count(iport);
1450
1451 if (iport->started_request_count == 0)
1452 port_state_machine_change(iport,
1453 SCI_PORT_STOPPED);
1454 break;
1455 case SCI_PORT_READY:
1456 case SCI_PORT_RESETTING:
1457 case SCI_PORT_FAILED:
1458 case SCI_PORT_SUB_WAITING:
1459 case SCI_PORT_SUB_OPERATIONAL:
1460 sci_port_decrement_request_count(iport);
1461 break;
1462 case SCI_PORT_SUB_CONFIGURING:
1463 sci_port_decrement_request_count(iport);
1464 if (iport->started_request_count == 0) {
1465 port_state_machine_change(iport,
1466 SCI_PORT_SUB_OPERATIONAL);
1467 }
1468 break;
1469 }
1470 return SCI_SUCCESS;
1471}
1472
1473static void sci_port_enable_port_task_scheduler(struct isci_port *iport)
1474{
1475 u32 pts_control_value;
1476
1477 /* enable the port task scheduler in a suspended state */
1478 pts_control_value = readl(&iport->port_task_scheduler_registers->control);
1479 pts_control_value |= SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND);
1480 writel(pts_control_value, &iport->port_task_scheduler_registers->control);
1481}
1482
1483static void sci_port_disable_port_task_scheduler(struct isci_port *iport)
1484{
1485 u32 pts_control_value;
1486
1487 pts_control_value = readl(&iport->port_task_scheduler_registers->control);
1488 pts_control_value &=
1489 ~(SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND));
1490 writel(pts_control_value, &iport->port_task_scheduler_registers->control);
1491}
1492
1493static void sci_port_post_dummy_remote_node(struct isci_port *iport)
1494{
1495 struct isci_host *ihost = iport->owning_controller;
1496 u8 phys_index = iport->physical_port_index;
1497 union scu_remote_node_context *rnc;
1498 u16 rni = iport->reserved_rni;
1499 u32 command;
1500
1501 rnc = &ihost->remote_node_context_table[rni];
1502 rnc->ssp.is_valid = true;
1503
1504 command = SCU_CONTEXT_COMMAND_POST_RNC_32 |
1505 phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
1506
1507 sci_controller_post_request(ihost, command);
1508
1509 /* ensure hardware has seen the post rnc command and give it
1510 * ample time to act before sending the suspend
1511 */
1512 readl(&ihost->smu_registers->interrupt_status); /* flush */
1513 udelay(10);
1514
1515 command = SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX |
1516 phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
1517
1518 sci_controller_post_request(ihost, command);
1519}
1520
1521static void sci_port_stopped_state_enter(struct sci_base_state_machine *sm)
1522{
1523 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1524
1525 if (iport->sm.previous_state_id == SCI_PORT_STOPPING) {
1526 /*
1527		 * If we enter this state because of a request to stop
1528		 * the port then we want to disable the hardware's port
1529 * task scheduler. */
1530 sci_port_disable_port_task_scheduler(iport);
1531 }
1532}
1533
1534static void sci_port_stopped_state_exit(struct sci_base_state_machine *sm)
1535{
1536 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1537
1538 /* Enable and suspend the port task scheduler */
1539 sci_port_enable_port_task_scheduler(iport);
1540}
1541
1542static void sci_port_ready_state_enter(struct sci_base_state_machine *sm)
1543{
1544 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1545 struct isci_host *ihost = iport->owning_controller;
1546 u32 prev_state;
1547
1548 prev_state = iport->sm.previous_state_id;
1549 if (prev_state == SCI_PORT_RESETTING)
1550 isci_port_hard_reset_complete(iport, SCI_SUCCESS);
1551 else
1552 isci_port_not_ready(ihost, iport);
1553
1554 /* Post and suspend the dummy remote node context for this port. */
1555 sci_port_post_dummy_remote_node(iport);
1556
1557 /* Start the ready substate machine */
1558 port_state_machine_change(iport,
1559 SCI_PORT_SUB_WAITING);
1560}
1561
1562static void sci_port_resetting_state_exit(struct sci_base_state_machine *sm)
1563{
1564 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1565
1566 sci_del_timer(&iport->timer);
1567}
1568
1569static void sci_port_stopping_state_exit(struct sci_base_state_machine *sm)
1570{
1571 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1572
1573 sci_del_timer(&iport->timer);
1574
1575 sci_port_destroy_dummy_resources(iport);
1576}
1577
1578static void sci_port_failed_state_enter(struct sci_base_state_machine *sm)
1579{
1580 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1581
1582 isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT);
1583}
1584
1585/* --------------------------------------------------------------------------- */
1586
1587static const struct sci_base_state sci_port_state_table[] = {
1588 [SCI_PORT_STOPPED] = {
1589 .enter_state = sci_port_stopped_state_enter,
1590 .exit_state = sci_port_stopped_state_exit
1591 },
1592 [SCI_PORT_STOPPING] = {
1593 .exit_state = sci_port_stopping_state_exit
1594 },
1595 [SCI_PORT_READY] = {
1596 .enter_state = sci_port_ready_state_enter,
1597 },
1598 [SCI_PORT_SUB_WAITING] = {
1599 .enter_state = sci_port_ready_substate_waiting_enter,
1600 },
1601 [SCI_PORT_SUB_OPERATIONAL] = {
1602 .enter_state = sci_port_ready_substate_operational_enter,
1603 .exit_state = sci_port_ready_substate_operational_exit
1604 },
1605 [SCI_PORT_SUB_CONFIGURING] = {
1606 .enter_state = sci_port_ready_substate_configuring_enter,
1607 .exit_state = sci_port_ready_substate_configuring_exit
1608 },
1609 [SCI_PORT_RESETTING] = {
1610 .exit_state = sci_port_resetting_state_exit
1611 },
1612 [SCI_PORT_FAILED] = {
1613 .enter_state = sci_port_failed_state_enter,
1614 }
1615};
1616
1617void sci_port_construct(struct isci_port *iport, u8 index,
1618 struct isci_host *ihost)
1619{
1620 sci_init_sm(&iport->sm, sci_port_state_table, SCI_PORT_STOPPED);
1621
1622 iport->logical_port_index = SCIC_SDS_DUMMY_PORT;
1623 iport->physical_port_index = index;
1624 iport->active_phy_mask = 0;
1625 iport->ready_exit = false;
1626
1627 iport->owning_controller = ihost;
1628
1629 iport->started_request_count = 0;
1630 iport->assigned_device_count = 0;
1631
1632 iport->reserved_rni = SCU_DUMMY_INDEX;
1633 iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
1634
1635 sci_init_timer(&iport->timer, port_timeout);
1636
1637 iport->port_task_scheduler_registers = NULL;
1638
1639 for (index = 0; index < SCI_MAX_PHYS; index++)
1640 iport->phy_table[index] = NULL;
1641}
1642
1643void isci_port_init(struct isci_port *iport, struct isci_host *ihost, int index)
1644{
1645 INIT_LIST_HEAD(&iport->remote_dev_list);
1646 INIT_LIST_HEAD(&iport->domain_dev_list);
1647 spin_lock_init(&iport->state_lock);
1648 init_completion(&iport->start_complete);
1649 iport->isci_host = ihost;
1650 isci_port_change_state(iport, isci_freed);
1651 atomic_set(&iport->event, 0);
1652}
1653
1654/**
1655 * isci_port_get_state() - This function gets the status of the port object.
1656 * @isci_port: This parameter points to the isci_port object
1657 *
1658 * Return: the status of the object as an isci_status enum.
1659 */
1660enum isci_status isci_port_get_state(
1661 struct isci_port *isci_port)
1662{
1663 return isci_port->status;
1664}
1665
1666void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy)
1667{
1668 struct isci_host *ihost = iport->owning_controller;
1669
1670 /* notify the user. */
1671 isci_port_bc_change_received(ihost, iport, iphy);
1672}
1673
1674int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
1675 struct isci_phy *iphy)
1676{
1677 unsigned long flags;
1678 enum sci_status status;
1679 int idx, ret = TMF_RESP_FUNC_COMPLETE;
1680
1681 dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n",
1682 __func__, iport);
1683
1684 init_completion(&iport->hard_reset_complete);
1685
1686 spin_lock_irqsave(&ihost->scic_lock, flags);
1687
1688 #define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT
1689 status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT);
1690
1691 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1692
1693 if (status == SCI_SUCCESS) {
1694 wait_for_completion(&iport->hard_reset_complete);
1695
1696 dev_dbg(&ihost->pdev->dev,
1697 "%s: iport = %p; hard reset completion\n",
1698 __func__, iport);
1699
1700 if (iport->hard_reset_status != SCI_SUCCESS)
1701 ret = TMF_RESP_FUNC_FAILED;
1702 } else {
1703 ret = TMF_RESP_FUNC_FAILED;
1704
1705 dev_err(&ihost->pdev->dev,
1706 "%s: iport = %p; sci_port_hard_reset call"
1707 " failed 0x%x\n",
1708 __func__, iport, status);
1709
1710 }
1711
1712 /* If the hard reset for the port has failed, consider this
1713 * the same as link failures on all phys in the port.
1714 */
1715 if (ret != TMF_RESP_FUNC_COMPLETE) {
1716
1717 dev_err(&ihost->pdev->dev,
1718 "%s: iport = %p; hard reset failed "
1719 "(0x%x) - driving explicit link fail for all phys\n",
1720 __func__, iport, iport->hard_reset_status);
1721
1722 /* Down all phys in the port. */
1723 spin_lock_irqsave(&ihost->scic_lock, flags);
1724 for (idx = 0; idx < SCI_MAX_PHYS; ++idx) {
1725 struct isci_phy *iphy = iport->phy_table[idx];
1726
1727 if (!iphy)
1728 continue;
1729 sci_phy_stop(iphy);
1730 sci_phy_start(iphy);
1731 }
1732 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1733 }
1734 return ret;
1735}
1736
1737/**
1738 * isci_port_deformed() - This function is called by libsas when a port becomes
1739 * inactive.
1740 * @phy: This parameter specifies the libsas phy with the inactive port.
1741 *
1742 */
1743void isci_port_deformed(struct asd_sas_phy *phy)
1744{
1745 pr_debug("%s: sas_phy = %p\n", __func__, phy);
1746}
1747
1748/**
1749 * isci_port_formed() - This function is called by libsas when a port becomes
1750 * active.
1751 * @phy: This parameter specifies the libsas phy with the active port.
1752 *
1753 */
1754void isci_port_formed(struct asd_sas_phy *phy)
1755{
1756 pr_debug("%s: sas_phy = %p, sas_port = %p\n", __func__, phy, phy->port);
1757}
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h
new file mode 100644
index 000000000000..b50ecd4e8f9c
--- /dev/null
+++ b/drivers/scsi/isci/port.h
@@ -0,0 +1,306 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#ifndef _ISCI_PORT_H_
57#define _ISCI_PORT_H_
58
59#include <scsi/libsas.h>
60#include "isci.h"
61#include "sas.h"
62#include "phy.h"
63
64#define SCIC_SDS_DUMMY_PORT 0xFF
65
66struct isci_phy;
67struct isci_host;
68
69enum isci_status {
70 isci_freed = 0x00,
71 isci_starting = 0x01,
72 isci_ready = 0x02,
73 isci_ready_for_io = 0x03,
74 isci_stopping = 0x04,
75 isci_stopped = 0x05,
76};
77
78/**
79 * struct isci_port - isci direct attached sas port object
80 * @event: counts bcns and port stop events (for bcn filtering)
81 * @ready_exit: several states constitute 'ready'. When exiting ready we
82 * need to take extra port-teardown actions that are
83 * skipped when exiting to another 'ready' state.
84 * @logical_port_index: software port index
85 * @physical_port_index: hardware port index
86 * @active_phy_mask: identifies phy members
87 * @reserved_tag: io tag reserved for the port task scheduler workaround
88 * @reserved_rni: remote node index reserved for the port task scheduler workaround
89 * @started_request_count: reference count for outstanding commands
90 * @not_ready_reason: set during state transitions and notified
91 * @timer: timeout start/stop operations
92 */
93struct isci_port {
94 enum isci_status status;
95 #define IPORT_BCN_BLOCKED 0
96 #define IPORT_BCN_PENDING 1
97 unsigned long flags;
98 atomic_t event;
99 struct isci_host *isci_host;
100 struct asd_sas_port sas_port;
101 struct list_head remote_dev_list;
102 spinlock_t state_lock;
103 struct list_head domain_dev_list;
104 struct completion start_complete;
105 struct completion hard_reset_complete;
106 enum sci_status hard_reset_status;
107 struct sci_base_state_machine sm;
108 bool ready_exit;
109 u8 logical_port_index;
110 u8 physical_port_index;
111 u8 active_phy_mask;
112 u16 reserved_rni;
113 u16 reserved_tag;
114 u32 started_request_count;
115 u32 assigned_device_count;
116 u32 not_ready_reason;
117 struct isci_phy *phy_table[SCI_MAX_PHYS];
118 struct isci_host *owning_controller;
119 struct sci_timer timer;
120 struct scu_port_task_scheduler_registers __iomem *port_task_scheduler_registers;
121 /* XXX rework: only one register, no need to replicate per-port */
122 u32 __iomem *port_pe_configuration_register;
123 struct scu_viit_entry __iomem *viit_registers;
124};
125
126enum sci_port_not_ready_reason_code {
127 SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS,
128 SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED,
129 SCIC_PORT_NOT_READY_INVALID_PORT_CONFIGURATION,
130 SCIC_PORT_NOT_READY_RECONFIGURING,
131
132 SCIC_PORT_NOT_READY_REASON_CODE_MAX
133};
134
135struct sci_port_end_point_properties {
136 struct sci_sas_address sas_address;
137 struct sci_phy_proto protocols;
138};
139
140struct sci_port_properties {
141 u32 index;
142 struct sci_port_end_point_properties local;
143 struct sci_port_end_point_properties remote;
144 u32 phy_mask;
145};
146
147/**
148 * enum sci_port_states - This enumeration depicts all the states for the
149 * common port state machine.
150 *
151 *
152 */
153enum sci_port_states {
154 /**
155 * This state indicates that the port has successfully been stopped.
156 * In this state no new IO operations are permitted.
157 * This state is entered from the STOPPING state.
158 */
159 SCI_PORT_STOPPED,
160
161 /**
162 * This state indicates that the port is in the process of stopping.
163 * In this state no new IO operations are permitted, but existing IO
164 * operations are allowed to complete.
165 * This state is entered from the READY state.
166 */
167 SCI_PORT_STOPPING,
168
169 /**
170 * This state indicates the port is now ready. Thus, the user is
171 * able to perform IO operations on this port.
172 * This state is entered from the STOPPED or RESETTING states.
173 */
174 SCI_PORT_READY,
175
176 /**
177 * The substate where the port is started and ready but has no
178 * active phys.
179 */
180 SCI_PORT_SUB_WAITING,
181
182 /**
183 * The substate where the port is started and ready and there is
184 * at least one phy operational.
185 */
186 SCI_PORT_SUB_OPERATIONAL,
187
188 /**
189 * The substate where the port is started and there was an
190 * add/remove phy event. This state is only used in Automatic
191 * Port Configuration Mode (APC)
192 */
193 SCI_PORT_SUB_CONFIGURING,
194
195 /**
196 * This state indicates the port is in the process of performing a hard
197 * reset. Thus, the user is unable to perform IO operations on this
198 * port.
199 * This state is entered from the READY state.
200 */
201 SCI_PORT_RESETTING,
202
203 /**
204 * This state indicates the port has failed a reset request. This state
205 * is entered when a port reset request times out.
206 * This state is entered from the RESETTING state.
207 */
208 SCI_PORT_FAILED,
209
210
211};
212
213static inline void sci_port_decrement_request_count(struct isci_port *iport)
214{
215 if (WARN_ONCE(iport->started_request_count == 0,
216 "%s: tried to decrement started_request_count past 0!?",
217 __func__))
218 /* pass */;
219 else
220 iport->started_request_count--;
221}
222
223#define sci_port_active_phy(port, phy) \
224 (((port)->active_phy_mask & (1 << (phy)->phy_index)) != 0)
225
226void sci_port_construct(
227 struct isci_port *iport,
228 u8 port_index,
229 struct isci_host *ihost);
230
231enum sci_status sci_port_start(struct isci_port *iport);
232enum sci_status sci_port_stop(struct isci_port *iport);
233
234enum sci_status sci_port_add_phy(
235 struct isci_port *iport,
236 struct isci_phy *iphy);
237
238enum sci_status sci_port_remove_phy(
239 struct isci_port *iport,
240 struct isci_phy *iphy);
241
242void sci_port_setup_transports(
243 struct isci_port *iport,
244 u32 device_id);
245
246void isci_port_bcn_enable(struct isci_host *, struct isci_port *);
247
248void sci_port_deactivate_phy(
249 struct isci_port *iport,
250 struct isci_phy *iphy,
251 bool do_notify_user);
252
253bool sci_port_link_detected(
254 struct isci_port *iport,
255 struct isci_phy *iphy);
256
257enum sci_status sci_port_link_up(struct isci_port *iport,
258 struct isci_phy *iphy);
259enum sci_status sci_port_link_down(struct isci_port *iport,
260 struct isci_phy *iphy);
261
262struct isci_request;
263struct isci_remote_device;
264enum sci_status sci_port_start_io(
265 struct isci_port *iport,
266 struct isci_remote_device *idev,
267 struct isci_request *ireq);
268
269enum sci_status sci_port_complete_io(
270 struct isci_port *iport,
271 struct isci_remote_device *idev,
272 struct isci_request *ireq);
273
274enum sas_linkrate sci_port_get_max_allowed_speed(
275 struct isci_port *iport);
276
277void sci_port_broadcast_change_received(
278 struct isci_port *iport,
279 struct isci_phy *iphy);
280
281bool sci_port_is_valid_phy_assignment(
282 struct isci_port *iport,
283 u32 phy_index);
284
285void sci_port_get_sas_address(
286 struct isci_port *iport,
287 struct sci_sas_address *sas_address);
288
289void sci_port_get_attached_sas_address(
290 struct isci_port *iport,
291 struct sci_sas_address *sas_address);
292
293enum isci_status isci_port_get_state(
294 struct isci_port *isci_port);
295
296void isci_port_formed(struct asd_sas_phy *);
297void isci_port_deformed(struct asd_sas_phy *);
298
299void isci_port_init(
300 struct isci_port *port,
301 struct isci_host *host,
302 int index);
303
304int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
305 struct isci_phy *iphy);
306#endif /* !defined(_ISCI_PORT_H_) */
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
new file mode 100644
index 000000000000..486b113c634a
--- /dev/null
+++ b/drivers/scsi/isci/port_config.c
@@ -0,0 +1,754 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#include "host.h"
57
58#define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT (10)
59#define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT (10)
60#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (100)
61
62enum SCIC_SDS_APC_ACTIVITY {
63 SCIC_SDS_APC_SKIP_PHY,
64 SCIC_SDS_APC_ADD_PHY,
65 SCIC_SDS_APC_START_TIMER,
66
67 SCIC_SDS_APC_ACTIVITY_MAX
68};
69
70/*
71 * ******************************************************************************
72 * General port configuration agent routines
73 * ****************************************************************************** */
74
75/**
76 *
77 * @address_one: A SAS Address to be compared.
78 * @address_two: A SAS Address to be compared.
79 *
80 * Compare two SAS addresses: return a value > 0 if address_one is
81 * greater than address_two, a value < 0 if address_one is less than
82 * address_two, and 0 if the two addresses are equal. The high dword
83 * is compared first; the low dword breaks the tie. This yields a
84 * total ordering suitable for matching phys and ports by address.
85 */
86static s32 sci_sas_address_compare(
87 struct sci_sas_address address_one,
88 struct sci_sas_address address_two)
89{
90 if (address_one.high > address_two.high) {
91 return 1;
92 } else if (address_one.high < address_two.high) {
93 return -1;
94 } else if (address_one.low > address_two.low) {
95 return 1;
96 } else if (address_one.low < address_two.low) {
97 return -1;
98 }
99
100 /* The two SAS Address must be identical */
101 return 0;
102}
103
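The comparator above orders 64-bit SAS addresses by the high dword first, with the low dword breaking ties. A small standalone sketch of the same ordering (the local struct and the example values are illustrative only):

    #include <assert.h>

    struct sas_addr { unsigned int high, low; };

    static int addr_cmp(struct sas_addr a, struct sas_addr b)
    {
            if (a.high != b.high)
                    return a.high > b.high ? 1 : -1;
            if (a.low != b.low)
                    return a.low > b.low ? 1 : -1;
            return 0;   /* the two SAS addresses are identical */
    }

    int main(void)
    {
            struct sas_addr x = { 0x5000c500, 0x00000001 };
            struct sas_addr y = { 0x5000c500, 0x00000002 };

            assert(addr_cmp(x, y) < 0);   /* low dword breaks the tie */
            assert(addr_cmp(x, x) == 0);
            return 0;
    }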
104/**
105 *
106 * @controller: The controller object used for the port search.
107 * @phy: The phy object to match.
108 *
109 * This routine will find a matching port for the phy, i.e. a port whose
110 * local (broadcast) sas address and received sas address both match
111 * those of the phy.
112 *
113 * Returns the matching port, or NULL if no port matches the phy.
114 */
115static struct isci_port *sci_port_configuration_agent_find_port(
116 struct isci_host *ihost,
117 struct isci_phy *iphy)
118{
119 u8 i;
120 struct sci_sas_address port_sas_address;
121 struct sci_sas_address port_attached_device_address;
122 struct sci_sas_address phy_sas_address;
123 struct sci_sas_address phy_attached_device_address;
124
125 /*
126 * Since this phy can be a member of a wide port, check to see if one or
127 * more phys match the transmitted and received SAS addresses of this
128 * phy, in which case it should participate in the same port.
129 */
130 sci_phy_get_sas_address(iphy, &phy_sas_address);
131 sci_phy_get_attached_sas_address(iphy, &phy_attached_device_address);
132
133 for (i = 0; i < ihost->logical_port_entries; i++) {
134 struct isci_port *iport = &ihost->ports[i];
135
136 sci_port_get_sas_address(iport, &port_sas_address);
137 sci_port_get_attached_sas_address(iport, &port_attached_device_address);
138
139 if (sci_sas_address_compare(port_sas_address, phy_sas_address) == 0 &&
140 sci_sas_address_compare(port_attached_device_address, phy_attached_device_address) == 0)
141 return iport;
142 }
143
144 return NULL;
145}
146
147/**
148 *
149 * @controller: This is the controller object that contains the port agent
150 * @port_agent: This is the port configuration agent for the controller.
151 *
152 * This routine validates that the port configuration is correct for the
153 * SCU hardware, which only allows the following logical port groupings:
154 * LP0 -> (PE0), (PE0, PE1), (PE0, PE1, PE2, PE3); LP1 -> (PE1);
155 * LP2 -> (PE2), (PE2, PE3); LP3 -> (PE3).
156 * Returns SCI_SUCCESS if the configuration is valid for this agent, or
157 * SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION if it is not.
158 */
159static enum sci_status sci_port_configuration_agent_validate_ports(
160 struct isci_host *ihost,
161 struct sci_port_configuration_agent *port_agent)
162{
163 struct sci_sas_address first_address;
164 struct sci_sas_address second_address;
165
166 /*
167 * Sanity check the max ranges for all the phys the max index
168 * is always equal to the port range index */
169 if (port_agent->phy_valid_port_range[0].max_index != 0 ||
170 port_agent->phy_valid_port_range[1].max_index != 1 ||
171 port_agent->phy_valid_port_range[2].max_index != 2 ||
172 port_agent->phy_valid_port_range[3].max_index != 3)
173 return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
174
175 /*
176 * This is a request to configure a single x4 port or at least attempt
177 * to make all the phys into a single port */
178 if (port_agent->phy_valid_port_range[0].min_index == 0 &&
179 port_agent->phy_valid_port_range[1].min_index == 0 &&
180 port_agent->phy_valid_port_range[2].min_index == 0 &&
181 port_agent->phy_valid_port_range[3].min_index == 0)
182 return SCI_SUCCESS;
183
184 /*
185 * This is a degenerate case where phy 1 and phy 2 are assigned
186 * to the same port this is explicitly disallowed by the hardware
187 * unless they are part of the same x4 port and this condition was
188 * already checked above. */
189 if (port_agent->phy_valid_port_range[2].min_index == 1) {
190 return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
191 }
192
193 /*
194 * PE0 and PE3 can never have the same SAS Address unless they
195 * are part of the same x4 wide port and we have already checked
196 * for this condition. */
197 sci_phy_get_sas_address(&ihost->phys[0], &first_address);
198 sci_phy_get_sas_address(&ihost->phys[3], &second_address);
199
200 if (sci_sas_address_compare(first_address, second_address) == 0) {
201 return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
202 }
203
204 /*
205 * PE0 and PE1 are configured into a 2x1 ports make sure that the
206 * SAS Address for PE0 and PE2 are different since they can not be
207 * part of the same port. */
208 if (port_agent->phy_valid_port_range[0].min_index == 0 &&
209 port_agent->phy_valid_port_range[1].min_index == 1) {
210 sci_phy_get_sas_address(&ihost->phys[0], &first_address);
211 sci_phy_get_sas_address(&ihost->phys[2], &second_address);
212
213 if (sci_sas_address_compare(first_address, second_address) == 0) {
214 return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
215 }
216 }
217
218 /*
219 * PE2 and PE3 are configured into a 2x1 ports make sure that the
220 * SAS Address for PE1 and PE3 are different since they can not be
221 * part of the same port. */
222 if (port_agent->phy_valid_port_range[2].min_index == 2 &&
223 port_agent->phy_valid_port_range[3].min_index == 3) {
224 sci_phy_get_sas_address(&ihost->phys[1], &first_address);
225 sci_phy_get_sas_address(&ihost->phys[3], &second_address);
226
227 if (sci_sas_address_compare(first_address, second_address) == 0) {
228 return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
229 }
230 }
231
232 return SCI_SUCCESS;
233}
234
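For reference, the groupings this validation enforces can be written out as per-logical-port phy masks. The sketch below is illustrative only; the mask encoding (bit N = PEN) and the lookup table are assumptions for the example, not driver structures:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical table of the legal widths checked above:
     * LP0 may own PE0, PE0-1 or PE0-3; LP1 only PE1;
     * LP2 may own PE2 or PE2-3; LP3 only PE3. */
    static const unsigned char lp_legal_masks[4][3] = {
            [0] = { 0x1, 0x3, 0xf },  /* LP0: x1, x2 or x4 */
            [1] = { 0x2, 0x0, 0x0 },  /* LP1: x1 only */
            [2] = { 0x4, 0xc, 0x0 },  /* LP2: x1 or x2 */
            [3] = { 0x8, 0x0, 0x0 },  /* LP3: x1 only */
    };

    static bool lp_mask_is_legal(int lp, unsigned char mask)
    {
            for (int i = 0; i < 3; i++)
                    if (mask && lp_legal_masks[lp][i] == mask)
                            return true;
            return false;
    }

    int main(void)
    {
            printf("LP0 0x3: %d\n", lp_mask_is_legal(0, 0x3)); /* 1: PE0+PE1 */
            printf("LP1 0x6: %d\n", lp_mask_is_legal(1, 0x6)); /* 0: disallowed */
            return 0;
    }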
235/*
236 * ******************************************************************************
237 * Manual port configuration agent routines
238 * ****************************************************************************** */
239
240/* verify all of the phys in the same port are using the same SAS address */
241static enum sci_status
242sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
243 struct sci_port_configuration_agent *port_agent)
244{
245 u32 phy_mask;
246 u32 assigned_phy_mask;
247 struct sci_sas_address sas_address;
248 struct sci_sas_address phy_assigned_address;
249 u8 port_index;
250 u8 phy_index;
251
252 assigned_phy_mask = 0;
253 sas_address.high = 0;
254 sas_address.low = 0;
255
256 for (port_index = 0; port_index < SCI_MAX_PORTS; port_index++) {
257 phy_mask = ihost->oem_parameters.ports[port_index].phy_mask;
258
259 if (!phy_mask)
260 continue;
261 /*
262 * Make sure that one or more of the phys were not already assigned to
263 * a different port. */
264 if ((phy_mask & ~assigned_phy_mask) == 0) {
265 return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
266 }
267
268 /* Find the starting phy index for this round through the loop */
269 for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) {
270 if ((phy_mask & (1 << phy_index)) == 0)
271 continue;
272 sci_phy_get_sas_address(&ihost->phys[phy_index],
273 &sas_address);
274
275 /*
276 * The phy_index can be used as the starting point for the
277 * port range since the hardware starts all logical ports
278 * the same as the PE index. */
279 port_agent->phy_valid_port_range[phy_index].min_index = port_index;
280 port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
281
282 if (phy_index != port_index) {
283 return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
284 }
285
286 break;
287 }
288
289 /*
290 * See how many additional phys are being added to this logical port.
291 * Note: We have not moved the current phy_index so we will actually
292 * compare the starting phy with itself.
293 * This is expected and required to add the phy to the port. */
294 for (; phy_index < SCI_MAX_PHYS; phy_index++) { /* must advance even on continue */
295 if ((phy_mask & (1 << phy_index)) == 0)
296 continue;
297 sci_phy_get_sas_address(&ihost->phys[phy_index],
298 &phy_assigned_address);
299
300 if (sci_sas_address_compare(sas_address, phy_assigned_address) != 0) {
301 /*
302 * The phy mask specified that this phy is part of the same port
303 * as the starting phy and it is not so fail this configuration */
304 return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
305 }
306
307 port_agent->phy_valid_port_range[phy_index].min_index = port_index;
308 port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
309
310 sci_port_add_phy(&ihost->ports[port_index],
311 &ihost->phys[phy_index]);
312
313 assigned_phy_mask |= (1 << phy_index);
314 }
315
317 }
318
319 return sci_port_configuration_agent_validate_ports(ihost, port_agent);
320}
321
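In manual mode the grouping above is driven by one OEM phy_mask per port, where bit N selects phy N. A standalone sketch decoding a hypothetical OEM table into its port membership (the table contents are made up for the example):

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical OEM table: a 2x2 layout (phys 0-1 on port 0,
             * phys 2-3 on port 2), which the validation above accepts */
            unsigned char phy_mask[4] = { 0x3, 0x0, 0xc, 0x0 };

            for (int port = 0; port < 4; port++) {
                    if (!phy_mask[port])
                            continue;
                    printf("port %d:", port);
                    for (int phy = 0; phy < 4; phy++)
                            if (phy_mask[port] & (1 << phy))
                                    printf(" phy%d", phy);
                    printf("\n");
            }
            return 0;
    }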
322static void mpc_agent_timeout(unsigned long data)
323{
324 u8 index;
325 struct sci_timer *tmr = (struct sci_timer *)data;
326 struct sci_port_configuration_agent *port_agent;
327 struct isci_host *ihost;
328 unsigned long flags;
329 u16 configure_phy_mask;
330
331 port_agent = container_of(tmr, typeof(*port_agent), timer);
332 ihost = container_of(port_agent, typeof(*ihost), port_agent);
333
334 spin_lock_irqsave(&ihost->scic_lock, flags);
335
336 if (tmr->cancel)
337 goto done;
338
339 port_agent->timer_pending = false;
340
341 /* Find the mask of phys that are reported ready but as yet unconfigured into a port */
342 configure_phy_mask = ~port_agent->phy_configured_mask & port_agent->phy_ready_mask;
343
344 for (index = 0; index < SCI_MAX_PHYS; index++) {
345 struct isci_phy *iphy = &ihost->phys[index];
346
347 if (configure_phy_mask & (1 << index)) {
348 port_agent->link_up_handler(ihost, port_agent,
349 phy_get_non_dummy_port(iphy),
350 iphy);
351 }
352 }
353
354done:
355 spin_unlock_irqrestore(&ihost->scic_lock, flags);
356}
357
358static void sci_mpc_agent_link_up(struct isci_host *ihost,
359 struct sci_port_configuration_agent *port_agent,
360 struct isci_port *iport,
361 struct isci_phy *iphy)
362{
363 /* If the port is NULL then the phy was not assigned to a port.
364 * This is because the phy was not given the same SAS Address as
365 * the other PHYs in the port.
366 */
367 if (!iport)
368 return;
369
370 port_agent->phy_ready_mask |= (1 << iphy->phy_index);
371 sci_port_link_up(iport, iphy);
372 if ((iport->active_phy_mask & (1 << iphy->phy_index)))
373 port_agent->phy_configured_mask |= (1 << iphy->phy_index);
374}
375
376/**
377 *
378 * @controller: This is the controller object that receives the link down
379 * notification.
380 * @port: This is the port object associated with the phy; NULL if there
381 * is no associated port. The port is NULL only if the phy was never
382 * part of this port, which happens when the phy is not broadcasting
383 * the same SAS address as the other phys in the assigned port.
384 * @phy: This is the phy object which has gone link down.
385 *
386 * This function handles the manual port configuration link down
387 * notifications. Since all ports and phys are associated at
388 * initialization time we just turn around and notify the port object of
389 * the link down event. If this phy is not associated with a port no
390 * action is taken. (Is it possible to get a link down notification
391 * from a phy that has no associated port?)
392 */
393static void sci_mpc_agent_link_down(
394 struct isci_host *ihost,
395 struct sci_port_configuration_agent *port_agent,
396 struct isci_port *iport,
397 struct isci_phy *iphy)
398{
399 if (iport != NULL) {
400 /*
401 * If we can form a new port from the remainder of the phys
402 * then we want to start the timer to allow the SCI User to
403 * cleanup old devices and rediscover the port before
404 * rebuilding the port with the phys that remain in the ready
405 * state.
406 */
407 port_agent->phy_ready_mask &= ~(1 << iphy->phy_index);
408 port_agent->phy_configured_mask &= ~(1 << iphy->phy_index);
409
410 /*
411 * Check to see if there are more phys waiting to be
412 * configured into a port. If there are allow the SCI User
413 * to tear down this port, if necessary, and then reconstruct
414 * the port after the timeout.
415 */
416 if ((port_agent->phy_configured_mask == 0x0000) &&
417 (port_agent->phy_ready_mask != 0x0000) &&
418 !port_agent->timer_pending) {
419 port_agent->timer_pending = true;
420
421 sci_mod_timer(&port_agent->timer,
422 SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT);
423 }
424
425 sci_port_link_down(iport, iphy);
426 }
427}
428
429/* verify phys are assigned a valid SAS address for automatic port
430 * configuration mode.
431 */
432static enum sci_status
433sci_apc_agent_validate_phy_configuration(struct isci_host *ihost,
434 struct sci_port_configuration_agent *port_agent)
435{
436 u8 phy_index;
437 u8 port_index;
438 struct sci_sas_address sas_address;
439 struct sci_sas_address phy_assigned_address;
440
441 phy_index = 0;
442
443 while (phy_index < SCI_MAX_PHYS) {
444 port_index = phy_index;
445
446 /* Get the assigned SAS Address of the starting phy for this group. */
447 sci_phy_get_sas_address(&ihost->phys[phy_index],
448 &sas_address);
449
450 while (++phy_index < SCI_MAX_PHYS) {
451 sci_phy_get_sas_address(&ihost->phys[phy_index],
452 &phy_assigned_address);
453
454 /* Verify the SAS addresses are the same for every phy in the group */
455 if (sci_sas_address_compare(sas_address, phy_assigned_address) == 0) {
456 port_agent->phy_valid_port_range[phy_index].min_index = port_index;
457 port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
458 } else {
459 port_agent->phy_valid_port_range[phy_index].min_index = phy_index;
460 port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
461 break;
462 }
463 }
464 }
465
466 return sci_port_configuration_agent_validate_ports(ihost, port_agent);
467}
468
469static void sci_apc_agent_configure_ports(struct isci_host *ihost,
470 struct sci_port_configuration_agent *port_agent,
471 struct isci_phy *iphy,
472 bool start_timer)
473{
474 u8 port_index;
475 enum sci_status status;
476 struct isci_port *iport;
477 enum SCIC_SDS_APC_ACTIVITY apc_activity = SCIC_SDS_APC_SKIP_PHY;
478
479 iport = sci_port_configuration_agent_find_port(ihost, iphy);
480
481 if (iport) {
482 if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index))
483 apc_activity = SCIC_SDS_APC_ADD_PHY;
484 else
485 apc_activity = SCIC_SDS_APC_SKIP_PHY;
486 } else {
487 /*
488 * There is no matching Port for this PHY so let's search through the
489 * Ports and see if we can add the PHY to its own port or maybe start
490 * the timer and wait to see if a wider port can be made.
491 *
492 * Note the break when we reach the condition of the port id == phy id */
493 for (port_index = port_agent->phy_valid_port_range[iphy->phy_index].min_index;
494 port_index <= port_agent->phy_valid_port_range[iphy->phy_index].max_index;
495 port_index++) {
496
497 iport = &ihost->ports[port_index];
498
499 /* First we must make sure that this PHY can be added to this Port. */
500 if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
501 /*
502 * Port contains a PHY with a greater PHY ID than the current
503 * PHY that has gone link up. This phy can not be part of any
504 * port so skip it and move on. */
505 if (iport->active_phy_mask > (1 << iphy->phy_index)) {
506 apc_activity = SCIC_SDS_APC_SKIP_PHY;
507 break;
508 }
509
510 /*
511 * We have reached the end of our Port list and have not found
512 * any reason why we should not either add the PHY to the port
513 * or wait for more phys to become active. */
514 if (iport->physical_port_index == iphy->phy_index) {
515 /*
516 * The Port either has no active PHYs.
517 * Consider that if the port had any active PHYs we would have
518 * or active PHYs with
519 * a lower PHY Id than this PHY. */
520 if (apc_activity != SCIC_SDS_APC_START_TIMER) {
521 apc_activity = SCIC_SDS_APC_ADD_PHY;
522 }
523
524 break;
525 }
526
527 /*
528 * The current Port has no active PHYs and this PHY could be part
529 * of this Port. Since we don't know the final grouping yet, set up
530 * to start the timer and see if there is a better configuration. */
531 if (iport->active_phy_mask == 0) {
532 apc_activity = SCIC_SDS_APC_START_TIMER;
533 }
534 } else if (iport->active_phy_mask != 0) {
535 /*
536 * The Port has an active phy and the current Phy can not
537 * participate in this port so skip the PHY and see if
538 * there is a better configuration. */
539 apc_activity = SCIC_SDS_APC_SKIP_PHY;
540 }
541 }
542 }
543
544 /*
545 * Check to see if the start timer operations should instead map to an
546 * add phy operation. This happens when we have been waiting to
547 * add a phy to a port but could not because the automatic port
548 * configuration engine had a choice of possible ports for the phy.
549 * Since we have gone through a timeout we are going to restrict the
550 * choice to the smallest possible port. */
551 if (!start_timer && apc_activity == SCIC_SDS_APC_START_TIMER)
552 apc_activity = SCIC_SDS_APC_ADD_PHY;
557
558 switch (apc_activity) {
559 case SCIC_SDS_APC_ADD_PHY:
560 status = sci_port_add_phy(iport, iphy);
561
562 if (status == SCI_SUCCESS) {
563 port_agent->phy_configured_mask |= (1 << iphy->phy_index);
564 }
565 break;
566
567 case SCIC_SDS_APC_START_TIMER:
568 /*
569 * This can occur for either a link down event, or a link
570 * up event where we cannot yet tell the port to which a
571 * phy belongs.
572 */
573 if (port_agent->timer_pending)
574 sci_del_timer(&port_agent->timer);
575
576 port_agent->timer_pending = true;
577 sci_mod_timer(&port_agent->timer,
578 SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
579 break;
580
581 case SCIC_SDS_APC_SKIP_PHY:
582 default:
583 /* do nothing the PHY can not be made part of a port at this time. */
584 break;
585 }
586}
587
588/**
589 * sci_apc_agent_link_up - handle apc link up events
590 * @scic: This is the controller object that receives the link up
591 * notification.
592 * @sci_port: This is the port object associated with the phy. If there is
593 * no associated port this is NULL.
594 * @sci_phy: This is the phy object which has gone link up.
595 *
596 * This method handles the automatic port configuration for link up
597 * notifications. (Is it possible to get a link up notification from a
598 * phy that has no associated port?)
599 */
600static void sci_apc_agent_link_up(struct isci_host *ihost,
601 struct sci_port_configuration_agent *port_agent,
602 struct isci_port *iport,
603 struct isci_phy *iphy)
604{
605 u8 phy_index = iphy->phy_index;
606
607 if (!iport) {
608 /* the phy is not yet part of a port */
609 port_agent->phy_ready_mask |= 1 << phy_index;
610 sci_apc_agent_configure_ports(ihost, port_agent, iphy, true);
611 } else {
612 /* the phy is already part of the port */
613 u32 port_state = iport->sm.current_state_id;
614
615 /* if the port's state is resetting, then the link up is from
616 * a port hard reset; in this case we need to tell the port
617 * that link up was received
618 */
619 BUG_ON(port_state != SCI_PORT_RESETTING);
620 port_agent->phy_ready_mask |= 1 << phy_index;
621 sci_port_link_up(iport, iphy);
622 }
623}
624
625/**
626 *
627 * @controller: This is the controller object that receives the link down
628 * notification.
629 * @iport: This is the port object associated with the phy. If there is no
630 * associated port this is NULL.
631 * @iphy: This is the phy object which has gone link down.
632 *
633 * This method handles the automatic port configuration link down
634 * notifications. If the phy is not associated with a port, no action is
635 * taken. (Is it possible to get a link down notification from a phy
636 * that has no associated port?)
637 */
638static void sci_apc_agent_link_down(
639 struct isci_host *ihost,
640 struct sci_port_configuration_agent *port_agent,
641 struct isci_port *iport,
642 struct isci_phy *iphy)
643{
644 port_agent->phy_ready_mask &= ~(1 << iphy->phy_index);
645
646 if (!iport)
647 return;
648 if (port_agent->phy_configured_mask & (1 << iphy->phy_index)) {
649 enum sci_status status;
650
651 status = sci_port_remove_phy(iport, iphy);
652
653 if (status == SCI_SUCCESS)
654 port_agent->phy_configured_mask &= ~(1 << iphy->phy_index);
655 }
656}
657
658/* configure the phys into ports when the timer fires */
659static void apc_agent_timeout(unsigned long data)
660{
661 u32 index;
662 struct sci_timer *tmr = (struct sci_timer *)data;
663 struct sci_port_configuration_agent *port_agent;
664 struct isci_host *ihost;
665 unsigned long flags;
666 u16 configure_phy_mask;
667
668 port_agent = container_of(tmr, typeof(*port_agent), timer);
669 ihost = container_of(port_agent, typeof(*ihost), port_agent);
670
671 spin_lock_irqsave(&ihost->scic_lock, flags);
672
673 if (tmr->cancel)
674 goto done;
675
676 port_agent->timer_pending = false;
677
678 configure_phy_mask = ~port_agent->phy_configured_mask & port_agent->phy_ready_mask;
679
680 if (!configure_phy_mask)
681 goto done; /* must drop scic_lock before returning */
682
683 for (index = 0; index < SCI_MAX_PHYS; index++) {
684 if ((configure_phy_mask & (1 << index)) == 0)
685 continue;
686
687 sci_apc_agent_configure_ports(ihost, port_agent,
688 &ihost->phys[index], false);
689 }
690
691done:
692 spin_unlock_irqrestore(&ihost->scic_lock, flags);
693}
694
695/*
696 * ******************************************************************************
697 * Public port configuration agent routines
698 * ****************************************************************************** */
699
700/**
701 *
702 *
703 * This method will construct the port configuration agent for operation. This
704 * call is universal for both manual port configuration and automatic port
705 * configuration modes.
706 */
707void sci_port_configuration_agent_construct(
708 struct sci_port_configuration_agent *port_agent)
709{
710 u32 index;
711
712 port_agent->phy_configured_mask = 0x00;
713 port_agent->phy_ready_mask = 0x00;
714
715 port_agent->link_up_handler = NULL;
716 port_agent->link_down_handler = NULL;
717
718 port_agent->timer_pending = false;
719
720 for (index = 0; index < SCI_MAX_PORTS; index++) {
721 port_agent->phy_valid_port_range[index].min_index = 0;
722 port_agent->phy_valid_port_range[index].max_index = 0;
723 }
724}
725
726enum sci_status sci_port_configuration_agent_initialize(
727 struct isci_host *ihost,
728 struct sci_port_configuration_agent *port_agent)
729{
730 enum sci_status status;
731 enum sci_port_configuration_mode mode;
732
733 mode = ihost->oem_parameters.controller.mode_type;
734
735 if (mode == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
736 status = sci_mpc_agent_validate_phy_configuration(
737 ihost, port_agent);
738
739 port_agent->link_up_handler = sci_mpc_agent_link_up;
740 port_agent->link_down_handler = sci_mpc_agent_link_down;
741
742 sci_init_timer(&port_agent->timer, mpc_agent_timeout);
743 } else {
744 status = sci_apc_agent_validate_phy_configuration(
745 ihost, port_agent);
746
747 port_agent->link_up_handler = sci_apc_agent_link_up;
748 port_agent->link_down_handler = sci_apc_agent_link_down;
749
750 sci_init_timer(&port_agent->timer, apc_agent_timeout);
751 }
752
753 return status;
754}
diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c
new file mode 100644
index 000000000000..b5f4341de243
--- /dev/null
+++ b/drivers/scsi/isci/probe_roms.c
@@ -0,0 +1,243 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 */
24
25/* probe_roms - scan for oem parameters */
26
27#include <linux/kernel.h>
28#include <linux/firmware.h>
29#include <linux/uaccess.h>
30#include <linux/efi.h>
31#include <asm/probe_roms.h>
32
33#include "isci.h"
34#include "task.h"
35#include "probe_roms.h"
36
37static efi_char16_t isci_efivar_name[] = {
38 'R', 's', 't', 'S', 'c', 'u', 'O'
39};
40
41struct isci_orom *isci_request_oprom(struct pci_dev *pdev)
42{
43 void __iomem *oprom = pci_map_biosrom(pdev);
44 struct isci_orom *rom = NULL;
45 size_t len, i;
46 int j;
47 char oem_sig[4];
48 struct isci_oem_hdr oem_hdr;
49 u8 *tmp, sum;
50
51 if (!oprom)
52 return NULL;
53
54 len = pci_biosrom_size(pdev);
55 rom = devm_kzalloc(&pdev->dev, sizeof(*rom), GFP_KERNEL);
56 if (!rom) {
57 dev_warn(&pdev->dev,
58 "Unable to allocate memory for orom\n");
59 return NULL;
60 }
61
62 for (i = 0; i < len && rom; i += ISCI_OEM_SIG_SIZE) {
63 memcpy_fromio(oem_sig, oprom + i, ISCI_OEM_SIG_SIZE);
64
65 /* we think we found the OEM table */
66 if (memcmp(oem_sig, ISCI_OEM_SIG, ISCI_OEM_SIG_SIZE) == 0) {
67 size_t copy_len;
68
69 memcpy_fromio(&oem_hdr, oprom + i, sizeof(oem_hdr));
70
71 copy_len = min(oem_hdr.len - sizeof(oem_hdr),
72 sizeof(*rom));
73
74 memcpy_fromio(rom,
75 oprom + i + sizeof(oem_hdr),
76 copy_len);
77
78 /* calculate checksum */
79 tmp = (u8 *)&oem_hdr;
80 for (j = 0, sum = 0; j < sizeof(oem_hdr); j++, tmp++)
81 sum += *tmp;
82
83 tmp = (u8 *)rom;
84 for (j = 0; j < sizeof(*rom); j++, tmp++)
85 sum += *tmp;
86
87 if (sum != 0) {
88 dev_warn(&pdev->dev,
89 "OEM table checksum failed\n");
90 continue;
91 }
92
93 /* keep going if that's not the oem param table */
94 if (memcmp(rom->hdr.signature,
95 ISCI_ROM_SIG,
96 ISCI_ROM_SIG_SIZE) != 0)
97 continue;
98
99 dev_info(&pdev->dev,
100 "OEM parameter table found in OROM\n");
101 break;
102 }
103 }
104
105 if (i >= len) {
106 dev_err(&pdev->dev, "oprom parse error\n");
107 devm_kfree(&pdev->dev, rom);
108 rom = NULL;
109 }
110 pci_unmap_biosrom(oprom);
111
112 return rom;
113}
114
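The OROM scan above accepts a candidate table only when the byte-wise sum of the header plus payload is zero mod 256 (the stored checksum byte is chosen to make it so). A standalone sketch of that zero-sum rule, with an abbreviated, made-up table layout:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Zero-sum checksum as used above: the sum of all bytes, including
     * the stored checksum byte, must equal 0 (mod 256). */
    static uint8_t sum_bytes(const void *buf, size_t len)
    {
            const uint8_t *p = buf;
            uint8_t sum = 0;

            while (len--)
                    sum += *p++;
            return sum;
    }

    int main(void)
    {
            uint8_t table[8] = { '$', 'O', 'E', 'M', 1, 0, 8, 0 };

            /* pick the checksum byte so the whole table sums to zero */
            table[7] = (uint8_t)(0 - sum_bytes(table, 7));
            printf("valid: %d\n", sum_bytes(table, sizeof(table)) == 0);
            return 0;
    }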
115enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem,
116 struct isci_orom *orom, int scu_index)
117{
118 /* check for valid inputs */
119 if (scu_index < 0 || scu_index >= SCI_MAX_CONTROLLERS ||
120 scu_index > orom->hdr.num_elements || !oem)
121 return -EINVAL;
122
123 *oem = orom->ctrl[scu_index];
124 return 0;
125}
126
127struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw)
128{
129 struct isci_orom *orom = NULL, *data;
130 int i, j;
131
132 if (request_firmware(&fw, ISCI_FW_NAME, &pdev->dev) != 0)
133 return NULL;
134
135 if (fw->size < sizeof(*orom))
136 goto out;
137
138 data = (struct isci_orom *)fw->data;
139
140 if (strncmp(ISCI_ROM_SIG, data->hdr.signature,
141 strlen(ISCI_ROM_SIG)) != 0)
142 goto out;
143
144 orom = devm_kzalloc(&pdev->dev, fw->size, GFP_KERNEL);
145 if (!orom)
146 goto out;
147
148 memcpy(orom, fw->data, fw->size);
149
150 if (is_c0(pdev))
151 goto out;
152
153 /*
154 * deprecated: override default amp_control for pre-production
155 * silicon revisions
156 */
157 for (i = 0; i < ARRAY_SIZE(orom->ctrl); i++)
158 for (j = 0; j < ARRAY_SIZE(orom->ctrl[i].phys); j++) {
159 orom->ctrl[i].phys[j].afe_tx_amp_control0 = 0xe7c03;
160 orom->ctrl[i].phys[j].afe_tx_amp_control1 = 0xe7c03;
161 orom->ctrl[i].phys[j].afe_tx_amp_control2 = 0xe7c03;
162 orom->ctrl[i].phys[j].afe_tx_amp_control3 = 0xe7c03;
163 }
164 out:
165 release_firmware(fw);
166
167 return orom;
168}
169
170static struct efi *get_efi(void)
171{
172#ifdef CONFIG_EFI
173 return &efi;
174#else
175 return NULL;
176#endif
177}
178
179struct isci_orom *isci_get_efi_var(struct pci_dev *pdev)
180{
181 efi_status_t status;
182 struct isci_orom *rom;
183 struct isci_oem_hdr *oem_hdr;
184 u8 *tmp, sum;
185 int j;
186 unsigned long data_len;
187 u8 *efi_data;
188 u32 efi_attrib = 0;
189
190 data_len = 1024;
191 efi_data = devm_kzalloc(&pdev->dev, data_len, GFP_KERNEL);
192 if (!efi_data) {
193 dev_warn(&pdev->dev,
194 "Unable to allocate memory for EFI data\n");
195 return NULL;
196 }
197
198 rom = (struct isci_orom *)(efi_data + sizeof(struct isci_oem_hdr));
199
200 if (get_efi())
201 status = get_efi()->get_variable(isci_efivar_name,
202 &ISCI_EFI_VENDOR_GUID,
203 &efi_attrib,
204 &data_len,
205 efi_data);
206 else
207 status = EFI_NOT_FOUND;
208
209 if (status != EFI_SUCCESS) {
210 dev_warn(&pdev->dev,
211 "Unable to obtain EFI var data for OEM parms\n");
212 return NULL;
213 }
214
215 oem_hdr = (struct isci_oem_hdr *)efi_data;
216
217 if (memcmp(oem_hdr->sig, ISCI_OEM_SIG, ISCI_OEM_SIG_SIZE) != 0) {
218 dev_warn(&pdev->dev,
219 "Invalid OEM header signature\n");
220 return NULL;
221 }
222
223 /* calculate checksum */
224 tmp = (u8 *)efi_data;
225 for (j = 0, sum = 0; j < (sizeof(*oem_hdr) + sizeof(*rom)); j++, tmp++)
226 sum += *tmp;
227
228 if (sum != 0) {
229 dev_warn(&pdev->dev,
230 "OEM table checksum failed\n");
231 return NULL;
232 }
233
234 if (memcmp(rom->hdr.signature,
235 ISCI_ROM_SIG,
236 ISCI_ROM_SIG_SIZE) != 0) {
237 dev_warn(&pdev->dev,
238 "Invalid OEM table signature\n");
239 return NULL;
240 }
241
242 return rom;
243}
diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h
new file mode 100644
index 000000000000..dc007e692f4e
--- /dev/null
+++ b/drivers/scsi/isci/probe_roms.h
@@ -0,0 +1,249 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55#ifndef _ISCI_PROBE_ROMS_H_
56#define _ISCI_PROBE_ROMS_H_
57
58#ifdef __KERNEL__
59#include <linux/firmware.h>
60#include <linux/pci.h>
61#include <linux/efi.h>
62#include "isci.h"
63
64#define SCIC_SDS_PARM_NO_SPEED 0
65
66/* generation 1 (i.e. 1.5 Gb/s) */
67#define SCIC_SDS_PARM_GEN1_SPEED 1
68
69/* generation 2 (i.e. 3.0 Gb/s) */
70#define SCIC_SDS_PARM_GEN2_SPEED 2
71
72/* generation 3 (i.e. 6.0 Gb/s) */
73#define SCIC_SDS_PARM_GEN3_SPEED 3
74#define SCIC_SDS_PARM_MAX_SPEED SCIC_SDS_PARM_GEN3_SPEED
75
76/* parameters that can be set by module parameters */
77struct sci_user_parameters {
78 struct sci_phy_user_params {
79 /**
80 * This field specifies the NOTIFY (ENABLE SPIN UP) primitive
81 * insertion frequency for this phy index.
82 */
83 u32 notify_enable_spin_up_insertion_frequency;
84
85 /**
86 * This field specifies the number of transmitted DWORDs within which
87 * to transmit a single ALIGN primitive. This value applies regardless
88 * of what type of device is attached or connection state. A value of
89 * 0 indicates that no ALIGN primitives will be inserted.
90 */
91 u16 align_insertion_frequency;
92
93 /**
94 * This field specifies the number of transmitted DWORDs within which
95 * to transmit 2 ALIGN primitives. This applies for SAS connections
96 * only. A minimum value of 3 is required for this field.
97 */
98 u16 in_connection_align_insertion_frequency;
99
100 /**
101 * This field indicates the maximum speed generation to be utilized
102 * by phys in the supplied port.
103 * - A value of 1 indicates generation 1 (i.e. 1.5 Gb/s).
104 * - A value of 2 indicates generation 2 (i.e. 3.0 Gb/s).
105 * - A value of 3 indicates generation 3 (i.e. 6.0 Gb/s).
106 */
107 u8 max_speed_generation;
108
109 } phys[SCI_MAX_PHYS];
110
111 /**
112 * This field specifies the maximum number of direct attached devices
113 * that can have power supplied to them simultaneously.
114 */
115 u8 max_number_concurrent_device_spin_up;
116
117 /**
118 * This field specifies the number of seconds to allow a phy to consume
119 * power before yielding to another phy.
120 *
121 */
122 u8 phy_spin_up_delay_interval;
123
124 /**
125 * These timer values specify how long a link will remain open with no
126 * activity, in increments of a microsecond; they are in increments of
127 * 100 microseconds if the uppermost bit is set.
128 *
129 */
130 u16 stp_inactivity_timeout;
131 u16 ssp_inactivity_timeout;
132
133 /**
134 * These timer values specify how long a link will remain open, in increments
135 * of 100 microseconds.
136 *
137 */
138 u16 stp_max_occupancy_timeout;
139 u16 ssp_max_occupancy_timeout;
140
141 /**
142 * This timer value specifies how long a link will remain open with no
143 * outbound traffic in increments of a microsecond.
144 *
145 */
146 u8 no_outbound_task_timeout;
147
148};
149
150#define SCIC_SDS_PARM_PHY_MASK_MIN 0x0
151#define SCIC_SDS_PARM_PHY_MASK_MAX 0xF
152#define MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT 4
153
154struct sci_oem_params;
155int sci_oem_parameters_validate(struct sci_oem_params *oem);
156
157struct isci_orom;
158struct isci_orom *isci_request_oprom(struct pci_dev *pdev);
159enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem,
160 struct isci_orom *orom, int scu_index);
161struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw);
162struct isci_orom *isci_get_efi_var(struct pci_dev *pdev);
163
164struct isci_oem_hdr {
165 u8 sig[4];
166 u8 rev_major;
167 u8 rev_minor;
168 u16 len;
169 u8 checksum;
170 u8 reserved1;
171 u16 reserved2;
172} __attribute__ ((packed));
173
174#else
175#define SCI_MAX_PORTS 4
176#define SCI_MAX_PHYS 4
177#define SCI_MAX_CONTROLLERS 2
178#endif
179
180#define ISCI_FW_NAME "isci/isci_firmware.bin"
181
182#define ROMSIGNATURE 0xaa55
183
184#define ISCI_OEM_SIG "$OEM"
185#define ISCI_OEM_SIG_SIZE 4
186#define ISCI_ROM_SIG "ISCUOEMB"
187#define ISCI_ROM_SIG_SIZE 8
188
189#define ISCI_EFI_VENDOR_GUID \
190 EFI_GUID(0x193dfefa, 0xa445, 0x4302, 0x99, 0xd8, 0xef, 0x3a, 0xad, \
191 0x1a, 0x04, 0xc6)
192#define ISCI_EFI_VAR_NAME "RstScuO"
193
194/* Allowed PORT configuration modes:
195 *
196 * APC (automatic) PORT configuration mode is selected when the OEM
197 * configuration parameters provide no PHY_MASK for any PORT, i.e. no
198 * phys are assigned to any of the ports at start.
199 *
200 * MPC (manual) PORT configuration mode is selected when the OEM
201 * configuration parameters provide a PHY_MASK for any PORT. Any PORT
202 * with no PHY_MASK is assumed invalid, and not all PHYs must be
203 * assigned; a mask assigning even a single PHY to one port, with no
204 * other PHYs assigned, is sufficient to declare manual configuration.
205 */
203enum sci_port_configuration_mode {
204 SCIC_PORT_MANUAL_CONFIGURATION_MODE = 0,
205 SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE = 1
206};
207
208struct sci_bios_oem_param_block_hdr {
209 uint8_t signature[ISCI_ROM_SIG_SIZE];
210 uint16_t total_block_length;
211 uint8_t hdr_length;
212 uint8_t version;
213 uint8_t preboot_source;
214 uint8_t num_elements;
215 uint16_t element_length;
216 uint8_t reserved[8];
217} __attribute__ ((packed));
218
219struct sci_oem_params {
220 struct {
221 uint8_t mode_type;
222 uint8_t max_concurrent_dev_spin_up;
223 uint8_t do_enable_ssc;
224 uint8_t reserved;
225 } controller;
226
227 struct {
228 uint8_t phy_mask;
229 } ports[SCI_MAX_PORTS];
230
231 struct sci_phy_oem_params {
232 struct {
233 uint32_t high;
234 uint32_t low;
235 } sas_address;
236
237 uint32_t afe_tx_amp_control0;
238 uint32_t afe_tx_amp_control1;
239 uint32_t afe_tx_amp_control2;
240 uint32_t afe_tx_amp_control3;
241 } phys[SCI_MAX_PHYS];
242} __attribute__ ((packed));
243
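/* Editorial sketch: deriving the PORT configuration mode described above
 * from the OEM parameters.  The function name is illustrative, not part
 * of this interface. */
static inline enum sci_port_configuration_mode
sci_oem_config_mode(const struct sci_oem_params *oem)
{
	int i;

	/* any non-zero PHY_MASK selects manual (MPC) configuration */
	for (i = 0; i < SCI_MAX_PORTS; i++)
		if (oem->ports[i].phy_mask != 0)
			return SCIC_PORT_MANUAL_CONFIGURATION_MODE;

	/* no phys assigned to any port: automatic (APC) configuration */
	return SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
}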
244struct isci_orom {
245 struct sci_bios_oem_param_block_hdr hdr;
246 struct sci_oem_params ctrl[SCI_MAX_CONTROLLERS];
247} __attribute__ ((packed));
248
249#endif
diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h
new file mode 100644
index 000000000000..9b266c7428e8
--- /dev/null
+++ b/drivers/scsi/isci/registers.h
@@ -0,0 +1,1934 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#ifndef _SCU_REGISTERS_H_
57#define _SCU_REGISTERS_H_
58
59/**
60 * This file contains the constants and structures for the SCU memory mapped
61 * registers.
62 *
63 *
64 */
65
66#define SCU_VIIT_ENTRY_ID_MASK (0xC0000000)
67#define SCU_VIIT_ENTRY_ID_SHIFT (30)
68
69#define SCU_VIIT_ENTRY_FUNCTION_MASK (0x0FF00000)
70#define SCU_VIIT_ENTRY_FUNCTION_SHIFT (20)
71
72#define SCU_VIIT_ENTRY_IPPTMODE_MASK (0x0001F800)
73#define SCU_VIIT_ENTRY_IPPTMODE_SHIFT (12)
74
75#define SCU_VIIT_ENTRY_LPVIE_MASK (0x00000F00)
76#define SCU_VIIT_ENTRY_LPVIE_SHIFT (8)
77
78#define SCU_VIIT_ENTRY_STATUS_MASK (0x000000FF)
79#define SCU_VIIT_ENTRY_STATUS_SHIFT (0)
80
81#define SCU_VIIT_ENTRY_ID_INVALID (0 << SCU_VIIT_ENTRY_ID_SHIFT)
82#define SCU_VIIT_ENTRY_ID_VIIT (1 << SCU_VIIT_ENTRY_ID_SHIFT)
83#define SCU_VIIT_ENTRY_ID_IIT (2 << SCU_VIIT_ENTRY_ID_SHIFT)
84#define SCU_VIIT_ENTRY_ID_VIRT_EXP (3 << SCU_VIIT_ENTRY_ID_SHIFT)
85
86#define SCU_VIIT_IPPT_SSP_INITIATOR (0x01 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
87#define SCU_VIIT_IPPT_SMP_INITIATOR (0x02 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
88#define SCU_VIIT_IPPT_STP_INITIATOR (0x04 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
89#define SCU_VIIT_IPPT_INITIATOR \
90 (\
91 SCU_VIIT_IPPT_SSP_INITIATOR \
92 | SCU_VIIT_IPPT_SMP_INITIATOR \
93 | SCU_VIIT_IPPT_STP_INITIATOR \
94 )
95
96#define SCU_VIIT_STATUS_RNC_VALID (0x01 << SCU_VIIT_ENTRY_STATUS_SHIFT)
97#define SCU_VIIT_STATUS_ADDRESS_VALID (0x02 << SCU_VIIT_ENTRY_STATUS_SHIFT)
98#define SCU_VIIT_STATUS_RNI_VALID (0x04 << SCU_VIIT_ENTRY_STATUS_SHIFT)
99#define SCU_VIIT_STATUS_ALL_VALID \
100 (\
101 SCU_VIIT_STATUS_RNC_VALID \
102 | SCU_VIIT_STATUS_ADDRESS_VALID \
103 | SCU_VIIT_STATUS_RNI_VALID \
104 )
105
106#define SCU_VIIT_IPPT_SMP_TARGET (0x10 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
107
108/**
109 * struct scu_viit_entry - This is the SCU Virtual Initiator Table Entry
110 *
111 *
112 */
113struct scu_viit_entry {
114 /**
115 * This field encodes the type of initiator that is being constructed
116 * for this port.
117 */
118 u32 status;
119
120 /**
121 * Virtual initiator high SAS Address
122 */
123 u32 initiator_sas_address_hi;
124
125 /**
126 * Virtual initiator low SAS Address
127 */
128 u32 initiator_sas_address_lo;
129
130 /**
131 * This must be 0
132 */
133 u32 reserved;
134
135};
136
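/* Editorial sketch: composing the status word of a VIIT entry that acts
 * as an SSP/SMP/STP initiator with all validity bits set, using the
 * definitions above.  The function name is illustrative. */
static inline u32 viit_initiator_status(void)
{
	return SCU_VIIT_ENTRY_ID_VIIT
	       | SCU_VIIT_IPPT_INITIATOR
	       | SCU_VIIT_STATUS_ALL_VALID;
}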
137
138/* IIT Status Defines */
139#define SCU_IIT_ENTRY_ID_MASK (0xC0000000)
140#define SCU_IIT_ENTRY_ID_SHIFT (30)
141
142#define SCU_IIT_ENTRY_STATUS_UPDATE_MASK (0x20000000)
143#define SCU_IIT_ENTRY_STATUS_UPDATE_SHIFT (29)
144
145#define SCU_IIT_ENTRY_LPI_MASK (0x00000F00)
146#define SCU_IIT_ENTRY_LPI_SHIFT (8)
147
148#define SCU_IIT_ENTRY_STATUS_MASK (0x000000FF)
149#define SCU_IIT_ENTRY_STATUS_SHIFT (0)
150
151/* IIT Remote Initiator Defines */
152#define SCU_IIT_ENTRY_REMOTE_TAG_MASK (0x0000FFFF)
153#define SCU_IIT_ENTRY_REMOTE_TAG_SHIFT (0)
154
155#define SCU_IIT_ENTRY_REMOTE_RNC_MASK (0x0FFF0000)
156#define SCU_IIT_ENTRY_REMOTE_RNC_SHIFT (16)
157
158#define SCU_IIT_ENTRY_ID_INVALID (0 << SCU_IIT_ENTRY_ID_SHIFT)
159#define SCU_IIT_ENTRY_ID_VIIT (1 << SCU_IIT_ENTRY_ID_SHIFT)
160#define SCU_IIT_ENTRY_ID_IIT (2 << SCU_IIT_ENTRY_ID_SHIFT)
161#define SCU_IIT_ENTRY_ID_VIRT_EXP (3 << SCU_IIT_ENTRY_ID_SHIFT)
162
163/**
164 * struct scu_iit_entry - This will be implemented later when we support
165 * virtual functions
166 *
167 *
168 */
169struct scu_iit_entry {
170 u32 status;
171 u32 remote_initiator_sas_address_hi;
172 u32 remote_initiator_sas_address_lo;
173 u32 remote_initiator;
174
175};
176
177/* Generate a value for an SCU register */
178#define SCU_GEN_VALUE(name, value) \
179 (((value) << name ## _SHIFT) & (name ## _MASK))
180
181/*
182 * Generate a bit value for an SCU register
183 * Make sure that the register MASK is just a single bit */
184#define SCU_GEN_BIT(name) \
185 SCU_GEN_VALUE(name, ((u32)1))
186
187#define SCU_SET_BIT(name, reg_value) \
188 ((reg_value) | SCU_GEN_BIT(name))
189
190#define SCU_CLEAR_BIT(name, reg_value) \
191 ((reg_value) & ~(SCU_GEN_BIT(name)))
192
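/* Editorial sketch: how the generators above compose.  SCU_GEN_VALUE()
 * places a value into a named field and SCU_CLEAR_BIT() strips a
 * single-bit field; the VIIT/IIT names come from the definitions above
 * and the function names are illustrative. */
static inline u32 viit_function_field(u32 fn)
{
	/* expands to ((fn) << 20) & 0x0FF00000 */
	return SCU_GEN_VALUE(SCU_VIIT_ENTRY_FUNCTION, fn);
}

static inline u32 iit_clear_status_update(u32 reg)
{
	/* returns reg with the status-update bit (bit 29) cleared */
	return SCU_CLEAR_BIT(SCU_IIT_ENTRY_STATUS_UPDATE, reg);
}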
193/*
194 * *****************************************************************************
195 * Unions for bitfield definitions of SCU Registers
196 * SMU Post Context Port
197 * ***************************************************************************** */
198#define SMU_POST_CONTEXT_PORT_CONTEXT_INDEX_SHIFT (0)
199#define SMU_POST_CONTEXT_PORT_CONTEXT_INDEX_MASK (0x00000FFF)
200#define SMU_POST_CONTEXT_PORT_LOGICAL_PORT_INDEX_SHIFT (12)
201#define SMU_POST_CONTEXT_PORT_LOGICAL_PORT_INDEX_MASK (0x0000F000)
202#define SMU_POST_CONTEXT_PORT_PROTOCOL_ENGINE_SHIFT (16)
203#define SMU_POST_CONTEXT_PORT_PROTOCOL_ENGINE_MASK (0x00030000)
204#define SMU_POST_CONTEXT_PORT_COMMAND_CONTEXT_SHIFT (18)
205#define SMU_POST_CONTEXT_PORT_COMMAND_CONTEXT_MASK (0x00FC0000)
206#define SMU_POST_CONTEXT_PORT_RESERVED_MASK (0xFF000000)
207
208#define SMU_PCP_GEN_VAL(name, value) \
209 SCU_GEN_VALUE(SMU_POST_CONTEXT_PORT_ ## name, value)
210
211/* ***************************************************************************** */
212#define SMU_INTERRUPT_STATUS_COMPLETION_SHIFT (31)
213#define SMU_INTERRUPT_STATUS_COMPLETION_MASK (0x80000000)
214#define SMU_INTERRUPT_STATUS_QUEUE_SUSPEND_SHIFT (1)
215#define SMU_INTERRUPT_STATUS_QUEUE_SUSPEND_MASK (0x00000002)
216#define SMU_INTERRUPT_STATUS_QUEUE_ERROR_SHIFT (0)
217#define SMU_INTERRUPT_STATUS_QUEUE_ERROR_MASK (0x00000001)
218#define SMU_INTERRUPT_STATUS_RESERVED_MASK (0x7FFFFFFC)
219
220#define SMU_ISR_GEN_BIT(name) \
221 SCU_GEN_BIT(SMU_INTERRUPT_STATUS_ ## name)
222
223#define SMU_ISR_QUEUE_ERROR SMU_ISR_GEN_BIT(QUEUE_ERROR)
224#define SMU_ISR_QUEUE_SUSPEND SMU_ISR_GEN_BIT(QUEUE_SUSPEND)
225#define SMU_ISR_COMPLETION SMU_ISR_GEN_BIT(COMPLETION)
226
227/* ***************************************************************************** */
228#define SMU_INTERRUPT_MASK_COMPLETION_SHIFT (31)
229#define SMU_INTERRUPT_MASK_COMPLETION_MASK (0x80000000)
230#define SMU_INTERRUPT_MASK_QUEUE_SUSPEND_SHIFT (1)
231#define SMU_INTERRUPT_MASK_QUEUE_SUSPEND_MASK (0x00000002)
232#define SMU_INTERRUPT_MASK_QUEUE_ERROR_SHIFT (0)
233#define SMU_INTERRUPT_MASK_QUEUE_ERROR_MASK (0x00000001)
234#define SMU_INTERRUPT_MASK_RESERVED_MASK (0x7FFFFFFC)
235
236#define SMU_IMR_GEN_BIT(name) \
237 SCU_GEN_BIT(SMU_INTERRUPT_MASK_ ## name)
238
239#define SMU_IMR_QUEUE_ERROR SMU_IMR_GEN_BIT(QUEUE_ERROR)
240#define SMU_IMR_QUEUE_SUSPEND SMU_IMR_GEN_BIT(QUEUE_SUSPEND)
241#define SMU_IMR_COMPLETION SMU_IMR_GEN_BIT(COMPLETION)
242
243/* ***************************************************************************** */
244#define SMU_INTERRUPT_COALESCING_CONTROL_TIMER_SHIFT (0)
245#define SMU_INTERRUPT_COALESCING_CONTROL_TIMER_MASK (0x0000001F)
246#define SMU_INTERRUPT_COALESCING_CONTROL_NUMBER_SHIFT (8)
247#define SMU_INTERRUPT_COALESCING_CONTROL_NUMBER_MASK (0x0000FF00)
248#define SMU_INTERRUPT_COALESCING_CONTROL_RESERVED_MASK (0xFFFF00E0)
249
250#define SMU_ICC_GEN_VAL(name, value) \
251 SCU_GEN_VALUE(SMU_INTERRUPT_COALESCING_CONTROL_ ## name, value)
252
253/* ***************************************************************************** */
254#define SMU_TASK_CONTEXT_RANGE_START_SHIFT (0)
255#define SMU_TASK_CONTEXT_RANGE_START_MASK (0x00000FFF)
256#define SMU_TASK_CONTEXT_RANGE_ENDING_SHIFT (16)
257#define SMU_TASK_CONTEXT_RANGE_ENDING_MASK (0x0FFF0000)
258#define SMU_TASK_CONTEXT_RANGE_ENABLE_SHIFT (31)
259#define SMU_TASK_CONTEXT_RANGE_ENABLE_MASK (0x80000000)
260#define SMU_TASK_CONTEXT_RANGE_RESERVED_MASK (0x7000F000)
261
262#define SMU_TCR_GEN_VAL(name, value) \
263 SCU_GEN_VALUE(SMU_TASK_CONTEXT_RANGE_ ## name, value)
264
265#define SMU_TCR_GEN_BIT(name, value) \
266 SCU_GEN_BIT(SMU_TASK_CONTEXT_RANGE_ ## name)
267
268/* ***************************************************************************** */
269
270#define SMU_COMPLETION_QUEUE_PUT_POINTER_SHIFT (0)
271#define SMU_COMPLETION_QUEUE_PUT_POINTER_MASK (0x00003FFF)
272#define SMU_COMPLETION_QUEUE_PUT_CYCLE_BIT_SHIFT (15)
273#define SMU_COMPLETION_QUEUE_PUT_CYCLE_BIT_MASK (0x00008000)
274#define SMU_COMPLETION_QUEUE_PUT_EVENT_POINTER_SHIFT (16)
275#define SMU_COMPLETION_QUEUE_PUT_EVENT_POINTER_MASK (0x03FF0000)
276#define SMU_COMPLETION_QUEUE_PUT_EVENT_CYCLE_BIT_SHIFT (26)
277#define SMU_COMPLETION_QUEUE_PUT_EVENT_CYCLE_BIT_MASK (0x04000000)
278#define SMU_COMPLETION_QUEUE_PUT_RESERVED_MASK (0xF8004000)
279
280#define SMU_CQPR_GEN_VAL(name, value) \
281 SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_PUT_ ## name, value)
282
283#define SMU_CQPR_GEN_BIT(name) \
284 SCU_GEN_BIT(SMU_COMPLETION_QUEUE_PUT_ ## name)
285
286/* ***************************************************************************** */
287
288#define SMU_COMPLETION_QUEUE_GET_POINTER_SHIFT (0)
289#define SMU_COMPLETION_QUEUE_GET_POINTER_MASK (0x00003FFF)
290#define SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT (15)
291#define SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_MASK (0x00008000)
292#define SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT (16)
293#define SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK (0x03FF0000)
294#define SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT (26)
295#define SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_MASK (0x04000000)
296#define SMU_COMPLETION_QUEUE_GET_ENABLE_SHIFT (30)
297#define SMU_COMPLETION_QUEUE_GET_ENABLE_MASK (0x40000000)
298#define SMU_COMPLETION_QUEUE_GET_EVENT_ENABLE_SHIFT (31)
299#define SMU_COMPLETION_QUEUE_GET_EVENT_ENABLE_MASK (0x80000000)
300#define SMU_COMPLETION_QUEUE_GET_RESERVED_MASK (0x38004000)
301
302#define SMU_CQGR_GEN_VAL(name, value) \
303 SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_GET_ ## name, value)
304
305#define SMU_CQGR_GEN_BIT(name) \
306 SCU_GEN_BIT(SMU_COMPLETION_QUEUE_GET_ ## name)
307
308#define SMU_CQGR_CYCLE_BIT \
309 SMU_CQGR_GEN_BIT(CYCLE_BIT)
310
311#define SMU_CQGR_EVENT_CYCLE_BIT \
312 SMU_CQGR_GEN_BIT(EVENT_CYCLE_BIT)
313
314#define SMU_CQGR_GET_POINTER_SET(value) \
315 SMU_CQGR_GEN_VAL(POINTER, value)
316
317
318/* ***************************************************************************** */
319#define SMU_COMPLETION_QUEUE_CONTROL_QUEUE_LIMIT_SHIFT (0)
320#define SMU_COMPLETION_QUEUE_CONTROL_QUEUE_LIMIT_MASK (0x00003FFF)
321#define SMU_COMPLETION_QUEUE_CONTROL_EVENT_LIMIT_SHIFT (16)
322#define SMU_COMPLETION_QUEUE_CONTROL_EVENT_LIMIT_MASK (0x03FF0000)
323#define SMU_COMPLETION_QUEUE_CONTROL_RESERVED_MASK (0xFC00C000)
324
325#define SMU_CQC_GEN_VAL(name, value) \
326 SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_CONTROL_ ## name, value)
327
328#define SMU_CQC_QUEUE_LIMIT_SET(value) \
329 SMU_CQC_GEN_VAL(QUEUE_LIMIT, value)
330
331#define SMU_CQC_EVENT_LIMIT_SET(value) \
332 SMU_CQC_GEN_VAL(EVENT_LIMIT, value)
333
334
335/* ***************************************************************************** */
336#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT (0)
337#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK (0x00000FFF)
338#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT (12)
339#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK (0x00007000)
340#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT (15)
341#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK (0x07FF8000)
342#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_SHIFT (27)
343#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_MASK (0x08000000)
344#define SMU_DEVICE_CONTEXT_CAPACITY_RESERVED_MASK (0xF0000000)
345
346#define SMU_DCC_GEN_VAL(name, value) \
347 SCU_GEN_VALUE(SMU_DEVICE_CONTEXT_CAPACITY_ ## name, value)
348
349#define SMU_DCC_GET_MAX_PEG(value) \
350 (\
351 ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_MASK) \
352 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_SHIFT \
353 )
354
355#define SMU_DCC_GET_MAX_LP(value) \
356 (\
357 ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
358 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT \
359 )
360
361#define SMU_DCC_GET_MAX_TC(value) \
362 (\
363 ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
364 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT \
365 )
366
367#define SMU_DCC_GET_MAX_RNC(value) \
368 (\
369 ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
370 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT \
371 )
372
373/* -------------------------------------------------------------------------- */
374
375#define SMU_CONTROL_STATUS_TASK_CONTEXT_RANGE_ENABLE_SHIFT (0)
376#define SMU_CONTROL_STATUS_TASK_CONTEXT_RANGE_ENABLE_MASK (0x00000001)
377#define SMU_CONTROL_STATUS_COMPLETION_BYTE_SWAP_ENABLE_SHIFT (1)
378#define SMU_CONTROL_STATUS_COMPLETION_BYTE_SWAP_ENABLE_MASK (0x00000002)
379#define SMU_CONTROL_STATUS_CONTEXT_RAM_INIT_COMPLETED_SHIFT (16)
380#define SMU_CONTROL_STATUS_CONTEXT_RAM_INIT_COMPLETED_MASK (0x00010000)
381#define SMU_CONTROL_STATUS_SCHEDULER_RAM_INIT_COMPLETED_SHIFT (17)
382#define SMU_CONTROL_STATUS_SCHEDULER_RAM_INIT_COMPLETED_MASK (0x00020000)
383#define SMU_CONTROL_STATUS_RESERVED_MASK (0xFFFCFFFC)
384
385#define SMU_SMUCSR_GEN_BIT(name) \
386 SCU_GEN_BIT(SMU_CONTROL_STATUS_ ## name)
387
388#define SMU_SMUCSR_SCHEDULER_RAM_INIT_COMPLETED \
389 (SMU_SMUCSR_GEN_BIT(SCHEDULER_RAM_INIT_COMPLETED))
390
391#define SMU_SMUCSR_CONTEXT_RAM_INIT_COMPLETED \
392 (SMU_SMUCSR_GEN_BIT(CONTEXT_RAM_INIT_COMPLETED))
393
394#define SCU_RAM_INIT_COMPLETED \
395 (\
396 SMU_SMUCSR_CONTEXT_RAM_INIT_COMPLETED \
397 | SMU_SMUCSR_SCHEDULER_RAM_INIT_COMPLETED \
398 )
399
400/* -------------------------------------------------------------------------- */
401
402#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE0_SHIFT (0)
403#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE0_MASK (0x00000001)
404#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE1_SHIFT (1)
405#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE1_MASK (0x00000002)
406#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE2_SHIFT (2)
407#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE2_MASK (0x00000004)
408#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE3_SHIFT (3)
409#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE3_MASK (0x00000008)
410#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE0_SHIFT (8)
411#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE0_MASK (0x00000100)
412#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE1_SHIFT (9)
413#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE1_MASK (0x00000200)
414#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE2_SHIFT (10)
415#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE2_MASK (0x00000400)
416#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE3_SHIFT (11)
417#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE3_MASK (0x00000800)
418
419#define SMU_RESET_PROTOCOL_ENGINE(peg, pe) \
420 ((1 << (pe)) << ((peg) * 8))
421
422#define SMU_RESET_PEG_PROTOCOL_ENGINES(peg) \
423 (\
424 SMU_RESET_PROTOCOL_ENGINE(peg, 0) \
425 | SMU_RESET_PROTOCOL_ENGINE(peg, 1) \
426 | SMU_RESET_PROTOCOL_ENGINE(peg, 2) \
427 | SMU_RESET_PROTOCOL_ENGINE(peg, 3) \
428 )
429
430#define SMU_RESET_ALL_PROTOCOL_ENGINES() \
431 (\
432 SMU_RESET_PEG_PROTOCOL_ENGINES(0) \
433 | SMU_RESET_PEG_PROTOCOL_ENGINES(1) \
434 )
435
436#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP0_SHIFT (16)
437#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP0_MASK (0x00010000)
438#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP2_SHIFT (17)
439#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP2_MASK (0x00020000)
440#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP0_SHIFT (18)
441#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP0_MASK (0x00040000)
442#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP2_SHIFT (19)
443#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP2_MASK (0x00080000)
444
445#define SMU_RESET_WIDE_PORT_QUEUE(peg, wide_port) \
446 ((1 << ((wide_port) / 2)) << ((peg) * 2) << 16)
447
448#define SMU_SOFTRESET_CONTROL_RESET_PEG0_SHIFT (20)
449#define SMU_SOFTRESET_CONTROL_RESET_PEG0_MASK (0x00100000)
450#define SMU_SOFTRESET_CONTROL_RESET_PEG1_SHIFT (21)
451#define SMU_SOFTRESET_CONTROL_RESET_PEG1_MASK (0x00200000)
452#define SMU_SOFTRESET_CONTROL_RESET_SCU_SHIFT (22)
453#define SMU_SOFTRESET_CONTROL_RESET_SCU_MASK (0x00400000)
454
455/*
456 * Resetting a protocol engine group also resets its wide port queues
457 * and all of its protocol engines. */
458#define SMU_RESET_PROTOCOL_ENGINE_GROUP(peg) \
459 (\
460 (1 << ((peg) + 20)) \
461 | SMU_RESET_WIDE_PORT_QUEUE(peg, 0) \
462 | SMU_RESET_WIDE_PORT_QUEUE(peg, 2) \
463 | SMU_RESET_PEG_PROTOCOL_ENGINES(peg) \
464 )
465
466#define SMU_RESET_ALL_PROTOCOL_ENGINE_GROUPS() \
467 (\
468 SMU_RESET_PROTOCOL_ENGINE_GROUP(0) \
469 | SMU_RESET_PROTOCOL_ENGINE_GROUP(1) \
470 )
471
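/* Editorial sketch: with the definitions above (and the corrected LP0/LP2
 * wide port queue calls), SMU_RESET_PROTOCOL_ENGINE_GROUP(0) works out to
 *   (1 << 20)                      PEG0 group reset       0x00100000
 * | wide port queues LP0 and LP2   bits 16 and 17         0x00030000
 * | protocol engines PE0-PE3       bits 0 through 3       0x0000000F
 * i.e. 0x0013000F.  A compile-time spot check of that expansion: */
typedef char smu_peg0_reset_mask_check[
	(SMU_RESET_PROTOCOL_ENGINE_GROUP(0) == 0x0013000F) ? 1 : -1];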
472#define SMU_RESET_SCU() (0xFFFFFFFF)
473
474
475
476/* ***************************************************************************** */
477#define SMU_TASK_CONTEXT_ASSIGNMENT_STARTING_SHIFT (0)
478#define SMU_TASK_CONTEXT_ASSIGNMENT_STARTING_MASK (0x00000FFF)
479#define SMU_TASK_CONTEXT_ASSIGNMENT_ENDING_SHIFT (16)
480#define SMU_TASK_CONTEXT_ASSIGNMENT_ENDING_MASK (0x0FFF0000)
481#define SMU_TASK_CONTEXT_ASSIGNMENT_RANGE_CHECK_ENABLE_SHIFT (31)
482#define SMU_TASK_CONTEXT_ASSIGNMENT_RANGE_CHECK_ENABLE_MASK (0x80000000)
483#define SMU_TASK_CONTEXT_ASSIGNMENT_RESERVED_MASK (0x7000F000)
484
485#define SMU_TCA_GEN_VAL(name, value) \
486 SCU_GEN_VALUE(SMU_TASK_CONTEXT_ASSIGNMENT_ ## name, value)
487
488#define SMU_TCA_GEN_BIT(name) \
489 SCU_GEN_BIT(SMU_TASK_CONTEXT_ASSIGNMENT_ ## name)
490
491/* ***************************************************************************** */
492#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_QUEUE_SIZE_SHIFT (0)
493#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_QUEUE_SIZE_MASK (0x00000FFF)
494#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_RESERVED_MASK (0xFFFFF000)
495
496#define SCU_UFQC_GEN_VAL(name, value) \
497 SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_ ## name, value)
498
499#define SCU_UFQC_QUEUE_SIZE_SET(value) \
500 SCU_UFQC_GEN_VAL(QUEUE_SIZE, value)
501
502/* ***************************************************************************** */
503#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_POINTER_SHIFT (0)
504#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_POINTER_MASK (0x00000FFF)
505#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_CYCLE_BIT_SHIFT (12)
506#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_CYCLE_BIT_MASK (0x00001000)
507#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_RESERVED_MASK (0xFFFFE000)
508
509#define SCU_UFQPP_GEN_VAL(name, value) \
510 SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_ ## name, value)
511
512#define SCU_UFQPP_GEN_BIT(name) \
513 SCU_GEN_BIT(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_ ## name)
514
515/*
516 * *****************************************************************************
517 * * SDMA Registers
518 * ***************************************************************************** */
519#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_POINTER_SHIFT (0)
520#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_POINTER_MASK (0x00000FFF)
521#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_CYCLE_BIT_SHIFT (12)
522#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_CYCLE_BIT_MASK (0x00001000)
523#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ENABLE_BIT_SHIFT (31)
524#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ENABLE_BIT_MASK (0x80000000)
525#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_RESERVED_MASK (0x7FFFE000)
526
527#define SCU_UFQGP_GEN_VAL(name, value) \
528 SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ ## name, value)
529
530#define SCU_UFQGP_GEN_BIT(name) \
531 SCU_GEN_BIT(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ ## name)
532
533#define SCU_UFQGP_CYCLE_BIT(value) \
534 SCU_UFQGP_GEN_VAL(CYCLE_BIT, value)
535
536#define SCU_UFQGP_GET_POINTER(value) \
537 SCU_UFQGP_GEN_VAL(POINTER, value)
538
539#define SCU_UFQGP_ENABLE(value) \
540 (SCU_UFQGP_GEN_BIT(ENABLE_BIT) | (value))
541
542#define SCU_UFQGP_DISABLE(value) \
543 (~SCU_UFQGP_GEN_BIT(ENABLE_BIT) & (value))
544
545#define SCU_UFQGP_VALUE(bit, value) \
546 (SCU_UFQGP_CYCLE_BIT(bit) | SCU_UFQGP_GET_POINTER(value))
547
548/* ***************************************************************************** */
549#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SHIFT (0)
550#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_MASK (0x0000FFFF)
551#define SCU_PDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_SHIFT (16)
552#define SCU_PDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_MASK (0x00010000)
553#define SCU_PDMA_CONFIGURATION_PCI_NO_SNOOP_ENABLE_SHIFT (17)
554#define SCU_PDMA_CONFIGURATION_PCI_NO_SNOOP_ENABLE_MASK (0x00020000)
555#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_BYTE_SWAP_SHIFT (18)
556#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_BYTE_SWAP_MASK (0x00040000)
557#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_SGL_FETCH_SHIFT (19)
558#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_SGL_FETCH_MASK (0x00080000)
559#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_RX_HEADER_RAM_WRITE_SHIFT (20)
560#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_RX_HEADER_RAM_WRITE_MASK (0x00100000)
561#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_UF_ADDRESS_FETCH_SHIFT (21)
562#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_UF_ADDRESS_FETCH_MASK (0x00200000)
563#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SELECT_SHIFT (22)
564#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SELECT_MASK (0x00400000)
565#define SCU_PDMA_CONFIGURATION_RESERVED_MASK (0xFF800000)
566
567#define SCU_PDMACR_GEN_VALUE(name, value) \
568 SCU_GEN_VALUE(SCU_PDMA_CONFIGURATION_ ## name, value)
569
570#define SCU_PDMACR_GEN_BIT(name) \
571 SCU_GEN_BIT(SCU_PDMA_CONFIGURATION_ ## name)
572
573#define SCU_PDMACR_BE_GEN_BIT(name) \
574 SCU_PDMACR_GEN_BIT(BIG_ENDIAN_CONTROL_ ## name)
575
576/* ***************************************************************************** */
577#define SCU_CDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_SHIFT (8)
578#define SCU_CDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_MASK (0x00000100)
579
580#define SCU_CDMACR_GEN_BIT(name) \
581 SCU_GEN_BIT(SCU_CDMA_CONFIGURATION_ ## name)
582
583/*
584 * *****************************************************************************
585 * * SCU Link Layer Registers
586 * ***************************************************************************** */
587#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_TIMEOUT_SHIFT (0)
588#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_TIMEOUT_MASK (0x000000FF)
589#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_LOCK_TIME_SHIFT (8)
590#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_LOCK_TIME_MASK (0x0000FF00)
591#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_RATE_CHANGE_DELAY_SHIFT (16)
592#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_RATE_CHANGE_DELAY_MASK (0x00FF0000)
593#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_DWORD_SYNC_TIMEOUT_SHIFT (24)
594#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_DWORD_SYNC_TIMEOUT_MASK (0xFF000000)
595#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_REQUIRED_MASK (0x00000000)
596#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_DEFAULT_MASK (0x7D00676F)
597#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_RESERVED_MASK (0x00FF0000)
598
599#define SCU_SAS_SPDTOV_GEN_VALUE(name, value) \
600 SCU_GEN_VALUE(SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_ ## name, value)
601
602
603#define SCU_LINK_STATUS_DWORD_SYNC_ACQUIRED_SHIFT (2)
604#define SCU_LINK_STATUS_DWORD_SYNC_ACQUIRED_MASK (0x00000004)
605#define SCU_LINK_STATUS_TRANSMIT_PORT_SELECTION_DONE_SHIFT (4)
606#define SCU_LINK_STATUS_TRANSMIT_PORT_SELECTION_DONE_MASK (0x00000010)
607#define SCU_LINK_STATUS_RECEIVER_CREDIT_EXHAUSTED_SHIFT (5)
608#define SCU_LINK_STATUS_RECEIVER_CREDIT_EXHAUSTED_MASK (0x00000020)
609#define SCU_LINK_STATUS_RESERVED_MASK (0xFFFFFFCD)
610
611#define SCU_SAS_LLSTA_GEN_BIT(name) \
612 SCU_GEN_BIT(SCU_LINK_STATUS_ ## name)
613
614
615/* TODO: Where is the SATA_PSELTOV register? */
616
617/*
618 * *****************************************************************************
619 * * SCU SAS Maximum Arbitration Wait Time Timeout Register
620 * ***************************************************************************** */
621#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_VALUE_SHIFT (0)
622#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_VALUE_MASK (0x00007FFF)
623#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_SCALE_SHIFT (15)
624#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_SCALE_MASK (0x00008000)
625
626#define SCU_SAS_MAWTTOV_GEN_VALUE(name, value) \
627 SCU_GEN_VALUE(SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_ ## name, value)
628
629#define SCU_SAS_MAWTTOV_GEN_BIT(name) \
630 SCU_GEN_BIT(SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_ ## name)
631
632
633/*
634 * TODO: Where is the SAS_LNKTOV register?
635 * TODO: Where is the SAS_PHYTOV register? */
636
637#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_TARGET_SHIFT (1)
638#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_TARGET_MASK (0x00000002)
639#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_TARGET_SHIFT (2)
640#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_TARGET_MASK (0x00000004)
641#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_TARGET_SHIFT (3)
642#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_TARGET_MASK (0x00000008)
643#define SCU_SAS_TRANSMIT_IDENTIFICATION_DA_SATA_HOST_SHIFT (8)
644#define SCU_SAS_TRANSMIT_IDENTIFICATION_DA_SATA_HOST_MASK (0x00000100)
645#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_INITIATOR_SHIFT (9)
646#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_INITIATOR_MASK (0x00000200)
647#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_INITIATOR_SHIFT (10)
648#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_INITIATOR_MASK (0x00000400)
649#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_INITIATOR_SHIFT (11)
650#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_INITIATOR_MASK (0x00000800)
651#define SCU_SAS_TRANSMIT_IDENTIFICATION_REASON_CODE_SHIFT (16)
652#define SCU_SAS_TRANSMIT_IDENTIFICATION_REASON_CODE_MASK (0x000F0000)
653#define SCU_SAS_TRANSMIT_IDENTIFICATION_ADDRESS_FRAME_TYPE_SHIFT (24)
654#define SCU_SAS_TRANSMIT_IDENTIFICATION_ADDRESS_FRAME_TYPE_MASK (0x0F000000)
655#define SCU_SAS_TRANSMIT_IDENTIFICATION_DEVICE_TYPE_SHIFT (28)
656#define SCU_SAS_TRANSMIT_IDENTIFICATION_DEVICE_TYPE_MASK (0x70000000)
657#define SCU_SAS_TRANSMIT_IDENTIFICATION_RESERVED_MASK (0x80F0F1F1)
658
659#define SCU_SAS_TIID_GEN_VAL(name, value) \
660 SCU_GEN_VALUE(SCU_SAS_TRANSMIT_IDENTIFICATION_ ## name, value)
661
662#define SCU_SAS_TIID_GEN_BIT(name) \
663 SCU_GEN_BIT(SCU_SAS_TRANSMIT_IDENTIFICATION_ ## name)
664
665/* SAS Identify Frame PHY Identifier Register */
666#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_BREAK_REPLY_CAPABLE_SHIFT (16)
667#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_BREAK_REPLY_CAPABLE_MASK (0x00010000)
668#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_REQUESTED_INSIDE_ZPSDS_SHIFT (17)
669#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_REQUESTED_INSIDE_ZPSDS_MASK (0x00020000)
670#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_INSIDE_ZPSDS_PERSISTENT_SHIFT (18)
671#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_INSIDE_ZPSDS_PERSISTENT_MASK (0x00040000)
672#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ID_SHIFT (24)
673#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ID_MASK (0xFF000000)
674#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_RESERVED_MASK (0x00F800FF)
675
676#define SCU_SAS_TIPID_GEN_VALUE(name, value) \
677 SCU_GEN_VALUE(SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ ## name, value)
678
679#define SCU_SAS_TIPID_GEN_BIT(name) \
680 SCU_GEN_BIT(SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ ## name)
681
682
683#define SCU_SAS_PHY_CONFIGURATION_TX_PARITY_CHECK_SHIFT (4)
684#define SCU_SAS_PHY_CONFIGURATION_TX_PARITY_CHECK_MASK (0x00000010)
685#define SCU_SAS_PHY_CONFIGURATION_TX_BAD_CRC_SHIFT (6)
686#define SCU_SAS_PHY_CONFIGURATION_TX_BAD_CRC_MASK (0x00000040)
687#define SCU_SAS_PHY_CONFIGURATION_DISABLE_SCRAMBLER_SHIFT (7)
688#define SCU_SAS_PHY_CONFIGURATION_DISABLE_SCRAMBLER_MASK (0x00000080)
689#define SCU_SAS_PHY_CONFIGURATION_DISABLE_DESCRAMBLER_SHIFT (8)
690#define SCU_SAS_PHY_CONFIGURATION_DISABLE_DESCRAMBLER_MASK (0x00000100)
691#define SCU_SAS_PHY_CONFIGURATION_DISABLE_CREDIT_INSERTION_SHIFT (9)
692#define SCU_SAS_PHY_CONFIGURATION_DISABLE_CREDIT_INSERTION_MASK (0x00000200)
693#define SCU_SAS_PHY_CONFIGURATION_SUSPEND_PROTOCOL_ENGINE_SHIFT (11)
694#define SCU_SAS_PHY_CONFIGURATION_SUSPEND_PROTOCOL_ENGINE_MASK (0x00000800)
695#define SCU_SAS_PHY_CONFIGURATION_SATA_SPINUP_HOLD_SHIFT (12)
696#define SCU_SAS_PHY_CONFIGURATION_SATA_SPINUP_HOLD_MASK (0x00001000)
697#define SCU_SAS_PHY_CONFIGURATION_TRANSMIT_PORT_SELECTION_SIGNAL_SHIFT (13)
698#define SCU_SAS_PHY_CONFIGURATION_TRANSMIT_PORT_SELECTION_SIGNAL_MASK (0x00002000)
699#define SCU_SAS_PHY_CONFIGURATION_HARD_RESET_SHIFT (14)
700#define SCU_SAS_PHY_CONFIGURATION_HARD_RESET_MASK (0x00004000)
701#define SCU_SAS_PHY_CONFIGURATION_OOB_ENABLE_SHIFT (15)
702#define SCU_SAS_PHY_CONFIGURATION_OOB_ENABLE_MASK (0x00008000)
703#define SCU_SAS_PHY_CONFIGURATION_ENABLE_FRAME_TX_INSERT_ALIGN_SHIFT (23)
704#define SCU_SAS_PHY_CONFIGURATION_ENABLE_FRAME_TX_INSERT_ALIGN_MASK (0x00800000)
705#define SCU_SAS_PHY_CONFIGURATION_FORWARD_IDENTIFY_FRAME_SHIFT (27)
706#define SCU_SAS_PHY_CONFIGURATION_FORWARD_IDENTIFY_FRAME_MASK (0x08000000)
707#define SCU_SAS_PHY_CONFIGURATION_DISABLE_BYTE_TRANSPOSE_STP_FRAME_SHIFT (28)
708#define SCU_SAS_PHY_CONFIGURATION_DISABLE_BYTE_TRANSPOSE_STP_FRAME_MASK (0x10000000)
709#define SCU_SAS_PHY_CONFIGURATION_OOB_RESET_SHIFT (29)
710#define SCU_SAS_PHY_CONFIGURATION_OOB_RESET_MASK (0x20000000)
711#define SCU_SAS_PHY_CONFIGURATION_THREE_IAF_ENABLE_SHIFT (30)
712#define SCU_SAS_PHY_CONFIGURATION_THREE_IAF_ENABLE_MASK (0x40000000)
713#define SCU_SAS_PHY_CONFIGURATION_OOB_ALIGN0_ENABLE_SHIFT (31)
714#define SCU_SAS_PHY_CONFIGURATION_OOB_ALIGN0_ENABLE_MASK (0x80000000)
715#define SCU_SAS_PHY_CONFIGURATION_REQUIRED_MASK (0x0100000F)
716#define SCU_SAS_PHY_CONFIGURATION_DEFAULT_MASK (0x4180100F)
717#define SCU_SAS_PHY_CONFIGURATION_RESERVED_MASK (0x00000000)
718
719#define SCU_SAS_PCFG_GEN_BIT(name) \
720 SCU_GEN_BIT(SCU_SAS_PHY_CONFIGURATION_ ## name)
721
722#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_GENERAL_SHIFT (0)
723#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_GENERAL_MASK (0x000007FF)
724#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_CONNECTED_SHIFT (16)
725#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_CONNECTED_MASK (0x00FF0000)
726
727#define SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(name, value) \
728 SCU_GEN_VALUE(SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_##name, value)
729
730#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_COUNT_SHIFT (0)
731#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_COUNT_MASK (0x0003FFFF)
732#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ENABLE_SHIFT (31)
733#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ENABLE_MASK (0x80000000)
734#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_RESERVED_MASK (0x7FFC0000)
735
736#define SCU_ENSPINUP_GEN_VAL(name, value) \
737 SCU_GEN_VALUE(SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ ## name, value)
738
739#define SCU_ENSPINUP_GEN_BIT(name) \
740 SCU_GEN_BIT(SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ ## name)
741
742
743#define SCU_LINK_LAYER_PHY_CAPABILITIES_TXSSCTYPE_SHIFT (1)
744#define SCU_LINK_LAYER_PHY_CAPABILITIES_TXSSCTYPE_MASK (0x00000002)
745#define SCU_LINK_LAYER_PHY_CAPABILITIES_RLLRATE_SHIFT (4)
746#define SCU_LINK_LAYER_PHY_CAPABILITIES_RLLRATE_MASK (0x000000F0)
747#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO15GBPS_SHIFT (8)
748#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO15GBPS_MASK (0x00000100)
749#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW15GBPS_SHIFT (9)
750#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW15GBPS_MASK (0x00000201)
751#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO30GBPS_SHIFT (10)
752#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO30GBPS_MASK (0x00000401)
753#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW30GBPS_SHIFT (11)
754#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW30GBPS_MASK (0x00000801)
755#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO60GBPS_SHIFT (12)
756#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO60GBPS_MASK (0x00001001)
757#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW60GBPS_SHIFT (13)
758#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW60GBPS_MASK (0x00002001)
759#define SCU_LINK_LAYER_PHY_CAPABILITIES_EVEN_PARITY_SHIFT (31)
760#define SCU_LINK_LAYER_PHY_CAPABILITIES_EVEN_PARITY_MASK (0x80000000)
761#define SCU_LINK_LAYER_PHY_CAPABILITIES_DEFAULT_MASK (0x00003F01)
762#define SCU_LINK_LAYER_PHY_CAPABILITIES_REQUIRED_MASK (0x00000001)
763#define SCU_LINK_LAYER_PHY_CAPABILITIES_RESERVED_MASK (0x7FFFC00D)
764
765#define SCU_SAS_PHYCAP_GEN_VAL(name, value) \
766 SCU_GEN_VALUE(SCU_LINK_LAYER_PHY_CAPABILITIES_ ## name, value)
767
768#define SCU_SAS_PHYCAP_GEN_BIT(name) \
769 SCU_GEN_BIT(SCU_LINK_LAYER_PHY_CAPABILITIES_ ## name)
770
771
772#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_VIRTUAL_EXPANDER_PHY_ZONE_GROUP_SHIFT (0)
773#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_VIRTUAL_EXPANDER_PHY_ZONE_GROUP_MASK (0x000000FF)
774#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_INSIDE_SOURCE_ZONE_GROUP_SHIFT (31)
775#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_INSIDE_SOURCE_ZONE_GROUP_MASK (0x80000000)
776#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_RESERVED_MASK (0x7FFFFF00)
777
778#define SCU_PSZGCR_GEN_VAL(name, value) \
779 SCU_GEN_VALUE(SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_ ## name, value)
780
781#define SCU_PSZGCR_GEN_BIT(name) \
782 SCU_GEN_BIT(SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_ ## name)
783
784#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_LOCKED_SHIFT (1)
785#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_LOCKED_MASK (0x00000002)
786#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_UPDATING_SHIFT (2)
787#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_UPDATING_MASK (0x00000004)
788#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_LOCKED_SHIFT (4)
789#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_LOCKED_MASK (0x00000010)
790#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_UPDATING_SHIFT (5)
791#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_UPDATING_MASK (0x00000020)
792#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE0_SHIFT (16)
793#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE0_MASK (0x00030000)
794#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE0_SHIFT (19)
795#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE0_MASK (0x00080000)
796#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE1_SHIFT (20)
797#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE1_MASK (0x00300000)
798#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE1_SHIFT (23)
799#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE1_MASK (0x00800000)
800#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE2_SHIFT (24)
801#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE2_MASK (0x03000000)
802#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE2_SHIFT (27)
803#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE2_MASK (0x08000000)
804#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE3_SHIFT (28)
805#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE3_MASK (0x30000000)
806#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE3_SHIFT (31)
807#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE3_MASK (0x80000000)
808#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_RESERVED_MASK (0x4444FFC9)
809
810#define SCU_PEG_SCUVZECR_GEN_VAL(name, val) \
811 SCU_GEN_VALUE(SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ ## name, val)
812
813#define SCU_PEG_SCUVZECR_GEN_BIT(name) \
814 SCU_GEN_BIT(SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ ## name)
815
816
817/*
818 * *****************************************************************************
819 * * Port Task Scheduler registers shift and mask values
820 * ***************************************************************************** */
821#define SCU_PTSG_CONTROL_IT_NEXUS_TIMEOUT_SHIFT (0)
822#define SCU_PTSG_CONTROL_IT_NEXUS_TIMEOUT_MASK (0x0000FFFF)
823#define SCU_PTSG_CONTROL_TASK_TIMEOUT_SHIFT (16)
824#define SCU_PTSG_CONTROL_TASK_TIMEOUT_MASK (0x00FF0000)
825#define SCU_PTSG_CONTROL_PTSG_ENABLE_SHIFT (24)
826#define SCU_PTSG_CONTROL_PTSG_ENABLE_MASK (0x01000000)
827#define SCU_PTSG_CONTROL_ETM_ENABLE_SHIFT (25)
828#define SCU_PTSG_CONTROL_ETM_ENABLE_MASK (0x02000000)
829#define SCU_PTSG_CONTROL_DEFAULT_MASK (0x00020002)
830#define SCU_PTSG_CONTROL_REQUIRED_MASK (0x00000000)
831#define SCU_PTSG_CONTROL_RESERVED_MASK (0xFC000000)
832
833#define SCU_PTSGCR_GEN_VAL(name, val) \
834 SCU_GEN_VALUE(SCU_PTSG_CONTROL_ ## name, val)
835
836#define SCU_PTSGCR_GEN_BIT(name) \
837 SCU_GEN_BIT(SCU_PTSG_CONTROL_ ## name)
838
839
840/* ***************************************************************************** */
841#define SCU_PTSG_REAL_TIME_CLOCK_SHIFT (0)
842#define SCU_PTSG_REAL_TIME_CLOCK_MASK (0x0000FFFF)
843#define SCU_PTSG_REAL_TIME_CLOCK_RESERVED_MASK (0xFFFF0000)
844
845#define SCU_RTCR_GEN_VAL(name, val) \
846 SCU_GEN_VALUE(SCU_PTSG_ ## name, val)
847
848
849#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_PRESCALER_VALUE_SHIFT (0)
850#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_PRESCALER_VALUE_MASK (0x00FFFFFF)
851#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_RESERVED_MASK (0xFF000000)
852
853#define SCU_RTCCR_GEN_VAL(name, val) \
854 SCU_GEN_VALUE(SCU_PTSG_REAL_TIME_CLOCK_CONTROL_ ## name, val)
855
856
857#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_SUSPEND_SHIFT (0)
858#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_SUSPEND_MASK (0x00000001)
859#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ENABLE_SHIFT (1)
860#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ENABLE_MASK (0x00000002)
861#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_RESERVED_MASK (0xFFFFFFFC)
862
863#define SCU_PTSxCR_GEN_BIT(name) \
864 SCU_GEN_BIT(SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ ## name)
865
866
867#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_NEXT_RN_VALID_SHIFT (0)
868#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_NEXT_RN_VALID_MASK (0x00000001)
869#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ACTIVE_RNSC_LIST_VALID_SHIFT (1)
870#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ACTIVE_RNSC_LIST_VALID_MASK (0x00000002)
871#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_PTS_SUSPENDED_SHIFT (2)
872#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_PTS_SUSPENDED_MASK (0x00000004)
873#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_RESERVED_MASK (0xFFFFFFF8)
874
875#define SCU_PTSxSR_GEN_BIT(name) \
876 SCU_GEN_BIT(SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ ## name)
877
878
879/*
880 * *****************************************************************************
881 * * SGPIO Register shift and mask values
882 * ***************************************************************************** */
883#define SCU_SGPIO_CONTROL_SGPIO_ENABLE_SHIFT (0)
884#define SCU_SGPIO_CONTROL_SGPIO_ENABLE_MASK (0x00000001)
885#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_CLOCK_SELECT_SHIFT (1)
886#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_CLOCK_SELECT_MASK (0x00000002)
887#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_SHIFT_WIDTH_SELECT_SHIFT (2)
888#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_SHIFT_WIDTH_SELECT_MASK (0x00000004)
889#define SCU_SGPIO_CONTROL_SGPIO_TEST_BIT_SHIFT (15)
890#define SCU_SGPIO_CONTROL_SGPIO_TEST_BIT_MASK (0x00008000)
891#define SCU_SGPIO_CONTROL_SGPIO_RESERVED_MASK (0xFFFF7FF8)
892
893#define SCU_SGICRx_GEN_BIT(name) \
894 SCU_GEN_BIT(SCU_SGPIO_CONTROL_SGPIO_ ## name)
895
896#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R0_SHIFT (0)
897#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R0_MASK (0x0000000F)
898#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R1_SHIFT (4)
899#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R1_MASK (0x000000F0)
900#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R2_SHIFT (8)
901#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R2_MASK (0x00000F00)
902#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R3_SHIFT (12)
903#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R3_MASK (0x0000F000)
904#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_RESERVED_MASK (0xFFFF0000)
905
906#define SCU_SGPBRx_GEN_VAL(name, value) \
907 SCU_GEN_VALUE(SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_ ## name, value)
908
909#define SCU_SGPIO_START_DRIVE_LOWER_R0_SHIFT (0)
910#define SCU_SGPIO_START_DRIVE_LOWER_R0_MASK (0x00000003)
911#define SCU_SGPIO_START_DRIVE_LOWER_R1_SHIFT (4)
912#define SCU_SGPIO_START_DRIVE_LOWER_R1_MASK (0x00000030)
913#define SCU_SGPIO_START_DRIVE_LOWER_R2_SHIFT (8)
914#define SCU_SGPIO_START_DRIVE_LOWER_R2_MASK (0x00000300)
915#define SCU_SGPIO_START_DRIVE_LOWER_R3_SHIFT (12)
916#define SCU_SGPIO_START_DRIVE_LOWER_R3_MASK (0x00003000)
917#define SCU_SGPIO_START_DRIVE_LOWER_RESERVED_MASK (0xFFFF8888)
918
919#define SCU_SGSDLRx_GEN_VAL(name, value) \
920 SCU_GEN_VALUE(SCU_SGPIO_START_DRIVE_LOWER_ ## name, value)
921
922#define SCU_SGPIO_START_DRIVE_UPPER_R0_SHIFT (0)
923#define SCU_SGPIO_START_DRIVE_UPPER_R0_MASK (0x00000003)
924#define SCU_SGPIO_START_DRIVE_UPPER_R1_SHIFT (4)
925#define SCU_SGPIO_START_DRIVE_UPPER_R1_MASK (0x00000030)
926#define SCU_SGPIO_START_DRIVE_UPPER_R2_SHIFT (8)
927#define SCU_SGPIO_START_DRIVE_UPPER_R2_MASK (0x00000300)
928#define SCU_SGPIO_START_DRIVE_UPPER_R3_SHIFT (12)
929#define SCU_SGPIO_START_DRIVE_UPPER_R3_MASK (0x00003000)
930#define SCU_SGPIO_START_DRIVE_UPPER_RESERVED_MASK (0xFFFF8888)
931
932#define SCU_SGSDURx_GEN_VAL(name, value) \
933 SCU_GEN_VALUE(SCU_SGPIO_START_DRIVE_UPPER_ ## name, value)
934
935#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D0_SHIFT (0)
936#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D0_MASK (0x00000003)
937#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D1_SHIFT (4)
938#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D1_MASK (0x00000030)
939#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D2_SHIFT (8)
940#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D2_MASK (0x00000300)
941#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D3_SHIFT (12)
942#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D3_MASK (0x00003000)
943#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_RESERVED_MASK (0xFFFF8888)
944
945#define SCU_SGSIDLRx_GEN_VAL(name, value) \
946 SCU_GEN_VALUE(SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_ ## name, value)
947
948#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D0_SHIFT (0)
949#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D0_MASK (0x00000003)
950#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D1_SHIFT (4)
951#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D1_MASK (0x00000030)
952#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D2_SHIFT (8)
953#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D2_MASK (0x00000300)
954#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D3_SHIFT (12)
955#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D3_MASK (0x00003000)
956#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_RESERVED_MASK (0xFFFF8888)
957
958#define SCU_SGSIDURx_GEN_VAL(name, value) \
959 SCU_GEN_VALUE(SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_ ## name, value)
960
961#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_SHIFT (0)
962#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_MASK (0x0000000F)
963#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_RESERVED_MASK (0xFFFFFFF0)
964
965#define SCU_SGVSCR_GEN_VAL(value) \
966 SCU_GEN_VALUE(SCU_SGPIO_VENDOR_SPECIFIC_CODE, value)
967
968#define SCU_SGPIO_OUTPUT_DATA_SELECT_INPUT_DATA0_SHIFT (0)
969#define SCU_SGPIO_OUTPUT_DATA_SELECT_INPUT_DATA0_MASK (0x00000003)
970#define SCU_SGPIO_OUTPUT_DATA_SELECT_INVERT_INPUT_DATA0_SHIFT (2)
971#define SCU_SGPIO_OUTPUT_DATA_SELECT_INVERT_INPUT_DATA0_MASK (0x00000004)
972#define SCU_SGPIO_OUTPUT_DATA_SELECT_JOG_ENABLE_DATA0_SHIFT (3)
973#define SCU_SGPIO_OUTPUT_DATA_SELECT_JOG_ENABLE_DATA0_MASK (0x00000008)
974#define SCU_SGPIO_OUTPUT_DATA_SELECT_INPUT_DATA1_SHIFT (4)
975#define SCU_SGPIO_OUTPUT_DATA_SELECT_INPUT_DATA1_MASK (0x00000030)
976#define SCU_SGPIO_OUTPUT_DATA_SELECT_INVERT_INPUT_DATA1_SHIFT (6)
977#define SCU_SGPIO_OUTPUT_DATA_SELECT_INVERT_INPUT_DATA1_MASK (0x00000040)
978#define SCU_SGPIO_OUTPUT_DATA_SELECT_JOG_ENABLE_DATA1_SHIFT (7)
979#define SCU_SGPIO_OUTPUT_DATA_SELECT_JOG_ENABLE_DATA1_MASK (0x00000080)
980#define SCU_SGPIO_OUTPUT_DATA_SELECT_INPUT_DATA2_SHIFT (8)
981#define SCU_SGPIO_OUTPUT_DATA_SELECT_INPUT_DATA2_MASK (0x00000300)
982#define SCU_SGPIO_OUTPUT_DATA_SELECT_INVERT_INPUT_DATA2_SHIFT (10)
983#define SCU_SGPIO_OUTPUT_DATA_SELECT_INVERT_INPUT_DATA2_MASK (0x00000400)
984#define SCU_SGPIO_OUTPUT_DATA_SELECT_JOG_ENABLE_DATA2_SHIFT (11)
985#define SCU_SGPIO_OUTPUT_DATA_SELECT_JOG_ENABLE_DATA2_MASK (0x00000800)
986#define SCU_SGPIO_OUTPUT_DATA_SELECT_RESERVED_MASK (0xFFFFF000)
987
988#define SCU_SGODSR_GEN_VAL(name, value) \
989 SCU_GEN_VALUE(SCU_SGPIO_OUTPUT_DATA_SELECT_ ## name, value)
990
991#define SCU_SGODSR_GEN_BIT(name) \
992 SCU_GEN_BIT(SCU_SGPIO_OUTPUT_DATA_SELECT_ ## name)
993
994/*
995 * *****************************************************************************
996 * * SMU Registers
997 * ***************************************************************************** */
998
999/*
1000 * ----------------------------------------------------------------------------
1001 * SMU Registers
1002 * These registers are addressed relative to BAR0
1003 *
1004 * To calculate the offset for other functions use
1005 * BAR0 + FN# * SystemPageSize * 2
1006 *
1007 * The TCA is only accessible from FN#0 (the Physical Function); each
1008 * entry is programmed at (BAR0 + SCU_SMU_TCA_OFFSET + (FN# * 0x04)), i.e.
1009 * TCA0 for FN#0 is at BAR0 + 0x0400
1010 * TCA1 for FN#1 is at BAR0 + 0x0404
1011 * etc.
1012 * ----------------------------------------------------------------------------
1013 * Accessible to all FN#s */
1014#define SCU_SMU_PCP_OFFSET 0x0000
1015#define SCU_SMU_AMR_OFFSET 0x0004
1016#define SCU_SMU_ISR_OFFSET 0x0010
1017#define SCU_SMU_IMR_OFFSET 0x0014
1018#define SCU_SMU_ICC_OFFSET 0x0018
1019#define SCU_SMU_HTTLBAR_OFFSET 0x0020
1020#define SCU_SMU_HTTUBAR_OFFSET 0x0024
1021#define SCU_SMU_TCR_OFFSET 0x0028
1022#define SCU_SMU_CQLBAR_OFFSET 0x0030
1023#define SCU_SMU_CQUBAR_OFFSET 0x0034
1024#define SCU_SMU_CQPR_OFFSET 0x0040
1025#define SCU_SMU_CQGR_OFFSET 0x0044
1026#define SCU_SMU_CQC_OFFSET 0x0048
1027/* Accessible to FN#0 only */
1028#define SCU_SMU_RNCLBAR_OFFSET 0x0080
1029#define SCU_SMU_RNCUBAR_OFFSET 0x0084
1030#define SCU_SMU_DCC_OFFSET 0x0090
1031#define SCU_SMU_DFC_OFFSET 0x0094
1032#define SCU_SMU_SMUCSR_OFFSET 0x0098
1033#define SCU_SMU_SCUSRCR_OFFSET 0x009C
1034#define SCU_SMU_SMAW_OFFSET 0x00A0
1035#define SCU_SMU_SMDW_OFFSET 0x00A4
1036/* Accessible to FN#0 only */
1037#define SCU_SMU_TCA_OFFSET 0x0400
1038/* Accessible to all FN#s */
1039#define SCU_SMU_MT_MLAR0_OFFSET 0x2000
1040#define SCU_SMU_MT_MUAR0_OFFSET 0x2004
1041#define SCU_SMU_MT_MDR0_OFFSET 0x2008
1042#define SCU_SMU_MT_VCR0_OFFSET 0x200C
1043#define SCU_SMU_MT_MLAR1_OFFSET 0x2010
1044#define SCU_SMU_MT_MUAR1_OFFSET 0x2014
1045#define SCU_SMU_MT_MDR1_OFFSET 0x2018
1046#define SCU_SMU_MT_VCR1_OFFSET 0x201C
1047#define SCU_SMU_MPBA_OFFSET 0x3000
1048
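/* Editorial sketch of the addressing rules above.  Helper names are
 * illustrative; PAGE_SIZE (<asm/page.h>) stands in for SystemPageSize. */
static inline unsigned long smu_fn_base(unsigned long bar0, unsigned int fn)
{
	/* BAR0 + FN# * SystemPageSize * 2 */
	return bar0 + fn * PAGE_SIZE * 2;
}

static inline unsigned long smu_tca_addr(unsigned long bar0, unsigned int fn)
{
	/* TCA registers live in FN#0's block, one u32 per function */
	return bar0 + SCU_SMU_TCA_OFFSET + fn * 0x04;
}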
1049/**
1050 * struct smu_registers - These are the SMU registers
1051 *
1052 *
1053 */
1054struct smu_registers {
1055/* 0x0000 PCP */
1056 u32 post_context_port;
1057/* 0x0004 AMR */
1058 u32 address_modifier;
1059 u32 reserved_08;
1060 u32 reserved_0C;
1061/* 0x0010 ISR */
1062 u32 interrupt_status;
1063/* 0x0014 IMR */
1064 u32 interrupt_mask;
1065/* 0x0018 ICC */
1066 u32 interrupt_coalesce_control;
1067 u32 reserved_1C;
1068/* 0x0020 HTTLBAR */
1069 u32 host_task_table_lower;
1070/* 0x0024 HTTUBAR */
1071 u32 host_task_table_upper;
1072/* 0x0028 TCR */
1073 u32 task_context_range;
1074 u32 reserved_2C;
1075/* 0x0030 CQLBAR */
1076 u32 completion_queue_lower;
1077/* 0x0034 CQUBAR */
1078 u32 completion_queue_upper;
1079 u32 reserved_38;
1080 u32 reserved_3C;
1081/* 0x0040 CQPR */
1082 u32 completion_queue_put;
1083/* 0x0044 CQGR */
1084 u32 completion_queue_get;
1085/* 0x0048 CQC */
1086 u32 completion_queue_control;
1087 u32 reserved_4C;
1088 u32 reserved_5x[4];
1089 u32 reserved_6x[4];
1090 u32 reserved_7x[4];
1091/*
1092 * Accessible to FN#0 only
1093 * 0x0080 RNCLBAR */
1094 u32 remote_node_context_lower;
1095/* 0x0084 RNCUBAR */
1096 u32 remote_node_context_upper;
1097 u32 reserved_88;
1098 u32 reserved_8C;
1099/* 0x0090 DCC */
1100 u32 device_context_capacity;
1101/* 0x0094 DFC */
1102 u32 device_function_capacity;
1103/* 0x0098 SMUCSR */
1104 u32 control_status;
1105/* 0x009C SCUSRCR */
1106 u32 soft_reset_control;
1107/* 0x00A0 SMAW */
1108 u32 mmr_address_window;
1109/* 0x00A4 SMDW */
1110 u32 mmr_data_window;
1111 u32 reserved_A8;
1112 u32 reserved_AC;
1113/* A whole bunch of reserved space */
1114 u32 reserved_Bx[4];
1115 u32 reserved_Cx[4];
1116 u32 reserved_Dx[4];
1117 u32 reserved_Ex[4];
1118 u32 reserved_Fx[4];
1119 u32 reserved_1xx[64];
1120 u32 reserved_2xx[64];
1121 u32 reserved_3xx[64];
1122/*
1123 * Accessible to FN#0 only
1124 * 0x0400 TCA */
1125 u32 task_context_assignment[256];
1126/* MSI-X registers not included */
1127};
1128
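/* Editorial sketch: the offsets noted in the comments above can be pinned
 * down at compile time with offsetof (<linux/stddef.h>) and BUILD_BUG_ON()
 * (<linux/kernel.h>).  The checked subset is illustrative. */
static inline void smu_registers_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct smu_registers, interrupt_status) != 0x10);
	BUILD_BUG_ON(offsetof(struct smu_registers, completion_queue_put) != 0x40);
	BUILD_BUG_ON(offsetof(struct smu_registers, control_status) != 0x98);
	BUILD_BUG_ON(offsetof(struct smu_registers, task_context_assignment) != 0x400);
}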
1129/*
1130 * *****************************************************************************
1131 * SDMA Registers
1132 * ***************************************************************************** */
1133#define SCU_SDMA_BASE 0x6000
1134#define SCU_SDMA_PUFATLHAR_OFFSET 0x0000
1135#define SCU_SDMA_PUFATUHAR_OFFSET 0x0004
1136#define SCU_SDMA_UFLHBAR_OFFSET 0x0008
1137#define SCU_SDMA_UFUHBAR_OFFSET 0x000C
1138#define SCU_SDMA_UFQC_OFFSET 0x0010
1139#define SCU_SDMA_UFQPP_OFFSET 0x0014
1140#define SCU_SDMA_UFQGP_OFFSET 0x0018
1141#define SCU_SDMA_PDMACR_OFFSET 0x001C
1142#define SCU_SDMA_CDMACR_OFFSET 0x0080
1143
1144/**
1145 * struct scu_sdma_registers - These are the SCU SDMA Registers
1146 *
1147 *
1148 */
1149struct scu_sdma_registers {
1150/* 0x0000 PUFATLHAR */
1151 u32 uf_address_table_lower;
1152/* 0x0004 PUFATUHAR */
1153 u32 uf_address_table_upper;
1154/* 0x0008 UFLHBAR */
1155 u32 uf_header_base_address_lower;
1156/* 0x000C UFUHBAR */
1157 u32 uf_header_base_address_upper;
1158/* 0x0010 UFQC */
1159 u32 unsolicited_frame_queue_control;
1160/* 0x0014 UFQPP */
1161 u32 unsolicited_frame_put_pointer;
1162/* 0x0018 UFQGP */
1163 u32 unsolicited_frame_get_pointer;
1164/* 0x001C PDMACR */
1165 u32 pdma_configuration;
1166/* Reserved until offset 0x80 */
1167 u32 reserved_0020_007C[0x18];
1168/* 0x0080 CDMACR */
1169 u32 cdma_configuration;
1170/* Remainder SDMA register space */
1171 u32 reserved_0084_0400[0xDF];
1172
1173};
1174
1175/*
1176 * *****************************************************************************
1177 * * SCU Link Registers
1178 * ***************************************************************************** */
1179#define SCU_PEG0_OFFSET 0x0000
1180#define SCU_PEG1_OFFSET 0x8000
1181
1182#define SCU_TL0_OFFSET 0x0000
1183#define SCU_TL1_OFFSET 0x0400
1184#define SCU_TL2_OFFSET 0x0800
1185#define SCU_TL3_OFFSET 0x0C00
1186
1187#define SCU_LL_OFFSET 0x0080
1188#define SCU_LL0_OFFSET (SCU_TL0_OFFSET + SCU_LL_OFFSET)
1189#define SCU_LL1_OFFSET (SCU_TL1_OFFSET + SCU_LL_OFFSET)
1190#define SCU_LL2_OFFSET (SCU_TL2_OFFSET + SCU_LL_OFFSET)
1191#define SCU_LL3_OFFSET (SCU_TL3_OFFSET + SCU_LL_OFFSET)
1192
1193/* Transport Layer Offsets (PEG + TL) */
1194#define SCU_TLCR_OFFSET 0x0000
1195#define SCU_TLADTR_OFFSET 0x0004
1196#define SCU_TLTTMR_OFFSET 0x0008
1197#define SCU_TLEECR0_OFFSET 0x000C
1198#define SCU_STPTLDARNI_OFFSET 0x0010
1199
1200
1201#define SCU_TLCR_HASH_SAS_CHECKING_ENABLE_SHIFT (0)
1202#define SCU_TLCR_HASH_SAS_CHECKING_ENABLE_MASK (0x00000001)
1203#define SCU_TLCR_CLEAR_TCI_NCQ_MAPPING_TABLE_SHIFT (1)
1204#define SCU_TLCR_CLEAR_TCI_NCQ_MAPPING_TABLE_MASK (0x00000002)
1205#define SCU_TLCR_STP_WRITE_DATA_PREFETCH_SHIFT (3)
1206#define SCU_TLCR_STP_WRITE_DATA_PREFETCH_MASK (0x00000008)
1207#define SCU_TLCR_CMD_NAK_STATUS_CODE_SHIFT (4)
1208#define SCU_TLCR_CMD_NAK_STATUS_CODE_MASK (0x00000010)
1209#define SCU_TLCR_RESERVED_MASK (0xFFFFFFEB)
1210
1211#define SCU_TLCR_GEN_BIT(name) \
1212 SCU_GEN_BIT(SCU_TLCR_ ## name)
1213
1214/**
1215 * struct scu_transport_layer_registers - These are the SCU Transport Layer
1216 * registers
1217 *
1218 *
1219 */
1220struct scu_transport_layer_registers {
1221 /* 0x0000 TLCR */
1222 u32 control;
1223 /* 0x0004 TLADTR */
1224 u32 arbitration_delay_timer;
1225 /* 0x0008 TLTTMR */
1226 u32 timer_test_mode;
1227 /* 0x000C reserved */
1228 u32 reserved_0C;
1229 /* 0x0010 STPTLDARNI */
1230 u32 stp_rni;
1231 /* 0x0014 TLFEWPORCTRL */
1232 u32 tlfe_wpo_read_control;
1233 /* 0x0018 TLFEWPORDATA */
1234 u32 tlfe_wpo_read_data;
1235 /* 0x001C RXTLSSCSR1 */
1236 u32 rxtl_single_step_control_status_1;
1237 /* 0x0020 RXTLSSCSR2 */
1238 u32 rxtl_single_step_control_status_2;
1239 /* 0x0024 AWTRDDCR */
1240 u32 tlfe_awt_retry_delay_debug_control;
1241 /* Remainder of TL memory space */
1242 u32 reserved_0028_007F[0x16];
1243
1244};
1245
1246/* Protocol Engine Group Registers */
1247#define SCU_SCUVZECRx_OFFSET 0x1080
1248
1249/* Link Layer Offsets (PEG + TL + LL) */
1250#define SCU_SAS_SPDTOV_OFFSET 0x0000
1251#define SCU_SAS_LLSTA_OFFSET 0x0004
1252#define SCU_SATA_PSELTOV_OFFSET 0x0008
1253#define SCU_SAS_TIMETOV_OFFSET 0x0010
1254#define SCU_SAS_LOSTOT_OFFSET 0x0014
1255#define SCU_SAS_LNKTOV_OFFSET 0x0018
1256#define SCU_SAS_PHYTOV_OFFSET 0x001C
1257#define SCU_SAS_AFERCNT_OFFSET 0x0020
1258#define SCU_SAS_WERCNT_OFFSET 0x0024
1259#define SCU_SAS_TIID_OFFSET 0x0028
1260#define SCU_SAS_TIDNH_OFFSET 0x002C
1261#define SCU_SAS_TIDNL_OFFSET 0x0030
1262#define SCU_SAS_TISSAH_OFFSET 0x0034
1263#define SCU_SAS_TISSAL_OFFSET 0x0038
1264#define SCU_SAS_TIPID_OFFSET 0x003C
1265#define SCU_SAS_TIRES2_OFFSET 0x0040
1266#define SCU_SAS_ADRSTA_OFFSET 0x0044
1267#define SCU_SAS_MAWTTOV_OFFSET 0x0048
1268#define SCU_SAS_FRPLDFIL_OFFSET 0x0054
1269#define SCU_SAS_RFCNT_OFFSET 0x0060
1270#define SCU_SAS_TFCNT_OFFSET 0x0064
1271#define SCU_SAS_RFDCNT_OFFSET 0x0068
1272#define SCU_SAS_TFDCNT_OFFSET 0x006C
1273#define SCU_SAS_LERCNT_OFFSET 0x0070
1274#define SCU_SAS_RDISERRCNT_OFFSET 0x0074
1275#define SCU_SAS_CRERCNT_OFFSET 0x0078
1276#define SCU_STPCTL_OFFSET 0x007C
1277#define SCU_SAS_PCFG_OFFSET 0x0080
1278#define SCU_SAS_CLKSM_OFFSET 0x0084
1279#define SCU_SAS_TXCOMWAKE_OFFSET 0x0088
1280#define SCU_SAS_TXCOMINIT_OFFSET 0x008C
1281#define SCU_SAS_TXCOMSAS_OFFSET 0x0090
1282#define SCU_SAS_COMINIT_OFFSET 0x0094
1283#define SCU_SAS_COMWAKE_OFFSET 0x0098
1284#define SCU_SAS_COMSAS_OFFSET 0x009C
1285#define SCU_SAS_SFERCNT_OFFSET 0x00A0
1286#define SCU_SAS_CDFERCNT_OFFSET 0x00A4
1287#define SCU_SAS_DNFERCNT_OFFSET 0x00A8
1288#define SCU_SAS_PRSTERCNT_OFFSET 0x00AC
1289#define SCU_SAS_CNTCTL_OFFSET 0x00B0
1290#define SCU_SAS_SSPTOV_OFFSET 0x00B4
1291#define SCU_FTCTL_OFFSET 0x00B8
1292#define SCU_FRCTL_OFFSET 0x00BC
1293#define SCU_FTWMRK_OFFSET 0x00C0
1294#define SCU_ENSPINUP_OFFSET 0x00C4
1295#define SCU_SAS_TRNTOV_OFFSET 0x00C8
1296#define SCU_SAS_PHYCAP_OFFSET 0x00CC
1297#define SCU_SAS_PHYCTL_OFFSET 0x00D0
1298#define SCU_SAS_LLCTL_OFFSET 0x00D8
1299#define SCU_AFE_XCVRCR_OFFSET 0x00DC
1300#define SCU_AFE_LUTCR_OFFSET 0x00E0
1301
1302#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_SHIFT (0)
1303#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_MASK (0x00000003)
1304#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1 (0)
1305#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2 (1)
1306#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN3 (2)
1307#define SCU_SAS_LINK_LAYER_CONTROL_BROADCAST_PRIMITIVE_SHIFT (2)
1308#define SCU_SAS_LINK_LAYER_CONTROL_BROADCAST_PRIMITIVE_MASK (0x000003FC)
1309#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_ACTIVE_TASK_DISABLE_SHIFT (16)
1310#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_ACTIVE_TASK_DISABLE_MASK (0x00010000)
1311#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_OUTBOUND_TASK_DISABLE_SHIFT (17)
1312#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_OUTBOUND_TASK_DISABLE_MASK (0x00020000)
1313#define SCU_SAS_LINK_LAYER_CONTROL_NO_OUTBOUND_TASK_TIMEOUT_SHIFT (24)
1314#define SCU_SAS_LINK_LAYER_CONTROL_NO_OUTBOUND_TASK_TIMEOUT_MASK (0xFF000000)
1315#define SCU_SAS_LINK_LAYER_CONTROL_RESERVED (0x00FCFC00)
1316
1317#define SCU_SAS_LLCTL_GEN_VAL(name, value) \
1318 SCU_GEN_VALUE(SCU_SAS_LINK_LAYER_CONTROL_ ## name, value)
1319
1320#define SCU_SAS_LLCTL_GEN_BIT(name) \
1321 SCU_GEN_BIT(SCU_SAS_LINK_LAYER_CONTROL_ ## name)
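/*
 * Illustrative sketch, not part of the original patch: the generator
 * macros above would typically be used to program the negotiated maximum
 * link rate, e.g. with gen = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2
 * (scu_llctl_set_max_rate is a hypothetical helper name).
 */
static inline void scu_llctl_set_max_rate(void __iomem *llctl, u32 gen)
{
	u32 val = readl(llctl);

	val &= ~SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_MASK;
	val |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, gen);
	writel(val, llctl);
}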
1322
1323
1324/* #define SCU_FRXHECR_DCNT_OFFSET 0x00B0 */
1325#define SCU_PSZGCR_OFFSET 0x00E4
1326#define SCU_SAS_RECPHYCAP_OFFSET 0x00E8
1327/* #define SCU_TX_LUTSEL_OFFSET 0x00B8 */
1328
1329#define SCU_SAS_PTxC_OFFSET 0x00D4 /* Same offset as SAS_TCTSTM */
1330
1331/**
1332 * struct scu_link_layer_registers - SCU Link Layer Registers
1333 *
1334 *
1335 */
1336struct scu_link_layer_registers {
1337/* 0x0000 SAS_SPDTOV */
1338 u32 speed_negotiation_timers;
1339/* 0x0004 SAS_LLSTA */
1340 u32 link_layer_status;
1341/* 0x0008 SATA_PSELTOV */
1342 u32 port_selector_timeout;
1343 u32 reserved0C;
1344/* 0x0010 SAS_TIMETOV */
1345 u32 timeout_unit_value;
1346/* 0x0014 SAS_RCDTOV */
1347 u32 rcd_timeout;
1348/* 0x0018 SAS_LNKTOV */
1349 u32 link_timer_timeouts;
1350/* 0x001C SAS_PHYTOV */
1351 u32 sas_phy_timeouts;
1352/* 0x0020 SAS_AFERCNT */
1353 u32 received_address_frame_error_counter;
1354/* 0x0024 SAS_WERCNT */
1355 u32 invalid_dword_counter;
1356/* 0x0028 SAS_TIID */
1357 u32 transmit_identification;
1358/* 0x002C SAS_TIDNH */
1359 u32 sas_device_name_high;
1360/* 0x0030 SAS_TIDNL */
1361 u32 sas_device_name_low;
1362/* 0x0034 SAS_TISSAH */
1363 u32 source_sas_address_high;
1364/* 0x0038 SAS_TISSAL */
1365 u32 source_sas_address_low;
1366/* 0x003C SAS_TIPID */
1367 u32 identify_frame_phy_id;
1368/* 0x0040 SAS_TIRES2 */
1369 u32 identify_frame_reserved;
1370/* 0x0044 SAS_ADRSTA */
1371 u32 received_address_frame;
1372/* 0x0048 SAS_MAWTTOV */
1373 u32 maximum_arbitration_wait_timer_timeout;
1374/* 0x004C SAS_PTxC */
1375 u32 transmit_primitive;
1376/* 0x0050 SAS_RORES */
1377 u32 error_counter_event_notification_control;
1378/* 0x0054 SAS_FRPLDFIL */
1379 u32 frxq_payload_fill_threshold;
1380/* 0x0058 SAS_LLHANG_TOT */
1381 u32 link_layer_hang_detection_timeout;
1382 u32 reserved_5C;
1383/* 0x0060 SAS_RFCNT */
1384 u32 received_frame_count;
1385/* 0x0064 SAS_TFCNT */
1386 u32 transmit_frame_count;
1387/* 0x0068 SAS_RFDCNT */
1388 u32 received_dword_count;
1389/* 0x006C SAS_TFDCNT */
1390 u32 transmit_dword_count;
1391/* 0x0070 SAS_LERCNT */
1392 u32 loss_of_sync_error_count;
1393/* 0x0074 SAS_RDISERRCNT */
1394 u32 running_disparity_error_count;
1395/* 0x0078 SAS_CRERCNT */
1396 u32 received_frame_crc_error_count;
1397/* 0x007C STPCTL */
1398 u32 stp_control;
1399/* 0x0080 SAS_PCFG */
1400 u32 phy_configuration;
1401/* 0x0084 SAS_CLKSM */
1402 u32 clock_skew_management;
1403/* 0x0088 SAS_TXCOMWAKE */
1404 u32 transmit_comwake_signal;
1405/* 0x008C SAS_TXCOMINIT */
1406 u32 transmit_cominit_signal;
1407/* 0x0090 SAS_TXCOMSAS */
1408 u32 transmit_comsas_signal;
1409/* 0x0094 SAS_COMINIT */
1410 u32 cominit_control;
1411/* 0x0098 SAS_COMWAKE */
1412 u32 comwake_control;
1413/* 0x009C SAS_COMSAS */
1414 u32 comsas_control;
1415/* 0x00A0 SAS_SFERCNT */
1416 u32 received_short_frame_count;
1417/* 0x00A4 SAS_CDFERCNT */
1418 u32 received_frame_without_credit_count;
1419/* 0x00A8 SAS_DNFERCNT */
1420 u32 received_frame_after_done_count;
1421/* 0x00AC SAS_PRSTERCNT */
1422 u32 phy_reset_problem_count;
1423/* 0x00B0 SAS_CNTCTL */
1424 u32 counter_control;
1425/* 0x00B4 SAS_SSPTOV */
1426 u32 ssp_timer_timeout_values;
1427/* 0x00B8 FTCTL */
1428 u32 ftx_control;
1429/* 0x00BC FRCTL */
1430 u32 frx_control;
1431/* 0x00C0 FTWMRK */
1432 u32 ftx_watermark;
1433/* 0x00C4 ENSPINUP */
1434 u32 notify_enable_spinup_control;
1435/* 0x00C8 SAS_TRNTOV */
1436 u32 sas_training_sequence_timer_values;
1437/* 0x00CC SAS_PHYCAP */
1438 u32 phy_capabilities;
1439/* 0x00D0 SAS_PHYCTL */
1440 u32 phy_control;
1441 u32 reserved_d4;
1442/* 0x00D8 LLCTL */
1443 u32 link_layer_control;
1444/* 0x00DC AFE_XCVRCR */
1445 u32 afe_xcvr_control;
1446/* 0x00E0 AFE_LUTCR */
1447 u32 afe_lookup_table_control;
1448/* 0x00E4 PSZGCR */
1449 u32 phy_source_zone_group_control;
1450/* 0x00E8 SAS_RECPHYCAP */
1451 u32 receive_phycap;
1452 u32 reserved_ec;
1453/* 0x00F0 SNAFERXRSTCTL */
1454 u32 speed_negotiation_afe_rx_reset_control;
1455/* 0x00F4 SAS_SSIPMCTL */
1456 u32 power_management_control;
1457/* 0x00F8 SAS_PSPREQ_PRIM */
1458 u32 sas_pm_partial_request_primitive;
1459/* 0x00FC SAS_PSSREQ_PRIM */
1460 u32 sas_pm_slumber_request_primitive;
1461/* 0x0100 SAS_PPSACK_PRIM */
1462 u32 sas_pm_ack_primitive_register;
1463/* 0x0104 SAS_PSNAK_PRIM */
1464 u32 sas_pm_nak_primitive_register;
1465/* 0x0108 SAS_SSIPMTOV */
1466 u32 sas_primitive_timeout;
1467 u32 reserved_10c;
1468/* 0x0110 - 0x011C PLAPRDCTRLxREG */
1469 u32 pla_product_control[4];
1470/* 0x0120 PLAPRDSUMREG */
1471 u32 pla_product_sum;
1472/* 0x0124 PLACONTROLREG */
1473 u32 pla_control;
1474/* Remainder of the 896 byte (0x380) LL register space */
1475 u32 reserved_0128_037f[0x96];
1476
1477};
1478
1479/*
1480 * 0x00D4 // Same offset as SAS_TCTSTM SAS_PTxC
1481 * u32 primitive_transmit_control; */
1482
1483/*
1484 * ----------------------------------------------------------------------------
1485 * SGPIO
1486 * ---------------------------------------------------------------------------- */
1487#define SCU_SGPIO_OFFSET 0x1400
1488
1489/* #define SCU_SGPIO_OFFSET 0x6000 // later moves to 0x1400 see HSD 652625 */
1490#define SCU_SGPIO_SGICR_OFFSET 0x0000
1491#define SCU_SGPIO_SGPBR_OFFSET 0x0004
1492#define SCU_SGPIO_SGSDLR_OFFSET 0x0008
1493#define SCU_SGPIO_SGSDUR_OFFSET 0x000C
1494#define SCU_SGPIO_SGSIDLR_OFFSET 0x0010
1495#define SCU_SGPIO_SGSIDUR_OFFSET 0x0014
1496#define SCU_SGPIO_SGVSCR_OFFSET 0x0018
1497/* Address from 0x0820 to 0x083C */
1498#define SCU_SGPIO_SGODSR_OFFSET 0x0020
1499
1500/**
1501 * struct scu_sgpio_registers - SCU SGPIO Registers
1502 *
1503 *
1504 */
1505struct scu_sgpio_registers {
1506/* 0x0000 SGPIO_SGICR */
1507 u32 interface_control;
1508/* 0x0004 SGPIO_SGPBR */
1509 u32 blink_rate;
1510/* 0x0008 SGPIO_SGSDLR */
1511 u32 start_drive_lower;
1512/* 0x000C SGPIO_SGSDUR */
1513 u32 start_drive_upper;
1514/* 0x0010 SGPIO_SGSIDLR */
1515 u32 serial_input_lower;
1516/* 0x0014 SGPIO_SGSIDUR */
1517 u32 serial_input_upper;
1518/* 0x0018 SGPIO_SGVSCR */
1519 u32 vendor_specific_code;
1520/* 0x0020 SGPIO_SGODSR */
1521 u32 ouput_data_select[8];
1522/* Remainder of the 256 byte SGPIO register space */
1523 u32 reserved_1444_14ff[0x31];
1524
1525};
1526
1527/*
1528 * *****************************************************************************
1529 * * Defines for VIIT entry offsets
1530 * * Access additional entries by SCU_VIIT_BASE + index * 0x10
1531 * ***************************************************************************** */
1532#define SCU_VIIT_BASE 0x1c00
1533
1534struct scu_viit_registers {
1535 u32 registers[256];
1536};
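/*
 * Illustrative sketch, not part of the original patch: per the comment
 * above, each VIIT entry occupies 0x10 bytes, so entry @index starts at
 * SCU_VIIT_BASE + index * 0x10 (scu_viit_entry_offset is a hypothetical
 * helper name).
 */
static inline u32 scu_viit_entry_offset(u32 index)
{
	return SCU_VIIT_BASE + index * 0x10;
}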
1537
1538/*
1539 * *****************************************************************************
1540 * * SCU PORT TASK SCHEDULER REGISTERS
1541 * ***************************************************************************** */
1542
1543#define SCU_PTSG_BASE 0x1000
1544
1545#define SCU_PTSG_PTSGCR_OFFSET 0x0000
1546#define SCU_PTSG_RTCR_OFFSET 0x0004
1547#define SCU_PTSG_RTCCR_OFFSET 0x0008
1548#define SCU_PTSG_PTS0CR_OFFSET 0x0010
1549#define SCU_PTSG_PTS0SR_OFFSET 0x0014
1550#define SCU_PTSG_PTS1CR_OFFSET 0x0018
1551#define SCU_PTSG_PTS1SR_OFFSET 0x001C
1552#define SCU_PTSG_PTS2CR_OFFSET 0x0020
1553#define SCU_PTSG_PTS2SR_OFFSET 0x0024
1554#define SCU_PTSG_PTS3CR_OFFSET 0x0028
1555#define SCU_PTSG_PTS3SR_OFFSET 0x002C
1556#define SCU_PTSG_PCSPE0CR_OFFSET 0x0030
1557#define SCU_PTSG_PCSPE1CR_OFFSET 0x0034
1558#define SCU_PTSG_PCSPE2CR_OFFSET 0x0038
1559#define SCU_PTSG_PCSPE3CR_OFFSET 0x003C
1560#define SCU_PTSG_ETMTSCCR_OFFSET 0x0040
1561#define SCU_PTSG_ETMRNSCCR_OFFSET 0x0044
1562
1563/**
1564 * struct scu_port_task_scheduler_registers - These are the control/stats pairs
1565 * for each Port Task Scheduler.
1566 *
1567 *
1568 */
1569struct scu_port_task_scheduler_registers {
1570 u32 control;
1571 u32 status;
1572};
1573
1574/**
1575 * struct scu_port_task_scheduler_group_registers - These are the PORT Task
1576 * Scheduler registers
1577 *
1578 *
1579 */
1580struct scu_port_task_scheduler_group_registers {
1581/* 0x0000 PTSGCR */
1582 u32 control;
1583/* 0x0004 RTCR */
1584 u32 real_time_clock;
1585/* 0x0008 RTCCR */
1586 u32 real_time_clock_control;
1587/* 0x000C */
1588 u32 reserved_0C;
1589/*
1590 * 0x0010 PTS0CR
1591 * 0x0014 PTS0SR
1592 * 0x0018 PTS1CR
1593 * 0x001C PTS1SR
1594 * 0x0020 PTS2CR
1595 * 0x0024 PTS2SR
1596 * 0x0028 PTS3CR
1597 * 0x002C PTS3SR */
1598 struct scu_port_task_scheduler_registers port[4];
1599/*
1600 * 0x0030 PCSPE0CR
1601 * 0x0034 PCSPE1CR
1602 * 0x0038 PCSPE2CR
1603 * 0x003C PCSPE3CR */
1604 u32 protocol_engine[4];
1605/* 0x0040 ETMTSCCR */
1606 u32 tc_scanning_interval_control;
1607/* 0x0044 ETMRNSCCR */
1608 u32 rnc_scanning_interval_control;
1609/* Remainder of the 128 byte PTSG register space */
1610 u32 reserved_1048_107f[0x0E];
1611
1612};
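/*
 * Illustrative sketch, not part of the original patch: with the layout
 * above, the status register of port task scheduler @i is reached as
 * ptsg->port[i].status (scu_pts_status is a hypothetical helper name).
 */
static inline u32 scu_pts_status(struct scu_port_task_scheduler_group_registers __iomem *ptsg,
				 int i)
{
	return readl(&ptsg->port[i].status);
}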
1613
1614#define SCU_PTSG_SCUVZECR_OFFSET 0x003C
1615
1616/*
1617 * *****************************************************************************
1618 * * AFE REGISTERS
1619 * ***************************************************************************** */
1620#define SCU_AFE_MMR_BASE 0xE000
1621
1622/*
1623 * AFE 0 is at offset 0x0800
1624 * AFE 1 is at offset 0x0900
1625 * AFE 2 is at offset 0x0a00
1626 * AFE 3 is at offset 0x0b00 */
1627struct scu_afe_transceiver {
1628 /* 0x0000 AFE_XCVR_CTRL0 */
1629 u32 afe_xcvr_control0;
1630 /* 0x0004 AFE_XCVR_CTRL1 */
1631 u32 afe_xcvr_control1;
1632 /* 0x0008 */
1633 u32 reserved_0008;
1634 /* 0x000c afe_dfx_rx_control0 */
1635 u32 afe_dfx_rx_control0;
1636 /* 0x0010 AFE_DFX_RX_CTRL1 */
1637 u32 afe_dfx_rx_control1;
1638 /* 0x0014 */
1639 u32 reserved_0014;
1640 /* 0x0018 AFE_DFX_RX_STS0 */
1641 u32 afe_dfx_rx_status0;
1642 /* 0x001c AFE_DFX_RX_STS1 */
1643 u32 afe_dfx_rx_status1;
1644 /* 0x0020 */
1645 u32 reserved_0020;
1646 /* 0x0024 AFE_TX_CTRL */
1647 u32 afe_tx_control;
1648 /* 0x0028 AFE_TX_AMP_CTRL0 */
1649 u32 afe_tx_amp_control0;
1650 /* 0x002c AFE_TX_AMP_CTRL1 */
1651 u32 afe_tx_amp_control1;
1652 /* 0x0030 AFE_TX_AMP_CTRL2 */
1653 u32 afe_tx_amp_control2;
1654 /* 0x0034 AFE_TX_AMP_CTRL3 */
1655 u32 afe_tx_amp_control3;
1656 /* 0x0038 afe_tx_ssc_control */
1657 u32 afe_tx_ssc_control;
1658 /* 0x003c */
1659 u32 reserved_003c;
1660 /* 0x0040 AFE_RX_SSC_CTRL0 */
1661 u32 afe_rx_ssc_control0;
1662 /* 0x0044 AFE_RX_SSC_CTRL1 */
1663 u32 afe_rx_ssc_control1;
1664 /* 0x0048 AFE_RX_SSC_CTRL2 */
1665 u32 afe_rx_ssc_control2;
1666 /* 0x004c AFE_RX_EQ_STS0 */
1667 u32 afe_rx_eq_status0;
1668 /* 0x0050 AFE_RX_EQ_STS1 */
1669 u32 afe_rx_eq_status1;
1670 /* 0x0054 AFE_RX_CDR_STS */
1671 u32 afe_rx_cdr_status;
1672 /* 0x0058 */
1673 u32 reserved_0058;
1674 /* 0x005c AFE_CHAN_CTRL */
1675 u32 afe_channel_control;
1676 /* 0x0060-0x006c */
1677 u32 reserved_0060_006c[0x04];
1678 /* 0x0070 AFE_XCVR_EC_STS0 */
1679 u32 afe_xcvr_error_capture_status0;
1680 /* 0x0074 AFE_XCVR_EC_STS1 */
1681 u32 afe_xcvr_error_capture_status1;
1682 /* 0x0078 AFE_XCVR_EC_STS2 */
1683 u32 afe_xcvr_error_capture_status2;
1684 /* 0x007c afe_xcvr_ec_status3 */
1685 u32 afe_xcvr_error_capture_status3;
1686 /* 0x0080 AFE_XCVR_EC_STS4 */
1687 u32 afe_xcvr_error_capture_status4;
1688 /* 0x0084 AFE_XCVR_EC_STS5 */
1689 u32 afe_xcvr_error_capture_status5;
1690 /* 0x0088-0x00fc */
1691 u32 reserved_008c_00fc[0x1e];
1692};
1693
1694/**
1695 * struct scu_afe_registers - AFE Registers
1696 *
1697 *
1698 */
1699/* AFE registers */
1700struct scu_afe_registers {
1701 /* 0Xe000 AFE_BIAS_CTRL */
1702 u32 afe_bias_control;
1703 u32 reserved_0004;
1704 /* 0x0008 AFE_PLL_CTRL0 */
1705 u32 afe_pll_control0;
1706 /* 0x000c AFE_PLL_CTRL1 */
1707 u32 afe_pll_control1;
1708 /* 0x0010 AFE_PLL_CTRL2 */
1709 u32 afe_pll_control2;
1710 /* 0x0014 AFE_CB_STS */
1711 u32 afe_common_block_status;
1712 /* 0x0018-0x007c */
1713 u32 reserved_18_7c[0x1a];
1714 /* 0x0080 AFE_PMSN_MCTRL0 */
1715 u32 afe_pmsn_master_control0;
1716 /* 0x0084 AFE_PMSN_MCTRL1 */
1717 u32 afe_pmsn_master_control1;
1718 /* 0x0088 AFE_PMSN_MCTRL2 */
1719 u32 afe_pmsn_master_control2;
1720 /* 0x008C-0x00fc */
1721 u32 reserved_008c_00fc[0x1D];
1722 /* 0x0100 AFE_DFX_MST_CTRL0 */
1723 u32 afe_dfx_master_control0;
1724 /* 0x0104 AFE_DFX_MST_CTRL1 */
1725 u32 afe_dfx_master_control1;
1726 /* 0x0108 AFE_DFX_DCL_CTRL */
1727 u32 afe_dfx_dcl_control;
1728 /* 0x010c AFE_DFX_DMON_CTRL */
1729 u32 afe_dfx_digital_monitor_control;
1730 /* 0x0110 AFE_DFX_AMONP_CTRL */
1731 u32 afe_dfx_analog_p_monitor_control;
1732 /* 0x0114 AFE_DFX_AMONN_CTRL */
1733 u32 afe_dfx_analog_n_monitor_control;
1734 /* 0x0118 AFE_DFX_NTL_STS */
1735 u32 afe_dfx_ntl_status;
1736 /* 0x011c AFE_DFX_FIFO_STS0 */
1737 u32 afe_dfx_fifo_status0;
1738 /* 0x0120 AFE_DFX_FIFO_STS1 */
1739 u32 afe_dfx_fifo_status1;
1740 /* 0x0124 AFE_DFX_MPAT_CTRL */
1741 u32 afe_dfx_master_pattern_control;
1742 /* 0x0128 AFE_DFX_P0_CTRL */
1743 u32 afe_dfx_p0_control;
1744 /* 0x012c-0x01a8 AFE_DFX_P0_DRx */
1745 u32 afe_dfx_p0_data[32];
1746 /* 0x01ac */
1747 u32 reserved_01ac;
1748 /* 0x01b0-0x020c AFE_DFX_P0_IRx */
1749 u32 afe_dfx_p0_instruction[24];
1750 /* 0x0210 */
1751 u32 reserved_0210;
1752 /* 0x0214 AFE_DFX_P1_CTRL */
1753 u32 afe_dfx_p1_control;
1754 /* 0x0218-0x245 AFE_DFX_P1_DRx */
1755 u32 afe_dfx_p1_data[16];
1756 /* 0x0258-0x029c */
1757 u32 reserved_0258_029c[0x12];
1758 /* 0x02a0-0x02bc AFE_DFX_P1_IRx */
1759 u32 afe_dfx_p1_instruction[8];
1760 /* 0x02c0-0x2fc */
1761 u32 reserved_02c0_02fc[0x10];
1762 /* 0x0300 AFE_DFX_TX_PMSN_CTRL */
1763 u32 afe_dfx_tx_pmsn_control;
1764 /* 0x0304 AFE_DFX_RX_PMSN_CTRL */
1765 u32 afe_dfx_rx_pmsn_control;
1766 u32 reserved_0308;
1767 /* 0x030c AFE_DFX_NOA_CTRL0 */
1768 u32 afe_dfx_noa_control0;
1769 /* 0x0310 AFE_DFX_NOA_CTRL1 */
1770 u32 afe_dfx_noa_control1;
1771 /* 0x0314 AFE_DFX_NOA_CTRL2 */
1772 u32 afe_dfx_noa_control2;
1773 /* 0x0318 AFE_DFX_NOA_CTRL3 */
1774 u32 afe_dfx_noa_control3;
1775 /* 0x031c AFE_DFX_NOA_CTRL4 */
1776 u32 afe_dfx_noa_control4;
1777 /* 0x0320 AFE_DFX_NOA_CTRL5 */
1778 u32 afe_dfx_noa_control5;
1779 /* 0x0324 AFE_DFX_NOA_CTRL6 */
1780 u32 afe_dfx_noa_control6;
1781 /* 0x0328 AFE_DFX_NOA_CTRL7 */
1782 u32 afe_dfx_noa_control7;
1783 /* 0x032c-0x07fc */
1784 u32 reserved_032c_07fc[0x135];
1785
1786 /* 0x0800-0x0bfc */
1787 struct scu_afe_transceiver scu_afe_xcvr[4];
1788
1789 /* 0x0c00-0x0ffc */
1790 u32 reserved_0c00_0ffc[0x0100];
1791};
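/*
 * Illustrative sketch, not part of the original patch: the offsets noted
 * in the field comments above can be verified at compile time, assuming
 * BUILD_BUG_ON()/offsetof() are available in this translation unit.
 */
static inline void scu_afe_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct scu_afe_registers, afe_pll_control0) != 0x0008);
	BUILD_BUG_ON(offsetof(struct scu_afe_registers, scu_afe_xcvr) != 0x0800);
	BUILD_BUG_ON(sizeof(struct scu_afe_registers) != 0x1000);
}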
1792
1793struct scu_protocol_engine_group_registers {
1794 u32 table[0xE0];
1795};
1796
1797
1798struct scu_viit_iit {
1799 u32 table[256];
1800};
1801
1802/**
1803 * Placeholder for the ZONE partition table information. Zoning will not be
1804 * included in the 1.1 release.
1805 *
1806 *
1807 */
1808struct scu_zone_partition_table {
1809 u32 table[2048];
1810};
1811
1812/**
1813 * Placeholder for the CRAM registers; it is not yet clear whether we need
1814 * to read/write them.
1815 *
1816 *
1817 */
1818struct scu_completion_ram {
1819 u32 ram[128];
1820};
1821
1822/**
1823 * Placeholder for the FBRAM registers; it is not yet clear whether we need
1824 * to read/write them.
1825 *
1826 *
1827 */
1828struct scu_frame_buffer_ram {
1829 u32 ram[128];
1830};
1831
1832#define scu_scratch_ram_SIZE_IN_DWORDS 256
1833
1834/**
1835 * Placeholder for the scratch RAM registers.
1836 *
1837 *
1838 */
1839struct scu_scratch_ram {
1840 u32 ram[scu_scratch_ram_SIZE_IN_DWORDS];
1841};
1842
1843/**
1844 * Placeholder; it is not yet clear what these registers are for.
1845 *
1846 *
1847 */
1848struct noa_protocol_engine_partition {
1849 u32 reserved[64];
1850};
1851
1852/**
1853 * Placeholder; it is not yet clear what these registers are for.
1854 *
1855 *
1856 */
1857struct noa_hub_partition {
1858 u32 reserved[64];
1859};
1860
1861/**
1862 * Placeholder; it is not yet clear what these registers are for.
1863 *
1864 *
1865 */
1866struct noa_host_interface_partition {
1867 u32 reserved[64];
1868};
1869
1870/**
1871 * struct transport_link_layer_pair - The SCU hardware pairs up the TL
1872 * registers with the LL registers, so they must be placed adjacent to one
1873 * another to form the array of registers in the PEG.
1874 *
1875 *
1876 */
1877struct transport_link_layer_pair {
1878 struct scu_transport_layer_registers tl;
1879 struct scu_link_layer_registers ll;
1880};
1881
1882/**
1883 * struct scu_peg_registers - SCU Protocol Engine Memory mapped register space.
1884 * These registers are unique to each protocol engine group. There can be
1885 * at most two PEGs for a single SCU part.
1886 *
1887 *
1888 */
1889struct scu_peg_registers {
1890 struct transport_link_layer_pair pe[4];
1891 struct scu_port_task_scheduler_group_registers ptsg;
1892 struct scu_protocol_engine_group_registers peg;
1893 struct scu_sgpio_registers sgpio;
1894 u32 reserved_01500_1BFF[0x1C0];
1895 struct scu_viit_entry viit[64];
1896 struct scu_zone_partition_table zpt0;
1897 struct scu_zone_partition_table zpt1;
1898};
1899
1900/**
1901 * struct scu_registers - SCU registers, including both PEG register sets when
1902 * that compile option is turned on. All of these registers are in the memory
1903 * mapped space returned from BAR1.
1904 *
1905 *
1906 */
1907struct scu_registers {
1908 /* 0x0000 - PEG 0 */
1909 struct scu_peg_registers peg0;
1910
1911 /* 0x6000 - SDMA and Miscellaneous */
1912 struct scu_sdma_registers sdma;
1913 struct scu_completion_ram cram;
1914 struct scu_frame_buffer_ram fbram;
1915 u32 reserved_6800_69FF[0x80];
1916 struct noa_protocol_engine_partition noa_pe;
1917 struct noa_hub_partition noa_hub;
1918 struct noa_host_interface_partition noa_if;
1919 u32 reserved_6d00_7fff[0x4c0];
1920
1921 /* 0x8000 - PEG 1 */
1922 struct scu_peg_registers peg1;
1923
1924 /* 0xE000 - AFE Registers */
1925 struct scu_afe_registers afe;
1926
1927 /* 0xF000 - reserved */
1928 u32 reserved_f000_211fff[0x80c00];
1929
1930 /* 0x212000 - scratch RAM */
1931 struct scu_scratch_ram scratch_ram;
1932};
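/*
 * Illustrative sketch, not part of the original patch: struct scu_registers
 * is meant to be overlaid directly on the BAR1 mapping, roughly as below
 * (scu_map_registers and the use of the managed pcim_iomap() helper are
 * assumptions, not the driver's actual mapping code).
 */
static inline struct scu_registers __iomem *scu_map_registers(struct pci_dev *pdev)
{
	/* BAR1 exposes the full SCU register space described above */
	return pcim_iomap(pdev, 1, sizeof(struct scu_registers));
}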
1933
1934#endif /* _SCU_REGISTERS_HEADER_ */
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
new file mode 100644
index 000000000000..b6e6368c2665
--- /dev/null
+++ b/drivers/scsi/isci/remote_device.c
@@ -0,0 +1,1501 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55#include <scsi/sas.h>
56#include "isci.h"
57#include "port.h"
58#include "remote_device.h"
59#include "request.h"
60#include "remote_node_context.h"
61#include "scu_event_codes.h"
62#include "task.h"
63
64/**
65 * isci_remote_device_not_ready() - This function is called by the ihost when
66 * the remote device is not ready. We mark the isci device as not ready for
67 * I/O (clearing "ready_for_io") and signal any waiting process.
68 * @ihost: This parameter specifies the isci host object.
69 * @idev: This parameter specifies the remote device.
70 * @reason: why the device is not ready.
71 * sci_lock is held on entrance to this function.
72 */
73static void isci_remote_device_not_ready(struct isci_host *ihost,
74 struct isci_remote_device *idev, u32 reason)
75{
76 struct isci_request *ireq;
77
78 dev_dbg(&ihost->pdev->dev,
79 "%s: isci_device = %p\n", __func__, idev);
80
81 switch (reason) {
82 case SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED:
83 set_bit(IDEV_GONE, &idev->flags);
84 break;
85 case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
86 set_bit(IDEV_IO_NCQERROR, &idev->flags);
87
88 /* Kill all outstanding requests for the device. */
89 list_for_each_entry(ireq, &idev->reqs_in_process, dev_node) {
90
91 dev_dbg(&ihost->pdev->dev,
92 "%s: isci_device = %p request = %p\n",
93 __func__, idev, ireq);
94
95 sci_controller_terminate_request(ihost,
96 idev,
97 ireq);
98 }
99 /* Fall through into the default case... */
100 default:
101 clear_bit(IDEV_IO_READY, &idev->flags);
102 break;
103 }
104}
105
106/**
107 * isci_remote_device_ready() - This function is called by the ihost when the
108 * remote device is ready. We mark the isci device as ready and signal any
109 * waiting process.
110 * @ihost: our valid isci_host
111 * @idev: remote device
112 *
113 */
114static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote_device *idev)
115{
116 dev_dbg(&ihost->pdev->dev,
117 "%s: idev = %p\n", __func__, idev);
118
119 clear_bit(IDEV_IO_NCQERROR, &idev->flags);
120 set_bit(IDEV_IO_READY, &idev->flags);
121 if (test_and_clear_bit(IDEV_START_PENDING, &idev->flags))
122 wake_up(&ihost->eventq);
123}
124
125/* called once the remote node context is ready to be freed.
126 * The remote device can now report that its stop operation is complete.
127 */
128static void rnc_destruct_done(void *_dev)
129{
130 struct isci_remote_device *idev = _dev;
131
132 BUG_ON(idev->started_request_count != 0);
133 sci_change_state(&idev->sm, SCI_DEV_STOPPED);
134}
135
136static enum sci_status sci_remote_device_terminate_requests(struct isci_remote_device *idev)
137{
138 struct isci_host *ihost = idev->owning_port->owning_controller;
139 enum sci_status status = SCI_SUCCESS;
140 u32 i;
141
142 for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
143 struct isci_request *ireq = ihost->reqs[i];
144 enum sci_status s;
145
146 if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
147 ireq->target_device != idev)
148 continue;
149
150 s = sci_controller_terminate_request(ihost, idev, ireq);
151 if (s != SCI_SUCCESS)
152 status = s;
153 }
154
155 return status;
156}
157
158enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
159 u32 timeout)
160{
161 struct sci_base_state_machine *sm = &idev->sm;
162 enum sci_remote_device_states state = sm->current_state_id;
163
164 switch (state) {
165 case SCI_DEV_INITIAL:
166 case SCI_DEV_FAILED:
167 case SCI_DEV_FINAL:
168 default:
169 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
170 __func__, state);
171 return SCI_FAILURE_INVALID_STATE;
172 case SCI_DEV_STOPPED:
173 return SCI_SUCCESS;
174 case SCI_DEV_STARTING:
175 /* device not started so there had better be no requests */
176 BUG_ON(idev->started_request_count != 0);
177 sci_remote_node_context_destruct(&idev->rnc,
178 rnc_destruct_done, idev);
179 /* Transition to the stopping state and wait for the
180 * remote node to complete being posted and invalidated.
181 */
182 sci_change_state(sm, SCI_DEV_STOPPING);
183 return SCI_SUCCESS;
184 case SCI_DEV_READY:
185 case SCI_STP_DEV_IDLE:
186 case SCI_STP_DEV_CMD:
187 case SCI_STP_DEV_NCQ:
188 case SCI_STP_DEV_NCQ_ERROR:
189 case SCI_STP_DEV_AWAIT_RESET:
190 case SCI_SMP_DEV_IDLE:
191 case SCI_SMP_DEV_CMD:
192 sci_change_state(sm, SCI_DEV_STOPPING);
193 if (idev->started_request_count == 0) {
194 sci_remote_node_context_destruct(&idev->rnc,
195 rnc_destruct_done, idev);
196 return SCI_SUCCESS;
197 } else
198 return sci_remote_device_terminate_requests(idev);
199 break;
200 case SCI_DEV_STOPPING:
201 /* All requests should have been terminated, but if there is an
202 * attempt to stop a device already in the stopping state, then
203 * try again to terminate.
204 */
205 return sci_remote_device_terminate_requests(idev);
206 case SCI_DEV_RESETTING:
207 sci_change_state(sm, SCI_DEV_STOPPING);
208 return SCI_SUCCESS;
209 }
210}
211
212enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
213{
214 struct sci_base_state_machine *sm = &idev->sm;
215 enum sci_remote_device_states state = sm->current_state_id;
216
217 switch (state) {
218 case SCI_DEV_INITIAL:
219 case SCI_DEV_STOPPED:
220 case SCI_DEV_STARTING:
221 case SCI_SMP_DEV_IDLE:
222 case SCI_SMP_DEV_CMD:
223 case SCI_DEV_STOPPING:
224 case SCI_DEV_FAILED:
225 case SCI_DEV_RESETTING:
226 case SCI_DEV_FINAL:
227 default:
228 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
229 __func__, state);
230 return SCI_FAILURE_INVALID_STATE;
231 case SCI_DEV_READY:
232 case SCI_STP_DEV_IDLE:
233 case SCI_STP_DEV_CMD:
234 case SCI_STP_DEV_NCQ:
235 case SCI_STP_DEV_NCQ_ERROR:
236 case SCI_STP_DEV_AWAIT_RESET:
237 sci_change_state(sm, SCI_DEV_RESETTING);
238 return SCI_SUCCESS;
239 }
240}
241
242enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev)
243{
244 struct sci_base_state_machine *sm = &idev->sm;
245 enum sci_remote_device_states state = sm->current_state_id;
246
247 if (state != SCI_DEV_RESETTING) {
248 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
249 __func__, state);
250 return SCI_FAILURE_INVALID_STATE;
251 }
252
253 sci_change_state(sm, SCI_DEV_READY);
254 return SCI_SUCCESS;
255}
256
257enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
258 u32 suspend_type)
259{
260 struct sci_base_state_machine *sm = &idev->sm;
261 enum sci_remote_device_states state = sm->current_state_id;
262
263 if (state != SCI_STP_DEV_CMD) {
264 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
265 __func__, state);
266 return SCI_FAILURE_INVALID_STATE;
267 }
268
269 return sci_remote_node_context_suspend(&idev->rnc,
270 suspend_type, NULL, NULL);
271}
272
273enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
274 u32 frame_index)
275{
276 struct sci_base_state_machine *sm = &idev->sm;
277 enum sci_remote_device_states state = sm->current_state_id;
278 struct isci_host *ihost = idev->owning_port->owning_controller;
279 enum sci_status status;
280
281 switch (state) {
282 case SCI_DEV_INITIAL:
283 case SCI_DEV_STOPPED:
284 case SCI_DEV_STARTING:
285 case SCI_STP_DEV_IDLE:
286 case SCI_SMP_DEV_IDLE:
287 case SCI_DEV_FINAL:
288 default:
289 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
290 __func__, state);
291 /* Return the frame back to the controller */
292 sci_controller_release_frame(ihost, frame_index);
293 return SCI_FAILURE_INVALID_STATE;
294 case SCI_DEV_READY:
295 case SCI_STP_DEV_NCQ_ERROR:
296 case SCI_STP_DEV_AWAIT_RESET:
297 case SCI_DEV_STOPPING:
298 case SCI_DEV_FAILED:
299 case SCI_DEV_RESETTING: {
300 struct isci_request *ireq;
301 struct ssp_frame_hdr hdr;
302 void *frame_header;
303 ssize_t word_cnt;
304
305 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
306 frame_index,
307 &frame_header);
308 if (status != SCI_SUCCESS)
309 return status;
310
311 word_cnt = sizeof(hdr) / sizeof(u32);
312 sci_swab32_cpy(&hdr, frame_header, word_cnt);
313
314 ireq = sci_request_by_tag(ihost, be16_to_cpu(hdr.tag));
315 if (ireq && ireq->target_device == idev) {
316 /* The IO request is now in charge of releasing the frame */
317 status = sci_io_request_frame_handler(ireq, frame_index);
318 } else {
319 /* We could not map this tag to a valid IO
320 * request. Just toss the frame and continue.
321 */
322 sci_controller_release_frame(ihost, frame_index);
323 }
324 break;
325 }
326 case SCI_STP_DEV_NCQ: {
327 struct dev_to_host_fis *hdr;
328
329 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
330 frame_index,
331 (void **)&hdr);
332 if (status != SCI_SUCCESS)
333 return status;
334
335 if (hdr->fis_type == FIS_SETDEVBITS &&
336 (hdr->status & ATA_ERR)) {
337 idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
338
339 /* TODO Check sactive and complete associated IO if any. */
340 sci_change_state(sm, SCI_STP_DEV_NCQ_ERROR);
341 } else if (hdr->fis_type == FIS_REGD2H &&
342 (hdr->status & ATA_ERR)) {
343 /*
344 * Some devices return a D2H FIS when an NCQ error is detected.
345 * Treat it like an SDB error FIS and use the same not-ready reason.
346 */
347 idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
348 sci_change_state(&idev->sm, SCI_STP_DEV_NCQ_ERROR);
349 } else
350 status = SCI_FAILURE;
351
352 sci_controller_release_frame(ihost, frame_index);
353 break;
354 }
355 case SCI_STP_DEV_CMD:
356 case SCI_SMP_DEV_CMD:
357 /* The device does not process any UF received from the hardware while
358 * in this state. All unsolicited frames are forwarded to the io request
359 * object.
360 */
361 status = sci_io_request_frame_handler(idev->working_request, frame_index);
362 break;
363 }
364
365 return status;
366}
367
368static bool is_remote_device_ready(struct isci_remote_device *idev)
369{
370
371 struct sci_base_state_machine *sm = &idev->sm;
372 enum sci_remote_device_states state = sm->current_state_id;
373
374 switch (state) {
375 case SCI_DEV_READY:
376 case SCI_STP_DEV_IDLE:
377 case SCI_STP_DEV_CMD:
378 case SCI_STP_DEV_NCQ:
379 case SCI_STP_DEV_NCQ_ERROR:
380 case SCI_STP_DEV_AWAIT_RESET:
381 case SCI_SMP_DEV_IDLE:
382 case SCI_SMP_DEV_CMD:
383 return true;
384 default:
385 return false;
386 }
387}
388
389enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
390 u32 event_code)
391{
392 struct sci_base_state_machine *sm = &idev->sm;
393 enum sci_remote_device_states state = sm->current_state_id;
394 enum sci_status status;
395
396 switch (scu_get_event_type(event_code)) {
397 case SCU_EVENT_TYPE_RNC_OPS_MISC:
398 case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
399 case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
400 status = sci_remote_node_context_event_handler(&idev->rnc, event_code);
401 break;
402 case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
403 if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) {
404 status = SCI_SUCCESS;
405
406 /* Suspend the associated RNC */
407 sci_remote_node_context_suspend(&idev->rnc,
408 SCI_SOFTWARE_SUSPENSION,
409 NULL, NULL);
410
411 dev_dbg(scirdev_to_dev(idev),
412 "%s: device: %p event code: %x: %s\n",
413 __func__, idev, event_code,
414 is_remote_device_ready(idev)
415 ? "I_T_Nexus_Timeout event"
416 : "I_T_Nexus_Timeout event in wrong state");
417
418 break;
419 }
420 /* Else, fall through and treat as unhandled... */
421 default:
422 dev_dbg(scirdev_to_dev(idev),
423 "%s: device: %p event code: %x: %s\n",
424 __func__, idev, event_code,
425 is_remote_device_ready(idev)
426 ? "unexpected event"
427 : "unexpected event in wrong state");
428 status = SCI_FAILURE_INVALID_STATE;
429 break;
430 }
431
432 if (status != SCI_SUCCESS)
433 return status;
434
435 if (state == SCI_STP_DEV_IDLE) {
436
437 /* We handle suspension events specially in this
438 * state; resume the RNC right away.
439 */
440 if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
441 scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX)
442 status = sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
443 }
444
445 return status;
446}
447
448static void sci_remote_device_start_request(struct isci_remote_device *idev,
449 struct isci_request *ireq,
450 enum sci_status status)
451{
452 struct isci_port *iport = idev->owning_port;
453
454 /* cleanup requests that failed after starting on the port */
455 if (status != SCI_SUCCESS)
456 sci_port_complete_io(iport, idev, ireq);
457 else {
458 kref_get(&idev->kref);
459 idev->started_request_count++;
460 }
461}
462
463enum sci_status sci_remote_device_start_io(struct isci_host *ihost,
464 struct isci_remote_device *idev,
465 struct isci_request *ireq)
466{
467 struct sci_base_state_machine *sm = &idev->sm;
468 enum sci_remote_device_states state = sm->current_state_id;
469 struct isci_port *iport = idev->owning_port;
470 enum sci_status status;
471
472 switch (state) {
473 case SCI_DEV_INITIAL:
474 case SCI_DEV_STOPPED:
475 case SCI_DEV_STARTING:
476 case SCI_STP_DEV_NCQ_ERROR:
477 case SCI_DEV_STOPPING:
478 case SCI_DEV_FAILED:
479 case SCI_DEV_RESETTING:
480 case SCI_DEV_FINAL:
481 default:
482 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
483 __func__, state);
484 return SCI_FAILURE_INVALID_STATE;
485 case SCI_DEV_READY:
486 /* Attempt to start an io request for this device object. The remote
487 * device object will issue the start request for the io and, if
488 * successful, will start the request for the port object and then
489 * increment its own request count.
490 */
491 status = sci_port_start_io(iport, idev, ireq);
492 if (status != SCI_SUCCESS)
493 return status;
494
495 status = sci_remote_node_context_start_io(&idev->rnc, ireq);
496 if (status != SCI_SUCCESS)
497 break;
498
499 status = sci_request_start(ireq);
500 break;
501 case SCI_STP_DEV_IDLE: {
502 /* Handle the start io operation for a sata device that is in
503 * the command idle state: evaluate the type of IO request to
504 * be started; if it is an NCQ request, change to the NCQ
505 * substate; for any other command, change to the CMD substate.
506 *
507 * If this is a softreset we may want to have a different
508 * substate.
509 */
510 enum sci_remote_device_states new_state;
511 struct sas_task *task = isci_request_access_task(ireq);
512
513 status = sci_port_start_io(iport, idev, ireq);
514 if (status != SCI_SUCCESS)
515 return status;
516
517 status = sci_remote_node_context_start_io(&idev->rnc, ireq);
518 if (status != SCI_SUCCESS)
519 break;
520
521 status = sci_request_start(ireq);
522 if (status != SCI_SUCCESS)
523 break;
524
525 if (task->ata_task.use_ncq)
526 new_state = SCI_STP_DEV_NCQ;
527 else {
528 idev->working_request = ireq;
529 new_state = SCI_STP_DEV_CMD;
530 }
531 sci_change_state(sm, new_state);
532 break;
533 }
534 case SCI_STP_DEV_NCQ: {
535 struct sas_task *task = isci_request_access_task(ireq);
536
537 if (task->ata_task.use_ncq) {
538 status = sci_port_start_io(iport, idev, ireq);
539 if (status != SCI_SUCCESS)
540 return status;
541
542 status = sci_remote_node_context_start_io(&idev->rnc, ireq);
543 if (status != SCI_SUCCESS)
544 break;
545
546 status = sci_request_start(ireq);
547 } else
548 return SCI_FAILURE_INVALID_STATE;
549 break;
550 }
551 case SCI_STP_DEV_AWAIT_RESET:
552 return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
553 case SCI_SMP_DEV_IDLE:
554 status = sci_port_start_io(iport, idev, ireq);
555 if (status != SCI_SUCCESS)
556 return status;
557
558 status = sci_remote_node_context_start_io(&idev->rnc, ireq);
559 if (status != SCI_SUCCESS)
560 break;
561
562 status = sci_request_start(ireq);
563 if (status != SCI_SUCCESS)
564 break;
565
566 idev->working_request = ireq;
567 sci_change_state(&idev->sm, SCI_SMP_DEV_CMD);
568 break;
569 case SCI_STP_DEV_CMD:
570 case SCI_SMP_DEV_CMD:
571 /* The device is already handling a command; it cannot accept new
572 * commands until this one is complete.
573 */
574 return SCI_FAILURE_INVALID_STATE;
575 }
576
577 sci_remote_device_start_request(idev, ireq, status);
578 return status;
579}
580
581static enum sci_status common_complete_io(struct isci_port *iport,
582 struct isci_remote_device *idev,
583 struct isci_request *ireq)
584{
585 enum sci_status status;
586
587 status = sci_request_complete(ireq);
588 if (status != SCI_SUCCESS)
589 return status;
590
591 status = sci_port_complete_io(iport, idev, ireq);
592 if (status != SCI_SUCCESS)
593 return status;
594
595 sci_remote_device_decrement_request_count(idev);
596 return status;
597}
598
599enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
600 struct isci_remote_device *idev,
601 struct isci_request *ireq)
602{
603 struct sci_base_state_machine *sm = &idev->sm;
604 enum sci_remote_device_states state = sm->current_state_id;
605 struct isci_port *iport = idev->owning_port;
606 enum sci_status status;
607
608 switch (state) {
609 case SCI_DEV_INITIAL:
610 case SCI_DEV_STOPPED:
611 case SCI_DEV_STARTING:
612 case SCI_STP_DEV_IDLE:
613 case SCI_SMP_DEV_IDLE:
614 case SCI_DEV_FAILED:
615 case SCI_DEV_FINAL:
616 default:
617 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
618 __func__, state);
619 return SCI_FAILURE_INVALID_STATE;
620 case SCI_DEV_READY:
621 case SCI_STP_DEV_AWAIT_RESET:
622 case SCI_DEV_RESETTING:
623 status = common_complete_io(iport, idev, ireq);
624 break;
625 case SCI_STP_DEV_CMD:
626 case SCI_STP_DEV_NCQ:
627 case SCI_STP_DEV_NCQ_ERROR:
628 status = common_complete_io(iport, idev, ireq);
629 if (status != SCI_SUCCESS)
630 break;
631
632 if (ireq->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
633 /* This request caused a hardware error; the device needs a LUN reset.
634 * Force the state machine into the AWAIT_RESET state so the remaining
635 * IOs can reach the RNC state handler, where they will be completed by
636 * the RNC with "DEVICE_RESET_REQUIRED" status instead of "INVALID STATE".
637 */
638 sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET);
639 } else if (idev->started_request_count == 0)
640 sci_change_state(sm, SCI_STP_DEV_IDLE);
641 break;
642 case SCI_SMP_DEV_CMD:
643 status = common_complete_io(iport, idev, ireq);
644 if (status != SCI_SUCCESS)
645 break;
646 sci_change_state(sm, SCI_SMP_DEV_IDLE);
647 break;
648 case SCI_DEV_STOPPING:
649 status = common_complete_io(iport, idev, ireq);
650 if (status != SCI_SUCCESS)
651 break;
652
653 if (idev->started_request_count == 0)
654 sci_remote_node_context_destruct(&idev->rnc,
655 rnc_destruct_done,
656 idev);
657 break;
658 }
659
660 if (status != SCI_SUCCESS)
661 dev_err(scirdev_to_dev(idev),
662 "%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x "
663 "could not complete\n", __func__, iport,
664 idev, ireq, status);
665 else
666 isci_put_device(idev);
667
668 return status;
669}
670
671static void sci_remote_device_continue_request(void *dev)
672{
673 struct isci_remote_device *idev = dev;
674
675 /* we need to check if this request is still valid to continue. */
676 if (idev->working_request)
677 sci_controller_continue_io(idev->working_request);
678}
679
680enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
681 struct isci_remote_device *idev,
682 struct isci_request *ireq)
683{
684 struct sci_base_state_machine *sm = &idev->sm;
685 enum sci_remote_device_states state = sm->current_state_id;
686 struct isci_port *iport = idev->owning_port;
687 enum sci_status status;
688
689 switch (state) {
690 case SCI_DEV_INITIAL:
691 case SCI_DEV_STOPPED:
692 case SCI_DEV_STARTING:
693 case SCI_SMP_DEV_IDLE:
694 case SCI_SMP_DEV_CMD:
695 case SCI_DEV_STOPPING:
696 case SCI_DEV_FAILED:
697 case SCI_DEV_RESETTING:
698 case SCI_DEV_FINAL:
699 default:
700 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
701 __func__, state);
702 return SCI_FAILURE_INVALID_STATE;
703 case SCI_STP_DEV_IDLE:
704 case SCI_STP_DEV_CMD:
705 case SCI_STP_DEV_NCQ:
706 case SCI_STP_DEV_NCQ_ERROR:
707 case SCI_STP_DEV_AWAIT_RESET:
708 status = sci_port_start_io(iport, idev, ireq);
709 if (status != SCI_SUCCESS)
710 return status;
711
712 status = sci_remote_node_context_start_task(&idev->rnc, ireq);
713 if (status != SCI_SUCCESS)
714 goto out;
715
716 status = sci_request_start(ireq);
717 if (status != SCI_SUCCESS)
718 goto out;
719
720 /* Note: If the remote device state is not IDLE this will
721 * replace the request that probably resulted in the task
722 * management request.
723 */
724 idev->working_request = ireq;
725 sci_change_state(sm, SCI_STP_DEV_CMD);
726
727 /* The remote node context must cleanup the TCi to NCQ mapping
728 * table. The only way to do this correctly is to either write
729 * to the TLCR register or to invalidate and repost the RNC. In
730 * either case the remote node context state machine will take
731 * the correct action when the remote node context is suspended
732 * and later resumed.
733 */
734 sci_remote_node_context_suspend(&idev->rnc,
735 SCI_SOFTWARE_SUSPENSION, NULL, NULL);
736 sci_remote_node_context_resume(&idev->rnc,
737 sci_remote_device_continue_request,
738 idev);
739
740 out:
741 sci_remote_device_start_request(idev, ireq, status);
742 /* We need to let the controller start request handler know that
743 * it can't post the TC yet. We will provide a callback function to
744 * post the TC when the RNC gets resumed.
745 */
746 return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS;
747 case SCI_DEV_READY:
748 status = sci_port_start_io(iport, idev, ireq);
749 if (status != SCI_SUCCESS)
750 return status;
751
752 status = sci_remote_node_context_start_task(&idev->rnc, ireq);
753 if (status != SCI_SUCCESS)
754 break;
755
756 status = sci_request_start(ireq);
757 break;
758 }
759 sci_remote_device_start_request(idev, ireq, status);
760
761 return status;
762}
763
764void sci_remote_device_post_request(struct isci_remote_device *idev, u32 request)
765{
766 struct isci_port *iport = idev->owning_port;
767 u32 context;
768
769 context = request |
770 (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
771 (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
772 idev->rnc.remote_node_index;
773
774 sci_controller_post_request(iport->owning_controller, context);
775}
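/*
 * Illustrative sketch, not part of the original patch: the post context
 * built above is a single u32 that packs the request/command bits, the
 * protocol engine group, the logical port and the remote node index; a
 * standalone encoder would look like this (scu_post_context is a
 * hypothetical name).
 */
static inline u32 scu_post_context(u32 request, u32 port_index, u32 rni)
{
	return request |
	       (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
	       (port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
	       rni;
}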
776
777/* called once the remote node context has transitioned to a
778 * ready state. This is the indication that the remote device object can also
779 * transition to ready.
780 */
781static void remote_device_resume_done(void *_dev)
782{
783 struct isci_remote_device *idev = _dev;
784
785 if (is_remote_device_ready(idev))
786 return;
787
788 /* go 'ready' if we are not already in a ready state */
789 sci_change_state(&idev->sm, SCI_DEV_READY);
790}
791
792static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev)
793{
794 struct isci_remote_device *idev = _dev;
795 struct isci_host *ihost = idev->owning_port->owning_controller;
796
797 /* For NCQ operation we did not issue an isci_remote_device_not_ready(),
798 * so avoid sending the ready notification as well.
799 */
800 if (idev->sm.previous_state_id != SCI_STP_DEV_NCQ)
801 isci_remote_device_ready(ihost, idev);
802}
803
804static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm)
805{
806 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
807
808 /* Initial state is a transitional state to the stopped state */
809 sci_change_state(&idev->sm, SCI_DEV_STOPPED);
810}
811
812/**
813 * sci_remote_device_destruct() - free remote node context and destruct
814 * @idev: This parameter specifies the remote device to be destructed.
815 *
816 * Remote device objects are a limited resource. As such, they must be
817 * protected. Thus calls to construct and destruct are mutually exclusive and
818 * non-reentrant. The return value indicates whether the device was
819 * successfully destructed: SCI_SUCCESS is returned if the device is
820 * successfully destructed, while SCI_FAILURE_INVALID_STATE is returned if
821 * the supplied device is not in the stopped state and therefore cannot be
822 * destructed (e.g. it has already been destroyed or the stop operation has
823 * not yet completed).
824 */
825static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev)
826{
827 struct sci_base_state_machine *sm = &idev->sm;
828 enum sci_remote_device_states state = sm->current_state_id;
829 struct isci_host *ihost;
830
831 if (state != SCI_DEV_STOPPED) {
832 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
833 __func__, state);
834 return SCI_FAILURE_INVALID_STATE;
835 }
836
837 ihost = idev->owning_port->owning_controller;
838 sci_controller_free_remote_node_context(ihost, idev,
839 idev->rnc.remote_node_index);
840 idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
841 sci_change_state(sm, SCI_DEV_FINAL);
842
843 return SCI_SUCCESS;
844}
845
846/**
847 * isci_remote_device_deconstruct() - This function frees an isci_remote_device.
848 * @ihost: This parameter specifies the isci host object.
849 * @idev: This parameter specifies the remote device to be freed.
850 *
851 */
852static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_remote_device *idev)
853{
854 dev_dbg(&ihost->pdev->dev,
855 "%s: isci_device = %p\n", __func__, idev);
856
857 /* There should not be any outstanding IOs. All paths to
858 * here should go through isci_remote_device_nuke_requests.
859 * If we hit this condition, we will need a way to complete
860 * IO requests in process. */
861 BUG_ON(!list_empty(&idev->reqs_in_process));
862
863 sci_remote_device_destruct(idev);
864 list_del_init(&idev->node);
865 isci_put_device(idev);
866}
867
868static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm)
869{
870 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
871 struct isci_host *ihost = idev->owning_port->owning_controller;
872 u32 prev_state;
873
874 /* If we are entering from the stopping state, let the SCI user know that
875 * the stop operation has completed.
876 */
877 prev_state = idev->sm.previous_state_id;
878 if (prev_state == SCI_DEV_STOPPING)
879 isci_remote_device_deconstruct(ihost, idev);
880
881 sci_controller_remote_device_stopped(ihost, idev);
882}
883
884static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm)
885{
886 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
887 struct isci_host *ihost = idev->owning_port->owning_controller;
888
889 isci_remote_device_not_ready(ihost, idev,
890 SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED);
891}
892
893static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm)
894{
895 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
896 struct isci_host *ihost = idev->owning_port->owning_controller;
897 struct domain_device *dev = idev->domain_dev;
898
899 if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
900 sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);
901 } else if (dev_is_expander(dev)) {
902 sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE);
903 } else
904 isci_remote_device_ready(ihost, idev);
905}
906
907static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm)
908{
909 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
910 struct domain_device *dev = idev->domain_dev;
911
912 if (dev->dev_type == SAS_END_DEV) {
913 struct isci_host *ihost = idev->owning_port->owning_controller;
914
915 isci_remote_device_not_ready(ihost, idev,
916 SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED);
917 }
918}
919
920static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
921{
922 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
923
924 sci_remote_node_context_suspend(
925 &idev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL);
926}
927
928static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
929{
930 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
931
932 sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
933}
934
935static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
936{
937 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
938
939 idev->working_request = NULL;
940 if (sci_remote_node_context_is_ready(&idev->rnc)) {
941 /*
942 * Since the RNC is ready, it's alright to finish completion
943 * processing (e.g. signal the remote device is ready). */
944 sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev);
945 } else {
946 sci_remote_node_context_resume(&idev->rnc,
947 sci_stp_remote_device_ready_idle_substate_resume_complete_handler,
948 idev);
949 }
950}
951
952static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
953{
954 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
955 struct isci_host *ihost = idev->owning_port->owning_controller;
956
957 BUG_ON(idev->working_request == NULL);
958
959 isci_remote_device_not_ready(ihost, idev,
960 SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED);
961}
962
963static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm)
964{
965 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
966 struct isci_host *ihost = idev->owning_port->owning_controller;
967
968 if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED)
969 isci_remote_device_not_ready(ihost, idev,
970 idev->not_ready_reason);
971}
972
973static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
974{
975 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
976 struct isci_host *ihost = idev->owning_port->owning_controller;
977
978 isci_remote_device_ready(ihost, idev);
979}
980
981static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
982{
983 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
984 struct isci_host *ihost = idev->owning_port->owning_controller;
985
986 BUG_ON(idev->working_request == NULL);
987
988 isci_remote_device_not_ready(ihost, idev,
989 SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED);
990}
991
992static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm)
993{
994 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
995
996 idev->working_request = NULL;
997}
998
999static const struct sci_base_state sci_remote_device_state_table[] = {
1000 [SCI_DEV_INITIAL] = {
1001 .enter_state = sci_remote_device_initial_state_enter,
1002 },
1003 [SCI_DEV_STOPPED] = {
1004 .enter_state = sci_remote_device_stopped_state_enter,
1005 },
1006 [SCI_DEV_STARTING] = {
1007 .enter_state = sci_remote_device_starting_state_enter,
1008 },
1009 [SCI_DEV_READY] = {
1010 .enter_state = sci_remote_device_ready_state_enter,
1011 .exit_state = sci_remote_device_ready_state_exit
1012 },
1013 [SCI_STP_DEV_IDLE] = {
1014 .enter_state = sci_stp_remote_device_ready_idle_substate_enter,
1015 },
1016 [SCI_STP_DEV_CMD] = {
1017 .enter_state = sci_stp_remote_device_ready_cmd_substate_enter,
1018 },
1019 [SCI_STP_DEV_NCQ] = { },
1020 [SCI_STP_DEV_NCQ_ERROR] = {
1021 .enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter,
1022 },
1023 [SCI_STP_DEV_AWAIT_RESET] = { },
1024 [SCI_SMP_DEV_IDLE] = {
1025 .enter_state = sci_smp_remote_device_ready_idle_substate_enter,
1026 },
1027 [SCI_SMP_DEV_CMD] = {
1028 .enter_state = sci_smp_remote_device_ready_cmd_substate_enter,
1029 .exit_state = sci_smp_remote_device_ready_cmd_substate_exit,
1030 },
1031 [SCI_DEV_STOPPING] = { },
1032 [SCI_DEV_FAILED] = { },
1033 [SCI_DEV_RESETTING] = {
1034 .enter_state = sci_remote_device_resetting_state_enter,
1035 .exit_state = sci_remote_device_resetting_state_exit
1036 },
1037 [SCI_DEV_FINAL] = { },
1038};
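/*
 * Illustrative sketch, not part of the original patch: the table above is
 * assumed to be driven by sci_change_state() along these lines - run the
 * current state's .exit_state, record the previous state, then run the new
 * state's .enter_state (field and callback shapes are assumptions based on
 * their use in this file).
 */
static inline void sci_sm_transition_sketch(struct sci_base_state_machine *sm,
					    const struct sci_base_state *table,
					    u32 next)
{
	if (table[sm->current_state_id].exit_state)
		table[sm->current_state_id].exit_state(sm);
	sm->previous_state_id = sm->current_state_id;
	sm->current_state_id = next;
	if (table[next].enter_state)
		table[next].enter_state(sm);
}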
1039
1040/**
1041 * sci_remote_device_construct() - common construction
1042 * @iport: SAS/SATA port through which this device is accessed.
1043 * @idev: remote device to construct
1044 *
1045 * This routine just performs benign initialization and does not
1046 * allocate the remote_node_context which is left to
1047 * sci_remote_device_[de]a_construct(). sci_remote_device_destruct()
1048 * frees the remote_node_context(s) for the device.
1049 */
1050static void sci_remote_device_construct(struct isci_port *iport,
1051 struct isci_remote_device *idev)
1052{
1053 idev->owning_port = iport;
1054 idev->started_request_count = 0;
1055
1056 sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL);
1057
1058 sci_remote_node_context_construct(&idev->rnc,
1059 SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
1060}
1061
1062/**
1063 * sci_remote_device_da_construct() - construct direct attached device.
1064 *
1065 * The information (e.g. IAF, Signature FIS, etc.) necessary to build
1066 * the device is known to the SCI Core since it is contained in the
1067 * sci_phy object. Remote node context(s) is/are a global resource
1068 * allocated by this routine, freed by sci_remote_device_destruct().
1069 *
1070 * Returns:
1071 * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
1072 * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
1073 * sata-only controller instance.
1074 * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
1075 */
1076static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
1077 struct isci_remote_device *idev)
1078{
1079 enum sci_status status;
1080 struct domain_device *dev = idev->domain_dev;
1081
1082 sci_remote_device_construct(iport, idev);
1083
1084 /*
1085 * This information is required to determine how many remote node context
1086 * entries will be needed to store the remote node.
1087 */
1088 idev->is_direct_attached = true;
1089 status = sci_controller_allocate_remote_node_context(iport->owning_controller,
1090 idev,
1091 &idev->rnc.remote_node_index);
1092
1093 if (status != SCI_SUCCESS)
1094 return status;
1095
1096 if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV ||
1097 (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev))
1098 /* pass */;
1099 else
1100 return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
1101
1102 idev->connection_rate = sci_port_get_max_allowed_speed(iport);
1103
1104	/* @todo Should I assign the port width by reading all of the phys on the port? */
1105 idev->device_port_width = 1;
1106
1107 return SCI_SUCCESS;
1108}
1109
1110/**
1111 * sci_remote_device_ea_construct() - construct expander attached device
1112 *
1113 * Remote node context(s) is/are a global resource allocated by this
1114 * routine, freed by sci_remote_device_destruct().
1115 *
1116 * Returns:
1117 * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
1118 * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
1119 * sata-only controller instance.
1120 * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
1121 */
1122static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
1123 struct isci_remote_device *idev)
1124{
1125 struct domain_device *dev = idev->domain_dev;
1126 enum sci_status status;
1127
1128 sci_remote_device_construct(iport, idev);
1129
1130 status = sci_controller_allocate_remote_node_context(iport->owning_controller,
1131 idev,
1132 &idev->rnc.remote_node_index);
1133 if (status != SCI_SUCCESS)
1134 return status;
1135
1136 if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV ||
1137 (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev))
1138 /* pass */;
1139 else
1140 return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
1141
1142 /*
1143 * For SAS-2 the physical link rate is actually a logical link
1144 * rate that incorporates multiplexing. The SCU doesn't
1145 * incorporate multiplexing and for the purposes of the
1146 * connection the logical link rate is the same as the
1147 * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay
1148 * one another, so this code works for both situations. */
1149 idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport),
1150 dev->linkrate);
1151
1152	/* @todo Should I assign the port width by reading all of the phys on the port? */
1153 idev->device_port_width = 1;
1154
1155 return SCI_SUCCESS;
1156}
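
Both constructors above repeat the same supported-protocol test verbatim. As a hypothetical refactor (this helper is not part of the patch), the check could be factored into one predicate; the condition below is copied unchanged from the two call sites:

	static bool sci_dev_protocol_supported(struct domain_device *dev)
	{
		/* same test as in the da/ea constructors above */
		return dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV ||
		       (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev);
	}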
1157
1158/**
1159 * sci_remote_device_start() - This method will start the supplied remote
1160 * device. This method enables normal IO requests to flow through to the
1161 * remote device.
1162 * @remote_device: This parameter specifies the device to be started.
1163 * @timeout: This parameter specifies the number of milliseconds in which the
1164 * start operation should complete.
1165 *
1166 * An indication of whether the device was successfully started. SCI_SUCCESS
1167 * This value is returned if the device was successfully started.
1168 * SCI_FAILURE_INVALID_PHY This value is returned if the user attempts to start
1169 * the device when there have been no phys added to it.
1170 */
1171static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
1172 u32 timeout)
1173{
1174 struct sci_base_state_machine *sm = &idev->sm;
1175 enum sci_remote_device_states state = sm->current_state_id;
1176 enum sci_status status;
1177
1178 if (state != SCI_DEV_STOPPED) {
1179 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
1180 __func__, state);
1181 return SCI_FAILURE_INVALID_STATE;
1182 }
1183
1184 status = sci_remote_node_context_resume(&idev->rnc,
1185 remote_device_resume_done,
1186 idev);
1187 if (status != SCI_SUCCESS)
1188 return status;
1189
1190 sci_change_state(sm, SCI_DEV_STARTING);
1191
1192 return SCI_SUCCESS;
1193}
1194
1195static enum sci_status isci_remote_device_construct(struct isci_port *iport,
1196 struct isci_remote_device *idev)
1197{
1198 struct isci_host *ihost = iport->isci_host;
1199 struct domain_device *dev = idev->domain_dev;
1200 enum sci_status status;
1201
1202 if (dev->parent && dev_is_expander(dev->parent))
1203 status = sci_remote_device_ea_construct(iport, idev);
1204 else
1205 status = sci_remote_device_da_construct(iport, idev);
1206
1207 if (status != SCI_SUCCESS) {
1208 dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n",
1209 __func__, status);
1210
1211 return status;
1212 }
1213
1214 /* start the device. */
1215 status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT);
1216
1217 if (status != SCI_SUCCESS)
1218 dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n",
1219 status);
1220
1221 return status;
1222}
1223
1224void isci_remote_device_nuke_requests(struct isci_host *ihost, struct isci_remote_device *idev)
1225{
1226 DECLARE_COMPLETION_ONSTACK(aborted_task_completion);
1227
1228 dev_dbg(&ihost->pdev->dev,
1229 "%s: idev = %p\n", __func__, idev);
1230
1231 /* Cleanup all requests pending for this device. */
1232 isci_terminate_pending_requests(ihost, idev);
1233
1234 dev_dbg(&ihost->pdev->dev,
1235 "%s: idev = %p, done\n", __func__, idev);
1236}
1237
1238/**
1239 * isci_remote_device_alloc() - This function builds the isci_remote_device
1240 * when a libsas dev_found message is received.
1241 * @isci_host: This parameter specifies the isci host object.
1242 * @port: This parameter specifies the isci_port connected to this device.
1243 *
1244 * pointer to new isci_remote_device.
1245 */
1246static struct isci_remote_device *
1247isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport)
1248{
1249 struct isci_remote_device *idev;
1250 int i;
1251
1252 for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
1253 idev = &ihost->devices[i];
1254 if (!test_and_set_bit(IDEV_ALLOCATED, &idev->flags))
1255 break;
1256 }
1257
1258 if (i >= SCI_MAX_REMOTE_DEVICES) {
1259 dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__);
1260 return NULL;
1261 }
1262
1263 if (WARN_ONCE(!list_empty(&idev->reqs_in_process), "found requests in process\n"))
1264 return NULL;
1265
1266 if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n"))
1267 return NULL;
1268
1269 return idev;
1270}
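
isci_remote_device_alloc() claims a slot from a fixed pool with an atomic test-and-set on the per-device IDEV_ALLOCATED flag, so the claim itself needs no lock. A compilable userspace model of that idiom, with illustrative names and pool size (atomic_exchange on an atomic_bool plays the role of test_and_set_bit):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define POOL_SIZE 4

	static atomic_bool slot_allocated[POOL_SIZE];	/* false == free */

	/* The first caller to flip a false flag to true owns that slot. */
	static int alloc_slot(void)
	{
		int i;

		for (i = 0; i < POOL_SIZE; i++)
			if (!atomic_exchange(&slot_allocated[i], true))
				return i;
		return -1;	/* pool exhausted, like the dev_warn path above */
	}

	int main(void)
	{
		printf("first slot: %d\n", alloc_slot());
		printf("second slot: %d\n", alloc_slot());
		return 0;
	}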
1271
1272void isci_remote_device_release(struct kref *kref)
1273{
1274 struct isci_remote_device *idev = container_of(kref, typeof(*idev), kref);
1275 struct isci_host *ihost = idev->isci_port->isci_host;
1276
1277 idev->domain_dev = NULL;
1278 idev->isci_port = NULL;
1279 clear_bit(IDEV_START_PENDING, &idev->flags);
1280 clear_bit(IDEV_STOP_PENDING, &idev->flags);
1281 clear_bit(IDEV_IO_READY, &idev->flags);
1282 clear_bit(IDEV_GONE, &idev->flags);
1283 clear_bit(IDEV_EH, &idev->flags);
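	/* make the flag clears above visible before the IDEV_ALLOCATED clear below */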
1284 smp_mb__before_clear_bit();
1285 clear_bit(IDEV_ALLOCATED, &idev->flags);
1286 wake_up(&ihost->eventq);
1287}
1288
1289/**
1290 * isci_remote_device_stop() - This function is called internally to stop the
1291 * remote device.
1292 * @isci_host: This parameter specifies the isci host object.
1293 * @isci_device: This parameter specifies the remote device.
1294 *
1295 * The status of the ihost request to stop.
1296 */
1297enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
1298{
1299 enum sci_status status;
1300 unsigned long flags;
1301
1302 dev_dbg(&ihost->pdev->dev,
1303 "%s: isci_device = %p\n", __func__, idev);
1304
1305 spin_lock_irqsave(&ihost->scic_lock, flags);
1306 idev->domain_dev->lldd_dev = NULL; /* disable new lookups */
1307 set_bit(IDEV_GONE, &idev->flags);
1308 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1309
1310 /* Kill all outstanding requests. */
1311 isci_remote_device_nuke_requests(ihost, idev);
1312
1313 set_bit(IDEV_STOP_PENDING, &idev->flags);
1314
1315 spin_lock_irqsave(&ihost->scic_lock, flags);
1316 status = sci_remote_device_stop(idev, 50);
1317 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1318
1319 /* Wait for the stop complete callback. */
1320 if (WARN_ONCE(status != SCI_SUCCESS, "failed to stop device\n"))
1321 /* nothing to wait for */;
1322 else
1323 wait_for_device_stop(ihost, idev);
1324
1325 return status;
1326}
1327
1328/**
1329 * isci_remote_device_gone() - This function is called by libsas when a domain
1330 * device is removed.
1331 * @domain_device: This parameter specifies the libsas domain device.
1332 *
1333 */
1334void isci_remote_device_gone(struct domain_device *dev)
1335{
1336 struct isci_host *ihost = dev_to_ihost(dev);
1337 struct isci_remote_device *idev = dev->lldd_dev;
1338
1339 dev_dbg(&ihost->pdev->dev,
1340 "%s: domain_device = %p, isci_device = %p, isci_port = %p\n",
1341 __func__, dev, idev, idev->isci_port);
1342
1343 isci_remote_device_stop(ihost, idev);
1344}
1345
1346
1347/**
1348 * isci_remote_device_found() - This function is called by libsas when a remote
1349 * device is discovered. A remote device object is created and started. The
1350 * function then sleeps until the sci core device started message is
1351 * received.
1352 * @domain_device: This parameter specifies the libsas domain device.
1353 *
1354 * status, zero indicates success.
1355 */
1356int isci_remote_device_found(struct domain_device *domain_dev)
1357{
1358 struct isci_host *isci_host = dev_to_ihost(domain_dev);
1359 struct isci_port *isci_port;
1360 struct isci_phy *isci_phy;
1361 struct asd_sas_port *sas_port;
1362 struct asd_sas_phy *sas_phy;
1363 struct isci_remote_device *isci_device;
1364 enum sci_status status;
1365
1366 dev_dbg(&isci_host->pdev->dev,
1367 "%s: domain_device = %p\n", __func__, domain_dev);
1368
1369 wait_for_start(isci_host);
1370
1371 sas_port = domain_dev->port;
1372 sas_phy = list_first_entry(&sas_port->phy_list, struct asd_sas_phy,
1373 port_phy_el);
1374 isci_phy = to_iphy(sas_phy);
1375 isci_port = isci_phy->isci_port;
1376
1377 /* we are being called for a device on this port,
1378 * so it has to come up eventually
1379 */
1380 wait_for_completion(&isci_port->start_complete);
1381
1382 if ((isci_stopping == isci_port_get_state(isci_port)) ||
1383 (isci_stopped == isci_port_get_state(isci_port)))
1384 return -ENODEV;
1385
1386 isci_device = isci_remote_device_alloc(isci_host, isci_port);
1387 if (!isci_device)
1388 return -ENODEV;
1389
1390 kref_init(&isci_device->kref);
1391 INIT_LIST_HEAD(&isci_device->node);
1392
1393 spin_lock_irq(&isci_host->scic_lock);
1394 isci_device->domain_dev = domain_dev;
1395 isci_device->isci_port = isci_port;
1396 list_add_tail(&isci_device->node, &isci_port->remote_dev_list);
1397
1398 set_bit(IDEV_START_PENDING, &isci_device->flags);
1399 status = isci_remote_device_construct(isci_port, isci_device);
1400
1401 dev_dbg(&isci_host->pdev->dev,
1402 "%s: isci_device = %p\n",
1403 __func__, isci_device);
1404
1405 if (status == SCI_SUCCESS) {
1406 /* device came up, advertise it to the world */
1407 domain_dev->lldd_dev = isci_device;
1408 } else
1409 isci_put_device(isci_device);
1410 spin_unlock_irq(&isci_host->scic_lock);
1411
1412 /* wait for the device ready callback. */
1413 wait_for_device_start(isci_host, isci_device);
1414
1415 return status == SCI_SUCCESS ? 0 : -ENODEV;
1416}
1417/**
1418 * isci_device_is_reset_pending() - This function will check if there is any
1419 * pending reset condition on the device.
1420 * @isci_device: This parameter is the isci_device object.
1421 *
1422 * true if there is a reset pending for the device.
1423 */
1424bool isci_device_is_reset_pending(
1425 struct isci_host *isci_host,
1426 struct isci_remote_device *isci_device)
1427{
1428 struct isci_request *isci_request;
1429 struct isci_request *tmp_req;
1430 bool reset_is_pending = false;
1431 unsigned long flags;
1432
1433 dev_dbg(&isci_host->pdev->dev,
1434 "%s: isci_device = %p\n", __func__, isci_device);
1435
1436 spin_lock_irqsave(&isci_host->scic_lock, flags);
1437
1438 /* Check for reset on all pending requests. */
1439 list_for_each_entry_safe(isci_request, tmp_req,
1440 &isci_device->reqs_in_process, dev_node) {
1441 dev_dbg(&isci_host->pdev->dev,
1442 "%s: isci_device = %p request = %p\n",
1443 __func__, isci_device, isci_request);
1444
1445 if (isci_request->ttype == io_task) {
1446 struct sas_task *task = isci_request_access_task(
1447 isci_request);
1448
1449 spin_lock(&task->task_state_lock);
1450 if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
1451 reset_is_pending = true;
1452 spin_unlock(&task->task_state_lock);
1453 }
1454 }
1455
1456 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1457
1458 dev_dbg(&isci_host->pdev->dev,
1459 "%s: isci_device = %p reset_is_pending = %d\n",
1460 __func__, isci_device, reset_is_pending);
1461
1462 return reset_is_pending;
1463}
1464
1465/**
1466 * isci_device_clear_reset_pending() - This function will clear any pending
1467 * reset condition flags on the device.
1468 * @idev: This parameter is the isci_device object.
1471 */
1472void isci_device_clear_reset_pending(struct isci_host *ihost, struct isci_remote_device *idev)
1473{
1474 struct isci_request *isci_request;
1475 struct isci_request *tmp_req;
1476 unsigned long flags = 0;
1477
1478 dev_dbg(&ihost->pdev->dev, "%s: idev=%p, ihost=%p\n",
1479 __func__, idev, ihost);
1480
1481 spin_lock_irqsave(&ihost->scic_lock, flags);
1482
1483 /* Clear reset pending on all pending requests. */
1484 list_for_each_entry_safe(isci_request, tmp_req,
1485 &idev->reqs_in_process, dev_node) {
1486 dev_dbg(&ihost->pdev->dev, "%s: idev = %p request = %p\n",
1487 __func__, idev, isci_request);
1488
1489 if (isci_request->ttype == io_task) {
1490
1491 unsigned long flags2;
1492 struct sas_task *task = isci_request_access_task(
1493 isci_request);
1494
1495 spin_lock_irqsave(&task->task_state_lock, flags2);
1496 task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
1497 spin_unlock_irqrestore(&task->task_state_lock, flags2);
1498 }
1499 }
1500 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1501}
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
new file mode 100644
index 000000000000..57ccfc3d6ad3
--- /dev/null
+++ b/drivers/scsi/isci/remote_device.h
@@ -0,0 +1,352 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#ifndef _ISCI_REMOTE_DEVICE_H_
57#define _ISCI_REMOTE_DEVICE_H_
58#include <scsi/libsas.h>
59#include <linux/kref.h>
60#include "scu_remote_node_context.h"
61#include "remote_node_context.h"
62#include "port.h"
63
64enum sci_remote_device_not_ready_reason_code {
65 SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED,
66 SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED,
67 SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED,
68 SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED,
69 SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED,
70 SCIC_REMOTE_DEVICE_NOT_READY_REASON_CODE_MAX
71};
72
73/**
74 * isci_remote_device - isci representation of a sas expander / end point
75 * @device_port_width: hw setting for number of simultaneous connections
76 * @connection_rate: per-taskcontext connection rate for this device
77 * @working_request: SATA requests have no tag, so for unaccelerated
78 * protocols we need a method to associate unsolicited
79 * frames with a pending request
80 */
81struct isci_remote_device {
82 #define IDEV_START_PENDING 0
83 #define IDEV_STOP_PENDING 1
84 #define IDEV_ALLOCATED 2
85 #define IDEV_EH 3
86 #define IDEV_GONE 4
87 #define IDEV_IO_READY 5
88 #define IDEV_IO_NCQERROR 6
89 unsigned long flags;
90 struct kref kref;
91 struct isci_port *isci_port;
92 struct domain_device *domain_dev;
93 struct list_head node;
94 struct list_head reqs_in_process;
95 struct sci_base_state_machine sm;
96 u32 device_port_width;
97 enum sas_linkrate connection_rate;
98 bool is_direct_attached;
99 struct isci_port *owning_port;
100 struct sci_remote_node_context rnc;
101 /* XXX unify with device reference counting and delete */
102 u32 started_request_count;
103 struct isci_request *working_request;
104 u32 not_ready_reason;
105};
106
107#define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000
108
109/* device reference routines must be called under sci_lock */
110static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev)
111{
112 struct isci_remote_device *idev = dev->lldd_dev;
113
114 if (idev && !test_bit(IDEV_GONE, &idev->flags)) {
115 kref_get(&idev->kref);
116 return idev;
117 }
118
119 return NULL;
120}
121
122void isci_remote_device_release(struct kref *kref);
123static inline void isci_put_device(struct isci_remote_device *idev)
124{
125 if (idev)
126 kref_put(&idev->kref, isci_remote_device_release);
127}
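
A hedged usage sketch for this pair (the surrounding function, ihost, and task are assumed here, not taken from the patch): lookups run under ihost->scic_lock per the comment above, and every successful lookup is balanced by a put once the caller is done:

	struct isci_remote_device *idev;
	unsigned long flags;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	idev = isci_lookup_device(task->dev);	/* takes a kref on success */
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (idev) {
		/* ... issue the request against idev ... */
		isci_put_device(idev);	/* drop the lookup reference */
	}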
128
129enum sci_status isci_remote_device_stop(struct isci_host *ihost,
130 struct isci_remote_device *idev);
131void isci_remote_device_nuke_requests(struct isci_host *ihost,
132 struct isci_remote_device *idev);
133void isci_remote_device_gone(struct domain_device *domain_dev);
134int isci_remote_device_found(struct domain_device *domain_dev);
135bool isci_device_is_reset_pending(struct isci_host *ihost,
136 struct isci_remote_device *idev);
137void isci_device_clear_reset_pending(struct isci_host *ihost,
138 struct isci_remote_device *idev);
139/**
140 * sci_remote_device_stop() - This method will stop both transmission and
141 * reception of link activity for the supplied remote device. This method
142 * disables normal IO requests from flowing through to the remote device.
143 * @remote_device: This parameter specifies the device to be stopped.
144 * @timeout: This parameter specifies the number of milliseconds in which the
145 * stop operation should complete.
146 *
147 * An indication of whether the device was successfully stopped. SCI_SUCCESS
148 * This value is returned if the transmission and reception for the device was
149 * successfully stopped.
150 */
151enum sci_status sci_remote_device_stop(
152 struct isci_remote_device *idev,
153 u32 timeout);
154
155/**
156 * sci_remote_device_reset() - This method will reset the device making it
157 * ready for operation. This method must be called anytime the device is
158 * reset either through a SMP phy control or a port hard reset request.
159 * @remote_device: This parameter specifies the device to be reset.
160 *
161 * This method does not actually cause the device hardware to be reset. This
162 * method resets the software object so that it will be operational after a
163 * device hardware reset completes. An indication of whether the device reset
164 * was accepted. SCI_SUCCESS This value is returned if the device reset is
165 * started.
166 */
167enum sci_status sci_remote_device_reset(
168 struct isci_remote_device *idev);
169
170/**
171 * sci_remote_device_reset_complete() - This method informs the device object
172 * that the reset operation is complete and the device can resume operation
173 * again.
174 * @remote_device: This parameter specifies the device which is to be informed
175 * of the reset complete operation.
176 *
177 * An indication that the device is resuming operation. SCI_SUCCESS the device
178 * is resuming operation.
179 */
180enum sci_status sci_remote_device_reset_complete(
181 struct isci_remote_device *idev);
182
183/**
184 * enum sci_remote_device_states - This enumeration depicts all the states
185 * for the common remote device state machine.
186 *
187 *
188 */
189enum sci_remote_device_states {
190 /**
191 * Simply the initial state for the base remote device state machine.
192 */
193 SCI_DEV_INITIAL,
194
195 /**
196 * This state indicates that the remote device has successfully been
197 * stopped. In this state no new IO operations are permitted.
198 * This state is entered from the INITIAL state.
199 * This state is entered from the STOPPING state.
200 */
201 SCI_DEV_STOPPED,
202
203 /**
204 * This state indicates that the remote device is in the process of
205 * becoming ready (i.e. starting). In this state no new IO operations
206 * are permitted.
207 * This state is entered from the STOPPED state.
208 */
209 SCI_DEV_STARTING,
210
211 /**
212 * This state indicates the remote device is now ready. Thus, the user
213 * is able to perform IO operations on the remote device.
214 * This state is entered from the STARTING state.
215 */
216 SCI_DEV_READY,
217
218 /**
219 * This is the idle substate for the STP remote device. When there is no
220 * active IO for the device it is in this state.
221 */
222 SCI_STP_DEV_IDLE,
223
224 /**
225 * This is the command state for the STP remote device. This state is
226 * entered when the device is processing a non-NCQ command. The device object
227 * will fail any new start IO requests until this command is complete.
228 */
229 SCI_STP_DEV_CMD,
230
231 /**
232 * This is the NCQ state for the STP remote device. This state is entered
233 * when the device is processing an NCQ request. It will remain in this state
234 * so long as there is one or more NCQ requests being processed.
235 */
236 SCI_STP_DEV_NCQ,
237
238 /**
239 * This is the NCQ error state for the STP remote device. This state is
240 * entered when an SDB error FIS is received by the device object while in the
241 * NCQ state. The device object will only accept a READ LOG command while in
242 * this state.
243 */
244 SCI_STP_DEV_NCQ_ERROR,
245
246 /**
247 * This is the READY substate that indicates the device is waiting for a
248 * RESET task in order to recover from a hardware specific error.
249 */
250 SCI_STP_DEV_AWAIT_RESET,
251
252 /**
253 * This is the idle substate for the SMP remote device. This is the
254 * normal operational state when no SMP request is outstanding.
255 */
256 SCI_SMP_DEV_IDLE,
257
258 /**
259 * This is the command substate for the SMP remote device. The device is
260 * placed in this state while a single SMP request is outstanding.
261 */
262 SCI_SMP_DEV_CMD,
263
264 /**
265 * This state indicates that the remote device is in the process of
266 * stopping. In this state no new IO operations are permitted, but
267 * existing IO operations are allowed to complete.
268 * This state is entered from the READY state.
269 * This state is entered from the FAILED state.
270 */
271 SCI_DEV_STOPPING,
272
273 /**
274 * This state indicates that the remote device has failed.
275 * In this state no new IO operations are permitted.
276 * This state is entered from the INITIALIZING state.
277 * This state is entered from the READY state.
278 */
279 SCI_DEV_FAILED,
280
281 /**
282 * This state indicates the device is being reset.
283 * In this state no new IO operations are permitted.
284 * This state is entered from the READY state.
285 */
286 SCI_DEV_RESETTING,
287
288 /**
289 * Simply the final state for the base remote device state machine.
290 */
291 SCI_DEV_FINAL,
292};
293
294static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_context *rnc)
295{
296 struct isci_remote_device *idev;
297
298 idev = container_of(rnc, typeof(*idev), rnc);
299
300 return idev;
301}
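
rnc_to_dev() is the classic container_of idiom: given a pointer to the embedded rnc member, walk back to the enclosing isci_remote_device. A compilable userspace model with a simplified container_of and illustrative structures:

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct rnc { int remote_node_index; };
	struct device { int id; struct rnc rnc; };

	int main(void)
	{
		struct device d = { .id = 7, .rnc = { .remote_node_index = 3 } };
		struct rnc *r = &d.rnc;
		/* subtract the member offset to recover the container */
		struct device *back = container_of(r, struct device, rnc);

		printf("device %d owns rni %d\n", back->id, r->remote_node_index);
		return 0;
	}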
302
303static inline bool dev_is_expander(struct domain_device *dev)
304{
305 return dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV;
306}
307
308static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev)
309{
310 /* XXX delete this voodoo when converting to the top-level device
311 * reference count
312 */
313 if (WARN_ONCE(idev->started_request_count == 0,
314 "%s: tried to decrement started_request_count past 0!?",
315 __func__))
316 /* pass */;
317 else
318 idev->started_request_count--;
319}
320
321enum sci_status sci_remote_device_frame_handler(
322 struct isci_remote_device *idev,
323 u32 frame_index);
324
325enum sci_status sci_remote_device_event_handler(
326 struct isci_remote_device *idev,
327 u32 event_code);
328
329enum sci_status sci_remote_device_start_io(
330 struct isci_host *ihost,
331 struct isci_remote_device *idev,
332 struct isci_request *ireq);
333
334enum sci_status sci_remote_device_start_task(
335 struct isci_host *ihost,
336 struct isci_remote_device *idev,
337 struct isci_request *ireq);
338
339enum sci_status sci_remote_device_complete_io(
340 struct isci_host *ihost,
341 struct isci_remote_device *idev,
342 struct isci_request *ireq);
343
344enum sci_status sci_remote_device_suspend(
345 struct isci_remote_device *idev,
346 u32 suspend_type);
347
348void sci_remote_device_post_request(
349 struct isci_remote_device *idev,
350 u32 request);
351
352#endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
new file mode 100644
index 000000000000..748e8339d1ec
--- /dev/null
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -0,0 +1,627 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#include "host.h"
57#include "isci.h"
58#include "remote_device.h"
59#include "remote_node_context.h"
60#include "scu_event_codes.h"
61#include "scu_task_context.h"
62
63
64/**
65 *
66 * @sci_rnc: The RNC for which the posted request is being made.
67 *
68 * This method will return true if the RNC is not in the initial state; in
69 * all other states the RNC is considered active. The destroy request of
70 * the state machine drives the RNC back to the initial state. If the
71 * state machine changes then this routine will also have to be changed.
74 */
75
76/**
77 *
78 * @sci_rnc: The state of the remote node context object to check.
79 *
80 * This method will return true if the remote node context is in the READY
81 * state and false otherwise.
83 */
84bool sci_remote_node_context_is_ready(
85 struct sci_remote_node_context *sci_rnc)
86{
87 u32 current_state = sci_rnc->sm.current_state_id;
88
89 if (current_state == SCI_RNC_READY) {
90 return true;
91 }
92
93 return false;
94}
95
96static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id)
97{
98 if (id < ihost->remote_node_entries &&
99 ihost->device_table[id])
100 return &ihost->remote_node_context_table[id];
101
102 return NULL;
103}
104
105static void sci_remote_node_context_construct_buffer(struct sci_remote_node_context *sci_rnc)
106{
107 struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
108 struct domain_device *dev = idev->domain_dev;
109 int rni = sci_rnc->remote_node_index;
110 union scu_remote_node_context *rnc;
111 struct isci_host *ihost;
112 __le64 sas_addr;
113
114 ihost = idev->owning_port->owning_controller;
115 rnc = sci_rnc_by_id(ihost, rni);
116
117 memset(rnc, 0, sizeof(union scu_remote_node_context)
118 * sci_remote_device_node_count(idev));
119
120 rnc->ssp.remote_node_index = rni;
121 rnc->ssp.remote_node_port_width = idev->device_port_width;
122 rnc->ssp.logical_port_index = idev->owning_port->physical_port_index;
123
124 /* sas address is __be64, context ram format is __le64 */
125 sas_addr = cpu_to_le64(SAS_ADDR(dev->sas_addr));
126 rnc->ssp.remote_sas_address_hi = upper_32_bits(sas_addr);
127 rnc->ssp.remote_sas_address_lo = lower_32_bits(sas_addr);
128
129 rnc->ssp.nexus_loss_timer_enable = true;
130 rnc->ssp.check_bit = false;
131 rnc->ssp.is_valid = false;
132 rnc->ssp.is_remote_node_context = true;
133 rnc->ssp.function_number = 0;
134
135 rnc->ssp.arbitration_wait_time = 0;
136
137 if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
138 rnc->ssp.connection_occupancy_timeout =
139 ihost->user_parameters.stp_max_occupancy_timeout;
140 rnc->ssp.connection_inactivity_timeout =
141 ihost->user_parameters.stp_inactivity_timeout;
142 } else {
143 rnc->ssp.connection_occupancy_timeout =
144 ihost->user_parameters.ssp_max_occupancy_timeout;
145 rnc->ssp.connection_inactivity_timeout =
146 ihost->user_parameters.ssp_inactivity_timeout;
147 }
148
149 rnc->ssp.initial_arbitration_wait_time = 0;
150
151 /* Open Address Frame Parameters */
152 rnc->ssp.oaf_connection_rate = idev->connection_rate;
153 rnc->ssp.oaf_features = 0;
154 rnc->ssp.oaf_source_zone_group = 0;
155 rnc->ssp.oaf_more_compatibility_features = 0;
156}
157
158/**
159 *
160 * @sci_rnc:
161 * @callback:
162 * @callback_parameter:
163 *
164 * This method will setup the remote node context object so it will transition
165 * to its ready state. If the remote node context is already setup to
166 * transition to its final state then this function does nothing. none
167 */
168static void sci_remote_node_context_setup_to_resume(
169 struct sci_remote_node_context *sci_rnc,
170 scics_sds_remote_node_context_callback callback,
171 void *callback_parameter)
172{
173 if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL) {
174 sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY;
175 sci_rnc->user_callback = callback;
176 sci_rnc->user_cookie = callback_parameter;
177 }
178}
179
180static void sci_remote_node_context_setup_to_destroy(
181 struct sci_remote_node_context *sci_rnc,
182 scics_sds_remote_node_context_callback callback,
183 void *callback_parameter)
184{
185 sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL;
186 sci_rnc->user_callback = callback;
187 sci_rnc->user_cookie = callback_parameter;
188}
189
190/**
191 *
192 *
193 * This method just calls the user callback function and then resets the
194 * callback.
195 */
196static void sci_remote_node_context_notify_user(
197 struct sci_remote_node_context *rnc)
198{
199 if (rnc->user_callback != NULL) {
200 (*rnc->user_callback)(rnc->user_cookie);
201
202 rnc->user_callback = NULL;
203 rnc->user_cookie = NULL;
204 }
205}
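
The user_callback/user_cookie pair is one-shot: notify fires the continuation once and disarms it so a stale callback can never run twice. A compilable userspace model of the same idiom (all names illustrative):

	#include <stdio.h>

	typedef void (*rnc_cb)(void *cookie);

	struct rnc_model {
		rnc_cb user_callback;
		void *user_cookie;
	};

	/* Models sci_remote_node_context_notify_user(): invoke, then disarm. */
	static void rnc_notify_user(struct rnc_model *rnc)
	{
		if (rnc->user_callback) {
			rnc->user_callback(rnc->user_cookie);
			rnc->user_callback = NULL;
			rnc->user_cookie = NULL;
		}
	}

	static void resume_done(void *cookie)
	{
		printf("resume complete for %s\n", (const char *)cookie);
	}

	int main(void)
	{
		struct rnc_model rnc = { resume_done, "device0" };

		rnc_notify_user(&rnc);	/* fires once */
		rnc_notify_user(&rnc);	/* already disarmed: no-op */
		return 0;
	}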
206
207static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc)
208{
209 if (rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY)
210 sci_remote_node_context_resume(rnc, rnc->user_callback,
211 rnc->user_cookie);
212}
213
214static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc)
215{
216 union scu_remote_node_context *rnc_buffer;
217 struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
218 struct domain_device *dev = idev->domain_dev;
219 struct isci_host *ihost = idev->owning_port->owning_controller;
220
221 rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);
222
223 rnc_buffer->ssp.is_valid = true;
224
225 if (!idev->is_direct_attached &&
226 (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))) {
227 sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96);
228 } else {
229 sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32);
230
231 if (idev->is_direct_attached)
232 sci_port_setup_transports(idev->owning_port,
233 sci_rnc->remote_node_index);
234 }
235}
236
237static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_node_context *sci_rnc)
238{
239 union scu_remote_node_context *rnc_buffer;
240 struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
241 struct isci_host *ihost = idev->owning_port->owning_controller;
242
243 rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);
244
245 rnc_buffer->ssp.is_valid = false;
246
247 sci_remote_device_post_request(rnc_to_dev(sci_rnc),
248 SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE);
249}
250
251static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm)
252{
253 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
254
255 /* Check to see if we have gotten back to the initial state because
256 * someone requested to destroy the remote node context object.
257 */
258 if (sm->previous_state_id == SCI_RNC_INVALIDATING) {
259 rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
260 sci_remote_node_context_notify_user(rnc);
261 }
262}
263
264static void sci_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm)
265{
266 struct sci_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm);
267
268 sci_remote_node_context_validate_context_buffer(sci_rnc);
269}
270
271static void sci_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm)
272{
273 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
274
275 sci_remote_node_context_invalidate_context_buffer(rnc);
276}
277
278static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm)
279{
280 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
281 struct isci_remote_device *idev;
282 struct domain_device *dev;
283
284 idev = rnc_to_dev(rnc);
285 dev = idev->domain_dev;
286
287 /*
288 * For direct attached SATA devices we need to clear the TLCR
289 * NCQ to TCi tag mapping on the phy and in cases where we
290 * resume because of a target reset we also need to update
291 * the STPTLDARNI register with the RNi of the device
292 */
293 if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) &&
294 idev->is_direct_attached)
295 sci_port_setup_transports(idev->owning_port,
296 rnc->remote_node_index);
297
298 sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME);
299}
300
301static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm)
302{
303 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
304
305 rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
306
307 if (rnc->user_callback)
308 sci_remote_node_context_notify_user(rnc);
309}
310
311static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm)
312{
313 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
314
315 sci_remote_node_context_continue_state_transitions(rnc);
316}
317
318static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm)
319{
320 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
321
322 sci_remote_node_context_continue_state_transitions(rnc);
323}
324
325static const struct sci_base_state sci_remote_node_context_state_table[] = {
326 [SCI_RNC_INITIAL] = {
327 .enter_state = sci_remote_node_context_initial_state_enter,
328 },
329 [SCI_RNC_POSTING] = {
330 .enter_state = sci_remote_node_context_posting_state_enter,
331 },
332 [SCI_RNC_INVALIDATING] = {
333 .enter_state = sci_remote_node_context_invalidating_state_enter,
334 },
335 [SCI_RNC_RESUMING] = {
336 .enter_state = sci_remote_node_context_resuming_state_enter,
337 },
338 [SCI_RNC_READY] = {
339 .enter_state = sci_remote_node_context_ready_state_enter,
340 },
341 [SCI_RNC_TX_SUSPENDED] = {
342 .enter_state = sci_remote_node_context_tx_suspended_state_enter,
343 },
344 [SCI_RNC_TX_RX_SUSPENDED] = {
345 .enter_state = sci_remote_node_context_tx_rx_suspended_state_enter,
346 },
347 [SCI_RNC_AWAIT_SUSPENSION] = { },
348};
349
350void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
351 u16 remote_node_index)
352{
353 memset(rnc, 0, sizeof(struct sci_remote_node_context));
354
355 rnc->remote_node_index = remote_node_index;
356 rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
357
358 sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL);
359}
360
361enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
362 u32 event_code)
363{
364 enum scis_sds_remote_node_context_states state;
365
366 state = sci_rnc->sm.current_state_id;
367 switch (state) {
368 case SCI_RNC_POSTING:
369 switch (scu_get_event_code(event_code)) {
370 case SCU_EVENT_POST_RNC_COMPLETE:
371 sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
372 break;
373 default:
374 goto out;
375 }
376 break;
377 case SCI_RNC_INVALIDATING:
378 if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) {
379 if (sci_rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL)
380 state = SCI_RNC_INITIAL;
381 else
382 state = SCI_RNC_POSTING;
383 sci_change_state(&sci_rnc->sm, state);
384 } else {
385 switch (scu_get_event_type(event_code)) {
386 case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
387 case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
388			/* We really don't care if the hardware is going to suspend
389 * the device since it's being invalidated anyway */
390 dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
391 "%s: SCIC Remote Node Context 0x%p was "
392 "suspeneded by hardware while being "
393 "invalidated.\n", __func__, sci_rnc);
394 break;
395 default:
396 goto out;
397 }
398 }
399 break;
400 case SCI_RNC_RESUMING:
401 if (scu_get_event_code(event_code) == SCU_EVENT_POST_RCN_RELEASE) {
402 sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
403 } else {
404 switch (scu_get_event_type(event_code)) {
405 case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
406 case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
407			/* We really don't care if the hardware is going to suspend
408 * the device since it's being resumed anyway */
409 dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
410 "%s: SCIC Remote Node Context 0x%p was "
411 "suspeneded by hardware while being resumed.\n",
412 __func__, sci_rnc);
413 break;
414 default:
415 goto out;
416 }
417 }
418 break;
419 case SCI_RNC_READY:
420 switch (scu_get_event_type(event_code)) {
421 case SCU_EVENT_TL_RNC_SUSPEND_TX:
422 sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
423 sci_rnc->suspension_code = scu_get_event_specifier(event_code);
424 break;
425 case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
426 sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
427 sci_rnc->suspension_code = scu_get_event_specifier(event_code);
428 break;
429 default:
430 goto out;
431 }
432 break;
433 case SCI_RNC_AWAIT_SUSPENSION:
434 switch (scu_get_event_type(event_code)) {
435 case SCU_EVENT_TL_RNC_SUSPEND_TX:
436 sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
437 sci_rnc->suspension_code = scu_get_event_specifier(event_code);
438 break;
439 case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
440 sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
441 sci_rnc->suspension_code = scu_get_event_specifier(event_code);
442 break;
443 default:
444 goto out;
445 }
446 break;
447 default:
448 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
449 "%s: invalid state %d\n", __func__, state);
450 return SCI_FAILURE_INVALID_STATE;
451 }
452 return SCI_SUCCESS;
453
454 out:
455 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
456 "%s: code: %#x state: %d\n", __func__, event_code, state);
457 return SCI_FAILURE;
458
459}
460
461enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
462 scics_sds_remote_node_context_callback cb_fn,
463 void *cb_p)
464{
465 enum scis_sds_remote_node_context_states state;
466
467 state = sci_rnc->sm.current_state_id;
468 switch (state) {
469 case SCI_RNC_INVALIDATING:
470		sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
471 return SCI_SUCCESS;
472 case SCI_RNC_POSTING:
473 case SCI_RNC_RESUMING:
474 case SCI_RNC_READY:
475 case SCI_RNC_TX_SUSPENDED:
476 case SCI_RNC_TX_RX_SUSPENDED:
477 case SCI_RNC_AWAIT_SUSPENSION:
478		sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
479 sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
480 return SCI_SUCCESS;
481 case SCI_RNC_INITIAL:
482 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
483 "%s: invalid state %d\n", __func__, state);
484 /* We have decided that the destruct request on the remote node context
485		 * cannot fail since it is either in the initial/destroyed state or it
486		 * can be destroyed.
487 */
488 return SCI_SUCCESS;
489 default:
490 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
491 "%s: invalid state %d\n", __func__, state);
492 return SCI_FAILURE_INVALID_STATE;
493 }
494}
495
496enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
497 u32 suspend_type,
498 scics_sds_remote_node_context_callback cb_fn,
499 void *cb_p)
500{
501 enum scis_sds_remote_node_context_states state;
502
503 state = sci_rnc->sm.current_state_id;
504 if (state != SCI_RNC_READY) {
505 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
506 "%s: invalid state %d\n", __func__, state);
507 return SCI_FAILURE_INVALID_STATE;
508 }
509
510 sci_rnc->user_callback = cb_fn;
511 sci_rnc->user_cookie = cb_p;
512 sci_rnc->suspension_code = suspend_type;
513
514 if (suspend_type == SCI_SOFTWARE_SUSPENSION) {
515 sci_remote_device_post_request(rnc_to_dev(sci_rnc),
516 SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX);
517 }
518
519 sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION);
520 return SCI_SUCCESS;
521}
522
523enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
524 scics_sds_remote_node_context_callback cb_fn,
525 void *cb_p)
526{
527 enum scis_sds_remote_node_context_states state;
528
529 state = sci_rnc->sm.current_state_id;
530 switch (state) {
531 case SCI_RNC_INITIAL:
532 if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
533 return SCI_FAILURE_INVALID_STATE;
534
535 sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
536 sci_remote_node_context_construct_buffer(sci_rnc);
537 sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING);
538 return SCI_SUCCESS;
539 case SCI_RNC_POSTING:
540 case SCI_RNC_INVALIDATING:
541 case SCI_RNC_RESUMING:
542 if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY)
543 return SCI_FAILURE_INVALID_STATE;
544
545 sci_rnc->user_callback = cb_fn;
546 sci_rnc->user_cookie = cb_p;
547 return SCI_SUCCESS;
548 case SCI_RNC_TX_SUSPENDED: {
549 struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
550 struct domain_device *dev = idev->domain_dev;
551
552 sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
553
554 /* TODO: consider adding a resume action of NONE, INVALIDATE, WRITE_TLCR */
555 if (dev->dev_type == SAS_END_DEV || dev_is_expander(dev))
556 sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
557 else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
558 if (idev->is_direct_attached) {
559 /* @todo Fix this since I am being silly in writing to the STPTLDARNI register. */
560 sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
561 } else {
562 sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
563 }
564 } else
565 return SCI_FAILURE;
566 return SCI_SUCCESS;
567 }
568 case SCI_RNC_TX_RX_SUSPENDED:
569 sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
570 sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
571		return SCI_SUCCESS;
572 case SCI_RNC_AWAIT_SUSPENSION:
573 sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
574 return SCI_SUCCESS;
575 default:
576 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
577 "%s: invalid state %d\n", __func__, state);
578 return SCI_FAILURE_INVALID_STATE;
579 }
580}
581
582enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
583 struct isci_request *ireq)
584{
585 enum scis_sds_remote_node_context_states state;
586
587 state = sci_rnc->sm.current_state_id;
588
589 switch (state) {
590 case SCI_RNC_READY:
591 return SCI_SUCCESS;
592 case SCI_RNC_TX_SUSPENDED:
593 case SCI_RNC_TX_RX_SUSPENDED:
594 case SCI_RNC_AWAIT_SUSPENSION:
595 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
596 "%s: invalid state %d\n", __func__, state);
597 return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
598 default:
599 break;
600 }
601 dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
602 "%s: requested to start IO while still resuming, %d\n",
603 __func__, state);
604 return SCI_FAILURE_INVALID_STATE;
605}
606
607enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
608 struct isci_request *ireq)
609{
610 enum scis_sds_remote_node_context_states state;
611
612 state = sci_rnc->sm.current_state_id;
613 switch (state) {
614 case SCI_RNC_RESUMING:
615 case SCI_RNC_READY:
616 case SCI_RNC_AWAIT_SUSPENSION:
617 return SCI_SUCCESS;
618 case SCI_RNC_TX_SUSPENDED:
619 case SCI_RNC_TX_RX_SUSPENDED:
620 sci_remote_node_context_resume(sci_rnc, NULL, NULL);
621 return SCI_SUCCESS;
622 default:
623 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
624 "%s: invalid state %d\n", __func__, state);
625 return SCI_FAILURE_INVALID_STATE;
626 }
627}
diff --git a/drivers/scsi/isci/remote_node_context.h b/drivers/scsi/isci/remote_node_context.h
new file mode 100644
index 000000000000..41580ad12520
--- /dev/null
+++ b/drivers/scsi/isci/remote_node_context.h
@@ -0,0 +1,224 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#ifndef _SCIC_SDS_REMOTE_NODE_CONTEXT_H_
57#define _SCIC_SDS_REMOTE_NODE_CONTEXT_H_
58
59/**
60 * This file contains the structures, constants, and prototypes associated with
61 * the remote node context in the silicon. It exists to model and manage
62 * the remote node context in the silicon.
63 *
64 *
65 */
66
67#include "isci.h"
68
69/**
70 *
71 *
72 * This constant represents an invalid remote device id; it is used to program
73 * the STPDARNI register so the driver knows when it has received a SIGNATURE
74 * FIS from the SCU.
75 */
76#define SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX 0x0FFF
77
78#define SCU_HARDWARE_SUSPENSION (0)
79#define SCI_SOFTWARE_SUSPENSION (1)
80
81struct isci_request;
82struct isci_remote_device;
83struct sci_remote_node_context;
84
85typedef void (*scics_sds_remote_node_context_callback)(void *);
86
87/**
88 * This is the enumeration of the remote node context states.
89 */
90enum scis_sds_remote_node_context_states {
91 /**
92 * This state is the initial state for a remote node context. On a resume
93 * request the remote node context will transition to the posting state.
94 */
95 SCI_RNC_INITIAL,
96
97 /**
98 * This is a transition state that posts the RNi to the hardware. Once the RNC
99 * is posted the remote node context will be made ready.
100 */
101 SCI_RNC_POSTING,
102
103 /**
104 * This is a transition state that will post an RNC invalidate to the
105 * hardware. Once the invalidate is complete the remote node context will
106 * transition to the posting state.
107 */
108 SCI_RNC_INVALIDATING,
109
110 /**
111 * This is a transition state that will post an RNC resume to the hardware.
112 * Once the event notification of resume complete is received the remote node
113 * context will transition to the ready state.
114 */
115 SCI_RNC_RESUMING,
116
117 /**
118 * This is the state that the remote node context must be in to accept io
119 * request operations.
120 */
121 SCI_RNC_READY,
122
123 /**
124 * This is the state that the remote node context transitions to when it gets
125 * a TX suspend notification from the hardware.
126 */
127 SCI_RNC_TX_SUSPENDED,
128
129 /**
130 * This is the state that the remote node context transitions to when it gets
131 * a TX RX suspend notification from the hardware.
132 */
133 SCI_RNC_TX_RX_SUSPENDED,
134
135 /**
136 * This state is a wait state for the remote node context that waits for a
137 * suspend notification from the hardware. This state is entered when either
138 * there is a request to suspend the remote node context or when there is a TC
139 * completion where the remote node will be suspended by the hardware.
140 */
141 SCI_RNC_AWAIT_SUSPENSION
142};
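
For orientation, the transitions that sci_remote_node_context_event_handler(), _suspend(), _resume() and _destruct() in remote_node_context.c above implement over these states can be summarized as:

	INITIAL          --resume-->                        POSTING
	POSTING          --post RNC complete-->             READY
	READY            --suspend request-->               AWAIT_SUSPENSION
	READY            --TL suspend TX / TX+RX event-->   TX_SUSPENDED / TX_RX_SUSPENDED
	AWAIT_SUSPENSION --TL suspend TX / TX+RX event-->   TX_SUSPENDED / TX_RX_SUSPENDED
	TX_SUSPENDED     --resume (SSP/SMP/DA STP)-->       RESUMING
	TX_SUSPENDED     --resume (non-DA STP/SATA)-->      INVALIDATING
	TX_RX_SUSPENDED  --resume-->                        RESUMING
	RESUMING         --RCN release event-->             READY
	any active state --destruct-->                      INVALIDATING
	INVALIDATING     --invalidate complete-->           INITIAL (final) or POSTING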
143
144/**
145 *
146 *
147 * This enumeration is used to define the end destination state for the remote
148 * node context.
149 */
150enum sci_remote_node_context_destination_state {
151 SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED,
152 SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY,
153 SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL
154};
155
156/**
157 * struct sci_remote_node_context - This structure contains the data
158 * associated with the remote node context object. The remote node context
159 * (RNC) object models the remote device information necessary to manage
160 * the silicon RNC.
161 */
162struct sci_remote_node_context {
163 /**
164 * This field indicates the remote node index (RNI) associated with
165 * this RNC.
166 */
167 u16 remote_node_index;
168
169 /**
170 * This field is the recorded suspension code or the reason for the remote node
171 * context suspension.
172 */
173 u32 suspension_code;
174
175 /**
176 * This field indicates the destination state for the remote node context.
177 * A ready destination state can cause an automatic resume on receiving a
178 * suspension notification.
179 */
180 enum sci_remote_node_context_destination_state destination_state;
181
182 /**
183 * This field contains the callback function that the user requested to be
184 * called when the requested state transition is complete.
185 */
186 scics_sds_remote_node_context_callback user_callback;
187
188 /**
189 * This field contains the parameter that is passed to the callback when the
190 * user requested state transition is completed.
191 */
192 void *user_cookie;
193
194 /**
195 * This field contains the data for the object's state machine.
196 */
197 struct sci_base_state_machine sm;
198};
199
200void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
201 u16 remote_node_index);
202
203
204bool sci_remote_node_context_is_ready(
205 struct sci_remote_node_context *sci_rnc);
206
207enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
208 u32 event_code);
209enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
210 scics_sds_remote_node_context_callback callback,
211 void *callback_parameter);
212enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
213 u32 suspend_type,
214 scics_sds_remote_node_context_callback cb_fn,
215 void *cb_p);
216enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
217 scics_sds_remote_node_context_callback cb_fn,
218 void *cb_p);
219enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
220 struct isci_request *ireq);
221enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
222 struct isci_request *ireq);
223
224#endif /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */
diff --git a/drivers/scsi/isci/remote_node_table.c b/drivers/scsi/isci/remote_node_table.c
new file mode 100644
index 000000000000..301b3141945e
--- /dev/null
+++ b/drivers/scsi/isci/remote_node_table.c
@@ -0,0 +1,598 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56/**
57 * This file contains the implementation of the SCIC_SDS_REMOTE_NODE_TABLE
58 * public, protected, and private methods.
59 *
60 *
61 */
62#include "remote_node_table.h"
63#include "remote_node_context.h"
64
65/**
66 *
67 * @remote_node_table: This is the remote node index table from which the
68 * selection will be made.
69 * @group_table_index: This is the index to the group table from which to
70 * search for an available selection.
71 *
72 * This routine scans the specified group table for the first available group
73 * and returns its bit position in absolute terms, i.e. (dword index * 32)
74 * + bit index within that dword. Returns the absolute bit position of an
75 * available group, or an invalid index if no group is available.
76 */
77static u32 sci_remote_node_table_get_group_index(
78 struct sci_remote_node_table *remote_node_table,
79 u32 group_table_index)
80{
81 u32 dword_index;
82 u32 *group_table;
83 u32 bit_index;
84
85 group_table = remote_node_table->remote_node_groups[group_table_index];
86
87 for (dword_index = 0; dword_index < remote_node_table->group_array_size; dword_index++) {
88 if (group_table[dword_index] != 0) {
89 for (bit_index = 0; bit_index < 32; bit_index++) {
90 if ((group_table[dword_index] & (1 << bit_index)) != 0) {
91 return (dword_index * 32) + bit_index;
92 }
93 }
94 }
95 }
96
97 return SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX;
98}
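
/*
 * Worked example of the scan above: with group_array_size == 2,
 * group_table[0] == 0 and group_table[1] == 0x00000400, the first set bit
 * is bit 10 of dword 1, so the routine returns (1 * 32) + 10 = 42. On a
 * non-zero dword the inner loop is equivalent to __ffs() on that dword.
 */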
99
100/**
101 *
102 * @remote_node_table: [out] This is the remote node table in which to clear
103 * the selector.
104 * @group_table_index: This is the remote node selector in which the change
105 * will be made.
106 * @group_index: This is the bit index in the table to be modified.
107 *
108 * This method will clear the group index entry in the specified group index
109 * table.
110 */
111static void sci_remote_node_table_clear_group_index(
112 struct sci_remote_node_table *remote_node_table,
113 u32 group_table_index,
114 u32 group_index)
115{
116 u32 dword_index;
117 u32 bit_index;
118 u32 *group_table;
119
120 BUG_ON(group_table_index >= SCU_STP_REMOTE_NODE_COUNT);
121 BUG_ON(group_index >= (u32)(remote_node_table->group_array_size * 32));
122
123 dword_index = group_index / 32;
124 bit_index = group_index % 32;
125 group_table = remote_node_table->remote_node_groups[group_table_index];
126
127 group_table[dword_index] = group_table[dword_index] & ~(1 << bit_index);
128}
129
130/**
131 *
132 * @remote_node_table: [out] This is the remote node table in which to set
133 * the selector.
134 * @group_table_index: This is the remote node selector in which the change
135 * will be made.
136 * @group_index: This is the bit position in the table to be modified.
137 *
138 * This method will set the group index bit entry in the specified group index
139 * table.
140 */
141static void sci_remote_node_table_set_group_index(
142 struct sci_remote_node_table *remote_node_table,
143 u32 group_table_index,
144 u32 group_index)
145{
146 u32 dword_index;
147 u32 bit_index;
148 u32 *group_table;
149
150 BUG_ON(group_table_index >= SCU_STP_REMOTE_NODE_COUNT);
151 BUG_ON(group_index >= (u32)(remote_node_table->group_array_size * 32));
152
153 dword_index = group_index / 32;
154 bit_index = group_index % 32;
155 group_table = remote_node_table->remote_node_groups[group_table_index];
156
157 group_table[dword_index] = group_table[dword_index] | (1 << bit_index);
158}
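
/*
 * Example of the dword/bit split used by the set/clear helpers above: for
 * group_index == 42, dword_index == 42 / 32 == 1 and bit_index == 42 % 32
 * == 10, so bit 10 of remote_node_groups[group_table_index][1] is the bit
 * that is set or cleared.
 */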
159
160/**
161 *
162 * @remote_node_table: [out] This is the remote node table in which to modify
163 * the remote node availability.
164 * @remote_node_index: This is the remote node index that is being returned to
165 * the table.
166 *
167 * This method will set the remote node to available in the remote node
168 * allocation table.
169 */
170static void sci_remote_node_table_set_node_index(
171 struct sci_remote_node_table *remote_node_table,
172 u32 remote_node_index)
173{
174 u32 dword_location;
175 u32 dword_remainder;
176 u32 slot_normalized;
177 u32 slot_position;
178
179 BUG_ON(
180 (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
181 <= (remote_node_index / SCU_STP_REMOTE_NODE_COUNT)
182 );
183
184 dword_location = remote_node_index / SCIC_SDS_REMOTE_NODES_PER_DWORD;
185 dword_remainder = remote_node_index % SCIC_SDS_REMOTE_NODES_PER_DWORD;
186 slot_normalized = (dword_remainder / SCU_STP_REMOTE_NODE_COUNT) * sizeof(u32);
187 slot_position = remote_node_index % SCU_STP_REMOTE_NODE_COUNT;
188
189 remote_node_table->available_remote_nodes[dword_location] |=
190 1 << (slot_normalized + slot_position);
191}
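
/*
 * Note on the arithmetic above: SCIC_SDS_REMOTE_NODES_PER_DWORD is 24, so
 * each u32 of available_remote_nodes holds eight groups of three nodes,
 * one group per nibble. The sizeof(u32) factor in slot_normalized is
 * really the 4-bit nibble width. For example, remote_node_index 13 gives
 * dword_location 0, dword_remainder 13, slot_normalized (13 / 3) * 4 == 16
 * and slot_position 13 % 3 == 1, i.e. bit 17: the middle node of the
 * fifth nibble.
 */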
192
193/**
194 *
195 * @remote_node_table: [out] This is the remote node table from which to clear
196 * the available remote node bit.
197 * @remote_node_index: This is the remote node index which is to be cleared
198 * from the table.
199 *
200 * This method clears the remote node index from the table of available remote
201 * nodes.
202 */
203static void sci_remote_node_table_clear_node_index(
204 struct sci_remote_node_table *remote_node_table,
205 u32 remote_node_index)
206{
207 u32 dword_location;
208 u32 dword_remainder;
209 u32 slot_position;
210 u32 slot_normalized;
211
212 BUG_ON(
213 (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
214 <= (remote_node_index / SCU_STP_REMOTE_NODE_COUNT)
215 );
216
217 dword_location = remote_node_index / SCIC_SDS_REMOTE_NODES_PER_DWORD;
218 dword_remainder = remote_node_index % SCIC_SDS_REMOTE_NODES_PER_DWORD;
219 slot_normalized = (dword_remainder / SCU_STP_REMOTE_NODE_COUNT) * sizeof(u32);
220 slot_position = remote_node_index % SCU_STP_REMOTE_NODE_COUNT;
221
222 remote_node_table->available_remote_nodes[dword_location] &=
223 ~(1 << (slot_normalized + slot_position));
224}
225
226/**
227 *
228 * @remote_node_table: [out] The remote node table from which the slot will be
229 * cleared.
230 * @group_index: The index for the slot that is to be cleared.
231 *
232 * This method clears the entire table slot at the specified slot index.
233 */
234static void sci_remote_node_table_clear_group(
235 struct sci_remote_node_table *remote_node_table,
236 u32 group_index)
237{
238 u32 dword_location;
239 u32 dword_remainder;
240 u32 dword_value;
241
242 BUG_ON(
243 (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
244 <= (group_index / SCU_STP_REMOTE_NODE_COUNT)
245 );
246
247 dword_location = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
248 dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
249
250 dword_value = remote_node_table->available_remote_nodes[dword_location];
251 dword_value &= ~(SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
252 remote_node_table->available_remote_nodes[dword_location] = dword_value;
253}
254
255/**
256 *
257 * @remote_node_table: The remote node table in which the group is to be set.
258 * @group_index: The index of the remote node group that is to be set.
259 * This method sets an entire remote node group in the remote node table.
260 */
261static void sci_remote_node_table_set_group(
262 struct sci_remote_node_table *remote_node_table,
263 u32 group_index)
264{
265 u32 dword_location;
266 u32 dword_remainder;
267 u32 dword_value;
268
269 BUG_ON(
270 (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
271 <= (group_index / SCU_STP_REMOTE_NODE_COUNT)
272 );
273
274 dword_location = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
275 dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
276
277 dword_value = remote_node_table->available_remote_nodes[dword_location];
278 dword_value |= (SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
279 remote_node_table->available_remote_nodes[dword_location] = dword_value;
280}
281
282/**
283 *
284 * @remote_node_table: This is the remote node table for which the group
285 * value is to be returned.
286 * @group_index: This is the group index to use to find the group value.
287 *
288 * This method will return the group value for the specified group index,
289 * i.e. the bit values at the specified remote node group index.
290 */
291static u8 sci_remote_node_table_get_group_value(
292 struct sci_remote_node_table *remote_node_table,
293 u32 group_index)
294{
295 u32 dword_location;
296 u32 dword_remainder;
297 u32 dword_value;
298
299 dword_location = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
300 dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
301
302 dword_value = remote_node_table->available_remote_nodes[dword_location];
303 dword_value &= (SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
304 dword_value = dword_value >> (dword_remainder * 4);
305
306 return (u8)dword_value;
307}
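
/*
 * The three group helpers above all address the same nibble layout: group
 * group_index occupies bits (group_index % 8) * 4 onward of
 * available_remote_nodes[group_index / 8]. A full group reads back as
 * SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE (0x7), an empty one as 0x0,
 * so a dword of eight fully available groups is 0x77777777.
 */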
308
309/**
310 *
311 * @remote_node_table: [out] The remote node table which is to be initialized.
312 * @remote_node_entries: The number of entries to put in the table.
313 *
314 * This method will initialize the remote node table for use.
315 */
316void sci_remote_node_table_initialize(
317 struct sci_remote_node_table *remote_node_table,
318 u32 remote_node_entries)
319{
320 u32 index;
321
322 /*
323 * Initialize the raw data. We could improve the speed by only initializing
324 * those entries that are actually going to be used. */
325 memset(
326 remote_node_table->available_remote_nodes,
327 0x00,
328 sizeof(remote_node_table->available_remote_nodes)
329 );
330
331 memset(
332 remote_node_table->remote_node_groups,
333 0x00,
334 sizeof(remote_node_table->remote_node_groups)
335 );
336
337 /* Initialize the available remote node sets */
338 remote_node_table->available_nodes_array_size = (u16)
339 (remote_node_entries / SCIC_SDS_REMOTE_NODES_PER_DWORD)
340 + ((remote_node_entries % SCIC_SDS_REMOTE_NODES_PER_DWORD) != 0);
341
342
343 /* Initialize each full DWORD to a FULL SET of remote nodes */
344 for (index = 0; index < remote_node_entries; index++) {
345 sci_remote_node_table_set_node_index(remote_node_table, index);
346 }
347
348 remote_node_table->group_array_size = (u16)
349 (remote_node_entries / (SCU_STP_REMOTE_NODE_COUNT * 32))
350 + ((remote_node_entries % (SCU_STP_REMOTE_NODE_COUNT * 32)) != 0);
351
352 for (index = 0; index < (remote_node_entries / SCU_STP_REMOTE_NODE_COUNT); index++) {
353 /*
354 * These are all guaranteed to be full slot values so fill them in as
355 * available sets of 3 remote nodes */
356 sci_remote_node_table_set_group_index(remote_node_table, 2, index);
357 }
358
359 /* Now fill in any remainders that we may find */
360 if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 2) {
361 sci_remote_node_table_set_group_index(remote_node_table, 1, index);
362 } else if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 1) {
363 sci_remote_node_table_set_group_index(remote_node_table, 0, index);
364 }
365}
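
/*
 * Worked example of the initialization above: for remote_node_entries ==
 * 16, available_nodes_array_size == 1 (ceil(16 / 24)) and group_array_size
 * == 1 (ceil(16 / 96)). Sixteen node bits are set, groups 0-4 (five full
 * triples) land in selector 2, and the single leftover node (16 % 3 == 1)
 * puts group 5 in selector 0.
 */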
366
367/**
368 *
369 * @remote_node_table: [out] The remote node table from which to allocate a
370 * remote node.
371 * @group_table_index: The group table index that is to be used for the search.
372 *
373 * This method will allocate a single RNi from the remote node table. The
374 * table index will determine from which remote node group table to search.
375 * This search may fail and another group node table can be specified. The
376 * function is designed to allow a search of the available single remote node
377 * group up to the triple remote node group. If an entry is found in the
378 * specified table the remote node is removed and the remote node groups are
379 * updated. Returns the RNi value, or an invalid remote node context if an
380 * RNi cannot be found.
381 */
382static u16 sci_remote_node_table_allocate_single_remote_node(
383 struct sci_remote_node_table *remote_node_table,
384 u32 group_table_index)
385{
386 u8 index;
387 u8 group_value;
388 u32 group_index;
389 u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
390
391 group_index = sci_remote_node_table_get_group_index(
392 remote_node_table, group_table_index);
393
394 /* Only proceed if an available group was found in the specified selector */
395 if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) {
396 group_value = sci_remote_node_table_get_group_value(
397 remote_node_table, group_index);
398
399 for (index = 0; index < SCU_STP_REMOTE_NODE_COUNT; index++) {
400 if (((1 << index) & group_value) != 0) {
401 /* We have selected a bit now clear it */
402 remote_node_index = (u16)(group_index * SCU_STP_REMOTE_NODE_COUNT
403 + index);
404
405 sci_remote_node_table_clear_group_index(
406 remote_node_table, group_table_index, group_index
407 );
408
409 sci_remote_node_table_clear_node_index(
410 remote_node_table, remote_node_index
411 );
412
413 if (group_table_index > 0) {
414 sci_remote_node_table_set_group_index(
415 remote_node_table, group_table_index - 1, group_index
416 );
417 }
418
419 break;
420 }
421 }
422 }
423
424 return remote_node_index;
425}
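
/*
 * Allocation walk-through for the routine above: suppose selector 1 (the
 * two-node groups) is searched and group 5 has value 0x6 (nodes 16 and 17
 * free). The loop picks index 1, returns RNi 5 * 3 + 1 == 16, clears node
 * 16 and group 5 from selector 1, and re-files group 5 under selector 0
 * since it now holds a single free node.
 */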
426
427/**
428 *
429 * @remote_node_table: This is the remote node table from which to allocate the
430 * remote node entries.
431 * @group_table_index: This is the group table index which must equal two (2)
432 * for this operation.
433 *
434 * This method will allocate three consecutive remote node context entries. If
435 * there are no remaining triple entries the function will return a failure.
436 * Returns the remote node index that represents three consecutive remote node
437 * entries, or an invalid remote node context if none can be found.
438 */
439static u16 sci_remote_node_table_allocate_triple_remote_node(
440 struct sci_remote_node_table *remote_node_table,
441 u32 group_table_index)
442{
443 u32 group_index;
444 u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
445
446 group_index = sci_remote_node_table_get_group_index(
447 remote_node_table, group_table_index);
448
449 if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) {
450 remote_node_index = (u16)group_index * SCU_STP_REMOTE_NODE_COUNT;
451
452 sci_remote_node_table_clear_group_index(
453 remote_node_table, group_table_index, group_index
454 );
455
456 sci_remote_node_table_clear_group(
457 remote_node_table, group_index
458 );
459 }
460
461 return remote_node_index;
462}
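
/*
 * Example for the triple allocation above: if selector 2 reports group 4
 * as available, the routine returns RNi 4 * 3 == 12, removes group 4 from
 * selector 2 and clears its whole nibble, consuming nodes 12, 13 and 14
 * at once.
 */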
463
464/**
465 *
466 * @remote_node_table: This is the remote node table from which the remote node
467 * allocation is to take place.
468 * @remote_node_count: This is the remote node count which is one of
469 * SCU_SSP_REMOTE_NODE_COUNT(1) or SCU_STP_REMOTE_NODE_COUNT(3).
470 *
471 * This method will allocate a remote node that matches the remote node count
472 * specified by the caller. Valid values for remote node count are
473 * SCU_SSP_REMOTE_NODE_COUNT(1) or SCU_STP_REMOTE_NODE_COUNT(3). Returns the
474 * u16 remote node index, or an invalid remote node context if none is free.
475 */
476u16 sci_remote_node_table_allocate_remote_node(
477 struct sci_remote_node_table *remote_node_table,
478 u32 remote_node_count)
479{
480 u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
481
482 if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) {
483 remote_node_index =
484 sci_remote_node_table_allocate_single_remote_node(
485 remote_node_table, 0);
486
487 if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
488 remote_node_index =
489 sci_remote_node_table_allocate_single_remote_node(
490 remote_node_table, 1);
491 }
492
493 if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
494 remote_node_index =
495 sci_remote_node_table_allocate_single_remote_node(
496 remote_node_table, 2);
497 }
498 } else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) {
499 remote_node_index =
500 sci_remote_node_table_allocate_triple_remote_node(
501 remote_node_table, 2);
502 }
503
504 return remote_node_index;
505}
506
507/**
508 *
509 * @remote_node_table: The remote node table to which the index is freed.
510 * @remote_node_index: The remote node index that is to be freed.
511 * This method will free a single remote node index back to the remote node
512 * table and update the remote node groups accordingly.
513 */
514static void sci_remote_node_table_release_single_remote_node(
515 struct sci_remote_node_table *remote_node_table,
516 u16 remote_node_index)
517{
518 u32 group_index;
519 u8 group_value;
520
521 group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT;
522
523 group_value = sci_remote_node_table_get_group_value(remote_node_table, group_index);
524
525 /*
526 * Assert that we are not trying to add an entry to a slot that is already
527 * full. */
528 BUG_ON(group_value == SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE);
529
530 if (group_value == 0x00) {
531 /*
532 * There are no entries in this slot so it must be added to the single
533 * slot table. */
534 sci_remote_node_table_set_group_index(remote_node_table, 0, group_index);
535 } else if ((group_value & (group_value - 1)) == 0) {
536 /*
537 * There is only one entry in this slot so it must be moved from the
538 * single slot table to the dual slot table */
539 sci_remote_node_table_clear_group_index(remote_node_table, 0, group_index);
540 sci_remote_node_table_set_group_index(remote_node_table, 1, group_index);
541 } else {
542 /*
543 * There are two entries in the slot so it must be moved from the dual
544 * slot table to the triple slot table. */
545 sci_remote_node_table_clear_group_index(remote_node_table, 1, group_index);
546 sci_remote_node_table_set_group_index(remote_node_table, 2, group_index);
547 }
548
549 sci_remote_node_table_set_node_index(remote_node_table, remote_node_index);
550}
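
/*
 * The selector migration above keys off the pre-release population of the
 * group: group_value == 0 means the freed node is the group's first, so
 * the group enters selector 0; (group_value & (group_value - 1)) == 0
 * detects exactly one bit set, promoting the group from selector 0 to 1;
 * otherwise two nodes were already free and the group moves from selector
 * 1 to 2, becoming a full triple again.
 */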
551
552/**
553 *
554 * @remote_node_table: This is the remote node table to which the remote node
555 * index is to be freed.
556 *
557 * This method will release a group of three consecutive remote nodes back to
558 * the pool of free remote nodes.
559 */
560static void sci_remote_node_table_release_triple_remote_node(
561 struct sci_remote_node_table *remote_node_table,
562 u16 remote_node_index)
563{
564 u32 group_index;
565
566 group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT;
567
568 sci_remote_node_table_set_group_index(
569 remote_node_table, 2, group_index
570 );
571
572 sci_remote_node_table_set_group(remote_node_table, group_index);
573}
574
575/**
576 *
577 * @remote_node_table: The remote node table to which the remote node index is
578 * to be freed.
579 * @remote_node_count: This is the count of consecutive remote nodes that are
580 * to be freed.
581 *
582 * This method will release the remote node index back into the remote node
583 * table free pool.
584 */
585void sci_remote_node_table_release_remote_node_index(
586 struct sci_remote_node_table *remote_node_table,
587 u32 remote_node_count,
588 u16 remote_node_index)
589{
590 if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) {
591 sci_remote_node_table_release_single_remote_node(
592 remote_node_table, remote_node_index);
593 } else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) {
594 sci_remote_node_table_release_triple_remote_node(
595 remote_node_table, remote_node_index);
596 }
597}
598
diff --git a/drivers/scsi/isci/remote_node_table.h b/drivers/scsi/isci/remote_node_table.h
new file mode 100644
index 000000000000..721ab982d2ac
--- /dev/null
+++ b/drivers/scsi/isci/remote_node_table.h
@@ -0,0 +1,188 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#ifndef _SCIC_SDS_REMOTE_NODE_TABLE_H_
57#define _SCIC_SDS_REMOTE_NODE_TABLE_H_
58
59#include "isci.h"
60
61/**
62 *
63 *
64 * Remote node sets are sets of remote node indices in the remote node table.
65 * The SCU hardware requires that STP remote node entries take three
66 * consecutive remote node indices, so the table is arranged in sets of three.
67 * The bits are used as 0111 0111 to make a byte and the bits define the set
68 * of three remote nodes to use as a sequence.
69 */
70#define SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE 2
71
72/**
73 *
74 *
75 * Since the remote node table is organized as DWORDs, take the remote node
76 * sets in bytes and represent them in DWORDs. The lowest-ordered bits are the
77 * ones used in case the full DWORD is not being used, e.g. 0000 0000 0000 0000
78 * 0111 0111 0111 0111 if only a single WORD is in use in the DWORD.
79 */
80#define SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD \
81 (sizeof(u32) * SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE)
82/**
83 *
84 *
85 * This is a count of the number of remote nodes that can be represented in a
86 * byte
87 */
88#define SCIC_SDS_REMOTE_NODES_PER_BYTE \
89 (SCU_STP_REMOTE_NODE_COUNT * SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE)
90
91/**
92 *
93 *
94 * This is a count of the number of remote nodes that can be represented in a
95 * DWORD
96 */
97#define SCIC_SDS_REMOTE_NODES_PER_DWORD \
98 (sizeof(u32) * SCIC_SDS_REMOTE_NODES_PER_BYTE)
99
100/**
101 *
102 *
103 * This is the number of bits in a remote node group
104 */
105#define SCIC_SDS_REMOTE_NODES_BITS_PER_GROUP 4
106
107#define SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX (0xFFFFFFFF)
108#define SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE (0x07)
109#define SCIC_SDS_REMOTE_NODE_TABLE_EMPTY_SLOT_VALUE (0x00)
110
111/**
112 *
113 *
114 * Expander attached SATA remote node count
115 */
116#define SCU_STP_REMOTE_NODE_COUNT 3
117
118/**
119 *
120 *
121 * Expander or direct attached SSP remote node count
122 */
123#define SCU_SSP_REMOTE_NODE_COUNT 1
124
125/**
126 *
127 *
128 * Direct attached STP remote node count
129 */
130#define SCU_SATA_REMOTE_NODE_COUNT 1
131
132/**
133 * struct sci_remote_node_table -
134 *
135 *
136 */
137struct sci_remote_node_table {
138 /**
139 * This field contains the array size in dwords
140 */
141 u16 available_nodes_array_size;
142
143 /**
144 * This field contains the array size of the remote node group table in dwords.
145 */
146 u16 group_array_size;
147
148 /**
149 * This field is the array of available remote node entries in bits.
150 * Because of the way STP remote node data is allocated on the SCU hardware
151 * the remote nodes must occupy three consecutive remote node context
152 * entries. For ease of allocation and de-allocation we have broken the
153 * sets of three into a single nibble. When the STP RNi is allocated all
154 * of the bits in the nibble are cleared. This math results in a table size
155 * of MAX_REMOTE_NODES / CONSECUTIVE RNi ENTRIES for STP / 2 entries per byte.
156 */
157 u32 available_remote_nodes[
158 (SCI_MAX_REMOTE_DEVICES / SCIC_SDS_REMOTE_NODES_PER_DWORD)
159 + ((SCI_MAX_REMOTE_DEVICES % SCIC_SDS_REMOTE_NODES_PER_DWORD) != 0)];
160
161 /**
162 * This field is the nibble selector for the above table. There are three
163 * possible selectors each for fast lookup when trying to find one, two or
164 * three remote node entries.
165 */
166 u32 remote_node_groups[
167 SCU_STP_REMOTE_NODE_COUNT][
168 (SCI_MAX_REMOTE_DEVICES / (32 * SCU_STP_REMOTE_NODE_COUNT))
169 + ((SCI_MAX_REMOTE_DEVICES % (32 * SCU_STP_REMOTE_NODE_COUNT)) != 0)];
170
171};
172
173/* --------------------------------------------------------------------------- */
174
175void sci_remote_node_table_initialize(
176 struct sci_remote_node_table *remote_node_table,
177 u32 remote_node_entries);
178
179u16 sci_remote_node_table_allocate_remote_node(
180 struct sci_remote_node_table *remote_node_table,
181 u32 remote_node_count);
182
183void sci_remote_node_table_release_remote_node_index(
184 struct sci_remote_node_table *remote_node_table,
185 u32 remote_node_count,
186 u16 remote_node_index);
187
188#endif /* _SCIC_SDS_REMOTE_NODE_TABLE_H_ */
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
new file mode 100644
index 000000000000..a46e07ac789f
--- /dev/null
+++ b/drivers/scsi/isci/request.c
@@ -0,0 +1,3391 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#include "isci.h"
57#include "task.h"
58#include "request.h"
59#include "scu_completion_codes.h"
60#include "scu_event_codes.h"
61#include "sas.h"
62
63static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq,
64 int idx)
65{
66 if (idx == 0)
67 return &ireq->tc->sgl_pair_ab;
68 else if (idx == 1)
69 return &ireq->tc->sgl_pair_cd;
70 else if (idx < 0)
71 return NULL;
72 else
73 return &ireq->sg_table[idx - 2];
74}
75
76static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost,
77 struct isci_request *ireq, u32 idx)
78{
79 u32 offset;
80
81 if (idx == 0) {
82 offset = (void *) &ireq->tc->sgl_pair_ab -
83 (void *) &ihost->task_context_table[0];
84 return ihost->task_context_dma + offset;
85 } else if (idx == 1) {
86 offset = (void *) &ireq->tc->sgl_pair_cd -
87 (void *) &ihost->task_context_table[0];
88 return ihost->task_context_dma + offset;
89 }
90
91 return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
92}
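
/*
 * The two helpers above implement the same indexing scheme: SGL pair 0
 * lives in the task context's sgl_pair_ab, pair 1 in sgl_pair_cd, and
 * pairs 2..N spill into the request's external sg_table. The _dma variant
 * returns the bus address of the same slot, either as an offset into the
 * host's task_context_table or via sci_io_request_get_dma_addr().
 */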
93
94static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
95{
96 e->length = sg_dma_len(sg);
97 e->address_upper = upper_32_bits(sg_dma_address(sg));
98 e->address_lower = lower_32_bits(sg_dma_address(sg));
99 e->address_modifier = 0;
100}
101
102static void sci_request_build_sgl(struct isci_request *ireq)
103{
104 struct isci_host *ihost = ireq->isci_host;
105 struct sas_task *task = isci_request_access_task(ireq);
106 struct scatterlist *sg = NULL;
107 dma_addr_t dma_addr;
108 u32 sg_idx = 0;
109 struct scu_sgl_element_pair *scu_sg = NULL;
110 struct scu_sgl_element_pair *prev_sg = NULL;
111
112 if (task->num_scatter > 0) {
113 sg = task->scatter;
114
115 while (sg) {
116 scu_sg = to_sgl_element_pair(ireq, sg_idx);
117 init_sgl_element(&scu_sg->A, sg);
118 sg = sg_next(sg);
119 if (sg) {
120 init_sgl_element(&scu_sg->B, sg);
121 sg = sg_next(sg);
122 } else
123 memset(&scu_sg->B, 0, sizeof(scu_sg->B));
124
125 if (prev_sg) {
126 dma_addr = to_sgl_element_pair_dma(ihost,
127 ireq,
128 sg_idx);
129
130 prev_sg->next_pair_upper =
131 upper_32_bits(dma_addr);
132 prev_sg->next_pair_lower =
133 lower_32_bits(dma_addr);
134 }
135
136 prev_sg = scu_sg;
137 sg_idx++;
138 }
139 } else { /* handle when no sg */
140 scu_sg = to_sgl_element_pair(ireq, sg_idx);
141
142 dma_addr = dma_map_single(&ihost->pdev->dev,
143 task->scatter,
144 task->total_xfer_len,
145 task->data_dir);
146
147 ireq->zero_scatter_daddr = dma_addr;
148
149 scu_sg->A.length = task->total_xfer_len;
150 scu_sg->A.address_upper = upper_32_bits(dma_addr);
151 scu_sg->A.address_lower = lower_32_bits(dma_addr);
152 }
153
154 if (scu_sg) {
155 scu_sg->next_pair_upper = 0;
156 scu_sg->next_pair_lower = 0;
157 }
158}
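
/*
 * Chain layout produced above: each scu_sgl_element_pair holds two
 * scatterlist entries (A and B, with B zeroed for an odd count), and the
 * next_pair_upper/lower fields of each pair point at the bus address of
 * the following pair; the final pair's link is zeroed. A request with no
 * scatterlist instead maps task->scatter as a single contiguous buffer.
 */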
159
160static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
161{
162 struct ssp_cmd_iu *cmd_iu;
163 struct sas_task *task = isci_request_access_task(ireq);
164
165 cmd_iu = &ireq->ssp.cmd;
166
167 memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
168 cmd_iu->add_cdb_len = 0;
169 cmd_iu->_r_a = 0;
170 cmd_iu->_r_b = 0;
171 cmd_iu->en_fburst = 0; /* unsupported */
172 cmd_iu->task_prio = task->ssp_task.task_prio;
173 cmd_iu->task_attr = task->ssp_task.task_attr;
174 cmd_iu->_r_c = 0;
175
176 sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
177 sizeof(task->ssp_task.cdb) / sizeof(u32));
178}
179
180static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
181{
182 struct ssp_task_iu *task_iu;
183 struct sas_task *task = isci_request_access_task(ireq);
184 struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
185
186 task_iu = &ireq->ssp.tmf;
187
188 memset(task_iu, 0, sizeof(struct ssp_task_iu));
189
190 memcpy(task_iu->LUN, task->ssp_task.LUN, 8);
191
192 task_iu->task_func = isci_tmf->tmf_code;
193 task_iu->task_tag =
194 (ireq->ttype == tmf_task) ?
195 isci_tmf->io_tag :
196 SCI_CONTROLLER_INVALID_IO_TAG;
197}
198
199/**
200 * This method will fill in the SCU Task Context for any type of SSP request.
201 * @ireq:
202 * @task_context:
203 *
204 */
205static void scu_ssp_reqeust_construct_task_context(
206 struct isci_request *ireq,
207 struct scu_task_context *task_context)
208{
209 dma_addr_t dma_addr;
210 struct isci_remote_device *idev;
211 struct isci_port *iport;
212
213 idev = ireq->target_device;
214 iport = idev->owning_port;
215
216 /* Fill in the TC with its required data */
217 task_context->abort = 0;
218 task_context->priority = 0;
219 task_context->initiator_request = 1;
220 task_context->connection_rate = idev->connection_rate;
221 task_context->protocol_engine_index = ISCI_PEG;
222 task_context->logical_port_index = iport->physical_port_index;
223 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
224 task_context->valid = SCU_TASK_CONTEXT_VALID;
225 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
226
227 task_context->remote_node_index = idev->rnc.remote_node_index;
228 task_context->command_code = 0;
229
230 task_context->link_layer_control = 0;
231 task_context->do_not_dma_ssp_good_response = 1;
232 task_context->strict_ordering = 0;
233 task_context->control_frame = 0;
234 task_context->timeout_enable = 0;
235 task_context->block_guard_enable = 0;
236
237 task_context->address_modifier = 0;
238
239 /* task_context->type.ssp.tag = ireq->io_tag; */
240 task_context->task_phase = 0x01;
241
242 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
243 (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
244 (iport->physical_port_index <<
245 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
246 ISCI_TAG_TCI(ireq->io_tag));
247
248 /*
249 * Copy the physical address for the command buffer to the
250 * SCU Task Context
251 */
252 dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd);
253
254 task_context->command_iu_upper = upper_32_bits(dma_addr);
255 task_context->command_iu_lower = lower_32_bits(dma_addr);
256
257 /*
258 * Copy the physical address for the response buffer to the
259 * SCU Task Context
260 */
261 dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);
262
263 task_context->response_iu_upper = upper_32_bits(dma_addr);
264 task_context->response_iu_lower = lower_32_bits(dma_addr);
265}
266
267/**
268 * This method will fill in the SCU Task Context for an SSP IO request.
269 * @ireq:
270 *
271 */
272static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
273 enum dma_data_direction dir,
274 u32 len)
275{
276 struct scu_task_context *task_context = ireq->tc;
277
278 scu_ssp_reqeust_construct_task_context(ireq, task_context);
279
280 task_context->ssp_command_iu_length =
281 sizeof(struct ssp_cmd_iu) / sizeof(u32);
282 task_context->type.ssp.frame_type = SSP_COMMAND;
283
284 switch (dir) {
285 case DMA_FROM_DEVICE:
286 case DMA_NONE:
287 default:
288 task_context->task_type = SCU_TASK_TYPE_IOREAD;
289 break;
290 case DMA_TO_DEVICE:
291 task_context->task_type = SCU_TASK_TYPE_IOWRITE;
292 break;
293 }
294
295 task_context->transfer_length_bytes = len;
296
297 if (task_context->transfer_length_bytes > 0)
298 sci_request_build_sgl(ireq);
299}
300
301/**
302 * This method will fill in the SCU Task Context for an SSP Task request. The
303 * following important settings are utilized: -# priority ==
304 * SCU_TASK_PRIORITY_HIGH. This ensures that the task request is issued
305 * ahead of other tasks destined for the same Remote Node. -# task_type ==
306 * SCU_TASK_TYPE_IOREAD. This simply indicates that a normal request type
307 * (i.e. non-raw frame) is being utilized to perform task management. -#
308 * control_frame == 1. This ensures that the proper endianness is set so
309 * that the bytes are transmitted in the right order for a task frame.
310 * @ireq: This parameter specifies the task request object being
311 * constructed.
312 *
313 */
314static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
315{
316 struct scu_task_context *task_context = ireq->tc;
317
318 scu_ssp_reqeust_construct_task_context(ireq, task_context);
319
320 task_context->control_frame = 1;
321 task_context->priority = SCU_TASK_PRIORITY_HIGH;
322 task_context->task_type = SCU_TASK_TYPE_RAW_FRAME;
323 task_context->transfer_length_bytes = 0;
324 task_context->type.ssp.frame_type = SSP_TASK;
325 task_context->ssp_command_iu_length =
326 sizeof(struct ssp_task_iu) / sizeof(u32);
327}
328
329/**
330 * This method will fill in the SCU Task Context for any type of SATA
331 * request. This is called from the various SATA constructors.
332 * @ireq: The general IO request object which is to be used in
333 * constructing the SCU task context.
334 * @task_context: The buffer pointer for the SCU task context which is being
335 * constructed.
336 *
337 * The general io request construction is complete. The buffer assignment for
338 * the command buffer is complete. TODO: Revisit task context construction to
339 * determine what is common for SSP/SMP/STP task context structures.
340 */
341static void scu_sata_reqeust_construct_task_context(
342 struct isci_request *ireq,
343 struct scu_task_context *task_context)
344{
345 dma_addr_t dma_addr;
346 struct isci_remote_device *idev;
347 struct isci_port *iport;
348
349 idev = ireq->target_device;
350 iport = idev->owning_port;
351
352 /* Fill in the TC with its required data */
353 task_context->abort = 0;
354 task_context->priority = SCU_TASK_PRIORITY_NORMAL;
355 task_context->initiator_request = 1;
356 task_context->connection_rate = idev->connection_rate;
357 task_context->protocol_engine_index = ISCI_PEG;
358 task_context->logical_port_index = iport->physical_port_index;
359 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
360 task_context->valid = SCU_TASK_CONTEXT_VALID;
361 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
362
363 task_context->remote_node_index = idev->rnc.remote_node_index;
364 task_context->command_code = 0;
365
366 task_context->link_layer_control = 0;
367 task_context->do_not_dma_ssp_good_response = 1;
368 task_context->strict_ordering = 0;
369 task_context->control_frame = 0;
370 task_context->timeout_enable = 0;
371 task_context->block_guard_enable = 0;
372
373 task_context->address_modifier = 0;
374 task_context->task_phase = 0x01;
375
376 task_context->ssp_command_iu_length =
377 (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);
378
379 /* Set the first word of the H2D REG FIS */
380 task_context->type.words[0] = *(u32 *)&ireq->stp.cmd;
381
382 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
383 (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
384 (iport->physical_port_index <<
385 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
386 ISCI_TAG_TCI(ireq->io_tag));
387 /*
388 * Copy the physical address for the command buffer to the SCU Task
389 * Context. We must offset the command buffer by 4 bytes because the
390 * first 4 bytes are transferred in the body of the TC.
391 */
392 dma_addr = sci_io_request_get_dma_addr(ireq,
393 ((char *) &ireq->stp.cmd) +
394 sizeof(u32));
395
396 task_context->command_iu_upper = upper_32_bits(dma_addr);
397 task_context->command_iu_lower = lower_32_bits(dma_addr);
398
399 /* SATA Requests do not have a response buffer */
400 task_context->response_iu_upper = 0;
401 task_context->response_iu_lower = 0;
402}
403
404static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq)
405{
406 struct scu_task_context *task_context = ireq->tc;
407
408 scu_sata_reqeust_construct_task_context(ireq, task_context);
409
410 task_context->control_frame = 0;
411 task_context->priority = SCU_TASK_PRIORITY_NORMAL;
412 task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
413 task_context->type.stp.fis_type = FIS_REGH2D;
414 task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
415}
416
417static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq,
418 bool copy_rx_frame)
419{
420 struct isci_stp_request *stp_req = &ireq->stp.req;
421
422 scu_stp_raw_request_construct_task_context(ireq);
423
424 stp_req->status = 0;
425 stp_req->sgl.offset = 0;
426 stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A;
427
428 if (copy_rx_frame) {
429 sci_request_build_sgl(ireq);
430 stp_req->sgl.index = 0;
431 } else {
432 /* The user does not want the data copied to the SGL buffer location */
433 stp_req->sgl.index = -1;
434 }
435
436 return SCI_SUCCESS;
437}
438
439/**
440 *
441 * @ireq: This parameter specifies the request to be constructed as an
442 * optimized request.
443 * @optimized_task_type: This parameter specifies whether the request is to be
444 * a UDMA request or an NCQ request. - A value of 0 indicates UDMA. - A
445 * value of 1 indicates NCQ.
446 *
447 * This method will perform request construction common to all types of STP
448 * requests that are optimized by the silicon (i.e. UDMA, NCQ), setting the
449 * task type according to the data direction.
450 */
451static void sci_stp_optimized_request_construct(struct isci_request *ireq,
452 u8 optimized_task_type,
453 u32 len,
454 enum dma_data_direction dir)
455{
456 struct scu_task_context *task_context = ireq->tc;
457
458 /* Build the STP task context structure */
459 scu_sata_reqeust_construct_task_context(ireq, task_context);
460
461 /* Copy over the SGL elements */
462 sci_request_build_sgl(ireq);
463
464 /* Copy over the number of bytes to be transferred */
465 task_context->transfer_length_bytes = len;
466
467 if (dir == DMA_TO_DEVICE) {
468 /*
469 * The difference between the DMA IN and DMA OUT request task type
470 * values is consistent with the difference between FPDMA READ
471 * and FPDMA WRITE values. Add the supplied task type parameter
472 * to this difference to set the task type properly for this
473 * DATA OUT (WRITE) case. */
474 task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
475 - SCU_TASK_TYPE_DMA_IN);
476 } else {
477 /*
478 * For the DATA IN (READ) case, simply save the supplied
479 * optimized task type. */
480 task_context->task_type = optimized_task_type;
481 }
482}
483
484
485
486static enum sci_status
487sci_io_request_construct_sata(struct isci_request *ireq,
488 u32 len,
489 enum dma_data_direction dir,
490 bool copy)
491{
492 enum sci_status status = SCI_SUCCESS;
493 struct sas_task *task = isci_request_access_task(ireq);
494
495 /* check for management protocols */
496 if (ireq->ttype == tmf_task) {
497 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
498
499 if (tmf->tmf_code == isci_tmf_sata_srst_high ||
500 tmf->tmf_code == isci_tmf_sata_srst_low) {
501 scu_stp_raw_request_construct_task_context(ireq);
502 return SCI_SUCCESS;
503 } else {
504 dev_err(&ireq->owning_controller->pdev->dev,
505 "%s: Request 0x%p received un-handled SAT "
506 "management protocol 0x%x.\n",
507 __func__, ireq, tmf->tmf_code);
508
509 return SCI_FAILURE;
510 }
511 }
512
513 if (!sas_protocol_ata(task->task_proto)) {
514 dev_err(&ireq->owning_controller->pdev->dev,
515 "%s: Non-ATA protocol in SATA path: 0x%x\n",
516 __func__,
517 task->task_proto);
518 return SCI_FAILURE;
519
520 }
521
522 /* non data */
523 if (task->data_dir == DMA_NONE) {
524 scu_stp_raw_request_construct_task_context(ireq);
525 return SCI_SUCCESS;
526 }
527
528 /* NCQ */
529 if (task->ata_task.use_ncq) {
530 sci_stp_optimized_request_construct(ireq,
531 SCU_TASK_TYPE_FPDMAQ_READ,
532 len, dir);
533 return SCI_SUCCESS;
534 }
535
536 /* DMA */
537 if (task->ata_task.dma_xfer) {
538 sci_stp_optimized_request_construct(ireq,
539 SCU_TASK_TYPE_DMA_IN,
540 len, dir);
541 return SCI_SUCCESS;
542 } else /* PIO */
543 return sci_stp_pio_request_construct(ireq, copy);
544
545 return status;
546}
547
548static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq)
549{
550 struct sas_task *task = isci_request_access_task(ireq);
551
552 ireq->protocol = SCIC_SSP_PROTOCOL;
553
554 scu_ssp_io_request_construct_task_context(ireq,
555 task->data_dir,
556 task->total_xfer_len);
557
558 sci_io_request_build_ssp_command_iu(ireq);
559
560 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
561
562 return SCI_SUCCESS;
563}
564
565enum sci_status sci_task_request_construct_ssp(
566 struct isci_request *ireq)
567{
568 /* Construct the SSP Task SCU Task Context */
569 scu_ssp_task_request_construct_task_context(ireq);
570
571 /* Fill in the SSP Task IU */
572 sci_task_request_build_ssp_task_iu(ireq);
573
574 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
575
576 return SCI_SUCCESS;
577}
578
579static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq)
580{
581 enum sci_status status;
582 bool copy = false;
583 struct sas_task *task = isci_request_access_task(ireq);
584
585 ireq->protocol = SCIC_STP_PROTOCOL;
586
587 copy = (task->data_dir != DMA_NONE);
588
589 status = sci_io_request_construct_sata(ireq,
590 task->total_xfer_len,
591 task->data_dir,
592 copy);
593
594 if (status == SCI_SUCCESS)
595 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
596
597 return status;
598}
599
600enum sci_status sci_task_request_construct_sata(struct isci_request *ireq)
601{
602 enum sci_status status = SCI_SUCCESS;
603
604 /* check for management protocols */
605 if (ireq->ttype == tmf_task) {
606 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
607
608 if (tmf->tmf_code == isci_tmf_sata_srst_high ||
609 tmf->tmf_code == isci_tmf_sata_srst_low) {
610 scu_stp_raw_request_construct_task_context(ireq);
611 } else {
612 dev_err(&ireq->owning_controller->pdev->dev,
613 "%s: Request 0x%p received un-handled SAT "
614 "Protocol 0x%x.\n",
615 __func__, ireq, tmf->tmf_code);
616
617 return SCI_FAILURE;
618 }
619 }
620
621 if (status != SCI_SUCCESS)
622 return status;
623 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
624
625 return status;
626}
627
628/**
629 * sci_req_tx_bytes - bytes transferred when reply underruns request
630 * @ireq: request that was terminated early
631 */
632#define SCU_TASK_CONTEXT_SRAM 0x200000
633static u32 sci_req_tx_bytes(struct isci_request *ireq)
634{
635 struct isci_host *ihost = ireq->owning_controller;
636 u32 ret_val = 0;
637
638 if (readl(&ihost->smu_registers->address_modifier) == 0) {
639 void __iomem *scu_reg_base = ihost->scu_registers;
640
641 /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
642 * BAR1 is the scu_registers
643 * 0x20002C = 0x200000 + 0x2c
644 * = start of task context SRAM + offset of (type.ssp.data_offset)
645 * TCi is the io_tag of struct isci_request
646 */
647 ret_val = readl(scu_reg_base +
648 (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
649 ((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag)));
650 }
651
652 return ret_val;
653}
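
/*
 * Address computation above, spelled out: with BAR1 as scu_registers, the
 * read targets 0x200000 + TCi * sizeof(struct scu_task_context) +
 * offsetof(struct scu_task_context, type.ssp.data_offset). The 256 * TCi
 * in the comment implies the task context is 256 bytes; the value read is
 * the hardware's running count of bytes transferred for the terminated
 * request.
 */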
654
655enum sci_status sci_request_start(struct isci_request *ireq)
656{
657 enum sci_base_request_states state;
658 struct scu_task_context *tc = ireq->tc;
659 struct isci_host *ihost = ireq->owning_controller;
660
661 state = ireq->sm.current_state_id;
662 if (state != SCI_REQ_CONSTRUCTED) {
663 dev_warn(&ihost->pdev->dev,
664 "%s: SCIC IO Request requested to start while in wrong "
665 "state %d\n", __func__, state);
666 return SCI_FAILURE_INVALID_STATE;
667 }
668
669 tc->task_index = ISCI_TAG_TCI(ireq->io_tag);
670
671 switch (tc->protocol_type) {
672 case SCU_TASK_CONTEXT_PROTOCOL_SMP:
673 case SCU_TASK_CONTEXT_PROTOCOL_SSP:
674 /* SSP/SMP Frame */
675 tc->type.ssp.tag = ireq->io_tag;
676 tc->type.ssp.target_port_transfer_tag = 0xFFFF;
677 break;
678
679 case SCU_TASK_CONTEXT_PROTOCOL_STP:
680 /* STP/SATA Frame
681 * tc->type.stp.ncq_tag = ireq->ncq_tag;
682 */
683 break;
684
685 case SCU_TASK_CONTEXT_PROTOCOL_NONE:
686 /* TODO: When do we set no protocol type? */
687 break;
688
689 default:
690 /* This should never happen since we build the IO
691 * requests */
692 break;
693 }
694
695 /* Add to the post_context the io tag value */
696 ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag);
697
698 /* Everything is good go ahead and change state */
699 sci_change_state(&ireq->sm, SCI_REQ_STARTED);
700
701 return SCI_SUCCESS;
702}
703
704enum sci_status
705sci_io_request_terminate(struct isci_request *ireq)
706{
707 enum sci_base_request_states state;
708
709 state = ireq->sm.current_state_id;
710
711 switch (state) {
712 case SCI_REQ_CONSTRUCTED:
713 ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
714 ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
715 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
716 return SCI_SUCCESS;
717 case SCI_REQ_STARTED:
718 case SCI_REQ_TASK_WAIT_TC_COMP:
719 case SCI_REQ_SMP_WAIT_RESP:
720 case SCI_REQ_SMP_WAIT_TC_COMP:
721 case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
722 case SCI_REQ_STP_UDMA_WAIT_D2H:
723 case SCI_REQ_STP_NON_DATA_WAIT_H2D:
724 case SCI_REQ_STP_NON_DATA_WAIT_D2H:
725 case SCI_REQ_STP_PIO_WAIT_H2D:
726 case SCI_REQ_STP_PIO_WAIT_FRAME:
727 case SCI_REQ_STP_PIO_DATA_IN:
728 case SCI_REQ_STP_PIO_DATA_OUT:
729 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
730 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
731 case SCI_REQ_STP_SOFT_RESET_WAIT_D2H:
732 sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
733 return SCI_SUCCESS;
734 case SCI_REQ_TASK_WAIT_TC_RESP:
735 sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
736 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
737 return SCI_SUCCESS;
738 case SCI_REQ_ABORTING:
739 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
740 return SCI_SUCCESS;
741 case SCI_REQ_COMPLETED:
742 default:
743 dev_warn(&ireq->owning_controller->pdev->dev,
744 "%s: SCIC IO Request requested to abort while in wrong "
745 "state %d\n",
746 __func__,
747 ireq->sm.current_state_id);
748 break;
749 }
750
751 return SCI_FAILURE_INVALID_STATE;
752}
753
754enum sci_status sci_request_complete(struct isci_request *ireq)
755{
756 enum sci_base_request_states state;
757 struct isci_host *ihost = ireq->owning_controller;
758
759 state = ireq->sm.current_state_id;
760 if (WARN_ONCE(state != SCI_REQ_COMPLETED,
761 "isci: request completion from wrong state (%d)\n", state))
762 return SCI_FAILURE_INVALID_STATE;
763
764 if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
765 sci_controller_release_frame(ihost,
766 ireq->saved_rx_frame_index);
767
768 /* XXX can we just stop the machine and remove the 'final' state? */
769 sci_change_state(&ireq->sm, SCI_REQ_FINAL);
770 return SCI_SUCCESS;
771}
772
773enum sci_status sci_io_request_event_handler(struct isci_request *ireq,
774 u32 event_code)
775{
776 enum sci_base_request_states state;
777 struct isci_host *ihost = ireq->owning_controller;
778
779 state = ireq->sm.current_state_id;
780
781 if (state != SCI_REQ_STP_PIO_DATA_IN) {
782 dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %d\n",
783 __func__, event_code, state);
784
785 return SCI_FAILURE_INVALID_STATE;
786 }
787
788 switch (scu_get_event_specifier(event_code)) {
789 case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
790 /* We are waiting for data and the SCU has R_ERR'd the data frame.
791 * Go back to waiting for the D2H Register FIS
792 */
793 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
794 return SCI_SUCCESS;
795 default:
796 dev_err(&ihost->pdev->dev,
797 "%s: pio request unexpected event %#x\n",
798 __func__, event_code);
799
800 /* TODO Should we fail the PIO request when we get an
801 * unexpected event?
802 */
803 return SCI_FAILURE;
804 }
805}
806
807/*
808 * This function copies response data for requests returning response data
809 * instead of sense data.
810 * @ireq: This parameter specifies the request object for which to copy
811 * the response data.
812 */
813static void sci_io_request_copy_response(struct isci_request *ireq)
814{
815 void *resp_buf;
816 u32 len;
817 struct ssp_response_iu *ssp_response;
818 struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
819
820 ssp_response = &ireq->ssp.rsp;
821
822 resp_buf = &isci_tmf->resp.resp_iu;
823
824 len = min_t(u32,
825 SSP_RESP_IU_MAX_SIZE,
826 be32_to_cpu(ssp_response->response_data_len));
827
828 memcpy(resp_buf, ssp_response->resp_data, len);
829}
830
831static enum sci_status
832request_started_state_tc_event(struct isci_request *ireq,
833 u32 completion_code)
834{
835 struct ssp_response_iu *resp_iu;
836 u8 datapres;
837
838 /* TODO: Any SDMA return code other than 0 is bad; decode bits
839 * 0x003C0000 to determine the SDMA status.
840 */
841 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
842 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
843 ireq->scu_status = SCU_TASK_DONE_GOOD;
844 ireq->sci_status = SCI_SUCCESS;
845 break;
846 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
847 /* There are times when the SCU hardware will return an early
848 * response because the io request specified more data than is
849 * returned by the target device (mode pages, inquiry data,
850 * etc.). We must check the response status to see if this is
851 * truly a failed request or a good request that just got
852 * completed early.
853 */
854 struct ssp_response_iu *resp = &ireq->ssp.rsp;
855 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
856
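		/* In-place 32-bit byte swap of the response IU so its
		 * fields can be parsed in CPU byte order.
		 */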
857 sci_swab32_cpy(&ireq->ssp.rsp,
858 &ireq->ssp.rsp,
859 word_cnt);
860
861 if (resp->status == 0) {
862 ireq->scu_status = SCU_TASK_DONE_GOOD;
863 ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
864 } else {
865 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
866 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
867 }
868 break;
869 }
870 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
871 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
872
873 sci_swab32_cpy(&ireq->ssp.rsp,
874 &ireq->ssp.rsp,
875 word_cnt);
876
877 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
878 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
879 break;
880 }
881
882 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
883 /* TODO With TASK_DONE_RESP_LEN_ERR is the response frame
884 * guaranteed to be received before this completion status is
885 * posted?
886 */
887 resp_iu = &ireq->ssp.rsp;
888 datapres = resp_iu->datapres;
889
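		/* Per the SSP RESPONSE IU definition, DATAPRES 01b means
		 * RESPONSE_DATA and 10b means SENSE_DATA; either requires
		 * surfacing the response to the upper layer.
		 */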
890 if (datapres == 1 || datapres == 2) {
891 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
892 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
893 } else {
894 ireq->scu_status = SCU_TASK_DONE_GOOD;
895 ireq->sci_status = SCI_SUCCESS;
896 }
897 break;
898 /* Only an STP device gets suspended. */
899 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
900 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
901 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
902 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
903 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
904 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
905 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
906 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
907 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
908 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
909 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
910 if (ireq->protocol == SCIC_STP_PROTOCOL) {
911 ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
912 SCU_COMPLETION_TL_STATUS_SHIFT;
913 ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
914 } else {
915 ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
916 SCU_COMPLETION_TL_STATUS_SHIFT;
917 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
918 }
919 break;
920
921 /* Both STP and SSP devices get suspended. */
922 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
923 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
924 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
925 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
926 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
927 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
928 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
929 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
930 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
931 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
932 ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
933 SCU_COMPLETION_TL_STATUS_SHIFT;
934 ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
935 break;
936
937 /* Neither SSP nor STP gets suspended. */
938 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
939 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
940 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
941 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
942 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
943 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
944 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
945 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
946 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
947 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
948 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
949 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
950 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
951 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
952 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
953 default:
954 ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
955 SCU_COMPLETION_TL_STATUS_SHIFT;
956 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
957 break;
958 }
959
960 /*
961 * TODO: This is probably wrong for ACK/NAK timeout conditions
962 */
963
964 /* In all cases we will treat this as the completion of the IO req. */
965 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
966 return SCI_SUCCESS;
967}
968
969static enum sci_status
970request_aborting_state_tc_event(struct isci_request *ireq,
971 u32 completion_code)
972{
973 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
974 case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
975 case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
976 ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
977 ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
978 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
979 break;
980
981 default:
982 /* Unless we get some strange error, wait for the task abort to complete.
983 * TODO: Should there be a state change for this completion?
984 */
985 break;
986 }
987
988 return SCI_SUCCESS;
989}
990
991static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq,
992 u32 completion_code)
993{
994 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
995 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
996 ireq->scu_status = SCU_TASK_DONE_GOOD;
997 ireq->sci_status = SCI_SUCCESS;
998 sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
999 break;
1000 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
1001 /* Currently, the decision is to simply allow the task request
1002 * to time out if the task IU wasn't received successfully.
1003 * There is a potential for receiving multiple task responses if
1004 * we decide to send the task IU again.
1005 */
1006 dev_warn(&ireq->owning_controller->pdev->dev,
1007 "%s: TaskRequest:0x%p CompletionCode:%x - "
1008 "ACK/NAK timeout\n", __func__, ireq,
1009 completion_code);
1010
1011 sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
1012 break;
1013 default:
1014 /*
1015 * All other completion statuses cause the IO to be complete.
1016 * If a NAK was received, then it is up to the user to retry
1017 * the request.
1018 */
1019 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1020 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1021 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1022 break;
1023 }
1024
1025 return SCI_SUCCESS;
1026}
1027
1028static enum sci_status
1029smp_request_await_response_tc_event(struct isci_request *ireq,
1030 u32 completion_code)
1031{
1032 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1033 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1034 /* In the AWAIT RESPONSE state, any TC completion is
1035 * unexpected, but if the TC has a success status, we
1036 * complete the IO anyway.
1037 */
1038 ireq->scu_status = SCU_TASK_DONE_GOOD;
1039 ireq->sci_status = SCI_SUCCESS;
1040 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1041 break;
1042 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
1043 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
1044 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
1045 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
1046 /* These statuses have been seen with a specific LSI
1047 * expander, which sometimes cannot send an SMP
1048 * response within 2 ms. This causes our hardware to break
1049 * the connection and set the TC completion with one of
1050 * these SMP_XXX_XX_ERR statuses. For this type of error,
1051 * we ask the ihost user to retry the request.
1052 */
1053 ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR;
1054 ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED;
1055 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1056 break;
1057 default:
1058 /* All other completion statuses cause the IO to be complete. If a NAK
1059 * was received, then it is up to the user to retry the request.
1060 */
1061 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1062 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1063 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1064 break;
1065 }
1066
1067 return SCI_SUCCESS;
1068}
1069
1070static enum sci_status
1071smp_request_await_tc_event(struct isci_request *ireq,
1072 u32 completion_code)
1073{
1074 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1075 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1076 ireq->scu_status = SCU_TASK_DONE_GOOD;
1077 ireq->sci_status = SCI_SUCCESS;
1078 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1079 break;
1080 default:
1081 /* All other completion statuses cause the IO to be
1082 * complete. If a NAK was received, then it is up to
1083 * the user to retry the request.
1084 */
1085 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1086 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1087 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1088 break;
1089 }
1090
1091 return SCI_SUCCESS;
1092}
1093
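/* Advance the PIO SGL cursor: within a pair, step from element A to B; at
 * the end of a pair, follow the next-pair link to element A of the next
 * pair.  A zeroed element or next-pair address terminates the walk and
 * yields NULL.
 */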
1094static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req)
1095{
1096 struct scu_sgl_element *sgl;
1097 struct scu_sgl_element_pair *sgl_pair;
1098 struct isci_request *ireq = to_ireq(stp_req);
1099 struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl;
1100
1101 sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
1102 if (!sgl_pair)
1103 sgl = NULL;
1104 else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) {
1105 if (sgl_pair->B.address_lower == 0 &&
1106 sgl_pair->B.address_upper == 0) {
1107 sgl = NULL;
1108 } else {
1109 pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B;
1110 sgl = &sgl_pair->B;
1111 }
1112 } else {
1113 if (sgl_pair->next_pair_lower == 0 &&
1114 sgl_pair->next_pair_upper == 0) {
1115 sgl = NULL;
1116 } else {
1117 pio_sgl->index++;
1118 pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A;
1119 sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
1120 sgl = &sgl_pair->A;
1121 }
1122 }
1123
1124 return sgl;
1125}
1126
1127static enum sci_status
1128stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq,
1129 u32 completion_code)
1130{
1131 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1132 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1133 ireq->scu_status = SCU_TASK_DONE_GOOD;
1134 ireq->sci_status = SCI_SUCCESS;
1135 sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
1136 break;
1137
1138 default:
1139 /* All other completion statuses cause the IO to be
1140 * complete. If a NAK was received, then it is up to
1141 * the user to retry the request.
1142 */
1143 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1144 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1145 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1146 break;
1147 }
1148
1149 return SCI_SUCCESS;
1150}
1151
1152#define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */
1153
1154/* Transmit a DATA FIS from (current sgl + offset) for the input
1155 * parameter length; the current sgl and offset are already stored in the IO request.
1156 */
1157static enum sci_status sci_stp_request_pio_data_out_trasmit_data_frame(
1158 struct isci_request *ireq,
1159 u32 length)
1160{
1161 struct isci_stp_request *stp_req = &ireq->stp.req;
1162 struct scu_task_context *task_context = ireq->tc;
1163 struct scu_sgl_element_pair *sgl_pair;
1164 struct scu_sgl_element *current_sgl;
1165
1166 /* Recycle the TC and reconstruct it to send out a DATA FIS containing
1167 * the data from current_sgl + offset for the input length.
1168 */
1169 sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
1170 if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A)
1171 current_sgl = &sgl_pair->A;
1172 else
1173 current_sgl = &sgl_pair->B;
1174
1175 /* update the TC */
1176 task_context->command_iu_upper = current_sgl->address_upper;
1177 task_context->command_iu_lower = current_sgl->address_lower;
1178 task_context->transfer_length_bytes = length;
1179 task_context->type.stp.fis_type = FIS_DATA;
1180
1181 /* send the new TC out. */
1182 return sci_controller_continue_io(ireq);
1183}
1184
1185static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq)
1186{
1187 struct isci_stp_request *stp_req = &ireq->stp.req;
1188 struct scu_sgl_element_pair *sgl_pair;
1189 struct scu_sgl_element *sgl;
1190 enum sci_status status;
1191 u32 offset;
1192 u32 len = 0;
1193
1194 offset = stp_req->sgl.offset;
1195 sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
1196 if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__))
1197 return SCI_FAILURE;
1198
1199 if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) {
1200 sgl = &sgl_pair->A;
1201 len = sgl_pair->A.length - offset;
1202 } else {
1203 sgl = &sgl_pair->B;
1204 len = sgl_pair->B.length - offset;
1205 }
1206
1207 if (stp_req->pio_len == 0)
1208 return SCI_SUCCESS;
1209
1210 if (stp_req->pio_len >= len) {
1211 status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, len);
1212 if (status != SCI_SUCCESS)
1213 return status;
1214 stp_req->pio_len -= len;
1215
1216 /* update the current sgl, offset and save for future */
1217 sgl = pio_sgl_next(stp_req);
1218 offset = 0;
1219 } else if (stp_req->pio_len < len) {
1220 status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len);
1221
1222 /* Sgl offset will be adjusted and saved for future */
1223 offset += stp_req->pio_len;
1224 sgl->address_lower += stp_req->pio_len;
1225 stp_req->pio_len = 0;
1226 }
1227
1228 stp_req->sgl.offset = offset;
1229
1230 return status;
1231}
1232
1233/**
1234 * sci_stp_request_pio_data_in_copy_data_buffer()
1235 * @stp_req: The request that is used for the SGL processing.
1236 * @data_buf: The buffer of data to be copied.
1237 * @len: The length of the data transfer.
1238 *
1239 * Copy the data from the buffer for the specified length to the IO request's
1240 * SGL-specified data region.  Returns an enum sci_status.
1241 */
1242static enum sci_status
1243sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
1244 u8 *data_buf, u32 len)
1245{
1246 struct isci_request *ireq;
1247 u8 *src_addr;
1248 int copy_len;
1249 struct sas_task *task;
1250 struct scatterlist *sg;
1251 void *kaddr;
1252 int total_len = len;
1253
1254 ireq = to_ireq(stp_req);
1255 task = isci_request_access_task(ireq);
1256 src_addr = data_buf;
1257
1258 if (task->num_scatter > 0) {
1259 sg = task->scatter;
1260
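		/* Walk the scatterlist, copying up to one SG entry per
		 * pass.  kmap_atomic() with KM_IRQ0 is used since this
		 * path can run in interrupt context.
		 */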
1261 while (total_len > 0) {
1262 struct page *page = sg_page(sg);
1263
1264 copy_len = min_t(int, total_len, sg_dma_len(sg));
1265 kaddr = kmap_atomic(page, KM_IRQ0);
1266 memcpy(kaddr + sg->offset, src_addr, copy_len);
1267 kunmap_atomic(kaddr, KM_IRQ0);
1268 total_len -= copy_len;
1269 src_addr += copy_len;
1270 sg = sg_next(sg);
1271 }
1272 } else {
1273 BUG_ON(task->total_xfer_len < total_len);
1274 memcpy(task->scatter, src_addr, total_len);
1275 }
1276
1277 return SCI_SUCCESS;
1278}
1279
1280/**
1281 * sci_stp_request_pio_data_in_copy_data()
1282 * @stp_req: The PIO DATA IN request that is to receive the data.
1283 * @data_buffer: The buffer to copy from.
1284 *
1285 * Copy the data buffer to the IO request data region.  Returns an enum sci_status.
1286 */
1287static enum sci_status sci_stp_request_pio_data_in_copy_data(
1288 struct isci_stp_request *stp_req,
1289 u8 *data_buffer)
1290{
1291 enum sci_status status;
1292
1293 /*
1294 * If there is less than 1K remaining in the transfer request,
1295 * copy just the data for the transfer. */
1296 if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) {
1297 status = sci_stp_request_pio_data_in_copy_data_buffer(
1298 stp_req, data_buffer, stp_req->pio_len);
1299
1300 if (status == SCI_SUCCESS)
1301 stp_req->pio_len = 0;
1302 } else {
1303 /* We are transferring the whole frame, so copy */
1304 status = sci_stp_request_pio_data_in_copy_data_buffer(
1305 stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
1306
1307 if (status == SCI_SUCCESS)
1308 stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE;
1309 }
1310
1311 return status;
1312}
1313
1314static enum sci_status
1315stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
1316 u32 completion_code)
1317{
1318 enum sci_status status = SCI_SUCCESS;
1319
1320 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1321 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1322 ireq->scu_status = SCU_TASK_DONE_GOOD;
1323 ireq->sci_status = SCI_SUCCESS;
1324 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1325 break;
1326
1327 default:
1328 /* All other completion statuses cause the IO to be
1329 * complete. If a NAK was received, then it is up to
1330 * the user to retry the request.
1331 */
1332 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1333 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1334 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1335 break;
1336 }
1337
1338 return status;
1339}
1340
1341static enum sci_status
1342pio_data_out_tx_done_tc_event(struct isci_request *ireq,
1343 u32 completion_code)
1344{
1345 enum sci_status status = SCI_SUCCESS;
1346 bool all_frames_transferred = false;
1347 struct isci_stp_request *stp_req = &ireq->stp.req;
1348
1349 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1350 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1351 /* Transmit data */
1352 if (stp_req->pio_len != 0) {
1353 status = sci_stp_request_pio_data_out_transmit_data(ireq);
1354 if (status == SCI_SUCCESS) {
1355 if (stp_req->pio_len == 0)
1356 all_frames_transferred = true;
1357 }
1358 } else if (stp_req->pio_len == 0) {
1359 /*
1360 * This happens if all of the data is written the first
1361 * time, right after the PIO Setup FIS is received.
1362 */
1363 all_frames_transferred = true;
1364 }
1365
1366 /* all data transferred. */
1367 if (all_frames_transferred) {
1368 /*
1369 * Change the state to SCI_REQ_STP_PIO_WAIT_FRAME
1370 * and wait for a PIO Setup FIS or a D2H Register FIS. */
1371 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1372 }
1373 break;
1374
1375 default:
1376 /*
1377 * All other completion statuses cause the IO to be complete.
1378 * If a NAK was received, then it is up to the user to retry
1379 * the request.
1380 */
1381 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1382 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1383 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1384 break;
1385 }
1386
1387 return status;
1388}
1389
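/* Copy an unsolicited D2H Register FIS into the request's response buffer.
 * The frame is released back to the controller whether or not the header
 * could be retrieved or was of the expected type.
 */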
1390static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq,
1391 u32 frame_index)
1392{
1393 struct isci_host *ihost = ireq->owning_controller;
1394 struct dev_to_host_fis *frame_header;
1395 enum sci_status status;
1396 u32 *frame_buffer;
1397
1398 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1399 frame_index,
1400 (void **)&frame_header);
1401
1402 if ((status == SCI_SUCCESS) &&
1403 (frame_header->fis_type == FIS_REGD2H)) {
1404 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1405 frame_index,
1406 (void **)&frame_buffer);
1407
1408 sci_controller_copy_sata_response(&ireq->stp.rsp,
1409 frame_header,
1410 frame_buffer);
1411 }
1412
1413 sci_controller_release_frame(ihost, frame_index);
1414
1415 return status;
1416}
1417
1418enum sci_status
1419sci_io_request_frame_handler(struct isci_request *ireq,
1420 u32 frame_index)
1421{
1422 struct isci_host *ihost = ireq->owning_controller;
1423 struct isci_stp_request *stp_req = &ireq->stp.req;
1424 enum sci_base_request_states state;
1425 enum sci_status status;
1426 ssize_t word_cnt;
1427
1428 state = ireq->sm.current_state_id;
1429 switch (state) {
1430 case SCI_REQ_STARTED: {
1431 struct ssp_frame_hdr ssp_hdr;
1432 void *frame_header;
1433
1434 sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1435 frame_index,
1436 &frame_header);
1437
1438 word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
1439 sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);
1440
1441 if (ssp_hdr.frame_type == SSP_RESPONSE) {
1442 struct ssp_response_iu *resp_iu;
1443 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
1444
1445 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1446 frame_index,
1447 (void **)&resp_iu);
1448
1449 sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt);
1450
1451 resp_iu = &ireq->ssp.rsp;
1452
1453 if (resp_iu->datapres == 0x01 ||
1454 resp_iu->datapres == 0x02) {
1455 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1456 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1457 } else {
1458 ireq->scu_status = SCU_TASK_DONE_GOOD;
1459 ireq->sci_status = SCI_SUCCESS;
1460 }
1461 } else {
1462 /* not a response frame, why did it get forwarded? */
1463 dev_err(&ihost->pdev->dev,
1464 "%s: SCIC IO Request 0x%p received unexpected "
1465 "frame %d type 0x%02x\n", __func__, ireq,
1466 frame_index, ssp_hdr.frame_type);
1467 }
1468
1469 /*
1470 * In any case we are done with this frame buffer; return it to
1471 * the controller.
1472 */
1473 sci_controller_release_frame(ihost, frame_index);
1474
1475 return SCI_SUCCESS;
1476 }
1477
1478 case SCI_REQ_TASK_WAIT_TC_RESP:
1479 sci_io_request_copy_response(ireq);
1480 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1481 sci_controller_release_frame(ihost, frame_index);
1482 return SCI_SUCCESS;
1483
1484 case SCI_REQ_SMP_WAIT_RESP: {
1485 struct smp_resp *rsp_hdr = &ireq->smp.rsp;
1486 void *frame_header;
1487
1488 sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1489 frame_index,
1490 &frame_header);
1491
1492 /* byte swap the header. */
1493 word_cnt = SMP_RESP_HDR_SZ / sizeof(u32);
1494 sci_swab32_cpy(rsp_hdr, frame_header, word_cnt);
1495
1496 if (rsp_hdr->frame_type == SMP_RESPONSE) {
1497 void *smp_resp;
1498
1499 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1500 frame_index,
1501 &smp_resp);
1502
1503 word_cnt = (sizeof(struct smp_resp) - SMP_RESP_HDR_SZ) /
1504 sizeof(u32);
1505
1506 sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ,
1507 smp_resp, word_cnt);
1508
1509 ireq->scu_status = SCU_TASK_DONE_GOOD;
1510 ireq->sci_status = SCI_SUCCESS;
1511 sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP);
1512 } else {
1513 /*
1514 * This was not a response frame; why did it get
1515 * forwarded?
1516 */
1517 dev_err(&ihost->pdev->dev,
1518 "%s: SCIC SMP Request 0x%p received unexpected "
1519 "frame %d type 0x%02x\n",
1520 __func__,
1521 ireq,
1522 frame_index,
1523 rsp_hdr->frame_type);
1524
1525 ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR;
1526 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1527 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1528 }
1529
1530 sci_controller_release_frame(ihost, frame_index);
1531
1532 return SCI_SUCCESS;
1533 }
1534
1535 case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
1536 return sci_stp_request_udma_general_frame_handler(ireq,
1537 frame_index);
1538
1539 case SCI_REQ_STP_UDMA_WAIT_D2H:
1540 /* Use the general frame handler to copy the response data */
1541 status = sci_stp_request_udma_general_frame_handler(ireq, frame_index);
1542
1543 if (status != SCI_SUCCESS)
1544 return status;
1545
1546 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1547 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1548 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1549 return SCI_SUCCESS;
1550
1551 case SCI_REQ_STP_NON_DATA_WAIT_D2H: {
1552 struct dev_to_host_fis *frame_header;
1553 u32 *frame_buffer;
1554
1555 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1556 frame_index,
1557 (void **)&frame_header);
1558
1559 if (status != SCI_SUCCESS) {
1560 dev_err(&ihost->pdev->dev,
1561 "%s: SCIC IO Request 0x%p could not get frame "
1562 "header for frame index %d, status %x\n",
1563 __func__,
1564 stp_req,
1565 frame_index,
1566 status);
1567
1568 return status;
1569 }
1570
1571 switch (frame_header->fis_type) {
1572 case FIS_REGD2H:
1573 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1574 frame_index,
1575 (void **)&frame_buffer);
1576
1577 sci_controller_copy_sata_response(&ireq->stp.rsp,
1578 frame_header,
1579 frame_buffer);
1580
1581 /* The command has completed with error */
1582 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1583 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1584 break;
1585
1586 default:
1587 dev_warn(&ihost->pdev->dev,
1588 "%s: IO Request:0x%p Frame Id:%d protocol "
1589 "violation occurred\n", __func__, stp_req,
1590 frame_index);
1591
1592 ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
1593 ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
1594 break;
1595 }
1596
1597 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1598
1599 /* Frame has been decoded; return it to the controller */
1600 sci_controller_release_frame(ihost, frame_index);
1601
1602 return status;
1603 }
1604
1605 case SCI_REQ_STP_PIO_WAIT_FRAME: {
1606 struct sas_task *task = isci_request_access_task(ireq);
1607 struct dev_to_host_fis *frame_header;
1608 u32 *frame_buffer;
1609
1610 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1611 frame_index,
1612 (void **)&frame_header);
1613
1614 if (status != SCI_SUCCESS) {
1615 dev_err(&ihost->pdev->dev,
1616 "%s: SCIC IO Request 0x%p could not get frame "
1617 "header for frame index %d, status %x\n",
1618 __func__, stp_req, frame_index, status);
1619 return status;
1620 }
1621
1622 switch (frame_header->fis_type) {
1623 case FIS_PIO_SETUP:
1624 /* Get from the frame buffer the PIO Setup Data */
1625 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1626 frame_index,
1627 (void **)&frame_buffer);
1628
1629 /* Get the data from the PIO Setup.  The SCU hardware
1630 * returns the first word in the frame_header and the rest
1631 * of the data is in the frame buffer, so we need to
1632 * back up one dword.
1633 */
1634
1635 /* transfer_count: first 16bits in the 4th dword */
1636 stp_req->pio_len = frame_buffer[3] & 0xffff;
1637
1638 /* status: 4th byte in the 3rd dword */
1639 stp_req->status = (frame_buffer[2] >> 24) & 0xff;
1640
1641 sci_controller_copy_sata_response(&ireq->stp.rsp,
1642 frame_header,
1643 frame_buffer);
1644
1645 ireq->stp.rsp.status = stp_req->status;
1646
1647 /* The next state is dependent on whether the
1648 * request was PIO Data-in or Data-out.
1649 */
1650 if (task->data_dir == DMA_FROM_DEVICE) {
1651 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN);
1652 } else if (task->data_dir == DMA_TO_DEVICE) {
1653 /* Transmit data */
1654 status = sci_stp_request_pio_data_out_transmit_data(ireq);
1655 if (status != SCI_SUCCESS)
1656 break;
1657 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT);
1658 }
1659 break;
1660
1661 case FIS_SETDEVBITS:
1662 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1663 break;
1664
1665 case FIS_REGD2H:
1666 if (frame_header->status & ATA_BUSY) {
1667 /*
1668 * Now why is the drive sending a D2H Register
1669 * FIS when it is still busy? Do nothing since
1670 * we are still in the right state.
1671 */
1672 dev_dbg(&ihost->pdev->dev,
1673 "%s: SCIC PIO Request 0x%p received "
1674 "D2H Register FIS with BSY status "
1675 "0x%x\n",
1676 __func__,
1677 stp_req,
1678 frame_header->status);
1679 break;
1680 }
1681
1682 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1683 frame_index,
1684 (void **)&frame_buffer);
1685
1686 sci_controller_copy_sata_response(&ireq->stp.rsp,
1687 frame_header,
1688 frame_buffer);
1689
1690 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1691 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1692 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1693 break;
1694
1695 default:
1696 /* FIXME: what do we do here? */
1697 break;
1698 }
1699
1700 /* Frame is decoded; return it to the controller */
1701 sci_controller_release_frame(ihost, frame_index);
1702
1703 return status;
1704 }
1705
1706 case SCI_REQ_STP_PIO_DATA_IN: {
1707 struct dev_to_host_fis *frame_header;
1708 struct sata_fis_data *frame_buffer;
1709
1710 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1711 frame_index,
1712 (void **)&frame_header);
1713
1714 if (status != SCI_SUCCESS) {
1715 dev_err(&ihost->pdev->dev,
1716 "%s: SCIC IO Request 0x%p could not get frame "
1717 "header for frame index %d, status %x\n",
1718 __func__,
1719 stp_req,
1720 frame_index,
1721 status);
1722 return status;
1723 }
1724
1725 if (frame_header->fis_type != FIS_DATA) {
1726 dev_err(&ihost->pdev->dev,
1727 "%s: SCIC PIO Request 0x%p received frame %d "
1728 "with fis type 0x%02x when expecting a data "
1729 "fis.\n",
1730 __func__,
1731 stp_req,
1732 frame_index,
1733 frame_header->fis_type);
1734
1735 ireq->scu_status = SCU_TASK_DONE_GOOD;
1736 ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT;
1737 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1738
1739 /* Frame is decoded; return it to the controller */
1740 sci_controller_release_frame(ihost, frame_index);
1741 return status;
1742 }
1743
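		/* A negative SGL index means there is no buffer to copy
		 * into yet; hold the frame via saved_rx_frame_index (it is
		 * released later in sci_request_complete()) instead of
		 * releasing it here.
		 */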
1744 if (stp_req->sgl.index < 0) {
1745 ireq->saved_rx_frame_index = frame_index;
1746 stp_req->pio_len = 0;
1747 } else {
1748 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1749 frame_index,
1750 (void **)&frame_buffer);
1751
1752 status = sci_stp_request_pio_data_in_copy_data(stp_req,
1753 (u8 *)frame_buffer);
1754
1755 /* Frame is decoded; return it to the controller */
1756 sci_controller_release_frame(ihost, frame_index);
1757 }
1758
1759 /* Check for the end of the transfer: are there more
1760 * bytes remaining for this data transfer?
1761 */
1762 if (status != SCI_SUCCESS || stp_req->pio_len != 0)
1763 return status;
1764
1765 if ((stp_req->status & ATA_BUSY) == 0) {
1766 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1767 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1768 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1769 } else {
1770 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1771 }
1772 return status;
1773 }
1774
1775 case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: {
1776 struct dev_to_host_fis *frame_header;
1777 u32 *frame_buffer;
1778
1779 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1780 frame_index,
1781 (void **)&frame_header);
1782 if (status != SCI_SUCCESS) {
1783 dev_err(&ihost->pdev->dev,
1784 "%s: SCIC IO Request 0x%p could not get frame "
1785 "header for frame index %d, status %x\n",
1786 __func__,
1787 stp_req,
1788 frame_index,
1789 status);
1790 return status;
1791 }
1792
1793 switch (frame_header->fis_type) {
1794 case FIS_REGD2H:
1795 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1796 frame_index,
1797 (void **)&frame_buffer);
1798
1799 sci_controller_copy_sata_response(&ireq->stp.rsp,
1800 frame_header,
1801 frame_buffer);
1802
1803 /* The command has completed with error */
1804 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1805 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1806 break;
1807
1808 default:
1809 dev_warn(&ihost->pdev->dev,
1810 "%s: IO Request:0x%p Frame Id:%d protocol "
1811 "violation occurred\n",
1812 __func__,
1813 stp_req,
1814 frame_index);
1815
1816 ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
1817 ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
1818 break;
1819 }
1820
1821 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1822
1823 /* Frame has been decoded; return it to the controller */
1824 sci_controller_release_frame(ihost, frame_index);
1825
1826 return status;
1827 }
1828 case SCI_REQ_ABORTING:
1829 /*
1830 * TODO: Is it even possible to get an unsolicited frame in the
1831 * aborting state?
1832 */
1833 sci_controller_release_frame(ihost, frame_index);
1834 return SCI_SUCCESS;
1835
1836 default:
1837 dev_warn(&ihost->pdev->dev,
1838 "%s: SCIC IO Request given unexpected frame %x while "
1839 "in state %d\n",
1840 __func__,
1841 frame_index,
1842 state);
1843
1844 sci_controller_release_frame(ihost, frame_index);
1845 return SCI_FAILURE_INVALID_STATE;
1846 }
1847}
1848
1849static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq,
1850 u32 completion_code)
1851{
1852 enum sci_status status = SCI_SUCCESS;
1853
1854 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1855 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1856 ireq->scu_status = SCU_TASK_DONE_GOOD;
1857 ireq->sci_status = SCI_SUCCESS;
1858 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1859 break;
1860 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
1861 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
1862 /* We must check the response buffer to see if the D2H
1863 * Register FIS was received before we got the TC
1864 * completion.
1865 */
1866 if (ireq->stp.rsp.fis_type == FIS_REGD2H) {
1867 sci_remote_device_suspend(ireq->target_device,
1868 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
1869
1870 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1871 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1872 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1873 } else {
1874 /* If we have an error completion status for the
1875 * TC then we can expect a D2H register FIS from
1876 * the device so we must change state to wait
1877 * for it
1878 */
1879 sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H);
1880 }
1881 break;
1882
1883 /* TODO: Check to see if any of these completion statuses need to
1884 * wait for the device-to-host Register FIS.
1885 */
1886 /* TODO: We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR;
1887 * this occurs only for B0.
1888 */
1889 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
1890 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
1891 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
1892 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
1893 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
1894 sci_remote_device_suspend(ireq->target_device,
1895 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
1896 /* Fall through to the default case */
1897 default:
1898 /* All other completion statuses cause the IO to be complete. */
1899 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1900 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1901 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1902 break;
1903 }
1904
1905 return status;
1906}
1907
1908static enum sci_status
1909stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq,
1910 u32 completion_code)
1911{
1912 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1913 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1914 ireq->scu_status = SCU_TASK_DONE_GOOD;
1915 ireq->sci_status = SCI_SUCCESS;
1916 sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG);
1917 break;
1918
1919 default:
1920 /*
1921 * All other completion statuses cause the IO to be complete.
1922 * If a NAK was received, then it is up to the user to retry
1923 * the request.
1924 */
1925 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1926 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1927 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1928 break;
1929 }
1930
1931 return SCI_SUCCESS;
1932}
1933
1934static enum sci_status
1935stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq,
1936 u32 completion_code)
1937{
1938 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1939 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1940 ireq->scu_status = SCU_TASK_DONE_GOOD;
1941 ireq->sci_status = SCI_SUCCESS;
1942 sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H);
1943 break;
1944
1945 default:
1946 /* All other completion statuses cause the IO to be complete. If
1947 * a NAK was received, then it is up to the user to retry the
1948 * request.
1949 */
1950 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1951 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1952 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1953 break;
1954 }
1955
1956 return SCI_SUCCESS;
1957}
1958
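/* Dispatch a task-context completion notification to the handler for the
 * request's current state; states without a handler are reported as an
 * invalid-state failure.
 */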
1959enum sci_status
1960sci_io_request_tc_completion(struct isci_request *ireq,
1961 u32 completion_code)
1962{
1963 enum sci_base_request_states state;
1964 struct isci_host *ihost = ireq->owning_controller;
1965
1966 state = ireq->sm.current_state_id;
1967
1968 switch (state) {
1969 case SCI_REQ_STARTED:
1970 return request_started_state_tc_event(ireq, completion_code);
1971
1972 case SCI_REQ_TASK_WAIT_TC_COMP:
1973 return ssp_task_request_await_tc_event(ireq,
1974 completion_code);
1975
1976 case SCI_REQ_SMP_WAIT_RESP:
1977 return smp_request_await_response_tc_event(ireq,
1978 completion_code);
1979
1980 case SCI_REQ_SMP_WAIT_TC_COMP:
1981 return smp_request_await_tc_event(ireq, completion_code);
1982
1983 case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
1984 return stp_request_udma_await_tc_event(ireq,
1985 completion_code);
1986
1987 case SCI_REQ_STP_NON_DATA_WAIT_H2D:
1988 return stp_request_non_data_await_h2d_tc_event(ireq,
1989 completion_code);
1990
1991 case SCI_REQ_STP_PIO_WAIT_H2D:
1992 return stp_request_pio_await_h2d_completion_tc_event(ireq,
1993 completion_code);
1994
1995 case SCI_REQ_STP_PIO_DATA_OUT:
1996 return pio_data_out_tx_done_tc_event(ireq, completion_code);
1997
1998 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
1999 return stp_request_soft_reset_await_h2d_asserted_tc_event(ireq,
2000 completion_code);
2001
2002 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
2003 return stp_request_soft_reset_await_h2d_diagnostic_tc_event(ireq,
2004 completion_code);
2005
2006 case SCI_REQ_ABORTING:
2007 return request_aborting_state_tc_event(ireq,
2008 completion_code);
2009
2010 default:
2011 dev_warn(&ihost->pdev->dev,
2012 "%s: SCIC IO Request given task completion "
2013 "notification %x while in wrong state %d\n",
2014 __func__,
2015 completion_code,
2016 state);
2017 return SCI_FAILURE_INVALID_STATE;
2018 }
2019}
2020
2021/**
2022 * isci_request_process_response_iu() - This function sets the status and
2023 * response iu, in the task struct, from the request object for the upper
2024 * layer driver.
2025 * @task: This parameter is the task struct from the upper layer driver.
2026 * @resp_iu: This parameter points to the response iu of the completed request.
2027 * @dev: This parameter specifies the linux device struct.
2028 *
2029 * none.
2030 */
2031static void isci_request_process_response_iu(
2032 struct sas_task *task,
2033 struct ssp_response_iu *resp_iu,
2034 struct device *dev)
2035{
2036 dev_dbg(dev,
2037 "%s: resp_iu = %p "
2038 "resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
2039 "resp_iu->response_data_len = %x, "
2040 "resp_iu->sense_data_len = %x\nrepsonse data: ",
2041 __func__,
2042 resp_iu,
2043 resp_iu->status,
2044 resp_iu->datapres,
2045 resp_iu->response_data_len,
2046 resp_iu->sense_data_len);
2047
2048 task->task_status.stat = resp_iu->status;
2049
2050 /* libsas updates the task status fields based on the response iu. */
2051 sas_ssp_task_response(dev, task, resp_iu);
2052}
2053
2054/**
2055 * isci_request_set_open_reject_status() - This function prepares the I/O
2056 * completion for OPEN_REJECT conditions.
2057 * @request: This parameter is the completed isci_request object.
 * @task: This parameter is the sas_task to receive the completion status.
2058 * @response_ptr: This parameter specifies the service response for the I/O.
2059 * @status_ptr: This parameter specifies the exec status for the I/O.
2060 * @complete_to_host_ptr: This parameter specifies the action to be taken by
2061 * the LLDD with respect to completing this request or forcing an abort
2062 * condition on the I/O.
2063 * @open_rej_reason: This parameter specifies the encoded reason for the
2064 * abandon-class reject.
2065 *
2066 * none.
2067 */
2068static void isci_request_set_open_reject_status(
2069 struct isci_request *request,
2070 struct sas_task *task,
2071 enum service_response *response_ptr,
2072 enum exec_status *status_ptr,
2073 enum isci_completion_selection *complete_to_host_ptr,
2074 enum sas_open_rej_reason open_rej_reason)
2075{
2076 /* Task in the target is done. */
2077 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2078 *response_ptr = SAS_TASK_UNDELIVERED;
2079 *status_ptr = SAS_OPEN_REJECT;
2080 *complete_to_host_ptr = isci_perform_normal_io_completion;
2081 task->task_status.open_rej_reason = open_rej_reason;
2082}
2083
2084/**
2085 * isci_request_handle_controller_specific_errors() - This function decodes
2086 * controller-specific I/O completion error conditions.
2087 * @request: This parameter is the completed isci_request object.
 * @idev: This parameter is the remote device for the completed request.
 * @task: This parameter is the sas_task being completed.
2088 * @response_ptr: This parameter specifies the service response for the I/O.
2089 * @status_ptr: This parameter specifies the exec status for the I/O.
2090 * @complete_to_host_ptr: This parameter specifies the action to be taken by
2091 * the LLDD with respect to completing this request or forcing an abort
2092 * condition on the I/O.
2093 *
2094 * none.
2095 */
2096static void isci_request_handle_controller_specific_errors(
2097 struct isci_remote_device *idev,
2098 struct isci_request *request,
2099 struct sas_task *task,
2100 enum service_response *response_ptr,
2101 enum exec_status *status_ptr,
2102 enum isci_completion_selection *complete_to_host_ptr)
2103{
2104 unsigned int cstatus;
2105
2106 cstatus = request->scu_status;
2107
2108 dev_dbg(&request->isci_host->pdev->dev,
2109 "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
2110 "- controller status = 0x%x\n",
2111 __func__, request, cstatus);
2112
2113 /* Decode the controller-specific errors; most
2114 * important is to recognize those conditions in which
2115 * the target may still have a task outstanding that
2116 * must be aborted.
2117 *
2118 * Note that there are SCU completion codes being
2119 * named in the decode below for which SCIC has already
2120 * done work to handle them in a way other than as
2121 * a controller-specific completion code; these are left
2122 * in the decode below for completeness' sake.
2123 */
2124 switch (cstatus) {
2125 case SCU_TASK_DONE_DMASETUP_DIRERR:
2126 /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
2127 case SCU_TASK_DONE_XFERCNT_ERR:
2128 /* Also SCU_TASK_DONE_SMP_UFI_ERR: */
2129 if (task->task_proto == SAS_PROTOCOL_SMP) {
2130 /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
2131 *response_ptr = SAS_TASK_COMPLETE;
2132
2133 /* See if the device has been/is being stopped. Note
2134 * that we ignore the quiesce state, since we are
2135 * concerned about the actual device state.
2136 */
2137 if (!idev)
2138 *status_ptr = SAS_DEVICE_UNKNOWN;
2139 else
2140 *status_ptr = SAS_ABORTED_TASK;
2141
2142 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2143
2144 *complete_to_host_ptr =
2145 isci_perform_normal_io_completion;
2146 } else {
2147 /* Task in the target is not done. */
2148 *response_ptr = SAS_TASK_UNDELIVERED;
2149
2150 if (!idev)
2151 *status_ptr = SAS_DEVICE_UNKNOWN;
2152 else
2153 *status_ptr = SAM_STAT_TASK_ABORTED;
2154
2155 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2156
2157 *complete_to_host_ptr =
2158 isci_perform_error_io_completion;
2159 }
2160
2161 break;
2162
2163 case SCU_TASK_DONE_CRC_ERR:
2164 case SCU_TASK_DONE_NAK_CMD_ERR:
2165 case SCU_TASK_DONE_EXCESS_DATA:
2166 case SCU_TASK_DONE_UNEXP_FIS:
2167 /* Also SCU_TASK_DONE_UNEXP_RESP: */
2168 case SCU_TASK_DONE_VIIT_ENTRY_NV: /* TODO - conditions? */
2169 case SCU_TASK_DONE_IIT_ENTRY_NV: /* TODO - conditions? */
2170 case SCU_TASK_DONE_RNCNV_OUTBOUND: /* TODO - conditions? */
2171 /* These are conditions in which the target
2172 * has completed the task, so that no cleanup
2173 * is necessary.
2174 */
2175 *response_ptr = SAS_TASK_COMPLETE;
2176
2177 /* See if the device has been/is being stopped. Note
2178 * that we ignore the quiesce state, since we are
2179 * concerned about the actual device state.
2180 */
2181 if (!idev)
2182 *status_ptr = SAS_DEVICE_UNKNOWN;
2183 else
2184 *status_ptr = SAS_ABORTED_TASK;
2185
2186 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2187
2188 *complete_to_host_ptr = isci_perform_normal_io_completion;
2189 break;
2190
2191
2192 /* Note that the only open reject completion codes seen here will be
2193 * abandon-class codes; all others are automatically retried in the SCU.
2194 */
2195 case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
2196
2197 isci_request_set_open_reject_status(
2198 request, task, response_ptr, status_ptr,
2199 complete_to_host_ptr, SAS_OREJ_WRONG_DEST);
2200 break;
2201
2202 case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
2203
2204 /* Note - the return of AB0 will change when
2205 * libsas implements detection of zone violations.
2206 */
2207 isci_request_set_open_reject_status(
2208 request, task, response_ptr, status_ptr,
2209 complete_to_host_ptr, SAS_OREJ_RESV_AB0);
2210 break;
2211
2212 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2213
2214 isci_request_set_open_reject_status(
2215 request, task, response_ptr, status_ptr,
2216 complete_to_host_ptr, SAS_OREJ_RESV_AB1);
2217 break;
2218
2219 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2220
2221 isci_request_set_open_reject_status(
2222 request, task, response_ptr, status_ptr,
2223 complete_to_host_ptr, SAS_OREJ_RESV_AB2);
2224 break;
2225
2226 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2227
2228 isci_request_set_open_reject_status(
2229 request, task, response_ptr, status_ptr,
2230 complete_to_host_ptr, SAS_OREJ_RESV_AB3);
2231 break;
2232
2233 case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2234
2235 isci_request_set_open_reject_status(
2236 request, task, response_ptr, status_ptr,
2237 complete_to_host_ptr, SAS_OREJ_BAD_DEST);
2238 break;
2239
2240 case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
2241
2242 isci_request_set_open_reject_status(
2243 request, task, response_ptr, status_ptr,
2244 complete_to_host_ptr, SAS_OREJ_STP_NORES);
2245 break;
2246
2247 case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
2248
2249 isci_request_set_open_reject_status(
2250 request, task, response_ptr, status_ptr,
2251 complete_to_host_ptr, SAS_OREJ_EPROTO);
2252 break;
2253
2254 case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
2255
2256 isci_request_set_open_reject_status(
2257 request, task, response_ptr, status_ptr,
2258 complete_to_host_ptr, SAS_OREJ_CONN_RATE);
2259 break;
2260
2261 case SCU_TASK_DONE_LL_R_ERR:
2262 /* Also SCU_TASK_DONE_ACK_NAK_TO: */
2263 case SCU_TASK_DONE_LL_PERR:
2264 case SCU_TASK_DONE_LL_SY_TERM:
2265 /* Also SCU_TASK_DONE_NAK_ERR:*/
2266 case SCU_TASK_DONE_LL_LF_TERM:
2267 /* Also SCU_TASK_DONE_DATA_LEN_ERR: */
2268 case SCU_TASK_DONE_LL_ABORT_ERR:
2269 case SCU_TASK_DONE_SEQ_INV_TYPE:
2270 /* Also SCU_TASK_DONE_UNEXP_XR: */
2271 case SCU_TASK_DONE_XR_IU_LEN_ERR:
2272 case SCU_TASK_DONE_INV_FIS_LEN:
2273 /* Also SCU_TASK_DONE_XR_WD_LEN: */
2274 case SCU_TASK_DONE_SDMA_ERR:
2275 case SCU_TASK_DONE_OFFSET_ERR:
2276 case SCU_TASK_DONE_MAX_PLD_ERR:
2277 case SCU_TASK_DONE_LF_ERR:
2278 case SCU_TASK_DONE_SMP_RESP_TO_ERR: /* Escalate to dev reset? */
2279 case SCU_TASK_DONE_SMP_LL_RX_ERR:
2280 case SCU_TASK_DONE_UNEXP_DATA:
2281 case SCU_TASK_DONE_UNEXP_SDBFIS:
2282 case SCU_TASK_DONE_REG_ERR:
2283 case SCU_TASK_DONE_SDB_ERR:
2284 case SCU_TASK_DONE_TASK_ABORT:
2285 default:
2286 /* Task in the target is not done. */
2287 *response_ptr = SAS_TASK_UNDELIVERED;
2288 *status_ptr = SAM_STAT_TASK_ABORTED;
2289
2290 if (task->task_proto == SAS_PROTOCOL_SMP) {
2291 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2292
2293 *complete_to_host_ptr = isci_perform_normal_io_completion;
2294 } else {
2295 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2296
2297 *complete_to_host_ptr = isci_perform_error_io_completion;
2298 }
2299 break;
2300 }
2301}
2302
2303/**
2304 * isci_task_save_for_upper_layer_completion() - This function saves the
2305 * request for later completion to the upper layer driver.
2306 * @host: This parameter is a pointer to the host on which the request
2307 * should be queued (either as an error or success).
2308 * @request: This parameter is the completed request.
2309 * @response: This parameter is the response code for the completed task.
2310 * @status: This parameter is the status code for the completed task.
2311 *
2312 * none.
2313 */
2314static void isci_task_save_for_upper_layer_completion(
2315 struct isci_host *host,
2316 struct isci_request *request,
2317 enum service_response response,
2318 enum exec_status status,
2319 enum isci_completion_selection task_notification_selection)
2320{
2321 struct sas_task *task = isci_request_access_task(request);
2322
2323 task_notification_selection
2324 = isci_task_set_completion_status(task, response, status,
2325 task_notification_selection);
2326
2327 /* Tasks aborted specifically by a call to the lldd_abort_task
2328 * function should not be completed to the host in the regular path.
2329 */
2330 switch (task_notification_selection) {
2331
2332 case isci_perform_normal_io_completion:
2333
2334 /* Normal notification (task_done) */
2335 dev_dbg(&host->pdev->dev,
2336 "%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n",
2337 __func__,
2338 task,
2339 task->task_status.resp, response,
2340 task->task_status.stat, status);
2341 /* Add to the completed list. */
2342 list_add(&request->completed_node,
2343 &host->requests_to_complete);
2344
2345 /* Take the request off the device's pending request list. */
2346 list_del_init(&request->dev_node);
2347 break;
2348
2349 case isci_perform_aborted_io_completion:
2350 /* No notification to libsas because this request is
2351 * already in the abort path.
2352 */
2353 dev_dbg(&host->pdev->dev,
2354 "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n",
2355 __func__,
2356 task,
2357 task->task_status.resp, response,
2358 task->task_status.stat, status);
2359
2360 /* Wake up whatever process was waiting for this
2361 * request to complete.
2362 */
2363 WARN_ON(request->io_request_completion == NULL);
2364
2365 if (request->io_request_completion != NULL) {
2366
2367 /* Signal whoever is waiting that this
2368 * request is complete.
2369 */
2370 complete(request->io_request_completion);
2371 }
2372 break;
2373
2374 case isci_perform_error_io_completion:
2375 /* Use sas_task_abort */
2376 dev_dbg(&host->pdev->dev,
2377 "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n",
2378 __func__,
2379 task,
2380 task->task_status.resp, response,
2381 task->task_status.stat, status);
2382 /* Add to the aborted list. */
2383 list_add(&request->completed_node,
2384 &host->requests_to_errorback);
2385 break;
2386
2387 default:
2388 dev_dbg(&host->pdev->dev,
2389 "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n",
2390 __func__,
2391 task,
2392 task->task_status.resp, response,
2393 task->task_status.stat, status);
2394
2395 /* Add to the error to libsas list. */
2396 list_add(&request->completed_node,
2397 &host->requests_to_errorback);
2398 break;
2399 }
2400}
2401
2402static void isci_request_process_stp_response(struct sas_task *task,
2403 void *response_buffer)
2404{
2405 struct dev_to_host_fis *d2h_reg_fis = response_buffer;
2406 struct task_status_struct *ts = &task->task_status;
2407 struct ata_task_resp *resp = (void *)&ts->buf[0];
2408
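	/* The offsets below reflect the SCU SATA response-buffer layout as
	 * implied by this code (the FIS length word at byte offset 6 and
	 * the 24-byte ending D2H Register FIS at byte offset 16); this is
	 * an assumption drawn from the code, not from a hardware spec.
	 */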
2409 resp->frame_len = le16_to_cpu(*(__le16 *)(response_buffer + 6));
2410 memcpy(&resp->ending_fis[0], response_buffer + 16, 24);
2411 ts->buf_valid_size = sizeof(*resp);
2412
2413 /*
2414 * If the device fault bit is set in the status register, then
2415 * report SAS_PROTO_RESPONSE so libsas decodes the ending FIS.
2416 */
2417 if (d2h_reg_fis->status & ATA_DF)
2418 ts->stat = SAS_PROTO_RESPONSE;
2419 else
2420 ts->stat = SAM_STAT_GOOD;
2421
2422 ts->resp = SAS_TASK_COMPLETE;
2423}
2424
2425static void isci_request_io_request_complete(struct isci_host *ihost,
2426 struct isci_request *request,
2427 enum sci_io_status completion_status)
2428{
2429 struct sas_task *task = isci_request_access_task(request);
2430 struct ssp_response_iu *resp_iu;
2431 void *resp_buf;
2432 unsigned long task_flags;
2433 struct isci_remote_device *idev = isci_lookup_device(task->dev);
2434 enum service_response response = SAS_TASK_UNDELIVERED;
2435 enum exec_status status = SAS_ABORTED_TASK;
2436 enum isci_request_status request_status;
2437 enum isci_completion_selection complete_to_host
2438 = isci_perform_normal_io_completion;
2439
2440 dev_dbg(&ihost->pdev->dev,
2441 "%s: request = %p, task = %p,\n"
2442 "task->data_dir = %d completion_status = 0x%x\n",
2443 __func__,
2444 request,
2445 task,
2446 task->data_dir,
2447 completion_status);
2448
2449 spin_lock(&request->state_lock);
2450 request_status = request->status;
2451
2452 /* Decode the request status. Note that if the request has been
2453 * aborted by a task management function, we don't care
2454 * what the status is.
2455 */
2456 switch (request_status) {
2457
2458 case aborted:
2459 /* "aborted" indicates that the request was aborted by a task
2460 * management function, since once a task management request is
2461 * performed by the device, the request only completes because
2462 * of the subsequent driver terminate.
2463 *
2464 * Aborted also means an external thread is explicitly managing
2465 * this request, so that we do not complete it up the stack.
2466 *
2467 * The target is still there (since the TMF was successful).
2468 */
2469 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2470 response = SAS_TASK_COMPLETE;
2471
2472 /* See if the device has been/is being stopped. Note
2473 * that we ignore the quiesce state, since we are
2474 * concerned about the actual device state.
2475 */
2476 if (!idev)
2477 status = SAS_DEVICE_UNKNOWN;
2478 else
2479 status = SAS_ABORTED_TASK;
2480
2481 complete_to_host = isci_perform_aborted_io_completion;
2482 /* This was an aborted request. */
2483
2484 spin_unlock(&request->state_lock);
2485 break;
2486
2487 case aborting:
2488 /* aborting means that the task management function tried and
2489 * failed to abort the request. We need to note the request
2490 * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the
2491 * target as down.
2492 *
2493 * Aborting also means an external thread is explicitly managing
2494 * this request, so that we do not complete it up the stack.
2495 */
2496 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2497 response = SAS_TASK_UNDELIVERED;
2498
2499 if (!idev)
2500 /* The device has been /is being stopped. Note that
2501 * we ignore the quiesce state, since we are
2502 * concerned about the actual device state.
2503 */
2504 status = SAS_DEVICE_UNKNOWN;
2505 else
2506 status = SAS_PHY_DOWN;
2507
2508 complete_to_host = isci_perform_aborted_io_completion;
2509
2510 /* This was an aborted request. */
2511
2512 spin_unlock(&request->state_lock);
2513 break;
2514
2515 case terminating:
2516
2517 /* This was a terminated request.  This happens when
2518 * the I/O is being terminated because of an action on
2519 * the device (reset, tear down, etc.), and the I/O needs
2520 * to be completed up the stack.
2521 */
2522 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2523 response = SAS_TASK_UNDELIVERED;
2524
2525 /* See if the device has been/is being stopped. Note
2526 * that we ignore the quiesce state, since we are
2527 * concerned about the actual device state.
2528 */
2529 if (!idev)
2530 status = SAS_DEVICE_UNKNOWN;
2531 else
2532 status = SAS_ABORTED_TASK;
2533
2534 complete_to_host = isci_perform_aborted_io_completion;
2535
2536 /* This was a terminated request. */
2537
2538 spin_unlock(&request->state_lock);
2539 break;
2540
2541 case dead:
2542 /* This was a terminated request that timed-out during the
2543 * termination process. There is no task to complete to
2544 * libsas.
2545 */
2546 complete_to_host = isci_perform_normal_io_completion;
2547 spin_unlock(&request->state_lock);
2548 break;
2549
2550 default:
2551
2552 /* The request is done from an SCU HW perspective. */
2553 request->status = completed;
2554
2555 spin_unlock(&request->state_lock);
2556
2557 /* This is an active request being completed from the core. */
2558 switch (completion_status) {
2559
2560 case SCI_IO_FAILURE_RESPONSE_VALID:
2561 dev_dbg(&ihost->pdev->dev,
2562 "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
2563 __func__,
2564 request,
2565 task);
2566
2567 if (sas_protocol_ata(task->task_proto)) {
2568 resp_buf = &request->stp.rsp;
2569 isci_request_process_stp_response(task,
2570 resp_buf);
2571 } else if (SAS_PROTOCOL_SSP == task->task_proto) {
2572
2573 /* crack the iu response buffer. */
2574 resp_iu = &request->ssp.rsp;
2575 isci_request_process_response_iu(task, resp_iu,
2576 &ihost->pdev->dev);
2577
2578 } else if (SAS_PROTOCOL_SMP == task->task_proto) {
2579
2580 dev_err(&ihost->pdev->dev,
2581 "%s: SCI_IO_FAILURE_RESPONSE_VALID: "
2582 "SAS_PROTOCOL_SMP protocol\n",
2583 __func__);
2584
2585 } else
2586 dev_err(&ihost->pdev->dev,
2587 "%s: unknown protocol\n", __func__);
2588
2589 /* use the task status set in the task struct by the
2590 * isci_request_process_response_iu call.
2591 */
2592 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2593 response = task->task_status.resp;
2594 status = task->task_status.stat;
2595 break;
2596
2597 case SCI_IO_SUCCESS:
2598 case SCI_IO_SUCCESS_IO_DONE_EARLY:
2599
2600 response = SAS_TASK_COMPLETE;
2601 status = SAM_STAT_GOOD;
2602 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2603
2604 if (task->task_proto == SAS_PROTOCOL_SMP) {
2605 void *rsp = &request->smp.rsp;
2606
2607 dev_dbg(&ihost->pdev->dev,
2608 "%s: SMP protocol completion\n",
2609 __func__);
2610
2611 sg_copy_from_buffer(
2612 &task->smp_task.smp_resp, 1,
2613 rsp, sizeof(struct smp_resp));
2614 } else if (completion_status
2615 == SCI_IO_SUCCESS_IO_DONE_EARLY) {
2616
2617 /* This was an SSP / STP / SATA transfer.
2618 * There is a possibility that less data than
2619 * the maximum was transferred.
2620 */
2621 u32 transferred_length = sci_req_tx_bytes(request);
2622
2623 task->task_status.residual
2624 = task->total_xfer_len - transferred_length;
2625
2626 /* If there were residual bytes, call this an
2627 * underrun.
2628 */
2629 if (task->task_status.residual != 0)
2630 status = SAS_DATA_UNDERRUN;
2631
2632 dev_dbg(&ihost->pdev->dev,
2633 "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
2634 __func__,
2635 status);
2636
2637 } else
2638 dev_dbg(&ihost->pdev->dev,
2639 "%s: SCI_IO_SUCCESS\n",
2640 __func__);
2641
2642 break;
2643
2644 case SCI_IO_FAILURE_TERMINATED:
2645 dev_dbg(&ihost->pdev->dev,
2646 "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
2647 __func__,
2648 request,
2649 task);
2650
2651 /* The request was terminated explicitly. No handling
2652 * is needed in the SCSI error handler path.
2653 */
2654 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2655 response = SAS_TASK_UNDELIVERED;
2656
2657 /* See if the device has been/is being stopped. Note
2658 * that we ignore the quiesce state, since we are
2659 * concerned about the actual device state.
2660 */
2661 if (!idev)
2662 status = SAS_DEVICE_UNKNOWN;
2663 else
2664 status = SAS_ABORTED_TASK;
2665
2666 complete_to_host = isci_perform_normal_io_completion;
2667 break;
2668
2669 case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
2670
2671 isci_request_handle_controller_specific_errors(
2672 idev, request, task, &response, &status,
2673 &complete_to_host);
2674
2675 break;
2676
2677 case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
2678 /* This is a special case, in that the I/O completion
2679 * is telling us that the device needs a reset.
2680 * In order for the device reset condition to be
2681 * noticed, the I/O has to be handled in the error
2682 * handler. Set the reset flag and cause the
2683 * SCSI error thread to be scheduled.
2684 */
2685 spin_lock_irqsave(&task->task_state_lock, task_flags);
2686 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
2687 spin_unlock_irqrestore(&task->task_state_lock, task_flags);
2688
2689 /* Fail the I/O. */
2690 response = SAS_TASK_UNDELIVERED;
2691 status = SAM_STAT_TASK_ABORTED;
2692
2693 complete_to_host = isci_perform_error_io_completion;
2694 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2695 break;
2696
2697 case SCI_FAILURE_RETRY_REQUIRED:
2698
2699 /* Fail the I/O so it can be retried. */
2700 response = SAS_TASK_UNDELIVERED;
2701 if (!idev)
2702 status = SAS_DEVICE_UNKNOWN;
2703 else
2704 status = SAS_ABORTED_TASK;
2705
2706 complete_to_host = isci_perform_normal_io_completion;
2707 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2708 break;
2709
2710
2711 default:
2712 /* Catch any otherwise unhandled error codes here. */
2713 dev_dbg(&ihost->pdev->dev,
2714 "%s: invalid completion code: 0x%x - "
2715 "isci_request = %p\n",
2716 __func__, completion_status, request);
2717
2718 response = SAS_TASK_UNDELIVERED;
2719
2720 /* See if the device has been/is being stopped. Note
2721 * that we ignore the quiesce state, since we are
2722 * concerned about the actual device state.
2723 */
2724 if (!idev)
2725 status = SAS_DEVICE_UNKNOWN;
2726 else
2727 status = SAS_ABORTED_TASK;
2728
2729 if (SAS_PROTOCOL_SMP == task->task_proto) {
2730 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2731 complete_to_host = isci_perform_normal_io_completion;
2732 } else {
2733 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2734 complete_to_host = isci_perform_error_io_completion;
2735 }
2736 break;
2737 }
2738 break;
2739 }
2740
2741 switch (task->task_proto) {
2742 case SAS_PROTOCOL_SSP:
2743 if (task->data_dir == DMA_NONE)
2744 break;
2745 if (task->num_scatter == 0)
2746 /* 0 indicates a single dma address */
2747 dma_unmap_single(&ihost->pdev->dev,
2748 request->zero_scatter_daddr,
2749 task->total_xfer_len, task->data_dir);
2750 else /* unmap the sgl dma addresses */
2751 dma_unmap_sg(&ihost->pdev->dev, task->scatter,
2752 request->num_sg_entries, task->data_dir);
2753 break;
2754 case SAS_PROTOCOL_SMP: {
2755 struct scatterlist *sg = &task->smp_task.smp_req;
2756 struct smp_req *smp_req;
2757 void *kaddr;
2758
2759 dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE);
2760
2761 /* need to swab it back in case the command buffer is re-used */
2762 kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
2763 smp_req = kaddr + sg->offset;
2764 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
2765 kunmap_atomic(kaddr, KM_IRQ0);
2766 break;
2767 }
2768 default:
2769 break;
2770 }
2771
2772 /* Put the completed request on the correct list */
2773 isci_task_save_for_upper_layer_completion(ihost, request, response,
2774 status, complete_to_host
2775 );
2776
2777 /* complete the io request to the core. */
2778 sci_controller_complete_io(ihost, request->target_device, request);
2779 isci_put_device(idev);
2780
2781 /* set terminated handle so it cannot be completed or
2782 * terminated again, and to cause any calls into abort
2783 * task to recognize the already completed case.
2784 */
2785 set_bit(IREQ_TERMINATED, &request->flags);
2786}
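The SCI_IO_SUCCESS_IO_DONE_EARLY handling above boils down to residual = requested - transferred, with any non-zero residual reported as an underrun. A standalone sketch of that arithmetic; the struct and function names here are hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the underrun calculation in the IO_DONE_EARLY branch
 * above: any shortfall between the requested and transferred byte
 * counts becomes the residual and is flagged as a data underrun.
 */
struct xfer_result {
	uint32_t residual;
	int underrun;	/* non-zero when fewer bytes moved than requested */
};

static struct xfer_result check_early_done(uint32_t total_xfer_len,
					   uint32_t transferred)
{
	struct xfer_result r;

	r.residual = total_xfer_len - transferred;
	r.underrun = (r.residual != 0);
	return r;
}

int main(void)
{
	struct xfer_result r = check_early_done(4096, 3584);

	printf("residual=%u underrun=%d\n", r.residual, r.underrun);
	return 0;
}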
2787
2788static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
2789{
2790 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2791 struct domain_device *dev = ireq->target_device->domain_dev;
2792 struct sas_task *task;
2793
2794 /* XXX as hch said always creating an internal sas_task for tmf
2795 * requests would simplify the driver
2796 */
2797 task = ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL;
2798
2799 /* all unaccelerated request types (non-SSP or NCQ) are handled with
2800 * substates
2801 */
2802 if (!task && dev->dev_type == SAS_END_DEV) {
2803 sci_change_state(sm, SCI_REQ_TASK_WAIT_TC_COMP);
2804 } else if (!task &&
2805 (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
2806 isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
2807 sci_change_state(sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED);
2808 } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
2809 sci_change_state(sm, SCI_REQ_SMP_WAIT_RESP);
2810 } else if (task && sas_protocol_ata(task->task_proto) &&
2811 !task->ata_task.use_ncq) {
2812 u32 state;
2813
2814 if (task->data_dir == DMA_NONE)
2815 state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
2816 else if (task->ata_task.dma_xfer)
2817 state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
2818 else /* PIO */
2819 state = SCI_REQ_STP_PIO_WAIT_H2D;
2820
2821 sci_change_state(sm, state);
2822 }
2823}
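The STP branch above selects the wait substate purely from the transfer mode: no data waits on the H2D FIS, DMA waits on task-context completion, and everything else falls back to PIO. A compact sketch of that selection with simplified enum names:

#include <stdio.h>

/* Sketch of the STP substate choice above: the wait state follows
 * directly from the data direction and the DMA flag.
 */
enum stp_state {
	STP_NON_DATA_WAIT_H2D,	/* command only, no data phase */
	STP_UDMA_WAIT_TC_COMP,	/* DMA transfer */
	STP_PIO_WAIT_H2D,	/* PIO fallback */
};

enum data_dir { DIR_NONE, DIR_TO_DEVICE, DIR_FROM_DEVICE };

static enum stp_state pick_stp_state(enum data_dir dir, int dma_xfer)
{
	if (dir == DIR_NONE)
		return STP_NON_DATA_WAIT_H2D;
	if (dma_xfer)
		return STP_UDMA_WAIT_TC_COMP;
	return STP_PIO_WAIT_H2D;
}

int main(void)
{
	printf("%d %d %d\n",
	       pick_stp_state(DIR_NONE, 0),
	       pick_stp_state(DIR_FROM_DEVICE, 1),
	       pick_stp_state(DIR_TO_DEVICE, 0));
	return 0;
}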
2824
2825static void sci_request_completed_state_enter(struct sci_base_state_machine *sm)
2826{
2827 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2828 struct isci_host *ihost = ireq->owning_controller;
2829
2830 /* Tell the SCI_USER that the IO request is complete */
2831 if (!test_bit(IREQ_TMF, &ireq->flags))
2832 isci_request_io_request_complete(ihost, ireq,
2833 ireq->sci_status);
2834 else
2835 isci_task_request_complete(ihost, ireq, ireq->sci_status);
2836}
2837
2838static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm)
2839{
2840 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2841
2842 /* Setting the abort bit in the Task Context is required by the silicon. */
2843 ireq->tc->abort = 1;
2844}
2845
2846static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
2847{
2848 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2849
2850 ireq->target_device->working_request = ireq;
2851}
2852
2853static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
2854{
2855 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2856
2857 ireq->target_device->working_request = ireq;
2858}
2859
2860static void sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm)
2861{
2862 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2863
2864 ireq->target_device->working_request = ireq;
2865}
2866
2867static void sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm)
2868{
2869 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2870 struct scu_task_context *tc = ireq->tc;
2871 struct host_to_dev_fis *h2d_fis;
2872 enum sci_status status;
2873
2874 /* Clear the SRST bit */
2875 h2d_fis = &ireq->stp.cmd;
2876 h2d_fis->control = 0;
2877
2878 /* Clear the TC control bit */
2879 tc->control_frame = 0;
2880
2881 status = sci_controller_continue_io(ireq);
2882 WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n");
2883}
2884
2885static const struct sci_base_state sci_request_state_table[] = {
2886 [SCI_REQ_INIT] = { },
2887 [SCI_REQ_CONSTRUCTED] = { },
2888 [SCI_REQ_STARTED] = {
2889 .enter_state = sci_request_started_state_enter,
2890 },
2891 [SCI_REQ_STP_NON_DATA_WAIT_H2D] = {
2892 .enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter,
2893 },
2894 [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { },
2895 [SCI_REQ_STP_PIO_WAIT_H2D] = {
2896 .enter_state = sci_stp_request_started_pio_await_h2d_completion_enter,
2897 },
2898 [SCI_REQ_STP_PIO_WAIT_FRAME] = { },
2899 [SCI_REQ_STP_PIO_DATA_IN] = { },
2900 [SCI_REQ_STP_PIO_DATA_OUT] = { },
2901 [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
2902 [SCI_REQ_STP_UDMA_WAIT_D2H] = { },
2903 [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = {
2904 .enter_state = sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
2905 },
2906 [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = {
2907 .enter_state = sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
2908 },
2909 [SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { },
2910 [SCI_REQ_TASK_WAIT_TC_COMP] = { },
2911 [SCI_REQ_TASK_WAIT_TC_RESP] = { },
2912 [SCI_REQ_SMP_WAIT_RESP] = { },
2913 [SCI_REQ_SMP_WAIT_TC_COMP] = { },
2914 [SCI_REQ_COMPLETED] = {
2915 .enter_state = sci_request_completed_state_enter,
2916 },
2917 [SCI_REQ_ABORTING] = {
2918 .enter_state = sci_request_aborting_state_enter,
2919 },
2920 [SCI_REQ_FINAL] = { },
2921};
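The table above is the entire dispatch mechanism: a state change indexes the table and invokes the optional .enter_state hook. A minimal self-contained model of the pattern, with simplified types:

#include <stdio.h>

/* Minimal model of the enter-hook state table above: a state change
 * is an array lookup plus an optional callback.
 */
enum req_state { REQ_INIT, REQ_STARTED, REQ_COMPLETED, REQ_NSTATES };

struct state_entry {
	void (*enter_state)(void);
};

static void started_enter(void)   { puts("entered STARTED"); }
static void completed_enter(void) { puts("entered COMPLETED"); }

static const struct state_entry state_table[REQ_NSTATES] = {
	[REQ_STARTED]   = { .enter_state = started_enter },
	[REQ_COMPLETED] = { .enter_state = completed_enter },
};

static void change_state(enum req_state *cur, enum req_state next)
{
	*cur = next;
	if (state_table[next].enter_state)
		state_table[next].enter_state();
}

int main(void)
{
	enum req_state s = REQ_INIT;

	change_state(&s, REQ_STARTED);
	change_state(&s, REQ_COMPLETED);
	return 0;
}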
2922
2923static void
2924sci_general_request_construct(struct isci_host *ihost,
2925 struct isci_remote_device *idev,
2926 struct isci_request *ireq)
2927{
2928 sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT);
2929
2930 ireq->target_device = idev;
2931 ireq->protocol = SCIC_NO_PROTOCOL;
2932 ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
2933
2934 ireq->sci_status = SCI_SUCCESS;
2935 ireq->scu_status = 0;
2936 ireq->post_context = 0xFFFFFFFF;
2937}
2938
2939static enum sci_status
2940sci_io_request_construct(struct isci_host *ihost,
2941 struct isci_remote_device *idev,
2942 struct isci_request *ireq)
2943{
2944 struct domain_device *dev = idev->domain_dev;
2945 enum sci_status status = SCI_SUCCESS;
2946
2947 /* Build the common part of the request */
2948 sci_general_request_construct(ihost, idev, ireq);
2949
2950 if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
2951 return SCI_FAILURE_INVALID_REMOTE_DEVICE;
2952
2953 if (dev->dev_type == SAS_END_DEV)
2954 /* pass */;
2955 else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
2956 memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
2957 else if (dev_is_expander(dev))
2958 /* pass */;
2959 else
2960 return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
2961
2962 memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab));
2963
2964 return status;
2965}
2966
2967enum sci_status sci_task_request_construct(struct isci_host *ihost,
2968 struct isci_remote_device *idev,
2969 u16 io_tag, struct isci_request *ireq)
2970{
2971 struct domain_device *dev = idev->domain_dev;
2972 enum sci_status status = SCI_SUCCESS;
2973
2974 /* Build the common part of the request */
2975 sci_general_request_construct(ihost, idev, ireq);
2976
2977 if (dev->dev_type == SAS_END_DEV ||
2978 dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
2979 set_bit(IREQ_TMF, &ireq->flags);
2980 memset(ireq->tc, 0, sizeof(struct scu_task_context));
2981 } else
2982 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
2983
2984 return status;
2985}
2986
2987static enum sci_status isci_request_ssp_request_construct(
2988 struct isci_request *request)
2989{
2990 enum sci_status status;
2991
2992 dev_dbg(&request->isci_host->pdev->dev,
2993 "%s: request = %p\n",
2994 __func__,
2995 request);
2996 status = sci_io_request_construct_basic_ssp(request);
2997 return status;
2998}
2999
3000static enum sci_status isci_request_stp_request_construct(struct isci_request *ireq)
3001{
3002 struct sas_task *task = isci_request_access_task(ireq);
3003 struct host_to_dev_fis *fis = &ireq->stp.cmd;
3004 struct ata_queued_cmd *qc = task->uldd_task;
3005 enum sci_status status;
3006
3007 dev_dbg(&ireq->isci_host->pdev->dev,
3008 "%s: ireq = %p\n",
3009 __func__,
3010 ireq);
3011
3012 memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
3013 if (!task->ata_task.device_control_reg_update)
3014 fis->flags |= 0x80;
3015 fis->flags &= 0xF0;
3016
3017 status = sci_io_request_construct_basic_sata(ireq);
3018
3019 if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
3020 qc->tf.command == ATA_CMD_FPDMA_READ)) {
3021 fis->sector_count = qc->tag << 3;
3022 ireq->tc->type.stp.ncq_tag = qc->tag;
3023 }
3024
3025 return status;
3026}
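The FPDMA branch above places the NCQ queue tag in bits 7:3 of the FIS sector-count field, which is where first-party DMA commands carry it per the SATA specification; hence the tag << 3. A small sketch of the encoding:

#include <stdint.h>
#include <stdio.h>

/* Sketch: FPDMA (NCQ) commands carry the queue tag in bits 7:3 of
 * the H2D FIS sector-count field, hence the "tag << 3" above.
 */
static uint8_t ncq_sector_count(unsigned int tag)
{
	return (uint8_t)(tag << 3);	/* tag occupies bits 7:3 */
}

int main(void)
{
	unsigned int tag = 5;
	uint8_t sc = ncq_sector_count(tag);

	printf("tag %u -> sector_count 0x%02x\n", tag, sc);
	printf("decoded tag = %u\n", sc >> 3);	/* inverse shift */
	return 0;
}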
3027
3028static enum sci_status
3029sci_io_request_construct_smp(struct device *dev,
3030 struct isci_request *ireq,
3031 struct sas_task *task)
3032{
3033 struct scatterlist *sg = &task->smp_task.smp_req;
3034 struct isci_remote_device *idev;
3035 struct scu_task_context *task_context;
3036 struct isci_port *iport;
3037 struct smp_req *smp_req;
3038 void *kaddr;
3039 u8 req_len;
3040 u32 cmd;
3041
3042 kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
3043 smp_req = kaddr + sg->offset;
3044 /*
3045 * Look at the SMP request's header fields; for certain SAS 1.x SMP
3046 * functions under SAS 2.0, a zero request length really indicates
3047 * a non-zero default length.
3048 */
3049 if (smp_req->req_len == 0) {
3050 switch (smp_req->func) {
3051 case SMP_DISCOVER:
3052 case SMP_REPORT_PHY_ERR_LOG:
3053 case SMP_REPORT_PHY_SATA:
3054 case SMP_REPORT_ROUTE_INFO:
3055 smp_req->req_len = 2;
3056 break;
3057 case SMP_CONF_ROUTE_INFO:
3058 case SMP_PHY_CONTROL:
3059 case SMP_PHY_TEST_FUNCTION:
3060 smp_req->req_len = 9;
3061 break;
3062 /* Default - zero is a valid default for 2.0. */
3063 }
3064 }
3065 req_len = smp_req->req_len;
3066 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
3067 cmd = *(u32 *) smp_req;
3068 kunmap_atomic(kaddr, KM_IRQ0);
3069
3070 if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
3071 return SCI_FAILURE;
3072
3073 ireq->protocol = SCIC_SMP_PROTOCOL;
3074
3075 /* the smp request was byte swapped above, before the DMA mapping. */
3076
3077 task_context = ireq->tc;
3078
3079 idev = ireq->target_device;
3080 iport = idev->owning_port;
3081
3082 /*
3083 * Fill in the TC with its required data
3084 * 00h
3085 */
3086 task_context->priority = 0;
3087 task_context->initiator_request = 1;
3088 task_context->connection_rate = idev->connection_rate;
3089 task_context->protocol_engine_index = ISCI_PEG;
3090 task_context->logical_port_index = iport->physical_port_index;
3091 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
3092 task_context->abort = 0;
3093 task_context->valid = SCU_TASK_CONTEXT_VALID;
3094 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
3095
3096 /* 04h */
3097 task_context->remote_node_index = idev->rnc.remote_node_index;
3098 task_context->command_code = 0;
3099 task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;
3100
3101 /* 08h */
3102 task_context->link_layer_control = 0;
3103 task_context->do_not_dma_ssp_good_response = 1;
3104 task_context->strict_ordering = 0;
3105 task_context->control_frame = 1;
3106 task_context->timeout_enable = 0;
3107 task_context->block_guard_enable = 0;
3108
3109 /* 0ch */
3110 task_context->address_modifier = 0;
3111
3112 /* 10h */
3113 task_context->ssp_command_iu_length = req_len;
3114
3115 /* 14h */
3116 task_context->transfer_length_bytes = 0;
3117
3118 /*
3119 * 18h ~ 30h, protocol specific
3120 * since the command IU has been built by the framework at this point, we
3121 * just copy the first DWord from the command IU to this location. */
3122 memcpy(&task_context->type.smp, &cmd, sizeof(u32));
3123
3124 /*
3125 * 40h
3126 * "For SMP you could program it to zero. We would prefer that way
3127 * so that done code will be consistent." - Venki
3128 */
3129 task_context->task_phase = 0;
3130
3131 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
3132 (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
3133 (iport->physical_port_index <<
3134 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
3135 ISCI_TAG_TCI(ireq->io_tag));
3136 /*
3137 * Copy the physical address of the command buffer to the SCU Task
3138 * Context; the command buffer should not contain the command header.
3139 */
3140 task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg));
3141 task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32));
3142
3143 /* SMP response comes as UF, so no need to set response IU address. */
3144 task_context->response_iu_upper = 0;
3145 task_context->response_iu_lower = 0;
3146
3147 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
3148
3149 return SCI_SUCCESS;
3150}
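sci_swab32_cpy() above byte-swaps the request buffer one dword at a time, since SMP frames are big-endian on the wire. An equivalent user-space helper, assuming a little-endian host; the frame contents below are fabricated:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch of a sci_swab32_cpy()-style helper: byte-swap a buffer one
 * 32-bit word at a time; SMP frames are big-endian on the wire.
 */
static uint32_t swab32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0x0000ff00u) |
	       ((x << 8) & 0x00ff0000u) | (x << 24);
}

static void swab32_cpy(uint32_t *dst, const uint32_t *src, size_t words)
{
	size_t i;

	for (i = 0; i < words; i++)
		dst[i] = swab32(src[i]);
}

int main(void)
{
	uint32_t buf[2] = { 0x40000002u, 0x00020010u }; /* fabricated frame */

	swab32_cpy(buf, buf, 2); /* in-place, as the driver does */
	printf("0x%08x 0x%08x\n", buf[0], buf[1]);
	return 0;
}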
3151
3152/*
3153 * isci_smp_request_build() - This function builds the smp request.
3154 * @ireq: This parameter points to the isci_request allocated in the
3155 * request construct function.
3156 *
3157 * SCI_SUCCESS on successful completion, or a specific failure code.
3158 */
3159static enum sci_status isci_smp_request_build(struct isci_request *ireq)
3160{
3161 struct sas_task *task = isci_request_access_task(ireq);
3162 struct device *dev = &ireq->isci_host->pdev->dev;
3163 enum sci_status status = SCI_FAILURE;
3164
3165 status = sci_io_request_construct_smp(dev, ireq, task);
3166 if (status != SCI_SUCCESS)
3167 dev_dbg(&ireq->isci_host->pdev->dev,
3168 "%s: failed with status = %d\n",
3169 __func__,
3170 status);
3171
3172 return status;
3173}
3174
3175/**
3176 * isci_io_request_build() - This function builds the io request object.
3177 * @ihost: This parameter specifies the ISCI host object
3178 * @request: This parameter points to the isci_request object allocated in the
3179 * request construct function.
3180 * @idev: This parameter is the handle for the sci core's remote device
3181 * object that is the destination for this request.
3182 *
3183 * SCI_SUCCESS on successful completion, or a specific failure code.
3184 */
3185static enum sci_status isci_io_request_build(struct isci_host *ihost,
3186 struct isci_request *request,
3187 struct isci_remote_device *idev)
3188{
3189 enum sci_status status = SCI_SUCCESS;
3190 struct sas_task *task = isci_request_access_task(request);
3191
3192 dev_dbg(&ihost->pdev->dev,
3193 "%s: idev = 0x%p; request = %p, "
3194 "num_scatter = %d\n",
3195 __func__,
3196 idev,
3197 request,
3198 task->num_scatter);
3199
3200 /* map the sgl addresses, if present.
3201 * libata does the mapping for sata devices
3202 * before we get the request.
3203 */
3204 if (task->num_scatter &&
3205 !sas_protocol_ata(task->task_proto) &&
3206 !(SAS_PROTOCOL_SMP & task->task_proto)) {
3207
3208 request->num_sg_entries = dma_map_sg(
3209 &ihost->pdev->dev,
3210 task->scatter,
3211 task->num_scatter,
3212 task->data_dir
3213 );
3214
3215 if (request->num_sg_entries == 0)
3216 return SCI_FAILURE_INSUFFICIENT_RESOURCES;
3217 }
3218
3219 status = sci_io_request_construct(ihost, idev, request);
3220
3221 if (status != SCI_SUCCESS) {
3222 dev_dbg(&ihost->pdev->dev,
3223 "%s: failed request construct\n",
3224 __func__);
3225 return SCI_FAILURE;
3226 }
3227
3228 switch (task->task_proto) {
3229 case SAS_PROTOCOL_SMP:
3230 status = isci_smp_request_build(request);
3231 break;
3232 case SAS_PROTOCOL_SSP:
3233 status = isci_request_ssp_request_construct(request);
3234 break;
3235 case SAS_PROTOCOL_SATA:
3236 case SAS_PROTOCOL_STP:
3237 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
3238 status = isci_request_stp_request_construct(request);
3239 break;
3240 default:
3241 dev_dbg(&ihost->pdev->dev,
3242 "%s: unknown protocol\n", __func__);
3243 return SCI_FAILURE;
3244 }
3245
3246 return SCI_SUCCESS;
3247}
3248
3249static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag)
3250{
3251 struct isci_request *ireq;
3252
3253 ireq = ihost->reqs[ISCI_TAG_TCI(tag)];
3254 ireq->io_tag = tag;
3255 ireq->io_request_completion = NULL;
3256 ireq->flags = 0;
3257 ireq->num_sg_entries = 0;
3258 INIT_LIST_HEAD(&ireq->completed_node);
3259 INIT_LIST_HEAD(&ireq->dev_node);
3260 isci_request_change_state(ireq, allocated);
3261
3262 return ireq;
3263}
3264
3265static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
3266 struct sas_task *task,
3267 u16 tag)
3268{
3269 struct isci_request *ireq;
3270
3271 ireq = isci_request_from_tag(ihost, tag);
3272 ireq->ttype_ptr.io_task_ptr = task;
3273 ireq->ttype = io_task;
3274 task->lldd_task = ireq;
3275
3276 return ireq;
3277}
3278
3279struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
3280 struct isci_tmf *isci_tmf,
3281 u16 tag)
3282{
3283 struct isci_request *ireq;
3284
3285 ireq = isci_request_from_tag(ihost, tag);
3286 ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
3287 ireq->ttype = tmf_task;
3288
3289 return ireq;
3290}
3291
3292int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
3293 struct sas_task *task, u16 tag)
3294{
3295 enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3296 struct isci_request *ireq;
3297 unsigned long flags;
3298 int ret = 0;
3299
3300 /* do common allocation and init of request object. */
3301 ireq = isci_io_request_from_tag(ihost, task, tag);
3302
3303 status = isci_io_request_build(ihost, ireq, idev);
3304 if (status != SCI_SUCCESS) {
3305 dev_dbg(&ihost->pdev->dev,
3306 "%s: request_construct failed - status = 0x%x\n",
3307 __func__,
3308 status);
3309 return status;
3310 }
3311
3312 spin_lock_irqsave(&ihost->scic_lock, flags);
3313
3314 if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) {
3315
3316 if (isci_task_is_ncq_recovery(task)) {
3317
3318 /* The device is in an NCQ recovery state. Issue the
3319 * request on the task side. Note that it will
3320 * complete on the I/O request side because the
3321 * request was built that way (i.e.
3322 * ireq->is_task_management_request is false).
3323 */
3324 status = sci_controller_start_task(ihost,
3325 idev,
3326 ireq);
3327 } else {
3328 status = SCI_FAILURE;
3329 }
3330 } else {
3331 /* send the request, let the core assign the IO TAG. */
3332 status = sci_controller_start_io(ihost, idev,
3333 ireq);
3334 }
3335
3336 if (status != SCI_SUCCESS &&
3337 status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
3338 dev_dbg(&ihost->pdev->dev,
3339 "%s: failed request start (0x%x)\n",
3340 __func__, status);
3341 spin_unlock_irqrestore(&ihost->scic_lock, flags);
3342 return status;
3343 }
3344
3345 /* Either I/O started OK, or the core has signaled that
3346 * the device needs a target reset.
3347 *
3348 * In either case, hold onto the I/O for later.
3349 *
3350 * Update its status and add it to the list in the
3351 * remote device object.
3352 */
3353 list_add(&ireq->dev_node, &idev->reqs_in_process);
3354
3355 if (status == SCI_SUCCESS) {
3356 isci_request_change_state(ireq, started);
3357 } else {
3358 /* The request did not really start in the
3359 * hardware, so clear the request handle
3360 * here so no terminations will be done.
3361 */
3362 set_bit(IREQ_TERMINATED, &ireq->flags);
3363 isci_request_change_state(ireq, completed);
3364 }
3365 spin_unlock_irqrestore(&ihost->scic_lock, flags);
3366
3367 if (status ==
3368 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
3369 /* Signal libsas that we need the SCSI error
3370 * handler thread to work on this I/O and that
3371 * we want a device reset.
3372 */
3373 spin_lock_irqsave(&task->task_state_lock, flags);
3374 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
3375 spin_unlock_irqrestore(&task->task_state_lock, flags);
3376
3377 /* Cause this task to be scheduled in the SCSI error
3378 * handler thread.
3379 */
3380 isci_execpath_callback(ihost, task,
3381 sas_task_abort);
3382
3383 /* Change the status, since we are holding
3384 * the I/O until it is managed by the SCSI
3385 * error handler.
3386 */
3387 status = SCI_SUCCESS;
3388 }
3389
3390 return ret;
3391}
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h
new file mode 100644
index 000000000000..7a1d5a9778eb
--- /dev/null
+++ b/drivers/scsi/isci/request.h
@@ -0,0 +1,448 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#ifndef _ISCI_REQUEST_H_
57#define _ISCI_REQUEST_H_
58
59#include "isci.h"
60#include "host.h"
61#include "scu_task_context.h"
62
63/**
64 * enum isci_request_status - This enum defines the possible states of an I/O
65 * request.
66 *
67 *
68 */
69enum isci_request_status {
70 unallocated = 0x00,
71 allocated = 0x01,
72 started = 0x02,
73 completed = 0x03,
74 aborting = 0x04,
75 aborted = 0x05,
76 terminating = 0x06,
77 dead = 0x07
78};
79
80enum task_type {
81 io_task = 0,
82 tmf_task = 1
83};
84
85enum sci_request_protocol {
86 SCIC_NO_PROTOCOL,
87 SCIC_SMP_PROTOCOL,
88 SCIC_SSP_PROTOCOL,
89 SCIC_STP_PROTOCOL
90}; /* XXX remove me, use sas_task.{dev|task_proto} instead */
91
92/**
93 * isci_stp_request - extra request infrastructure to handle pio/atapi protocol
94 * @pio_len - number of bytes requested at PIO setup
95 * @status - pio setup ending status value to tell us if we need
96 * to wait for another fis or if the transfer is complete. Upon
97 * receipt of a d2h fis this will be the status field of that fis.
98 * @sgl - track pio transfer progress as we iterate through the sgl
99 * @device_cdb_len - atapi device advertises its transfer constraints at setup
100 */
101struct isci_stp_request {
102 u32 pio_len;
103 u8 status;
104
105 struct isci_stp_pio_sgl {
106 int index;
107 u8 set;
108 u32 offset;
109 } sgl;
110 u32 device_cdb_len;
111};
112
113struct isci_request {
114 enum isci_request_status status;
115 #define IREQ_COMPLETE_IN_TARGET 0
116 #define IREQ_TERMINATED 1
117 #define IREQ_TMF 2
118 #define IREQ_ACTIVE 3
119 unsigned long flags;
120 /* XXX kill ttype and ttype_ptr, allocate full sas_task */
121 enum task_type ttype;
122 union ttype_ptr_union {
123 struct sas_task *io_task_ptr; /* When ttype==io_task */
124 struct isci_tmf *tmf_task_ptr; /* When ttype==tmf_task */
125 } ttype_ptr;
126 struct isci_host *isci_host;
127 /* For use in the requests_to_{complete|abort} lists: */
128 struct list_head completed_node;
129 /* For use in the reqs_in_process list: */
130 struct list_head dev_node;
131 spinlock_t state_lock;
132 dma_addr_t request_daddr;
133 dma_addr_t zero_scatter_daddr;
134 unsigned int num_sg_entries;
135 /* Note: "io_request_completion" is completed in two different ways
136 * depending on whether this is a TMF or regular request.
137 * - TMF requests are completed in the thread that started them;
138 * - regular requests are completed in the request completion callback
139 * function.
140 * This difference in operation allows the aborter of a TMF request
141 * to be sure that once the TMF request completes, the I/O that the
142 * TMF was aborting is guaranteed to have completed.
143 *
144 * XXX kill io_request_completion
145 */
146 struct completion *io_request_completion;
147 struct sci_base_state_machine sm;
148 struct isci_host *owning_controller;
149 struct isci_remote_device *target_device;
150 u16 io_tag;
151 enum sci_request_protocol protocol;
152 u32 scu_status; /* hardware result */
153 u32 sci_status; /* upper layer disposition */
154 u32 post_context;
155 struct scu_task_context *tc;
156 /* could be larger with sg chaining */
157 #define SCU_SGL_SIZE ((SCI_MAX_SCATTER_GATHER_ELEMENTS + 1) / 2)
158 struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32)));
159 /* This field is a pointer to the stored rx frame data. It is used in
160 * STP internal requests and SMP response frames. If this field is
161 * non-NULL the saved frame must be released on IO request completion.
162 */
163 u32 saved_rx_frame_index;
164
165 union {
166 struct {
167 union {
168 struct ssp_cmd_iu cmd;
169 struct ssp_task_iu tmf;
170 };
171 union {
172 struct ssp_response_iu rsp;
173 u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
174 };
175 } ssp;
176 struct {
177 struct smp_resp rsp;
178 } smp;
179 struct {
180 struct isci_stp_request req;
181 struct host_to_dev_fis cmd;
182 struct dev_to_host_fis rsp;
183 } stp;
184 };
185};
186
187static inline struct isci_request *to_ireq(struct isci_stp_request *stp_req)
188{
189 struct isci_request *ireq;
190
191 ireq = container_of(stp_req, typeof(*ireq), stp.req);
192 return ireq;
193}
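to_ireq() above is the standard container_of idiom: subtract the member's offset from the member pointer to recover the enclosing structure. A self-contained sketch with simplified types:

#include <stddef.h>
#include <stdio.h>

/* Sketch of the container_of idiom behind to_ireq(): subtract the
 * member's offset to recover a pointer to the enclosing structure.
 */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct stp_part { int pio_len; };

struct request {
	int io_tag;
	struct stp_part stp;
};

int main(void)
{
	struct request req = { .io_tag = 42 };
	struct stp_part *stp = &req.stp;
	struct request *back = container_of(stp, struct request, stp);

	printf("io_tag via container_of: %d\n", back->io_tag);
	return 0;
}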
194
195/**
196 * enum sci_base_request_states - This enumeration depicts all the states for
197 * the common request state machine.
198 *
199 *
200 */
201enum sci_base_request_states {
202 /*
203 * Simply the initial state for the base request state machine.
204 */
205 SCI_REQ_INIT,
206
207 /*
208 * This state indicates that the request has been constructed.
209 * This state is entered from the INITIAL state.
210 */
211 SCI_REQ_CONSTRUCTED,
212
213 /*
214 * This state indicates that the request has been started. This state
215 * is entered from the CONSTRUCTED state.
216 */
217 SCI_REQ_STARTED,
218
219 SCI_REQ_STP_UDMA_WAIT_TC_COMP,
220 SCI_REQ_STP_UDMA_WAIT_D2H,
221
222 SCI_REQ_STP_NON_DATA_WAIT_H2D,
223 SCI_REQ_STP_NON_DATA_WAIT_D2H,
224
225 SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED,
226 SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG,
227 SCI_REQ_STP_SOFT_RESET_WAIT_D2H,
228
229 /*
230 * While in this state the IO request object is waiting for the TC
231 * completion notification for the H2D Register FIS
232 */
233 SCI_REQ_STP_PIO_WAIT_H2D,
234
235 /*
236 * While in this state the IO request object is waiting for either a
237 * PIO Setup FIS or a D2H register FIS. The type of frame received is
238 * based on the result of the prior frame and line conditions.
239 */
240 SCI_REQ_STP_PIO_WAIT_FRAME,
241
242 /*
243 * While in this state the IO request object is waiting for a DATA
244 * frame from the device.
245 */
246 SCI_REQ_STP_PIO_DATA_IN,
247
248 /*
249 * While in this state the IO request object is waiting to transmit
250 * the next data frame to the device.
251 */
252 SCI_REQ_STP_PIO_DATA_OUT,
253
254 /*
255 * The AWAIT_TC_COMPLETION sub-state indicates that the started raw
256 * task management request is waiting for the transmission of the
257 * initial frame (i.e. command, task, etc.).
258 */
259 SCI_REQ_TASK_WAIT_TC_COMP,
260
261 /*
262 * This sub-state indicates that the started task management request
263 * is waiting for the reception of an unsolicited frame
264 * (i.e. response IU).
265 */
266 SCI_REQ_TASK_WAIT_TC_RESP,
267
268 /*
269 * This sub-state indicates that the started task management request
270 * is waiting for the reception of an unsolicited frame
271 * (i.e. response IU).
272 */
273 SCI_REQ_SMP_WAIT_RESP,
274
275 /*
276 * The AWAIT_TC_COMPLETION sub-state indicates that the started SMP
277 * request is waiting for the transmission of the initial frame
278 * (i.e. command, task, etc.).
279 */
280 SCI_REQ_SMP_WAIT_TC_COMP,
281
282 /*
283 * This state indicates that the request has completed.
284 * This state is entered from the STARTED state. This state is entered
285 * from the ABORTING state.
286 */
287 SCI_REQ_COMPLETED,
288
289 /*
290 * This state indicates that the request is in the process of being
291 * terminated/aborted.
292 * This state is entered from the CONSTRUCTED state.
293 * This state is entered from the STARTED state.
294 */
295 SCI_REQ_ABORTING,
296
297 /*
298 * Simply the final state for the base request state machine.
299 */
300 SCI_REQ_FINAL,
301};
302
303enum sci_status sci_request_start(struct isci_request *ireq);
304enum sci_status sci_io_request_terminate(struct isci_request *ireq);
305enum sci_status
306sci_io_request_event_handler(struct isci_request *ireq,
307 u32 event_code);
308enum sci_status
309sci_io_request_frame_handler(struct isci_request *ireq,
310 u32 frame_index);
311enum sci_status
312sci_task_request_terminate(struct isci_request *ireq);
313extern enum sci_status
314sci_request_complete(struct isci_request *ireq);
315extern enum sci_status
316sci_io_request_tc_completion(struct isci_request *ireq, u32 code);
317
318/* XXX open code in caller */
319static inline dma_addr_t
320sci_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr)
321{
322
323 char *requested_addr = (char *)virt_addr;
324 char *base_addr = (char *)ireq;
325
326 BUG_ON(requested_addr < base_addr);
327 BUG_ON((requested_addr - base_addr) >= sizeof(*ireq));
328
329 return ireq->request_daddr + (requested_addr - base_addr);
330}
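This helper works because each isci_request lives in a single coherent DMA allocation, so the bus address of any embedded field is the base bus address plus the field's byte offset. A hedged sketch of the same computation; the types and addresses here are fabricated:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch: inside one coherent DMA allocation, a field's bus address
 * is the allocation's base bus address plus the field's byte offset,
 * which is all the helper above computes.
 */
struct fake_req {
	uint64_t request_daddr;	/* bus address of this structure */
	char pad[64];
	uint32_t rsp_buf[8];	/* embedded response buffer */
};

static uint64_t get_dma_addr(const struct fake_req *req, const void *virt)
{
	return req->request_daddr +
	       (uint64_t)((const char *)virt - (const char *)req);
}

int main(void)
{
	struct fake_req req = { .request_daddr = 0x1000 }; /* fabricated */

	printf("rsp_buf bus addr: 0x%llx\n",
	       (unsigned long long)get_dma_addr(&req, req.rsp_buf));
	return 0;
}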
331
332/**
333 * isci_request_change_state() - This function sets the status of the request
334 * object.
335 * @isci_request: This parameter points to the isci_request object
336 * @status: This parameter is the new status of the object
337 *
338 */
339static inline enum isci_request_status
340isci_request_change_state(struct isci_request *isci_request,
341 enum isci_request_status status)
342{
343 enum isci_request_status old_state;
344 unsigned long flags;
345
346 dev_dbg(&isci_request->isci_host->pdev->dev,
347 "%s: isci_request = %p, state = 0x%x\n",
348 __func__,
349 isci_request,
350 status);
351
352 BUG_ON(isci_request == NULL);
353
354 spin_lock_irqsave(&isci_request->state_lock, flags);
355 old_state = isci_request->status;
356 isci_request->status = status;
357 spin_unlock_irqrestore(&isci_request->state_lock, flags);
358
359 return old_state;
360}
361
362/**
363 * isci_request_change_started_to_newstate() - This function sets the status of
364 * the request object.
365 * @isci_request: This parameter points to the isci_request object
366 * @completion_ptr: This parameter is the completion signalled when the request completes
367 * @newstate: This parameter is the new status of the object
368 * Returns the state previous to any change.
369 */
370static inline enum isci_request_status
371isci_request_change_started_to_newstate(struct isci_request *isci_request,
372 struct completion *completion_ptr,
373 enum isci_request_status newstate)
374{
375 enum isci_request_status old_state;
376 unsigned long flags;
377
378 spin_lock_irqsave(&isci_request->state_lock, flags);
379
380 old_state = isci_request->status;
381
382 if (old_state == started || old_state == aborting) {
383 BUG_ON(isci_request->io_request_completion != NULL);
384
385 isci_request->io_request_completion = completion_ptr;
386 isci_request->status = newstate;
387 }
388
389 spin_unlock_irqrestore(&isci_request->state_lock, flags);
390
391 dev_dbg(&isci_request->isci_host->pdev->dev,
392 "%s: isci_request = %p, old_state = 0x%x\n",
393 __func__,
394 isci_request,
395 old_state);
396
397 return old_state;
398}
399
400/**
401 * isci_request_change_started_to_aborted() - This function sets the status of
402 * the request object.
403 * @isci_request: This parameter points to the isci_request object
404 * @completion_ptr: This parameter is saved as the kernel completion structure
405 * signalled when the old request completes.
406 *
407 * Returns the state previous to any change.
408 */
409static inline enum isci_request_status
410isci_request_change_started_to_aborted(struct isci_request *isci_request,
411 struct completion *completion_ptr)
412{
413 return isci_request_change_started_to_newstate(isci_request,
414 completion_ptr,
415 aborted);
416}
417
418#define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr)
419
420#define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr)
421
422struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
423 struct isci_tmf *isci_tmf,
424 u16 tag);
425int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
426 struct sas_task *task, u16 tag);
427void isci_terminate_pending_requests(struct isci_host *ihost,
428 struct isci_remote_device *idev);
429enum sci_status
430sci_task_request_construct(struct isci_host *ihost,
431 struct isci_remote_device *idev,
432 u16 io_tag,
433 struct isci_request *ireq);
434enum sci_status
435sci_task_request_construct_ssp(struct isci_request *ireq);
436enum sci_status
437sci_task_request_construct_sata(struct isci_request *ireq);
438void sci_smp_request_copy_response(struct isci_request *ireq);
439
440static inline int isci_task_is_ncq_recovery(struct sas_task *task)
441{
442 return (sas_protocol_ata(task->task_proto) &&
443 task->ata_task.fis.command == ATA_CMD_READ_LOG_EXT &&
444 task->ata_task.fis.lbal == ATA_LOG_SATA_NCQ);
445
446}
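The predicate above matches a READ LOG EXT aimed at the NCQ command error log (page 10h), which is how libata reads back queued-command error state. A sketch of the same test on raw FIS fields, with the constant values taken from the ATA specification:

#include <stdint.h>
#include <stdio.h>

#define ATA_CMD_READ_LOG_EXT	0x2f	/* from the ATA command set */
#define ATA_LOG_SATA_NCQ	0x10	/* NCQ command error log page */

/* Sketch: NCQ error recovery is a READ LOG EXT aimed at log page 10h. */
static int is_ncq_recovery(uint8_t command, uint8_t lbal)
{
	return command == ATA_CMD_READ_LOG_EXT && lbal == ATA_LOG_SATA_NCQ;
}

int main(void)
{
	printf("%d\n", is_ncq_recovery(0x2f, 0x10));	/* 1: recovery read */
	printf("%d\n", is_ncq_recovery(0x25, 0x00));	/* 0: normal read  */
	return 0;
}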
447
448#endif /* !defined(_ISCI_REQUEST_H_) */
diff --git a/drivers/scsi/isci/sas.h b/drivers/scsi/isci/sas.h
new file mode 100644
index 000000000000..462b15174d3f
--- /dev/null
+++ b/drivers/scsi/isci/sas.h
@@ -0,0 +1,219 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#ifndef _SCI_SAS_H_
57#define _SCI_SAS_H_
58
59#include <linux/kernel.h>
60
61/*
62 * SATA FIS Types: these constants depict the various SATA FIS types defined
63 * in the serial ATA specification.
64 * XXX: This needs to go into <scsi/sas.h>
65 */
66#define FIS_REGH2D 0x27
67#define FIS_REGD2H 0x34
68#define FIS_SETDEVBITS 0xA1
69#define FIS_DMA_ACTIVATE 0x39
70#define FIS_DMA_SETUP 0x41
71#define FIS_BIST_ACTIVATE 0x58
72#define FIS_PIO_SETUP 0x5F
73#define FIS_DATA 0x46
74
75/**************************************************************************/
76#define SSP_RESP_IU_MAX_SIZE 280
77
78/*
79 * contents of the SSP COMMAND INFORMATION UNIT.
80 * For specific information on each of these individual fields please
81 * reference the SAS specification SSP transport layer section.
82 * XXX: This needs to go into <scsi/sas.h>
83 */
84struct ssp_cmd_iu {
85 u8 LUN[8];
86 u8 add_cdb_len:6;
87 u8 _r_a:2;
88 u8 _r_b;
89 u8 en_fburst:1;
90 u8 task_prio:4;
91 u8 task_attr:3;
92 u8 _r_c;
93
94 u8 cdb[16];
95} __packed;
96
97/*
98 * contents of the SSP TASK INFORMATION UNIT.
99 * For specific information on each of these individual fields please
100 * reference the SAS specification SSP transport layer section.
101 * XXX: This needs to go into <scsi/sas.h>
102 */
103struct ssp_task_iu {
104 u8 LUN[8];
105 u8 _r_a;
106 u8 task_func;
107 u8 _r_b[4];
108 u16 task_tag;
109 u8 _r_c[12];
110} __packed;
111
112
113/*
114 * struct smp_req_phy_id - This structure defines the contents of
115 * an SMP Request that is composed of the struct smp_request_header and a
116 * phy identifier.
117 * Examples: SMP_REQUEST_DISCOVER, SMP_REQUEST_REPORT_PHY_SATA.
118 *
119 * For specific information on each of these individual fields please reference
120 * the SAS specification.
121 */
122struct smp_req_phy_id {
123 u8 _r_a[4]; /* bytes 4-7 */
124
125 u8 ign_zone_grp:1; /* byte 8 */
126 u8 _r_b:7;
127
128 u8 phy_id; /* byte 9 */
129 u8 _r_c; /* byte 10 */
130 u8 _r_d; /* byte 11 */
131} __packed;
132
133/*
134 * struct smp_req_config_route_info - This structure defines the
135 * contents of an SMP Configure Route Information request.
136 *
137 * For specific information on each of these individual fields please reference
138 * the SAS specification.
139 */
140struct smp_req_conf_rtinfo {
141 u16 exp_change_cnt; /* bytes 4-5 */
142 u8 exp_rt_idx_hi; /* byte 6 */
143 u8 exp_rt_idx; /* byte 7 */
144
145 u8 _r_a; /* byte 8 */
146 u8 phy_id; /* byte 9 */
147 u16 _r_b; /* bytes 10-11 */
148
149 u8 _r_c:7; /* byte 12 */
150 u8 dis_rt_entry:1;
151 u8 _r_d[3]; /* bytes 13-15 */
152
153 u8 rt_sas_addr[8]; /* bytes 16-23 */
154 u8 _r_e[16]; /* bytes 24-39 */
155} __packed;
156
157/*
158 * struct smp_req_phycntl - This structure defines the contents of an
159 * SMP Phy Controller request.
160 *
161 * For specific information on each of these individual fields please reference
162 * the SAS specification.
163 */
164struct smp_req_phycntl {
165 u16 exp_change_cnt; /* byte 4-5 */
166
167 u8 _r_a[3]; /* bytes 6-8 */
168
169 u8 phy_id; /* byte 9 */
170 u8 phy_op; /* byte 10 */
171
172 u8 upd_pathway:1; /* byte 11 */
173 u8 _r_b:7;
174
175 u8 _r_c[12]; /* byte 12-23 */
176
177 u8 att_dev_name[8]; /* byte 24-31 */
178
179 u8 _r_d:4; /* byte 32 */
180 u8 min_linkrate:4;
181
182 u8 _r_e:4; /* byte 33 */
183 u8 max_linkrate:4;
184
185 u8 _r_f[2]; /* byte 34-35 */
186
187 u8 pathway:4; /* byte 36 */
188 u8 _r_g:4;
189
190 u8 _r_h[3]; /* bytes 37-39 */
191} __packed;
192
193/*
194 * struct smp_req - This structure simply unionizes the existing request
195 * structures into a common request type.
196 *
197 * XXX: This data structure may need to go to scsi/sas.h
198 */
199struct smp_req {
200 u8 type; /* byte 0 */
201 u8 func; /* byte 1 */
202 u8 alloc_resp_len; /* byte 2 */
203 u8 req_len; /* byte 3 */
204 u8 req_data[0];
205} __packed;
206
207#define SMP_RESP_HDR_SZ 4
208
209/*
210 * struct sci_sas_address - This structure depicts how a SAS address is
211 * represented by SCI.
212 * XXX convert this to u8 [SAS_ADDR_SIZE] like the rest of libsas
213 *
214 */
215struct sci_sas_address {
216 u32 high;
217 u32 low;
218};
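Pending the XXX conversion above, the high/low pair maps to a libsas-style u8[8] address by big-endian packing. A sketch of one direction of that conversion; the example address is fabricated:

#include <stdint.h>
#include <stdio.h>

/* Sketch: pack a libsas-style u8[8] SAS address into the high/low
 * u32 pair of struct sci_sas_address (big-endian byte order).
 */
struct sas_addr_pair { uint32_t high, low; };

static struct sas_addr_pair pack_sas_addr(const uint8_t a[8])
{
	struct sas_addr_pair s;

	s.high = ((uint32_t)a[0] << 24) | (a[1] << 16) | (a[2] << 8) | a[3];
	s.low  = ((uint32_t)a[4] << 24) | (a[5] << 16) | (a[6] << 8) | a[7];
	return s;
}

int main(void)
{
	const uint8_t addr[8] = { 0x50, 0x01, 0x51, 0x7e,
				  0x12, 0x34, 0x56, 0x78 }; /* fabricated */
	struct sas_addr_pair s = pack_sas_addr(addr);

	printf("high=0x%08x low=0x%08x\n", s.high, s.low);
	return 0;
}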
219#endif
diff --git a/drivers/scsi/isci/scu_completion_codes.h b/drivers/scsi/isci/scu_completion_codes.h
new file mode 100644
index 000000000000..c8b329c695f9
--- /dev/null
+++ b/drivers/scsi/isci/scu_completion_codes.h
@@ -0,0 +1,283 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#ifndef _SCU_COMPLETION_CODES_HEADER_
57#define _SCU_COMPLETION_CODES_HEADER_
58
59/**
60 * This file contains the constants and macros for the SCU hardware completion
61 * codes.
62 *
63 *
64 */
65
66#define SCU_COMPLETION_TYPE_SHIFT 28
67#define SCU_COMPLETION_TYPE_MASK 0x70000000
68
69/**
70 * SCU_COMPLETION_TYPE() -
71 *
72 * This macro constructs an SCU completion type
73 */
74#define SCU_COMPLETION_TYPE(type) \
75 ((u32)(type) << SCU_COMPLETION_TYPE_SHIFT)
76
77/**
78 * SCU_COMPLETION_TYPE() -
79 *
80 * These macros contain the SCU completion types SCU_COMPLETION_TYPE
81 */
82#define SCU_COMPLETION_TYPE_TASK SCU_COMPLETION_TYPE(0)
83#define SCU_COMPLETION_TYPE_SDMA SCU_COMPLETION_TYPE(1)
84#define SCU_COMPLETION_TYPE_UFI SCU_COMPLETION_TYPE(2)
85#define SCU_COMPLETION_TYPE_EVENT SCU_COMPLETION_TYPE(3)
86#define SCU_COMPLETION_TYPE_NOTIFY SCU_COMPLETION_TYPE(4)
87
88/**
89 *
90 *
91 * These constants provide the shift and mask values for the various parts of
92 * an SCU completion code.
93 */
94#define SCU_COMPLETION_STATUS_MASK 0x0FFC0000
95#define SCU_COMPLETION_TL_STATUS_MASK 0x0FC00000
96#define SCU_COMPLETION_TL_STATUS_SHIFT 22
97#define SCU_COMPLETION_SDMA_STATUS_MASK 0x003C0000
98#define SCU_COMPLETION_PEG_MASK 0x00010000
99#define SCU_COMPLETION_PORT_MASK 0x00007000
100#define SCU_COMPLETION_PE_MASK SCU_COMPLETION_PORT_MASK
101#define SCU_COMPLETION_PE_SHIFT 12
102#define SCU_COMPLETION_INDEX_MASK 0x00000FFF
103
104/**
105 * SCU_GET_COMPLETION_TYPE() -
106 *
107 * This macro returns the SCU completion type.
108 */
109#define SCU_GET_COMPLETION_TYPE(completion_code) \
110 ((completion_code) & SCU_COMPLETION_TYPE_MASK)
111
112/**
113 * SCU_GET_COMPLETION_STATUS() -
114 *
115 * This macro returns the SCU completion status.
116 */
117#define SCU_GET_COMPLETION_STATUS(completion_code) \
118 ((completion_code) & SCU_COMPLETION_STATUS_MASK)
119
120/**
121 * SCU_GET_COMPLETION_TL_STATUS() -
122 *
123 * This macro returns the transport layer completion status.
124 */
125#define SCU_GET_COMPLETION_TL_STATUS(completion_code) \
126 ((completion_code) & SCU_COMPLETION_TL_STATUS_MASK)
127
128/**
129 * SCU_MAKE_COMPLETION_STATUS() -
130 *
131 * This macro takes a completion code and performs the shift and mask
132 * operations to turn it into a completion code that can be compared to a
133 * SCU_GET_COMPLETION_TL_STATUS.
134 */
135#define SCU_MAKE_COMPLETION_STATUS(completion_code) \
136 ((u32)(completion_code) << SCU_COMPLETION_TL_STATUS_SHIFT)
137
138/**
139 * SCU_NORMALIZE_COMPLETION_STATUS() -
140 *
141 * This macro takes a SCU_GET_COMPLETION_TL_STATUS and normalizes it for a
142 * return code.
143 */
144#define SCU_NORMALIZE_COMPLETION_STATUS(completion_code) \
145 (\
146 ((completion_code) & SCU_COMPLETION_TL_STATUS_MASK) \
147 >> SCU_COMPLETION_TL_STATUS_SHIFT \
148 )
149
150/**
151 * SCU_GET_COMPLETION_SDMA_STATUS() -
152 *
153 * This macro returns the SDMA completion status.
154 */
155#define SCU_GET_COMPLETION_SDMA_STATUS(completion_code) \
156 ((completion_code) & SCU_COMPLETION_SDMA_STATUS_MASK)
157
158/**
159 * SCU_GET_COMPLETION_PEG() -
160 *
161 * This macro returns the Protocol Engine Group from the completion code.
162 */
163#define SCU_GET_COMPLETION_PEG(completion_code) \
164 ((completion_code) & SCU_COMPLETION_PEG_MASK)
165
166/**
167 * SCU_GET_COMPLETION_PORT() -
168 *
169 * This macro returns the logical port index from the completion code.
170 */
171#define SCU_GET_COMPLETION_PORT(completion_code) \
172 ((completion_code) & SCU_COMPLETION_PORT_MASK)
173
174/**
175 * SCU_GET_PROTOCOL_ENGINE_INDEX() -
176 *
177 * This macro returns the PE index from the completion code.
178 */
179#define SCU_GET_PROTOCOL_ENGINE_INDEX(completion_code) \
180 (((completion_code) & SCU_COMPLETION_PE_MASK) >> SCU_COMPLETION_PE_SHIFT)
181
182/**
183 * SCU_GET_COMPLETION_INDEX() -
184 *
185 * This macro returns the index of the completion which is either a TCi or an
186 * RNi depending on the completion type.
187 */
188#define SCU_GET_COMPLETION_INDEX(completion_code) \
189 ((completion_code) & SCU_COMPLETION_INDEX_MASK)
190
191#define SCU_UNSOLICITED_FRAME_MASK 0x0FFF0000
192#define SCU_UNSOLICITED_FRAME_SHIFT 16
193
194/**
195 * SCU_GET_FRAME_INDEX() -
196 *
197 * This macro returns a normalized frame index from an unsolicited frame
198 * completion.
199 */
200#define SCU_GET_FRAME_INDEX(completion_code) \
201 (\
202 ((completion_code) & SCU_UNSOLICITED_FRAME_MASK) \
203 >> SCU_UNSOLICITED_FRAME_SHIFT \
204 )
205
206#define SCU_UNSOLICITED_FRAME_ERROR_MASK 0x00008000
207
208/**
209 * SCU_GET_FRAME_ERROR() -
210 *
211 * This macro returns a zero (0) value if there is no frame error otherwise it
212 * returns non-zero (!0).
213 */
214#define SCU_GET_FRAME_ERROR(completion_code) \
215 ((completion_code) & SCU_UNSOLICITED_FRAME_ERROR_MASK)
216
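/*
 * Usage sketch (illustrative only, not part of the original header): how a
 * raw completion DWORD from the hardware completion queue can be picked
 * apart with the macros above. The helper name is hypothetical.
 */
static inline void scu_example_decode_completion(u32 ent)
{
	u32 type = SCU_GET_COMPLETION_TYPE(ent);	/* completion class */
	u32 tl_status = SCU_GET_COMPLETION_TL_STATUS(ent); /* transport layer status */
	u32 index = SCU_GET_COMPLETION_INDEX(ent);	/* TCi or RNi */
	/* unsolicited frame completions carry a frame slot and an error flag */
	u32 frame = SCU_GET_FRAME_INDEX(ent);
	int frame_bad = SCU_GET_FRAME_ERROR(ent) != 0;

	(void)type; (void)tl_status; (void)index; (void)frame; (void)frame_bad;
}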
217/**
218 *
219 *
220 * These constants represent normalized completion codes which must be shifted
221 * 18 bits to match them with the hardware completion code. In a 16-bit compiler,
222 * immediate constants are 16-bit values (the size of an int). If we shift
223 * those by 18 bits, we completely lose the value. To ensure the value is a
224 * 32-bit value like we want, each immediate value must be cast to a u32.
225 */
226#define SCU_TASK_DONE_GOOD ((u32)0x00)
227#define SCU_TASK_DONE_CRC_ERR ((u32)0x14)
228#define SCU_TASK_DONE_CHECK_RESPONSE ((u32)0x14)
229#define SCU_TASK_DONE_GEN_RESPONSE ((u32)0x15)
230#define SCU_TASK_DONE_NAK_CMD_ERR ((u32)0x16)
231#define SCU_TASK_DONE_CMD_LL_R_ERR ((u32)0x16)
232#define SCU_TASK_DONE_LL_R_ERR ((u32)0x17)
233#define SCU_TASK_DONE_ACK_NAK_TO ((u32)0x17)
234#define SCU_TASK_DONE_LL_PERR ((u32)0x18)
235#define SCU_TASK_DONE_LL_SY_TERM ((u32)0x19)
236#define SCU_TASK_DONE_NAK_ERR ((u32)0x19)
237#define SCU_TASK_DONE_LL_LF_TERM ((u32)0x1A)
238#define SCU_TASK_DONE_DATA_LEN_ERR ((u32)0x1A)
239#define SCU_TASK_DONE_LL_CL_TERM ((u32)0x1B)
240#define SCU_TASK_DONE_LL_ABORT_ERR ((u32)0x1B)
241#define SCU_TASK_DONE_SEQ_INV_TYPE ((u32)0x1C)
242#define SCU_TASK_DONE_UNEXP_XR ((u32)0x1C)
243#define SCU_TASK_DONE_INV_FIS_TYPE ((u32)0x1D)
244#define SCU_TASK_DONE_XR_IU_LEN_ERR ((u32)0x1D)
245#define SCU_TASK_DONE_INV_FIS_LEN ((u32)0x1E)
246#define SCU_TASK_DONE_XR_WD_LEN ((u32)0x1E)
247#define SCU_TASK_DONE_SDMA_ERR ((u32)0x1F)
248#define SCU_TASK_DONE_OFFSET_ERR ((u32)0x20)
249#define SCU_TASK_DONE_MAX_PLD_ERR ((u32)0x21)
250#define SCU_TASK_DONE_EXCESS_DATA ((u32)0x22)
251#define SCU_TASK_DONE_LF_ERR ((u32)0x23)
252#define SCU_TASK_DONE_UNEXP_FIS ((u32)0x24)
253#define SCU_TASK_DONE_UNEXP_RESP ((u32)0x24)
254#define SCU_TASK_DONE_EARLY_RESP ((u32)0x25)
255#define SCU_TASK_DONE_SMP_RESP_TO_ERR ((u32)0x26)
256#define SCU_TASK_DONE_DMASETUP_DIRERR ((u32)0x27)
257#define SCU_TASK_DONE_SMP_UFI_ERR ((u32)0x27)
258#define SCU_TASK_DONE_XFERCNT_ERR ((u32)0x28)
259#define SCU_TASK_DONE_SMP_FRM_TYPE_ERR ((u32)0x28)
260#define SCU_TASK_DONE_SMP_LL_RX_ERR ((u32)0x29)
261#define SCU_TASK_DONE_RESP_LEN_ERR ((u32)0x2A)
262#define SCU_TASK_DONE_UNEXP_DATA ((u32)0x2B)
263#define SCU_TASK_DONE_OPEN_FAIL ((u32)0x2C)
264#define SCU_TASK_DONE_UNEXP_SDBFIS ((u32)0x2D)
265#define SCU_TASK_DONE_REG_ERR ((u32)0x2E)
266#define SCU_TASK_DONE_SDB_ERR ((u32)0x2F)
267#define SCU_TASK_DONE_TASK_ABORT ((u32)0x30)
268#define SCU_TASK_DONE_CMD_SDMA_ERR ((u32)0x32)
269#define SCU_TASK_DONE_CMD_LL_ABORT_ERR ((u32)0x33)
270#define SCU_TASK_OPEN_REJECT_WRONG_DESTINATION ((u32)0x34)
271#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1 ((u32)0x35)
272#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2 ((u32)0x36)
273#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3 ((u32)0x37)
274#define SCU_TASK_OPEN_REJECT_BAD_DESTINATION ((u32)0x38)
275#define SCU_TASK_OPEN_REJECT_ZONE_VIOLATION ((u32)0x39)
276#define SCU_TASK_DONE_VIIT_ENTRY_NV ((u32)0x3A)
277#define SCU_TASK_DONE_IIT_ENTRY_NV ((u32)0x3B)
278#define SCU_TASK_DONE_RNCNV_OUTBOUND ((u32)0x3C)
279#define SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY ((u32)0x3D)
280#define SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED ((u32)0x3E)
281#define SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED ((u32)0x3F)
282
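/*
 * Usage sketch (illustrative only): the SCU_TASK_DONE_* values above are
 * normalized codes, so before comparing one against a raw completion it
 * must be shifted into the transport layer status position via
 * SCU_MAKE_COMPLETION_STATUS. The helper name is hypothetical.
 */
static inline int scu_example_task_done_good(u32 ent)
{
	return SCU_GET_COMPLETION_TL_STATUS(ent) ==
	       SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD);
}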
283#endif /* _SCU_COMPLETION_CODES_HEADER_ */
diff --git a/drivers/scsi/isci/scu_event_codes.h b/drivers/scsi/isci/scu_event_codes.h
new file mode 100644
index 000000000000..36a945ad5722
--- /dev/null
+++ b/drivers/scsi/isci/scu_event_codes.h
@@ -0,0 +1,336 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#ifndef __SCU_EVENT_CODES_HEADER__
57#define __SCU_EVENT_CODES_HEADER__
58
59/**
60 * This file contains the constants and macros for the SCU event codes.
61 *
62 *
63 */
64
65#define SCU_EVENT_TYPE_CODE_SHIFT 24
66#define SCU_EVENT_TYPE_CODE_MASK 0x0F000000
67
68#define SCU_EVENT_SPECIFIC_CODE_SHIFT 18
69#define SCU_EVENT_SPECIFIC_CODE_MASK 0x00FC0000
70
71#define SCU_EVENT_CODE_MASK \
72 (SCU_EVENT_TYPE_CODE_MASK | SCU_EVENT_SPECIFIC_CODE_MASK)
73
74/**
75 * SCU_EVENT_TYPE() -
76 *
77 * This macro constructs an SCU event type from the type value.
78 */
79#define SCU_EVENT_TYPE(type) \
80 ((u32)(type) << SCU_EVENT_TYPE_CODE_SHIFT)
81
82/**
83 * SCU_EVENT_SPECIFIC() -
84 *
85 * This macro constructs an SCU event specifier from the code value.
86 */
87#define SCU_EVENT_SPECIFIC(code) \
88 ((u32)(code) << SCU_EVENT_SPECIFIC_CODE_SHIFT)
89
90/**
91 * SCU_EVENT_MESSAGE() -
92 *
93 * This macro combines an SCU event type and SCU event specifier
94 * from the type and code values.
95 */
96#define SCU_EVENT_MESSAGE(type, code) \
97 ((type) | SCU_EVENT_SPECIFIC(code))
98
99/**
100 *
101 *
102 * SCU_EVENT_TYPES
103 */
104#define SCU_EVENT_TYPE_SMU_COMMAND_ERROR SCU_EVENT_TYPE(0x08)
105#define SCU_EVENT_TYPE_SMU_PCQ_ERROR SCU_EVENT_TYPE(0x09)
106#define SCU_EVENT_TYPE_SMU_ERROR SCU_EVENT_TYPE(0x00)
107#define SCU_EVENT_TYPE_TRANSPORT_ERROR SCU_EVENT_TYPE(0x01)
108#define SCU_EVENT_TYPE_BROADCAST_CHANGE SCU_EVENT_TYPE(0x02)
109#define SCU_EVENT_TYPE_OSSP_EVENT SCU_EVENT_TYPE(0x03)
110#define SCU_EVENT_TYPE_FATAL_MEMORY_ERROR SCU_EVENT_TYPE(0x0F)
111#define SCU_EVENT_TYPE_RNC_SUSPEND_TX SCU_EVENT_TYPE(0x04)
112#define SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX SCU_EVENT_TYPE(0x05)
113#define SCU_EVENT_TYPE_RNC_OPS_MISC SCU_EVENT_TYPE(0x06)
114#define SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT SCU_EVENT_TYPE(0x07)
115#define SCU_EVENT_TYPE_ERR_CNT_EVENT SCU_EVENT_TYPE(0x0A)
116
117/**
118 *
119 *
120 * SCU_EVENT_SPECIFIERS
121 */
122#define SCU_EVENT_SPECIFIER_DRIVER_SUSPEND 0x20
123#define SCU_EVENT_SPECIFIER_RNC_RELEASE 0x00
124
125/**
126 *
127 *
128 * SMU_COMMAND_EVENTS
129 */
130#define SCU_EVENT_INVALID_CONTEXT_COMMAND \
131 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_COMMAND_ERROR, 0x00)
132
133/**
134 *
135 *
136 * SMU_PCQ_EVENTS
137 */
138#define SCU_EVENT_UNCORRECTABLE_PCQ_ERROR \
139 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_PCQ_ERROR, 0x00)
140
141/**
142 *
143 *
144 * SMU_EVENTS
145 */
146#define SCU_EVENT_UNCORRECTABLE_REGISTER_WRITE \
147 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x02)
148#define SCU_EVENT_UNCORRECTABLE_REGISTER_READ \
149 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x03)
150#define SCU_EVENT_PCIE_INTERFACE_ERROR \
151 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x04)
152#define SCU_EVENT_FUNCTION_LEVEL_RESET \
153 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x05)
154
155/**
156 *
157 *
158 * TRANSPORT_LEVEL_ERRORS
159 */
160#define SCU_EVENT_ACK_NAK_TIMEOUT_ERROR \
161 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_TRANSPORT_ERROR, 0x00)
162
163/**
164 *
165 *
166 * BROADCAST_CHANGE_EVENTS
167 */
168#define SCU_EVENT_BROADCAST_CHANGE \
169 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x01)
170#define SCU_EVENT_BROADCAST_RESERVED0 \
171 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x02)
172#define SCU_EVENT_BROADCAST_RESERVED1 \
173 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x03)
174#define SCU_EVENT_BROADCAST_SES \
175 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x04)
176#define SCU_EVENT_BROADCAST_EXPANDER \
177 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x05)
178#define SCU_EVENT_BROADCAST_AEN \
179 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x06)
180#define SCU_EVENT_BROADCAST_RESERVED3 \
181 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x07)
182#define SCU_EVENT_BROADCAST_RESERVED4 \
183 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x08)
184#define SCU_EVENT_PE_SUSPENDED \
185 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x09)
186
187/**
188 *
189 *
190 * OSSP_EVENTS
191 */
192#define SCU_EVENT_PORT_SELECTOR_DETECTED \
193 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x10)
194#define SCU_EVENT_SENT_PORT_SELECTION \
195 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x11)
196#define SCU_EVENT_HARD_RESET_TRANSMITTED \
197 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x12)
198#define SCU_EVENT_HARD_RESET_RECEIVED \
199 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x13)
200#define SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT \
201 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x15)
202#define SCU_EVENT_LINK_FAILURE \
203 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x16)
204#define SCU_EVENT_SATA_SPINUP_HOLD \
205 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x17)
206#define SCU_EVENT_SAS_15_SSC \
207 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x18)
208#define SCU_EVENT_SAS_15 \
209 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x19)
210#define SCU_EVENT_SAS_30_SSC \
211 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1A)
212#define SCU_EVENT_SAS_30 \
213 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1B)
214#define SCU_EVENT_SAS_60_SSC \
215 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1C)
216#define SCU_EVENT_SAS_60 \
217 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1D)
218#define SCU_EVENT_SATA_15_SSC \
219 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1E)
220#define SCU_EVENT_SATA_15 \
221 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1F)
222#define SCU_EVENT_SATA_30_SSC \
223 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x20)
224#define SCU_EVENT_SATA_30 \
225 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x21)
226#define SCU_EVENT_SATA_60_SSC \
227 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x22)
228#define SCU_EVENT_SATA_60 \
229 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x23)
230#define SCU_EVENT_SAS_PHY_DETECTED \
231 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x24)
232#define SCU_EVENT_SATA_PHY_DETECTED \
233 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x25)
234
235/**
236 *
237 *
238 * FATAL_INTERNAL_MEMORY_ERROR_EVENTS
239 */
240#define SCU_EVENT_TSC_RNSC_UNCORRECTABLE_ERROR \
241 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR, 0x00)
242#define SCU_EVENT_TC_RNC_UNCORRECTABLE_ERROR \
243 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR, 0x01)
244#define SCU_EVENT_ZPT_UNCORRECTABLE_ERROR \
245 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR, 0x02)
246
247/**
248 *
249 *
250 * REMOTE_NODE_SUSPEND_EVENTS
251 */
252#define SCU_EVENT_TL_RNC_SUSPEND_TX \
253 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX, 0x00)
254#define SCU_EVENT_TL_RNC_SUSPEND_TX_RX \
255 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX, 0x00)
256#define SCU_EVENT_DRIVER_POST_RNC_SUSPEND_TX \
257 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX, 0x20)
258#define SCU_EVENT_DRIVER_POST_RNC_SUSPEND_TX_RX \
259 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX, 0x20)
260
261/**
262 *
263 *
264 * REMOTE_NODE_MISC_EVENTS
265 */
266#define SCU_EVENT_POST_RCN_RELEASE \
267 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, SCU_EVENT_SPECIFIER_RNC_RELEASE)
268#define SCU_EVENT_POST_IT_NEXUS_LOSS_TIMER_ENABLE \
269 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x01)
270#define SCU_EVENT_POST_IT_NEXUS_LOSS_TIMER_DISABLE \
271 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x02)
272#define SCU_EVENT_POST_RNC_COMPLETE \
273 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x03)
274#define SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE \
275 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x04)
276
277/**
278 *
279 *
280 * ERROR_COUNT_EVENT
281 */
282#define SCU_EVENT_RX_CREDIT_BLOCKED_RECEIVED \
283 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x00)
284#define SCU_EVENT_TX_DONE_CREDIT_TIMEOUT \
285 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x01)
286#define SCU_EVENT_RX_DONE_CREDIT_TIMEOUT \
287 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x02)
288
289/**
290 * scu_get_event_type() -
291 *
292 * This macro returns the SCU event type from the event code.
293 */
294#define scu_get_event_type(event_code) \
295 ((event_code) & SCU_EVENT_TYPE_CODE_MASK)
296
297/**
298 * scu_get_event_specifier() -
299 *
300 * This macro returns the SCU event specifier from the event code.
301 */
302#define scu_get_event_specifier(event_code) \
303 ((event_code) & SCU_EVENT_SPECIFIC_CODE_MASK)
304
305/**
306 * scu_get_event_code() -
307 *
308 * This macro returns the combined SCU event type and SCU event specifier from
309 * the event code.
310 */
311#define scu_get_event_code(event_code) \
312 ((event_code) & SCU_EVENT_CODE_MASK)
313
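/*
 * Usage sketch (illustrative only): classifying a raw event code with the
 * accessor macros above. Any extra low-order bits in the event code are
 * masked off before the comparison. The helper name is hypothetical.
 */
static inline int scu_example_is_broadcast_change(u32 event_code)
{
	return scu_get_event_code(event_code) == SCU_EVENT_BROADCAST_CHANGE;
}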
314
315/**
316 *
317 *
318 * PTS_SCHEDULE_EVENT
319 */
320#define SCU_EVENT_SMP_RESPONSE_NO_PE \
321 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x00)
322#define SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE \
323 scu_get_event_specifier(SCU_EVENT_SMP_RESPONSE_NO_PE)
324
325#define SCU_EVENT_TASK_TIMEOUT \
326 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x01)
327#define SCU_EVENT_SPECIFIC_TASK_TIMEOUT \
328 scu_get_event_specifier(SCU_EVENT_TASK_TIMEOUT)
329
330#define SCU_EVENT_IT_NEXUS_TIMEOUT \
331 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x02)
332#define SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT \
333 scu_get_event_specifier(SCU_EVENT_IT_NEXUS_TIMEOUT)
334
335
336#endif /* __SCU_EVENT_CODES_HEADER__ */
diff --git a/drivers/scsi/isci/scu_remote_node_context.h b/drivers/scsi/isci/scu_remote_node_context.h
new file mode 100644
index 000000000000..33745adc826b
--- /dev/null
+++ b/drivers/scsi/isci/scu_remote_node_context.h
@@ -0,0 +1,229 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#ifndef __SCU_REMOTE_NODE_CONTEXT_HEADER__
57#define __SCU_REMOTE_NODE_CONTEXT_HEADER__
58
59/**
60 * This file contains the structures and constants used by the SCU hardware to
61 * describe a remote node context.
62 *
63 *
64 */
65
66/**
67 * struct ssp_remote_node_context - This structure contains the SCU hardware
68 * definition for an SSP remote node.
69 *
70 *
71 */
72struct ssp_remote_node_context {
73 /* WORD 0 */
74
75 /**
76 * This field is the remote node index assigned for this remote node. All
77 * remote nodes must have a unique remote node index. The value of the remote
78 * node index can not exceed the maximum number of remote nodes reported in
79 * the SCU device context capacity register.
80 */
81 u32 remote_node_index:12;
82 u32 reserved0_1:4;
83
84 /**
85 * This field tells the SCU hardware how many simultaneous connections
86 * this remote node will support.
87 */
88 u32 remote_node_port_width:4;
89
90 /**
91 * This field tells the SCU hardware which logical port to associate with this
92 * remote node.
93 */
94 u32 logical_port_index:3;
95 u32 reserved0_2:5;
96
97 /**
98 * This field will enable the I_T nexus loss timer for this remote node.
99 */
100 u32 nexus_loss_timer_enable:1;
101
102 /**
103 * This field is for driver debug only and is not used.
104 */
105 u32 check_bit:1;
106
107 /**
108 * This field must be set to true when the hardware DMAs the remote node
109 * context to the hardware SRAM. When the remote node is being invalidated
110 * this field must be set to false.
111 */
112 u32 is_valid:1;
113
114 /**
115 * This field must be set to true.
116 */
117 u32 is_remote_node_context:1;
118
119 /* WORD 1 - 2 */
120
121 /**
122 * This is the low word of the remote device SAS Address
123 */
124 u32 remote_sas_address_lo;
125
126 /**
127 * This field is the high word of the remote device SAS Address
128 */
129 u32 remote_sas_address_hi;
130
131 /* WORD 3 */
132 /**
133 * This field represents the function number assigned to this remote device.
134 * This value must match the virtual function number that is being used to
135 * communicate to the device.
136 */
137 u32 function_number:8;
138 u32 reserved3_1:8;
139
140 /**
141 * This field provides the driver a way to cheat on the arbitration wait time
142 * for this remote node.
143 */
144 u32 arbitration_wait_time:16;
145
146 /* WORD 4 */
147 /**
148 * This field tells the SCU hardware how long this device may occupy the
149 * connection before it must be closed.
150 */
151 u32 connection_occupancy_timeout:16;
152
153 /**
154 * This field tells the SCU hardware how long to maintain a connection when
155 * there are no frames being transmitted on the link.
156 */
157 u32 connection_inactivity_timeout:16;
158
159 /* WORD 5 */
160 /**
161 * This field allows the driver to cheat on the arbitration wait time for this
162 * remote node.
163 */
164 u32 initial_arbitration_wait_time:16;
165
166 /**
167 * This field tells the hardware what to program for the connection rate in
168 * the open address frame. See the SAS spec for valid values.
169 */
170 u32 oaf_connection_rate:4;
171
172 /**
173 * This field tells the SCU hardware what to program for the features in the
174 * open address frame. See the SAS spec for valid values.
175 */
176 u32 oaf_features:4;
177
178 /**
179 * This field tells the SCU hardware what to use for the source zone group in
180 * the open address frame. See the SAS spec for more details on zoning.
181 */
182 u32 oaf_source_zone_group:8;
183
184 /* WORD 6 */
185 /**
186 * This field tells the SCU hardware what to use for the more compatibility
187 * features in the open address frame. See the SAS spec for details.
188 */
189 u32 oaf_more_compatibility_features;
190
191 /* WORD 7 */
192 u32 reserved7;
193
194};
195
196/**
197 * struct stp_remote_node_context - This structure contains the SCU hardware
198 * definition for a STP remote node.
199 *
200 * STP Targets are not yet supported so this definition is a placeholder until
201 * we do support them.
202 */
203struct stp_remote_node_context {
204 /**
205 * Placeholder data for the STP remote node.
206 */
207 u32 data[8];
208
209};
210
211/**
212 * This union combines the SAS and SATA remote node definitions.
213 *
214 * union scu_remote_node_context
215 */
216union scu_remote_node_context {
217 /**
218 * SSP Remote Node
219 */
220 struct ssp_remote_node_context ssp;
221
222 /**
223 * STP Remote Node
224 */
225 struct stp_remote_node_context stp;
226
227};
228
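/*
 * Usage sketch (illustrative only): filling in the documented mandatory
 * fields of an SSP remote node context before posting it. The helper and
 * its parameters are hypothetical; real construction happens elsewhere in
 * the driver.
 */
static inline void scu_example_init_ssp_rnc(struct ssp_remote_node_context *rnc,
					    u32 rni, u32 port, u64 sas_addr)
{
	rnc->remote_node_index = rni;		/* unique RNi within HW capacity */
	rnc->remote_node_port_width = 1;	/* one connection for a narrow port */
	rnc->logical_port_index = port;		/* logical port for this node */
	rnc->remote_sas_address_lo = (u32)sas_addr;
	rnc->remote_sas_address_hi = (u32)(sas_addr >> 32);
	rnc->is_remote_node_context = 1;	/* must always be true */
	rnc->is_valid = 1;			/* true while resident in HW SRAM */
}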
229#endif /* __SCU_REMOTE_NODE_CONTEXT_HEADER__ */
diff --git a/drivers/scsi/isci/scu_task_context.h b/drivers/scsi/isci/scu_task_context.h
new file mode 100644
index 000000000000..7df87d923285
--- /dev/null
+++ b/drivers/scsi/isci/scu_task_context.h
@@ -0,0 +1,942 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#ifndef _SCU_TASK_CONTEXT_H_
57#define _SCU_TASK_CONTEXT_H_
58
59/**
60 * This file contains the structures and constants for the SCU hardware task
61 * context.
62 *
63 *
64 */
65
66
67/**
68 * enum scu_ssp_task_type - This enumeration defines the various SSP task
69 * types the SCU hardware will accept. The definition for the various task
70 * types the SCU hardware will accept can be found in the DS specification.
71 *
72 *
73 */
74typedef enum {
75 SCU_TASK_TYPE_IOREAD, /* /< IO READ direction or no direction */
76 SCU_TASK_TYPE_IOWRITE, /* /< IO Write direction */
77 SCU_TASK_TYPE_SMP_REQUEST, /* /< SMP Request type */
78 SCU_TASK_TYPE_RESPONSE, /* /< Driver generated response frame (target mode) */
79 SCU_TASK_TYPE_RAW_FRAME, /* /< Raw frame request type */
80 SCU_TASK_TYPE_PRIMITIVE /* /< Request for a primitive to be transmitted */
81} scu_ssp_task_type;
82
83/**
84 * enum scu_sata_task_type - This enumeration defines the various SATA task
85 * types the SCU hardware will accept. The definition for the various task
86 * types the SCU hardware will accept can be found in the DS specification.
87 *
88 *
89 */
90typedef enum {
91 SCU_TASK_TYPE_DMA_IN, /* /< Read request */
92 SCU_TASK_TYPE_FPDMAQ_READ, /* /< NCQ read request */
93 SCU_TASK_TYPE_PACKET_DMA_IN, /* /< Packet read request */
94 SCU_TASK_TYPE_SATA_RAW_FRAME, /* /< Raw frame request */
95 RESERVED_4,
96 RESERVED_5,
97 RESERVED_6,
98 RESERVED_7,
99 SCU_TASK_TYPE_DMA_OUT, /* /< Write request */
100 SCU_TASK_TYPE_FPDMAQ_WRITE, /* /< NCQ write Request */
101 SCU_TASK_TYPE_PACKET_DMA_OUT /* /< Packet write request */
102} scu_sata_task_type;
103
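/*
 * Usage sketch (illustrative only): a hypothetical helper mapping a data
 * direction onto the SATA task types above; the real driver derives the
 * task type from the request being built.
 */
static inline scu_sata_task_type scu_example_sata_task_type(int is_write)
{
	return is_write ? SCU_TASK_TYPE_DMA_OUT : SCU_TASK_TYPE_DMA_IN;
}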
104
105/**
106 *
107 *
108 * SCU_CONTEXT_TYPE
109 */
110#define SCU_TASK_CONTEXT_TYPE 0
111#define SCU_RNC_CONTEXT_TYPE 1
112
113/**
114 *
115 *
116 * SCU_TASK_CONTEXT_VALIDITY
117 */
118#define SCU_TASK_CONTEXT_INVALID 0
119#define SCU_TASK_CONTEXT_VALID 1
120
121/**
122 *
123 *
124 * SCU_COMMAND_CODE
125 */
126#define SCU_COMMAND_CODE_INITIATOR_NEW_TASK 0
127#define SCU_COMMAND_CODE_ACTIVE_TASK 1
128#define SCU_COMMAND_CODE_PRIMITIVE_SEQ_TASK 2
129#define SCU_COMMAND_CODE_TARGET_RAW_FRAMES 3
130
131/**
132 *
133 *
134 * SCU_TASK_PRIORITY
135 */
136/**
137 *
138 *
139 * This priority is used when there is no priority request for this request.
140 */
141#define SCU_TASK_PRIORITY_NORMAL 0
142
143/**
144 *
145 *
146 * This priority indicates that the task should be scheduled to the head of the
147 * queue. The task will NOT be executed if the TX is suspended for the remote
148 * node.
149 */
150#define SCU_TASK_PRIORITY_HEAD_OF_Q 1
151
152/**
153 *
154 *
155 * This priority indicates that the task will be executed before all
156 * SCU_TASK_PRIORITY_NORMAL and SCU_TASK_PRIORITY_HEAD_OF_Q tasks. The task
157 * WILL be executed if the TX is suspended for the remote node.
158 */
159#define SCU_TASK_PRIORITY_HIGH 2
160
161/**
162 *
163 *
164 * This task priority is reserved and should not be used.
165 */
166#define SCU_TASK_PRIORITY_RESERVED 3
167
168#define SCU_TASK_INITIATOR_MODE 1
169#define SCU_TASK_TARGET_MODE 0
170
171#define SCU_TASK_REGULAR 0
172#define SCU_TASK_ABORTED 1
173
174/* direction bit definition */
175/**
176 *
177 *
178 * SATA_DIRECTION
179 */
180#define SCU_SATA_WRITE_DATA_DIRECTION 0
181#define SCU_SATA_READ_DATA_DIRECTION 1
182
183/**
184 *
185 *
186 * SCU_COMMAND_CONTEXT_MACROS These macros provide the mask and shift
187 * operations to construct the various SCU commands
188 */
189#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_SHIFT 21
190#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK 0x00E00000
191#define scu_get_command_request_type(x) \
192 ((x) & SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK)
193
194#define SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_SHIFT 18
195#define SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK 0x001C0000
196#define scu_get_command_request_subtype(x) \
197 ((x) & SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK)
198
199#define SCU_CONTEXT_COMMAND_REQUEST_FULLTYPE_MASK \
200 (\
201 SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK \
202 | SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK \
203 )
204#define scu_get_command_request_full_type(x) \
205 ((x) & SCU_CONTEXT_COMMAND_REQUEST_FULLTYPE_MASK)
206
207#define SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT 16
208#define SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_MASK 0x00010000
209#define scu_get_command_protocl_engine_group(x) \
210 ((x) & SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_MASK)
211
212#define SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT 12
213#define SCU_CONTEXT_COMMAND_LOGICAL_PORT_MASK 0x00007000
214#define scu_get_command_reqeust_logical_port(x) \
215 ((x) & SCU_CONTEXT_COMMAND_LOGICAL_PORT_MASK)
216
217
218#define MAKE_SCU_CONTEXT_COMMAND_TYPE(type) \
219 ((u32)(type) << SCU_CONTEXT_COMMAND_REQUEST_TYPE_SHIFT)
220
221/**
222 * MAKE_SCU_CONTEXT_COMMAND_TYPE() -
223 *
224 * SCU_COMMAND_TYPES These constants provide the grouping of the different SCU
225 * command types.
226 */
227#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC MAKE_SCU_CONTEXT_COMMAND_TYPE(0)
228#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC MAKE_SCU_CONTEXT_COMMAND_TYPE(1)
229#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC MAKE_SCU_CONTEXT_COMMAND_TYPE(2)
230#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC MAKE_SCU_CONTEXT_COMMAND_TYPE(3)
231#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC MAKE_SCU_CONTEXT_COMMAND_TYPE(6)
232
233#define MAKE_SCU_CONTEXT_COMMAND_REQUEST(type, command) \
234 ((type) | ((command) << SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_SHIFT))
235
236/**
237 *
238 *
239 * SCU_REQUEST_TYPES These constants are the various request types that can be
240 * posted to the SCU hardware.
241 */
242#define SCU_CONTEXT_COMMAND_REQUST_POST_TC \
243 (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC, 0))
244
245#define SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT \
246 (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC, 1))
247
248#define SCU_CONTEXT_COMMAND_REQUST_DUMP_TC \
249 (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC, 0))
250
251#define SCU_CONTEXT_COMMAND_POST_RNC_32 \
252 (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 0))
253
254#define SCU_CONTEXT_COMMAND_POST_RNC_96 \
255 (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 1))
256
257#define SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE \
258 (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 2))
259
260#define SCU_CONTEXT_COMMAND_DUMP_RNC_32 \
261 (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC, 0))
262
263#define SCU_CONTEXT_COMMAND_DUMP_RNC_96 \
264 (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC, 1))
265
266#define SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX \
267 (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 0))
268
269#define SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX \
270 (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 1))
271
272#define SCU_CONTEXT_COMMAND_POST_RNC_RESUME \
273 (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 2))
274
275#define SCU_CONTEXT_IT_NEXUS_LOSS_TIMER_ENABLE \
276 (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 3))
277
278#define SCU_CONTEXT_IT_NEXUS_LOSS_TIMER_DISABLE \
279 (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 4))
280
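/*
 * Usage sketch (illustrative only): the accessor macros above can pull a
 * posted command back apart, e.g. when tracing what was written to the
 * post queue. The helper name is hypothetical.
 */
static inline int scu_example_is_post_tc(u32 post_command)
{
	return scu_get_command_request_full_type(post_command) ==
	       SCU_CONTEXT_COMMAND_REQUST_POST_TC;
}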
281/**
282 *
283 *
284 * SCU_TASK_CONTEXT_PROTOCOL SCU task context protocol types; this is used to
285 * program the SCU Task context protocol field in word 0x00.
286 */
287#define SCU_TASK_CONTEXT_PROTOCOL_SMP 0x00
288#define SCU_TASK_CONTEXT_PROTOCOL_SSP 0x01
289#define SCU_TASK_CONTEXT_PROTOCOL_STP 0x02
290#define SCU_TASK_CONTEXT_PROTOCOL_NONE 0x07
291
292/**
293 * struct ssp_task_context - This is the SCU hardware definition for an SSP
294 * request.
295 *
296 *
297 */
298struct ssp_task_context {
299 /* OFFSET 0x18 */
300 u32 reserved00:24;
301 u32 frame_type:8;
302
303 /* OFFSET 0x1C */
304 u32 reserved01;
305
306 /* OFFSET 0x20 */
307 u32 fill_bytes:2;
308 u32 reserved02:6;
309 u32 changing_data_pointer:1;
310 u32 retransmit:1;
311 u32 retry_data_frame:1;
312 u32 tlr_control:2;
313 u32 reserved03:19;
314
315 /* OFFSET 0x24 */
316 u32 uiRsvd4;
317
318 /* OFFSET 0x28 */
319 u32 target_port_transfer_tag:16;
320 u32 tag:16;
321
322 /* OFFSET 0x2C */
323 u32 data_offset;
324};
325
326/**
327 * struct stp_task_context - This is the SCU hardware definition for an STP
328 * request.
329 *
330 *
331 */
332struct stp_task_context {
333 /* OFFSET 0x18 */
334 u32 fis_type:8;
335 u32 pm_port:4;
336 u32 reserved0:3;
337 u32 control:1;
338 u32 command:8;
339 u32 features:8;
340
341 /* OFFSET 0x1C */
342 u32 reserved1;
343
344 /* OFFSET 0x20 */
345 u32 reserved2;
346
347 /* OFFSET 0x24 */
348 u32 reserved3;
349
350 /* OFFSET 0x28 */
351 u32 ncq_tag:5;
352 u32 reserved4:27;
353
354 /* OFFSET 0x2C */
355 u32 data_offset; /* TODO: What is this used for? */
356};
357
358/**
359 * struct smp_task_context - This is the SCU hardware definition for an SMP
360 * request.
361 *
362 *
363 */
364struct smp_task_context {
365 /* OFFSET 0x18 */
366 u32 response_length:8;
367 u32 function_result:8;
368 u32 function:8;
369 u32 frame_type:8;
370
371 /* OFFSET 0x1C */
372 u32 smp_response_ufi:12;
373 u32 reserved1:20;
374
375 /* OFFSET 0x20 */
376 u32 reserved2;
377
378 /* OFFSET 0x24 */
379 u32 reserved3;
380
381 /* OFFSET 0x28 */
382 u32 reserved4;
383
384 /* OFFSET 0x2C */
385 u32 reserved5;
386};
387
388/**
389 * struct primitive_task_context - This is the SCU hardware definition used
390 * when the driver wants to send a primitive on the link.
391 *
392 *
393 */
394struct primitive_task_context {
395 /* OFFSET 0x18 */
396 /**
397 * This field is the control word and it must be 0.
398 */
399 u32 control; /* /< must be set to 0 */
400
401 /* OFFSET 0x1C */
402 /**
403 * This field specifies the primitive that is to be transmitted.
404 */
405 u32 sequence;
406
407 /* OFFSET 0x20 */
408 u32 reserved0;
409
410 /* OFFSET 0x24 */
411 u32 reserved1;
412
413 /* OFFSET 0x28 */
414 u32 reserved2;
415
416 /* OFFSET 0x2C */
417 u32 reserved3;
418};
419
420/**
421 * The union of the protocols that can be selected in the SCU task context
422 * field.
423 *
424 * protocol_context
425 */
426union protocol_context {
427 struct ssp_task_context ssp;
428 struct stp_task_context stp;
429 struct smp_task_context smp;
430 struct primitive_task_context primitive;
431 u32 words[6];
432};
433
434/**
435 * struct scu_sgl_element - This structure represents a single SCU defined SGL
436 * element. SCU SGLs contain a 64 bit address, with the maximum data transfer
437 * length being 24 bits in size. An SGL cannot cross a 4GB boundary.
438 *
439 * struct scu_sgl_element
440 */
441struct scu_sgl_element {
442 /**
443 * This field is the upper 32 bits of the 64 bit physical address.
444 */
445 u32 address_upper;
446
447 /**
448 * This field is the lower 32 bits of the 64 bit physical address.
449 */
450 u32 address_lower;
451
452 /**
453 * This field is the number of bytes to transfer.
454 */
455 u32 length:24;
456
457 /**
458 * This field is the address modifier to be used when a virtual function is
459 * requesting a data transfer.
460 */
461 u32 address_modifier:8;
462
463};
464
465#define SCU_SGL_ELEMENT_PAIR_A 0
466#define SCU_SGL_ELEMENT_PAIR_B 1
467
468/**
469 * struct scu_sgl_element_pair - This structure is the SCU hardware definition
470 * of a pair of SGL elements. The SCU hardware always works on SGL pairs.
471 * They are referred to in the DS specification as SGL A and SGL B. Each SGL
472 * pair is followed by the address of the next pair.
473 *
474 *
475 */
476struct scu_sgl_element_pair {
477 /* OFFSET 0x60-0x68 */
478 /**
479 * This field is the SGL element A of the SGL pair.
480 */
481 struct scu_sgl_element A;
482
483 /* OFFSET 0x6C-0x74 */
484 /**
485 * This field is the SGL element B of the SGL pair.
486 */
487 struct scu_sgl_element B;
488
489 /* OFFSET 0x78-0x7C */
490 /**
491 * This field is the upper 32 bits of the 64 bit address to the next SGL
492 * element pair.
493 */
494 u32 next_pair_upper;
495
496 /**
497 * This field is the lower 32 bits of the 64 bit address to the next SGL
498 * element pair.
499 */
500 u32 next_pair_lower;
501
502};
503
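/*
 * Usage sketch (illustrative only): filling element A of an SGL pair and
 * terminating the chain. That a zero next-pair address ends the chain is
 * an assumption of this sketch; the 24-bit length and 4GB boundary limits
 * are documented above.
 */
static inline void scu_example_fill_sgl_pair(struct scu_sgl_element_pair *pair,
					     u64 dma_addr, u32 len)
{
	pair->A.address_upper = (u32)(dma_addr >> 32);
	pair->A.address_lower = (u32)dma_addr;
	pair->A.length = len;			/* at most 2^24 - 1 bytes */
	pair->A.address_modifier = 0;		/* no virtual function modifier */

	pair->B.address_upper = 0;		/* second element unused here */
	pair->B.address_lower = 0;
	pair->B.length = 0;
	pair->B.address_modifier = 0;

	pair->next_pair_upper = 0;		/* assumed chain terminator */
	pair->next_pair_lower = 0;
}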
504/**
505 * struct transport_snapshot - This structure is the SCU hardware scratch area
506 * for the task context. This is set to 0 by the driver but can be read by
507 * issuing a dump TC request to the SCU.
508 *
509 *
510 */
511struct transport_snapshot {
512 /* OFFSET 0x48 */
513 u32 xfer_rdy_write_data_length;
514
515 /* OFFSET 0x4C */
516 u32 data_offset;
517
518 /* OFFSET 0x50 */
519 u32 data_transfer_size:24;
520 u32 reserved_50_0:8;
521
522 /* OFFSET 0x54 */
523 u32 next_initiator_write_data_offset;
524
525 /* OFFSET 0x58 */
526 u32 next_initiator_write_data_xfer_size:24;
527 u32 reserved_58_0:8;
528};
529
530/**
531 * struct scu_task_context - This structure defines the contents of the SCU
532 * silicon task context. It lays out all of the fields according to the
533 * expected order and location for the Storage Controller unit.
534 *
535 *
536 */
537struct scu_task_context {
538 /* OFFSET 0x00 ------ */
539 /**
540 * This field must be encoded to one of the valid SCU task priority values
541 * - SCU_TASK_PRIORITY_NORMAL
542 * - SCU_TASK_PRIORITY_HEAD_OF_Q
543 * - SCU_TASK_PRIORITY_HIGH
544 */
545 u32 priority:2;
546
547 /**
548 * This field must be set to true if this is an initiator generated request.
549 * Until target mode is supported all task requests are initiator requests.
550 */
551 u32 initiator_request:1;
552
553 /**
554 * This field must be set to one of the valid connection rates; valid values
555 * are 0x8, 0x9, and 0xA.
556 */
557 u32 connection_rate:4;
558
559 /**
560 * This field must be programmed when generating an SMP response since the SMP
561 * connection remains open until the SMP response is generated.
562 */
563 u32 protocol_engine_index:3;
564
565 /**
566 * This field must contain the logical port for the task request.
567 */
568 u32 logical_port_index:3;
569
570 /**
571 * This field must be set to one of the SCU_TASK_CONTEXT_PROTOCOL values
572 * - SCU_TASK_CONTEXT_PROTOCOL_SMP
573 * - SCU_TASK_CONTEXT_PROTOCOL_SSP
574 * - SCU_TASK_CONTEXT_PROTOCOL_STP
575 * - SCU_TASK_CONTEXT_PROTOCOL_NONE
576 */
577 u32 protocol_type:3;
578
579 /**
580 * This field must be set to the TCi allocated for this task
581 */
582 u32 task_index:12;
583
584 /**
585 * This field is reserved and must be set to 0x00
586 */
587 u32 reserved_00_0:1;
588
589 /**
590 * For a normal task request this must be set to 0. If this is an abort of
591 * this task request it must be set to 1.
592 */
593 u32 abort:1;
594
595 /**
596 * This field must be set to true for the SCU hardware to process the task.
597 */
598 u32 valid:1;
599
600 /**
601 * This field must be set to SCU_TASK_CONTEXT_TYPE
602 */
603 u32 context_type:1;
604
605 /* OFFSET 0x04 */
606 /**
607 * This field contains the RNi that is the target of this request.
608 */
609 u32 remote_node_index:12;
610
611 /**
612 * This field is programmed only if this is a mirrored request, which we are
613 * not using; in that case it is the RNi for the mirrored target.
614 */
615 u32 mirrored_node_index:12;
616
617 /**
618 * This field is programmed with the direction of the SATA request
619 * - SCU_SATA_WRITE_DATA_DIRECTION
620 * - SCU_SATA_READ_DATA_DIRECTION
621 */
622 u32 sata_direction:1;
623
624 /**
625 * This field is programmed with one of the following SCU_COMMAND_CODE values
626 * - SCU_COMMAND_CODE_INITIATOR_NEW_TASK
627 * - SCU_COMMAND_CODE_ACTIVE_TASK
628 * - SCU_COMMAND_CODE_PRIMITIVE_SEQ_TASK
629 * - SCU_COMMAND_CODE_TARGET_RAW_FRAMES
630 */
631 u32 command_code:2;
632
633 /**
634 * This field is set to true if the remote node should be suspended.
635 * This bit is only valid for SSP & SMP target devices.
636 */
637 u32 suspend_node:1;
638
639 /**
640 * This field is programmed with one of the following command type codes
641 *
642 * For SAS requests use the scu_ssp_task_type
643 * - SCU_TASK_TYPE_IOREAD
644 * - SCU_TASK_TYPE_IOWRITE
645 * - SCU_TASK_TYPE_SMP_REQUEST
646 * - SCU_TASK_TYPE_RESPONSE
647 * - SCU_TASK_TYPE_RAW_FRAME
648 * - SCU_TASK_TYPE_PRIMITIVE
649 *
650 * For SATA requests use the scu_sata_task_type
651 * - SCU_TASK_TYPE_DMA_IN
652 * - SCU_TASK_TYPE_FPDMAQ_READ
653 * - SCU_TASK_TYPE_PACKET_DMA_IN
654 * - SCU_TASK_TYPE_SATA_RAW_FRAME
655 * - SCU_TASK_TYPE_DMA_OUT
656 * - SCU_TASK_TYPE_FPDMAQ_WRITE
657 * - SCU_TASK_TYPE_PACKET_DMA_OUT
658 */
659 u32 task_type:4;
660
661 /* OFFSET 0x08 */
662 /**
663 * This field is reserved and must be set to 0x00
664 */
665 u32 link_layer_control:8; /* presently all reserved */
666
667 /**
668 * This field is set to true when TLR is to be enabled
669 */
670 u32 ssp_tlr_enable:1;
671
672 /**
673 * This field specifies whether the SCU DMAs a response frame to host
674 * memory for good response frames when operating in target mode.
675 */
676 u32 dma_ssp_target_good_response:1;
677
678 /**
679 * This field indicates if the SCU should DMA the response frame to
680 * host memory.
681 */
682 u32 do_not_dma_ssp_good_response:1;
683
684 /**
685 * This field is set to true when strict ordering is to be enabled
686 */
687 u32 strict_ordering:1;
688
689 /**
690 * This field indicates the type of endianness to be utilized for the
691 * frame. Command, task, and response frames utilize control_frame
692 * set to 1.
693 */
694 u32 control_frame:1;
695
696 /**
697 * This field is reserved and the driver should set to 0x00
698 */
699 u32 tl_control_reserved:3;
700
701 /**
702 * This field is set to true when the SCU hardware task timeout control is to
703 * be enabled
704 */
705 u32 timeout_enable:1;
706
707 /**
708 * This field is reserved and the driver should set it to 0x00
709 */
710 u32 pts_control_reserved:7;
711
712 /**
713 * This field should be set to true when block guard is to be enabled
714 */
715 u32 block_guard_enable:1;
716
717 /**
718 * This field is reserved and the driver should set to 0x00
719 */
720 u32 sdma_control_reserved:7;
721
722 /* OFFSET 0x0C */
723 /**
724 * This field is the address modifier for this IO request; it should be
725 * programmed with the virtual function that is making the request.
726 */
727 u32 address_modifier:16;
728
729 /**
730 * @todo Do we support mirrored SMP response frames?
731 */
732 u32 mirrored_protocol_engine:3; /* mirrored protocol Engine Index */
733
734 /**
735 * If this is a mirrored request the logical port index for the mirrored RNi
736 * must be programmed.
737 */
738 u32 mirrored_logical_port:4; /* mirrored local port index */
739
740 /**
741 * This field is reserved and the driver must set it to 0x00
742 */
743 u32 reserved_0C_0:8;
744
745 /**
746 * This field must be set to true if the mirrored request processing is to be
747 * enabled.
748 */
749 u32 mirror_request_enable:1; /* Mirrored request Enable */
750
751 /* OFFSET 0x10 */
752 /**
753 * This field is the command iu length in dwords
754 */
755 u32 ssp_command_iu_length:8;
756
757 /**
758 * This is the target TLR enable bit; it must be set to 0 when creating the
759 * task context.
760 */
761 u32 xfer_ready_tlr_enable:1;
762
763 /**
764 * This field is reserved and the driver must set it to 0x00
765 */
766 u32 reserved_10_0:7;
767
768 /**
769 * This is the maximum burst size that the SCU hardware will send in one
770 * connection. Its value is (N x 512) and N must be a multiple of 2. If the
771 * value is 0x00 then the maximum burst size is disabled.
772 */
773 u32 ssp_max_burst_size:16;
774
775 /* OFFSET 0x14 */
776 /**
777 * This field is set to the number of bytes to be transferred in the request.
778 */
779 u32 transfer_length_bytes:24; /* In terms of bytes */
780
781 /**
782 * This field is reserved and the driver should set it to 0x00
783 */
784 u32 reserved_14_0:8;
785
786 /* OFFSET 0x18-0x2C */
787 /**
788 * This union provides for the protocol specific part of the SCU task context.
789 */
790 union protocol_context type;
791
792 /* OFFSET 0x30-0x34 */
793 /**
794 * This field is the upper 32 bits of the 64 bit physical address of the
795 * command iu buffer
796 */
797 u32 command_iu_upper;
798
799 /**
800 * This field is the lower 32 bits of the 64 bit physical address of the
801 * command iu buffer
802 */
803 u32 command_iu_lower;
804
805 /* OFFSET 0x38-0x3C */
806 /**
807 * This field is the upper 32 bits of the 64 bit physical address of the
808 * response iu buffer
809 */
810 u32 response_iu_upper;
811
812 /**
813 * This field is the lower 32 bits of the 64 bit physical address of the
814 * response iu buffer
815 */
816 u32 response_iu_lower;
817
818 /* OFFSET 0x40 */
819 /**
820 * This field is set to the task phase of the SCU hardware. The driver must
821 * set this to 0x01
822 */
823 u32 task_phase:8;
824
825 /**
826 * This field is set to the transport layer task status. The driver must set
827 * this to 0x00
828 */
829 u32 task_status:8;
830
831 /**
832 * This field is used during initiator write TLR
833 */
834 u32 previous_extended_tag:4;
835
836 /**
837 * This field is set to the maximum number of retries for an STP non-data FIS
838 */
839 u32 stp_retry_count:2;
840
841 /**
842 * This field is reserved and the driver must set it to 0x00
843 */
844 u32 reserved_40_1:2;
845
846 /**
847 * This field is used by the SCU TL to determine when to take a snapshot when
848 * transmitting read data frames.
849 * - 0x00 The entire IO
850 * - 0x01 32k
851 * - 0x02 64k
852 * - 0x04 128k
853 * - 0x08 256k
854 */
855 u32 ssp_tlr_threshold:4;
856
857 /**
858 * This field is reserved and the driver must set it to 0x00
859 */
860 u32 reserved_40_2:4;
861
862 /* OFFSET 0x44 */
863 u32 write_data_length; /* read only set to 0 */
864
865 /* OFFSET 0x48-0x58 */
866 struct transport_snapshot snapshot; /* read only set to 0 */
867
868 /* OFFSET 0x5C */
869 u32 block_protection_enable:1;
870 u32 block_size:2;
871 u32 block_protection_function:2;
872 u32 reserved_5C_0:9;
873 u32 active_sgl_element:2; /* read only set to 0 */
874 u32 sgl_exhausted:1; /* read only set to 0 */
875 u32 payload_data_transfer_error:4; /* read only set to 0 */
876 u32 frame_buffer_offset:11; /* read only set to 0 */
877
878 /* OFFSET 0x60-0x7C */
879 /**
880 * This field is the first SGL element pair found in the TC data structure.
881 */
882 struct scu_sgl_element_pair sgl_pair_ab;
883 /* OFFSET 0x80-0x9C */
884 /**
885 * This field is the second SGL element pair found in the TC data structure.
886 */
887 struct scu_sgl_element_pair sgl_pair_cd;
888
889 /* OFFSET 0xA0-BC */
890 struct scu_sgl_element_pair sgl_snapshot_ac;
891
892 /* OFFSET 0xC0 */
893 u32 active_sgl_element_pair; /* read only set to 0 */
894
895 /* OFFSET 0xC4-0xCC */
896 u32 reserved_C4_CC[3];
897
898 /* OFFSET 0xD0 */
899 u32 intermediate_crc_value:16;
900 u32 initial_crc_seed:16;
901
902 /* OFFSET 0xD4 */
903 u32 application_tag_for_verify:16;
904 u32 application_tag_for_generate:16;
905
906 /* OFFSET 0xD8 */
907 u32 reference_tag_seed_for_verify_function;
908
909 /* OFFSET 0xDC */
910 u32 reserved_DC;
911
912 /* OFFSET 0xE0 */
913 u32 reserved_E0_0:16;
914 u32 application_tag_mask_for_generate:16;
915
916 /* OFFSET 0xE4 */
917 u32 block_protection_control:16;
918 u32 application_tag_mask_for_verify:16;
919
920 /* OFFSET 0xE8 */
921 u32 block_protection_error:8;
922 u32 reserved_E8_0:24;
923
924 /* OFFSET 0xEC */
925 u32 reference_tag_seed_for_verify;
926
927 /* OFFSET 0xF0 */
928 u32 intermediate_crc_valid_snapshot:16;
929 u32 reserved_F0_0:16;
930
931 /* OFFSET 0xF4 */
932 u32 reference_tag_seed_for_verify_function_snapshot;
933
934 /* OFFSET 0xF8 */
935 u32 snapshot_of_reserved_dword_DC_of_tc;
936
937 /* OFFSET 0xFC */
938 u32 reference_tag_seed_for_generate_function_snapshot;
939
940};
941
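/*
 * Usage sketch (illustrative only): the minimum field setup the comments
 * above call for when building an SSP task context; everything else starts
 * zeroed. The helper and its parameters are hypothetical, and 0x9 is just
 * one of the documented valid connection rates (0x8 - 0xA).
 */
static inline void scu_example_init_task_context(struct scu_task_context *tc,
						 u32 tci, u32 port)
{
	*tc = (struct scu_task_context){ 0 };
	tc->priority = SCU_TASK_PRIORITY_NORMAL;
	tc->initiator_request = 1;	/* initiator-only until target mode lands */
	tc->connection_rate = 0x9;
	tc->logical_port_index = port;
	tc->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
	tc->task_index = tci;		/* the TCi allocated for this task */
	tc->valid = SCU_TASK_CONTEXT_VALID;
	tc->context_type = SCU_TASK_CONTEXT_TYPE;
	tc->task_phase = 0x01;		/* driver must set this to 0x01 */
	tc->task_status = 0x00;
}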
942#endif /* _SCU_TASK_CONTEXT_H_ */
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
new file mode 100644
index 000000000000..d6bcdd013dc9
--- /dev/null
+++ b/drivers/scsi/isci/task.c
@@ -0,0 +1,1676 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#include <linux/completion.h>
57#include <linux/irqflags.h>
58#include "sas.h"
59#include <scsi/libsas.h>
60#include "remote_device.h"
61#include "remote_node_context.h"
62#include "isci.h"
63#include "request.h"
64#include "task.h"
65#include "host.h"
66
67/**
68 * isci_task_refuse() - complete the request to the upper layer driver in
69 * the case where an I/O needs to be completed back in the submit path.
70 * @ihost: host on which the request was queued
71 * @task: request to complete
72 * @response: response code for the completed task.
73 * @status: status code for the completed task.
74 *
75 */
76static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
77 enum service_response response,
78 enum exec_status status)
79
80{
81 enum isci_completion_selection disposition;
82
83 disposition = isci_perform_normal_io_completion;
84 disposition = isci_task_set_completion_status(task, response, status,
85 disposition);
86
87 /* Tasks aborted specifically by a call to the lldd_abort_task
88 * function should not be completed to the host in the regular path.
89 */
90 switch (disposition) {
91 case isci_perform_normal_io_completion:
92 /* Normal notification (task_done) */
93 dev_dbg(&ihost->pdev->dev,
94 "%s: Normal - task = %p, response=%d, "
95 "status=%d\n",
96 __func__, task, response, status);
97
98 task->lldd_task = NULL;
99
100 isci_execpath_callback(ihost, task, task->task_done);
101 break;
102
103 case isci_perform_aborted_io_completion:
104 /*
105 * No notification because this request is already in the
106 * abort path.
107 */
108 dev_dbg(&ihost->pdev->dev,
109 "%s: Aborted - task = %p, response=%d, "
110 "status=%d\n",
111 __func__, task, response, status);
112 break;
113
114 case isci_perform_error_io_completion:
115 /* Use sas_task_abort */
116 dev_dbg(&ihost->pdev->dev,
117 "%s: Error - task = %p, response=%d, "
118 "status=%d\n",
119 __func__, task, response, status);
120
121 isci_execpath_callback(ihost, task, sas_task_abort);
122 break;
123
124 default:
125 dev_dbg(&ihost->pdev->dev,
126 "%s: isci task notification default case!",
127 __func__);
128 sas_task_abort(task);
129 break;
130 }
131}
132
133#define for_each_sas_task(num, task) \
134 for (; num > 0; num--,\
135 task = list_entry(task->list.next, struct sas_task, list))
136
137
138static inline int isci_device_io_ready(struct isci_remote_device *idev,
139 struct sas_task *task)
140{
141 return idev ? test_bit(IDEV_IO_READY, &idev->flags) ||
142 (test_bit(IDEV_IO_NCQERROR, &idev->flags) &&
143 isci_task_is_ncq_recovery(task))
144 : 0;
145}
146/**
147 * isci_task_execute_task() - This function is one of the SAS Domain Template
148 * functions. It is called by libsas to send a task down to
149 * hardware.
150 * @task: This parameter specifies the SAS task to send.
151 * @num: This parameter specifies the number of tasks to queue.
152 * @gfp_flags: This parameter specifies the context of this call.
153 *
154 * Return: status; zero indicates success.
155 */
156int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
157{
158 struct isci_host *ihost = dev_to_ihost(task->dev);
159 struct isci_remote_device *idev;
160 unsigned long flags;
161 bool io_ready;
162 u16 tag;
163
164 dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num);
165
166 for_each_sas_task(num, task) {
167 enum sci_status status = SCI_FAILURE;
168
169 spin_lock_irqsave(&ihost->scic_lock, flags);
170 idev = isci_lookup_device(task->dev);
171 io_ready = isci_device_io_ready(idev, task);
172 tag = isci_alloc_tag(ihost);
173 spin_unlock_irqrestore(&ihost->scic_lock, flags);
174
175 dev_dbg(&ihost->pdev->dev,
176 "task: %p, num: %d dev: %p idev: %p:%#lx cmd = %p\n",
177 task, num, task->dev, idev, idev ? idev->flags : 0,
178 task->uldd_task);
179
180 if (!idev) {
181 isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED,
182 SAS_DEVICE_UNKNOWN);
183 } else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) {
184 /* Indicate QUEUE_FULL so that the scsi midlayer
185 * retries.
186 */
187 isci_task_refuse(ihost, task, SAS_TASK_COMPLETE,
188 SAS_QUEUE_FULL);
189 } else {
190 /* There is a device and it's ready for I/O. */
191 spin_lock_irqsave(&task->task_state_lock, flags);
192
193 if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
194 /* The I/O was aborted. */
195 spin_unlock_irqrestore(&task->task_state_lock,
196 flags);
197
198 isci_task_refuse(ihost, task,
199 SAS_TASK_UNDELIVERED,
200 SAM_STAT_TASK_ABORTED);
201 } else {
202 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
203 spin_unlock_irqrestore(&task->task_state_lock, flags);
204
205 /* build and send the request. */
206 status = isci_request_execute(ihost, idev, task, tag);
207
208 if (status != SCI_SUCCESS) {
209
210 spin_lock_irqsave(&task->task_state_lock, flags);
211 /* Did not really start this command. */
212 task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
213 spin_unlock_irqrestore(&task->task_state_lock, flags);
214
215 /* Indicate QUEUE_FULL so that the scsi
216 * midlayer retries. if the request
217 * failed for remote device reasons,
218 * it gets returned as
219 * SAS_TASK_UNDELIVERED next time
220 * through.
221 */
222 isci_task_refuse(ihost, task,
223 SAS_TASK_COMPLETE,
224 SAS_QUEUE_FULL);
225 }
226 }
227 }
228 if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) {
229 spin_lock_irqsave(&ihost->scic_lock, flags);
230 /* command never hit the device, so just free
231 * the tci and skip the sequence increment
232 */
233 isci_tci_free(ihost, ISCI_TAG_TCI(tag));
234 spin_unlock_irqrestore(&ihost->scic_lock, flags);
235 }
236 isci_put_device(idev);
237 }
238 return 0;
239}
240
241static enum sci_status isci_sata_management_task_request_build(struct isci_request *ireq)
242{
243 struct isci_tmf *isci_tmf;
244 enum sci_status status;
245
246 if (tmf_task != ireq->ttype)
247 return SCI_FAILURE;
248
249 isci_tmf = isci_request_access_tmf(ireq);
250
251 switch (isci_tmf->tmf_code) {
252
253 case isci_tmf_sata_srst_high:
254 case isci_tmf_sata_srst_low: {
255 struct host_to_dev_fis *fis = &ireq->stp.cmd;
256
257 memset(fis, 0, sizeof(*fis));
258
259 fis->fis_type = 0x27;
260 fis->flags &= ~0x80;
261 fis->flags &= 0xF0;
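/* 0x27 is the register host-to-device FIS type. With the C bit
 * (0x80 in flags) clear, the device acts on the control register
 * set below, which is where SRST is asserted or deasserted. Both
 * masks are no-ops after the memset above; they document intent.
 */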
262 if (isci_tmf->tmf_code == isci_tmf_sata_srst_high)
263 fis->control |= ATA_SRST;
264 else
265 fis->control &= ~ATA_SRST;
266 break;
267 }
268 /* other management commands go here... */
269 default:
270 return SCI_FAILURE;
271 }
272
273 /* core builds the protocol specific request
274 * based on the h2d fis.
275 */
276 status = sci_task_request_construct_sata(ireq);
277
278 return status;
279}
280
281static struct isci_request *isci_task_request_build(struct isci_host *ihost,
282 struct isci_remote_device *idev,
283 u16 tag, struct isci_tmf *isci_tmf)
284{
285 enum sci_status status = SCI_FAILURE;
286 struct isci_request *ireq = NULL;
287 struct domain_device *dev;
288
289 dev_dbg(&ihost->pdev->dev,
290 "%s: isci_tmf = %p\n", __func__, isci_tmf);
291
292 dev = idev->domain_dev;
293
294 /* do common allocation and init of request object. */
295 ireq = isci_tmf_request_from_tag(ihost, isci_tmf, tag);
296 if (!ireq)
297 return NULL;
298
299 /* let the core do its construct. */
300 status = sci_task_request_construct(ihost, idev, tag,
301 ireq);
302
303 if (status != SCI_SUCCESS) {
304 dev_warn(&ihost->pdev->dev,
305 "%s: sci_task_request_construct failed - "
306 "status = 0x%x\n",
307 __func__,
308 status);
309 return NULL;
310 }
311
312 /* XXX convert to get this from task->tproto like other drivers */
313 if (dev->dev_type == SAS_END_DEV) {
314 isci_tmf->proto = SAS_PROTOCOL_SSP;
315 status = sci_task_request_construct_ssp(ireq);
316 if (status != SCI_SUCCESS)
317 return NULL;
318 }
319
320 if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
321 isci_tmf->proto = SAS_PROTOCOL_SATA;
322 status = isci_sata_management_task_request_build(ireq);
323
324 if (status != SCI_SUCCESS)
325 return NULL;
326 }
327 return ireq;
328}
329
330static int isci_task_execute_tmf(struct isci_host *ihost,
331 struct isci_remote_device *idev,
332 struct isci_tmf *tmf, unsigned long timeout_ms)
333{
334 DECLARE_COMPLETION_ONSTACK(completion);
335 enum sci_task_status status = SCI_TASK_FAILURE;
336 struct isci_request *ireq;
337 int ret = TMF_RESP_FUNC_FAILED;
338 unsigned long flags;
339 unsigned long timeleft;
340 u16 tag;
341
342 spin_lock_irqsave(&ihost->scic_lock, flags);
343 tag = isci_alloc_tag(ihost);
344 spin_unlock_irqrestore(&ihost->scic_lock, flags);
345
346 if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
347 return ret;
348
349 /* sanity check: return TMF_RESP_FUNC_FAILED
350 * if the device is not there or not ready.
351 */
352 if (!idev ||
353 (!test_bit(IDEV_IO_READY, &idev->flags) &&
354 !test_bit(IDEV_IO_NCQERROR, &idev->flags))) {
355 dev_dbg(&ihost->pdev->dev,
356 "%s: idev = %p not ready (%#lx)\n",
357 __func__,
358 idev, idev ? idev->flags : 0);
359 goto err_tci;
360 } else
361 dev_dbg(&ihost->pdev->dev,
362 "%s: idev = %p\n",
363 __func__, idev);
364
365 /* Assign the pointer to the TMF's completion kernel wait structure. */
366 tmf->complete = &completion;
367
368 ireq = isci_task_request_build(ihost, idev, tag, tmf);
369 if (!ireq)
370 goto err_tci;
371
372 spin_lock_irqsave(&ihost->scic_lock, flags);
373
374 /* start the TMF io. */
375 status = sci_controller_start_task(ihost, idev, ireq);
376
377 if (status != SCI_TASK_SUCCESS) {
378 dev_dbg(&ihost->pdev->dev,
379 "%s: start_io failed - status = 0x%x, request = %p\n",
380 __func__,
381 status,
382 ireq);
383 spin_unlock_irqrestore(&ihost->scic_lock, flags);
384 goto err_tci;
385 }
386
387 if (tmf->cb_state_func != NULL)
388 tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data);
389
390 isci_request_change_state(ireq, started);
391
392 /* add the request to the remote device request list. */
393 list_add(&ireq->dev_node, &idev->reqs_in_process);
394
395 spin_unlock_irqrestore(&ihost->scic_lock, flags);
396
397 /* Wait for the TMF to complete, or a timeout. */
398 timeleft = wait_for_completion_timeout(&completion,
399 msecs_to_jiffies(timeout_ms));
400
401 if (timeleft == 0) {
402 spin_lock_irqsave(&ihost->scic_lock, flags);
403
404 if (tmf->cb_state_func != NULL)
405 tmf->cb_state_func(isci_tmf_timed_out, tmf, tmf->cb_data);
406
407 sci_controller_terminate_request(ihost,
408 idev,
409 ireq);
410
411 spin_unlock_irqrestore(&ihost->scic_lock, flags);
412
413 wait_for_completion(tmf->complete);
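/* This second wait is unbounded; it relies on the terminate
 * request issued above to eventually drive the TMF completion.
 */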
414 }
415
416 isci_print_tmf(tmf);
417
418 if (tmf->status == SCI_SUCCESS)
419 ret = TMF_RESP_FUNC_COMPLETE;
420 else if (tmf->status == SCI_FAILURE_IO_RESPONSE_VALID) {
421 dev_dbg(&ihost->pdev->dev,
422 "%s: tmf.status == "
423 "SCI_FAILURE_IO_RESPONSE_VALID\n",
424 __func__);
425 ret = TMF_RESP_FUNC_COMPLETE;
426 }
427 /* Else - leave the default "failed" status alone. */
428
429 dev_dbg(&ihost->pdev->dev,
430 "%s: completed request = %p\n",
431 __func__,
432 ireq);
433
434 return ret;
435
436 err_tci:
437 spin_lock_irqsave(&ihost->scic_lock, flags);
438 isci_tci_free(ihost, ISCI_TAG_TCI(tag));
439 spin_unlock_irqrestore(&ihost->scic_lock, flags);
440
441 return ret;
442}
443
444static void isci_task_build_tmf(struct isci_tmf *tmf,
445 enum isci_tmf_function_codes code,
446 void (*tmf_sent_cb)(enum isci_tmf_cb_state,
447 struct isci_tmf *,
448 void *),
449 void *cb_data)
450{
451 memset(tmf, 0, sizeof(*tmf));
452
453 tmf->tmf_code = code;
454 tmf->cb_state_func = tmf_sent_cb;
455 tmf->cb_data = cb_data;
456}
457
458static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf,
459 enum isci_tmf_function_codes code,
460 void (*tmf_sent_cb)(enum isci_tmf_cb_state,
461 struct isci_tmf *,
462 void *),
463 struct isci_request *old_request)
464{
465 isci_task_build_tmf(tmf, code, tmf_sent_cb, old_request);
466 tmf->io_tag = old_request->io_tag;
467}
468
469/**
470 * isci_task_validate_request_to_abort() - This function checks the given I/O
471 * against the "started" state. If the request is still "started", its
472 * state is changed to "aborted". NOTE: isci_host->scic_lock MUST BE HELD
473 * BEFORE CALLING THIS FUNCTION.
474 * @isci_request: This parameter specifies the request object to control.
475 * @isci_host: This parameter specifies the ISCI host object
476 * @isci_device: This is the device to which the request is pending.
477 * @aborted_io_completion: This is a completion structure that will be added to
478 * the request in case it is changed to aborting; this completion is
479 * triggered when the request is fully completed.
480 *
481 * Either "started" on successful change of the task status to "aborted", or
482 * "unallocated" if the task cannot be controlled.
483 */
484static enum isci_request_status isci_task_validate_request_to_abort(
485 struct isci_request *isci_request,
486 struct isci_host *isci_host,
487 struct isci_remote_device *isci_device,
488 struct completion *aborted_io_completion)
489{
490 enum isci_request_status old_state = unallocated;
491
492 /* Only abort the task if it's in the
493 * device's request_in_process list
494 */
495 if (isci_request && !list_empty(&isci_request->dev_node)) {
496 old_state = isci_request_change_started_to_aborted(
497 isci_request, aborted_io_completion);
498
499 }
500
501 return old_state;
502}
503
504/**
505* isci_request_cleanup_completed_loiterer() - This function will take care of
506* the final cleanup on any request which has been explicitly terminated.
507* @isci_host: This parameter specifies the ISCI host object
508* @isci_device: This is the device to which the request is pending.
509* @isci_request: This parameter specifies the terminated request object.
510* @task: This parameter is the libsas I/O request.
511*/
512static void isci_request_cleanup_completed_loiterer(
513 struct isci_host *isci_host,
514 struct isci_remote_device *isci_device,
515 struct isci_request *isci_request,
516 struct sas_task *task)
517{
518 unsigned long flags;
519
520 dev_dbg(&isci_host->pdev->dev,
521 "%s: isci_device=%p, request=%p, task=%p\n",
522 __func__, isci_device, isci_request, task);
523
524 if (task != NULL) {
525
526 spin_lock_irqsave(&task->task_state_lock, flags);
527 task->lldd_task = NULL;
528
529 task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
530
531 isci_set_task_doneflags(task);
532
533 /* If this task is not in the abort path, call task_done. */
534 if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
535
536 spin_unlock_irqrestore(&task->task_state_lock, flags);
537 task->task_done(task);
538 } else
539 spin_unlock_irqrestore(&task->task_state_lock, flags);
540 }
541
542 if (isci_request != NULL) {
543 spin_lock_irqsave(&isci_host->scic_lock, flags);
544 list_del_init(&isci_request->dev_node);
545 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
546 }
547}
548
549/**
550 * isci_terminate_request_core() - This function will terminate the given
551 * request, and wait for it to complete. This function must only be called
552 * from a thread that can wait. Note that the request is terminated and
553 * completed (back to the host, if started there).
554 * @ihost: This SCU.
555 * @idev: The target.
556 * @isci_request: The I/O request to be terminated.
557 *
558 */
559static void isci_terminate_request_core(struct isci_host *ihost,
560 struct isci_remote_device *idev,
561 struct isci_request *isci_request)
562{
563 enum sci_status status = SCI_SUCCESS;
564 bool was_terminated = false;
565 bool needs_cleanup_handling = false;
566 enum isci_request_status request_status;
567 unsigned long flags;
568 unsigned long termination_completed = 1;
569 struct completion *io_request_completion;
570 struct sas_task *task;
571
572 dev_dbg(&ihost->pdev->dev,
573 "%s: device = %p; request = %p\n",
574 __func__, idev, isci_request);
575
576 spin_lock_irqsave(&ihost->scic_lock, flags);
577
578 io_request_completion = isci_request->io_request_completion;
579
580 task = (isci_request->ttype == io_task)
581 ? isci_request_access_task(isci_request)
582 : NULL;
583
584 /* Note that we are not going to control
585 * the target to abort the request.
586 */
587 set_bit(IREQ_COMPLETE_IN_TARGET, &isci_request->flags);
588
589 /* Make sure the request wasn't just sitting around signalling
590 * device condition (if the request handle is NULL, then the
591 * request completed but needed additional handling here).
592 */
593 if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
594 was_terminated = true;
595 needs_cleanup_handling = true;
596 status = sci_controller_terminate_request(ihost,
597 idev,
598 isci_request);
599 }
600 spin_unlock_irqrestore(&ihost->scic_lock, flags);
601
602 /*
603 * The only time the request to terminate will
604 * fail is when the io request is completed and
605 * being aborted.
606 */
607 if (status != SCI_SUCCESS) {
608 dev_dbg(&ihost->pdev->dev,
609 "%s: sci_controller_terminate_request"
610 " returned = 0x%x\n",
611 __func__, status);
612
613 isci_request->io_request_completion = NULL;
614
615 } else {
616 if (was_terminated) {
617 dev_dbg(&ihost->pdev->dev,
618 "%s: before completion wait (%p/%p)\n",
619 __func__, isci_request, io_request_completion);
620
621 /* Wait here for the request to complete. */
622 #define TERMINATION_TIMEOUT_MSEC 500
623 termination_completed
624 = wait_for_completion_timeout(
625 io_request_completion,
626 msecs_to_jiffies(TERMINATION_TIMEOUT_MSEC));
627
628 if (!termination_completed) {
629
630 /* The request to terminate has timed out. */
631 spin_lock_irqsave(&ihost->scic_lock,
632 flags);
633
634 /* Check for state changes. */
635 if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
636
637 /* The best we can do is to have the
638 * request die a silent death if it
639 * ever really completes.
640 *
641 * Set the request state to "dead",
642 * and clear the task pointer so that
643 * an actual completion event callback
644 * doesn't do anything.
645 */
646 isci_request->status = dead;
647 isci_request->io_request_completion
648 = NULL;
649
650 if (isci_request->ttype == io_task) {
651
652 /* Break links with the
653 * sas_task.
654 */
655 isci_request->ttype_ptr.io_task_ptr
656 = NULL;
657 }
658 } else
659 termination_completed = 1;
660
661 spin_unlock_irqrestore(&ihost->scic_lock,
662 flags);
663
664 if (!termination_completed) {
665
666 dev_dbg(&ihost->pdev->dev,
667 "%s: *** Timeout waiting for "
668 "termination(%p/%p)\n",
669 __func__, io_request_completion,
670 isci_request);
671
672 /* The request can no longer be referenced
673 * safely since it may go away if the
674 * termination ever really does complete.
675 */
676 isci_request = NULL;
677 }
678 }
679 if (termination_completed)
680 dev_dbg(&ihost->pdev->dev,
681 "%s: after completion wait (%p/%p)\n",
682 __func__, isci_request, io_request_completion);
683 }
684
685 if (termination_completed) {
686
687 isci_request->io_request_completion = NULL;
688
689 /* Peek at the status of the request. This will tell
690 * us if there was special handling on the request such that it
691 * needs to be detached and freed here.
692 */
693 spin_lock_irqsave(&isci_request->state_lock, flags);
694 request_status = isci_request->status;
695
696 if ((isci_request->ttype == io_task) /* TMFs are in their own thread */
697 && ((request_status == aborted)
698 || (request_status == aborting)
699 || (request_status == terminating)
700 || (request_status == completed)
701 || (request_status == dead)
702 )
703 ) {
704
705 /* The completion routine won't free a request in
706 * the aborted/aborting/etc. states, so we do
707 * it here.
708 */
709 needs_cleanup_handling = true;
710 }
711 spin_unlock_irqrestore(&isci_request->state_lock, flags);
712
713 }
714 if (needs_cleanup_handling)
715 isci_request_cleanup_completed_loiterer(
716 ihost, idev, isci_request, task);
717 }
718}
719
720/**
721 * isci_terminate_pending_requests() - This function will change all of the
722 * requests on the given device's state to "aborting", will terminate the
723 * requests, and wait for them to complete. This function must only be
724 * called from a thread that can wait. Note that the requests are all
725 * terminated and completed (back to the host, if started there).
726 * @isci_host: This parameter specifies SCU.
727 * @idev: This parameter specifies the target.
728 *
729 */
730void isci_terminate_pending_requests(struct isci_host *ihost,
731 struct isci_remote_device *idev)
732{
733 struct completion request_completion;
734 enum isci_request_status old_state;
735 unsigned long flags;
736 LIST_HEAD(list);
737
738 spin_lock_irqsave(&ihost->scic_lock, flags);
739 list_splice_init(&idev->reqs_in_process, &list);
740
741 /* assumes that isci_terminate_request_core deletes from the list */
742 while (!list_empty(&list)) {
743 struct isci_request *ireq = list_entry(list.next, typeof(*ireq), dev_node);
744
745 /* Change state to "terminating" if it is currently
746 * "started".
747 */
748 old_state = isci_request_change_started_to_newstate(ireq,
749 &request_completion,
750 terminating);
751 switch (old_state) {
752 case started:
753 case completed:
754 case aborting:
755 break;
756 default:
757 /* termination in progress, or otherwise dispositioned.
758 * We know the request was on 'list' so should be safe
759 * to move it back to reqs_in_process
760 */
761 list_move(&ireq->dev_node, &idev->reqs_in_process);
762 ireq = NULL;
763 break;
764 }
765
766 if (!ireq)
767 continue;
768 spin_unlock_irqrestore(&ihost->scic_lock, flags);
769
770 init_completion(&request_completion);
771
772 dev_dbg(&ihost->pdev->dev,
773 "%s: idev=%p request=%p; task=%p old_state=%d\n",
774 __func__, idev, ireq,
775 ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL,
776 old_state);
777
778 /* If the old_state is started:
779 * This request was not already being aborted. If it had been,
780 * then the aborting I/O (ie. the TMF request) would not be in
781 * the aborting state, and thus would be terminated here. Note
782 * that since the TMF completion's call to the kernel function
783 * "complete()" does not happen until the pending I/O request
784 * terminate fully completes, we do not have to implement a
785 * special wait here for already aborting requests - the
786 * termination of the TMF request will force the request
787 * to finish its already-started terminate.
788 *
789 * If old_state == completed:
790 * This request completed from the SCU hardware perspective
791 * and now just needs cleaning up in terms of freeing the
792 * request and potentially calling up to libsas.
793 *
794 * If old_state == aborting:
795 * This request has already gone through a TMF timeout, but may
796 * not have been terminated; needs cleaning up at least.
797 */
798 isci_terminate_request_core(ihost, idev, ireq);
799 spin_lock_irqsave(&ihost->scic_lock, flags);
800 }
801 spin_unlock_irqrestore(&ihost->scic_lock, flags);
802}
803
804/**
805 * isci_task_send_lu_reset_sas() - This function is called by one of the SAS
806 * Domain Template functions.
807 * @lun: This parameter specifies the lun to be reset.
808 *
809 * status, zero indicates success.
810 */
811static int isci_task_send_lu_reset_sas(
812 struct isci_host *isci_host,
813 struct isci_remote_device *isci_device,
814 u8 *lun)
815{
816 struct isci_tmf tmf;
817 int ret = TMF_RESP_FUNC_FAILED;
818
819 dev_dbg(&isci_host->pdev->dev,
820 "%s: isci_host = %p, isci_device = %p\n",
821 __func__, isci_host, isci_device);
822 /* Send the LUN reset to the target. By the time the call returns,
823 * the TMF has fully executed in the target (in which case the return
824 * value is "TMF_RESP_FUNC_COMPLETE"), or the request timed out or
825 * was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED").
826 */
827 isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset, NULL, NULL);
828
829 #define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */
830 ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS);
831
832 if (ret == TMF_RESP_FUNC_COMPLETE)
833 dev_dbg(&isci_host->pdev->dev,
834 "%s: %p: TMF_LU_RESET passed\n",
835 __func__, isci_device);
836 else
837 dev_dbg(&isci_host->pdev->dev,
838 "%s: %p: TMF_LU_RESET failed (%x)\n",
839 __func__, isci_device, ret);
840
841 return ret;
842}
843
844static int isci_task_send_lu_reset_sata(struct isci_host *ihost,
845 struct isci_remote_device *idev, u8 *lun)
846{
847 int ret = TMF_RESP_FUNC_FAILED;
848 struct isci_tmf tmf;
849
850 /* Send the soft reset to the target */
851 #define ISCI_SRST_TIMEOUT_MS 25000 /* 25 second timeout. */
852 isci_task_build_tmf(&tmf, isci_tmf_sata_srst_high, NULL, NULL);
853
854 ret = isci_task_execute_tmf(ihost, idev, &tmf, ISCI_SRST_TIMEOUT_MS);
855
856 if (ret != TMF_RESP_FUNC_COMPLETE) {
857 dev_dbg(&ihost->pdev->dev,
858 "%s: Assert SRST failed (%p) = %x",
859 __func__, idev, ret);
860
861 /* Return the failure so that the LUN reset is escalated
862 * to a target reset.
863 */
864 }
865 return ret;
866}
867
868/**
869 * isci_task_lu_reset() - This function is one of the SAS Domain Template
870 * functions. This is one of the Task Management functions called by libsas,
871 * to reset the given lun. Note the assumption that while this call is
872 * executing, no I/O will be sent by the host to the device.
873 * @lun: This parameter specifies the lun to be reset.
874 *
875 * status, zero indicates success.
876 */
877int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
878{
879 struct isci_host *isci_host = dev_to_ihost(domain_device);
880 struct isci_remote_device *isci_device;
881 unsigned long flags;
882 int ret;
883
884 spin_lock_irqsave(&isci_host->scic_lock, flags);
885 isci_device = isci_lookup_device(domain_device);
886 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
887
888 dev_dbg(&isci_host->pdev->dev,
889 "%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
890 __func__, domain_device, isci_host, isci_device);
891
892 if (isci_device)
893 set_bit(IDEV_EH, &isci_device->flags);
894
895 /* If there is a device reset pending on any request in the
896 * device's list, fail this LUN reset request in order to
897 * escalate to the device reset.
898 */
899 if (!isci_device ||
900 isci_device_is_reset_pending(isci_host, isci_device)) {
901 dev_dbg(&isci_host->pdev->dev,
902 "%s: No dev (%p), or "
903 "RESET PENDING: domain_device=%p\n",
904 __func__, isci_device, domain_device);
905 ret = TMF_RESP_FUNC_FAILED;
906 goto out;
907 }
908
909 /* Send the task management part of the reset. */
910 if (sas_protocol_ata(domain_device->tproto)) {
911 ret = isci_task_send_lu_reset_sata(isci_host, isci_device, lun);
912 } else
913 ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun);
914
915 /* If the LUN reset worked, all the I/O can now be terminated. */
916 if (ret == TMF_RESP_FUNC_COMPLETE)
917 /* Terminate all I/O now. */
918 isci_terminate_pending_requests(isci_host,
919 isci_device);
920
921 out:
922 isci_put_device(isci_device);
923 return ret;
924}
925
926
927/* int (*lldd_clear_nexus_port)(struct asd_sas_port *); */
928int isci_task_clear_nexus_port(struct asd_sas_port *port)
929{
930 return TMF_RESP_FUNC_FAILED;
931}
932
933
934
935int isci_task_clear_nexus_ha(struct sas_ha_struct *ha)
936{
937 return TMF_RESP_FUNC_FAILED;
938}
939
940/* Task Management Functions. Must be called from process context. */
941
942/**
943 * isci_abort_task_process_cb() - This is a helper function for the abort task
944 * TMF command. It manages the request state with respect to the successful
945 * transmission / completion of the abort task request.
946 * @cb_state: This parameter specifies when this function was called - after
947 * the TMF request has been started, or after it has timed out.
948 * @tmf: This parameter specifies the TMF in progress.
949 *
950 *
951 */
952static void isci_abort_task_process_cb(
953 enum isci_tmf_cb_state cb_state,
954 struct isci_tmf *tmf,
955 void *cb_data)
956{
957 struct isci_request *old_request;
958
959 old_request = (struct isci_request *)cb_data;
960
961 dev_dbg(&old_request->isci_host->pdev->dev,
962 "%s: tmf=%p, old_request=%p\n",
963 __func__, tmf, old_request);
964
965 switch (cb_state) {
966
967 case isci_tmf_started:
968 /* The TMF has been started. Nothing to do here, since the
969 * request state was already set to "aborted" by the abort
970 * task function.
971 */
972 if ((old_request->status != aborted)
973 && (old_request->status != completed))
974 dev_dbg(&old_request->isci_host->pdev->dev,
975 "%s: Bad request status (%d): tmf=%p, old_request=%p\n",
976 __func__, old_request->status, tmf, old_request);
977 break;
978
979 case isci_tmf_timed_out:
980
981 /* Set the task's state to "aborting", since the abort task
982 * function thread set it to "aborted" (above) in anticipation
983 * of the task management request working correctly. Since the
984 * timeout has now fired, the TMF request failed. We set the
985 * state such that the request completion will indicate the
986 * device is no longer present.
987 */
988 isci_request_change_state(old_request, aborting);
989 break;
990
991 default:
992 dev_dbg(&old_request->isci_host->pdev->dev,
993 "%s: Bad cb_state (%d): tmf=%p, old_request=%p\n",
994 __func__, cb_state, tmf, old_request);
995 break;
996 }
997}
998
999/**
1000 * isci_task_abort_task() - This function is one of the SAS Domain Template
1001 * functions. This function is called by libsas to abort a specified task.
1002 * @task: This parameter specifies the SAS task to abort.
1003 *
1004 * status, zero indicates success.
1005 */
1006int isci_task_abort_task(struct sas_task *task)
1007{
1008 struct isci_host *isci_host = dev_to_ihost(task->dev);
1009 DECLARE_COMPLETION_ONSTACK(aborted_io_completion);
1010 struct isci_request *old_request = NULL;
1011 enum isci_request_status old_state;
1012 struct isci_remote_device *isci_device = NULL;
1013 struct isci_tmf tmf;
1014 int ret = TMF_RESP_FUNC_FAILED;
1015 unsigned long flags;
1016 bool any_dev_reset = false;
1017
1018 /* Get the isci_request reference from the task. Note that
1019 * this check does not depend on the pending request list
1020 * in the device, because tasks driving resets may land here
1021 * after completion in the core.
1022 */
1023 spin_lock_irqsave(&isci_host->scic_lock, flags);
1024 spin_lock(&task->task_state_lock);
1025
1026 old_request = task->lldd_task;
1027
1028 /* If task is already done, the request isn't valid */
1029 if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
1030 (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
1031 old_request)
1032 isci_device = isci_lookup_device(task->dev);
1033
1034 spin_unlock(&task->task_state_lock);
1035 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1036
1037 dev_dbg(&isci_host->pdev->dev,
1038 "%s: task = %p\n", __func__, task);
1039
1040 if (!isci_device || !old_request)
1041 goto out;
1042
1043 set_bit(IDEV_EH, &isci_device->flags);
1044
1045 /* This version of the driver will fail abort requests for
1046 * SATA/STP. Failing the abort request this way will cause the
1047 * SCSI error handler thread to escalate to a LUN reset.
1048 */
1049 if (sas_protocol_ata(task->task_proto)) {
1050 dev_dbg(&isci_host->pdev->dev,
1051 " task %p is for a STP/SATA device;"
1052 " returning TMF_RESP_FUNC_FAILED\n"
1053 " to cause a LUN reset...\n", task);
1054 goto out;
1055 }
1056
1057 dev_dbg(&isci_host->pdev->dev,
1058 "%s: old_request == %p\n", __func__, old_request);
1059
1060 any_dev_reset = isci_device_is_reset_pending(isci_host, isci_device);
1061
1062 spin_lock_irqsave(&task->task_state_lock, flags);
1063
1064 any_dev_reset = any_dev_reset || (task->task_state_flags & SAS_TASK_NEED_DEV_RESET);
1065
1066 /* If the extraction of the request reference from the task
1067 * failed, then the request has been completed (or if there is a
1068 * pending reset then this abort request function must be failed
1069 * in order to escalate to the target reset).
1070 */
1071 if ((old_request == NULL) || any_dev_reset) {
1072
1073 /* If the device reset task flag is set, fail the task
1074 * management request. Otherwise, the original request
1075 * has completed.
1076 */
1077 if (any_dev_reset) {
1078
1079 /* Turn off the task's DONE to make sure this
1080 * task is escalated to a target reset.
1081 */
1082 task->task_state_flags &= ~SAS_TASK_STATE_DONE;
1083
1084 /* Make the reset happen as soon as possible. */
1085 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
1086
1087 spin_unlock_irqrestore(&task->task_state_lock, flags);
1088
1089 /* Fail the task management request in order to
1090 * escalate to the target reset.
1091 */
1092 ret = TMF_RESP_FUNC_FAILED;
1093
1094 dev_dbg(&isci_host->pdev->dev,
1095 "%s: Failing task abort in order to "
1096 "escalate to target reset because\n"
1097 "SAS_TASK_NEED_DEV_RESET is set for "
1098 "task %p on dev %p\n",
1099 __func__, task, isci_device);
1100
1101
1102 } else {
1103 /* The request has already completed and there
1104 * is nothing to do here other than to set the task
1105 * done bit, and indicate that the task abort function
1106 * was successful.
1107 */
1108 isci_set_task_doneflags(task);
1109
1110 spin_unlock_irqrestore(&task->task_state_lock, flags);
1111
1112 ret = TMF_RESP_FUNC_COMPLETE;
1113
1114 dev_dbg(&isci_host->pdev->dev,
1115 "%s: abort task not needed for %p\n",
1116 __func__, task);
1117 }
1118 goto out;
1119 } else {
1120 spin_unlock_irqrestore(&task->task_state_lock, flags);
1121 }
1122
1123 spin_lock_irqsave(&isci_host->scic_lock, flags);
1124
1125 /* Check the request status and change to "aborted" if currently
1126 * "starting"; if true then set the I/O kernel completion
1127 * struct that will be triggered when the request completes.
1128 */
1129 old_state = isci_task_validate_request_to_abort(
1130 old_request, isci_host, isci_device,
1131 &aborted_io_completion);
1132 if ((old_state != started) &&
1133 (old_state != completed) &&
1134 (old_state != aborting)) {
1135
1136 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1137
1138 /* The request was already being handled by someone else (because
1139 * they got to set the state away from started).
1140 */
1141 dev_dbg(&isci_host->pdev->dev,
1142 "%s: device = %p; old_request %p already being aborted\n",
1143 __func__,
1144 isci_device, old_request);
1145 ret = TMF_RESP_FUNC_COMPLETE;
1146 goto out;
1147 }
1148 if (task->task_proto == SAS_PROTOCOL_SMP ||
1149 test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
1150
1151 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1152
1153 dev_dbg(&isci_host->pdev->dev,
1154 "%s: SMP request (%d)"
1155 " or complete_in_target (%d), thus no TMF\n",
1156 __func__, (task->task_proto == SAS_PROTOCOL_SMP),
1157 test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags));
1158
1159 /* Set the state on the task. */
1160 isci_task_all_done(task);
1161
1162 ret = TMF_RESP_FUNC_COMPLETE;
1163
1164 /* Stopping and SMP devices are not sent a TMF, and are not
1165 * reset, but the outstanding I/O request is terminated below.
1166 */
1167 } else {
1168 /* Fill in the tmf structure */
1169 isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
1170 isci_abort_task_process_cb,
1171 old_request);
1172
1173 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1174
1175 #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* half second timeout. */
1176 ret = isci_task_execute_tmf(isci_host, isci_device, &tmf,
1177 ISCI_ABORT_TASK_TIMEOUT_MS);
1178
1179 if (ret != TMF_RESP_FUNC_COMPLETE)
1180 dev_dbg(&isci_host->pdev->dev,
1181 "%s: isci_task_execute_tmf failed\n",
1182 __func__);
1183 }
1184 if (ret == TMF_RESP_FUNC_COMPLETE) {
1185 set_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags);
1186
1187 /* Clean up the request on our side, and wait for the aborted
1188 * I/O to complete.
1189 */
1190 isci_terminate_request_core(isci_host, isci_device, old_request);
1191 }
1192
1193 /* Make sure we do not leave a reference to aborted_io_completion */
1194 old_request->io_request_completion = NULL;
1195 out:
1196 isci_put_device(isci_device);
1197 return ret;
1198}
1199
1200/**
1201 * isci_task_abort_task_set() - This function is one of the SAS Domain Template
1202 * functions. This is one of the Task Management functions called by libsas,
1203 * to abort all task for the given lun.
1204 * @d_device: This parameter specifies the domain device associated with this
1205 * request.
1206 * @lun: This parameter specifies the lun associated with this request.
1207 *
1208 * status, zero indicates success.
1209 */
1210int isci_task_abort_task_set(
1211 struct domain_device *d_device,
1212 u8 *lun)
1213{
1214 return TMF_RESP_FUNC_FAILED;
1215}
1216
1217
1218/**
1219 * isci_task_clear_aca() - This function is one of the SAS Domain Template
1220 * functions. This is one of the Task Management functions called by libsas.
1221 * @d_device: This parameter specifies the domain device associated with this
1222 * request.
1223 * @lun: This parameter specifies the lun associated with this request.
1224 *
1225 * status, zero indicates success.
1226 */
1227int isci_task_clear_aca(
1228 struct domain_device *d_device,
1229 u8 *lun)
1230{
1231 return TMF_RESP_FUNC_FAILED;
1232}
1233
1234
1235
1236/**
1237 * isci_task_clear_task_set() - This function is one of the SAS Domain Template
1238 * functions. This is one of the Task Management functions called by libsas.
1239 * @d_device: This parameter specifies the domain device associated with this
1240 * request.
1241 * @lun: This parameter specifies the lun associated with this request.
1242 *
1243 * status, zero indicates success.
1244 */
1245int isci_task_clear_task_set(
1246 struct domain_device *d_device,
1247 u8 *lun)
1248{
1249 return TMF_RESP_FUNC_FAILED;
1250}
1251
1252
1253/**
1254 * isci_task_query_task() - This function is implemented to cause libsas to
1255 * correctly escalate the failed abort to a LUN or target reset (this is
1256 * because the libsas function sas_scsi_find_task does not correctly interpret
1257 * all return codes from the abort task call). When TMF_RESP_FUNC_SUCC is
1258 * returned, libsas turns this into a LUN reset; when FUNC_FAILED is
1259 * returned, libsas will turn this into a target reset.
1260 * @task: This parameter specifies the sas task being queried.
1261 * @lun: This parameter specifies the lun associated with this request.
1262 *
1263 * status, zero indicates success.
1264 */
1265int isci_task_query_task(
1266 struct sas_task *task)
1267{
1268 /* See if there is a pending device reset for this device. */
1269 if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
1270 return TMF_RESP_FUNC_FAILED;
1271 else
1272 return TMF_RESP_FUNC_SUCC;
1273}
1274
1275/*
1276 * isci_task_request_complete() - This function is called by the sci core when
1277 * a task request completes.
1278 * @ihost: This parameter specifies the ISCI host object
1279 * @ireq: This parameter is the completed isci_request object.
1280 * @completion_status: This parameter specifies the completion status from the
1281 * sci core.
1282 *
1283 * none.
1284 */
1285void
1286isci_task_request_complete(struct isci_host *ihost,
1287 struct isci_request *ireq,
1288 enum sci_task_status completion_status)
1289{
1290 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
1291 struct completion *tmf_complete;
1292
1293 dev_dbg(&ihost->pdev->dev,
1294 "%s: request = %p, status=%d\n",
1295 __func__, ireq, completion_status);
1296
1297 isci_request_change_state(ireq, completed);
1298
1299 tmf->status = completion_status;
1300 set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags);
1301
1302 if (tmf->proto == SAS_PROTOCOL_SSP) {
1303 memcpy(&tmf->resp.resp_iu,
1304 &ireq->ssp.rsp,
1305 SSP_RESP_IU_MAX_SIZE);
1306 } else if (tmf->proto == SAS_PROTOCOL_SATA) {
1307 memcpy(&tmf->resp.d2h_fis,
1308 &ireq->stp.rsp,
1309 sizeof(struct dev_to_host_fis));
1310 }
1311
1312 /* PRINT_TMF( ((struct isci_tmf *)request->task)); */
1313 tmf_complete = tmf->complete;
1314
1315 sci_controller_complete_io(ihost, ireq->target_device, ireq);
1316 /* set the 'terminated' flag to make sure the request cannot be
1317 * terminated or completed again.
1318 */
1319 set_bit(IREQ_TERMINATED, &ireq->flags);
1320
1321 isci_request_change_state(ireq, unallocated);
1322 list_del_init(&ireq->dev_node);
1323
1324 /* The task management part completes last. */
1325 complete(tmf_complete);
1326}
1327
1328static void isci_smp_task_timedout(unsigned long _task)
1329{
1330 struct sas_task *task = (void *) _task;
1331 unsigned long flags;
1332
1333 spin_lock_irqsave(&task->task_state_lock, flags);
1334 if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
1335 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
1336 spin_unlock_irqrestore(&task->task_state_lock, flags);
1337
1338 complete(&task->completion);
1339}
1340
1341static void isci_smp_task_done(struct sas_task *task)
1342{
1343 if (!del_timer(&task->timer))
1344 return;
1345 complete(&task->completion);
1346}
1347
1348static struct sas_task *isci_alloc_task(void)
1349{
1350 struct sas_task *task = kzalloc(sizeof(*task), GFP_KERNEL);
1351
1352 if (task) {
1353 INIT_LIST_HEAD(&task->list);
1354 spin_lock_init(&task->task_state_lock);
1355 task->task_state_flags = SAS_TASK_STATE_PENDING;
1356 init_timer(&task->timer);
1357 init_completion(&task->completion);
1358 }
1359
1360 return task;
1361}
1362
1363static void isci_free_task(struct isci_host *ihost, struct sas_task *task)
1364{
1365 if (task) {
1366 BUG_ON(!list_empty(&task->list));
1367 kfree(task);
1368 }
1369}
1370
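/* Synchronously execute an SMP request, retrying up to three times on
 * target-reported errors. Returns 0 on success, the residual byte count
 * on data underrun, or a negative errno (-ENOMEM, -ECOMM, -EMSGSIZE) on
 * failure.
 */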
1371static int isci_smp_execute_task(struct isci_host *ihost,
1372 struct domain_device *dev, void *req,
1373 int req_size, void *resp, int resp_size)
1374{
1375 int res, retry;
1376 struct sas_task *task = NULL;
1377
1378 for (retry = 0; retry < 3; retry++) {
1379 task = isci_alloc_task();
1380 if (!task)
1381 return -ENOMEM;
1382
1383 task->dev = dev;
1384 task->task_proto = dev->tproto;
1385 sg_init_one(&task->smp_task.smp_req, req, req_size);
1386 sg_init_one(&task->smp_task.smp_resp, resp, resp_size);
1387
1388 task->task_done = isci_smp_task_done;
1389
1390 task->timer.data = (unsigned long) task;
1391 task->timer.function = isci_smp_task_timedout;
1392 task->timer.expires = jiffies + 10*HZ;
1393 add_timer(&task->timer);
1394
1395 res = isci_task_execute_task(task, 1, GFP_KERNEL);
1396
1397 if (res) {
1398 del_timer(&task->timer);
1399 dev_dbg(&ihost->pdev->dev,
1400 "%s: executing SMP task failed:%d\n",
1401 __func__, res);
1402 goto ex_err;
1403 }
1404
1405 wait_for_completion(&task->completion);
1406 res = -ECOMM;
1407 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1408 dev_dbg(&ihost->pdev->dev,
1409 "%s: smp task timed out or aborted\n",
1410 __func__);
1411 isci_task_abort_task(task);
1412 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
1413 dev_dbg(&ihost->pdev->dev,
1414 "%s: SMP task aborted and not done\n",
1415 __func__);
1416 goto ex_err;
1417 }
1418 }
1419 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1420 task->task_status.stat == SAM_STAT_GOOD) {
1421 res = 0;
1422 break;
1423 }
1424 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1425 task->task_status.stat == SAS_DATA_UNDERRUN) {
1426 /* no error, but return the number of bytes of
1427 * underrun */
1428 res = task->task_status.residual;
1429 break;
1430 }
1431 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1432 task->task_status.stat == SAS_DATA_OVERRUN) {
1433 res = -EMSGSIZE;
1434 break;
1435 } else {
1436 dev_dbg(&ihost->pdev->dev,
1437 "%s: task to dev %016llx response: 0x%x "
1438 "status 0x%x\n", __func__,
1439 SAS_ADDR(dev->sas_addr),
1440 task->task_status.resp,
1441 task->task_status.stat);
1442 isci_free_task(ihost, task);
1443 task = NULL;
1444 }
1445 }
1446ex_err:
1447 BUG_ON(retry == 3 && task != NULL);
1448 isci_free_task(ihost, task);
1449 return res;
1450}
1451
1452#define DISCOVER_REQ_SIZE 16
1453#define DISCOVER_RESP_SIZE 56
1454
1455int isci_smp_get_phy_attached_dev_type(struct isci_host *ihost,
1456 struct domain_device *dev,
1457 int phy_id, int *adt)
1458{
1459 struct smp_resp *disc_resp;
1460 u8 *disc_req;
1461 int res;
1462
1463 disc_resp = kzalloc(DISCOVER_RESP_SIZE, GFP_KERNEL);
1464 if (!disc_resp)
1465 return -ENOMEM;
1466
1467 disc_req = kzalloc(DISCOVER_REQ_SIZE, GFP_KERNEL);
1468 if (disc_req) {
1469 disc_req[0] = SMP_REQUEST;
1470 disc_req[1] = SMP_DISCOVER;
1471 disc_req[9] = phy_id;
1472 } else {
1473 kfree(disc_resp);
1474 return -ENOMEM;
1475 }
1476 res = isci_smp_execute_task(ihost, dev, disc_req, DISCOVER_REQ_SIZE,
1477 disc_resp, DISCOVER_RESP_SIZE);
1478 if (!res) {
1479 if (disc_resp->result != SMP_RESP_FUNC_ACC)
1480 res = disc_resp->result;
1481 else
1482 *adt = disc_resp->disc.attached_dev_type;
1483 }
1484 kfree(disc_req);
1485 kfree(disc_resp);
1486
1487 return res;
1488}
1489
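/* Poll the expander with SMP DISCOVER until the attached device is seen
 * to drop away and then reappear, or until the 25 second deadline
 * (STP_WAIT_MSECS) expires. Polling stops early if the port reports
 * link down (IPORT_BCN_BLOCKED cleared).
 */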
1490static void isci_wait_for_smp_phy_reset(struct isci_remote_device *idev, int phy_num)
1491{
1492 struct domain_device *dev = idev->domain_dev;
1493 struct isci_port *iport = idev->isci_port;
1494 struct isci_host *ihost = iport->isci_host;
1495 int res, iteration = 0, attached_device_type;
1496 #define STP_WAIT_MSECS 25000
1497 unsigned long tmo = msecs_to_jiffies(STP_WAIT_MSECS);
1498 unsigned long deadline = jiffies + tmo;
1499 enum {
1500 SMP_PHYWAIT_PHYDOWN,
1501 SMP_PHYWAIT_PHYUP,
1502 SMP_PHYWAIT_DONE
1503 } phy_state = SMP_PHYWAIT_PHYDOWN;
1504
1505 /* While there is time, wait for the phy to go away and come back */
1506 while (time_is_after_jiffies(deadline) && phy_state != SMP_PHYWAIT_DONE) {
1507 int event = atomic_read(&iport->event);
1508
1509 ++iteration;
1510
1511 tmo = wait_event_timeout(ihost->eventq,
1512 event != atomic_read(&iport->event) ||
1513 !test_bit(IPORT_BCN_BLOCKED, &iport->flags),
1514 tmo);
1515 /* link down, stop polling */
1516 if (!test_bit(IPORT_BCN_BLOCKED, &iport->flags))
1517 break;
1518
1519 dev_dbg(&ihost->pdev->dev,
1520 "%s: iport %p, iteration %d,"
1521 " phase %d: time_remaining %lu, bcns = %d\n",
1522 __func__, iport, iteration, phy_state,
1523 tmo, test_bit(IPORT_BCN_PENDING, &iport->flags));
1524
1525 res = isci_smp_get_phy_attached_dev_type(ihost, dev, phy_num,
1526 &attached_device_type);
1527 tmo = deadline - jiffies;
1528
1529 if (res) {
1530 dev_dbg(&ihost->pdev->dev,
1531 "%s: iteration %d, phase %d:"
1532 " SMP error=%d, time_remaining=%lu\n",
1533 __func__, iteration, phy_state, res, tmo);
1534 break;
1535 }
1536 dev_dbg(&ihost->pdev->dev,
1537 "%s: iport %p, iteration %d,"
1538 " phase %d: time_remaining %lu, bcns = %d, "
1539 "attdevtype = %x\n",
1540 __func__, iport, iteration, phy_state,
1541 tmo, test_bit(IPORT_BCN_PENDING, &iport->flags),
1542 attached_device_type);
1543
1544 switch (phy_state) {
1545 case SMP_PHYWAIT_PHYDOWN:
1546 /* Has the device gone away? */
1547 if (!attached_device_type)
1548 phy_state = SMP_PHYWAIT_PHYUP;
1549
1550 break;
1551
1552 case SMP_PHYWAIT_PHYUP:
1553 /* Has the device come back? */
1554 if (attached_device_type)
1555 phy_state = SMP_PHYWAIT_DONE;
1556 break;
1557
1558 case SMP_PHYWAIT_DONE:
1559 break;
1560 }
1561
1562 }
1563 dev_dbg(&ihost->pdev->dev, "%s: done\n", __func__);
1564}
1565
1566static int isci_reset_device(struct isci_host *ihost,
1567 struct isci_remote_device *idev)
1568{
1569 struct sas_phy *phy = sas_find_local_phy(idev->domain_dev);
1570 struct isci_port *iport = idev->isci_port;
1571 enum sci_status status;
1572 unsigned long flags;
1573 int rc;
1574
1575 dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);
1576
1577 spin_lock_irqsave(&ihost->scic_lock, flags);
1578 status = sci_remote_device_reset(idev);
1579 if (status != SCI_SUCCESS) {
1580 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1581
1582 dev_dbg(&ihost->pdev->dev,
1583 "%s: sci_remote_device_reset(%p) returned %d!\n",
1584 __func__, idev, status);
1585
1586 return TMF_RESP_FUNC_FAILED;
1587 }
1588 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1589
1590 /* Make sure all pending requests are able to be fully terminated. */
1591 isci_device_clear_reset_pending(ihost, idev);
1592
1593 /* If this is a device on an expander, disable BCN processing. */
1594 if (!scsi_is_sas_phy_local(phy))
1595 set_bit(IPORT_BCN_BLOCKED, &iport->flags);
1596
1597 rc = sas_phy_reset(phy, true);
1598
1599 /* Terminate in-progress I/O now. */
1600 isci_remote_device_nuke_requests(ihost, idev);
1601
1602 /* Since all pending TCs have been cleaned, resume the RNC. */
1603 spin_lock_irqsave(&ihost->scic_lock, flags);
1604 status = sci_remote_device_reset_complete(idev);
1605 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1606
1607 /* If this is a device on an expander, bring the phy back up. */
1608 if (!scsi_is_sas_phy_local(phy)) {
1609 /* A phy reset will cause the device to go away then reappear.
1610 * Since libsas will take action on incoming BCNs (eg. remove
1611 * a device going through an SMP phy-control driven reset),
1612 * we need to wait until the phy comes back up before letting
1613 * discovery proceed in libsas.
1614 */
1615 isci_wait_for_smp_phy_reset(idev, phy->number);
1616
1617 spin_lock_irqsave(&ihost->scic_lock, flags);
1618 isci_port_bcn_enable(ihost, idev->isci_port);
1619 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1620 }
1621
1622 if (status != SCI_SUCCESS) {
1623 dev_dbg(&ihost->pdev->dev,
1624 "%s: sci_remote_device_reset_complete(%p) "
1625 "returned %d!\n", __func__, idev, status);
1626 }
1627
1628 dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev);
1629
1630 return rc;
1631}
1632
1633int isci_task_I_T_nexus_reset(struct domain_device *dev)
1634{
1635 struct isci_host *ihost = dev_to_ihost(dev);
1636 struct isci_remote_device *idev;
1637 unsigned long flags;
1638 int ret;
1639
1640 spin_lock_irqsave(&ihost->scic_lock, flags);
1641 idev = isci_lookup_device(dev);
1642 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1643
1644 if (!idev || !test_bit(IDEV_EH, &idev->flags)) {
1645 ret = TMF_RESP_FUNC_COMPLETE;
1646 goto out;
1647 }
1648
1649 ret = isci_reset_device(ihost, idev);
1650 out:
1651 isci_put_device(idev);
1652 return ret;
1653}
1654
1655int isci_bus_reset_handler(struct scsi_cmnd *cmd)
1656{
1657 struct domain_device *dev = sdev_to_domain_dev(cmd->device);
1658 struct isci_host *ihost = dev_to_ihost(dev);
1659 struct isci_remote_device *idev;
1660 unsigned long flags;
1661 int ret;
1662
1663 spin_lock_irqsave(&ihost->scic_lock, flags);
1664 idev = isci_lookup_device(dev);
1665 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1666
1667 if (!idev) {
1668 ret = TMF_RESP_FUNC_COMPLETE;
1669 goto out;
1670 }
1671
1672 ret = isci_reset_device(ihost, idev);
1673 out:
1674 isci_put_device(idev);
1675 return ret;
1676}
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
new file mode 100644
index 000000000000..4a7fa90287ef
--- /dev/null
+++ b/drivers/scsi/isci/task.h
@@ -0,0 +1,367 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55#ifndef _ISCI_TASK_H_
56#define _ISCI_TASK_H_
57
58#include <scsi/sas_ata.h>
59#include "host.h"
60
61struct isci_request;
62
63/**
64 * enum isci_tmf_cb_state - This enum defines the possible states in which the
65 * TMF callback function is invoked during the TMF execution process.
66 *
67 *
68 */
69enum isci_tmf_cb_state {
70
71 isci_tmf_init_state = 0,
72 isci_tmf_started,
73 isci_tmf_timed_out
74};
75
76/**
77 * enum isci_tmf_function_codes - This enum defines the possible preparations
78 * of task management requests.
79 *
80 *
81 */
82enum isci_tmf_function_codes {
83
84 isci_tmf_func_none = 0,
85 isci_tmf_ssp_task_abort = TMF_ABORT_TASK,
86 isci_tmf_ssp_lun_reset = TMF_LU_RESET,
87 isci_tmf_sata_srst_high = TMF_LU_RESET + 0x100, /* Non SCSI */
88 isci_tmf_sata_srst_low = TMF_LU_RESET + 0x101 /* Non SCSI */
89};
90/**
91 * struct isci_tmf - This class represents the task management object which
92 * acts as an interface to libsas for processing task management requests
93 *
94 *
95 */
96struct isci_tmf {
97
98 struct completion *complete;
99 enum sas_protocol proto;
100 union {
101 struct ssp_response_iu resp_iu;
102 struct dev_to_host_fis d2h_fis;
103 u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
104 } resp;
105 unsigned char lun[8];
106 u16 io_tag;
107 struct isci_remote_device *device;
108 enum isci_tmf_function_codes tmf_code;
109 int status;
110
111 /* The optional callback function allows the user process to
112 * track the TMF transmit / timeout conditions.
113 */
114 void (*cb_state_func)(
115 enum isci_tmf_cb_state,
116 struct isci_tmf *, void *);
117 void *cb_data;
118
119};
120
121static inline void isci_print_tmf(struct isci_tmf *tmf)
122{
123 if (SAS_PROTOCOL_SATA == tmf->proto)
124 dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev,
125 "%s: status = %x\n"
126 "tmf->resp.d2h_fis.status = %x\n"
127 "tmf->resp.d2h_fis.error = %x\n",
128 __func__,
129 tmf->status,
130 tmf->resp.d2h_fis.status,
131 tmf->resp.d2h_fis.error);
132 else
133 dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev,
134 "%s: status = %x\n"
135 "tmf->resp.resp_iu.datapres = %x\n"
136 "tmf->resp.resp_iu.status = %x\n"
137 "tmf->resp.resp_iu.response_data_len = %x\n"
138 "tmf->resp.resp_iu.resp_data[0] = %x\n"
139 "tmf->resp.resp_iu.resp_data[1] = %x\n"
140 "tmf->resp.resp_iu.resp_data[2] = %x\n"
141 "tmf->resp.resp_iu.resp_data[3] = %x\n",
142 __func__,
143 tmf->status,
144 tmf->resp.resp_iu.datapres,
145 tmf->resp.resp_iu.status,
146 be32_to_cpu(tmf->resp.resp_iu.response_data_len),
147 tmf->resp.resp_iu.resp_data[0],
148 tmf->resp.resp_iu.resp_data[1],
149 tmf->resp.resp_iu.resp_data[2],
150 tmf->resp.resp_iu.resp_data[3]);
151}
152
153
154int isci_task_execute_task(
155 struct sas_task *task,
156 int num,
157 gfp_t gfp_flags);
158
159int isci_task_abort_task(
160 struct sas_task *task);
161
162int isci_task_abort_task_set(
163 struct domain_device *d_device,
164 u8 *lun);
165
166int isci_task_clear_aca(
167 struct domain_device *d_device,
168 u8 *lun);
169
170int isci_task_clear_task_set(
171 struct domain_device *d_device,
172 u8 *lun);
173
174int isci_task_query_task(
175 struct sas_task *task);
176
177int isci_task_lu_reset(
178 struct domain_device *d_device,
179 u8 *lun);
180
181int isci_task_clear_nexus_port(
182 struct asd_sas_port *port);
183
184int isci_task_clear_nexus_ha(
185 struct sas_ha_struct *ha);
186
187int isci_task_I_T_nexus_reset(
188 struct domain_device *d_device);
189
190void isci_task_request_complete(
191 struct isci_host *isci_host,
192 struct isci_request *request,
193 enum sci_task_status completion_status);
194
195u16 isci_task_ssp_request_get_io_tag_to_manage(
196 struct isci_request *request);
197
198u8 isci_task_ssp_request_get_function(
199 struct isci_request *request);
200
201
202void *isci_task_ssp_request_get_response_data_address(
203 struct isci_request *request);
204
205u32 isci_task_ssp_request_get_response_data_length(
206 struct isci_request *request);
207
208int isci_queuecommand(
209 struct scsi_cmnd *scsi_cmd,
210 void (*donefunc)(struct scsi_cmnd *));
211
212int isci_bus_reset_handler(struct scsi_cmnd *cmd);
213
214/**
215 * enum isci_completion_selection - This enum defines the possible actions to
216 * take with respect to a given request's notification back to libsas.
217 *
218 *
219 */
220enum isci_completion_selection {
221
222 isci_perform_normal_io_completion, /* Normal notify (task_done) */
223 isci_perform_aborted_io_completion, /* No notification. */
224 isci_perform_error_io_completion /* Use sas_task_abort */
225};
226
227static inline void isci_set_task_doneflags(
228 struct sas_task *task)
229{
230 /* Since no further action will be taken on this task,
231 * make sure to mark it complete from the lldd perspective.
232 */
233 task->task_state_flags |= SAS_TASK_STATE_DONE;
234 task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
235 task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
236}
237/**
238 * isci_task_all_done() - This function clears the task bits to indicate the
239 * LLDD is done with the task.
240 *
241 *
242 */
243static inline void isci_task_all_done(
244 struct sas_task *task)
245{
246 unsigned long flags;
247
248 /* Since no further action will be taken on this task,
249 * make sure to mark it complete from the lldd perspective.
250 */
251 spin_lock_irqsave(&task->task_state_lock, flags);
252 isci_set_task_doneflags(task);
253 spin_unlock_irqrestore(&task->task_state_lock, flags);
254}
255
256/**
257 * isci_task_set_completion_status() - This function sets the completion status
258 * for the request.
259 * @task: This parameter is the completed request.
260 * @response: This parameter is the response code for the completed task.
261 * @status: This parameter is the status code for the completed task.
262 *
263 * Return: The new notification mode for the request.
264 */
265static inline enum isci_completion_selection
266isci_task_set_completion_status(
267 struct sas_task *task,
268 enum service_response response,
269 enum exec_status status,
270 enum isci_completion_selection task_notification_selection)
271{
272 unsigned long flags;
273
274 spin_lock_irqsave(&task->task_state_lock, flags);
275
276 /* If a device reset is being indicated, make sure the I/O
277 * is in the error path.
278 */
279 if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) {
280 /* Fail the I/O to make sure it goes into the error path. */
281 response = SAS_TASK_UNDELIVERED;
282 status = SAM_STAT_TASK_ABORTED;
283
284 task_notification_selection = isci_perform_error_io_completion;
285 }
286 task->task_status.resp = response;
287 task->task_status.stat = status;
288
289 switch (task_notification_selection) {
290
291 case isci_perform_error_io_completion:
292
293 if (task->task_proto == SAS_PROTOCOL_SMP) {
294 /* There is no error escalation in the SMP case.
295 * Convert to a normal completion to avoid the
296 * timeout in the discovery path and to let the
297 * next action take place quickly.
298 */
299 task_notification_selection
300 = isci_perform_normal_io_completion;
301
302 /* Fall through to the normal case... */
303 } else {
304 /* Use sas_task_abort */
305 /* Leave SAS_TASK_STATE_DONE clear
306 * Leave SAS_TASK_AT_INITIATOR set.
307 */
308 break;
309 }
310
311 case isci_perform_aborted_io_completion:
312 /* This path can occur with task-managed requests as well as
313 * requests terminated because of LUN or device resets.
314 */
315 /* Fall through to the normal case... */
316 case isci_perform_normal_io_completion:
317 /* Normal notification (task_done) */
318 isci_set_task_doneflags(task);
319 break;
320 default:
321 WARN_ONCE(1, "unknown task_notification_selection: %d\n",
322 task_notification_selection);
323 break;
324 }
325
326 spin_unlock_irqrestore(&task->task_state_lock, flags);
327
328 return task_notification_selection;
329
330}
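
For reference, a completion site would drive this helper roughly as follows. This is a minimal sketch, not lifted from the driver; the wrapper name is an assumption, while the enum values and libsas calls are the ones used above.

	/* Hypothetical completion-path sketch: pick the notification mode,
	 * then act on it.
	 */
	static void example_complete_io(struct sas_task *task,
					enum service_response response,
					enum exec_status status)
	{
		enum isci_completion_selection sel;

		sel = isci_task_set_completion_status(task, response, status,
					isci_perform_normal_io_completion);

		switch (sel) {
		case isci_perform_normal_io_completion:
			task->task_done(task);	/* normal notify */
			break;
		case isci_perform_error_io_completion:
			sas_task_abort(task);	/* escalate via libsas */
			break;
		case isci_perform_aborted_io_completion:
		default:
			break;			/* no notification */
		}
	}
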
331/**
332 * isci_execpath_callback() - This function is called from the task
333 * execute path when the task needs to call back libsas about a submit-time
334 * task failure. The callback occurs either through the task's done function
335 * or through sas_task_abort. In the case of regular non-discovery SATA/STP I/O
336 * requests, libsas takes the host lock before calling execute task. Therefore
337 * in this situation the host lock must be managed before calling the func.
338 *
339 * @ihost: This parameter is the controller to which the I/O request was sent.
340 * @task: This parameter is the I/O request.
341 * @func: This parameter is the function to call in the correct context.
342 *
343 */
345static inline void isci_execpath_callback(struct isci_host *ihost,
346 struct sas_task *task,
347 void (*func)(struct sas_task *))
348{
349 struct domain_device *dev = task->dev;
350
351 if (dev_is_sata(dev) && task->uldd_task) {
352 unsigned long flags;
353
354 /* Since we are still in the submit path, and since
355 * libsas takes the host lock on behalf of SATA
356 * devices before I/O starts (in the non-discovery case),
357 * we need to unlock before we can call the callback function.
358 */
359 raw_local_irq_save(flags);
360 spin_unlock(dev->sata_dev.ap->lock);
361 func(task);
362 spin_lock(dev->sata_dev.ap->lock);
363 raw_local_irq_restore(flags);
364 } else
365 func(task);
366}
367#endif /* !defined(_SCI_TASK_H_) */
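
A submit-time failure, by contrast, would be reported through the helper above so that the SATA host-lock handling stays correct. A sketch only; the surrounding error handling is assumed.

	/* Hypothetical submit-time failure path: mark the task failed and
	 * let isci_execpath_callback manage the host-lock dance for SATA.
	 */
	static void example_fail_at_submit(struct isci_host *ihost,
					   struct sas_task *task)
	{
		isci_task_set_completion_status(task, SAS_TASK_UNDELIVERED,
						SAM_STAT_TASK_ABORTED,
						isci_perform_error_io_completion);
		isci_execpath_callback(ihost, task, sas_task_abort);
	}
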
diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c
new file mode 100644
index 000000000000..e9e1e2abacb9
--- /dev/null
+++ b/drivers/scsi/isci/unsolicited_frame_control.c
@@ -0,0 +1,225 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#include "host.h"
57#include "unsolicited_frame_control.h"
58#include "registers.h"
59
60int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
61{
62 struct sci_unsolicited_frame_control *uf_control = &ihost->uf_control;
63 struct sci_unsolicited_frame *uf;
64 u32 buf_len, header_len, i;
65 dma_addr_t dma;
66 size_t size;
67 void *virt;
68
69 /*
70 * Prepare all of the memory sizes for the UF headers, UF address
71 * table, and UF buffers themselves.
72 */
73 buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
74 header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header);
75 size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(dma_addr_t);
76
77 /*
78 * The Unsolicited Frame buffers are set at the start of the UF
79 * memory descriptor entry. The headers and address table will be
80 * placed after the buffers.
81 */
82 virt = dmam_alloc_coherent(&ihost->pdev->dev, size, &dma, GFP_KERNEL);
83 if (!virt)
84 return -ENOMEM;
85
86 /*
87 * Program the location of the UF header table into the SCU.
88 * Notes:
 89	 * - The address must align on a 64-byte boundary. This is already
 90	 *   guaranteed, since the unsolicited frame buffers are 1KB aligned.
91 * - Program unused header entries to overlap with the last
92 * unsolicited frame. The silicon will never DMA to these unused
93 * headers, since we program the UF address table pointers to
94 * NULL.
95 */
96 uf_control->headers.physical_address = dma + buf_len;
97 uf_control->headers.array = virt + buf_len;
98
99 /*
100 * Program the location of the UF address table into the SCU.
101 * Notes:
102	 * - The address must align on a 64-bit boundary. This is already
103	 *   guaranteed, since the header table programmed above starts on a
104	 *   64-byte boundary and each header is 64 bytes in size.
105 */
106 uf_control->address_table.physical_address = dma + buf_len + header_len;
107 uf_control->address_table.array = virt + buf_len + header_len;
108 uf_control->get = 0;
109
110 /*
111 * UF buffer requirements are:
112 * - The last entry in the UF queue is not NULL.
113 * - There is a power of 2 number of entries (NULL or not-NULL)
114 * programmed into the queue.
115 * - Aligned on a 1KB boundary. */
116
117 /*
118 * Program the actual used UF buffers into the UF address table and
119 * the controller's array of UFs.
120 */
121 for (i = 0; i < SCU_MAX_UNSOLICITED_FRAMES; i++) {
122 uf = &uf_control->buffers.array[i];
123
124 uf_control->address_table.array[i] = dma;
125
126 uf->buffer = virt;
127 uf->header = &uf_control->headers.array[i];
128 uf->state = UNSOLICITED_FRAME_EMPTY;
129
130 /*
131 * Increment the address of the physical and virtual memory
132 * pointers. Everything is aligned on 1k boundary with an
133 * increment of 1k.
134 */
135 virt += SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
136 dma += SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
137 }
138
139 return 0;
140}
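
To make the single-allocation layout concrete, here is the arithmetic for the customary values SCU_MAX_UNSOLICITED_FRAMES = 128 and SCU_UNSOLICITED_FRAME_BUFFER_SIZE = 1024; treat both as assumptions, since the constants are defined elsewhere in the driver.

	/* Worked layout example (assumed: 128 frames, 1KB buffers,
	 * 64-byte headers, 8-byte address-table entries):
	 *
	 *   buf_len    = 128 * 1024 = 131072 bytes  at offset      0
	 *   header_len = 128 *   64 =   8192 bytes  at offset 131072
	 *   table_len  = 128 *    8 =   1024 bytes  at offset 139264
	 *   size       = 140288 bytes from one dmam_alloc_coherent() call
	 *
	 * dma_alloc_coherent() memory is at least page aligned, so the 1KB
	 * buffer alignment and 64-byte header alignment follow from the
	 * offsets being multiples of 1KB and 64 respectively.
	 */
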
141
142enum sci_status sci_unsolicited_frame_control_get_header(struct sci_unsolicited_frame_control *uf_control,
143 u32 frame_index,
144 void **frame_header)
145{
146 if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) {
147		/* Skip the first word in the frame since this is a control word used
148 * by the hardware.
149 */
150 *frame_header = &uf_control->buffers.array[frame_index].header->data;
151
152 return SCI_SUCCESS;
153 }
154
155 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
156}
157
158enum sci_status sci_unsolicited_frame_control_get_buffer(struct sci_unsolicited_frame_control *uf_control,
159 u32 frame_index,
160 void **frame_buffer)
161{
162 if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) {
163 *frame_buffer = uf_control->buffers.array[frame_index].buffer;
164
165 return SCI_SUCCESS;
166 }
167
168 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
169}
170
171bool sci_unsolicited_frame_control_release_frame(struct sci_unsolicited_frame_control *uf_control,
172 u32 frame_index)
173{
174 u32 frame_get;
175 u32 frame_cycle;
176
177 frame_get = uf_control->get & (SCU_MAX_UNSOLICITED_FRAMES - 1);
178 frame_cycle = uf_control->get & SCU_MAX_UNSOLICITED_FRAMES;
179
180 /*
181 * In the event there are NULL entries in the UF table, we need to
182 * advance the get pointer in order to find out if this frame should
183 * be released (i.e. update the get pointer)
184 */
185	while (frame_get < SCU_MAX_UNSOLICITED_FRAMES &&
186	       lower_32_bits(uf_control->address_table.array[frame_get]) == 0 &&
187	       upper_32_bits(uf_control->address_table.array[frame_get]) == 0)
188		frame_get++;
189
190 /*
191	 * The table has a NULL entry as its last element. This is
192 * illegal.
193 */
194 BUG_ON(frame_get >= SCU_MAX_UNSOLICITED_FRAMES);
195 if (frame_index >= SCU_MAX_UNSOLICITED_FRAMES)
196 return false;
197
198 uf_control->buffers.array[frame_index].state = UNSOLICITED_FRAME_RELEASED;
199
200 if (frame_get != frame_index) {
201 /*
202 * Frames remain in use until we advance the get pointer
203 * so there is nothing we can do here
204 */
205 return false;
206 }
207
208 /*
209	 * The frame index is equal to the current get pointer, so we
210	 * can now free up all of the frame entries that have been released.
211 */
212 while (uf_control->buffers.array[frame_get].state == UNSOLICITED_FRAME_RELEASED) {
213 uf_control->buffers.array[frame_get].state = UNSOLICITED_FRAME_EMPTY;
214
215 if (frame_get+1 == SCU_MAX_UNSOLICITED_FRAMES-1) {
216 frame_cycle ^= SCU_MAX_UNSOLICITED_FRAMES;
217 frame_get = 0;
218 } else
219 frame_get++;
220 }
221
222 uf_control->get = SCU_UFQGP_GEN_BIT(ENABLE_BIT) | frame_cycle | frame_get;
223
224 return true;
225}
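
The get value written back to the hardware packs three things: the queue enable bit, a cycle bit that toggles on wrap, and the frame index. The masks below mirror the ones used above and rely on SCU_MAX_UNSOLICITED_FRAMES being a power of two; the helper names are illustrative only.

	/* Hypothetical decode of the software get pointer. */
	static inline u32 uf_get_index(u32 get)
	{
		return get & (SCU_MAX_UNSOLICITED_FRAMES - 1); /* low bits */
	}

	static inline u32 uf_get_cycle(u32 get)
	{
		return get & SCU_MAX_UNSOLICITED_FRAMES; /* next bit up */
	}
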
diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h
new file mode 100644
index 000000000000..31cb9506f52d
--- /dev/null
+++ b/drivers/scsi/isci/unsolicited_frame_control.h
@@ -0,0 +1,278 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#ifndef _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_
57#define _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_
58
59#include "isci.h"
60
61#define SCU_UNSOLICITED_FRAME_HEADER_DATA_DWORDS 15
62
63/**
64 * struct scu_unsolicited_frame_header -
65 *
66 * This structure delineates the format of an unsolicited frame header. The
67 * first DWORD are UF attributes defined by the silicon architecture. The data
68 * depicts actual header information received on the link.
69 */
70struct scu_unsolicited_frame_header {
71 /**
72 * This field indicates if there is an Initiator Index Table entry with
73 * which this header is associated.
74 */
75 u32 iit_exists:1;
76
77 /**
78 * This field simply indicates the protocol type (i.e. SSP, STP, SMP).
79 */
80 u32 protocol_type:3;
81
82 /**
83 * This field indicates if the frame is an address frame (IAF or OAF)
84 * or if it is a information unit frame.
85 */
86 u32 is_address_frame:1;
87
88 /**
89 * This field simply indicates the connection rate at which the frame
90 * was received.
91 */
92 u32 connection_rate:4;
93
94 u32 reserved:23;
95
96 /**
97 * This field represents the actual header data received on the link.
98 */
99 u32 data[SCU_UNSOLICITED_FRAME_HEADER_DATA_DWORDS];
100
101};
102
103
104
105/**
106 * enum unsolicited_frame_state -
107 *
108 * This enumeration represents the current unsolicited frame state. The
109 * controller object cannot update the hardware unsolicited frame put pointer
110 * unless it has already processed the prior unsolicited frames.
111 */
112enum unsolicited_frame_state {
113 /**
114 * This state is when the frame is empty and not in use. It is
115 * different from the released state in that the hardware could DMA
116 * data to this frame buffer.
117 */
118 UNSOLICITED_FRAME_EMPTY,
119
120 /**
121	 * This state is set when the frame buffer is in use by some
122 * object in the system.
123 */
124 UNSOLICITED_FRAME_IN_USE,
125
126 /**
127 * This state is set when the frame is returned to the free pool
128 * but one or more frames prior to this one are still in use.
129 * Once all of the frame before this one are freed it will go to
130 * the empty state.
131 */
132 UNSOLICITED_FRAME_RELEASED,
133
134 UNSOLICITED_FRAME_MAX_STATES
135};
136
137/**
138 * struct sci_unsolicited_frame -
139 *
140 * This is the unsolicited frame data structure it acts as the container for
141 * the current frame state, frame header and frame buffer.
142 */
143struct sci_unsolicited_frame {
144 /**
145 * This field contains the current frame state
146 */
147 enum unsolicited_frame_state state;
148
149 /**
150 * This field points to the frame header data.
151 */
152 struct scu_unsolicited_frame_header *header;
153
154 /**
155 * This field points to the frame buffer data.
156 */
157 void *buffer;
158
159};
160
161/**
162 * struct sci_uf_header_array -
163 *
164 * This structure contains all of the unsolicited frame header information.
165 */
166struct sci_uf_header_array {
167 /**
168	 * This field represents a virtual pointer to the start
169	 * address of the UF header array; each entry is a
170	 * hardware-defined scu_unsolicited_frame_header.
171 */
172 struct scu_unsolicited_frame_header *array;
173
174 /**
175	 * This field specifies the physical address location for the UF
176	 * header array.
177 */
178 dma_addr_t physical_address;
179
180};
181
182/**
183 * struct sci_uf_buffer_array -
184 *
185 * This structure contains all of the unsolicited frame buffer (actual payload)
186 * information.
187 */
188struct sci_uf_buffer_array {
189 /**
190	 * This field is the unsolicited frame data; it is used to manage
191 * the data for the unsolicited frame requests. It also represents
192 * the virtual address location that corresponds to the
193 * physical_address field.
194 */
195 struct sci_unsolicited_frame array[SCU_MAX_UNSOLICITED_FRAMES];
196
197 /**
198 * This field specifies the physical address location for the UF
199 * buffer array.
200 */
201 dma_addr_t physical_address;
202};
203
204/**
205 * struct sci_uf_address_table_array -
206 *
207 * This object maintains all of the unsolicited frame address table specific
208 * data. The address table is a collection of 64-bit pointers that point to
209 * 1KB buffers into which the silicon will DMA unsolicited frames.
210 */
211struct sci_uf_address_table_array {
212 /**
213 * This field represents a virtual pointer that refers to the
214 * starting address of the UF address table.
215 * 64-bit pointers are required by the hardware.
216 */
217 dma_addr_t *array;
218
219 /**
220 * This field specifies the physical address location for the UF
221 * address table.
222 */
223 dma_addr_t physical_address;
224
225};
226
227/**
228 * struct sci_unsolicited_frame_control -
229 *
230 * This object contains all of the data necessary to handle unsolicited frames.
231 */
232struct sci_unsolicited_frame_control {
233 /**
234 * This field is the software copy of the unsolicited frame queue
235 * get pointer. The controller object writes this value to the
236	 * hardware so that the hardware can put more unsolicited frame entries.
237 */
238 u32 get;
239
240 /**
241 * This field contains all of the unsolicited frame header
242 * specific fields.
243 */
244 struct sci_uf_header_array headers;
245
246 /**
247 * This field contains all of the unsolicited frame buffer
248 * specific fields.
249 */
250 struct sci_uf_buffer_array buffers;
251
252 /**
253 * This field contains all of the unsolicited frame address table
254 * specific fields.
255 */
256 struct sci_uf_address_table_array address_table;
257
258};
259
260struct isci_host;
261
262int sci_unsolicited_frame_control_construct(struct isci_host *ihost);
263
264enum sci_status sci_unsolicited_frame_control_get_header(
265 struct sci_unsolicited_frame_control *uf_control,
266 u32 frame_index,
267 void **frame_header);
268
269enum sci_status sci_unsolicited_frame_control_get_buffer(
270 struct sci_unsolicited_frame_control *uf_control,
271 u32 frame_index,
272 void **frame_buffer);
273
274bool sci_unsolicited_frame_control_release_frame(
275 struct sci_unsolicited_frame_control *uf_control,
276 u32 frame_index);
277
278#endif /* _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_ */
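
Putting the three accessors together, a consumer of unsolicited frames might look like the sketch below. The handler name is an assumption and real decode logic is elided; the accessor signatures are the ones declared above.

	/* Hypothetical frame handler: fetch header and payload for a frame
	 * index reported by the hardware, then hand the slot back.
	 */
	static enum sci_status example_handle_uf(struct isci_host *ihost,
						 u32 frame_index)
	{
		struct sci_unsolicited_frame_control *uf = &ihost->uf_control;
		void *header, *buffer;
		enum sci_status status;

		status = sci_unsolicited_frame_control_get_header(uf,
						frame_index, &header);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_unsolicited_frame_control_get_buffer(uf,
						frame_index, &buffer);
		if (status != SCI_SUCCESS)
			return status;

		/* ... decode header/buffer here ... */

		/* True means the hardware get pointer was advanced. */
		sci_unsolicited_frame_control_release_frame(uf, frame_index);
		return SCI_SUCCESS;
	}
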
diff --git a/drivers/staging/brcm80211/Kconfig b/drivers/staging/brcm80211/Kconfig
index f4cf9b23481e..379cf16e89f7 100644
--- a/drivers/staging/brcm80211/Kconfig
+++ b/drivers/staging/brcm80211/Kconfig
@@ -7,6 +7,7 @@ config BRCMSMAC
7 default n 7 default n
8 depends on PCI 8 depends on PCI
9 depends on WLAN && MAC80211 9 depends on WLAN && MAC80211
10 depends on X86 || MIPS
10 select BRCMUTIL 11 select BRCMUTIL
11 select FW_LOADER 12 select FW_LOADER
12 select CRC_CCITT 13 select CRC_CCITT
@@ -20,6 +21,7 @@ config BRCMFMAC
20 default n 21 default n
21 depends on MMC 22 depends on MMC
22 depends on WLAN && CFG80211 23 depends on WLAN && CFG80211
24 depends on X86 || MIPS
23 select BRCMUTIL 25 select BRCMUTIL
24 select FW_LOADER 26 select FW_LOADER
25 select WIRELESS_EXT 27 select WIRELESS_EXT
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index 1502d80f6f78..20008a4376e8 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -2,6 +2,7 @@ config COMEDI
2 tristate "Data acquisition support (comedi)" 2 tristate "Data acquisition support (comedi)"
3 default N 3 default N
4 depends on m 4 depends on m
5 depends on BROKEN || FRV || M32R || MN10300 || SUPERH || TILE || X86
5 ---help--- 6 ---help---
6 Enable support a wide range of data acquisition devices 7 Enable support a wide range of data acquisition devices
7 for Linux. 8 for Linux.
@@ -160,6 +161,7 @@ config COMEDI_PCL730
160 161
161config COMEDI_PCL812 162config COMEDI_PCL812
162 tristate "Advantech PCL-812/813 and ADlink ACL-8112/8113/8113/8216" 163 tristate "Advantech PCL-812/813 and ADlink ACL-8112/8113/8113/8216"
164 depends on VIRT_TO_BUS
163 default N 165 default N
164 ---help--- 166 ---help---
165 Enable support for Advantech PCL-812/PG, PCL-813/B, ADLink 167 Enable support for Advantech PCL-812/PG, PCL-813/B, ADLink
@@ -171,6 +173,7 @@ config COMEDI_PCL812
171 173
172config COMEDI_PCL816 174config COMEDI_PCL816
173 tristate "Advantech PCL-814 and PCL-816 ISA card support" 175 tristate "Advantech PCL-814 and PCL-816 ISA card support"
176 depends on VIRT_TO_BUS
174 default N 177 default N
175 ---help--- 178 ---help---
176 Enable support for Advantech PCL-814 and PCL-816 ISA cards 179 Enable support for Advantech PCL-814 and PCL-816 ISA cards
@@ -180,6 +183,7 @@ config COMEDI_PCL816
180 183
181config COMEDI_PCL818 184config COMEDI_PCL818
182 tristate "Advantech PCL-718 and PCL-818 ISA card support" 185 tristate "Advantech PCL-718 and PCL-818 ISA card support"
186 depends on VIRT_TO_BUS
183 default N 187 default N
184 ---help--- 188 ---help---
185 Enable support for Advantech PCL-818 ISA cards 189 Enable support for Advantech PCL-818 ISA cards
@@ -269,6 +273,7 @@ config COMEDI_DAS800
269 273
270config COMEDI_DAS1800 274config COMEDI_DAS1800
271 tristate "DAS1800 and compatible ISA card support" 275 tristate "DAS1800 and compatible ISA card support"
276 depends on VIRT_TO_BUS
272 select COMEDI_FC 277 select COMEDI_FC
273 default N 278 default N
274 ---help--- 279 ---help---
@@ -340,6 +345,7 @@ config COMEDI_DT2817
340config COMEDI_DT282X 345config COMEDI_DT282X
341 tristate "Data Translation DT2821 series and DT-EZ ISA card support" 346 tristate "Data Translation DT2821 series and DT-EZ ISA card support"
342 select COMEDI_FC 347 select COMEDI_FC
348 depends on VIRT_TO_BUS
343 default N 349 default N
344 ---help--- 350 ---help---
345 Enable support for Data Translation DT2821 series including DT-EZ 351 Enable support for Data Translation DT2821 series including DT-EZ
@@ -419,6 +425,7 @@ config COMEDI_ADQ12B
419config COMEDI_NI_AT_A2150 425config COMEDI_NI_AT_A2150
420 tristate "NI AT-A2150 ISA card support" 426 tristate "NI AT-A2150 ISA card support"
421 depends on COMEDI_NI_COMMON 427 depends on COMEDI_NI_COMMON
428 depends on VIRT_TO_BUS
422 default N 429 default N
423 ---help--- 430 ---help---
424 Enable support for National Instruments AT-A2150 cards 431 Enable support for National Instruments AT-A2150 cards
@@ -536,6 +543,7 @@ if COMEDI_PCI_DRIVERS && PCI
536 543
537config COMEDI_ADDI_APCI_035 544config COMEDI_ADDI_APCI_035
538 tristate "ADDI-DATA APCI_035 support" 545 tristate "ADDI-DATA APCI_035 support"
546 depends on VIRT_TO_BUS
539 default N 547 default N
540 ---help--- 548 ---help---
541 Enable support for ADDI-DATA APCI_035 cards 549 Enable support for ADDI-DATA APCI_035 cards
@@ -545,6 +553,7 @@ config COMEDI_ADDI_APCI_035
545 553
546config COMEDI_ADDI_APCI_1032 554config COMEDI_ADDI_APCI_1032
547 tristate "ADDI-DATA APCI_1032 support" 555 tristate "ADDI-DATA APCI_1032 support"
556 depends on VIRT_TO_BUS
548 default N 557 default N
549 ---help--- 558 ---help---
550 Enable support for ADDI-DATA APCI_1032 cards 559 Enable support for ADDI-DATA APCI_1032 cards
@@ -554,6 +563,7 @@ config COMEDI_ADDI_APCI_1032
554 563
555config COMEDI_ADDI_APCI_1500 564config COMEDI_ADDI_APCI_1500
556 tristate "ADDI-DATA APCI_1500 support" 565 tristate "ADDI-DATA APCI_1500 support"
566 depends on VIRT_TO_BUS
557 default N 567 default N
558 ---help--- 568 ---help---
559 Enable support for ADDI-DATA APCI_1500 cards 569 Enable support for ADDI-DATA APCI_1500 cards
@@ -563,6 +573,7 @@ config COMEDI_ADDI_APCI_1500
563 573
564config COMEDI_ADDI_APCI_1516 574config COMEDI_ADDI_APCI_1516
565 tristate "ADDI-DATA APCI_1516 support" 575 tristate "ADDI-DATA APCI_1516 support"
576 depends on VIRT_TO_BUS
566 default N 577 default N
567 ---help--- 578 ---help---
568 Enable support for ADDI-DATA APCI_1516 cards 579 Enable support for ADDI-DATA APCI_1516 cards
@@ -572,6 +583,7 @@ config COMEDI_ADDI_APCI_1516
572 583
573config COMEDI_ADDI_APCI_1564 584config COMEDI_ADDI_APCI_1564
574 tristate "ADDI-DATA APCI_1564 support" 585 tristate "ADDI-DATA APCI_1564 support"
586 depends on VIRT_TO_BUS
575 default N 587 default N
576 ---help--- 588 ---help---
577 Enable support for ADDI-DATA APCI_1564 cards 589 Enable support for ADDI-DATA APCI_1564 cards
@@ -581,6 +593,7 @@ config COMEDI_ADDI_APCI_1564
581 593
582config COMEDI_ADDI_APCI_16XX 594config COMEDI_ADDI_APCI_16XX
583 tristate "ADDI-DATA APCI_16xx support" 595 tristate "ADDI-DATA APCI_16xx support"
596 depends on VIRT_TO_BUS
584 default N 597 default N
585 ---help--- 598 ---help---
586 Enable support for ADDI-DATA APCI_16xx cards 599 Enable support for ADDI-DATA APCI_16xx cards
@@ -590,6 +603,7 @@ config COMEDI_ADDI_APCI_16XX
590 603
591config COMEDI_ADDI_APCI_2016 604config COMEDI_ADDI_APCI_2016
592 tristate "ADDI-DATA APCI_2016 support" 605 tristate "ADDI-DATA APCI_2016 support"
606 depends on VIRT_TO_BUS
593 default N 607 default N
594 ---help--- 608 ---help---
595 Enable support for ADDI-DATA APCI_2016 cards 609 Enable support for ADDI-DATA APCI_2016 cards
@@ -599,6 +613,7 @@ config COMEDI_ADDI_APCI_2016
599 613
600config COMEDI_ADDI_APCI_2032 614config COMEDI_ADDI_APCI_2032
601 tristate "ADDI-DATA APCI_2032 support" 615 tristate "ADDI-DATA APCI_2032 support"
616 depends on VIRT_TO_BUS
602 default N 617 default N
603 ---help--- 618 ---help---
604 Enable support for ADDI-DATA APCI_2032 cards 619 Enable support for ADDI-DATA APCI_2032 cards
@@ -608,6 +623,7 @@ config COMEDI_ADDI_APCI_2032
608 623
609config COMEDI_ADDI_APCI_2200 624config COMEDI_ADDI_APCI_2200
610 tristate "ADDI-DATA APCI_2200 support" 625 tristate "ADDI-DATA APCI_2200 support"
626 depends on VIRT_TO_BUS
611 default N 627 default N
612 ---help--- 628 ---help---
613 Enable support for ADDI-DATA APCI_2200 cards 629 Enable support for ADDI-DATA APCI_2200 cards
@@ -617,6 +633,7 @@ config COMEDI_ADDI_APCI_2200
617 633
618config COMEDI_ADDI_APCI_3001 634config COMEDI_ADDI_APCI_3001
619 tristate "ADDI-DATA APCI_3001 support" 635 tristate "ADDI-DATA APCI_3001 support"
636 depends on VIRT_TO_BUS
620 select COMEDI_FC 637 select COMEDI_FC
621 default N 638 default N
622 ---help--- 639 ---help---
@@ -627,6 +644,7 @@ config COMEDI_ADDI_APCI_3001
627 644
628config COMEDI_ADDI_APCI_3120 645config COMEDI_ADDI_APCI_3120
629 tristate "ADDI-DATA APCI_3520 support" 646 tristate "ADDI-DATA APCI_3520 support"
647 depends on VIRT_TO_BUS
630 select COMEDI_FC 648 select COMEDI_FC
631 default N 649 default N
632 ---help--- 650 ---help---
@@ -637,6 +655,7 @@ config COMEDI_ADDI_APCI_3120
637 655
638config COMEDI_ADDI_APCI_3501 656config COMEDI_ADDI_APCI_3501
639 tristate "ADDI-DATA APCI_3501 support" 657 tristate "ADDI-DATA APCI_3501 support"
658 depends on VIRT_TO_BUS
640 default N 659 default N
641 ---help--- 660 ---help---
642 Enable support for ADDI-DATA APCI_3501 cards 661 Enable support for ADDI-DATA APCI_3501 cards
@@ -646,6 +665,7 @@ config COMEDI_ADDI_APCI_3501
646 665
647config COMEDI_ADDI_APCI_3XXX 666config COMEDI_ADDI_APCI_3XXX
648 tristate "ADDI-DATA APCI_3xxx support" 667 tristate "ADDI-DATA APCI_3xxx support"
668 depends on VIRT_TO_BUS
649 default N 669 default N
650 ---help--- 670 ---help---
651 Enable support for ADDI-DATA APCI_3xxx cards 671 Enable support for ADDI-DATA APCI_3xxx cards
@@ -712,6 +732,7 @@ config COMEDI_ADL_PCI9111
712config COMEDI_ADL_PCI9118 732config COMEDI_ADL_PCI9118
713 tristate "ADLink PCI-9118DG, PCI-9118HG, PCI-9118HR support" 733 tristate "ADLink PCI-9118DG, PCI-9118HG, PCI-9118HR support"
714 select COMEDI_FC 734 select COMEDI_FC
735 depends on VIRT_TO_BUS
715 default N 736 default N
716 ---help--- 737 ---help---
717 Enable support for ADlink PCI-9118DG, PCI-9118HG, PCI-9118HR cards 738 Enable support for ADlink PCI-9118DG, PCI-9118HG, PCI-9118HR cards
@@ -1287,6 +1308,7 @@ config COMEDI_NI_LABPC
1287 depends on COMEDI_MITE 1308 depends on COMEDI_MITE
1288 select COMEDI_8255 1309 select COMEDI_8255
1289 select COMEDI_FC 1310 select COMEDI_FC
1311 depends on VIRT_TO_BUS
1290 default N 1312 default N
1291 ---help--- 1313 ---help---
1292 Enable support for National Instruments Lab-PC and compatibles 1314 Enable support for National Instruments Lab-PC and compatibles
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index f96d5b5d5141..d329635fb5c4 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -4,7 +4,7 @@
4 4
5menuconfig IIO 5menuconfig IIO
6 tristate "Industrial I/O support" 6 tristate "Industrial I/O support"
7 depends on !S390 7 depends on GENERIC_HARDIRQS
8 help 8 help
9 The industrial I/O subsystem provides a unified framework for 9 The industrial I/O subsystem provides a unified framework for
10 drivers for many different types of embedded sensors using a 10 drivers for many different types of embedded sensors using a
diff --git a/drivers/staging/iio/accel/adis16204.h b/drivers/staging/iio/accel/adis16204.h
index 5310a4297688..1690c0d15690 100644
--- a/drivers/staging/iio/accel/adis16204.h
+++ b/drivers/staging/iio/accel/adis16204.h
@@ -84,7 +84,6 @@ struct adis16204_state {
84 84
85int adis16204_set_irq(struct iio_dev *indio_dev, bool enable); 85int adis16204_set_irq(struct iio_dev *indio_dev, bool enable);
86 86
87#ifdef CONFIG_IIO_RING_BUFFER
88enum adis16204_scan { 87enum adis16204_scan {
89 ADIS16204_SCAN_SUPPLY, 88 ADIS16204_SCAN_SUPPLY,
90 ADIS16204_SCAN_ACC_X, 89 ADIS16204_SCAN_ACC_X,
@@ -93,6 +92,7 @@ enum adis16204_scan {
93 ADIS16204_SCAN_TEMP, 92 ADIS16204_SCAN_TEMP,
94}; 93};
95 94
95#ifdef CONFIG_IIO_RING_BUFFER
96void adis16204_remove_trigger(struct iio_dev *indio_dev); 96void adis16204_remove_trigger(struct iio_dev *indio_dev);
97int adis16204_probe_trigger(struct iio_dev *indio_dev); 97int adis16204_probe_trigger(struct iio_dev *indio_dev);
98 98
diff --git a/drivers/staging/iio/accel/adis16209.h b/drivers/staging/iio/accel/adis16209.h
index 58d08db6f9b5..3153cbee0957 100644
--- a/drivers/staging/iio/accel/adis16209.h
+++ b/drivers/staging/iio/accel/adis16209.h
@@ -121,8 +121,6 @@ struct adis16209_state {
121 121
122int adis16209_set_irq(struct iio_dev *indio_dev, bool enable); 122int adis16209_set_irq(struct iio_dev *indio_dev, bool enable);
123 123
124#ifdef CONFIG_IIO_RING_BUFFER
125
126#define ADIS16209_SCAN_SUPPLY 0 124#define ADIS16209_SCAN_SUPPLY 0
127#define ADIS16209_SCAN_ACC_X 1 125#define ADIS16209_SCAN_ACC_X 1
128#define ADIS16209_SCAN_ACC_Y 2 126#define ADIS16209_SCAN_ACC_Y 2
@@ -132,6 +130,8 @@ int adis16209_set_irq(struct iio_dev *indio_dev, bool enable);
132#define ADIS16209_SCAN_INCLI_Y 6 130#define ADIS16209_SCAN_INCLI_Y 6
133#define ADIS16209_SCAN_ROT 7 131#define ADIS16209_SCAN_ROT 7
134 132
133#ifdef CONFIG_IIO_RING_BUFFER
134
135void adis16209_remove_trigger(struct iio_dev *indio_dev); 135void adis16209_remove_trigger(struct iio_dev *indio_dev);
136int adis16209_probe_trigger(struct iio_dev *indio_dev); 136int adis16209_probe_trigger(struct iio_dev *indio_dev);
137 137
diff --git a/drivers/staging/iio/gyro/adis16260.h b/drivers/staging/iio/gyro/adis16260.h
index 702dc982f62f..24bf70e4b29b 100644
--- a/drivers/staging/iio/gyro/adis16260.h
+++ b/drivers/staging/iio/gyro/adis16260.h
@@ -104,7 +104,6 @@ struct adis16260_state {
104 104
105int adis16260_set_irq(struct iio_dev *indio_dev, bool enable); 105int adis16260_set_irq(struct iio_dev *indio_dev, bool enable);
106 106
107#ifdef CONFIG_IIO_RING_BUFFER
108/* At the moment triggers are only used for ring buffer 107/* At the moment triggers are only used for ring buffer
109 * filling. This may change! 108 * filling. This may change!
110 */ 109 */
@@ -115,6 +114,7 @@ int adis16260_set_irq(struct iio_dev *indio_dev, bool enable);
115#define ADIS16260_SCAN_TEMP 3 114#define ADIS16260_SCAN_TEMP 3
116#define ADIS16260_SCAN_ANGL 4 115#define ADIS16260_SCAN_ANGL 4
117 116
117#ifdef CONFIG_IIO_RING_BUFFER
118void adis16260_remove_trigger(struct iio_dev *indio_dev); 118void adis16260_remove_trigger(struct iio_dev *indio_dev);
119int adis16260_probe_trigger(struct iio_dev *indio_dev); 119int adis16260_probe_trigger(struct iio_dev *indio_dev);
120 120
diff --git a/drivers/staging/iio/imu/adis16400.h b/drivers/staging/iio/imu/adis16400.h
index db184d11dfc0..e87715b9acc6 100644
--- a/drivers/staging/iio/imu/adis16400.h
+++ b/drivers/staging/iio/imu/adis16400.h
@@ -158,7 +158,6 @@ struct adis16400_state {
158 158
159int adis16400_set_irq(struct iio_dev *indio_dev, bool enable); 159int adis16400_set_irq(struct iio_dev *indio_dev, bool enable);
160 160
161#ifdef CONFIG_IIO_RING_BUFFER
162/* At the moment triggers are only used for ring buffer 161/* At the moment triggers are only used for ring buffer
163 * filling. This may change! 162 * filling. This may change!
164 */ 163 */
@@ -182,6 +181,7 @@ int adis16400_set_irq(struct iio_dev *indio_dev, bool enable);
182#define ADIS16300_SCAN_INCLI_X 12 181#define ADIS16300_SCAN_INCLI_X 12
183#define ADIS16300_SCAN_INCLI_Y 13 182#define ADIS16300_SCAN_INCLI_Y 13
184 183
184#ifdef CONFIG_IIO_RING_BUFFER
185void adis16400_remove_trigger(struct iio_dev *indio_dev); 185void adis16400_remove_trigger(struct iio_dev *indio_dev);
186int adis16400_probe_trigger(struct iio_dev *indio_dev); 186int adis16400_probe_trigger(struct iio_dev *indio_dev);
187 187
diff --git a/drivers/staging/lirc/lirc_imon.c b/drivers/staging/lirc/lirc_imon.c
index 4039eda2a15b..4a9e563f40fa 100644
--- a/drivers/staging/lirc/lirc_imon.c
+++ b/drivers/staging/lirc/lirc_imon.c
@@ -672,8 +672,6 @@ static void imon_incoming_packet(struct imon_context *context,
672static void usb_rx_callback(struct urb *urb) 672static void usb_rx_callback(struct urb *urb)
673{ 673{
674 struct imon_context *context; 674 struct imon_context *context;
675 unsigned char *buf;
676 int len;
677 int intfnum = 0; 675 int intfnum = 0;
678 676
679 if (!urb) 677 if (!urb)
@@ -683,9 +681,6 @@ static void usb_rx_callback(struct urb *urb)
683 if (!context) 681 if (!context)
684 return; 682 return;
685 683
686 buf = urb->transfer_buffer;
687 len = urb->actual_length;
688
689 switch (urb->status) { 684 switch (urb->status) {
690 case -ENOENT: /* usbcore unlink successful! */ 685 case -ENOENT: /* usbcore unlink successful! */
691 return; 686 return;
@@ -728,7 +723,6 @@ static int imon_probe(struct usb_interface *interface,
728 int ir_ep_found = 0; 723 int ir_ep_found = 0;
729 int alloc_status = 0; 724 int alloc_status = 0;
730 int vfd_proto_6p = 0; 725 int vfd_proto_6p = 0;
731 int code_length;
732 struct imon_context *context = NULL; 726 struct imon_context *context = NULL;
733 int i; 727 int i;
734 u16 vendor, product; 728 u16 vendor, product;
@@ -749,8 +743,6 @@ static int imon_probe(struct usb_interface *interface,
749 else 743 else
750 context->display = 1; 744 context->display = 1;
751 745
752 code_length = BUF_CHUNK_SIZE * 8;
753
754 usbdev = usb_get_dev(interface_to_usbdev(interface)); 746 usbdev = usb_get_dev(interface_to_usbdev(interface));
755 iface_desc = interface->cur_altsetting; 747 iface_desc = interface->cur_altsetting;
756 num_endpts = iface_desc->desc.bNumEndpoints; 748 num_endpts = iface_desc->desc.bNumEndpoints;
@@ -856,7 +848,7 @@ static int imon_probe(struct usb_interface *interface,
856 848
857 strcpy(driver->name, MOD_NAME); 849 strcpy(driver->name, MOD_NAME);
858 driver->minor = -1; 850 driver->minor = -1;
859 driver->code_length = sizeof(int) * 8; 851 driver->code_length = BUF_CHUNK_SIZE * 8;
860 driver->sample_rate = 0; 852 driver->sample_rate = 0;
861 driver->features = LIRC_CAN_REC_MODE2; 853 driver->features = LIRC_CAN_REC_MODE2;
862 driver->data = context; 854 driver->data = context;
diff --git a/drivers/staging/lirc/lirc_serial.c b/drivers/staging/lirc/lirc_serial.c
index 4a3cca03224a..805df913bb6e 100644
--- a/drivers/staging/lirc/lirc_serial.c
+++ b/drivers/staging/lirc/lirc_serial.c
@@ -838,7 +838,23 @@ static int hardware_init_port(void)
838 838
839static int init_port(void) 839static int init_port(void)
840{ 840{
841 int i, nlow, nhigh; 841 int i, nlow, nhigh, result;
842
843 result = request_irq(irq, irq_handler,
844 IRQF_DISABLED | (share_irq ? IRQF_SHARED : 0),
845 LIRC_DRIVER_NAME, (void *)&hardware);
846
847 switch (result) {
848 case -EBUSY:
849 printk(KERN_ERR LIRC_DRIVER_NAME ": IRQ %d busy\n", irq);
850 return -EBUSY;
851 case -EINVAL:
852 printk(KERN_ERR LIRC_DRIVER_NAME
853 ": Bad irq number or handler\n");
854 return -EINVAL;
855 default:
856 break;
857	}
842 858
843 /* Reserve io region. */ 859 /* Reserve io region. */
844 /* 860 /*
@@ -893,34 +909,17 @@ static int init_port(void)
893 printk(KERN_INFO LIRC_DRIVER_NAME ": Manually using active " 909 printk(KERN_INFO LIRC_DRIVER_NAME ": Manually using active "
894 "%s receiver\n", sense ? "low" : "high"); 910 "%s receiver\n", sense ? "low" : "high");
895 911
912 dprintk("Interrupt %d, port %04x obtained\n", irq, io);
896 return 0; 913 return 0;
897} 914}
898 915
899static int set_use_inc(void *data) 916static int set_use_inc(void *data)
900{ 917{
901 int result;
902 unsigned long flags; 918 unsigned long flags;
903 919
904 /* initialize timestamp */ 920 /* initialize timestamp */
905 do_gettimeofday(&lasttv); 921 do_gettimeofday(&lasttv);
906 922
907 result = request_irq(irq, irq_handler,
908 IRQF_DISABLED | (share_irq ? IRQF_SHARED : 0),
909 LIRC_DRIVER_NAME, (void *)&hardware);
910
911 switch (result) {
912 case -EBUSY:
913 printk(KERN_ERR LIRC_DRIVER_NAME ": IRQ %d busy\n", irq);
914 return -EBUSY;
915 case -EINVAL:
916 printk(KERN_ERR LIRC_DRIVER_NAME
917 ": Bad irq number or handler\n");
918 return -EINVAL;
919 default:
920 dprintk("Interrupt %d, port %04x obtained\n", irq, io);
921 break;
922 }
923
924 spin_lock_irqsave(&hardware[type].lock, flags); 923 spin_lock_irqsave(&hardware[type].lock, flags);
925 924
926 /* Set DLAB 0. */ 925 /* Set DLAB 0. */
@@ -945,10 +944,6 @@ static void set_use_dec(void *data)
945 soutp(UART_IER, sinp(UART_IER) & 944 soutp(UART_IER, sinp(UART_IER) &
946 (~(UART_IER_MSI|UART_IER_RLSI|UART_IER_THRI|UART_IER_RDI))); 945 (~(UART_IER_MSI|UART_IER_RLSI|UART_IER_THRI|UART_IER_RDI)));
947 spin_unlock_irqrestore(&hardware[type].lock, flags); 946 spin_unlock_irqrestore(&hardware[type].lock, flags);
948
949 free_irq(irq, (void *)&hardware);
950
951 dprintk("freed IRQ %d\n", irq);
952} 947}
953 948
954static ssize_t lirc_write(struct file *file, const char *buf, 949static ssize_t lirc_write(struct file *file, const char *buf,
@@ -1256,6 +1251,9 @@ exit_serial_exit:
1256static void __exit lirc_serial_exit_module(void) 1251static void __exit lirc_serial_exit_module(void)
1257{ 1252{
1258 lirc_serial_exit(); 1253 lirc_serial_exit();
1254
1255 free_irq(irq, (void *)&hardware);
1256
1259 if (iommap != 0) 1257 if (iommap != 0)
1260 release_mem_region(iommap, 8 << ioshift); 1258 release_mem_region(iommap, 8 << ioshift);
1261 else 1259 else
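
The net effect of the two lirc_serial hunks is to move IRQ ownership from the open/close path to the module lifetime, so a busy IRQ now fails module load instead of a later open. Schematically (a sketch of the resulting ordering, not literal driver code):

	/*
	 *   module init -> init_port() -> request_irq()  (failure aborts load)
	 *   open        -> set_use_inc()                 (no IRQ work anymore)
	 *   close       -> set_use_dec()                 (no free_irq anymore)
	 *   module exit -> lirc_serial_exit_module() -> free_irq()
	 */
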
diff --git a/drivers/staging/lirc/lirc_sir.c b/drivers/staging/lirc/lirc_sir.c
index a7b46f24f24e..0d3864594b12 100644
--- a/drivers/staging/lirc/lirc_sir.c
+++ b/drivers/staging/lirc/lirc_sir.c
@@ -739,23 +739,16 @@ static void send_space(unsigned long len)
739static void send_pulse(unsigned long len) 739static void send_pulse(unsigned long len)
740{ 740{
741 long bytes_out = len / TIME_CONST; 741 long bytes_out = len / TIME_CONST;
742 long time_left;
743 742
744 time_left = (long)len - (long)bytes_out * (long)TIME_CONST; 743 if (bytes_out == 0)
745 if (bytes_out == 0) {
746 bytes_out++; 744 bytes_out++;
747 time_left = 0; 745
748 }
749 while (bytes_out--) { 746 while (bytes_out--) {
750 outb(PULSE, io + UART_TX); 747 outb(PULSE, io + UART_TX);
751 /* FIXME treba seriozne cakanie z char/serial.c */ 748 /* FIXME treba seriozne cakanie z char/serial.c */
752 while (!(inb(io + UART_LSR) & UART_LSR_THRE)) 749 while (!(inb(io + UART_LSR) & UART_LSR_THRE))
753 ; 750 ;
754 } 751 }
755#if 0
756 if (time_left > 0)
757 safe_udelay(time_left);
758#endif
759} 752}
760#endif 753#endif
761 754
diff --git a/drivers/staging/lirc/lirc_zilog.c b/drivers/staging/lirc/lirc_zilog.c
index dd6a57c3c3a3..4e051f6b52db 100644
--- a/drivers/staging/lirc/lirc_zilog.c
+++ b/drivers/staging/lirc/lirc_zilog.c
@@ -475,14 +475,14 @@ static int lirc_thread(void *arg)
475 dprintk("poll thread started\n"); 475 dprintk("poll thread started\n");
476 476
477 while (!kthread_should_stop()) { 477 while (!kthread_should_stop()) {
478 set_current_state(TASK_INTERRUPTIBLE);
479
478 /* if device not opened, we can sleep half a second */ 480 /* if device not opened, we can sleep half a second */
479 if (atomic_read(&ir->open_count) == 0) { 481 if (atomic_read(&ir->open_count) == 0) {
480 schedule_timeout(HZ/2); 482 schedule_timeout(HZ/2);
481 continue; 483 continue;
482 } 484 }
483 485
484 set_current_state(TASK_INTERRUPTIBLE);
485
486 /* 486 /*
487 * This is ~113*2 + 24 + jitter (2*repeat gap + code length). 487 * This is ~113*2 + 24 + jitter (2*repeat gap + code length).
488 * We use this interval as the chip resets every time you poll 488 * We use this interval as the chip resets every time you poll
diff --git a/drivers/staging/mei/init.c b/drivers/staging/mei/init.c
index d1ffa32cd141..685fcf639644 100644
--- a/drivers/staging/mei/init.c
+++ b/drivers/staging/mei/init.c
@@ -189,7 +189,7 @@ int mei_hw_init(struct mei_device *dev)
189 mutex_lock(&dev->device_lock); 189 mutex_lock(&dev->device_lock);
190 } 190 }
191 191
192 if (!err && !dev->recvd_msg) { 192 if (err <= 0 && !dev->recvd_msg) {
193 dev->mei_state = MEI_DISABLED; 193 dev->mei_state = MEI_DISABLED;
194 dev_dbg(&dev->pdev->dev, 194 dev_dbg(&dev->pdev->dev,
195 "wait_event_interruptible_timeout failed" 195 "wait_event_interruptible_timeout failed"
diff --git a/drivers/staging/mei/wd.c b/drivers/staging/mei/wd.c
index 2564b038636a..fff53d0b5c6e 100644
--- a/drivers/staging/mei/wd.c
+++ b/drivers/staging/mei/wd.c
@@ -169,10 +169,15 @@ int mei_wd_stop(struct mei_device *dev, bool preserve)
169 ret = wait_event_interruptible_timeout(dev->wait_stop_wd, 169 ret = wait_event_interruptible_timeout(dev->wait_stop_wd,
170 dev->wd_stopped, 10 * HZ); 170 dev->wd_stopped, 10 * HZ);
171 mutex_lock(&dev->device_lock); 171 mutex_lock(&dev->device_lock);
172 if (!dev->wd_stopped) 172 if (dev->wd_stopped) {
173 dev_dbg(&dev->pdev->dev, "stop wd failed to complete.\n"); 173 dev_dbg(&dev->pdev->dev, "stop wd complete ret=%d.\n", ret);
174 else 174 ret = 0;
175 dev_dbg(&dev->pdev->dev, "stop wd complete.\n"); 175 } else {
176 if (!ret)
177 ret = -ETIMEDOUT;
178 dev_warn(&dev->pdev->dev,
179 "stop wd failed to complete ret=%d.\n", ret);
180 }
176 181
177 if (preserve) 182 if (preserve)
178 dev->wd_timeout = wd_timeout; 183 dev->wd_timeout = wd_timeout;
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 09e8c7d53af3..19b4ae052af8 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -875,7 +875,8 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
875 *dp++ = last << 7 | first << 6 | 1; /* EA */ 875 *dp++ = last << 7 | first << 6 | 1; /* EA */
876 len--; 876 len--;
877 } 877 }
878 memcpy(dp, skb_pull(dlci->skb, len), len); 878 memcpy(dp, dlci->skb->data, len);
879 skb_pull(dlci->skb, len);
879 __gsm_data_queue(dlci, msg); 880 __gsm_data_queue(dlci, msg);
880 if (last) 881 if (last)
881 dlci->skb = NULL; 882 dlci->skb = NULL;
@@ -984,10 +985,22 @@ static void gsm_control_reply(struct gsm_mux *gsm, int cmd, u8 *data,
984 */ 985 */
985 986
986static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci, 987static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci,
987 u32 modem) 988 u32 modem, int clen)
988{ 989{
989 int mlines = 0; 990 int mlines = 0;
990 u8 brk = modem >> 6; 991 u8 brk = 0;
992
993 /* The modem status command can either contain one octet (v.24 signals)
994 or two octets (v.24 signals + break signals). The length field will
995 either be 2 or 3 respectively. This is specified in section
996 5.4.6.3.7 of the 27.010 mux spec. */
997
998 if (clen == 2)
999 modem = modem & 0x7f;
1000 else {
1001 brk = modem & 0x7f;
1002 modem = (modem >> 7) & 0x7f;
1003	}
991 1004
992 /* Flow control/ready to communicate */ 1005 /* Flow control/ready to communicate */
993 if (modem & MDM_FC) { 1006 if (modem & MDM_FC) {
@@ -1061,7 +1074,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
1061 return; 1074 return;
1062 } 1075 }
1063 tty = tty_port_tty_get(&dlci->port); 1076 tty = tty_port_tty_get(&dlci->port);
1064 gsm_process_modem(tty, dlci, modem); 1077 gsm_process_modem(tty, dlci, modem, clen);
1065 if (tty) { 1078 if (tty) {
1066 tty_wakeup(tty); 1079 tty_wakeup(tty);
1067 tty_kref_put(tty); 1080 tty_kref_put(tty);
@@ -1482,12 +1495,13 @@ static void gsm_dlci_begin_close(struct gsm_dlci *dlci)
1482 * open we shovel the bits down it, if not we drop them. 1495 * open we shovel the bits down it, if not we drop them.
1483 */ 1496 */
1484 1497
1485static void gsm_dlci_data(struct gsm_dlci *dlci, u8 *data, int len) 1498static void gsm_dlci_data(struct gsm_dlci *dlci, u8 *data, int clen)
1486{ 1499{
1487 /* krefs .. */ 1500 /* krefs .. */
1488 struct tty_port *port = &dlci->port; 1501 struct tty_port *port = &dlci->port;
1489 struct tty_struct *tty = tty_port_tty_get(port); 1502 struct tty_struct *tty = tty_port_tty_get(port);
1490 unsigned int modem = 0; 1503 unsigned int modem = 0;
1504 int len = clen;
1491 1505
1492 if (debug & 16) 1506 if (debug & 16)
1493 pr_debug("%d bytes for tty %p\n", len, tty); 1507 pr_debug("%d bytes for tty %p\n", len, tty);
@@ -1507,7 +1521,7 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, u8 *data, int len)
1507 if (len == 0) 1521 if (len == 0)
1508 return; 1522 return;
1509 } 1523 }
1510 gsm_process_modem(tty, dlci, modem); 1524 gsm_process_modem(tty, dlci, modem, clen);
1511 /* Line state will go via DLCI 0 controls only */ 1525 /* Line state will go via DLCI 0 controls only */
1512 case 1: 1526 case 1:
1513 default: 1527 default:
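
To see why clen must reach gsm_process_modem(), compare the two legal modem-status encodings from section 5.4.6.3.7 of the 27.010 spec, decoded with the same masking the new code uses:

	/*
	 * clen == 2 (v.24 signals only):
	 *      modem &= 0x7f;                -- one EA-stripped octet
	 *
	 * clen == 3 (v.24 signals + break octet, packed by the caller):
	 *      brk   = modem & 0x7f;         -- low 7 bits: break signals
	 *      modem = (modem >> 7) & 0x7f;  -- next 7 bits: v.24 signals
	 */
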
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 0ad32888091c..c3954fbf6ac4 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1815,6 +1815,7 @@ do_it_again:
1815 /* FIXME: does n_tty_set_room need locking ? */ 1815 /* FIXME: does n_tty_set_room need locking ? */
1816 n_tty_set_room(tty); 1816 n_tty_set_room(tty);
1817 timeout = schedule_timeout(timeout); 1817 timeout = schedule_timeout(timeout);
1818 BUG_ON(!tty->read_buf);
1818 continue; 1819 continue;
1819 } 1820 }
1820 __set_current_state(TASK_RUNNING); 1821 __set_current_state(TASK_RUNNING);
diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c
index b40f7b90c81d..b4129f53fb1b 100644
--- a/drivers/tty/serial/8250.c
+++ b/drivers/tty/serial/8250.c
@@ -3318,6 +3318,7 @@ void serial8250_unregister_port(int line)
3318 uart->port.flags &= ~UPF_BOOT_AUTOCONF; 3318 uart->port.flags &= ~UPF_BOOT_AUTOCONF;
3319 uart->port.type = PORT_UNKNOWN; 3319 uart->port.type = PORT_UNKNOWN;
3320 uart->port.dev = &serial8250_isa_devs->dev; 3320 uart->port.dev = &serial8250_isa_devs->dev;
3321 uart->capabilities = uart_config[uart->port.type].flags;
3321 uart_add_one_port(&serial8250_reg, &uart->port); 3322 uart_add_one_port(&serial8250_reg, &uart->port);
3322 } else { 3323 } else {
3323 uart->port.dev = NULL; 3324 uart->port.dev = NULL;
diff --git a/drivers/tty/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c
index 4b4968a294b2..f41b4259ecdd 100644
--- a/drivers/tty/serial/8250_pci.c
+++ b/drivers/tty/serial/8250_pci.c
@@ -973,7 +973,7 @@ ce4100_serial_setup(struct serial_private *priv,
973 973
974static int 974static int
975pci_omegapci_setup(struct serial_private *priv, 975pci_omegapci_setup(struct serial_private *priv,
976 struct pciserial_board *board, 976 const struct pciserial_board *board,
977 struct uart_port *port, int idx) 977 struct uart_port *port, int idx)
978{ 978{
979 return setup_port(priv, port, 2, idx * 8, 0); 979 return setup_port(priv, port, 2, idx * 8, 0);
@@ -994,6 +994,15 @@ static int skip_tx_en_setup(struct serial_private *priv,
994 return pci_default_setup(priv, board, port, idx); 994 return pci_default_setup(priv, board, port, idx);
995} 995}
996 996
997static int pci_eg20t_init(struct pci_dev *dev)
998{
999#if defined(CONFIG_SERIAL_PCH_UART) || defined(CONFIG_SERIAL_PCH_UART_MODULE)
1000 return -ENODEV;
1001#else
1002 return 0;
1003#endif
1004}
1005
997/* This should be in linux/pci_ids.h */ 1006/* This should be in linux/pci_ids.h */
998#define PCI_VENDOR_ID_SBSMODULARIO 0x124B 1007#define PCI_VENDOR_ID_SBSMODULARIO 0x124B
999#define PCI_SUBVENDOR_ID_SBSMODULARIO 0x124B 1008#define PCI_SUBVENDOR_ID_SBSMODULARIO 0x124B
@@ -1446,6 +1455,56 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
1446 .init = pci_oxsemi_tornado_init, 1455 .init = pci_oxsemi_tornado_init,
1447 .setup = pci_default_setup, 1456 .setup = pci_default_setup,
1448 }, 1457 },
1458 {
1459 .vendor = PCI_VENDOR_ID_INTEL,
1460 .device = 0x8811,
1461 .init = pci_eg20t_init,
1462 },
1463 {
1464 .vendor = PCI_VENDOR_ID_INTEL,
1465 .device = 0x8812,
1466 .init = pci_eg20t_init,
1467 },
1468 {
1469 .vendor = PCI_VENDOR_ID_INTEL,
1470 .device = 0x8813,
1471 .init = pci_eg20t_init,
1472 },
1473 {
1474 .vendor = PCI_VENDOR_ID_INTEL,
1475 .device = 0x8814,
1476 .init = pci_eg20t_init,
1477 },
1478 {
1479 .vendor = 0x10DB,
1480 .device = 0x8027,
1481 .init = pci_eg20t_init,
1482 },
1483 {
1484 .vendor = 0x10DB,
1485 .device = 0x8028,
1486 .init = pci_eg20t_init,
1487 },
1488 {
1489 .vendor = 0x10DB,
1490 .device = 0x8029,
1491 .init = pci_eg20t_init,
1492 },
1493 {
1494 .vendor = 0x10DB,
1495 .device = 0x800C,
1496 .init = pci_eg20t_init,
1497 },
1498 {
1499 .vendor = 0x10DB,
1500 .device = 0x800D,
1501 .init = pci_eg20t_init,
1502 },
1449 /* 1508 /*
1450 * Cronyx Omega PCI (PLX-chip based) 1509 * Cronyx Omega PCI (PLX-chip based)
1451 */ 1510 */
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 8dc0541feecc..f5f6831b0a64 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -50,6 +50,7 @@
50#include <linux/dmaengine.h> 50#include <linux/dmaengine.h>
51#include <linux/dma-mapping.h> 51#include <linux/dma-mapping.h>
52#include <linux/scatterlist.h> 52#include <linux/scatterlist.h>
53#include <linux/delay.h>
53 54
54#include <asm/io.h> 55#include <asm/io.h>
55#include <asm/sizes.h> 56#include <asm/sizes.h>
@@ -65,6 +66,30 @@
65#define UART_DR_ERROR (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE) 66#define UART_DR_ERROR (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
66#define UART_DUMMY_DR_RX (1 << 16) 67#define UART_DUMMY_DR_RX (1 << 16)
67 68
69
70#define UART_WA_SAVE_NR 14
71
72static void pl011_lockup_wa(unsigned long data);
73static const u32 uart_wa_reg[UART_WA_SAVE_NR] = {
74 ST_UART011_DMAWM,
75 ST_UART011_TIMEOUT,
76 ST_UART011_LCRH_RX,
77 UART011_IBRD,
78 UART011_FBRD,
79 ST_UART011_LCRH_TX,
80 UART011_IFLS,
81 ST_UART011_XFCR,
82 ST_UART011_XON1,
83 ST_UART011_XON2,
84 ST_UART011_XOFF1,
85 ST_UART011_XOFF2,
86 UART011_CR,
87 UART011_IMSC
88};
89
90static u32 uart_wa_regdata[UART_WA_SAVE_NR];
91static DECLARE_TASKLET(pl011_lockup_tlet, pl011_lockup_wa, 0);
92
68/* There is by now at least one vendor with differing details, so handle it */ 93/* There is by now at least one vendor with differing details, so handle it */
69struct vendor_data { 94struct vendor_data {
70 unsigned int ifls; 95 unsigned int ifls;
@@ -72,6 +97,7 @@ struct vendor_data {
72 unsigned int lcrh_tx; 97 unsigned int lcrh_tx;
73 unsigned int lcrh_rx; 98 unsigned int lcrh_rx;
74 bool oversampling; 99 bool oversampling;
100 bool interrupt_may_hang; /* vendor-specific */
75 bool dma_threshold; 101 bool dma_threshold;
76}; 102};
77 103
@@ -90,9 +116,12 @@ static struct vendor_data vendor_st = {
90 .lcrh_tx = ST_UART011_LCRH_TX, 116 .lcrh_tx = ST_UART011_LCRH_TX,
91 .lcrh_rx = ST_UART011_LCRH_RX, 117 .lcrh_rx = ST_UART011_LCRH_RX,
92 .oversampling = true, 118 .oversampling = true,
119 .interrupt_may_hang = true,
93 .dma_threshold = true, 120 .dma_threshold = true,
94}; 121};
95 122
123static struct uart_amba_port *amba_ports[UART_NR];
124
96/* Deals with DMA transactions */ 125/* Deals with DMA transactions */
97 126
98struct pl011_sgbuf { 127struct pl011_sgbuf {
@@ -132,6 +161,7 @@ struct uart_amba_port {
132 unsigned int lcrh_rx; /* vendor-specific */ 161 unsigned int lcrh_rx; /* vendor-specific */
133 bool autorts; 162 bool autorts;
134 char type[12]; 163 char type[12];
164 bool interrupt_may_hang; /* vendor-specific */
135#ifdef CONFIG_DMA_ENGINE 165#ifdef CONFIG_DMA_ENGINE
136 /* DMA stuff */ 166 /* DMA stuff */
137 bool using_tx_dma; 167 bool using_tx_dma;
@@ -1008,6 +1038,68 @@ static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
1008#endif 1038#endif
1009 1039
1010 1040
1041/*
1042 * pl011_lockup_wa
1043 * This workaround aims to break the deadlock that occurs
1044 * when, after a long transfer over the uart with hardware
1045 * flow control, the uart interrupt registers cannot be
1046 * cleared and the uart transfer gets blocked.
1047 *
1048 * It is seen that during such a deadlock condition the ICR
1049 * doesn't get cleared even on multiple writes. This causes
1050 * pass_counter to decrease and finally reach zero, which is
1051 * taken as the trigger point to run this UART_BT_WA.
1052 *
1053 */
1054static void pl011_lockup_wa(unsigned long data)
1055{
1056 struct uart_amba_port *uap = amba_ports[0];
1057 void __iomem *base = uap->port.membase;
1058 struct circ_buf *xmit = &uap->port.state->xmit;
1059 struct tty_struct *tty = uap->port.state->port.tty;
1060 int buf_empty_retries = 200;
1061 int loop;
1062
1063 /* Stop HCI layer from submitting data for tx */
1064 tty->hw_stopped = 1;
1065 while (!uart_circ_empty(xmit)) {
1066 if (buf_empty_retries-- == 0)
1067 break;
1068 udelay(100);
1069 }
1070
1071 /* Backup registers */
1072 for (loop = 0; loop < UART_WA_SAVE_NR; loop++)
1073 uart_wa_regdata[loop] = readl(base + uart_wa_reg[loop]);
1074
1075 /* Disable UART so that FIFO data is flushed out */
1076 writew(0x00, uap->port.membase + UART011_CR);
1077
1078 /* Soft reset UART module */
1079 if (uap->port.dev->platform_data) {
1080 struct amba_pl011_data *plat;
1081
1082 plat = uap->port.dev->platform_data;
1083 if (plat->reset)
1084 plat->reset();
1085 }
1086
1087 /* Restore registers */
1088 for (loop = 0; loop < UART_WA_SAVE_NR; loop++)
1089		writew(uart_wa_regdata[loop],
1090 uap->port.membase + uart_wa_reg[loop]);
1091
1092 /* Initialise the old status of the modem signals */
1093 uap->old_status = readw(uap->port.membase + UART01x_FR) &
1094 UART01x_FR_MODEM_ANY;
1095
1096 if (readl(base + UART011_MIS) & 0x2)
1097 printk(KERN_EMERG "UART_BT_WA: ***FAILED***\n");
1098
1099 /* Start Tx/Rx */
1100 tty->hw_stopped = 0;
1101}
1102
1011static void pl011_stop_tx(struct uart_port *port) 1103static void pl011_stop_tx(struct uart_port *port)
1012{ 1104{
1013 struct uart_amba_port *uap = (struct uart_amba_port *)port; 1105 struct uart_amba_port *uap = (struct uart_amba_port *)port;
@@ -1158,8 +1250,11 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
1158 if (status & UART011_TXIS) 1250 if (status & UART011_TXIS)
1159 pl011_tx_chars(uap); 1251 pl011_tx_chars(uap);
1160 1252
1161 if (pass_counter-- == 0) 1253 if (pass_counter-- == 0) {
1254 if (uap->interrupt_may_hang)
1255 tasklet_schedule(&pl011_lockup_tlet);
1162 break; 1256 break;
1257 }
1163 1258
1164 status = readw(uap->port.membase + UART011_MIS); 1259 status = readw(uap->port.membase + UART011_MIS);
1165 } while (status != 0); 1260 } while (status != 0);
@@ -1339,6 +1434,14 @@ static int pl011_startup(struct uart_port *port)
1339 writew(uap->im, uap->port.membase + UART011_IMSC); 1434 writew(uap->im, uap->port.membase + UART011_IMSC);
1340 spin_unlock_irq(&uap->port.lock); 1435 spin_unlock_irq(&uap->port.lock);
1341 1436
1437 if (uap->port.dev->platform_data) {
1438 struct amba_pl011_data *plat;
1439
1440 plat = uap->port.dev->platform_data;
1441 if (plat->init)
1442 plat->init();
1443 }
1444
1342 return 0; 1445 return 0;
1343 1446
1344 clk_dis: 1447 clk_dis:
@@ -1394,6 +1497,15 @@ static void pl011_shutdown(struct uart_port *port)
1394 * Shut down the clock producer 1497 * Shut down the clock producer
1395 */ 1498 */
1396 clk_disable(uap->clk); 1499 clk_disable(uap->clk);
1500
1501 if (uap->port.dev->platform_data) {
1502 struct amba_pl011_data *plat;
1503
1504 plat = uap->port.dev->platform_data;
1505 if (plat->exit)
1506 plat->exit();
1507 }
1508
1397} 1509}
1398 1510
1399static void 1511static void
@@ -1700,6 +1812,14 @@ static int __init pl011_console_setup(struct console *co, char *options)
1700 if (!uap) 1812 if (!uap)
1701 return -ENODEV; 1813 return -ENODEV;
1702 1814
1815 if (uap->port.dev->platform_data) {
1816 struct amba_pl011_data *plat;
1817
1818 plat = uap->port.dev->platform_data;
1819 if (plat->init)
1820 plat->init();
1821 }
1822
1703 uap->port.uartclk = clk_get_rate(uap->clk); 1823 uap->port.uartclk = clk_get_rate(uap->clk);
1704 1824
1705 if (options) 1825 if (options)
@@ -1774,6 +1894,7 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
1774 uap->lcrh_rx = vendor->lcrh_rx; 1894 uap->lcrh_rx = vendor->lcrh_rx;
1775 uap->lcrh_tx = vendor->lcrh_tx; 1895 uap->lcrh_tx = vendor->lcrh_tx;
1776 uap->fifosize = vendor->fifosize; 1896 uap->fifosize = vendor->fifosize;
1897 uap->interrupt_may_hang = vendor->interrupt_may_hang;
1777 uap->port.dev = &dev->dev; 1898 uap->port.dev = &dev->dev;
1778 uap->port.mapbase = dev->res.start; 1899 uap->port.mapbase = dev->res.start;
1779 uap->port.membase = base; 1900 uap->port.membase = base;
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 6d5d6e679fc7..af9b7814965a 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1709,12 +1709,13 @@ static int atmel_serial_resume(struct platform_device *pdev)
1709static int __devinit atmel_serial_probe(struct platform_device *pdev) 1709static int __devinit atmel_serial_probe(struct platform_device *pdev)
1710{ 1710{
1711 struct atmel_uart_port *port; 1711 struct atmel_uart_port *port;
1712 struct atmel_uart_data *pdata = pdev->dev.platform_data;
1712 void *data; 1713 void *data;
1713 int ret; 1714 int ret;
1714 1715
1715 BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1)); 1716 BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));
1716 1717
1717 port = &atmel_ports[pdev->id]; 1718 port = &atmel_ports[pdata->num];
1718 port->backup_imr = 0; 1719 port->backup_imr = 0;
1719 1720
1720 atmel_init_port(port, pdev); 1721 atmel_init_port(port, pdev);
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index a1a0e55d0807..c0b68b9cad91 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -250,6 +250,20 @@ static void bcm_uart_do_rx(struct uart_port *port)
250 /* get overrun/fifo empty information from ier 250 /* get overrun/fifo empty information from ier
251 * register */ 251 * register */
252 iestat = bcm_uart_readl(port, UART_IR_REG); 252 iestat = bcm_uart_readl(port, UART_IR_REG);
253
254 if (unlikely(iestat & UART_IR_STAT(UART_IR_RXOVER))) {
255 unsigned int val;
256
257 /* fifo reset is required to clear
258 * interrupt */
259 val = bcm_uart_readl(port, UART_CTL_REG);
260 val |= UART_CTL_RSTRXFIFO_MASK;
261 bcm_uart_writel(port, val, UART_CTL_REG);
262
263 port->icount.overrun++;
264 tty_insert_flip_char(tty, 0, TTY_OVERRUN);
265 }
266
253 if (!(iestat & UART_IR_STAT(UART_IR_RXNOTEMPTY))) 267 if (!(iestat & UART_IR_STAT(UART_IR_RXNOTEMPTY)))
254 break; 268 break;
255 269
@@ -284,10 +298,6 @@ static void bcm_uart_do_rx(struct uart_port *port)
284 if (uart_handle_sysrq_char(port, c)) 298 if (uart_handle_sysrq_char(port, c))
285 continue; 299 continue;
286 300
287 if (unlikely(iestat & UART_IR_STAT(UART_IR_RXOVER))) {
288 port->icount.overrun++;
289 tty_insert_flip_char(tty, 0, TTY_OVERRUN);
290 }
291 301
292 if ((cstat & port->ignore_status_mask) == 0) 302 if ((cstat & port->ignore_status_mask) == 0)
293 tty_insert_flip_char(tty, c, flag); 303 tty_insert_flip_char(tty, c, flag);
diff --git a/drivers/tty/serial/jsm/jsm_driver.c b/drivers/tty/serial/jsm/jsm_driver.c
index 18f548449c63..96da17868cf3 100644
--- a/drivers/tty/serial/jsm/jsm_driver.c
+++ b/drivers/tty/serial/jsm/jsm_driver.c
@@ -125,7 +125,7 @@ static int __devinit jsm_probe_one(struct pci_dev *pdev, const struct pci_device
125 brd->bd_uart_offset = 0x200; 125 brd->bd_uart_offset = 0x200;
126 brd->bd_dividend = 921600; 126 brd->bd_dividend = 921600;
127 127
128 brd->re_map_membase = ioremap(brd->membase, 0x1000); 128 brd->re_map_membase = ioremap(brd->membase, pci_resource_len(pdev, 0));
129 if (!brd->re_map_membase) { 129 if (!brd->re_map_membase) {
130 dev_err(&pdev->dev, 130 dev_err(&pdev->dev,
131 "card has no PCI Memory resources, " 131 "card has no PCI Memory resources, "
diff --git a/drivers/tty/serial/s5pv210.c b/drivers/tty/serial/s5pv210.c
index fb2619f93d84..dd194dc80ee9 100644
--- a/drivers/tty/serial/s5pv210.c
+++ b/drivers/tty/serial/s5pv210.c
@@ -30,7 +30,7 @@ static int s5pv210_serial_setsource(struct uart_port *port,
30 struct s3c2410_uartcfg *cfg = port->dev->platform_data; 30 struct s3c2410_uartcfg *cfg = port->dev->platform_data;
31 unsigned long ucon = rd_regl(port, S3C2410_UCON); 31 unsigned long ucon = rd_regl(port, S3C2410_UCON);
32 32
33 if ((cfg->clocks_size) == 1) 33 if (cfg->flags & NO_NEED_CHECK_CLKSRC)
34 return 0; 34 return 0;
35 35
36 if (strcmp(clk->name, "pclk") == 0) 36 if (strcmp(clk->name, "pclk") == 0)
@@ -55,7 +55,7 @@ static int s5pv210_serial_getsource(struct uart_port *port,
55 55
56 clk->divisor = 1; 56 clk->divisor = 1;
57 57
58 if ((cfg->clocks_size) == 1) 58 if (cfg->flags & NO_NEED_CHECK_CLKSRC)
59 return 0; 59 return 0;
60 60
61 switch (ucon & S5PV210_UCON_CLKMASK) { 61 switch (ucon & S5PV210_UCON_CLKMASK) {
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 5d01d32e2cf0..ef925d581713 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -555,7 +555,7 @@ static void tty_ldisc_flush_works(struct tty_struct *tty)
555static int tty_ldisc_wait_idle(struct tty_struct *tty) 555static int tty_ldisc_wait_idle(struct tty_struct *tty)
556{ 556{
557 int ret; 557 int ret;
558 ret = wait_event_interruptible_timeout(tty_ldisc_idle, 558 ret = wait_event_timeout(tty_ldisc_idle,
559 atomic_read(&tty->ldisc->users) == 1, 5 * HZ); 559 atomic_read(&tty->ldisc->users) == 1, 5 * HZ);
560 if (ret < 0) 560 if (ret < 0)
561 return ret; 561 return ret;
@@ -763,6 +763,8 @@ static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
763 if (IS_ERR(ld)) 763 if (IS_ERR(ld))
764 return -1; 764 return -1;
765 765
766 WARN_ON_ONCE(tty_ldisc_wait_idle(tty));
767
766 tty_ldisc_close(tty, tty->ldisc); 768 tty_ldisc_close(tty, tty->ldisc);
767 tty_ldisc_put(tty->ldisc); 769 tty_ldisc_put(tty->ldisc);
768 tty->ldisc = NULL; 770 tty->ldisc = NULL;
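The tty_ldisc change above swaps an interruptible wait for a plain timed wait, which narrows the return contract: a signal can no longer abort the wait, so only the timeout case remains as a failure mode. A sketch of the resulting semantics, assuming the timeout still maps to -EBUSY as in the surrounding function:

    /* wait_event_interruptible_timeout(): -ERESTARTSYS on signal,
     *                                     0 on timeout, >0 on success.
     * wait_event_timeout():               0 on timeout, >0 on success.
     * With the non-interruptible variant, the "if (ret < 0)" branch
     * in tty_ldisc_wait_idle() can never be taken. */
    long ret = wait_event_timeout(tty_ldisc_idle,
                    atomic_read(&tty->ldisc->users) == 1, 5 * HZ);
    if (ret == 0)
            return -EBUSY;
    return 0;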
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index aa3cc465a601..34e3da5aa72a 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1187,13 +1187,22 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
1187 for (i = n - 1; i >= 0; --i) { 1187 for (i = n - 1; i >= 0; --i) {
1188 intf = udev->actconfig->interface[i]; 1188 intf = udev->actconfig->interface[i];
1189 status = usb_suspend_interface(udev, intf, msg); 1189 status = usb_suspend_interface(udev, intf, msg);
1190
1191 /* Ignore errors during system sleep transitions */
1192 if (!(msg.event & PM_EVENT_AUTO))
1193 status = 0;
1190 if (status != 0) 1194 if (status != 0)
1191 break; 1195 break;
1192 } 1196 }
1193 } 1197 }
1194 if (status == 0) 1198 if (status == 0) {
1195 status = usb_suspend_device(udev, msg); 1199 status = usb_suspend_device(udev, msg);
1196 1200
1201 /* Again, ignore errors during system sleep transitions */
1202 if (!(msg.event & PM_EVENT_AUTO))
1203 status = 0;
1204 }
1205
1197 /* If the suspend failed, resume interfaces that did get suspended */ 1206 /* If the suspend failed, resume interfaces that did get suspended */
1198 if (status != 0) { 1207 if (status != 0) {
1199 msg.event ^= (PM_EVENT_SUSPEND | PM_EVENT_RESUME); 1208 msg.event ^= (PM_EVENT_SUSPEND | PM_EVENT_RESUME);
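The driver.c hunks above encode one policy in two places: only runtime (automatic) suspend may let an interface or device error veto the transition, while system sleep always proceeds. A hypothetical helper, purely to illustrate the rule (filter_suspend_status() is not part of the patch):

    static int filter_suspend_status(int status, pm_message_t msg)
    {
            if (!(msg.event & PM_EVENT_AUTO))
                    return 0;       /* system sleep: ignore errors */
            return status;          /* runtime PM: honour the error */
    }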
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 90ae1753dda1..a428aa080a36 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1634,6 +1634,7 @@ void usb_disconnect(struct usb_device **pdev)
1634{ 1634{
1635 struct usb_device *udev = *pdev; 1635 struct usb_device *udev = *pdev;
1636 int i; 1636 int i;
1637 struct usb_hcd *hcd = bus_to_hcd(udev->bus);
1637 1638
1638 if (!udev) { 1639 if (!udev) {
1639 pr_debug ("%s nodev\n", __func__); 1640 pr_debug ("%s nodev\n", __func__);
@@ -1661,7 +1662,9 @@ void usb_disconnect(struct usb_device **pdev)
1661 * so that the hardware is now fully quiesced. 1662 * so that the hardware is now fully quiesced.
1662 */ 1663 */
1663 dev_dbg (&udev->dev, "unregistering device\n"); 1664 dev_dbg (&udev->dev, "unregistering device\n");
1665 mutex_lock(hcd->bandwidth_mutex);
1664 usb_disable_device(udev, 0); 1666 usb_disable_device(udev, 0);
1667 mutex_unlock(hcd->bandwidth_mutex);
1665 usb_hcd_synchronize_unlinks(udev); 1668 usb_hcd_synchronize_unlinks(udev);
1666 1669
1667 usb_remove_ep_devs(&udev->ep0); 1670 usb_remove_ep_devs(&udev->ep0);
@@ -2362,6 +2365,10 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
2362 USB_DEVICE_REMOTE_WAKEUP, 0, 2365 USB_DEVICE_REMOTE_WAKEUP, 0,
2363 NULL, 0, 2366 NULL, 0,
2364 USB_CTRL_SET_TIMEOUT); 2367 USB_CTRL_SET_TIMEOUT);
2368
2369 /* System sleep transitions should never fail */
2370 if (!(msg.event & PM_EVENT_AUTO))
2371 status = 0;
2365 } else { 2372 } else {
2366 /* device has up to 10 msec to fully suspend */ 2373 /* device has up to 10 msec to fully suspend */
2367 dev_dbg(&udev->dev, "usb %ssuspend\n", 2374 dev_dbg(&udev->dev, "usb %ssuspend\n",
@@ -2611,16 +2618,15 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
2611 struct usb_device *hdev = hub->hdev; 2618 struct usb_device *hdev = hub->hdev;
2612 unsigned port1; 2619 unsigned port1;
2613 2620
 2614 /* fail if children aren't already suspended */ 2621 /* Warn if children aren't already suspended */
2615 for (port1 = 1; port1 <= hdev->maxchild; port1++) { 2622 for (port1 = 1; port1 <= hdev->maxchild; port1++) {
2616 struct usb_device *udev; 2623 struct usb_device *udev;
2617 2624
2618 udev = hdev->children [port1-1]; 2625 udev = hdev->children [port1-1];
2619 if (udev && udev->can_submit) { 2626 if (udev && udev->can_submit) {
2620 if (!(msg.event & PM_EVENT_AUTO)) 2627 dev_warn(&intf->dev, "port %d nyet suspended\n", port1);
2621 dev_dbg(&intf->dev, "port %d nyet suspended\n", 2628 if (msg.event & PM_EVENT_AUTO)
2622 port1); 2629 return -EBUSY;
2623 return -EBUSY;
2624 } 2630 }
2625 } 2631 }
2626 2632
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 5701e857392b..e0719b4ee189 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1135,10 +1135,13 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf,
1135 * Deallocates hcd/hardware state for the endpoints (nuking all or most 1135 * Deallocates hcd/hardware state for the endpoints (nuking all or most
1136 * pending urbs) and usbcore state for the interfaces, so that usbcore 1136 * pending urbs) and usbcore state for the interfaces, so that usbcore
1137 * must usb_set_configuration() before any interfaces could be used. 1137 * must usb_set_configuration() before any interfaces could be used.
1138 *
1139 * Must be called with hcd->bandwidth_mutex held.
1138 */ 1140 */
1139void usb_disable_device(struct usb_device *dev, int skip_ep0) 1141void usb_disable_device(struct usb_device *dev, int skip_ep0)
1140{ 1142{
1141 int i; 1143 int i;
1144 struct usb_hcd *hcd = bus_to_hcd(dev->bus);
1142 1145
1143 /* getting rid of interfaces will disconnect 1146 /* getting rid of interfaces will disconnect
1144 * any drivers bound to them (a key side effect) 1147 * any drivers bound to them (a key side effect)
@@ -1172,6 +1175,16 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
1172 1175
1173 dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__, 1176 dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__,
1174 skip_ep0 ? "non-ep0" : "all"); 1177 skip_ep0 ? "non-ep0" : "all");
1178 if (hcd->driver->check_bandwidth) {
1179 /* First pass: Cancel URBs, leave endpoint pointers intact. */
1180 for (i = skip_ep0; i < 16; ++i) {
1181 usb_disable_endpoint(dev, i, false);
1182 usb_disable_endpoint(dev, i + USB_DIR_IN, false);
1183 }
1184 /* Remove endpoints from the host controller internal state */
1185 usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
1186 /* Second pass: remove endpoint pointers */
1187 }
1175 for (i = skip_ep0; i < 16; ++i) { 1188 for (i = skip_ep0; i < 16; ++i) {
1176 usb_disable_endpoint(dev, i, true); 1189 usb_disable_endpoint(dev, i, true);
1177 usb_disable_endpoint(dev, i + USB_DIR_IN, true); 1190 usb_disable_endpoint(dev, i + USB_DIR_IN, true);
@@ -1273,6 +1286,8 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
1273 interface); 1286 interface);
1274 return -EINVAL; 1287 return -EINVAL;
1275 } 1288 }
1289 if (iface->unregistering)
1290 return -ENODEV;
1276 1291
1277 alt = usb_altnum_to_altsetting(iface, alternate); 1292 alt = usb_altnum_to_altsetting(iface, alternate);
1278 if (!alt) { 1293 if (!alt) {
@@ -1727,6 +1742,7 @@ free_interfaces:
1727 /* if it's already configured, clear out old state first. 1742 /* if it's already configured, clear out old state first.
1728 * getting rid of old interfaces means unbinding their drivers. 1743 * getting rid of old interfaces means unbinding their drivers.
1729 */ 1744 */
1745 mutex_lock(hcd->bandwidth_mutex);
1730 if (dev->state != USB_STATE_ADDRESS) 1746 if (dev->state != USB_STATE_ADDRESS)
1731 usb_disable_device(dev, 1); /* Skip ep0 */ 1747 usb_disable_device(dev, 1); /* Skip ep0 */
1732 1748
@@ -1739,7 +1755,6 @@ free_interfaces:
1739 * host controller will not allow submissions to dropped endpoints. If 1755 * host controller will not allow submissions to dropped endpoints. If
1740 * this call fails, the device state is unchanged. 1756 * this call fails, the device state is unchanged.
1741 */ 1757 */
1742 mutex_lock(hcd->bandwidth_mutex);
1743 ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL); 1758 ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL);
1744 if (ret < 0) { 1759 if (ret < 0) {
1745 mutex_unlock(hcd->bandwidth_mutex); 1760 mutex_unlock(hcd->bandwidth_mutex);
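With the message.c change, usb_disable_device() documents a new locking rule and, on controllers with a check_bandwidth hook, performs a two-pass teardown: cancel URBs first, drop the endpoints from the host controller's internal state, then free the endpoint pointers. The caller-side contract, mirroring the hub.c hunk earlier:

    mutex_lock(hcd->bandwidth_mutex);
    usb_disable_device(udev, 0);    /* two-pass teardown happens inside */
    mutex_unlock(hcd->bandwidth_mutex);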
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index 2cd9a60c7f3a..4e4833168087 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -46,7 +46,6 @@
46#include <asm/system.h> 46#include <asm/system.h>
47#include <asm/unaligned.h> 47#include <asm/unaligned.h>
48#include <asm/dma.h> 48#include <asm/dma.h>
49#include <asm/cacheflush.h>
50 49
51#include "fsl_usb2_udc.h" 50#include "fsl_usb2_udc.h"
52 51
@@ -118,6 +117,17 @@ static void (*_fsl_writel)(u32 v, unsigned __iomem *p);
118#define fsl_readl(p) (*_fsl_readl)((p)) 117#define fsl_readl(p) (*_fsl_readl)((p))
119#define fsl_writel(v, p) (*_fsl_writel)((v), (p)) 118#define fsl_writel(v, p) (*_fsl_writel)((v), (p))
120 119
120static inline void fsl_set_accessors(struct fsl_usb2_platform_data *pdata)
121{
122 if (pdata->big_endian_mmio) {
123 _fsl_readl = _fsl_readl_be;
124 _fsl_writel = _fsl_writel_be;
125 } else {
126 _fsl_readl = _fsl_readl_le;
127 _fsl_writel = _fsl_writel_le;
128 }
129}
130
121static inline u32 cpu_to_hc32(const u32 x) 131static inline u32 cpu_to_hc32(const u32 x)
122{ 132{
123 return udc_controller->pdata->big_endian_desc 133 return udc_controller->pdata->big_endian_desc
@@ -132,6 +142,8 @@ static inline u32 hc32_to_cpu(const u32 x)
132 : le32_to_cpu((__force __le32)x); 142 : le32_to_cpu((__force __le32)x);
133} 143}
134#else /* !CONFIG_PPC32 */ 144#else /* !CONFIG_PPC32 */
145static inline void fsl_set_accessors(struct fsl_usb2_platform_data *pdata) {}
146
135#define fsl_readl(addr) readl(addr) 147#define fsl_readl(addr) readl(addr)
136#define fsl_writel(val32, addr) writel(val32, addr) 148#define fsl_writel(val32, addr) writel(val32, addr)
137#define cpu_to_hc32(x) cpu_to_le32(x) 149#define cpu_to_hc32(x) cpu_to_le32(x)
@@ -1277,6 +1289,11 @@ static int ep0_prime_status(struct fsl_udc *udc, int direction)
1277 req->req.complete = NULL; 1289 req->req.complete = NULL;
1278 req->dtd_count = 0; 1290 req->dtd_count = 0;
1279 1291
1292 req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
1293 req->req.buf, req->req.length,
1294 ep_is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
1295 req->mapped = 1;
1296
1280 if (fsl_req_to_dtd(req) == 0) 1297 if (fsl_req_to_dtd(req) == 0)
1281 fsl_queue_td(ep, req); 1298 fsl_queue_td(ep, req);
1282 else 1299 else
@@ -1348,9 +1365,6 @@ static void ch9getstatus(struct fsl_udc *udc, u8 request_type, u16 value,
 1348 /* Fill in the request structure */ 1365 /* Fill in the request structure */
1349 *((u16 *) req->req.buf) = cpu_to_le16(tmp); 1366 *((u16 *) req->req.buf) = cpu_to_le16(tmp);
1350 1367
1351 /* flush cache for the req buffer */
1352 flush_dcache_range((u32)req->req.buf, (u32)req->req.buf + 8);
1353
1354 req->ep = ep; 1368 req->ep = ep;
1355 req->req.length = 2; 1369 req->req.length = 2;
1356 req->req.status = -EINPROGRESS; 1370 req->req.status = -EINPROGRESS;
@@ -1358,6 +1372,11 @@ static void ch9getstatus(struct fsl_udc *udc, u8 request_type, u16 value,
1358 req->req.complete = NULL; 1372 req->req.complete = NULL;
1359 req->dtd_count = 0; 1373 req->dtd_count = 0;
1360 1374
1375 req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
1376 req->req.buf, req->req.length,
1377 ep_is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
1378 req->mapped = 1;
1379
1361 /* prime the data phase */ 1380 /* prime the data phase */
1362 if ((fsl_req_to_dtd(req) == 0)) 1381 if ((fsl_req_to_dtd(req) == 0))
1363 fsl_queue_td(ep, req); 1382 fsl_queue_td(ep, req);
@@ -2354,7 +2373,6 @@ static int __init struct_udc_setup(struct fsl_udc *udc,
2354 struct fsl_req, req); 2373 struct fsl_req, req);
2355 /* allocate a small amount of memory to get valid address */ 2374 /* allocate a small amount of memory to get valid address */
2356 udc->status_req->req.buf = kmalloc(8, GFP_KERNEL); 2375 udc->status_req->req.buf = kmalloc(8, GFP_KERNEL);
2357 udc->status_req->req.dma = virt_to_phys(udc->status_req->req.buf);
2358 2376
2359 udc->resume_state = USB_STATE_NOTATTACHED; 2377 udc->resume_state = USB_STATE_NOTATTACHED;
2360 udc->usb_state = USB_STATE_POWERED; 2378 udc->usb_state = USB_STATE_POWERED;
@@ -2470,13 +2488,7 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
2470 } 2488 }
2471 2489
2472 /* Set accessors only after pdata->init() ! */ 2490 /* Set accessors only after pdata->init() ! */
2473 if (pdata->big_endian_mmio) { 2491 fsl_set_accessors(pdata);
2474 _fsl_readl = _fsl_readl_be;
2475 _fsl_writel = _fsl_writel_be;
2476 } else {
2477 _fsl_readl = _fsl_readl_le;
2478 _fsl_writel = _fsl_writel_le;
2479 }
2480 2492
2481#ifndef CONFIG_ARCH_MXC 2493#ifndef CONFIG_ARCH_MXC
2482 if (pdata->have_sysif_regs) 2494 if (pdata->have_sysif_regs)
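The fsl_udc_core hunks replace virt_to_phys() plus an explicit cache flush with a proper streaming DMA mapping for the ep0 status and get-status requests, marking req->mapped so the completion path knows to unmap. A sketch of the unmap side this implies; done_sketch() is hypothetical and DMA_ADDR_INVALID is assumed to be the driver's sentinel value:

    static void done_sketch(struct fsl_ep *ep, struct fsl_req *req)
    {
            if (req->mapped) {
                    dma_unmap_single(ep->udc->gadget.dev.parent,
                                    req->req.dma, req->req.length,
                                    ep_is_in(ep) ? DMA_TO_DEVICE
                                                 : DMA_FROM_DEVICE);
                    req->req.dma = DMA_ADDR_INVALID;
                    req->mapped = 0;
            }
    }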
diff --git a/drivers/usb/host/ehci-ath79.c b/drivers/usb/host/ehci-ath79.c
index 98cc8a13169c..aa248c2f2c60 100644
--- a/drivers/usb/host/ehci-ath79.c
+++ b/drivers/usb/host/ehci-ath79.c
@@ -44,7 +44,6 @@ static int ehci_ath79_init(struct usb_hcd *hcd)
44 struct ehci_hcd *ehci = hcd_to_ehci(hcd); 44 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
45 struct platform_device *pdev = to_platform_device(hcd->self.controller); 45 struct platform_device *pdev = to_platform_device(hcd->self.controller);
46 const struct platform_device_id *id; 46 const struct platform_device_id *id;
47 int hclength;
48 int ret; 47 int ret;
49 48
50 id = platform_get_device_id(pdev); 49 id = platform_get_device_id(pdev);
@@ -53,20 +52,23 @@ static int ehci_ath79_init(struct usb_hcd *hcd)
53 return -EINVAL; 52 return -EINVAL;
54 } 53 }
55 54
56 hclength = HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
57 switch (id->driver_data) { 55 switch (id->driver_data) {
58 case EHCI_ATH79_IP_V1: 56 case EHCI_ATH79_IP_V1:
59 ehci->has_synopsys_hc_bug = 1; 57 ehci->has_synopsys_hc_bug = 1;
60 58
61 ehci->caps = hcd->regs; 59 ehci->caps = hcd->regs;
62 ehci->regs = hcd->regs + hclength; 60 ehci->regs = hcd->regs +
61 HC_LENGTH(ehci,
62 ehci_readl(ehci, &ehci->caps->hc_capbase));
63 break; 63 break;
64 64
65 case EHCI_ATH79_IP_V2: 65 case EHCI_ATH79_IP_V2:
66 hcd->has_tt = 1; 66 hcd->has_tt = 1;
67 67
68 ehci->caps = hcd->regs + 0x100; 68 ehci->caps = hcd->regs + 0x100;
69 ehci->regs = hcd->regs + 0x100 + hclength; 69 ehci->regs = hcd->regs + 0x100 +
70 HC_LENGTH(ehci,
71 ehci_readl(ehci, &ehci->caps->hc_capbase));
70 break; 72 break;
71 73
72 default: 74 default:
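The ehci-ath79 fix is an ordering one: the removed code computed HC_LENGTH() through ehci->caps before ehci->caps had been assigned, reading the capability length via an uninitialized pointer. The requirement, sketched with a hypothetical caps_offset (0x0 for IP v1, 0x100 for IP v2):

    ehci->caps = hcd->regs + caps_offset;   /* must come first */
    ehci->regs = hcd->regs + caps_offset +
            HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));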
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index b435ed67dd5c..f8030ee928e8 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1,4 +1,8 @@
1/* 1/*
2 * Enhanced Host Controller Interface (EHCI) driver for USB.
3 *
4 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
5 *
2 * Copyright (c) 2000-2004 by David Brownell 6 * Copyright (c) 2000-2004 by David Brownell
3 * 7 *
4 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index c9e6e454c625..55d3d5859ac5 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -1555,7 +1555,7 @@ static void kill_transfer(struct usb_hcd *hcd, struct urb *urb,
1555 1555
1556 /* We need to forcefully reclaim the slot since some transfers never 1556 /* We need to forcefully reclaim the slot since some transfers never
1557 return, e.g. interrupt transfers and NAKed bulk transfers. */ 1557 return, e.g. interrupt transfers and NAKed bulk transfers. */
1558 if (usb_pipebulk(urb->pipe)) { 1558 if (usb_pipecontrol(urb->pipe) || usb_pipebulk(urb->pipe)) {
1559 skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG); 1559 skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
1560 skip_map |= (1 << qh->slot); 1560 skip_map |= (1 << qh->slot);
1561 reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map); 1561 reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map);
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 9aa10bdf3918..f9cf3f04b742 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1,5 +1,7 @@
1/* 1/*
2 * OHCI HCD (Host Controller Driver) for USB. 2 * Open Host Controller Interface (OHCI) driver for USB.
3 *
4 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
3 * 5 *
4 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at> 6 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
5 * (C) Copyright 2000-2004 David Brownell <dbrownell@users.sourceforge.net> 7 * (C) Copyright 2000-2004 David Brownell <dbrownell@users.sourceforge.net>
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index db6f8b9c19b6..4586369dda00 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -2517,6 +2517,7 @@ static int __devinit r8a66597_probe(struct platform_device *pdev)
2517 INIT_LIST_HEAD(&r8a66597->child_device); 2517 INIT_LIST_HEAD(&r8a66597->child_device);
2518 2518
2519 hcd->rsrc_start = res->start; 2519 hcd->rsrc_start = res->start;
2520 hcd->has_tt = 1;
2520 2521
2521 ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | irq_trigger); 2522 ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | irq_trigger);
2522 if (ret != 0) { 2523 if (ret != 0) {
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 0f8e1d29a858..fcb7f7efc86d 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1215,8 +1215,6 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
1215 ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet)); 1215 ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
1216 /* dig out max burst from ep companion desc */ 1216 /* dig out max burst from ep companion desc */
1217 max_packet = ep->ss_ep_comp.bMaxBurst; 1217 max_packet = ep->ss_ep_comp.bMaxBurst;
1218 if (!max_packet)
1219 xhci_warn(xhci, "WARN no SS endpoint bMaxBurst\n");
1220 ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet)); 1218 ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet));
1221 break; 1219 break;
1222 case USB_SPEED_HIGH: 1220 case USB_SPEED_HIGH:
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 17541d09eabb..cb16de213f64 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -29,6 +29,9 @@
29#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 29#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
30#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000 30#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
31 31
32#define PCI_VENDOR_ID_ETRON 0x1b6f
33#define PCI_DEVICE_ID_ASROCK_P67 0x7023
34
32static const char hcd_name[] = "xhci_hcd"; 35static const char hcd_name[] = "xhci_hcd";
33 36
34/* called after powerup, by probe or system-pm "wakeup" */ 37/* called after powerup, by probe or system-pm "wakeup" */
@@ -134,6 +137,11 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
134 xhci->quirks |= XHCI_EP_LIMIT_QUIRK; 137 xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
135 xhci->limit_active_eps = 64; 138 xhci->limit_active_eps = 64;
136 } 139 }
140 if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
141 pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
142 xhci->quirks |= XHCI_RESET_ON_RESUME;
143 xhci_dbg(xhci, "QUIRK: Resetting on resume\n");
144 }
137 145
138 /* Make sure the HC is halted. */ 146 /* Make sure the HC is halted. */
139 retval = xhci_halt(xhci); 147 retval = xhci_halt(xhci);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 800f417c7309..70cacbbe7fb9 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1733,6 +1733,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
1733 frame->status = -EOVERFLOW; 1733 frame->status = -EOVERFLOW;
1734 skip_td = true; 1734 skip_td = true;
1735 break; 1735 break;
1736 case COMP_DEV_ERR:
1736 case COMP_STALL: 1737 case COMP_STALL:
1737 frame->status = -EPROTO; 1738 frame->status = -EPROTO;
1738 skip_td = true; 1739 skip_td = true;
@@ -1767,9 +1768,6 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
1767 } 1768 }
1768 } 1769 }
1769 1770
1770 if ((idx == urb_priv->length - 1) && *status == -EINPROGRESS)
1771 *status = 0;
1772
1773 return finish_td(xhci, td, event_trb, event, ep, status, false); 1771 return finish_td(xhci, td, event_trb, event, ep, status, false);
1774} 1772}
1775 1773
@@ -1787,8 +1785,7 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
1787 idx = urb_priv->td_cnt; 1785 idx = urb_priv->td_cnt;
1788 frame = &td->urb->iso_frame_desc[idx]; 1786 frame = &td->urb->iso_frame_desc[idx];
1789 1787
1790 /* The transfer is partly done */ 1788 /* The transfer is partly done. */
1791 *status = -EXDEV;
1792 frame->status = -EXDEV; 1789 frame->status = -EXDEV;
1793 1790
1794 /* calc actual length */ 1791 /* calc actual length */
@@ -2016,6 +2013,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
2016 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), 2013 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2017 ep_index); 2014 ep_index);
2018 goto cleanup; 2015 goto cleanup;
2016 case COMP_DEV_ERR:
 2017 xhci_warn(xhci, "WARN: detected an incompatible device");
2018 status = -EPROTO;
2019 break;
2019 case COMP_MISSED_INT: 2020 case COMP_MISSED_INT:
2020 /* 2021 /*
2021 * When encounter missed service error, one or more isoc tds 2022 * When encounter missed service error, one or more isoc tds
@@ -2063,6 +2064,20 @@ static int handle_tx_event(struct xhci_hcd *xhci,
2063 /* Is this a TRB in the currently executing TD? */ 2064 /* Is this a TRB in the currently executing TD? */
2064 event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue, 2065 event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
2065 td->last_trb, event_dma); 2066 td->last_trb, event_dma);
2067
 2068 /*
 2069 * Skip the Force Stopped Event. The event_trb (event_dma) of an
 2070 * FSE is not inside the TD pointed to by ep_ring->dequeue,
 2071 * because the hardware dequeue pointer is still at the TRB
 2072 * preceding the current TD; that may be a link TRB or the last
 2073 * TRB of the previous TD. The command completion handler will
 2074 * take care of the rest.
 2075 */
2076 if (!event_seg && trb_comp_code == COMP_STOP_INVAL) {
2077 ret = 0;
2078 goto cleanup;
2079 }
2080
2066 if (!event_seg) { 2081 if (!event_seg) {
2067 if (!ep->skip || 2082 if (!ep->skip ||
2068 !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) { 2083 !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
@@ -2158,6 +2173,11 @@ cleanup:
2158 urb->transfer_buffer_length, 2173 urb->transfer_buffer_length,
2159 status); 2174 status);
2160 spin_unlock(&xhci->lock); 2175 spin_unlock(&xhci->lock);
 2176 /* EHCI, UHCI and OHCI unconditionally set urb->status to 0
 2177 * for an isochronous endpoint.
 2178 */
2179 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
2180 status = 0;
2161 usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status); 2181 usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
2162 spin_lock(&xhci->lock); 2182 spin_lock(&xhci->lock);
2163 } 2183 }
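Because the xhci-ring hunk above now gives back isochronous URBs with status 0 unconditionally (matching EHCI, UHCI and OHCI), class drivers must read per-packet results from the frame descriptors instead. A sketch; handle_bad_frame() is hypothetical:

    int i;

    for (i = 0; i < urb->number_of_packets; i++) {
            if (urb->iso_frame_desc[i].status)
                    handle_bad_frame(urb, i);
    }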
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 06e7023258d0..f5fe1ac301ab 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -759,6 +759,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
759 msleep(100); 759 msleep(100);
760 760
761 spin_lock_irq(&xhci->lock); 761 spin_lock_irq(&xhci->lock);
762 if (xhci->quirks & XHCI_RESET_ON_RESUME)
763 hibernated = true;
762 764
763 if (!hibernated) { 765 if (!hibernated) {
764 /* step 1: restore register */ 766 /* step 1: restore register */
@@ -1401,6 +1403,7 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1401 u32 added_ctxs; 1403 u32 added_ctxs;
1402 unsigned int last_ctx; 1404 unsigned int last_ctx;
1403 u32 new_add_flags, new_drop_flags, new_slot_info; 1405 u32 new_add_flags, new_drop_flags, new_slot_info;
1406 struct xhci_virt_device *virt_dev;
1404 int ret = 0; 1407 int ret = 0;
1405 1408
1406 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__); 1409 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
@@ -1425,11 +1428,25 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1425 return 0; 1428 return 0;
1426 } 1429 }
1427 1430
1428 in_ctx = xhci->devs[udev->slot_id]->in_ctx; 1431 virt_dev = xhci->devs[udev->slot_id];
1429 out_ctx = xhci->devs[udev->slot_id]->out_ctx; 1432 in_ctx = virt_dev->in_ctx;
1433 out_ctx = virt_dev->out_ctx;
1430 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); 1434 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1431 ep_index = xhci_get_endpoint_index(&ep->desc); 1435 ep_index = xhci_get_endpoint_index(&ep->desc);
1432 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); 1436 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1437
1438 /* If this endpoint is already in use, and the upper layers are trying
1439 * to add it again without dropping it, reject the addition.
1440 */
1441 if (virt_dev->eps[ep_index].ring &&
1442 !(le32_to_cpu(ctrl_ctx->drop_flags) &
1443 xhci_get_endpoint_flag(&ep->desc))) {
1444 xhci_warn(xhci, "Trying to add endpoint 0x%x "
1445 "without dropping it.\n",
1446 (unsigned int) ep->desc.bEndpointAddress);
1447 return -EINVAL;
1448 }
1449
1433 /* If the HCD has already noted the endpoint is enabled, 1450 /* If the HCD has already noted the endpoint is enabled,
1434 * ignore this request. 1451 * ignore this request.
1435 */ 1452 */
@@ -1445,8 +1462,7 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 1445 * process context, not interrupt context (or so documentation 1462 * process context, not interrupt context (or so documentation
1446 * for usb_set_interface() and usb_set_configuration() claim). 1463 * for usb_set_interface() and usb_set_configuration() claim).
1447 */ 1464 */
1448 if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id], 1465 if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
1449 udev, ep, GFP_NOIO) < 0) {
1450 dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n", 1466 dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
1451 __func__, ep->desc.bEndpointAddress); 1467 __func__, ep->desc.bEndpointAddress);
1452 return -ENOMEM; 1468 return -ENOMEM;
@@ -1537,6 +1553,11 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
1537 "and endpoint is not disabled.\n"); 1553 "and endpoint is not disabled.\n");
1538 ret = -EINVAL; 1554 ret = -EINVAL;
1539 break; 1555 break;
1556 case COMP_DEV_ERR:
1557 dev_warn(&udev->dev, "ERROR: Incompatible device for endpoint "
1558 "configure command.\n");
1559 ret = -ENODEV;
1560 break;
1540 case COMP_SUCCESS: 1561 case COMP_SUCCESS:
1541 dev_dbg(&udev->dev, "Successful Endpoint Configure command\n"); 1562 dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
1542 ret = 0; 1563 ret = 0;
@@ -1571,6 +1592,11 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
1571 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1); 1592 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
1572 ret = -EINVAL; 1593 ret = -EINVAL;
1573 break; 1594 break;
1595 case COMP_DEV_ERR:
1596 dev_warn(&udev->dev, "ERROR: Incompatible device for evaluate "
1597 "context command.\n");
1598 ret = -ENODEV;
1599 break;
1574 case COMP_MEL_ERR: 1600 case COMP_MEL_ERR:
1575 /* Max Exit Latency too large error */ 1601 /* Max Exit Latency too large error */
1576 dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n"); 1602 dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
@@ -2853,6 +2879,11 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
2853 dev_warn(&udev->dev, "Device not responding to set address.\n"); 2879 dev_warn(&udev->dev, "Device not responding to set address.\n");
2854 ret = -EPROTO; 2880 ret = -EPROTO;
2855 break; 2881 break;
2882 case COMP_DEV_ERR:
2883 dev_warn(&udev->dev, "ERROR: Incompatible device for address "
2884 "device command.\n");
2885 ret = -ENODEV;
2886 break;
2856 case COMP_SUCCESS: 2887 case COMP_SUCCESS:
2857 xhci_dbg(xhci, "Successful Address Device command\n"); 2888 xhci_dbg(xhci, "Successful Address Device command\n");
2858 break; 2889 break;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 7d1ea3bf5e1f..d8bbf5ccb10d 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -874,6 +874,8 @@ struct xhci_transfer_event {
874#define COMP_PING_ERR 20 874#define COMP_PING_ERR 20
875/* Event Ring is full */ 875/* Event Ring is full */
876#define COMP_ER_FULL 21 876#define COMP_ER_FULL 21
877/* Incompatible Device Error */
878#define COMP_DEV_ERR 22
877/* Missed Service Error - HC couldn't service an isoc ep within interval */ 879/* Missed Service Error - HC couldn't service an isoc ep within interval */
878#define COMP_MISSED_INT 23 880#define COMP_MISSED_INT 23
879/* Successfully stopped command ring */ 881/* Successfully stopped command ring */
@@ -1308,6 +1310,7 @@ struct xhci_hcd {
1308 */ 1310 */
1309#define XHCI_EP_LIMIT_QUIRK (1 << 5) 1311#define XHCI_EP_LIMIT_QUIRK (1 << 5)
1310#define XHCI_BROKEN_MSI (1 << 6) 1312#define XHCI_BROKEN_MSI (1 << 6)
1313#define XHCI_RESET_ON_RESUME (1 << 7)
1311 unsigned int num_active_eps; 1314 unsigned int num_active_eps;
1312 unsigned int limit_active_eps; 1315 unsigned int limit_active_eps;
1313 /* There are two roothubs to keep track of bus suspend info for */ 1316 /* There are two roothubs to keep track of bus suspend info for */
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 0a50a35e1853..6aeb363e63e7 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1524,6 +1524,12 @@ static void musb_gadget_fifo_flush(struct usb_ep *ep)
1524 csr = musb_readw(epio, MUSB_TXCSR); 1524 csr = musb_readw(epio, MUSB_TXCSR);
1525 if (csr & MUSB_TXCSR_FIFONOTEMPTY) { 1525 if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1526 csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS; 1526 csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
1527 /*
 1528 * Setting both TXPKTRDY and FLUSHFIFO makes the controller
 1529 * interrupt the FIFO load in progress instead of flushing
 1530 * the packets that are already loaded.
1531 */
1532 csr &= ~MUSB_TXCSR_TXPKTRDY;
1527 musb_writew(epio, MUSB_TXCSR, csr); 1533 musb_writew(epio, MUSB_TXCSR, csr);
1528 /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */ 1534 /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
1529 musb_writew(epio, MUSB_TXCSR, csr); 1535 musb_writew(epio, MUSB_TXCSR, csr);
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 7295e316bdfc..8b2473fa0f47 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1575,7 +1575,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
1575 /* even if there was an error, we did the dma 1575 /* even if there was an error, we did the dma
1576 * for iso_frame_desc->length 1576 * for iso_frame_desc->length
1577 */ 1577 */
1578 if (d->status != EILSEQ && d->status != -EOVERFLOW) 1578 if (d->status != -EILSEQ && d->status != -EOVERFLOW)
1579 d->status = 0; 1579 d->status = 0;
1580 1580
1581 if (++qh->iso_idx >= urb->number_of_packets) 1581 if (++qh->iso_idx >= urb->number_of_packets)
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 162728977553..2e06b90aa1f8 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -179,6 +179,7 @@ static struct usb_device_id id_table_combined [] = {
179 { USB_DEVICE(FTDI_VID, FTDI_232RL_PID) }, 179 { USB_DEVICE(FTDI_VID, FTDI_232RL_PID) },
180 { USB_DEVICE(FTDI_VID, FTDI_8U2232C_PID) }, 180 { USB_DEVICE(FTDI_VID, FTDI_8U2232C_PID) },
181 { USB_DEVICE(FTDI_VID, FTDI_4232H_PID) }, 181 { USB_DEVICE(FTDI_VID, FTDI_4232H_PID) },
182 { USB_DEVICE(FTDI_VID, FTDI_232H_PID) },
182 { USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) }, 183 { USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) },
183 { USB_DEVICE(FTDI_VID, FTDI_RELAIS_PID) }, 184 { USB_DEVICE(FTDI_VID, FTDI_RELAIS_PID) },
184 { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_PID) }, 185 { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_PID) },
@@ -848,7 +849,8 @@ static const char *ftdi_chip_name[] = {
848 [FT2232C] = "FT2232C", 849 [FT2232C] = "FT2232C",
849 [FT232RL] = "FT232RL", 850 [FT232RL] = "FT232RL",
850 [FT2232H] = "FT2232H", 851 [FT2232H] = "FT2232H",
851 [FT4232H] = "FT4232H" 852 [FT4232H] = "FT4232H",
853 [FT232H] = "FT232H"
852}; 854};
853 855
854 856
@@ -1168,6 +1170,7 @@ static __u32 get_ftdi_divisor(struct tty_struct *tty,
1168 break; 1170 break;
1169 case FT2232H: /* FT2232H chip */ 1171 case FT2232H: /* FT2232H chip */
1170 case FT4232H: /* FT4232H chip */ 1172 case FT4232H: /* FT4232H chip */
1173 case FT232H: /* FT232H chip */
1171 if ((baud <= 12000000) & (baud >= 1200)) { 1174 if ((baud <= 12000000) & (baud >= 1200)) {
1172 div_value = ftdi_2232h_baud_to_divisor(baud); 1175 div_value = ftdi_2232h_baud_to_divisor(baud);
1173 } else if (baud < 1200) { 1176 } else if (baud < 1200) {
@@ -1429,9 +1432,12 @@ static void ftdi_determine_type(struct usb_serial_port *port)
1429 } else if (version < 0x600) { 1432 } else if (version < 0x600) {
1430 /* Assume it's an FT232BM (or FT245BM) */ 1433 /* Assume it's an FT232BM (or FT245BM) */
1431 priv->chip_type = FT232BM; 1434 priv->chip_type = FT232BM;
1432 } else { 1435 } else if (version < 0x900) {
1433 /* Assume it's an FT232R */ 1436 /* Assume it's an FT232RL */
1434 priv->chip_type = FT232RL; 1437 priv->chip_type = FT232RL;
1438 } else {
1439 /* Assume it's an FT232H */
1440 priv->chip_type = FT232H;
1435 } 1441 }
1436 dev_info(&udev->dev, "Detected %s\n", ftdi_chip_name[priv->chip_type]); 1442 dev_info(&udev->dev, "Detected %s\n", ftdi_chip_name[priv->chip_type]);
1437} 1443}
@@ -1559,7 +1565,8 @@ static int create_sysfs_attrs(struct usb_serial_port *port)
1559 priv->chip_type == FT2232C || 1565 priv->chip_type == FT2232C ||
1560 priv->chip_type == FT232RL || 1566 priv->chip_type == FT232RL ||
1561 priv->chip_type == FT2232H || 1567 priv->chip_type == FT2232H ||
1562 priv->chip_type == FT4232H)) { 1568 priv->chip_type == FT4232H ||
1569 priv->chip_type == FT232H)) {
1563 retval = device_create_file(&port->dev, 1570 retval = device_create_file(&port->dev,
1564 &dev_attr_latency_timer); 1571 &dev_attr_latency_timer);
1565 } 1572 }
@@ -1580,7 +1587,8 @@ static void remove_sysfs_attrs(struct usb_serial_port *port)
1580 priv->chip_type == FT2232C || 1587 priv->chip_type == FT2232C ||
1581 priv->chip_type == FT232RL || 1588 priv->chip_type == FT232RL ||
1582 priv->chip_type == FT2232H || 1589 priv->chip_type == FT2232H ||
1583 priv->chip_type == FT4232H) { 1590 priv->chip_type == FT4232H ||
1591 priv->chip_type == FT232H) {
1584 device_remove_file(&port->dev, &dev_attr_latency_timer); 1592 device_remove_file(&port->dev, &dev_attr_latency_timer);
1585 } 1593 }
1586 } 1594 }
@@ -2212,6 +2220,7 @@ static int ftdi_tiocmget(struct tty_struct *tty)
2212 case FT232RL: 2220 case FT232RL:
2213 case FT2232H: 2221 case FT2232H:
2214 case FT4232H: 2222 case FT4232H:
2223 case FT232H:
2215 len = 2; 2224 len = 2;
2216 break; 2225 break;
2217 default: 2226 default:
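The ftdi_sio hunks key the new FT232H support off bcdDevice 9.00. The detection thresholds, pulled out into a stand-alone sketch (the earlier SIO/AM/BM branches of ftdi_determine_type() are omitted; type_from_bcd() is hypothetical):

    static enum ftdi_chip_type type_from_bcd(unsigned int version)
    {
            if (version < 0x600)
                    return FT232BM;         /* or FT245BM */
            if (version < 0x900)
                    return FT232RL;
            return FT232H;                  /* bcdDevice 9.00 and up */
    }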
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index 213fe3d61282..19584faa86f9 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -156,7 +156,8 @@ enum ftdi_chip_type {
156 FT2232C = 4, 156 FT2232C = 4,
157 FT232RL = 5, 157 FT232RL = 5,
158 FT2232H = 6, 158 FT2232H = 6,
159 FT4232H = 7 159 FT4232H = 7,
160 FT232H = 8
160}; 161};
161 162
162enum ftdi_sio_baudrate { 163enum ftdi_sio_baudrate {
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index ab1fcdf3c378..19156d1049fe 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -22,6 +22,7 @@
22#define FTDI_8U232AM_ALT_PID 0x6006 /* FTDI's alternate PID for above */ 22#define FTDI_8U232AM_ALT_PID 0x6006 /* FTDI's alternate PID for above */
23#define FTDI_8U2232C_PID 0x6010 /* Dual channel device */ 23#define FTDI_8U2232C_PID 0x6010 /* Dual channel device */
24#define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */ 24#define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */
25#define FTDI_232H_PID 0x6014 /* Single channel hi-speed device */
25#define FTDI_SIO_PID 0x8372 /* Product Id SIO application of 8U100AX */ 26#define FTDI_SIO_PID 0x8372 /* Product Id SIO application of 8U100AX */
26#define FTDI_232RL_PID 0xFBFA /* Product ID for FT232RL */ 27#define FTDI_232RL_PID 0xFBFA /* Product ID for FT232RL */
27 28
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index c6d92a530086..ea8445689c85 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1745,6 +1745,7 @@ static int ti_download_firmware(struct ti_device *tdev)
1745 } 1745 }
1746 if (fw_p->size > TI_FIRMWARE_BUF_SIZE) { 1746 if (fw_p->size > TI_FIRMWARE_BUF_SIZE) {
1747 dev_err(&dev->dev, "%s - firmware too large %zu\n", __func__, fw_p->size); 1747 dev_err(&dev->dev, "%s - firmware too large %zu\n", __func__, fw_p->size);
1748 release_firmware(fw_p);
1748 return -ENOENT; 1749 return -ENOENT;
1749 } 1750 }
1750 1751
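The ti_usb_3410_5052 fix plugs a firmware leak on the too-large error path. A single-exit sketch of the same check so every path releases the blob; fw_name and ti_do_download_sketch() are hypothetical stand-ins:

    if (request_firmware(&fw_p, fw_name, &dev->dev) != 0)
            return -ENOENT;
    if (fw_p->size > TI_FIRMWARE_BUF_SIZE) {
            dev_err(&dev->dev, "%s - firmware too large %zu\n",
                    __func__, fw_p->size);
            status = -ENOENT;
    } else {
            status = ti_do_download_sketch(tdev, fw_p);
    }
    release_firmware(fw_p);
    return status;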
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index 5fc983c5b92c..cf03ad067147 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -447,6 +447,8 @@ static int clcdfb_register(struct clcd_fb *fb)
447 goto out; 447 goto out;
448 } 448 }
449 449
450 fb->fb.device = &fb->dev->dev;
451
450 fb->fb.fix.mmio_start = fb->dev->res.start; 452 fb->fb.fix.mmio_start = fb->dev->res.start;
451 fb->fb.fix.mmio_len = resource_size(&fb->dev->res); 453 fb->fb.fix.mmio_len = resource_size(&fb->dev->res);
452 454
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index bedf5be27f05..0acc7d65aeaa 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -555,8 +555,6 @@ static void adjust_aoi_size_position(struct fb_var_screeninfo *var,
555static int fsl_diu_check_var(struct fb_var_screeninfo *var, 555static int fsl_diu_check_var(struct fb_var_screeninfo *var,
556 struct fb_info *info) 556 struct fb_info *info)
557{ 557{
558 unsigned long htotal, vtotal;
559
560 pr_debug("check_var xres: %d\n", var->xres); 558 pr_debug("check_var xres: %d\n", var->xres);
561 pr_debug("check_var yres: %d\n", var->yres); 559 pr_debug("check_var yres: %d\n", var->yres);
562 560
@@ -635,20 +633,6 @@ static int fsl_diu_check_var(struct fb_var_screeninfo *var,
635 633
636 break; 634 break;
637 } 635 }
638 /* If the pixclock is below the minimum spec'd value then set to
639 * refresh rate for 60Hz since this is supported by most monitors.
640 * Refer to Documentation/fb/ for calculations.
641 */
642 if ((var->pixclock < MIN_PIX_CLK) || (var->pixclock > MAX_PIX_CLK)) {
643 htotal = var->xres + var->right_margin + var->hsync_len +
644 var->left_margin;
645 vtotal = var->yres + var->lower_margin + var->vsync_len +
646 var->upper_margin;
647 var->pixclock = (vtotal * htotal * 6UL) / 100UL;
648 var->pixclock = KHZ2PICOS(var->pixclock);
649 pr_debug("pixclock set for 60Hz refresh = %u ps\n",
650 var->pixclock);
651 }
652 636
653 var->height = -1; 637 var->height = -1;
654 var->width = -1; 638 var->width = -1;
diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
index c6b554f72c6d..5a5d0928df33 100644
--- a/drivers/video/geode/gx1fb_core.c
+++ b/drivers/video/geode/gx1fb_core.c
@@ -29,7 +29,7 @@ static int crt_option = 1;
29static char panel_option[32] = ""; 29static char panel_option[32] = "";
30 30
31/* Modes relevant to the GX1 (taken from modedb.c) */ 31/* Modes relevant to the GX1 (taken from modedb.c) */
32static const struct fb_videomode __initdata gx1_modedb[] = { 32static const struct fb_videomode __devinitdata gx1_modedb[] = {
33 /* 640x480-60 VESA */ 33 /* 640x480-60 VESA */
34 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2, 34 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
35 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, 35 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
@@ -195,7 +195,7 @@ static int gx1fb_blank(int blank_mode, struct fb_info *info)
195 return par->vid_ops->blank_display(info, blank_mode); 195 return par->vid_ops->blank_display(info, blank_mode);
196} 196}
197 197
198static int __init gx1fb_map_video_memory(struct fb_info *info, struct pci_dev *dev) 198static int __devinit gx1fb_map_video_memory(struct fb_info *info, struct pci_dev *dev)
199{ 199{
200 struct geodefb_par *par = info->par; 200 struct geodefb_par *par = info->par;
201 unsigned gx_base; 201 unsigned gx_base;
@@ -268,7 +268,7 @@ static struct fb_ops gx1fb_ops = {
268 .fb_imageblit = cfb_imageblit, 268 .fb_imageblit = cfb_imageblit,
269}; 269};
270 270
271static struct fb_info * __init gx1fb_init_fbinfo(struct device *dev) 271static struct fb_info * __devinit gx1fb_init_fbinfo(struct device *dev)
272{ 272{
273 struct geodefb_par *par; 273 struct geodefb_par *par;
274 struct fb_info *info; 274 struct fb_info *info;
@@ -318,7 +318,7 @@ static struct fb_info * __init gx1fb_init_fbinfo(struct device *dev)
318 return info; 318 return info;
319} 319}
320 320
321static int __init gx1fb_probe(struct pci_dev *pdev, const struct pci_device_id *id) 321static int __devinit gx1fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
322{ 322{
323 struct geodefb_par *par; 323 struct geodefb_par *par;
324 struct fb_info *info; 324 struct fb_info *info;
@@ -382,7 +382,7 @@ static int __init gx1fb_probe(struct pci_dev *pdev, const struct pci_device_id *
382 return ret; 382 return ret;
383} 383}
384 384
385static void gx1fb_remove(struct pci_dev *pdev) 385static void __devexit gx1fb_remove(struct pci_dev *pdev)
386{ 386{
387 struct fb_info *info = pci_get_drvdata(pdev); 387 struct fb_info *info = pci_get_drvdata(pdev);
388 struct geodefb_par *par = info->par; 388 struct geodefb_par *par = info->par;
@@ -441,7 +441,7 @@ static struct pci_driver gx1fb_driver = {
441 .name = "gx1fb", 441 .name = "gx1fb",
442 .id_table = gx1fb_id_table, 442 .id_table = gx1fb_id_table,
443 .probe = gx1fb_probe, 443 .probe = gx1fb_probe,
444 .remove = gx1fb_remove, 444 .remove = __devexit_p(gx1fb_remove),
445}; 445};
446 446
447static int __init gx1fb_init(void) 447static int __init gx1fb_init(void)
@@ -456,7 +456,7 @@ static int __init gx1fb_init(void)
456 return pci_register_driver(&gx1fb_driver); 456 return pci_register_driver(&gx1fb_driver);
457} 457}
458 458
459static void __exit gx1fb_cleanup(void) 459static void __devexit gx1fb_cleanup(void)
460{ 460{
461 pci_unregister_driver(&gx1fb_driver); 461 pci_unregister_driver(&gx1fb_driver);
462} 462}
diff --git a/drivers/video/hecubafb.c b/drivers/video/hecubafb.c
index fbef15f7a218..614251a9af91 100644
--- a/drivers/video/hecubafb.c
+++ b/drivers/video/hecubafb.c
@@ -233,7 +233,7 @@ static int __devinit hecubafb_probe(struct platform_device *dev)
233 233
234 videomemory = vzalloc(videomemorysize); 234 videomemory = vzalloc(videomemorysize);
235 if (!videomemory) 235 if (!videomemory)
236 return retval; 236 goto err_videomem_alloc;
237 237
238 info = framebuffer_alloc(sizeof(struct hecubafb_par), &dev->dev); 238 info = framebuffer_alloc(sizeof(struct hecubafb_par), &dev->dev);
239 if (!info) 239 if (!info)
@@ -275,6 +275,7 @@ err_fbreg:
275 framebuffer_release(info); 275 framebuffer_release(info);
276err_fballoc: 276err_fballoc:
277 vfree(videomemory); 277 vfree(videomemory);
278err_videomem_alloc:
278 module_put(board->owner); 279 module_put(board->owner);
279 return retval; 280 return retval;
280} 281}
diff --git a/drivers/video/sh_mobile_meram.c b/drivers/video/sh_mobile_meram.c
index 9170c82b495c..cc7d7329dc15 100644
--- a/drivers/video/sh_mobile_meram.c
+++ b/drivers/video/sh_mobile_meram.c
@@ -218,7 +218,7 @@ static inline void meram_get_next_icb_addr(struct sh_mobile_meram_info *pdata,
218 icb_offset = 0xc0000000 | (cfg->current_reg << 23); 218 icb_offset = 0xc0000000 | (cfg->current_reg << 23);
219 219
220 *icb_addr_y = icb_offset | (cfg->icb[0].marker_icb << 24); 220 *icb_addr_y = icb_offset | (cfg->icb[0].marker_icb << 24);
221 if ((*icb_addr_c) && is_nvcolor(cfg->pixelformat)) 221 if (is_nvcolor(cfg->pixelformat))
222 *icb_addr_c = icb_offset | (cfg->icb[1].marker_icb << 24); 222 *icb_addr_c = icb_offset | (cfg->icb[1].marker_icb << 24);
223} 223}
224 224
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c
index 87f0be1e78b5..6294dca95500 100644
--- a/drivers/video/sm501fb.c
+++ b/drivers/video/sm501fb.c
@@ -1664,7 +1664,7 @@ static void sm501fb_stop(struct sm501fb_info *info)
1664 resource_size(info->regs_res)); 1664 resource_size(info->regs_res));
1665} 1665}
1666 1666
1667static int sm501fb_init_fb(struct fb_info *fb, 1667static int __devinit sm501fb_init_fb(struct fb_info *fb,
1668 enum sm501_controller head, 1668 enum sm501_controller head,
1669 const char *fbname) 1669 const char *fbname)
1670{ 1670{
diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
index 52b0f3e8ccac..816a4fda04f5 100644
--- a/drivers/video/udlfb.c
+++ b/drivers/video/udlfb.c
@@ -1233,8 +1233,12 @@ static int dlfb_setup_modes(struct dlfb_data *dev,
1233 if (dlfb_is_valid_mode(&info->monspecs.modedb[i], info)) 1233 if (dlfb_is_valid_mode(&info->monspecs.modedb[i], info))
1234 fb_add_videomode(&info->monspecs.modedb[i], 1234 fb_add_videomode(&info->monspecs.modedb[i],
1235 &info->modelist); 1235 &info->modelist);
1236 else /* if we've removed top/best mode */ 1236 else {
1237 info->monspecs.misc &= ~FB_MISC_1ST_DETAIL; 1237 if (i == 0)
1238 /* if we've removed top/best mode */
1239 info->monspecs.misc
1240 &= ~FB_MISC_1ST_DETAIL;
1241 }
1238 } 1242 }
1239 1243
1240 default_vmode = fb_find_best_display(&info->monspecs, 1244 default_vmode = fb_find_best_display(&info->monspecs,
diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
index a99bbe86db13..501b3406c6d5 100644
--- a/drivers/video/vesafb.c
+++ b/drivers/video/vesafb.c
@@ -175,6 +175,7 @@ static int vesafb_setcolreg(unsigned regno, unsigned red, unsigned green,
175 175
176static void vesafb_destroy(struct fb_info *info) 176static void vesafb_destroy(struct fb_info *info)
177{ 177{
178 fb_dealloc_cmap(&info->cmap);
178 if (info->screen_base) 179 if (info->screen_base)
179 iounmap(info->screen_base); 180 iounmap(info->screen_base);
180 release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size); 181 release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size);
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 022f9eb0b7bf..9536d386bb38 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -535,8 +535,7 @@ config I6300ESB_WDT
535 535
536config INTEL_SCU_WATCHDOG 536config INTEL_SCU_WATCHDOG
537 bool "Intel SCU Watchdog for Mobile Platforms" 537 bool "Intel SCU Watchdog for Mobile Platforms"
538 depends on WATCHDOG 538 depends on X86_MRST
539 depends on INTEL_SCU_IPC
540 ---help--- 539 ---help---
 541 Hardware driver for the watchdog timer built into the Intel SCU 540 Hardware driver for the watchdog timer built into the Intel SCU
542 for Intel Mobile Platforms. 541 for Intel Mobile Platforms.
diff --git a/drivers/watchdog/at32ap700x_wdt.c b/drivers/watchdog/at32ap700x_wdt.c
index 750bc5281d79..4ca5d40304b2 100644
--- a/drivers/watchdog/at32ap700x_wdt.c
+++ b/drivers/watchdog/at32ap700x_wdt.c
@@ -448,7 +448,7 @@ static void __exit at32_wdt_exit(void)
448} 448}
449module_exit(at32_wdt_exit); 449module_exit(at32_wdt_exit);
450 450
451MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>"); 451MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
452MODULE_DESCRIPTION("Watchdog driver for Atmel AT32AP700X"); 452MODULE_DESCRIPTION("Watchdog driver for Atmel AT32AP700X");
453MODULE_LICENSE("GPL"); 453MODULE_LICENSE("GPL");
454MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); 454MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c
index 29a7cd4b90c8..b146082bd85a 100644
--- a/drivers/watchdog/gef_wdt.c
+++ b/drivers/watchdog/gef_wdt.c
@@ -329,4 +329,4 @@ MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
329MODULE_DESCRIPTION("GE watchdog driver"); 329MODULE_DESCRIPTION("GE watchdog driver");
330MODULE_LICENSE("GPL"); 330MODULE_LICENSE("GPL");
331MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); 331MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
332MODULE_ALIAS("platform: gef_wdt"); 332MODULE_ALIAS("platform:gef_wdt");
diff --git a/drivers/watchdog/intel_scu_watchdog.c b/drivers/watchdog/intel_scu_watchdog.c
index 919bdd16136f..ba4386066a42 100644
--- a/drivers/watchdog/intel_scu_watchdog.c
+++ b/drivers/watchdog/intel_scu_watchdog.c
@@ -42,7 +42,6 @@
42#include <linux/sched.h> 42#include <linux/sched.h>
43#include <linux/signal.h> 43#include <linux/signal.h>
44#include <linux/sfi.h> 44#include <linux/sfi.h>
45#include <linux/types.h>
46#include <asm/irq.h> 45#include <asm/irq.h>
47#include <asm/atomic.h> 46#include <asm/atomic.h>
48#include <asm/intel_scu_ipc.h> 47#include <asm/intel_scu_ipc.h>
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index 1479dc4d6129..0430e093b1a0 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -66,23 +66,18 @@ static struct {
 	int default_ticks;
 	unsigned long inuse;
 	unsigned gpio;
-	int gstate;
+	unsigned int gstate;
 } mtx1_wdt_device;
 
 static void mtx1_wdt_trigger(unsigned long unused)
 {
-	u32 tmp;
-
 	spin_lock(&mtx1_wdt_device.lock);
 	if (mtx1_wdt_device.running)
 		ticks--;
 
 	/* toggle wdt gpio */
-	mtx1_wdt_device.gstate = ~mtx1_wdt_device.gstate;
-	if (mtx1_wdt_device.gstate)
-		gpio_direction_output(mtx1_wdt_device.gpio, 1);
-	else
-		gpio_direction_input(mtx1_wdt_device.gpio);
+	mtx1_wdt_device.gstate = !mtx1_wdt_device.gstate;
+	gpio_set_value(mtx1_wdt_device.gpio, mtx1_wdt_device.gstate);
 
 	if (mtx1_wdt_device.queue && ticks)
 		mod_timer(&mtx1_wdt_device.timer, jiffies + MTX1_WDT_INTERVAL);
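
Worth spelling out why the toggle fix above is a behavior change, not style: bitwise NOT of an integer only yields zero when every bit was set, so starting from gstate = 1 the old code alternated between 1 and 0xFFFFFFFE, both truthy, and the "drive low" branch never ran. Logical NOT is a genuine two-state toggle. A standalone userspace demo (not kernel code) showing the difference:

#include <stdio.h>

int main(void)
{
	unsigned int bitwise = 1;	/* mirrors the old ~gstate toggle */
	unsigned int logical = 1;	/* mirrors the fixed !gstate toggle */
	int i;

	for (i = 0; i < 4; i++) {
		bitwise = ~bitwise;	/* 0xfffffffe, 1, 0xfffffffe, ... never 0 */
		logical = !logical;	/* 0, 1, 0, 1: a real two-state flip */
		printf("bitwise=%#x (%s)  logical=%u\n",
		       bitwise, bitwise ? "truthy" : "false", logical);
	}
	return 0;
}
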
@@ -105,7 +100,7 @@ static void mtx1_wdt_start(void)
 	if (!mtx1_wdt_device.queue) {
 		mtx1_wdt_device.queue = 1;
 		mtx1_wdt_device.gstate = 1;
-		gpio_direction_output(mtx1_wdt_device.gpio, 1);
+		gpio_set_value(mtx1_wdt_device.gpio, 1);
 		mod_timer(&mtx1_wdt_device.timer, jiffies + MTX1_WDT_INTERVAL);
 	}
 	mtx1_wdt_device.running++;
@@ -120,7 +115,7 @@ static int mtx1_wdt_stop(void)
 	if (mtx1_wdt_device.queue) {
 		mtx1_wdt_device.queue = 0;
 		mtx1_wdt_device.gstate = 0;
-		gpio_direction_output(mtx1_wdt_device.gpio, 0);
+		gpio_set_value(mtx1_wdt_device.gpio, 0);
 	}
 	ticks = mtx1_wdt_device.default_ticks;
 	spin_unlock_irqrestore(&mtx1_wdt_device.lock, flags);
@@ -214,6 +209,12 @@ static int __devinit mtx1_wdt_probe(struct platform_device *pdev)
 	int ret;
 
 	mtx1_wdt_device.gpio = pdev->resource[0].start;
+	ret = gpio_request_one(mtx1_wdt_device.gpio,
+				GPIOF_OUT_INIT_HIGH, "mtx1-wdt");
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to request gpio");
+		return ret;
+	}
 
 	spin_lock_init(&mtx1_wdt_device.lock);
 	init_completion(&mtx1_wdt_device.stop);
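
The probe-time request above is also what makes the gpio_set_value() switch elsewhere in this patch safe: gpio_request_one() with GPIOF_OUT_INIT_HIGH claims the line and configures it as an output driving high in one step, so the timer path only ever has to change the level, and remove() must release the line again. A hedged sketch of that lifecycle, with hypothetical demo_* names standing in for the driver's own:

#include <linux/gpio.h>

static unsigned demo_gpio;

/* Probe-time: claim the line and set it output-high in one call. */
static int demo_gpio_setup(unsigned gpio)
{
	int ret = gpio_request_one(gpio, GPIOF_OUT_INIT_HIGH, "demo-wdt");

	if (ret < 0)
		return ret;
	demo_gpio = gpio;
	return 0;
}

/* Hot path: direction is already output, so only the level changes. */
static void demo_gpio_ping(int level)
{
	gpio_set_value(demo_gpio, level);
}

/* Remove-time: release the line so another user can claim it. */
static void demo_gpio_teardown(void)
{
	gpio_free(demo_gpio);
}
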
@@ -239,11 +240,13 @@ static int __devexit mtx1_wdt_remove(struct platform_device *pdev)
 		mtx1_wdt_device.queue = 0;
 		wait_for_completion(&mtx1_wdt_device.stop);
 	}
+
+	gpio_free(mtx1_wdt_device.gpio);
 	misc_deregister(&mtx1_wdt_misc);
 	return 0;
 }
 
-static struct platform_driver mtx1_wdt = {
+static struct platform_driver mtx1_wdt_driver = {
 	.probe = mtx1_wdt_probe,
 	.remove = __devexit_p(mtx1_wdt_remove),
 	.driver.name = "mtx1-wdt",
@@ -252,12 +255,12 @@ static struct platform_driver mtx1_wdt = {
 
 static int __init mtx1_wdt_init(void)
 {
-	return platform_driver_register(&mtx1_wdt);
+	return platform_driver_register(&mtx1_wdt_driver);
 }
 
 static void __exit mtx1_wdt_exit(void)
 {
-	platform_driver_unregister(&mtx1_wdt);
+	platform_driver_unregister(&mtx1_wdt_driver);
 }
 
 module_init(mtx1_wdt_init);
diff --git a/drivers/watchdog/wm831x_wdt.c b/drivers/watchdog/wm831x_wdt.c
index 8c4b2d5bb7da..871caea4e1c6 100644
--- a/drivers/watchdog/wm831x_wdt.c
+++ b/drivers/watchdog/wm831x_wdt.c
@@ -320,6 +320,11 @@ static int __devinit wm831x_wdt_probe(struct platform_device *pdev)
 	struct wm831x_watchdog_pdata *pdata;
 	int reg, ret;
 
+	if (wm831x) {
+		dev_err(&pdev->dev, "wm831x watchdog already registered\n");
+		return -EBUSY;
+	}
+
 	wm831x = dev_get_drvdata(pdev->dev.parent);
 
 	ret = wm831x_reg_read(wm831x, WM831X_WATCHDOG);
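
The guard added above works because this driver keeps its chip handle in a file-scope static that the misc-device paths share (the added code references wm831x before the assignment, which only compiles for file-scope state); a second probe would silently repoint that state under the first user, so refusing with -EBUSY is the conservative fix until the driver carries per-instance state. A simplified sketch of the pattern, with hypothetical demo_* names in place of the wm831x types:

#include <linux/platform_device.h>

struct demo_chip;			/* opaque handle owned by the parent MFD */

/* Shared, single-instance state: whichever probe wins owns it. */
static struct demo_chip *demo_chip;

static int demo_wdt_probe(struct platform_device *pdev)
{
	if (demo_chip) {
		dev_err(&pdev->dev, "demo watchdog already registered\n");
		return -EBUSY;		/* reject a second instance outright */
	}

	/* The MFD core stashed the chip handle in the parent device. */
	demo_chip = dev_get_drvdata(pdev->dev.parent);
	return 0;
}
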