Diffstat (limited to 'drivers')
-rw-r--r--  drivers/message/fusion/Kconfig | 2
-rw-r--r--  drivers/message/fusion/lsi/mpi.h | 7
-rw-r--r--  drivers/message/fusion/lsi/mpi_cnfg.h | 131
-rw-r--r--  drivers/message/fusion/lsi/mpi_history.txt | 77
-rw-r--r--  drivers/message/fusion/lsi/mpi_init.h | 7
-rw-r--r--  drivers/message/fusion/lsi/mpi_ioc.h | 75
-rw-r--r--  drivers/message/fusion/lsi/mpi_log_sas.h | 284
-rw-r--r--  drivers/message/fusion/lsi/mpi_sas.h | 9
-rw-r--r--  drivers/scsi/53c700.c | 24
-rw-r--r--  drivers/scsi/53c700.h | 2
-rw-r--r--  drivers/scsi/Kconfig | 9
-rw-r--r--  drivers/scsi/Makefile | 1
-rw-r--r--  drivers/scsi/NCR_D700.c | 1
-rw-r--r--  drivers/scsi/aacraid/Makefile | 2
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 602
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 46
-rw-r--r--  drivers/scsi/aacraid/comminit.c | 14
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 40
-rw-r--r--  drivers/scsi/aacraid/linit.c | 17
-rw-r--r--  drivers/scsi/aacraid/nark.c | 87
-rw-r--r--  drivers/scsi/aacraid/rkt.c | 64
-rw-r--r--  drivers/scsi/aacraid/rx.c | 263
-rw-r--r--  drivers/scsi/aacraid/sa.c | 33
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_dev.c | 16
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_init.c | 48
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_sas.h | 1
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_scb.c | 122
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_sds.c | 10
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_seq.c | 43
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_seq.h | 1
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_task.c | 9
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_tmf.c | 5
-rw-r--r--  drivers/scsi/ipr.c | 94
-rw-r--r--  drivers/scsi/ipr.h | 4
-rw-r--r--  drivers/scsi/lasi700.c | 1
-rw-r--r--  drivers/scsi/libsas/sas_discover.c | 62
-rw-r--r--  drivers/scsi/libsas/sas_event.c | 6
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 18
-rw-r--r--  drivers/scsi/libsas/sas_init.c | 52
-rw-r--r--  drivers/scsi/libsas/sas_internal.h | 9
-rw-r--r--  drivers/scsi/libsas/sas_port.c | 14
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c | 258
-rw-r--r--  drivers/scsi/megaraid/mbox_defs.h | 2
-rw-r--r--  drivers/scsi/megaraid/mega_common.h | 122
-rw-r--r--  drivers/scsi/megaraid/megaraid_ioctl.h | 36
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c | 421
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.h | 46
-rw-r--r--  drivers/scsi/megaraid/megaraid_mm.c | 65
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 14
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 68
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 38
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 6
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 218
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 245
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 18
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 24
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 348
-rw-r--r--  drivers/scsi/scsi.c | 21
-rw-r--r--  drivers/scsi/scsi_debug.c | 42
-rw-r--r--  drivers/scsi/scsi_error.c | 12
-rw-r--r--  drivers/scsi/scsi_lib.c | 2
-rw-r--r--  drivers/scsi/scsi_priv.h | 6
-rw-r--r--  drivers/scsi/scsi_scan.c | 2
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 118
-rw-r--r--  drivers/scsi/sim710.c | 1
-rw-r--r--  drivers/scsi/sni_53c710.c | 159
66 files changed, 3093 insertions(+), 1511 deletions(-)
diff --git a/drivers/message/fusion/Kconfig b/drivers/message/fusion/Kconfig
index ea31d8470510..71037f91c222 100644
--- a/drivers/message/fusion/Kconfig
+++ b/drivers/message/fusion/Kconfig
@@ -66,7 +66,7 @@ config FUSION_MAX_SGE
 
 config FUSION_CTL
 	tristate "Fusion MPT misc device (ioctl) driver"
-	depends on FUSION_SPI || FUSION_FC
+	depends on FUSION_SPI || FUSION_FC || FUSION_SAS
 	---help---
 	  The Fusion MPT misc device driver provides specialized control
 	  of MPT adapters via system ioctl calls. Use of ioctl calls to
diff --git a/drivers/message/fusion/lsi/mpi.h b/drivers/message/fusion/lsi/mpi.h
index 81ad77622dac..75223bf24ae8 100644
--- a/drivers/message/fusion/lsi/mpi.h
+++ b/drivers/message/fusion/lsi/mpi.h
@@ -1,12 +1,12 @@
 /*
- *  Copyright (c) 2000-2005 LSI Logic Corporation.
+ *  Copyright (c) 2000-2006 LSI Logic Corporation.
  *
  *
  *           Name:  mpi.h
  *          Title:  MPI Message independent structures and definitions
  *  Creation Date:  July 27, 2000
  *
- *    mpi.h Version:  01.05.11
+ *    mpi.h Version:  01.05.12
  *
  *  Version History
  *  ---------------
@@ -77,6 +77,7 @@
  *  08-03-05  01.05.09  Bumped MPI_HEADER_VERSION_UNIT.
  *  08-30-05  01.05.10  Added 2 new IOCStatus codes for Target.
  *  03-27-06  01.05.11  Bumped MPI_HEADER_VERSION_UNIT.
+ *  10-11-06  01.05.12  Bumped MPI_HEADER_VERSION_UNIT.
  *  --------------------------------------------------------------------------
  */
 
@@ -107,7 +108,7 @@
 /* Note: The major versions of 0xe0 through 0xff are reserved */
 
 /* versioning for this MPI header set */
-#define MPI_HEADER_VERSION_UNIT             (0x0D)
+#define MPI_HEADER_VERSION_UNIT             (0x0E)
 #define MPI_HEADER_VERSION_DEV              (0x00)
 #define MPI_HEADER_VERSION_UNIT_MASK        (0xFF00)
 #define MPI_HEADER_VERSION_UNIT_SHIFT       (8)
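
For reference, the unit value bumped above occupies the upper byte of the 16-bit MPI header version word; a minimal sketch of extracting it, not part of this patch and assuming only the mask/shift defines shown in the hunk (U8/U16 come from mpi_type.h):

	/* Hypothetical helper: pull the header-set unit (0x0E after this
	 * patch) out of a 16-bit MPI header version word. */
	static inline U8 mpi_header_version_unit(U16 header_version)
	{
		return (header_version & MPI_HEADER_VERSION_UNIT_MASK) >>
			MPI_HEADER_VERSION_UNIT_SHIFT;
	}
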
diff --git a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h
index 47e13e360c10..0e4c8e77a81d 100644
--- a/drivers/message/fusion/lsi/mpi_cnfg.h
+++ b/drivers/message/fusion/lsi/mpi_cnfg.h
@@ -1,12 +1,12 @@
 /*
- *  Copyright (c) 2000-2005 LSI Logic Corporation.
+ *  Copyright (c) 2000-2006 LSI Logic Corporation.
  *
  *
  *           Name:  mpi_cnfg.h
  *          Title:  MPI Config message, structures, and Pages
  *  Creation Date:  July 27, 2000
  *
- *    mpi_cnfg.h Version:  01.05.12
+ *    mpi_cnfg.h Version:  01.05.13
  *
  *  Version History
  *  ---------------
@@ -276,6 +276,23 @@
  *                      Added AdditionalControlFlags, MaxTargetPortConnectTime,
  *                      ReportDeviceMissingDelay, and IODeviceMissingDelay
  *                      fields to SAS IO Unit Page 1.
+ *  10-11-06  01.05.13  Added NumForceWWID field and ForceWWID array to
+ *                      Manufacturing Page 5.
+ *                      Added Manufacturing pages 8 through 10.
+ *                      Added defines for supported metadata size bits in
+ *                      CapabilitiesFlags field of IOC Page 6.
+ *                      Added defines for metadata size bits in VolumeSettings
+ *                      field of RAID Volume Page 0.
+ *                      Added SATA Link Reset settings, Enable SATA Asynchronous
+ *                      Notification bit, and HideNonZeroAttachedPhyIdentifiers
+ *                      bit to AdditionalControlFlags field of SAS IO Unit
+ *                      Page 1.
+ *                      Added defines for Enclosure Devices Unmapped and
+ *                      Device Limit Exceeded bits in Status field of SAS IO
+ *                      Unit Page 2.
+ *                      Added more AccessStatus values for SAS Device Page 0.
+ *                      Added bit for SATA Asynchronous Notification Support in
+ *                      Flags field of SAS Device Page 0.
  *  --------------------------------------------------------------------------
  */
 
@@ -654,17 +671,24 @@ typedef struct _CONFIG_PAGE_MANUFACTURING_4
 #define MPI_MANPAGE4_IR_NO_MIX_SAS_SATA                 (0x01)
 
 
+#ifndef MPI_MANPAGE5_NUM_FORCEWWID
+#define MPI_MANPAGE5_NUM_FORCEWWID      (1)
+#endif
+
 typedef struct _CONFIG_PAGE_MANUFACTURING_5
 {
     CONFIG_PAGE_HEADER      Header;                         /* 00h */
     U64                     BaseWWID;                       /* 04h */
     U8                      Flags;                          /* 0Ch */
-    U8                      Reserved1;                      /* 0Dh */
+    U8                      NumForceWWID;                   /* 0Dh */
     U16                     Reserved2;                      /* 0Eh */
+    U32                     Reserved3;                      /* 10h */
+    U32                     Reserved4;                      /* 14h */
+    U64                     ForceWWID[MPI_MANPAGE5_NUM_FORCEWWID]; /* 18h */
 } CONFIG_PAGE_MANUFACTURING_5, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_5,
   ManufacturingPage5_t, MPI_POINTER pManufacturingPage5_t;
 
-#define MPI_MANUFACTURING5_PAGEVERSION                  (0x01)
+#define MPI_MANUFACTURING5_PAGEVERSION                  (0x02)
 
 /* defines for the Flags field */
 #define MPI_MANPAGE5_TWO_WWID_PER_PHY                   (0x01)
@@ -740,6 +764,36 @@ typedef struct _CONFIG_PAGE_MANUFACTURING_7
 #define MPI_MANPAGE7_FLAG_USE_SLOT_INFO                 (0x00000001)
 
 
+typedef struct _CONFIG_PAGE_MANUFACTURING_8
+{
+    CONFIG_PAGE_HEADER      Header;                         /* 00h */
+    U32                     ProductSpecificInfo;            /* 04h */
+} CONFIG_PAGE_MANUFACTURING_8, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_8,
+  ManufacturingPage8_t, MPI_POINTER pManufacturingPage8_t;
+
+#define MPI_MANUFACTURING8_PAGEVERSION                  (0x00)
+
+
+typedef struct _CONFIG_PAGE_MANUFACTURING_9
+{
+    CONFIG_PAGE_HEADER      Header;                         /* 00h */
+    U32                     ProductSpecificInfo;            /* 04h */
+} CONFIG_PAGE_MANUFACTURING_9, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_9,
+  ManufacturingPage9_t, MPI_POINTER pManufacturingPage9_t;
+
+#define MPI_MANUFACTURING6_PAGEVERSION                  (0x00)
+
+
+typedef struct _CONFIG_PAGE_MANUFACTURING_10
+{
+    CONFIG_PAGE_HEADER      Header;                         /* 00h */
+    U32                     ProductSpecificInfo;            /* 04h */
+} CONFIG_PAGE_MANUFACTURING_10, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_10,
+  ManufacturingPage10_t, MPI_POINTER pManufacturingPage10_t;
+
+#define MPI_MANUFACTURING10_PAGEVERSION                 (0x00)
+
+
 /****************************************************************************
 *   IO Unit Config Pages
 ****************************************************************************/
@@ -1080,10 +1134,14 @@ typedef struct _CONFIG_PAGE_IOC_6
 } CONFIG_PAGE_IOC_6, MPI_POINTER PTR_CONFIG_PAGE_IOC_6,
   IOCPage6_t, MPI_POINTER pIOCPage6_t;
 
-#define MPI_IOCPAGE6_PAGEVERSION                        (0x00)
+#define MPI_IOCPAGE6_PAGEVERSION                        (0x01)
 
 /* IOC Page 6 Capabilities Flags */
 
+#define MPI_IOCPAGE6_CAP_FLAGS_MASK_METADATA_SIZE       (0x00000006)
+#define MPI_IOCPAGE6_CAP_FLAGS_64MB_METADATA_SIZE       (0x00000000)
+#define MPI_IOCPAGE6_CAP_FLAGS_512MB_METADATA_SIZE      (0x00000002)
+
 #define MPI_IOCPAGE6_CAP_FLAGS_GLOBAL_HOT_SPARE         (0x00000001)
 
 
@@ -2160,6 +2218,11 @@ typedef struct _RAID_VOL0_SETTINGS
 #define MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE             (0x0004)
 #define MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC            (0x0008)
 #define MPI_RAIDVOL0_SETTING_FAST_DATA_SCRUBBING_0102   (0x0020) /* obsolete */
+
+#define MPI_RAIDVOL0_SETTING_MASK_METADATA_SIZE         (0x00C0)
+#define MPI_RAIDVOL0_SETTING_64MB_METADATA_SIZE         (0x0000)
+#define MPI_RAIDVOL0_SETTING_512MB_METADATA_SIZE        (0x0040)
+
 #define MPI_RAIDVOL0_SETTING_USE_PRODUCT_ID_SUFFIX      (0x0010)
 #define MPI_RAIDVOL0_SETTING_USE_DEFAULTS               (0x8000)
 
@@ -2203,7 +2266,7 @@ typedef struct _CONFIG_PAGE_RAID_VOL_0
 } CONFIG_PAGE_RAID_VOL_0, MPI_POINTER PTR_CONFIG_PAGE_RAID_VOL_0,
   RaidVolumePage0_t, MPI_POINTER pRaidVolumePage0_t;
 
-#define MPI_RAIDVOLPAGE0_PAGEVERSION                    (0x06)
+#define MPI_RAIDVOLPAGE0_PAGEVERSION                    (0x07)
 
 /* values for RAID Volume Page 0 InactiveStatus field */
 #define MPI_RAIDVOLPAGE0_UNKNOWN_INACTIVE               (0x00)
@@ -2518,7 +2581,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_1
 } CONFIG_PAGE_SAS_IO_UNIT_1, MPI_POINTER PTR_CONFIG_PAGE_SAS_IO_UNIT_1,
   SasIOUnitPage1_t, MPI_POINTER pSasIOUnitPage1_t;
 
-#define MPI_SASIOUNITPAGE1_PAGEVERSION                  (0x06)
+#define MPI_SASIOUNITPAGE1_PAGEVERSION                  (0x07)
 
 /* values for SAS IO Unit Page 1 ControlFlags */
 #define MPI_SAS_IOUNIT1_CONTROL_DEVICE_SELF_TEST            (0x8000)
@@ -2544,7 +2607,13 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_1
 #define MPI_SAS_IOUNIT1_CONTROL_CLEAR_AFFILIATION           (0x0001)
 
 /* values for SAS IO Unit Page 1 AdditionalControlFlags */
-#define MPI_SAS_IOUNIT1_ACONTROL_ALLOW_TABLE_TO_TABLE               (0x0001)
+#define MPI_SAS_IOUNIT1_ACONTROL_SATA_ASYNCHROUNOUS_NOTIFICATION    (0x0040)
+#define MPI_SAS_IOUNIT1_ACONTROL_HIDE_NONZERO_ATTACHED_PHY_IDENT    (0x0020)
+#define MPI_SAS_IOUNIT1_ACONTROL_PORT_ENABLE_ONLY_SATA_LINK_RESET   (0x0010)
+#define MPI_SAS_IOUNIT1_ACONTROL_OTHER_AFFILIATION_SATA_LINK_RESET  (0x0008)
+#define MPI_SAS_IOUNIT1_ACONTROL_SELF_AFFILIATION_SATA_LINK_RESET   (0x0004)
+#define MPI_SAS_IOUNIT1_ACONTROL_NO_AFFILIATION_SATA_LINK_RESET     (0x0002)
+#define MPI_SAS_IOUNIT1_ACONTROL_ALLOW_TABLE_TO_TABLE               (0x0001)
 
 /* defines for SAS IO Unit Page 1 ReportDeviceMissingDelay */
 #define MPI_SAS_IOUNIT1_REPORT_MISSING_TIMEOUT_MASK         (0x7F)
@@ -2585,9 +2654,11 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_2
 } CONFIG_PAGE_SAS_IO_UNIT_2, MPI_POINTER PTR_CONFIG_PAGE_SAS_IO_UNIT_2,
   SasIOUnitPage2_t, MPI_POINTER pSasIOUnitPage2_t;
 
-#define MPI_SASIOUNITPAGE2_PAGEVERSION                  (0x05)
+#define MPI_SASIOUNITPAGE2_PAGEVERSION                  (0x06)
 
 /* values for SAS IO Unit Page 2 Status field */
+#define MPI_SAS_IOUNIT2_STATUS_DEVICE_LIMIT_EXCEEDED        (0x08)
+#define MPI_SAS_IOUNIT2_STATUS_ENCLOSURE_DEVICES_UNMAPPED   (0x04)
 #define MPI_SAS_IOUNIT2_STATUS_DISABLED_PERSISTENT_MAPPINGS (0x02)
 #define MPI_SAS_IOUNIT2_STATUS_FULL_PERSISTENT_MAPPINGS     (0x01)
 
@@ -2739,24 +2810,38 @@ typedef struct _CONFIG_PAGE_SAS_DEVICE_0
 } CONFIG_PAGE_SAS_DEVICE_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_DEVICE_0,
   SasDevicePage0_t, MPI_POINTER pSasDevicePage0_t;
 
-#define MPI_SASDEVICE0_PAGEVERSION                      (0x04)
+#define MPI_SASDEVICE0_PAGEVERSION                      (0x05)
 
 /* values for SAS Device Page 0 AccessStatus field */
 #define MPI_SAS_DEVICE0_ASTATUS_NO_ERRORS                   (0x00)
 #define MPI_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED            (0x01)
 #define MPI_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED      (0x02)
+#define MPI_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT   (0x03)
+/* specific values for SATA Init failures */
+#define MPI_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN                 (0x10)
+#define MPI_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT    (0x11)
+#define MPI_SAS_DEVICE0_ASTATUS_SIF_DIAG                    (0x12)
+#define MPI_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION          (0x13)
+#define MPI_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER             (0x14)
+#define MPI_SAS_DEVICE0_ASTATUS_SIF_PIO_SN                  (0x15)
+#define MPI_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN                 (0x16)
+#define MPI_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN                 (0x17)
+#define MPI_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION        (0x18)
+#define MPI_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE         (0x19)
+#define MPI_SAS_DEVICE0_ASTATUS_SIF_MAX                     (0x1F)
 
 /* values for SAS Device Page 0 Flags field */
-#define MPI_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE          (0x0200)
-#define MPI_SAS_DEVICE0_FLAGS_UNSUPPORTED_DEVICE        (0x0100)
-#define MPI_SAS_DEVICE0_FLAGS_SATA_48BIT_LBA_SUPPORTED  (0x0080)
-#define MPI_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED      (0x0040)
-#define MPI_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED        (0x0020)
-#define MPI_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED        (0x0010)
-#define MPI_SAS_DEVICE0_FLAGS_PORT_SELECTOR_ATTACH      (0x0008)
-#define MPI_SAS_DEVICE0_FLAGS_MAPPING_PERSISTENT        (0x0004)
-#define MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED             (0x0002)
-#define MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT            (0x0001)
+#define MPI_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY      (0x0400)
+#define MPI_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE              (0x0200)
+#define MPI_SAS_DEVICE0_FLAGS_UNSUPPORTED_DEVICE            (0x0100)
+#define MPI_SAS_DEVICE0_FLAGS_SATA_48BIT_LBA_SUPPORTED      (0x0080)
+#define MPI_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED          (0x0040)
+#define MPI_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED            (0x0020)
+#define MPI_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED            (0x0010)
+#define MPI_SAS_DEVICE0_FLAGS_PORT_SELECTOR_ATTACH          (0x0008)
+#define MPI_SAS_DEVICE0_FLAGS_MAPPING_PERSISTENT            (0x0004)
+#define MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED                 (0x0002)
+#define MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT                (0x0001)
 
 /* see mpi_sas.h for values for SAS Device Page 0 DeviceInfo values */
 
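
The metadata-size defines added above to IOC Page 6 and RAID Volume Page 0 are plain bit fields; a minimal decode sketch follows. It is not part of the patch: the helper names and the idea of handing the raw CapabilitiesFlags/VolumeSettings words around as integers are illustrative assumptions, only the MPI_* defines come from the header.

	/* Hypothetical helper: report the RAID metadata size selected in a
	 * RAID Volume Page 0 VolumeSettings word (reserved encodings are
	 * treated as the 64 MB default here). */
	static U32 raid_vol0_metadata_mb(U16 volume_settings)
	{
		U16 bits = volume_settings & MPI_RAIDVOL0_SETTING_MASK_METADATA_SIZE;

		if (bits == MPI_RAIDVOL0_SETTING_512MB_METADATA_SIZE)
			return 512;
		return 64;	/* MPI_RAIDVOL0_SETTING_64MB_METADATA_SIZE */
	}

	/* Hypothetical helper: does IOC Page 6 advertise 512 MB metadata? */
	static int ioc6_supports_512mb_metadata(U32 capabilities_flags)
	{
		return (capabilities_flags & MPI_IOCPAGE6_CAP_FLAGS_MASK_METADATA_SIZE) ==
			MPI_IOCPAGE6_CAP_FLAGS_512MB_METADATA_SIZE;
	}
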
diff --git a/drivers/message/fusion/lsi/mpi_history.txt b/drivers/message/fusion/lsi/mpi_history.txt
index 582cfe7c2aa1..d6b4c607453b 100644
--- a/drivers/message/fusion/lsi/mpi_history.txt
+++ b/drivers/message/fusion/lsi/mpi_history.txt
@@ -3,28 +3,28 @@
                               MPI Header File Change History
                               ==============================
 
- Copyright (c) 2000-2005 LSI Logic Corporation.
+ Copyright (c) 2000-2006 LSI Logic Corporation.
 
  ---------------------------------------
- Header Set Release Version:  01.05.13
- Header Set Release Date:     03-27-06
+ Header Set Release Version:  01.05.14
+ Header Set Release Date:     10-11-06
  ---------------------------------------
 
  Filename          Current version     Prior version
  ----------        ---------------     -------------
- mpi.h             01.05.11            01.05.10
- mpi_ioc.h         01.05.11            01.05.10
- mpi_cnfg.h        01.05.12            01.05.11
- mpi_init.h        01.05.07            01.05.06
- mpi_targ.h        01.05.06            01.05.05
+ mpi.h             01.05.12            01.05.11
+ mpi_ioc.h         01.05.12            01.05.11
+ mpi_cnfg.h        01.05.13            01.05.12
+ mpi_init.h        01.05.08            01.05.07
+ mpi_targ.h        01.05.06            01.05.06
  mpi_fc.h          01.05.01            01.05.01
  mpi_lan.h         01.05.01            01.05.01
  mpi_raid.h        01.05.02            01.05.02
  mpi_tool.h        01.05.03            01.05.03
  mpi_inb.h         01.05.01            01.05.01
- mpi_sas.h         01.05.03            01.05.02
+ mpi_sas.h         01.05.04            01.05.03
  mpi_type.h        01.05.02            01.05.02
- mpi_history.txt   01.05.13            01.05.12
+ mpi_history.txt   01.05.14            01.05.13
 
 
  * Date      Version   Description
@@ -94,6 +94,7 @@ mpi.h
  *  08-03-05  01.05.09  Bumped MPI_HEADER_VERSION_UNIT.
  *  08-30-05  01.05.10  Added 2 new IOCStatus codes for Target.
  *  03-27-06  01.05.11  Bumped MPI_HEADER_VERSION_UNIT.
+ *  10-11-06  01.05.12  Bumped MPI_HEADER_VERSION_UNIT.
  *  --------------------------------------------------------------------------
 
 mpi_ioc.h
@@ -182,6 +183,14 @@ mpi_ioc.h
  *                      Added MPI_EVENT_SAS_INIT_TABLE_OVERFLOW and event
  *                      data structure.
  *                      Added MPI_EXT_IMAGE_TYPE_INITIALIZATION.
+ *  10-11-06  01.05.12  Added MPI_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED.
+ *                      Added MaxInitiators field to PortFacts reply.
+ *                      Added SAS Device Status Change ReasonCode for
+ *                      asynchronous notificaiton.
+ *                      Added MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE and event
+ *                      data structure.
+ *                      Added new ImageType values for FWDownload and FWUpload
+ *                      requests.
  *  --------------------------------------------------------------------------
 
 mpi_cnfg.h
@@ -447,6 +456,23 @@ mpi_cnfg.h
  *                      Added AdditionalControlFlags, MaxTargetPortConnectTime,
  *                      ReportDeviceMissingDelay, and IODeviceMissingDelay
  *                      fields to SAS IO Unit Page 1.
+ *  10-11-06  01.05.13  Added NumForceWWID field and ForceWWID array to
+ *                      Manufacturing Page 5.
+ *                      Added Manufacturing pages 8 through 10.
+ *                      Added defines for supported metadata size bits in
+ *                      CapabilitiesFlags field of IOC Page 6.
+ *                      Added defines for metadata size bits in VolumeSettings
+ *                      field of RAID Volume Page 0.
+ *                      Added SATA Link Reset settings, Enable SATA Asynchronous
+ *                      Notification bit, and HideNonZeroAttachedPhyIdentifiers
+ *                      bit to AdditionalControlFlags field of SAS IO Unit
+ *                      Page 1.
+ *                      Added defines for Enclosure Devices Unmapped and
+ *                      Device Limit Exceeded bits in Status field of SAS IO
+ *                      Unit Page 2.
+ *                      Added more AccessStatus values for SAS Device Page 0.
+ *                      Added bit for SATA Asynchronous Notification Support in
+ *                      Flags field of SAS Device Page 0.
  *  --------------------------------------------------------------------------
 
 mpi_init.h
@@ -490,6 +516,7 @@ mpi_init.h
  *  08-03-05  01.05.06  Fixed some MPI_SCSIIO32_MSGFLGS_ defines to make them
  *                      unique in the first 32 characters.
  *  03-27-06  01.05.07  Added Task Management type of Clear ACA.
+ *  10-11-06  01.05.08  Shortened define for Task Management type of Clear ACA.
  *  --------------------------------------------------------------------------
 
 mpi_targ.h
@@ -638,6 +665,8 @@ mpi_sas.h
  *                      and Remove Device operations to SAS IO Unit Control.
  *                      Added DevHandle field to SAS IO Unit Control request and
  *                      reply.
+ *  10-11-06  01.05.04  Fixed the name of a define for Operation field of SAS IO
+ *                      Unit Control request.
  *  --------------------------------------------------------------------------
 
 mpi_type.h
@@ -653,20 +682,20 @@ mpi_type.h
 
 mpi_history.txt           Parts list history
 
-Filename     01.05.13   01.05.12   01.05.11   01.05.10   01.05.09
-----------   --------   --------   --------   --------   --------
-mpi.h        01.05.11   01.05.10   01.05.09   01.05.08   01.05.07
-mpi_ioc.h    01.05.11   01.05.10   01.05.09   01.05.09   01.05.08
-mpi_cnfg.h   01.05.12   01.05.11   01.05.10   01.05.09   01.05.08
-mpi_init.h   01.05.07   01.05.06   01.05.06   01.05.05   01.05.04
-mpi_targ.h   01.05.06   01.05.05   01.05.05   01.05.05   01.05.04
-mpi_fc.h     01.05.01   01.05.01   01.05.01   01.05.01   01.05.01
-mpi_lan.h    01.05.01   01.05.01   01.05.01   01.05.01   01.05.01
-mpi_raid.h   01.05.02   01.05.02   01.05.02   01.05.02   01.05.02
-mpi_tool.h   01.05.03   01.05.03   01.05.03   01.05.03   01.05.03
-mpi_inb.h    01.05.01   01.05.01   01.05.01   01.05.01   01.05.01
-mpi_sas.h    01.05.03   01.05.02   01.05.01   01.05.01   01.05.01
-mpi_type.h   01.05.02   01.05.02   01.05.01   01.05.01   01.05.01
+Filename     01.05.13   01.05.13   01.05.12   01.05.11   01.05.10   01.05.09
+----------   --------   --------   --------   --------   --------   --------
+mpi.h        01.05.12   01.05.11   01.05.10   01.05.09   01.05.08   01.05.07
+mpi_ioc.h    01.05.12   01.05.11   01.05.10   01.05.09   01.05.09   01.05.08
+mpi_cnfg.h   01.05.13   01.05.12   01.05.11   01.05.10   01.05.09   01.05.08
+mpi_init.h   01.05.08   01.05.07   01.05.06   01.05.06   01.05.05   01.05.04
+mpi_targ.h   01.05.06   01.05.06   01.05.05   01.05.05   01.05.05   01.05.04
+mpi_fc.h     01.05.01   01.05.01   01.05.01   01.05.01   01.05.01   01.05.01
+mpi_lan.h    01.05.01   01.05.01   01.05.01   01.05.01   01.05.01   01.05.01
+mpi_raid.h   01.05.02   01.05.02   01.05.02   01.05.02   01.05.02   01.05.02
+mpi_tool.h   01.05.03   01.05.03   01.05.03   01.05.03   01.05.03   01.05.03
+mpi_inb.h    01.05.01   01.05.01   01.05.01   01.05.01   01.05.01   01.05.01
+mpi_sas.h    01.05.04   01.05.03   01.05.02   01.05.01   01.05.01   01.05.01
+mpi_type.h   01.05.02   01.05.02   01.05.02   01.05.01   01.05.01   01.05.01
 
 Filename     01.05.08   01.05.07   01.05.06   01.05.05   01.05.04   01.05.03
 ----------   --------   --------   --------   --------   --------   --------
diff --git a/drivers/message/fusion/lsi/mpi_init.h b/drivers/message/fusion/lsi/mpi_init.h
index c1c678989a23..ec9dff2249a7 100644
--- a/drivers/message/fusion/lsi/mpi_init.h
+++ b/drivers/message/fusion/lsi/mpi_init.h
@@ -1,12 +1,12 @@
 /*
- *  Copyright (c) 2000-2005 LSI Logic Corporation.
+ *  Copyright (c) 2000-2006 LSI Logic Corporation.
  *
  *
  *           Name:  mpi_init.h
  *          Title:  MPI initiator mode messages and structures
  *  Creation Date:  June 8, 2000
  *
- *    mpi_init.h Version:  01.05.07
+ *    mpi_init.h Version:  01.05.08
  *
  *  Version History
  *  ---------------
@@ -53,6 +53,7 @@
  *  08-03-05  01.05.06  Fixed some MPI_SCSIIO32_MSGFLGS_ defines to make them
  *                      unique in the first 32 characters.
  *  03-27-06  01.05.07  Added Task Management type of Clear ACA.
+ *  10-11-06  01.05.08  Shortened define for Task Management type of Clear ACA.
  *  --------------------------------------------------------------------------
  */
 
@@ -428,7 +429,7 @@ typedef struct _MSG_SCSI_TASK_MGMT
 #define MPI_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET    (0x05)
 #define MPI_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET        (0x06)
 #define MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK            (0x07)
-#define MPI_SCSITASKMGMT_TASKTYPE_CLEAR_ACA             (0x08)
+#define MPI_SCSITASKMGMT_TASKTYPE_CLR_ACA               (0x08)
 
 /* MsgFlags bits */
 #define MPI_SCSITASKMGMT_MSGFLAGS_TARGET_RESET_OPTION   (0x00)
diff --git a/drivers/message/fusion/lsi/mpi_ioc.h b/drivers/message/fusion/lsi/mpi_ioc.h
index 18ba407fd399..6c33e3353375 100644
--- a/drivers/message/fusion/lsi/mpi_ioc.h
+++ b/drivers/message/fusion/lsi/mpi_ioc.h
@@ -1,12 +1,12 @@
 /*
- *  Copyright (c) 2000-2005 LSI Logic Corporation.
+ *  Copyright (c) 2000-2006 LSI Logic Corporation.
  *
  *
  *           Name:  mpi_ioc.h
  *          Title:  MPI IOC, Port, Event, FW Download, and FW Upload messages
  *  Creation Date:  August 11, 2000
  *
- *    mpi_ioc.h Version:  01.05.11
+ *    mpi_ioc.h Version:  01.05.12
  *
  *  Version History
  *  ---------------
@@ -98,6 +98,14 @@
  *                      Added MPI_EVENT_SAS_INIT_TABLE_OVERFLOW and event
  *                      data structure.
  *                      Added MPI_EXT_IMAGE_TYPE_INITIALIZATION.
+ *  10-11-06  01.05.12  Added MPI_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED.
+ *                      Added MaxInitiators field to PortFacts reply.
+ *                      Added SAS Device Status Change ReasonCode for
+ *                      asynchronous notificaiton.
+ *                      Added MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE and event
+ *                      data structure.
+ *                      Added new ImageType values for FWDownload and FWUpload
+ *                      requests.
  *  --------------------------------------------------------------------------
  */
 
@@ -264,6 +272,7 @@ typedef struct _MSG_IOC_FACTS_REPLY
 #define MPI_IOCFACTS_EXCEPT_RAID_CONFIG_INVALID         (0x0002)
 #define MPI_IOCFACTS_EXCEPT_FW_CHECKSUM_FAIL            (0x0004)
 #define MPI_IOCFACTS_EXCEPT_PERSISTENT_TABLE_FULL       (0x0008)
+#define MPI_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED        (0x0010)
 
 #define MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT             (0x01)
 #define MPI_IOCFACTS_FLAGS_REPLY_FIFO_HOST_SIGNAL       (0x02)
@@ -328,7 +337,8 @@ typedef struct _MSG_PORT_FACTS_REPLY
     U16                     MaxPostedCmdBuffers;        /* 1Ch */
     U16                     MaxPersistentIDs;           /* 1Eh */
     U16                     MaxLanBuckets;              /* 20h */
-    U16                     Reserved4;                  /* 22h */
+    U8                      MaxInitiators;              /* 22h */
+    U8                      Reserved4;                  /* 23h */
     U32                     Reserved5;                  /* 24h */
 } MSG_PORT_FACTS_REPLY, MPI_POINTER PTR_MSG_PORT_FACTS_REPLY,
   PortFactsReply_t, MPI_POINTER pPortFactsReply_t;
@@ -487,6 +497,7 @@ typedef struct _MSG_EVENT_ACK_REPLY
 #define MPI_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE     (0x00000018)
 #define MPI_EVENT_SAS_INIT_TABLE_OVERFLOW           (0x00000019)
 #define MPI_EVENT_SAS_SMP_ERROR                     (0x0000001A)
+#define MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE        (0x0000001B)
 #define MPI_EVENT_LOG_ENTRY_ADDED                   (0x00000021)
 
 /* AckRequired field values */
@@ -593,6 +604,7 @@ typedef struct _EVENT_DATA_SAS_DEVICE_STATUS_CHANGE
 #define MPI_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL   (0x0A)
 #define MPI_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL   (0x0B)
 #define MPI_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL       (0x0C)
+#define MPI_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION        (0x0D)
 
 
 /* SCSI Event data for Queue Full event */
@@ -895,6 +907,54 @@ typedef struct _EVENT_DATA_SAS_INIT_TABLE_OVERFLOW
   MpiEventDataSasInitTableOverflow_t,
   MPI_POINTER pMpiEventDataSasInitTableOverflow_t;
 
+/* SAS Expander Status Change Event data */
+
+typedef struct _EVENT_DATA_SAS_EXPANDER_STATUS_CHANGE
+{
+    U8                      ReasonCode;             /* 00h */
+    U8                      Reserved1;              /* 01h */
+    U16                     Reserved2;              /* 02h */
+    U8                      PhysicalPort;           /* 04h */
+    U8                      Reserved3;              /* 05h */
+    U16                     EnclosureHandle;        /* 06h */
+    U64                     SASAddress;             /* 08h */
+    U32                     DiscoveryStatus;        /* 10h */
+    U16                     DevHandle;              /* 14h */
+    U16                     ParentDevHandle;        /* 16h */
+    U16                     ExpanderChangeCount;    /* 18h */
+    U16                     ExpanderRouteIndexes;   /* 1Ah */
+    U8                      NumPhys;                /* 1Ch */
+    U8                      SASLevel;               /* 1Dh */
+    U8                      Flags;                  /* 1Eh */
+    U8                      Reserved4;              /* 1Fh */
+} EVENT_DATA_SAS_EXPANDER_STATUS_CHANGE,
+  MPI_POINTER PTR_EVENT_DATA_SAS_EXPANDER_STATUS_CHANGE,
+  MpiEventDataSasExpanderStatusChange_t,
+  MPI_POINTER pMpiEventDataSasExpanderStatusChange_t;
+
+/* values for ReasonCode field of SAS Expander Status Change Event data */
+#define MPI_EVENT_SAS_EXP_RC_ADDED                      (0x00)
+#define MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING             (0x01)
+
+/* values for DiscoveryStatus field of SAS Expander Status Change Event data */
+#define MPI_EVENT_SAS_EXP_DS_LOOP_DETECTED              (0x00000001)
+#define MPI_EVENT_SAS_EXP_DS_UNADDRESSABLE_DEVICE       (0x00000002)
+#define MPI_EVENT_SAS_EXP_DS_MULTIPLE_PORTS             (0x00000004)
+#define MPI_EVENT_SAS_EXP_DS_EXPANDER_ERR               (0x00000008)
+#define MPI_EVENT_SAS_EXP_DS_SMP_TIMEOUT                (0x00000010)
+#define MPI_EVENT_SAS_EXP_DS_OUT_ROUTE_ENTRIES          (0x00000020)
+#define MPI_EVENT_SAS_EXP_DS_INDEX_NOT_EXIST            (0x00000040)
+#define MPI_EVENT_SAS_EXP_DS_SMP_FUNCTION_FAILED        (0x00000080)
+#define MPI_EVENT_SAS_EXP_DS_SMP_CRC_ERROR              (0x00000100)
+#define MPI_EVENT_SAS_EXP_DS_SUBTRACTIVE_LINK           (0x00000200)
+#define MPI_EVENT_SAS_EXP_DS_TABLE_LINK                 (0x00000400)
+#define MPI_EVENT_SAS_EXP_DS_UNSUPPORTED_DEVICE         (0x00000800)
+
+/* values for Flags field of SAS Expander Status Change Event data */
+#define MPI_EVENT_SAS_EXP_FLAGS_ROUTE_TABLE_CONFIG      (0x02)
+#define MPI_EVENT_SAS_EXP_FLAGS_CONFIG_IN_PROGRESS      (0x01)
+
+
 
 /*****************************************************************************
 *
@@ -926,6 +986,10 @@ typedef struct _MSG_FW_DOWNLOAD
 #define MPI_FW_DOWNLOAD_ITYPE_BIOS                  (0x02)
 #define MPI_FW_DOWNLOAD_ITYPE_NVDATA                (0x03)
 #define MPI_FW_DOWNLOAD_ITYPE_BOOTLOADER            (0x04)
+#define MPI_FW_DOWNLOAD_ITYPE_MANUFACTURING         (0x06)
+#define MPI_FW_DOWNLOAD_ITYPE_CONFIG_1              (0x07)
+#define MPI_FW_DOWNLOAD_ITYPE_CONFIG_2              (0x08)
+#define MPI_FW_DOWNLOAD_ITYPE_MEGARAID              (0x09)
 
 
 typedef struct _FWDownloadTCSGE
@@ -980,6 +1044,11 @@ typedef struct _MSG_FW_UPLOAD
 #define MPI_FW_UPLOAD_ITYPE_NVDATA                  (0x03)
 #define MPI_FW_UPLOAD_ITYPE_BOOTLOADER              (0x04)
 #define MPI_FW_UPLOAD_ITYPE_FW_BACKUP               (0x05)
+#define MPI_FW_UPLOAD_ITYPE_MANUFACTURING           (0x06)
+#define MPI_FW_UPLOAD_ITYPE_CONFIG_1                (0x07)
+#define MPI_FW_UPLOAD_ITYPE_CONFIG_2                (0x08)
+#define MPI_FW_UPLOAD_ITYPE_MEGARAID                (0x09)
+#define MPI_FW_UPLOAD_ITYPE_COMPLETE                (0x0A)
 
 typedef struct _FWUploadTCSGE
 {
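
The new expander status change event above delivers its payload as EVENT_DATA_SAS_EXPANDER_STATUS_CHANGE; a minimal decode sketch follows. It is not part of the patch: the function name and the way the raw event data reaches it are illustrative assumptions, only the MPI_* names come from this header, and a real driver would also byte-swap the multi-byte fields before using them.

	/* Hypothetical handler fragment: classify a SAS expander status
	 * change event using the new structure and ReasonCode values. */
	static void example_handle_expander_event(U32 event, void *event_data)
	{
		EVENT_DATA_SAS_EXPANDER_STATUS_CHANGE *d = event_data;

		if (event != MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE)
			return;

		if (d->ReasonCode == MPI_EVENT_SAS_EXP_RC_ADDED)
			; /* expander with d->NumPhys phys appeared at d->SASAddress */
		else if (d->ReasonCode == MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING)
			; /* expander at d->DevHandle stopped responding */

		if (d->DiscoveryStatus & MPI_EVENT_SAS_EXP_DS_LOOP_DETECTED)
			; /* discovery found a loop behind this expander */
	}
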
diff --git a/drivers/message/fusion/lsi/mpi_log_sas.h b/drivers/message/fusion/lsi/mpi_log_sas.h
index 871ebc08b706..635bbe04513e 100644
--- a/drivers/message/fusion/lsi/mpi_log_sas.h
+++ b/drivers/message/fusion/lsi/mpi_log_sas.h
@@ -1,4 +1,3 @@
-
 /***************************************************************************
  *                                                                         *
  *  Copyright 2003 LSI Logic Corporation.  All rights reserved.            *
@@ -14,7 +13,7 @@
 #define IOPI_IOCLOGINFO_H_INCLUDED
 
-#define SAS_LOGINFO_NEXUS_LOSS		0x31170000
-#define SAS_LOGINFO_MASK		0xFFFF0000
+#define SAS_LOGINFO_NEXUS_LOSS        0x31170000
+#define SAS_LOGINFO_MASK              0xFFFF0000
 
 /****************************************************************************/
 /* IOC LOGINFO defines, 0x00000000 - 0x0FFFFFFF */
@@ -43,129 +42,172 @@
43/****************************************************************************/ 42/****************************************************************************/
44/* IOP LOGINFO_CODE defines, valid if IOC_LOGINFO_ORIGINATOR = IOP */ 43/* IOP LOGINFO_CODE defines, valid if IOC_LOGINFO_ORIGINATOR = IOP */
45/****************************************************************************/ 44/****************************************************************************/
46#define IOP_LOGINFO_CODE_INVALID_SAS_ADDRESS (0x00010000) 45#define IOP_LOGINFO_CODE_INVALID_SAS_ADDRESS (0x00010000)
47#define IOP_LOGINFO_CODE_UNUSED2 (0x00020000) 46#define IOP_LOGINFO_CODE_UNUSED2 (0x00020000)
48#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE (0x00030000) 47#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE (0x00030000)
49#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_RT (0x00030100) /* Route Table Entry not found */ 48#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_RT (0x00030100) /* Route Table Entry not found */
50#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_PN (0x00030200) /* Invalid Page Number */ 49#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_PN (0x00030200) /* Invalid Page Number */
51#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_FORM (0x00030300) /* Invalid FORM */ 50#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_FORM (0x00030300) /* Invalid FORM */
52#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_PT (0x00030400) /* Invalid Page Type */ 51#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_PT (0x00030400) /* Invalid Page Type */
53#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_DNM (0x00030500) /* Device Not Mapped */ 52#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_DNM (0x00030500) /* Device Not Mapped */
54#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_PERSIST (0x00030600) /* Persistent Page not found */ 53#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_PERSIST (0x00030600) /* Persistent Page not found */
55#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_DEFAULT (0x00030700) /* Default Page not found */ 54#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_DEFAULT (0x00030700) /* Default Page not found */
56 55
57#define IOP_LOGINFO_CODE_DIAG_MSG_ERROR (0x00040000) /* Error handling diag msg - or'd with diag status */ 56#define IOP_LOGINFO_CODE_FWUPLOAD_NO_FLASH_AVAILABLE (0x0003E000) /* Tried to upload from flash, but there is none */
58 57#define IOP_LOGINFO_CODE_FWUPLOAD_UNKNOWN_IMAGE_TYPE (0x0003E001) /* ImageType field contents were invalid */
59#define IOP_LOGINFO_CODE_TASK_TERMINATED (0x00050000) 58#define IOP_LOGINFO_CODE_FWUPLOAD_WRONG_IMAGE_SIZE (0x0003E002) /* ImageSize field in TCSGE was bad/offset in MfgPg 4 was wrong */
60 59#define IOP_LOGINFO_CODE_FWUPLOAD_ENTIRE_FLASH_UPLOAD_FAILED (0x0003E003) /* Error occured while attempting to upload the entire flash */
61#define IOP_LOGINFO_CODE_ENCL_MGMT_READ_ACTION_ERR0R (0x00060001) /* Read Action not supported for SEP msg */ 60#define IOP_LOGINFO_CODE_FWUPLOAD_REGION_UPLOAD_FAILED (0x0003E004) /* Error occured while attempting to upload single flash region */
62#define IOP_LOGINFO_CODE_ENCL_MGMT_INVALID_BUS_ID_ERR0R (0x00060002) /* Invalid Bus/ID in SEP msg */ 61#define IOP_LOGINFO_CODE_FWUPLOAD_DMA_FAILURE (0x0003E005) /* Problem occured while DMAing FW to host memory */
63 62
64#define IOP_LOGINFO_CODE_TARGET_ASSIST_TERMINATED (0x00070001) 63#define IOP_LOGINFO_CODE_DIAG_MSG_ERROR (0x00040000) /* Error handling diag msg - or'd with diag status */
65#define IOP_LOGINFO_CODE_TARGET_STATUS_SEND_TERMINATED (0x00070002) 64
66#define IOP_LOGINFO_CODE_TARGET_MODE_ABORT_ALL_IO (0x00070003) 65#define IOP_LOGINFO_CODE_TASK_TERMINATED (0x00050000)
67#define IOP_LOGINFO_CODE_TARGET_MODE_ABORT_EXACT_IO (0x00070004) 66
68#define IOP_LOGINFO_CODE_TARGET_MODE_ABORT_EXACT_IO_REQ (0x00070005) 67#define IOP_LOGINFO_CODE_ENCL_MGMT_READ_ACTION_ERR0R (0x00060001) /* Read Action not supported for SEP msg */
68#define IOP_LOGINFO_CODE_ENCL_MGMT_INVALID_BUS_ID_ERR0R (0x00060002) /* Invalid Bus/ID in SEP msg */
69
70#define IOP_LOGINFO_CODE_TARGET_ASSIST_TERMINATED (0x00070001)
71#define IOP_LOGINFO_CODE_TARGET_STATUS_SEND_TERMINATED (0x00070002)
72#define IOP_LOGINFO_CODE_TARGET_MODE_ABORT_ALL_IO (0x00070003)
73#define IOP_LOGINFO_CODE_TARGET_MODE_ABORT_EXACT_IO (0x00070004)
74#define IOP_LOGINFO_CODE_TARGET_MODE_ABORT_EXACT_IO_REQ (0x00070005)
69 75
70/****************************************************************************/ 76/****************************************************************************/
71/* PL LOGINFO_CODE defines, valid if IOC_LOGINFO_ORIGINATOR = PL */ 77/* PL LOGINFO_CODE defines, valid if IOC_LOGINFO_ORIGINATOR = PL */
72/****************************************************************************/ 78/****************************************************************************/
73#define PL_LOGINFO_CODE_OPEN_FAILURE (0x00010000) 79#define PL_LOGINFO_CODE_OPEN_FAILURE (0x00010000) /* see SUB_CODE_OPEN_FAIL_ below */
74#define PL_LOG_INFO_CODE_OPEN_FAILURE_NO_DEST_TIME_OUT (0x00010001) 80
75#define PL_LOGINFO_CODE_OPEN_FAILURE_BAD_DESTINATION (0x00010011) 81#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_NO_DEST_TIME_OUT (0x00000001)
76#define PL_LOGINFO_CODE_OPEN_FAILURE_PROTOCOL_NOT_SUPPORTED (0x00010013) 82#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_PATHWAY_BLOCKED (0x00000002)
77#define PL_LOGINFO_CODE_OPEN_FAILURE_STP_RESOURCES_BSY (0x00010018) 83#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RES_CONTINUE0 (0x00000003)
78#define PL_LOGINFO_CODE_OPEN_FAILURE_WRONG_DESTINATION (0x00010019) 84#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RES_CONTINUE1 (0x00000004)
79#define PL_LOGINFO_CODE_OPEN_FAILURE_ORR_TIMEOUT (0X0001001A) 85#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RES_INITIALIZE0 (0x00000005)
80#define PL_LOGINFO_CODE_OPEN_FAILURE_PATHWAY_BLOCKED (0x0001001B) 86#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RES_INITIALIZE1 (0x00000006)
81#define PL_LOGINFO_CODE_OPEN_FAILURE_AWT_MAXED (0x0001001C) 87#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RES_STOP0 (0x00000007)
82#define PL_LOGINFO_CODE_INVALID_SGL (0x00020000) 88#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RES_STOP1 (0x00000008)
83#define PL_LOGINFO_CODE_WRONG_REL_OFF_OR_FRAME_LENGTH (0x00030000) 89#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RETRY (0x00000009)
84#define PL_LOGINFO_CODE_FRAME_XFER_ERROR (0x00040000) 90#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_BREAK (0x0000000A)
85#define PL_LOGINFO_CODE_TX_FM_CONNECTED_LOW (0x00050000) 91#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_UNUSED_0B (0x0000000B)
86#define PL_LOGINFO_CODE_SATA_NON_NCQ_RW_ERR_BIT_SET (0x00060000) 92#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_OPEN_TIMEOUT_EXP (0x0000000C)
87#define PL_LOGINFO_CODE_SATA_READ_LOG_RECEIVE_DATA_ERR (0x00070000) 93#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_UNUSED_0D (0x0000000D)
88#define PL_LOGINFO_CODE_SATA_NCQ_FAIL_ALL_CMDS_AFTR_ERR (0x00080000) 94#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_DVTBLE_ACCSS_FAIL (0x0000000E)
89#define PL_LOGINFO_CODE_SATA_ERR_IN_RCV_SET_DEV_BIT_FIS (0x00090000) 95#define PL_LOGINFO_SUB CODE_OPEN_FAIL_BAD_DEST (0x00000011)
90#define PL_LOGINFO_CODE_RX_FM_INVALID_MESSAGE (0x000A0000) 96#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RATE_NOT_SUPP (0x00000012)
91#define PL_LOGINFO_CODE_RX_CTX_MESSAGE_VALID_ERROR (0x000B0000) 97#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_PROT_NOT_SUPP (0x00000013)
92#define PL_LOGINFO_CODE_RX_FM_CURRENT_FRAME_ERROR (0x000C0000) 98#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RESERVED_ABANDON0 (0x00000014)
93#define PL_LOGINFO_CODE_SATA_LINK_DOWN (0x000D0000) 99#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RESERVED_ABANDON1 (0x00000015)
94#define PL_LOGINFO_CODE_DISCOVERY_SATA_INIT_W_IOS (0x000E0000) 100#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RESERVED_ABANDON2 (0x00000016)
95#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE (0x000F0000) 101#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RESERVED_ABANDON3 (0x00000017)
96#define PL_LOGINFO_CODE_CONFIG_PL_NOT_INITIALIZED (0x000F0001) /* PL not yet initialized, can't do config page req. */ 102#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_STP_RESOURCES_BSY (0x00000018)
97#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_PT (0x000F0100) /* Invalid Page Type */ 103#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_WRONG_DESTINATION (0x00000019)
98#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_NUM_PHYS (0x000F0200) /* Invalid Number of Phys */ 104
99#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_NOT_IMP (0x000F0300) /* Case Not Handled */ 105#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_PATH_BLOCKED (0x0000001B) /* Retry Timeout */
100#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_NO_DEV (0x000F0400) /* No Device Found */ 106#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_AWT_MAXED (0x0000001C) /* Retry Timeout */
101#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_FORM (0x000F0500) /* Invalid FORM */ 107
102#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_PHY (0x000F0600) /* Invalid Phy */ 108
103#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_NO_OWNER (0x000F0700) /* No Owner Found */ 109
104#define PL_LOGINFO_CODE_DSCVRY_SATA_INIT_TIMEOUT (0x00100000) 110#define PL_LOGINFO_CODE_INVALID_SGL (0x00020000)
105#define PL_LOGINFO_CODE_RESET (0x00110000) /* See Sub-Codes below */ 111#define PL_LOGINFO_CODE_WRONG_REL_OFF_OR_FRAME_LENGTH (0x00030000)
106#define PL_LOGINFO_CODE_ABORT (0x00120000) /* See Sub-Codes below */ 112#define PL_LOGINFO_CODE_FRAME_XFER_ERROR (0x00040000)
107#define PL_LOGINFO_CODE_IO_NOT_YET_EXECUTED (0x00130000) 113#define PL_LOGINFO_CODE_TX_FM_CONNECTED_LOW (0x00050000)
108#define PL_LOGINFO_CODE_IO_EXECUTED (0x00140000) 114#define PL_LOGINFO_CODE_SATA_NON_NCQ_RW_ERR_BIT_SET (0x00060000)
109#define PL_LOGINFO_CODE_PERS_RESV_OUT_NOT_AFFIL_OWNER (0x00150000) 115#define PL_LOGINFO_CODE_SATA_READ_LOG_RECEIVE_DATA_ERR (0x00070000)
110#define PL_LOGINFO_CODE_OPEN_TXDMA_ABORT (0x00160000) 116#define PL_LOGINFO_CODE_SATA_NCQ_FAIL_ALL_CMDS_AFTR_ERR (0x00080000)
111#define PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY (0x00170000) 117#define PL_LOGINFO_CODE_SATA_ERR_IN_RCV_SET_DEV_BIT_FIS (0x00090000)
112#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE (0x00000100) 118#define PL_LOGINFO_CODE_RX_FM_INVALID_MESSAGE (0x000A0000)
113#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_NO_DEST_TIMEOUT (0x00000101) 119#define PL_LOGINFO_CODE_RX_CTX_MESSAGE_VALID_ERROR (0x000B0000)
114#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_ORR_TIMEOUT (0x0000011A) /* Open Reject (Retry) Timeout */ 120#define PL_LOGINFO_CODE_RX_FM_CURRENT_FRAME_ERROR (0x000C0000)
115#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_PATHWAY_BLOCKED (0x0000011B) 121#define PL_LOGINFO_CODE_SATA_LINK_DOWN (0x000D0000)
116#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_AWT_MAXED (0x0000011C) /* Arbitration Wait Timer Maxed */ 122#define PL_LOGINFO_CODE_DISCOVERY_SATA_INIT_W_IOS (0x000E0000)
117 123#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE (0x000F0000)
118#define PL_LOGINFO_SUB_CODE_TARGET_BUS_RESET (0x00000120) 124#define PL_LOGINFO_CODE_CONFIG_PL_NOT_INITIALIZED (0x000F0001) /* PL not yet initialized, can't do config page req. */
119#define PL_LOGINFO_SUB_CODE_TRANSPORT_LAYER (0x00000130) /* Leave lower nibble (1-f) reserved. */ 125#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_PT (0x000F0100) /* Invalid Page Type */
120#define PL_LOGINFO_SUB_CODE_PORT_LAYER (0x00000140) /* Leave lower nibble (1-f) reserved. */ 126#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_NUM_PHYS (0x000F0200) /* Invalid Number of Phys */
121 127#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_NOT_IMP (0x000F0300) /* Case Not Handled */
122 128#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_NO_DEV (0x000F0400) /* No Device Found */
123#define PL_LOGINFO_SUB_CODE_INVALID_SGL (0x00000200) 129#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_FORM (0x000F0500) /* Invalid FORM */
124#define PL_LOGINFO_SUB_CODE_WRONG_REL_OFF_OR_FRAME_LENGTH (0x00000300) 130#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_PHY (0x000F0600) /* Invalid Phy */
125#define PL_LOGINFO_SUB_CODE_FRAME_XFER_ERROR (0x00000400) 131#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_NO_OWNER (0x000F0700) /* No Owner Found */
126#define PL_LOGINFO_SUB_CODE_TX_FM_CONNECTED_LOW (0x00000500) 132#define PL_LOGINFO_CODE_DSCVRY_SATA_INIT_TIMEOUT (0x00100000)
127#define PL_LOGINFO_SUB_CODE_SATA_NON_NCQ_RW_ERR_BIT_SET (0x00000600) 133#define PL_LOGINFO_CODE_RESET (0x00110000) /* See Sub-Codes below (PL_LOGINFO_SUB_CODE) */
128#define PL_LOGINFO_SUB_CODE_SATA_READ_LOG_RECEIVE_DATA_ERR (0x00000700) 134#define PL_LOGINFO_CODE_ABORT (0x00120000) /* See Sub-Codes below (PL_LOGINFO_SUB_CODE)*/
129#define PL_LOGINFO_SUB_CODE_SATA_NCQ_FAIL_ALL_CMDS_AFTR_ERR (0x00000800) 135#define PL_LOGINFO_CODE_IO_NOT_YET_EXECUTED (0x00130000)
130#define PL_LOGINFO_SUB_CODE_SATA_ERR_IN_RCV_SET_DEV_BIT_FIS (0x00000900) 136#define PL_LOGINFO_CODE_IO_EXECUTED (0x00140000)
131#define PL_LOGINFO_SUB_CODE_RX_FM_INVALID_MESSAGE (0x00000A00) 137#define PL_LOGINFO_CODE_PERS_RESV_OUT_NOT_AFFIL_OWNER (0x00150000)
132#define PL_LOGINFO_SUB_CODE_RX_CTX_MESSAGE_VALID_ERROR (0x00000B00) 138#define PL_LOGINFO_CODE_OPEN_TXDMA_ABORT (0x00160000)
133#define PL_LOGINFO_SUB_CODE_RX_FM_CURRENT_FRAME_ERROR (0x00000C00) 139#define PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY (0x00170000)
134#define PL_LOGINFO_SUB_CODE_SATA_LINK_DOWN (0x00000D00) 140#define PL_LOGINFO_CODE_IO_CANCELLED_DUE_TO_R_ERR (0x00180000)
135#define PL_LOGINFO_SUB_CODE_DISCOVERY_SATA_INIT_W_IOS (0x00000E00) 141#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE (0x00000100)
136#define PL_LOGINFO_SUB_CODE_DISCOVERY_REMOTE_SEP_RESET (0x00000E01) 142#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_NO_DEST_TIMEOUT (0x00000101)
137#define PL_LOGINFO_SUB_CODE_SECOND_OPEN (0x00000F00) 143#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_SATA_NEG_RATE_2HI (0x00000102)
138#define PL_LOGINFO_SUB_CODE_DSCVRY_SATA_INIT_TIMEOUT (0x00001000) 144#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_RATE_NOT_SUPPORTED (0x00000103)
139 145#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_BREAK (0x00000104)
140 146#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_ZONE_VIOLATION (0x00000114)
141#define PL_LOGINFO_CODE_ENCL_MGMT_SMP_FRAME_FAILURE (0x00200000) /* Can't get SMP Frame */ 147#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_ABANDON0 (0x00000114) /* Open Reject (Zone Violation) - available on SAS-2 devices */
142#define PL_LOGINFO_CODE_ENCL_MGMT_SMP_READ_ERROR (0x00200010) /* Error occured on SMP Read */ 148#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_ABANDON1 (0x00000115)
143#define PL_LOGINFO_CODE_ENCL_MGMT_SMP_WRITE_ERROR (0x00200020) /* Error occured on SMP Write */ 149#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_ABANDON2 (0x00000116)
144#define PL_LOGINFO_CODE_ENCL_MGMT_NOT_SUPPORTED_ON_ENCL (0x00200040) /* Encl Mgmt services not available for this WWID */ 150#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_ABANDON3 (0x00000117)
145#define PL_LOGINFO_CODE_ENCL_MGMT_ADDR_MODE_NOT_SUPPORTED (0x00200050) /* Address Mode not suppored */ 151#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_ORR_TIMEOUT (0x0000011A) /* Open Reject (Retry) Timeout */
146#define PL_LOGINFO_CODE_ENCL_MGMT_BAD_SLOT_NUM (0x00200060) /* Invalid Slot Number in SEP Msg */ 152#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_PATH_BLOCKED (0x0000011B)
147#define PL_LOGINFO_CODE_ENCL_MGMT_SGPIO_NOT_PRESENT (0x00200070) /* SGPIO not present/enabled */ 153#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_AWT_MAXED (0x0000011C) /* Arbitration Wait Timer Maxed */
148#define PL_LOGINFO_CODE_ENCL_MGMT_GPIO_NOT_CONFIGURED (0x00200080) /* GPIO not configured */ 154
149#define PL_LOGINFO_CODE_ENCL_MGMT_GPIO_FRAME_ERROR (0x00200090) /* GPIO can't allocate a frame */ 155#define PL_LOGINFO_SUB_CODE_TARGET_BUS_RESET (0x00000120)
150#define PL_LOGINFO_CODE_ENCL_MGMT_GPIO_CONFIG_PAGE_ERROR (0x002000A0) /* GPIO failed config page request */ 156#define PL_LOGINFO_SUB_CODE_TRANSPORT_LAYER (0x00000130) /* Leave lower nibble (1-f) reserved. */
151#define PL_LOGINFO_CODE_ENCL_MGMT_SES_FRAME_ALLOC_ERROR (0x002000B0) /* Can't get frame for SES command */ 157#define PL_LOGINFO_SUB_CODE_PORT_LAYER (0x00000140) /* Leave lower nibble (1-f) reserved. */
152#define PL_LOGINFO_CODE_ENCL_MGMT_SES_IO_ERROR (0x002000C0) /* I/O execution error */ 158
153#define PL_LOGINFO_CODE_ENCL_MGMT_SES_RETRIES_EXHAUSTED (0x002000D0) /* SEP I/O retries exhausted */ 159
154#define PL_LOGINFO_CODE_ENCL_MGMT_SMP_FRAME_ALLOC_ERROR (0x002000E0) /* Can't get frame for SMP command */ 160#define PL_LOGINFO_SUB_CODE_INVALID_SGL (0x00000200)
155 161#define PL_LOGINFO_SUB_CODE_WRONG_REL_OFF_OR_FRAME_LENGTH (0x00000300)
156#define PL_LOGINFO_DA_SEP_NOT_PRESENT (0x00200100) /* SEP not present when msg received */ 162#define PL_LOGINFO_SUB_CODE_FRAME_XFER_ERROR (0x00000400) /* Bits 0-3 encode Transport Status Register (offset 0x08) */
157#define PL_LOGINFO_DA_SEP_SINGLE_THREAD_ERROR (0x00200101) /* Can only accept 1 msg at a time */ 163 /* Bit 0 is Status Bit 0: FrameXferErr */
158#define PL_LOGINFO_DA_SEP_ISTWI_INTR_IN_IDLE_STATE (0x00200102) /* ISTWI interrupt recvd. while IDLE */ 164 /* Bit 1 & 2 are Status Bits 16 and 17: FrameXmitErrStatus */
159#define PL_LOGINFO_DA_SEP_RECEIVED_NACK_FROM_SLAVE (0x00200103) /* SEP NACK'd, it is busy */ 165 /* Bit 3 is Status Bit 18 WriteDataLenghtGTDataLengthErr */
160#define PL_LOGINFO_DA_SEP_DID_NOT_RECEIVE_ACK (0x00200104) /* SEP didn't rcv. ACK (Last Rcvd Bit = 1) */ 166
161#define PL_LOGINFO_DA_SEP_BAD_STATUS_HDR_CHKSUM (0x00200105) /* SEP stopped or sent bad chksum in Hdr */ 167#define PL_LOGINFO_SUB_CODE_TX_FM_CONNECTED_LOW (0x00000500)
162#define PL_LOGINFO_DA_SEP_STOP_ON_DATA (0x00200106) /* SEP stopped while transfering data */ 168#define PL_LOGINFO_SUB_CODE_SATA_NON_NCQ_RW_ERR_BIT_SET (0x00000600)
163#define PL_LOGINFO_DA_SEP_STOP_ON_SENSE_DATA (0x00200107) /* SEP stopped while transfering sense data */ 169#define PL_LOGINFO_SUB_CODE_SATA_READ_LOG_RECEIVE_DATA_ERR (0x00000700)
164#define PL_LOGINFO_DA_SEP_UNSUPPORTED_SCSI_STATUS_1 (0x00200108) /* SEP returned unknown scsi status */ 170#define PL_LOGINFO_SUB_CODE_SATA_NCQ_FAIL_ALL_CMDS_AFTR_ERR (0x00000800)
165#define PL_LOGINFO_DA_SEP_UNSUPPORTED_SCSI_STATUS_2 (0x00200109) /* SEP returned unknown scsi status */ 171#define PL_LOGINFO_SUB_CODE_SATA_ERR_IN_RCV_SET_DEV_BIT_FIS (0x00000900)
166#define PL_LOGINFO_DA_SEP_CHKSUM_ERROR_AFTER_STOP (0x0020010A) /* SEP returned bad chksum after STOP */ 172#define PL_LOGINFO_SUB_CODE_RX_FM_INVALID_MESSAGE (0x00000A00)
167#define PL_LOGINFO_DA_SEP_CHKSUM_ERROR_AFTER_STOP_GETDATA (0x0020010B) /* SEP returned bad chksum after STOP while gettin data*/ 173#define PL_LOGINFO_SUB_CODE_RX_CTX_MESSAGE_VALID_ERROR (0x00000B00)
168#define PL_LOGINFO_DA_SEP_UNSUPPORTED_COMMAND (0x0020010C) /* SEP doesn't support CDB opcode */ 174#define PL_LOGINFO_SUB_CODE_RX_FM_CURRENT_FRAME_ERROR (0x00000C00)
175#define PL_LOGINFO_SUB_CODE_SATA_LINK_DOWN (0x00000D00)
176#define PL_LOGINFO_SUB_CODE_DISCOVERY_SATA_INIT_W_IOS (0x00000E00)
177#define PL_LOGINFO_SUB_CODE_DISCOVERY_REMOTE_SEP_RESET (0x00000E01)
178#define PL_LOGINFO_SUB_CODE_SECOND_OPEN (0x00000F00)
179#define PL_LOGINFO_SUB_CODE_DSCVRY_SATA_INIT_TIMEOUT (0x00001000)
180
181#define PL_LOGINFO_CODE_ENCL_MGMT_SMP_FRAME_FAILURE (0x00200000) /* Can't get SMP Frame */
182#define PL_LOGINFO_CODE_ENCL_MGMT_SMP_READ_ERROR (0x00200010) /* Error occured on SMP Read */
183#define PL_LOGINFO_CODE_ENCL_MGMT_SMP_WRITE_ERROR (0x00200020) /* Error occured on SMP Write */
184#define PL_LOGINFO_CODE_ENCL_MGMT_NOT_SUPPORTED_ON_ENCL (0x00200040) /* Encl Mgmt services not available for this WWID */
185#define PL_LOGINFO_CODE_ENCL_MGMT_ADDR_MODE_NOT_SUPPORTED (0x00200050) /* Address Mode not suppored */
186#define PL_LOGINFO_CODE_ENCL_MGMT_BAD_SLOT_NUM (0x00200060) /* Invalid Slot Number in SEP Msg */
187#define PL_LOGINFO_CODE_ENCL_MGMT_SGPIO_NOT_PRESENT (0x00200070) /* SGPIO not present/enabled */
188#define PL_LOGINFO_CODE_ENCL_MGMT_GPIO_NOT_CONFIGURED (0x00200080) /* GPIO not configured */
189#define PL_LOGINFO_CODE_ENCL_MGMT_GPIO_FRAME_ERROR (0x00200090) /* GPIO can't allocate a frame */
190#define PL_LOGINFO_CODE_ENCL_MGMT_GPIO_CONFIG_PAGE_ERROR (0x002000A0) /* GPIO failed config page request */
191#define PL_LOGINFO_CODE_ENCL_MGMT_SES_FRAME_ALLOC_ERROR (0x002000B0) /* Can't get frame for SES command */
192#define PL_LOGINFO_CODE_ENCL_MGMT_SES_IO_ERROR (0x002000C0) /* I/O execution error */
193#define PL_LOGINFO_CODE_ENCL_MGMT_SES_RETRIES_EXHAUSTED (0x002000D0) /* SEP I/O retries exhausted */
194#define PL_LOGINFO_CODE_ENCL_MGMT_SMP_FRAME_ALLOC_ERROR (0x002000E0) /* Can't get frame for SMP command */
195
196#define PL_LOGINFO_DA_SEP_NOT_PRESENT (0x00200100) /* SEP not present when msg received */
197#define PL_LOGINFO_DA_SEP_SINGLE_THREAD_ERROR (0x00200101) /* Can only accept 1 msg at a time */
198#define PL_LOGINFO_DA_SEP_ISTWI_INTR_IN_IDLE_STATE (0x00200102) /* ISTWI interrupt recvd. while IDLE */
199#define PL_LOGINFO_DA_SEP_RECEIVED_NACK_FROM_SLAVE (0x00200103) /* SEP NACK'd, it is busy */
200#define PL_LOGINFO_DA_SEP_DID_NOT_RECEIVE_ACK (0x00200104) /* SEP didn't rcv. ACK (Last Rcvd Bit = 1) */
201#define PL_LOGINFO_DA_SEP_BAD_STATUS_HDR_CHKSUM (0x00200105) /* SEP stopped or sent bad chksum in Hdr */
202#define PL_LOGINFO_DA_SEP_STOP_ON_DATA (0x00200106) /* SEP stopped while transfering data */
203#define PL_LOGINFO_DA_SEP_STOP_ON_SENSE_DATA (0x00200107) /* SEP stopped while transfering sense data */
204#define PL_LOGINFO_DA_SEP_UNSUPPORTED_SCSI_STATUS_1 (0x00200108) /* SEP returned unknown scsi status */
205#define PL_LOGINFO_DA_SEP_UNSUPPORTED_SCSI_STATUS_2 (0x00200109) /* SEP returned unknown scsi status */
206#define PL_LOGINFO_DA_SEP_CHKSUM_ERROR_AFTER_STOP (0x0020010A) /* SEP returned bad chksum after STOP */
207#define PL_LOGINFO_DA_SEP_CHKSUM_ERROR_AFTER_STOP_GETDATA (0x0020010B) /* SEP returned bad chksum after STOP while gettin data*/
208#define PL_LOGINFO_DA_SEP_UNSUPPORTED_COMMAND (0x0020010C) /* SEP doesn't support CDB opcode f/w location 1 */
209#define PL_LOGINFO_DA_SEP_UNSUPPORTED_COMMAND_2 (0x0020010D) /* SEP doesn't support CDB opcode f/w location 2 */
210#define PL_LOGINFO_DA_SEP_UNSUPPORTED_COMMAND_3 (0x0020010E) /* SEP doesn't support CDB opcode f/w location 3 */
169 211
170 212
171/****************************************************************************/ 213/****************************************************************************/
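The FRAME_XFER_ERROR sub-code added above reserves its low nibble for bits mirrored from the Transport Status Register, per the comments in the hunk. A minimal decode sketch of that nibble follows; only the three bit assignments stated in the comments are taken from the header, and the exact packing of the nibble into the sub-code value is an assumption made for illustration.

#include <stdio.h>
#include <stdint.h>

/*
 * Illustrative decode of the low nibble carried alongside
 * PL_LOGINFO_SUB_CODE_FRAME_XFER_ERROR (0x00000400).
 */
static void decode_frame_xfer_error(uint32_t sub_code)
{
	uint32_t nibble = sub_code & 0xF;	/* bits 0-3 of the sub-code */

	if (nibble & 0x1)
		printf("Transport Status bit 0: FrameXferErr\n");
	if (nibble & 0x6)
		printf("Transport Status bits 16/17: FrameXmitErrStatus = %u\n",
		       (nibble >> 1) & 0x3);
	if (nibble & 0x8)
		printf("Transport Status bit 18: write data length > data length\n");
}

int main(void)
{
	decode_frame_xfer_error(0x00000409);	/* made-up example value */
	return 0;
}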
diff --git a/drivers/message/fusion/lsi/mpi_sas.h b/drivers/message/fusion/lsi/mpi_sas.h
index 50b8f0a8f456..8e990a0fa7a2 100644
--- a/drivers/message/fusion/lsi/mpi_sas.h
+++ b/drivers/message/fusion/lsi/mpi_sas.h
@@ -1,12 +1,12 @@
1/* 1/*
2 * Copyright (c) 2004 LSI Logic Corporation. 2 * Copyright (c) 2004-2006 LSI Logic Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi_sas.h 5 * Name: mpi_sas.h
6 * Title: MPI Serial Attached SCSI structures and definitions 6 * Title: MPI Serial Attached SCSI structures and definitions
7 * Creation Date: August 19, 2004 7 * Creation Date: August 19, 2004
8 * 8 *
9 * mpi_sas.h Version: 01.05.03 9 * mpi_sas.h Version: 01.05.04
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -21,6 +21,8 @@
21 * and Remove Device operations to SAS IO Unit Control. 21 * and Remove Device operations to SAS IO Unit Control.
22 * Added DevHandle field to SAS IO Unit Control request and 22 * Added DevHandle field to SAS IO Unit Control request and
23 * reply. 23 * reply.
24 * 10-11-06 01.05.04 Fixed the name of a define for Operation field of SAS IO
25 * Unit Control request.
24 * -------------------------------------------------------------------------- 26 * --------------------------------------------------------------------------
25 */ 27 */
26 28
@@ -237,7 +239,8 @@ typedef struct _MSG_SAS_IOUNIT_CONTROL_REQUEST
237#define MPI_SAS_OP_SEND_PRIMITIVE (0x0A) 239#define MPI_SAS_OP_SEND_PRIMITIVE (0x0A)
238#define MPI_SAS_OP_FORCE_FULL_DISCOVERY (0x0B) 240#define MPI_SAS_OP_FORCE_FULL_DISCOVERY (0x0B)
239#define MPI_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL (0x0C) 241#define MPI_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL (0x0C)
240#define MPI_SAS_OP_TRANSMIT_REMOVE_DEVICE (0x0D) 242#define MPI_SAS_OP_TRANSMIT_REMOVE_DEVICE (0x0D) /* obsolete name */
243#define MPI_SAS_OP_REMOVE_DEVICE (0x0D)
241 244
242/* values for the PrimFlags field */ 245/* values for the PrimFlags field */
243#define MPI_SAS_PRIMFLAGS_SINGLE (0x08) 246#define MPI_SAS_PRIMFLAGS_SINGLE (0x08)
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 68103e508db7..88e061d13d0b 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -667,12 +667,30 @@ NCR_700_chip_setup(struct Scsi_Host *host)
667 __u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP); 667 __u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
668 668
669 if(hostdata->chip710) { 669 if(hostdata->chip710) {
670 __u8 burst_disable = hostdata->burst_disable 670 __u8 burst_disable = 0;
671 ? BURST_DISABLE : 0; 671 __u8 burst_length = 0;
672
673 switch (hostdata->burst_length) {
674 case 1:
675 burst_length = BURST_LENGTH_1;
676 break;
677 case 2:
678 burst_length = BURST_LENGTH_2;
679 break;
680 case 4:
681 burst_length = BURST_LENGTH_4;
682 break;
683 case 8:
684 burst_length = BURST_LENGTH_8;
685 break;
686 default:
687 burst_disable = BURST_DISABLE;
688 break;
689 }
672 dcntl_extra = COMPAT_700_MODE; 690 dcntl_extra = COMPAT_700_MODE;
673 691
674 NCR_700_writeb(dcntl_extra, host, DCNTL_REG); 692 NCR_700_writeb(dcntl_extra, host, DCNTL_REG);
675 NCR_700_writeb(BURST_LENGTH_8 | hostdata->dmode_extra, 693 NCR_700_writeb(burst_length | hostdata->dmode_extra,
676 host, DMODE_710_REG); 694 host, DMODE_710_REG);
677 NCR_700_writeb(burst_disable | (hostdata->differential ? 695 NCR_700_writeb(burst_disable | (hostdata->differential ?
678 DIFF : 0), host, CTEST7_REG); 696 DIFF : 0), host, CTEST7_REG);
diff --git a/drivers/scsi/53c700.h b/drivers/scsi/53c700.h
index f38822db4210..841e1bb27d57 100644
--- a/drivers/scsi/53c700.h
+++ b/drivers/scsi/53c700.h
@@ -203,7 +203,7 @@ struct NCR_700_Host_Parameters {
203 __u32 force_le_on_be:1; 203 __u32 force_le_on_be:1;
204#endif 204#endif
205 __u32 chip710:1; /* set if really a 710 not 700 */ 205 __u32 chip710:1; /* set if really a 710 not 700 */
206 __u32 burst_disable:1; /* set to 1 to disable 710 bursting */ 206 __u32 burst_length:4; /* set to 0 to disable 710 bursting */
207 207
208 /* NOTHING BELOW HERE NEEDS ALTERING */ 208 /* NOTHING BELOW HERE NEEDS ALTERING */
209 __u32 fast:1; /* if we can alter the SCSI bus clock 209 __u32 fast:1; /* if we can alter the SCSI bus clock
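Taken together, the two 53c700 hunks above replace the single burst_disable flag with a four-bit burst_length field: values of 1, 2, 4 or 8 select the matching BURST_LENGTH_* DMODE encoding, and anything else (including the new default of 0) falls back to disabling bursts through CTEST7. A standalone sketch of that mapping is below; the numeric encodings are placeholders, not the real bit values from 53c700.h.

#include <stdio.h>

#define BURST_DISABLE	0x80	/* placeholder CTEST7 bit */
#define BURST_LENGTH_1	0x00	/* placeholder DMODE encodings */
#define BURST_LENGTH_2	0x40
#define BURST_LENGTH_4	0x80
#define BURST_LENGTH_8	0xC0

static void pick_burst(unsigned int burst_length,
		       unsigned char *dmode, unsigned char *ctest7)
{
	*dmode = 0;
	*ctest7 = 0;

	switch (burst_length) {
	case 1: *dmode = BURST_LENGTH_1; break;
	case 2: *dmode = BURST_LENGTH_2; break;
	case 4: *dmode = BURST_LENGTH_4; break;
	case 8: *dmode = BURST_LENGTH_8; break;
	default: *ctest7 = BURST_DISABLE; break;	/* 0 disables bursting */
	}
}

int main(void)
{
	unsigned char dmode, ctest7;

	pick_burst(8, &dmode, &ctest7);	/* what NCR_D700.c now requests */
	printf("dmode=%#x ctest7=%#x\n", dmode, ctest7);
	return 0;
}

Boards that previously relied on the hard-coded BURST_LENGTH_8 keep their behaviour by setting burst_length = 8 explicitly, as the NCR_D700.c hunk further down does.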
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 7869c34a4a3e..5bf3f07870ba 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -973,6 +973,15 @@ config SCSI_LASI700
973 many PA-RISC workstations & servers. If you do not know whether you 973 many PA-RISC workstations & servers. If you do not know whether you
974 have a Lasi chip, it is safe to say "Y" here. 974 have a Lasi chip, it is safe to say "Y" here.
975 975
976config SCSI_SNI_53C710
977 tristate "SNI RM SCSI support for 53c710"
978 depends on SNI_RM && SCSI
979 select SCSI_SPI_ATTRS
980 select 53C700_LE_ON_BE
981 help
982 This is a driver for the onboard SCSI controller found in older
983 SNI RM workstations & servers.
984
976config 53C700_LE_ON_BE 985config 53C700_LE_ON_BE
977 bool 986 bool
978 depends on SCSI_LASI700 987 depends on SCSI_LASI700
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index bd7c9888f7f4..79ecf4ebe6eb 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -124,6 +124,7 @@ obj-$(CONFIG_JAZZ_ESP) += NCR53C9x.o jazz_esp.o
124obj-$(CONFIG_SUN3X_ESP) += NCR53C9x.o sun3x_esp.o 124obj-$(CONFIG_SUN3X_ESP) += NCR53C9x.o sun3x_esp.o
125obj-$(CONFIG_SCSI_FCAL) += fcal.o 125obj-$(CONFIG_SCSI_FCAL) += fcal.o
126obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o 126obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o
127obj-$(CONFIG_SCSI_SNI_53C710) += 53c700.o sni_53c710.o
127obj-$(CONFIG_SCSI_NSP32) += nsp32.o 128obj-$(CONFIG_SCSI_NSP32) += nsp32.o
128obj-$(CONFIG_SCSI_IPR) += ipr.o 129obj-$(CONFIG_SCSI_IPR) += ipr.o
129obj-$(CONFIG_SCSI_SRP) += libsrp.o 130obj-$(CONFIG_SCSI_SRP) += libsrp.o
diff --git a/drivers/scsi/NCR_D700.c b/drivers/scsi/NCR_D700.c
index 9859cd17fc57..f12864abed2f 100644
--- a/drivers/scsi/NCR_D700.c
+++ b/drivers/scsi/NCR_D700.c
@@ -200,6 +200,7 @@ NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq,
200 hostdata->base = ioport_map(region, 64); 200 hostdata->base = ioport_map(region, 64);
201 hostdata->differential = (((1<<siop) & differential) != 0); 201 hostdata->differential = (((1<<siop) & differential) != 0);
202 hostdata->clock = NCR_D700_CLOCK_MHZ; 202 hostdata->clock = NCR_D700_CLOCK_MHZ;
203 hostdata->burst_length = 8;
203 204
204 /* and register the siop */ 205 /* and register the siop */
205 host = NCR_700_detect(&NCR_D700_driver_template, hostdata, p->dev); 206 host = NCR_700_detect(&NCR_D700_driver_template, hostdata, p->dev);
diff --git a/drivers/scsi/aacraid/Makefile b/drivers/scsi/aacraid/Makefile
index 28d133a3094f..f1cca4ee5410 100644
--- a/drivers/scsi/aacraid/Makefile
+++ b/drivers/scsi/aacraid/Makefile
@@ -3,6 +3,6 @@
3obj-$(CONFIG_SCSI_AACRAID) := aacraid.o 3obj-$(CONFIG_SCSI_AACRAID) := aacraid.o
4 4
5aacraid-objs := linit.o aachba.o commctrl.o comminit.o commsup.o \ 5aacraid-objs := linit.o aachba.o commctrl.o comminit.o commsup.o \
6 dpcsup.o rx.o sa.o rkt.o 6 dpcsup.o rx.o sa.o rkt.o nark.o
7 7
8EXTRA_CFLAGS := -Idrivers/scsi 8EXTRA_CFLAGS := -Idrivers/scsi
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 426cd6f49f5d..ddb33b06e0ef 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -170,9 +170,9 @@ int acbsize = -1;
170module_param(acbsize, int, S_IRUGO|S_IWUSR); 170module_param(acbsize, int, S_IRUGO|S_IWUSR);
171MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB) size. Valid values are 512, 2048, 4096 and 8192. Default is to use suggestion from Firmware."); 171MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB) size. Valid values are 512, 2048, 4096 and 8192. Default is to use suggestion from Firmware.");
172 172
173int expose_physicals = 0; 173int expose_physicals = -1;
174module_param(expose_physicals, int, S_IRUGO|S_IWUSR); 174module_param(expose_physicals, int, S_IRUGO|S_IWUSR);
175MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays. 0=off, 1=on"); 175MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays. -1=protect 0=off, 1=on");
176/** 176/**
177 * aac_get_config_status - check the adapter configuration 177 * aac_get_config_status - check the adapter configuration
178 * @common: adapter to query 178 * @common: adapter to query
@@ -706,6 +706,309 @@ static void set_sense(u8 *sense_buf, u8 sense_key, u8 sense_code,
706 } 706 }
707} 707}
708 708
709static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
710{
711 if (lba & 0xffffffff00000000LL) {
712 int cid = scmd_id(cmd);
713 dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
714 cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
715 SAM_STAT_CHECK_CONDITION;
716 set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
717 HARDWARE_ERROR,
718 SENCODE_INTERNAL_TARGET_FAILURE,
719 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
720 0, 0);
721 memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
722 (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(cmd->sense_buffer))
723 ? sizeof(cmd->sense_buffer)
724 : sizeof(dev->fsa_dev[cid].sense_data));
725 cmd->scsi_done(cmd);
726 return 1;
727 }
728 return 0;
729}
730
731static int aac_bounds_64(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
732{
733 return 0;
734}
735
736static void io_callback(void *context, struct fib * fibptr);
737
738static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
739{
740 u16 fibsize;
741 struct aac_raw_io *readcmd;
742 aac_fib_init(fib);
743 readcmd = (struct aac_raw_io *) fib_data(fib);
744 readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
745 readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
746 readcmd->count = cpu_to_le32(count<<9);
747 readcmd->cid = cpu_to_le16(scmd_id(cmd));
748 readcmd->flags = cpu_to_le16(1);
749 readcmd->bpTotal = 0;
750 readcmd->bpComplete = 0;
751
752 aac_build_sgraw(cmd, &readcmd->sg);
753 fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(readcmd->sg.count) - 1) * sizeof (struct sgentryraw));
754 BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
755 /*
756 * Now send the Fib to the adapter
757 */
758 return aac_fib_send(ContainerRawIo,
759 fib,
760 fibsize,
761 FsaNormal,
762 0, 1,
763 (fib_callback) io_callback,
764 (void *) cmd);
765}
766
767static int aac_read_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
768{
769 u16 fibsize;
770 struct aac_read64 *readcmd;
771 aac_fib_init(fib);
772 readcmd = (struct aac_read64 *) fib_data(fib);
773 readcmd->command = cpu_to_le32(VM_CtHostRead64);
774 readcmd->cid = cpu_to_le16(scmd_id(cmd));
775 readcmd->sector_count = cpu_to_le16(count);
776 readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
777 readcmd->pad = 0;
778 readcmd->flags = 0;
779
780 aac_build_sg64(cmd, &readcmd->sg);
781 fibsize = sizeof(struct aac_read64) +
782 ((le32_to_cpu(readcmd->sg.count) - 1) *
783 sizeof (struct sgentry64));
784 BUG_ON (fibsize > (fib->dev->max_fib_size -
785 sizeof(struct aac_fibhdr)));
786 /*
787 * Now send the Fib to the adapter
788 */
789 return aac_fib_send(ContainerCommand64,
790 fib,
791 fibsize,
792 FsaNormal,
793 0, 1,
794 (fib_callback) io_callback,
795 (void *) cmd);
796}
797
798static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
799{
800 u16 fibsize;
801 struct aac_read *readcmd;
802 aac_fib_init(fib);
803 readcmd = (struct aac_read *) fib_data(fib);
804 readcmd->command = cpu_to_le32(VM_CtBlockRead);
805 readcmd->cid = cpu_to_le16(scmd_id(cmd));
806 readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
807 readcmd->count = cpu_to_le32(count * 512);
808
809 aac_build_sg(cmd, &readcmd->sg);
810 fibsize = sizeof(struct aac_read) +
811 ((le32_to_cpu(readcmd->sg.count) - 1) *
812 sizeof (struct sgentry));
813 BUG_ON (fibsize > (fib->dev->max_fib_size -
814 sizeof(struct aac_fibhdr)));
815 /*
816 * Now send the Fib to the adapter
817 */
818 return aac_fib_send(ContainerCommand,
819 fib,
820 fibsize,
821 FsaNormal,
822 0, 1,
823 (fib_callback) io_callback,
824 (void *) cmd);
825}
826
827static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
828{
829 u16 fibsize;
830 struct aac_raw_io *writecmd;
831 aac_fib_init(fib);
832 writecmd = (struct aac_raw_io *) fib_data(fib);
833 writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
834 writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
835 writecmd->count = cpu_to_le32(count<<9);
836 writecmd->cid = cpu_to_le16(scmd_id(cmd));
837 writecmd->flags = 0;
838 writecmd->bpTotal = 0;
839 writecmd->bpComplete = 0;
840
841 aac_build_sgraw(cmd, &writecmd->sg);
842 fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(writecmd->sg.count) - 1) * sizeof (struct sgentryraw));
843 BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
844 /*
845 * Now send the Fib to the adapter
846 */
847 return aac_fib_send(ContainerRawIo,
848 fib,
849 fibsize,
850 FsaNormal,
851 0, 1,
852 (fib_callback) io_callback,
853 (void *) cmd);
854}
855
856static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
857{
858 u16 fibsize;
859 struct aac_write64 *writecmd;
860 aac_fib_init(fib);
861 writecmd = (struct aac_write64 *) fib_data(fib);
862 writecmd->command = cpu_to_le32(VM_CtHostWrite64);
863 writecmd->cid = cpu_to_le16(scmd_id(cmd));
864 writecmd->sector_count = cpu_to_le16(count);
865 writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
866 writecmd->pad = 0;
867 writecmd->flags = 0;
868
869 aac_build_sg64(cmd, &writecmd->sg);
870 fibsize = sizeof(struct aac_write64) +
871 ((le32_to_cpu(writecmd->sg.count) - 1) *
872 sizeof (struct sgentry64));
873 BUG_ON (fibsize > (fib->dev->max_fib_size -
874 sizeof(struct aac_fibhdr)));
875 /*
876 * Now send the Fib to the adapter
877 */
878 return aac_fib_send(ContainerCommand64,
879 fib,
880 fibsize,
881 FsaNormal,
882 0, 1,
883 (fib_callback) io_callback,
884 (void *) cmd);
885}
886
887static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
888{
889 u16 fibsize;
890 struct aac_write *writecmd;
891 aac_fib_init(fib);
892 writecmd = (struct aac_write *) fib_data(fib);
893 writecmd->command = cpu_to_le32(VM_CtBlockWrite);
894 writecmd->cid = cpu_to_le16(scmd_id(cmd));
895 writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
896 writecmd->count = cpu_to_le32(count * 512);
897 writecmd->sg.count = cpu_to_le32(1);
898 /* ->stable is not used - it did mean which type of write */
899
900 aac_build_sg(cmd, &writecmd->sg);
901 fibsize = sizeof(struct aac_write) +
902 ((le32_to_cpu(writecmd->sg.count) - 1) *
903 sizeof (struct sgentry));
904 BUG_ON (fibsize > (fib->dev->max_fib_size -
905 sizeof(struct aac_fibhdr)));
906 /*
907 * Now send the Fib to the adapter
908 */
909 return aac_fib_send(ContainerCommand,
910 fib,
911 fibsize,
912 FsaNormal,
913 0, 1,
914 (fib_callback) io_callback,
915 (void *) cmd);
916}
917
918static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd)
919{
920 struct aac_srb * srbcmd;
921 u32 flag;
922 u32 timeout;
923
924 aac_fib_init(fib);
925 switch(cmd->sc_data_direction){
926 case DMA_TO_DEVICE:
927 flag = SRB_DataOut;
928 break;
929 case DMA_BIDIRECTIONAL:
930 flag = SRB_DataIn | SRB_DataOut;
931 break;
932 case DMA_FROM_DEVICE:
933 flag = SRB_DataIn;
934 break;
935 case DMA_NONE:
936 default: /* shuts up some versions of gcc */
937 flag = SRB_NoDataXfer;
938 break;
939 }
940
941 srbcmd = (struct aac_srb*) fib_data(fib);
942 srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
943 srbcmd->channel = cpu_to_le32(aac_logical_to_phys(scmd_channel(cmd)));
944 srbcmd->id = cpu_to_le32(scmd_id(cmd));
945 srbcmd->lun = cpu_to_le32(cmd->device->lun);
946 srbcmd->flags = cpu_to_le32(flag);
947 timeout = cmd->timeout_per_command/HZ;
948 if (timeout == 0)
949 timeout = 1;
950 srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds
951 srbcmd->retry_limit = 0; /* Obsolete parameter */
952 srbcmd->cdb_size = cpu_to_le32(cmd->cmd_len);
953 return srbcmd;
954}
955
956static void aac_srb_callback(void *context, struct fib * fibptr);
957
958static int aac_scsi_64(struct fib * fib, struct scsi_cmnd * cmd)
959{
960 u16 fibsize;
961 struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
962
963 aac_build_sg64(cmd, (struct sgmap64*) &srbcmd->sg);
964 srbcmd->count = cpu_to_le32(cmd->request_bufflen);
965
966 memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
967 memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
968 /*
969 * Build Scatter/Gather list
970 */
971 fibsize = sizeof (struct aac_srb) - sizeof (struct sgentry) +
972 ((le32_to_cpu(srbcmd->sg.count) & 0xff) *
973 sizeof (struct sgentry64));
974 BUG_ON (fibsize > (fib->dev->max_fib_size -
975 sizeof(struct aac_fibhdr)));
976
977 /*
978 * Now send the Fib to the adapter
979 */
980 return aac_fib_send(ScsiPortCommand64, fib,
981 fibsize, FsaNormal, 0, 1,
982 (fib_callback) aac_srb_callback,
983 (void *) cmd);
984}
985
986static int aac_scsi_32(struct fib * fib, struct scsi_cmnd * cmd)
987{
988 u16 fibsize;
989 struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
990
991 aac_build_sg(cmd, (struct sgmap*)&srbcmd->sg);
992 srbcmd->count = cpu_to_le32(cmd->request_bufflen);
993
994 memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
995 memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
996 /*
997 * Build Scatter/Gather list
998 */
999 fibsize = sizeof (struct aac_srb) +
1000 (((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) *
1001 sizeof (struct sgentry));
1002 BUG_ON (fibsize > (fib->dev->max_fib_size -
1003 sizeof(struct aac_fibhdr)));
1004
1005 /*
1006 * Now send the Fib to the adapter
1007 */
1008 return aac_fib_send(ScsiPortCommand, fib, fibsize, FsaNormal, 0, 1,
1009 (fib_callback) aac_srb_callback, (void *) cmd);
1010}
1011
709int aac_get_adapter_info(struct aac_dev* dev) 1012int aac_get_adapter_info(struct aac_dev* dev)
710{ 1013{
711 struct fib* fibptr; 1014 struct fib* fibptr;
@@ -874,14 +1177,27 @@ int aac_get_adapter_info(struct aac_dev* dev)
874 } 1177 }
875 } 1178 }
876 /* 1179 /*
877 * 57 scatter gather elements 1180 * Deal with configuring for the individualized limits of each packet
1181 * interface.
878 */ 1182 */
879 if (!(dev->raw_io_interface)) { 1183 dev->a_ops.adapter_scsi = (dev->dac_support)
1184 ? aac_scsi_64
1185 : aac_scsi_32;
1186 if (dev->raw_io_interface) {
1187 dev->a_ops.adapter_bounds = (dev->raw_io_64)
1188 ? aac_bounds_64
1189 : aac_bounds_32;
1190 dev->a_ops.adapter_read = aac_read_raw_io;
1191 dev->a_ops.adapter_write = aac_write_raw_io;
1192 } else {
1193 dev->a_ops.adapter_bounds = aac_bounds_32;
880 dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size - 1194 dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size -
881 sizeof(struct aac_fibhdr) - 1195 sizeof(struct aac_fibhdr) -
882 sizeof(struct aac_write) + sizeof(struct sgentry)) / 1196 sizeof(struct aac_write) + sizeof(struct sgentry)) /
883 sizeof(struct sgentry); 1197 sizeof(struct sgentry);
884 if (dev->dac_support) { 1198 if (dev->dac_support) {
1199 dev->a_ops.adapter_read = aac_read_block64;
1200 dev->a_ops.adapter_write = aac_write_block64;
885 /* 1201 /*
886 * 38 scatter gather elements 1202 * 38 scatter gather elements
887 */ 1203 */
@@ -891,6 +1207,9 @@ int aac_get_adapter_info(struct aac_dev* dev)
891 sizeof(struct aac_write64) + 1207 sizeof(struct aac_write64) +
892 sizeof(struct sgentry64)) / 1208 sizeof(struct sgentry64)) /
893 sizeof(struct sgentry64); 1209 sizeof(struct sgentry64);
1210 } else {
1211 dev->a_ops.adapter_read = aac_read_block;
1212 dev->a_ops.adapter_write = aac_write_block;
894 } 1213 }
895 dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT; 1214 dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
896 if(!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) { 1215 if(!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
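The hunk above is the heart of the aacraid rework: the read, write, bounds-check and SCSI pass-through handlers are bound once in aac_get_adapter_info, based on the interface the firmware negotiated, instead of branching on raw_io_interface and dac_support inside every I/O path. A minimal sketch of that dispatch-table idea follows; the types and handler names are simplified stand-ins, not the driver's real ones.

#include <stdio.h>

struct adapter { int raw_io_interface, raw_io_64, dac_support; };

static int read_raw_io(void)  { return puts("raw I/O read"); }
static int read_block64(void) { return puts("64-bit block read"); }
static int read_block(void)   { return puts("32-bit block read"); }

struct a_ops { int (*adapter_read)(void); };

/* Choose the handler once, mirroring the selection in the hunk above. */
static void bind_ops(const struct adapter *dev, struct a_ops *ops)
{
	if (dev->raw_io_interface)
		ops->adapter_read = read_raw_io;
	else if (dev->dac_support)
		ops->adapter_read = read_block64;
	else
		ops->adapter_read = read_block;
}

int main(void)
{
	struct adapter dev = { .raw_io_interface = 0, .dac_support = 1 };
	struct a_ops ops;

	bind_ops(&dev, &ops);
	ops.adapter_read();	/* later I/O just calls through the table */
	return 0;
}

The later hunks in aac_read, aac_write and aac_send_srb_fib then shrink to single aac_adapter_read/write/scsi calls through this table.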
@@ -1004,8 +1323,6 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
1004 u64 lba; 1323 u64 lba;
1005 u32 count; 1324 u32 count;
1006 int status; 1325 int status;
1007
1008 u16 fibsize;
1009 struct aac_dev *dev; 1326 struct aac_dev *dev;
1010 struct fib * cmd_fibcontext; 1327 struct fib * cmd_fibcontext;
1011 1328
@@ -1059,23 +1376,8 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
1059 } 1376 }
1060 dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n", 1377 dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
1061 smp_processor_id(), (unsigned long long)lba, jiffies)); 1378 smp_processor_id(), (unsigned long long)lba, jiffies));
1062 if ((!(dev->raw_io_interface) || !(dev->raw_io_64)) && 1379 if (aac_adapter_bounds(dev,scsicmd,lba))
1063 (lba & 0xffffffff00000000LL)) {
1064 dprintk((KERN_DEBUG "aac_read: Illegal lba\n"));
1065 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
1066 SAM_STAT_CHECK_CONDITION;
1067 set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
1068 HARDWARE_ERROR,
1069 SENCODE_INTERNAL_TARGET_FAILURE,
1070 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
1071 0, 0);
1072 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
1073 (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
1074 ? sizeof(scsicmd->sense_buffer)
1075 : sizeof(dev->fsa_dev[cid].sense_data));
1076 scsicmd->scsi_done(scsicmd);
1077 return 0; 1380 return 0;
1078 }
1079 /* 1381 /*
1080 * Alocate and initialize a Fib 1382 * Alocate and initialize a Fib
1081 */ 1383 */
@@ -1083,85 +1385,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
1083 return -1; 1385 return -1;
1084 } 1386 }
1085 1387
1086 aac_fib_init(cmd_fibcontext); 1388 status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count);
1087
1088 if (dev->raw_io_interface) {
1089 struct aac_raw_io *readcmd;
1090 readcmd = (struct aac_raw_io *) fib_data(cmd_fibcontext);
1091 readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
1092 readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
1093 readcmd->count = cpu_to_le32(count<<9);
1094 readcmd->cid = cpu_to_le16(cid);
1095 readcmd->flags = cpu_to_le16(1);
1096 readcmd->bpTotal = 0;
1097 readcmd->bpComplete = 0;
1098
1099 aac_build_sgraw(scsicmd, &readcmd->sg);
1100 fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(readcmd->sg.count) - 1) * sizeof (struct sgentryraw));
1101 BUG_ON(fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)));
1102 /*
1103 * Now send the Fib to the adapter
1104 */
1105 status = aac_fib_send(ContainerRawIo,
1106 cmd_fibcontext,
1107 fibsize,
1108 FsaNormal,
1109 0, 1,
1110 (fib_callback) io_callback,
1111 (void *) scsicmd);
1112 } else if (dev->dac_support == 1) {
1113 struct aac_read64 *readcmd;
1114 readcmd = (struct aac_read64 *) fib_data(cmd_fibcontext);
1115 readcmd->command = cpu_to_le32(VM_CtHostRead64);
1116 readcmd->cid = cpu_to_le16(cid);
1117 readcmd->sector_count = cpu_to_le16(count);
1118 readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
1119 readcmd->pad = 0;
1120 readcmd->flags = 0;
1121
1122 aac_build_sg64(scsicmd, &readcmd->sg);
1123 fibsize = sizeof(struct aac_read64) +
1124 ((le32_to_cpu(readcmd->sg.count) - 1) *
1125 sizeof (struct sgentry64));
1126 BUG_ON (fibsize > (dev->max_fib_size -
1127 sizeof(struct aac_fibhdr)));
1128 /*
1129 * Now send the Fib to the adapter
1130 */
1131 status = aac_fib_send(ContainerCommand64,
1132 cmd_fibcontext,
1133 fibsize,
1134 FsaNormal,
1135 0, 1,
1136 (fib_callback) io_callback,
1137 (void *) scsicmd);
1138 } else {
1139 struct aac_read *readcmd;
1140 readcmd = (struct aac_read *) fib_data(cmd_fibcontext);
1141 readcmd->command = cpu_to_le32(VM_CtBlockRead);
1142 readcmd->cid = cpu_to_le32(cid);
1143 readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
1144 readcmd->count = cpu_to_le32(count * 512);
1145
1146 aac_build_sg(scsicmd, &readcmd->sg);
1147 fibsize = sizeof(struct aac_read) +
1148 ((le32_to_cpu(readcmd->sg.count) - 1) *
1149 sizeof (struct sgentry));
1150 BUG_ON (fibsize > (dev->max_fib_size -
1151 sizeof(struct aac_fibhdr)));
1152 /*
1153 * Now send the Fib to the adapter
1154 */
1155 status = aac_fib_send(ContainerCommand,
1156 cmd_fibcontext,
1157 fibsize,
1158 FsaNormal,
1159 0, 1,
1160 (fib_callback) io_callback,
1161 (void *) scsicmd);
1162 }
1163
1164
1165 1389
1166 /* 1390 /*
1167 * Check that the command queued to the controller 1391 * Check that the command queued to the controller
@@ -1187,7 +1411,6 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1187 u64 lba; 1411 u64 lba;
1188 u32 count; 1412 u32 count;
1189 int status; 1413 int status;
1190 u16 fibsize;
1191 struct aac_dev *dev; 1414 struct aac_dev *dev;
1192 struct fib * cmd_fibcontext; 1415 struct fib * cmd_fibcontext;
1193 1416
@@ -1227,22 +1450,8 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1227 } 1450 }
1228 dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n", 1451 dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
1229 smp_processor_id(), (unsigned long long)lba, jiffies)); 1452 smp_processor_id(), (unsigned long long)lba, jiffies));
1230 if ((!(dev->raw_io_interface) || !(dev->raw_io_64)) 1453 if (aac_adapter_bounds(dev,scsicmd,lba))
1231 && (lba & 0xffffffff00000000LL)) {
1232 dprintk((KERN_DEBUG "aac_write: Illegal lba\n"));
1233 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
1234 set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
1235 HARDWARE_ERROR,
1236 SENCODE_INTERNAL_TARGET_FAILURE,
1237 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
1238 0, 0);
1239 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
1240 (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
1241 ? sizeof(scsicmd->sense_buffer)
1242 : sizeof(dev->fsa_dev[cid].sense_data));
1243 scsicmd->scsi_done(scsicmd);
1244 return 0; 1454 return 0;
1245 }
1246 /* 1455 /*
1247 * Allocate and initialize a Fib then setup a BlockWrite command 1456 * Allocate and initialize a Fib then setup a BlockWrite command
1248 */ 1457 */
@@ -1251,85 +1460,8 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1251 scsicmd->scsi_done(scsicmd); 1460 scsicmd->scsi_done(scsicmd);
1252 return 0; 1461 return 0;
1253 } 1462 }
1254 aac_fib_init(cmd_fibcontext);
1255 1463
1256 if (dev->raw_io_interface) { 1464 status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count);
1257 struct aac_raw_io *writecmd;
1258 writecmd = (struct aac_raw_io *) fib_data(cmd_fibcontext);
1259 writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
1260 writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
1261 writecmd->count = cpu_to_le32(count<<9);
1262 writecmd->cid = cpu_to_le16(cid);
1263 writecmd->flags = 0;
1264 writecmd->bpTotal = 0;
1265 writecmd->bpComplete = 0;
1266
1267 aac_build_sgraw(scsicmd, &writecmd->sg);
1268 fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(writecmd->sg.count) - 1) * sizeof (struct sgentryraw));
1269 BUG_ON(fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)));
1270 /*
1271 * Now send the Fib to the adapter
1272 */
1273 status = aac_fib_send(ContainerRawIo,
1274 cmd_fibcontext,
1275 fibsize,
1276 FsaNormal,
1277 0, 1,
1278 (fib_callback) io_callback,
1279 (void *) scsicmd);
1280 } else if (dev->dac_support == 1) {
1281 struct aac_write64 *writecmd;
1282 writecmd = (struct aac_write64 *) fib_data(cmd_fibcontext);
1283 writecmd->command = cpu_to_le32(VM_CtHostWrite64);
1284 writecmd->cid = cpu_to_le16(cid);
1285 writecmd->sector_count = cpu_to_le16(count);
1286 writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
1287 writecmd->pad = 0;
1288 writecmd->flags = 0;
1289
1290 aac_build_sg64(scsicmd, &writecmd->sg);
1291 fibsize = sizeof(struct aac_write64) +
1292 ((le32_to_cpu(writecmd->sg.count) - 1) *
1293 sizeof (struct sgentry64));
1294 BUG_ON (fibsize > (dev->max_fib_size -
1295 sizeof(struct aac_fibhdr)));
1296 /*
1297 * Now send the Fib to the adapter
1298 */
1299 status = aac_fib_send(ContainerCommand64,
1300 cmd_fibcontext,
1301 fibsize,
1302 FsaNormal,
1303 0, 1,
1304 (fib_callback) io_callback,
1305 (void *) scsicmd);
1306 } else {
1307 struct aac_write *writecmd;
1308 writecmd = (struct aac_write *) fib_data(cmd_fibcontext);
1309 writecmd->command = cpu_to_le32(VM_CtBlockWrite);
1310 writecmd->cid = cpu_to_le32(cid);
1311 writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
1312 writecmd->count = cpu_to_le32(count * 512);
1313 writecmd->sg.count = cpu_to_le32(1);
1314 /* ->stable is not used - it did mean which type of write */
1315
1316 aac_build_sg(scsicmd, &writecmd->sg);
1317 fibsize = sizeof(struct aac_write) +
1318 ((le32_to_cpu(writecmd->sg.count) - 1) *
1319 sizeof (struct sgentry));
1320 BUG_ON (fibsize > (dev->max_fib_size -
1321 sizeof(struct aac_fibhdr)));
1322 /*
1323 * Now send the Fib to the adapter
1324 */
1325 status = aac_fib_send(ContainerCommand,
1326 cmd_fibcontext,
1327 fibsize,
1328 FsaNormal,
1329 0, 1,
1330 (fib_callback) io_callback,
1331 (void *) scsicmd);
1332 }
1333 1465
1334 /* 1466 /*
1335 * Check that the command queued to the controller 1467 * Check that the command queued to the controller
@@ -2099,10 +2231,6 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
2099 struct fib* cmd_fibcontext; 2231 struct fib* cmd_fibcontext;
2100 struct aac_dev* dev; 2232 struct aac_dev* dev;
2101 int status; 2233 int status;
2102 struct aac_srb *srbcmd;
2103 u16 fibsize;
2104 u32 flag;
2105 u32 timeout;
2106 2234
2107 dev = (struct aac_dev *)scsicmd->device->host->hostdata; 2235 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
2108 if (scmd_id(scsicmd) >= dev->maximum_num_physicals || 2236 if (scmd_id(scsicmd) >= dev->maximum_num_physicals ||
@@ -2112,88 +2240,14 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
2112 return 0; 2240 return 0;
2113 } 2241 }
2114 2242
2115 switch(scsicmd->sc_data_direction){
2116 case DMA_TO_DEVICE:
2117 flag = SRB_DataOut;
2118 break;
2119 case DMA_BIDIRECTIONAL:
2120 flag = SRB_DataIn | SRB_DataOut;
2121 break;
2122 case DMA_FROM_DEVICE:
2123 flag = SRB_DataIn;
2124 break;
2125 case DMA_NONE:
2126 default: /* shuts up some versions of gcc */
2127 flag = SRB_NoDataXfer;
2128 break;
2129 }
2130
2131
2132 /* 2243 /*
2133 * Allocate and initialize a Fib then setup a BlockWrite command 2244 * Allocate and initialize a Fib then setup a BlockWrite command
2134 */ 2245 */
2135 if (!(cmd_fibcontext = aac_fib_alloc(dev))) { 2246 if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
2136 return -1; 2247 return -1;
2137 } 2248 }
2138 aac_fib_init(cmd_fibcontext); 2249 status = aac_adapter_scsi(cmd_fibcontext, scsicmd);
2139
2140 srbcmd = (struct aac_srb*) fib_data(cmd_fibcontext);
2141 srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
2142 srbcmd->channel = cpu_to_le32(aac_logical_to_phys(scmd_channel(scsicmd)));
2143 srbcmd->id = cpu_to_le32(scmd_id(scsicmd));
2144 srbcmd->lun = cpu_to_le32(scsicmd->device->lun);
2145 srbcmd->flags = cpu_to_le32(flag);
2146 timeout = scsicmd->timeout_per_command/HZ;
2147 if(timeout == 0){
2148 timeout = 1;
2149 }
2150 srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds
2151 srbcmd->retry_limit = 0; /* Obsolete parameter */
2152 srbcmd->cdb_size = cpu_to_le32(scsicmd->cmd_len);
2153
2154 if( dev->dac_support == 1 ) {
2155 aac_build_sg64(scsicmd, (struct sgmap64*) &srbcmd->sg);
2156 srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);
2157
2158 memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
2159 memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
2160 /*
2161 * Build Scatter/Gather list
2162 */
2163 fibsize = sizeof (struct aac_srb) - sizeof (struct sgentry) +
2164 ((le32_to_cpu(srbcmd->sg.count) & 0xff) *
2165 sizeof (struct sgentry64));
2166 BUG_ON (fibsize > (dev->max_fib_size -
2167 sizeof(struct aac_fibhdr)));
2168 2250
2169 /*
2170 * Now send the Fib to the adapter
2171 */
2172 status = aac_fib_send(ScsiPortCommand64, cmd_fibcontext,
2173 fibsize, FsaNormal, 0, 1,
2174 (fib_callback) aac_srb_callback,
2175 (void *) scsicmd);
2176 } else {
2177 aac_build_sg(scsicmd, (struct sgmap*)&srbcmd->sg);
2178 srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);
2179
2180 memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
2181 memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
2182 /*
2183 * Build Scatter/Gather list
2184 */
2185 fibsize = sizeof (struct aac_srb) +
2186 (((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) *
2187 sizeof (struct sgentry));
2188 BUG_ON (fibsize > (dev->max_fib_size -
2189 sizeof(struct aac_fibhdr)));
2190
2191 /*
2192 * Now send the Fib to the adapter
2193 */
2194 status = aac_fib_send(ScsiPortCommand, cmd_fibcontext, fibsize, FsaNormal, 0, 1,
2195 (fib_callback) aac_srb_callback, (void *) scsicmd);
2196 }
2197 /* 2251 /*
2198 * Check that the command queued to the controller 2252 * Check that the command queued to the controller
2199 */ 2253 */
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 4f8b4c53d435..39ecd0d22eb0 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -5,6 +5,7 @@
5#define _nblank(x) #x 5#define _nblank(x) #x
6#define nblank(x) _nblank(x)[0] 6#define nblank(x) _nblank(x)[0]
7 7
8#include <linux/interrupt.h>
8 9
9/*------------------------------------------------------------------------------ 10/*------------------------------------------------------------------------------
10 * D E F I N E S 11 * D E F I N E S
@@ -485,16 +486,28 @@ enum aac_log_level {
485 486
486struct aac_dev; 487struct aac_dev;
487struct fib; 488struct fib;
489struct scsi_cmnd;
488 490
489struct adapter_ops 491struct adapter_ops
490{ 492{
493 /* Low level operations */
491 void (*adapter_interrupt)(struct aac_dev *dev); 494 void (*adapter_interrupt)(struct aac_dev *dev);
492 void (*adapter_notify)(struct aac_dev *dev, u32 event); 495 void (*adapter_notify)(struct aac_dev *dev, u32 event);
493 void (*adapter_disable_int)(struct aac_dev *dev); 496 void (*adapter_disable_int)(struct aac_dev *dev);
497 void (*adapter_enable_int)(struct aac_dev *dev);
494 int (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4); 498 int (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4);
495 int (*adapter_check_health)(struct aac_dev *dev); 499 int (*adapter_check_health)(struct aac_dev *dev);
496 int (*adapter_send)(struct fib * fib); 500 /* Transport operations */
497 int (*adapter_ioremap)(struct aac_dev * dev, u32 size); 501 int (*adapter_ioremap)(struct aac_dev * dev, u32 size);
502 irqreturn_t (*adapter_intr)(int irq, void *dev_id);
503 /* Packet operations */
504 int (*adapter_deliver)(struct fib * fib);
505 int (*adapter_bounds)(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba);
506 int (*adapter_read)(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count);
507 int (*adapter_write)(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count);
508 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
509 /* Administrative operations */
510 int (*adapter_comm)(struct aac_dev * dev, int comm);
498}; 511};
499 512
500/* 513/*
@@ -1018,7 +1031,9 @@ struct aac_dev
1018 u8 nondasd_support; 1031 u8 nondasd_support;
1019 u8 dac_support; 1032 u8 dac_support;
1020 u8 raid_scsi_mode; 1033 u8 raid_scsi_mode;
1021 u8 new_comm_interface; 1034 u8 comm_interface;
1035# define AAC_COMM_PRODUCER 0
1036# define AAC_COMM_MESSAGE 1
1022 /* macro side-effects BEWARE */ 1037 /* macro side-effects BEWARE */
1023# define raw_io_interface \ 1038# define raw_io_interface \
1024 init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4) 1039 init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4)
@@ -1036,18 +1051,36 @@ struct aac_dev
1036#define aac_adapter_disable_int(dev) \ 1051#define aac_adapter_disable_int(dev) \
1037 (dev)->a_ops.adapter_disable_int(dev) 1052 (dev)->a_ops.adapter_disable_int(dev)
1038 1053
1054#define aac_adapter_enable_int(dev) \
1055 (dev)->a_ops.adapter_enable_int(dev)
1056
1039#define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \ 1057#define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \
1040 (dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) 1058 (dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4)
1041 1059
1042#define aac_adapter_check_health(dev) \ 1060#define aac_adapter_check_health(dev) \
1043 (dev)->a_ops.adapter_check_health(dev) 1061 (dev)->a_ops.adapter_check_health(dev)
1044 1062
1045#define aac_adapter_send(fib) \
1046 ((fib)->dev)->a_ops.adapter_send(fib)
1047
1048#define aac_adapter_ioremap(dev, size) \ 1063#define aac_adapter_ioremap(dev, size) \
1049 (dev)->a_ops.adapter_ioremap(dev, size) 1064 (dev)->a_ops.adapter_ioremap(dev, size)
1050 1065
1066#define aac_adapter_deliver(fib) \
1067 ((fib)->dev)->a_ops.adapter_deliver(fib)
1068
1069#define aac_adapter_bounds(dev,cmd,lba) \
1070 dev->a_ops.adapter_bounds(dev,cmd,lba)
1071
1072#define aac_adapter_read(fib,cmd,lba,count) \
1073 ((fib)->dev)->a_ops.adapter_read(fib,cmd,lba,count)
1074
1075#define aac_adapter_write(fib,cmd,lba,count) \
1076 ((fib)->dev)->a_ops.adapter_write(fib,cmd,lba,count)
1077
1078#define aac_adapter_scsi(fib,cmd) \
1079 ((fib)->dev)->a_ops.adapter_scsi(fib,cmd)
1080
1081#define aac_adapter_comm(dev,comm) \
1082 (dev)->a_ops.adapter_comm(dev, comm)
1083
1051#define FIB_CONTEXT_FLAG_TIMED_OUT (0x00000001) 1084#define FIB_CONTEXT_FLAG_TIMED_OUT (0x00000001)
1052 1085
1053/* 1086/*
@@ -1767,7 +1800,6 @@ static inline u32 cap_to_cyls(sector_t capacity, u32 divisor)
1767 return (u32)capacity; 1800 return (u32)capacity;
1768} 1801}
1769 1802
1770struct scsi_cmnd;
1771/* SCp.phase values */ 1803/* SCp.phase values */
1772#define AAC_OWNER_MIDLEVEL 0x101 1804#define AAC_OWNER_MIDLEVEL 0x101
1773#define AAC_OWNER_LOWLEVEL 0x102 1805#define AAC_OWNER_LOWLEVEL 0x102
@@ -1794,7 +1826,9 @@ int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg);
1794int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg); 1826int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg);
1795int aac_rx_init(struct aac_dev *dev); 1827int aac_rx_init(struct aac_dev *dev);
1796int aac_rkt_init(struct aac_dev *dev); 1828int aac_rkt_init(struct aac_dev *dev);
1829int aac_nark_init(struct aac_dev *dev);
1797int aac_sa_init(struct aac_dev *dev); 1830int aac_sa_init(struct aac_dev *dev);
1831int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify);
1798unsigned int aac_response_normal(struct aac_queue * q); 1832unsigned int aac_response_normal(struct aac_queue * q);
1799unsigned int aac_command_normal(struct aac_queue * q); 1833unsigned int aac_command_normal(struct aac_queue * q);
1800unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index); 1834unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index);
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 6d305b2f854e..df67ba686023 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -95,7 +95,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
95 init->HostPhysMemPages = cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES); 95 init->HostPhysMemPages = cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES);
96 96
97 init->InitFlags = 0; 97 init->InitFlags = 0;
98 if (dev->new_comm_interface) { 98 if (dev->comm_interface == AAC_COMM_MESSAGE) {
99 init->InitFlags = cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED); 99 init->InitFlags = cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED);
100 dprintk((KERN_WARNING"aacraid: New Comm Interface enabled\n")); 100 dprintk((KERN_WARNING"aacraid: New Comm Interface enabled\n"));
101 } 101 }
@@ -297,21 +297,23 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
297 - sizeof(struct aac_fibhdr) 297 - sizeof(struct aac_fibhdr)
298 - sizeof(struct aac_write) + sizeof(struct sgentry)) 298 - sizeof(struct aac_write) + sizeof(struct sgentry))
299 / sizeof(struct sgentry); 299 / sizeof(struct sgentry);
300 dev->new_comm_interface = 0; 300 dev->comm_interface = AAC_COMM_PRODUCER;
301 dev->raw_io_64 = 0; 301 dev->raw_io_64 = 0;
302 if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, 302 if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
303 0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) && 303 0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) &&
304 (status[0] == 0x00000001)) { 304 (status[0] == 0x00000001)) {
305 if (status[1] & AAC_OPT_NEW_COMM_64) 305 if (status[1] & AAC_OPT_NEW_COMM_64)
306 dev->raw_io_64 = 1; 306 dev->raw_io_64 = 1;
307 if (status[1] & AAC_OPT_NEW_COMM) 307 if (dev->a_ops.adapter_comm &&
308 dev->new_comm_interface = dev->a_ops.adapter_send != 0; 308 (status[1] & AAC_OPT_NEW_COMM))
309 if (dev->new_comm_interface && (status[2] > dev->base_size)) { 309 dev->comm_interface = AAC_COMM_MESSAGE;
310 if ((dev->comm_interface == AAC_COMM_MESSAGE) &&
311 (status[2] > dev->base_size)) {
310 aac_adapter_ioremap(dev, 0); 312 aac_adapter_ioremap(dev, 0);
311 dev->base_size = status[2]; 313 dev->base_size = status[2];
312 if (aac_adapter_ioremap(dev, status[2])) { 314 if (aac_adapter_ioremap(dev, status[2])) {
313 /* remap failed, go back ... */ 315 /* remap failed, go back ... */
314 dev->new_comm_interface = 0; 316 dev->comm_interface = AAC_COMM_PRODUCER;
315 if (aac_adapter_ioremap(dev, AAC_MIN_FOOTPRINT_SIZE)) { 317 if (aac_adapter_ioremap(dev, AAC_MIN_FOOTPRINT_SIZE)) {
316 printk(KERN_WARNING 318 printk(KERN_WARNING
317 "aacraid: unable to map adapter.\n"); 319 "aacraid: unable to map adapter.\n");
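The comminit.c hunk above turns the old new_comm_interface boolean into a comm_interface selector with a fallback path: the message-based interface is only chosen when the transport layer supplies an adapter_comm operation and the firmware advertises AAC_OPT_NEW_COMM, and if the larger register window it needs cannot be remapped the driver drops back to the producer/consumer queues. A small sketch of that decision, with illustrative helper names and a simulated remap failure:

#include <stdio.h>

enum comm { COMM_PRODUCER, COMM_MESSAGE };

static int remap_ok = 0;	/* pretend the larger ioremap failed */

static int try_remap(unsigned int size) { (void)size; return remap_ok; }

static enum comm pick_comm(int has_comm_op, int opt_new_comm,
			   unsigned int needed, unsigned int base_size)
{
	enum comm comm = COMM_PRODUCER;

	if (has_comm_op && opt_new_comm)
		comm = COMM_MESSAGE;
	/* The message interface may need a larger register window. */
	if (comm == COMM_MESSAGE && needed > base_size && !try_remap(needed))
		comm = COMM_PRODUCER;	/* remap failed, go back */
	return comm;
}

int main(void)
{
	printf("%s\n", pick_comm(1, 1, 0x2000, 0x1000) == COMM_MESSAGE ?
	       "message interface" : "producer interface");
	return 0;
}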
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 4893a6d06a33..1b97f60652ba 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -317,7 +317,7 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr
317 * success. 317 * success.
318 */ 318 */
319 319
320static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify) 320int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
321{ 321{
322 struct aac_entry * entry = NULL; 322 struct aac_entry * entry = NULL;
323 int map = 0; 323 int map = 0;
@@ -387,7 +387,6 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
387{ 387{
388 struct aac_dev * dev = fibptr->dev; 388 struct aac_dev * dev = fibptr->dev;
389 struct hw_fib * hw_fib = fibptr->hw_fib; 389 struct hw_fib * hw_fib = fibptr->hw_fib;
390 struct aac_queue * q;
391 unsigned long flags = 0; 390 unsigned long flags = 0;
392 unsigned long qflags; 391 unsigned long qflags;
393 392
@@ -469,38 +468,10 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
469 468
470 if (!dev->queues) 469 if (!dev->queues)
471 return -EBUSY; 470 return -EBUSY;
472 q = &dev->queues->queue[AdapNormCmdQueue];
473 471
474 if(wait) 472 if(wait)
475 spin_lock_irqsave(&fibptr->event_lock, flags); 473 spin_lock_irqsave(&fibptr->event_lock, flags);
476 spin_lock_irqsave(q->lock, qflags); 474 aac_adapter_deliver(fibptr);
477 if (dev->new_comm_interface) {
478 unsigned long count = 10000000L; /* 50 seconds */
479 q->numpending++;
480 spin_unlock_irqrestore(q->lock, qflags);
481 while (aac_adapter_send(fibptr) != 0) {
482 if (--count == 0) {
483 if (wait)
484 spin_unlock_irqrestore(&fibptr->event_lock, flags);
485 spin_lock_irqsave(q->lock, qflags);
486 q->numpending--;
487 spin_unlock_irqrestore(q->lock, qflags);
488 return -ETIMEDOUT;
489 }
490 udelay(5);
491 }
492 } else {
493 u32 index;
494 unsigned long nointr = 0;
495 aac_queue_get( dev, &index, AdapNormCmdQueue, hw_fib, 1, fibptr, &nointr);
496
497 q->numpending++;
498 *(q->headers.producer) = cpu_to_le32(index + 1);
499 spin_unlock_irqrestore(q->lock, qflags);
500 dprintk((KERN_DEBUG "aac_fib_send: inserting a queue entry at index %d.\n",index));
501 if (!(nointr & aac_config.irq_mod))
502 aac_adapter_notify(dev, AdapNormCmdQueue);
503 }
504 475
505 /* 476 /*
506 * If the caller wanted us to wait for response wait now. 477 * If the caller wanted us to wait for response wait now.
@@ -520,6 +491,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
520 while (down_trylock(&fibptr->event_wait)) { 491 while (down_trylock(&fibptr->event_wait)) {
521 int blink; 492 int blink;
522 if (--count == 0) { 493 if (--count == 0) {
494 struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
523 spin_lock_irqsave(q->lock, qflags); 495 spin_lock_irqsave(q->lock, qflags);
524 q->numpending--; 496 q->numpending--;
525 spin_unlock_irqrestore(q->lock, qflags); 497 spin_unlock_irqrestore(q->lock, qflags);
@@ -659,7 +631,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
659 unsigned long qflags; 631 unsigned long qflags;
660 632
661 if (hw_fib->header.XferState == 0) { 633 if (hw_fib->header.XferState == 0) {
662 if (dev->new_comm_interface) 634 if (dev->comm_interface == AAC_COMM_MESSAGE)
663 kfree (hw_fib); 635 kfree (hw_fib);
664 return 0; 636 return 0;
665 } 637 }
@@ -667,7 +639,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
667 * If we plan to do anything check the structure type first. 639 * If we plan to do anything check the structure type first.
668 */ 640 */
669 if ( hw_fib->header.StructType != FIB_MAGIC ) { 641 if ( hw_fib->header.StructType != FIB_MAGIC ) {
670 if (dev->new_comm_interface) 642 if (dev->comm_interface == AAC_COMM_MESSAGE)
671 kfree (hw_fib); 643 kfree (hw_fib);
672 return -EINVAL; 644 return -EINVAL;
673 } 645 }
@@ -679,7 +651,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
679 * send the completed cdb to the adapter. 651 * send the completed cdb to the adapter.
680 */ 652 */
681 if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) { 653 if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
682 if (dev->new_comm_interface) { 654 if (dev->comm_interface == AAC_COMM_MESSAGE) {
683 kfree (hw_fib); 655 kfree (hw_fib);
684 } else { 656 } else {
685 u32 index; 657 u32 index;
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index d2cf875af59b..a9734e08fe28 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -157,6 +157,7 @@ static struct pci_device_id aac_pci_tbl[] = {
157 { 0x9005, 0x0285, 0x17aa, PCI_ANY_ID, 0, 0, 58 }, /* Legend Catchall */ 157 { 0x9005, 0x0285, 0x17aa, PCI_ANY_ID, 0, 0, 58 }, /* Legend Catchall */
158 { 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 59 }, /* Adaptec Catch All */ 158 { 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 59 }, /* Adaptec Catch All */
159 { 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 60 }, /* Adaptec Rocket Catch All */ 159 { 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 60 }, /* Adaptec Rocket Catch All */
160 { 0x9005, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 61 }, /* Adaptec NEMER/ARK Catch All */
160 { 0,} 161 { 0,}
161}; 162};
162MODULE_DEVICE_TABLE(pci, aac_pci_tbl); 163MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
@@ -230,7 +231,8 @@ static struct aac_driver_ident aac_drivers[] = {
230 { aac_rx_init, "aacraid", "DELL ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Dell Catchall */ 231 { aac_rx_init, "aacraid", "DELL ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Dell Catchall */
231 { aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Legend Catchall */ 232 { aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Legend Catchall */
232 { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec Catch All */ 233 { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec Catch All */
233 { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec Rocket Catch All */ 234 { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */
235 { aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec NEMER/ARK Catch All */
234}; 236};
235 237
236/** 238/**
@@ -396,11 +398,15 @@ static int aac_slave_configure(struct scsi_device *sdev)
396 sdev->skip_ms_page_3f = 1; 398 sdev->skip_ms_page_3f = 1;
397 } 399 }
398 if ((sdev->type == TYPE_DISK) && 400 if ((sdev->type == TYPE_DISK) &&
399 !expose_physicals &&
400 (sdev_channel(sdev) != CONTAINER_CHANNEL)) { 401 (sdev_channel(sdev) != CONTAINER_CHANNEL)) {
401 struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata; 402 if (expose_physicals == 0)
402 if (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2)) 403 return -ENXIO;
403 sdev->no_uld_attach = 1; 404 if (expose_physicals < 0) {
405 struct aac_dev *aac =
406 (struct aac_dev *)sdev->host->hostdata;
407 if (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))
408 sdev->no_uld_attach = 1;
409 }
404 } 410 }
405 if (sdev->tagged_supported && (sdev->type == TYPE_DISK) && 411 if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
406 (sdev_channel(sdev) == CONTAINER_CHANNEL)) { 412 (sdev_channel(sdev) == CONTAINER_CHANNEL)) {
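Together with the expose_physicals default change earlier in aachba.c, the slave_configure hunk above gives the module parameter three states for physical (non-container) disks: -1, the new default, hides them from the upper layers unless the controller is in RAID/SCSI mode on channel 2; 0 refuses to configure them at all; 1 exposes everything. A sketch of that policy as a standalone helper, not the driver's actual slave_configure:

#include <stdio.h>

enum phys_action { PHYS_SKIP, PHYS_HIDDEN, PHYS_EXPOSED };

static enum phys_action classify(int expose_physicals,
				 int raid_scsi_mode, int channel)
{
	if (expose_physicals == 0)
		return PHYS_SKIP;		/* -ENXIO in the driver */
	if (expose_physicals < 0 &&
	    (!raid_scsi_mode || channel != 2))
		return PHYS_HIDDEN;		/* sdev->no_uld_attach = 1 */
	return PHYS_EXPOSED;
}

int main(void)
{
	printf("default policy -> %d\n", classify(-1, 0, 1));	/* hidden */
	printf("expose_physicals=1 -> %d\n", classify(1, 0, 1));	/* exposed */
	return 0;
}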
@@ -804,7 +810,6 @@ static struct scsi_host_template aac_driver_template = {
804 .emulated = 1, 810 .emulated = 1,
805}; 811};
806 812
807
808static int __devinit aac_probe_one(struct pci_dev *pdev, 813static int __devinit aac_probe_one(struct pci_dev *pdev,
809 const struct pci_device_id *id) 814 const struct pci_device_id *id)
810{ 815{
diff --git a/drivers/scsi/aacraid/nark.c b/drivers/scsi/aacraid/nark.c
new file mode 100644
index 000000000000..c76b611b6afb
--- /dev/null
+++ b/drivers/scsi/aacraid/nark.c
@@ -0,0 +1,87 @@
1/*
2 * Adaptec AAC series RAID controller driver
3 * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
4 *
5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux.
7 *
8 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; see the file COPYING. If not, write to
22 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 *
24 * Module Name:
25 * nark.c
26 *
27 * Abstract: Hardware Device Interface for NEMER/ARK
28 *
29 */
30
31#include <linux/pci.h>
32#include <linux/blkdev.h>
33
34#include <scsi/scsi_host.h>
35
36#include "aacraid.h"
37
38/**
39 * aac_nark_ioremap
40 * @size: mapping resize request
41 *
42 */
43static int aac_nark_ioremap(struct aac_dev * dev, u32 size)
44{
45 if (!size) {
46 iounmap(dev->regs.rx);
47 dev->regs.rx = NULL;
48 iounmap(dev->base);
49 dev->base = NULL;
50 return 0;
51 }
52 dev->scsi_host_ptr->base = pci_resource_start(dev->pdev, 2);
53 dev->regs.rx = ioremap((u64)pci_resource_start(dev->pdev, 0) |
54 ((u64)pci_resource_start(dev->pdev, 1) << 32),
55 sizeof(struct rx_registers) - sizeof(struct rx_inbound));
56 dev->base = NULL;
57 if (dev->regs.rx == NULL)
58 return -1;
59 dev->base = ioremap(dev->scsi_host_ptr->base, size);
60 if (dev->base == NULL) {
61 iounmap(dev->regs.rx);
62 dev->regs.rx = NULL;
63 return -1;
64 }
65 dev->IndexRegs = &((struct rx_registers __iomem *)dev->base)->IndexRegs;
66 return 0;
67}
68
69/**
70 * aac_nark_init - initialize an NEMER/ARK Split Bar card
71 * @dev: device to configure
72 *
73 */
74
75int aac_nark_init(struct aac_dev * dev)
76{
77 extern int _aac_rx_init(struct aac_dev *dev);
78 extern int aac_rx_select_comm(struct aac_dev *dev, int comm);
79
80 /*
81 * Fill in the function dispatch table.
82 */
83 dev->a_ops.adapter_ioremap = aac_nark_ioremap;
84 dev->a_ops.adapter_comm = aac_rx_select_comm;
85
86 return _aac_rx_init(dev);
87}
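The new nark.c maps the NEMER/ARK variant, whose rx register block sits behind a 64-bit address split across PCI BARs 0 and 1; aac_nark_ioremap recombines the two BARs before calling ioremap. A standalone sketch of just that address composition, with made-up BAR values for the example:

#include <stdio.h>
#include <stdint.h>

/* Combine the low and high halves of a split 64-bit BAR address. */
static uint64_t combine_bars(uint32_t bar0_lo, uint32_t bar1_hi)
{
	return (uint64_t)bar0_lo | ((uint64_t)bar1_hi << 32);
}

int main(void)
{
	/* e.g. BAR0 = 0xfe900000, BAR1 = 0x00000001 -> 0x1fe900000 */
	printf("%#llx\n",
	       (unsigned long long)combine_bars(0xfe900000u, 0x00000001u));
	return 0;
}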
diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c
index 643f23b5ded8..d953c3fe998a 100644
--- a/drivers/scsi/aacraid/rkt.c
+++ b/drivers/scsi/aacraid/rkt.c
@@ -34,6 +34,40 @@
34 34
35#include "aacraid.h" 35#include "aacraid.h"
36 36
37#define AAC_NUM_IO_FIB_RKT (246 - AAC_NUM_MGT_FIB)
38
39/**
40 * aac_rkt_select_comm - Select communications method
41 * @dev: Adapter
42 * @comm: communications method
43 */
44
45static int aac_rkt_select_comm(struct aac_dev *dev, int comm)
46{
47 int retval;
48 extern int aac_rx_select_comm(struct aac_dev *dev, int comm);
49 retval = aac_rx_select_comm(dev, comm);
50 if (comm == AAC_COMM_MESSAGE) {
51 /*
52 * FIB Setup has already been done, but we can minimize the
53 * damage by at least ensuring the OS never issues more
54 * commands than we can handle. The Rocket adapters currently
55 * can only handle 246 commands and 8 AIFs at the same time,
56 * and in fact do notify us accordingly if we negotiate the
57 * FIB size. The problem that causes us to add this check is
58 * to ensure that we do not overdo it with the adapter when a
59 * hard coded FIB override is being utilized. This special
60 * case warrants this half baked, but convenient, check here.
61 */
62 if (dev->scsi_host_ptr->can_queue > AAC_NUM_IO_FIB_RKT) {
63 dev->init->MaxIoCommands =
64 cpu_to_le32(AAC_NUM_IO_FIB_RKT + AAC_NUM_MGT_FIB);
65 dev->scsi_host_ptr->can_queue = AAC_NUM_IO_FIB_RKT;
66 }
67 }
68 return retval;
69}
70
37/** 71/**
38 * aac_rkt_ioremap 72 * aac_rkt_ioremap
39 * @size: mapping resize request 73 * @size: mapping resize request
@@ -63,39 +97,13 @@ static int aac_rkt_ioremap(struct aac_dev * dev, u32 size)
63 97
64int aac_rkt_init(struct aac_dev *dev) 98int aac_rkt_init(struct aac_dev *dev)
65{ 99{
66 int retval;
67 extern int _aac_rx_init(struct aac_dev *dev); 100 extern int _aac_rx_init(struct aac_dev *dev);
68 extern void aac_rx_start_adapter(struct aac_dev *dev);
69 101
70 /* 102 /*
71 * Fill in the function dispatch table. 103 * Fill in the function dispatch table.
72 */ 104 */
73 dev->a_ops.adapter_ioremap = aac_rkt_ioremap; 105 dev->a_ops.adapter_ioremap = aac_rkt_ioremap;
106 dev->a_ops.adapter_comm = aac_rkt_select_comm;
74 107
75 retval = _aac_rx_init(dev); 108 return _aac_rx_init(dev);
76 if (retval)
77 return retval;
78 if (dev->new_comm_interface) {
79 /*
80 * FIB Setup has already been done, but we can minimize the
81 * damage by at least ensuring the OS never issues more
82 * commands than we can handle. The Rocket adapters currently
83 * can only handle 246 commands and 8 AIFs at the same time,
84 * and in fact do notify us accordingly if we negotiate the
85 * FIB size. The problem that causes us to add this check is
86 * to ensure that we do not overdo it with the adapter when a
87 * hard coded FIB override is being utilized. This special
88 * case warrants this half baked, but convenient, check here.
89 */
90 if (dev->scsi_host_ptr->can_queue > (246 - AAC_NUM_MGT_FIB)) {
91 dev->init->MaxIoCommands = cpu_to_le32(246);
92 dev->scsi_host_ptr->can_queue = 246 - AAC_NUM_MGT_FIB;
93 }
94 }
95 /*
96 * Tell the adapter that all is configured, and it can start
97 * accepting requests
98 */
99 aac_rx_start_adapter(dev);
100 return 0;
101} 109}
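
Note: the new aac_rkt_select_comm() keeps the Rocket-specific queue-depth clamp but applies it only when the message comm method is chosen, expressing the limit as AAC_NUM_IO_FIB_RKT so that MaxIoCommands still totals 246 once the management FIBs are added back. A worked sketch of the arithmetic; the value of AAC_NUM_MGT_FIB is assumed to be 8 from the "8 AIFs" comment above:

#define AAC_NUM_MGT_FIB    8                        /* assumed, per the comment above */
#define AAC_NUM_IO_FIB_RKT (246 - AAC_NUM_MGT_FIB)  /* 238 I/O FIBs */

/* never let the OS queue more I/O commands than the adapter can hold;
 * MaxIoCommands then becomes AAC_NUM_IO_FIB_RKT + AAC_NUM_MGT_FIB = 246 */
static int rkt_clamp_can_queue(int can_queue)
{
        if (can_queue > AAC_NUM_IO_FIB_RKT)
                can_queue = AAC_NUM_IO_FIB_RKT;
        return can_queue;
}
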
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index dcc8b0ea7a9d..c632d9354a26 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -46,60 +46,60 @@
46 46
47#include "aacraid.h" 47#include "aacraid.h"
48 48
49static irqreturn_t aac_rx_intr(int irq, void *dev_id) 49static irqreturn_t aac_rx_intr_producer(int irq, void *dev_id)
50{ 50{
51 struct aac_dev *dev = dev_id; 51 struct aac_dev *dev = dev_id;
52 unsigned long bellbits;
53 u8 intstat = rx_readb(dev, MUnit.OISR);
52 54
53 dprintk((KERN_DEBUG "aac_rx_intr(%d,%p)\n", irq, dev_id)); 55 /*
54 if (dev->new_comm_interface) { 56 * Read mask and invert because drawbridge is reversed.
55 u32 Index = rx_readl(dev, MUnit.OutboundQueue); 57 * This allows us to only service interrupts that have
56 if (Index == 0xFFFFFFFFL) 58 * been enabled.
57 Index = rx_readl(dev, MUnit.OutboundQueue); 59 * Check to see if this is our interrupt. If it isn't just return
58 if (Index != 0xFFFFFFFFL) { 60 */
59 do { 61 if (intstat & ~(dev->OIMR)) {
60 if (aac_intr_normal(dev, Index)) { 62 bellbits = rx_readl(dev, OutboundDoorbellReg);
61 rx_writel(dev, MUnit.OutboundQueue, Index); 63 if (bellbits & DoorBellPrintfReady) {
62 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady); 64 aac_printf(dev, readl (&dev->IndexRegs->Mailbox[5]));
63 } 65 rx_writel(dev, MUnit.ODR,DoorBellPrintfReady);
64 Index = rx_readl(dev, MUnit.OutboundQueue); 66 rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
65 } while (Index != 0xFFFFFFFFL);
66 return IRQ_HANDLED;
67 } 67 }
68 } else { 68 else if (bellbits & DoorBellAdapterNormCmdReady) {
69 unsigned long bellbits; 69 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
70 u8 intstat; 70 aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
71 intstat = rx_readb(dev, MUnit.OISR); 71 }
72 /* 72 else if (bellbits & DoorBellAdapterNormRespReady) {
73 * Read mask and invert because drawbridge is reversed. 73 rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
74 * This allows us to only service interrupts that have 74 aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
75 * been enabled. 75 }
76 * Check to see if this is our interrupt. If it isn't just return 76 else if (bellbits & DoorBellAdapterNormCmdNotFull) {
77 */ 77 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
78 if (intstat & ~(dev->OIMR))
79 {
80 bellbits = rx_readl(dev, OutboundDoorbellReg);
81 if (bellbits & DoorBellPrintfReady) {
82 aac_printf(dev, readl (&dev->IndexRegs->Mailbox[5]));
83 rx_writel(dev, MUnit.ODR,DoorBellPrintfReady);
84 rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
85 }
86 else if (bellbits & DoorBellAdapterNormCmdReady) {
87 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
88 aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
89 }
90 else if (bellbits & DoorBellAdapterNormRespReady) {
91 rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
92 aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
93 }
94 else if (bellbits & DoorBellAdapterNormCmdNotFull) {
95 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
96 }
97 else if (bellbits & DoorBellAdapterNormRespNotFull) {
98 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
99 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
100 }
101 return IRQ_HANDLED;
102 } 78 }
79 else if (bellbits & DoorBellAdapterNormRespNotFull) {
80 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
81 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
82 }
83 return IRQ_HANDLED;
84 }
85 return IRQ_NONE;
86}
87
88static irqreturn_t aac_rx_intr_message(int irq, void *dev_id)
89{
90 struct aac_dev *dev = dev_id;
91 u32 Index = rx_readl(dev, MUnit.OutboundQueue);
92 if (Index == 0xFFFFFFFFL)
93 Index = rx_readl(dev, MUnit.OutboundQueue);
94 if (Index != 0xFFFFFFFFL) {
95 do {
96 if (aac_intr_normal(dev, Index)) {
97 rx_writel(dev, MUnit.OutboundQueue, Index);
98 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady);
99 }
100 Index = rx_readl(dev, MUnit.OutboundQueue);
101 } while (Index != 0xFFFFFFFFL);
102 return IRQ_HANDLED;
103 } 103 }
104 return IRQ_NONE; 104 return IRQ_NONE;
105} 105}
@@ -115,6 +115,26 @@ static void aac_rx_disable_interrupt(struct aac_dev *dev)
115} 115}
116 116
117/** 117/**
118 * aac_rx_enable_interrupt_producer - Enable interrupts
119 * @dev: Adapter
120 */
121
122static void aac_rx_enable_interrupt_producer(struct aac_dev *dev)
123{
124 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
125}
126
127/**
128 * aac_rx_enable_interrupt_message - Enable interrupts
129 * @dev: Adapter
130 */
131
132static void aac_rx_enable_interrupt_message(struct aac_dev *dev)
133{
134 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
135}
136
137/**
118 * rx_sync_cmd - send a command and wait 138 * rx_sync_cmd - send a command and wait
119 * @dev: Adapter 139 * @dev: Adapter
120 * @command: Command to execute 140 * @command: Command to execute
@@ -189,10 +209,7 @@ static int rx_sync_cmd(struct aac_dev *dev, u32 command,
189 /* 209 /*
190 * Restore interrupt mask even though we timed out 210 * Restore interrupt mask even though we timed out
191 */ 211 */
192 if (dev->new_comm_interface) 212 aac_adapter_enable_int(dev);
193 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
194 else
195 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
196 return -ETIMEDOUT; 213 return -ETIMEDOUT;
197 } 214 }
198 /* 215 /*
@@ -215,10 +232,7 @@ static int rx_sync_cmd(struct aac_dev *dev, u32 command,
215 /* 232 /*
216 * Restore interrupt mask 233 * Restore interrupt mask
217 */ 234 */
218 if (dev->new_comm_interface) 235 aac_adapter_enable_int(dev);
219 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
220 else
221 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
222 return 0; 236 return 0;
223 237
224} 238}
@@ -360,35 +374,72 @@ static int aac_rx_check_health(struct aac_dev *dev)
360} 374}
361 375
362/** 376/**
363 * aac_rx_send 377 * aac_rx_deliver_producer
364 * @fib: fib to issue 378 * @fib: fib to issue
365 * 379 *
366 * Will send a fib, returning 0 if successful. 380 * Will send a fib, returning 0 if successful.
367 */ 381 */
368static int aac_rx_send(struct fib * fib) 382static int aac_rx_deliver_producer(struct fib * fib)
369{ 383{
370 u64 addr = fib->hw_fib_pa;
371 struct aac_dev *dev = fib->dev; 384 struct aac_dev *dev = fib->dev;
372 volatile void __iomem *device = dev->regs.rx; 385 struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
386 unsigned long qflags;
373 u32 Index; 387 u32 Index;
388 unsigned long nointr = 0;
374 389
375 dprintk((KERN_DEBUG "%p->aac_rx_send(%p->%llx)\n", dev, fib, addr)); 390 spin_lock_irqsave(q->lock, qflags);
376 Index = rx_readl(dev, MUnit.InboundQueue); 391 aac_queue_get( dev, &Index, AdapNormCmdQueue, fib->hw_fib, 1, fib, &nointr);
377 if (Index == 0xFFFFFFFFL) 392
393 q->numpending++;
394 *(q->headers.producer) = cpu_to_le32(Index + 1);
395 spin_unlock_irqrestore(q->lock, qflags);
396 if (!(nointr & aac_config.irq_mod))
397 aac_adapter_notify(dev, AdapNormCmdQueue);
398
399 return 0;
400}
401
402/**
403 * aac_rx_deliver_message
404 * @fib: fib to issue
405 *
406 * Will send a fib, returning 0 if successful.
407 */
408static int aac_rx_deliver_message(struct fib * fib)
409{
410 struct aac_dev *dev = fib->dev;
411 struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
412 unsigned long qflags;
413 u32 Index;
414 u64 addr;
415 volatile void __iomem *device;
416
417 unsigned long count = 10000000L; /* 50 seconds */
418 spin_lock_irqsave(q->lock, qflags);
419 q->numpending++;
420 spin_unlock_irqrestore(q->lock, qflags);
421 for(;;) {
378 Index = rx_readl(dev, MUnit.InboundQueue); 422 Index = rx_readl(dev, MUnit.InboundQueue);
379 dprintk((KERN_DEBUG "Index = 0x%x\n", Index)); 423 if (Index == 0xFFFFFFFFL)
380 if (Index == 0xFFFFFFFFL) 424 Index = rx_readl(dev, MUnit.InboundQueue);
381 return Index; 425 if (Index != 0xFFFFFFFFL)
426 break;
427 if (--count == 0) {
428 spin_lock_irqsave(q->lock, qflags);
429 q->numpending--;
430 spin_unlock_irqrestore(q->lock, qflags);
431 return -ETIMEDOUT;
432 }
433 udelay(5);
434 }
382 device = dev->base + Index; 435 device = dev->base + Index;
383 dprintk((KERN_DEBUG "entry = %x %x %u\n", (u32)(addr & 0xffffffff), 436 addr = fib->hw_fib_pa;
384 (u32)(addr >> 32), (u32)le16_to_cpu(fib->hw_fib->header.Size)));
385 writel((u32)(addr & 0xffffffff), device); 437 writel((u32)(addr & 0xffffffff), device);
386 device += sizeof(u32); 438 device += sizeof(u32);
387 writel((u32)(addr >> 32), device); 439 writel((u32)(addr >> 32), device);
388 device += sizeof(u32); 440 device += sizeof(u32);
389 writel(le16_to_cpu(fib->hw_fib->header.Size), device); 441 writel(le16_to_cpu(fib->hw_fib->header.Size), device);
390 rx_writel(dev, MUnit.InboundQueue, Index); 442 rx_writel(dev, MUnit.InboundQueue, Index);
391 dprintk((KERN_DEBUG "aac_rx_send - return 0\n"));
392 return 0; 443 return 0;
393} 444}
394 445
@@ -430,6 +481,31 @@ static int aac_rx_restart_adapter(struct aac_dev *dev)
430} 481}
431 482
432/** 483/**
484 * aac_rx_select_comm - Select communications method
485 * @dev: Adapter
486 * @comm: communications method
487 */
488
489int aac_rx_select_comm(struct aac_dev *dev, int comm)
490{
491 switch (comm) {
492 case AAC_COMM_PRODUCER:
493 dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_producer;
494 dev->a_ops.adapter_intr = aac_rx_intr_producer;
495 dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
496 break;
497 case AAC_COMM_MESSAGE:
498 dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_message;
499 dev->a_ops.adapter_intr = aac_rx_intr_message;
500 dev->a_ops.adapter_deliver = aac_rx_deliver_message;
501 break;
502 default:
503 return 1;
504 }
505 return 0;
506}
507
508/**
433 * aac_rx_init - initialize an i960 based AAC card 509 * aac_rx_init - initialize an i960 based AAC card
434 * @dev: device to configure 510 * @dev: device to configure
435 * 511 *
@@ -489,40 +565,42 @@ int _aac_rx_init(struct aac_dev *dev)
489 } 565 }
490 msleep(1); 566 msleep(1);
491 } 567 }
492 if (request_irq(dev->scsi_host_ptr->irq, aac_rx_intr, IRQF_SHARED|IRQF_DISABLED, "aacraid", (void *)dev)<0)
493 {
494 printk(KERN_ERR "%s%d: Interrupt unavailable.\n", name, instance);
495 goto error_iounmap;
496 }
497 /* 568 /*
498 * Fill in the function dispatch table. 569 * Fill in the common function dispatch table.
499 */ 570 */
500 dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter; 571 dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter;
501 dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt; 572 dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt;
502 dev->a_ops.adapter_notify = aac_rx_notify_adapter; 573 dev->a_ops.adapter_notify = aac_rx_notify_adapter;
503 dev->a_ops.adapter_sync_cmd = rx_sync_cmd; 574 dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
504 dev->a_ops.adapter_check_health = aac_rx_check_health; 575 dev->a_ops.adapter_check_health = aac_rx_check_health;
505 dev->a_ops.adapter_send = aac_rx_send;
506 576
507 /* 577 /*
 508 * First clear out all interrupts. Then enable the ones that we 578 * First clear out all interrupts. Then enable the ones that we
509 * can handle. 579 * can handle.
510 */ 580 */
511 rx_writeb(dev, MUnit.OIMR, 0xff); 581 aac_adapter_comm(dev, AAC_COMM_PRODUCER);
582 aac_adapter_disable_int(dev);
512 rx_writel(dev, MUnit.ODR, 0xffffffff); 583 rx_writel(dev, MUnit.ODR, 0xffffffff);
513 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb); 584 aac_adapter_enable_int(dev);
514 585
515 if (aac_init_adapter(dev) == NULL) 586 if (aac_init_adapter(dev) == NULL)
516 goto error_irq; 587 goto error_iounmap;
517 if (dev->new_comm_interface) 588 aac_adapter_comm(dev, dev->comm_interface);
518 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7); 589 if (request_irq(dev->scsi_host_ptr->irq, dev->a_ops.adapter_intr,
590 IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
591 printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
592 name, instance);
593 goto error_iounmap;
594 }
595 aac_adapter_enable_int(dev);
596 /*
597 * Tell the adapter that all is configured, and it can
598 * start accepting requests
599 */
600 aac_rx_start_adapter(dev);
519 601
520 return 0; 602 return 0;
521 603
522error_irq:
523 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
524 free_irq(dev->scsi_host_ptr->irq, (void *)dev);
525
526error_iounmap: 604error_iounmap:
527 605
528 return -1; 606 return -1;
@@ -530,20 +608,11 @@ error_iounmap:
530 608
531int aac_rx_init(struct aac_dev *dev) 609int aac_rx_init(struct aac_dev *dev)
532{ 610{
533 int retval;
534
535 /* 611 /*
536 * Fill in the function dispatch table. 612 * Fill in the function dispatch table.
537 */ 613 */
538 dev->a_ops.adapter_ioremap = aac_rx_ioremap; 614 dev->a_ops.adapter_ioremap = aac_rx_ioremap;
615 dev->a_ops.adapter_comm = aac_rx_select_comm;
539 616
540 retval = _aac_rx_init(dev); 617 return _aac_rx_init(dev);
541 if (!retval) {
542 /*
543 * Tell the adapter that all is configured, and it can
544 * start accepting requests
545 */
546 aac_rx_start_adapter(dev);
547 }
548 return retval;
549} 618}
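
Note: rx.c now splits the legacy producer/consumer path and the newer message path into separate interrupt, enable-int and deliver routines, and aac_rx_select_comm() wires the chosen set into dev->a_ops once, instead of testing a flag on every interrupt. A self-contained sketch of that selection pattern (generic names, not the driver's):

enum comm_method { COMM_PRODUCER, COMM_MESSAGE };

struct comm_ops {
        void (*enable_int)(void *dev);
        int  (*deliver)(void *dev, void *fib);
};

static void producer_enable_int(void *dev) { /* unmask doorbell interrupts */ }
static int  producer_deliver(void *dev, void *fib) { return 0; }
static void message_enable_int(void *dev)  { /* unmask the message unit */ }
static int  message_deliver(void *dev, void *fib)  { return 0; }

/* fill the dispatch table once; hot paths then just call through it */
static int select_comm(struct comm_ops *ops, enum comm_method method)
{
        switch (method) {
        case COMM_PRODUCER:
                ops->enable_int = producer_enable_int;
                ops->deliver    = producer_deliver;
                return 0;
        case COMM_MESSAGE:
                ops->enable_int = message_enable_int;
                ops->deliver    = message_deliver;
                return 0;
        default:
                return 1;       /* unknown method, mirroring the driver's error return */
        }
}
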
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 511b0a938fb1..8535db068c2f 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -92,6 +92,17 @@ static void aac_sa_disable_interrupt (struct aac_dev *dev)
92} 92}
93 93
94/** 94/**
95 * aac_sa_enable_interrupt - enable interrupt
96 * @dev: Which adapter to enable.
97 */
98
99static void aac_sa_enable_interrupt (struct aac_dev *dev)
100{
101 sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, (PrintfReady | DOORBELL_1 |
102 DOORBELL_2 | DOORBELL_3 | DOORBELL_4));
103}
104
105/**
95 * aac_sa_notify_adapter - handle adapter notification 106 * aac_sa_notify_adapter - handle adapter notification
96 * @dev: Adapter that notification is for 107 * @dev: Adapter that notification is for
 97 * @event: Event to notify 108 * @event: Event to notify
@@ -347,32 +358,36 @@ int aac_sa_init(struct aac_dev *dev)
347 msleep(1); 358 msleep(1);
348 } 359 }
349 360
350 if (request_irq(dev->scsi_host_ptr->irq, aac_sa_intr, IRQF_SHARED|IRQF_DISABLED, "aacraid", (void *)dev ) < 0) {
351 printk(KERN_WARNING "%s%d: Interrupt unavailable.\n", name, instance);
352 goto error_iounmap;
353 }
354
355 /* 361 /*
356 * Fill in the function dispatch table. 362 * Fill in the function dispatch table.
357 */ 363 */
358 364
359 dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter; 365 dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter;
360 dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt; 366 dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt;
367 dev->a_ops.adapter_enable_int = aac_sa_enable_interrupt;
361 dev->a_ops.adapter_notify = aac_sa_notify_adapter; 368 dev->a_ops.adapter_notify = aac_sa_notify_adapter;
362 dev->a_ops.adapter_sync_cmd = sa_sync_cmd; 369 dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
363 dev->a_ops.adapter_check_health = aac_sa_check_health; 370 dev->a_ops.adapter_check_health = aac_sa_check_health;
371 dev->a_ops.adapter_intr = aac_sa_intr;
364 dev->a_ops.adapter_ioremap = aac_sa_ioremap; 372 dev->a_ops.adapter_ioremap = aac_sa_ioremap;
365 373
366 /* 374 /*
 367 * First clear out all interrupts. Then enable the ones that 375 * First clear out all interrupts. Then enable the ones that
368 * we can handle. 376 * we can handle.
369 */ 377 */
370 sa_writew(dev, SaDbCSR.PRISETIRQMASK, 0xffff); 378 aac_adapter_disable_int(dev);
371 sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, (PrintfReady | DOORBELL_1 | 379 aac_adapter_enable_int(dev);
372 DOORBELL_2 | DOORBELL_3 | DOORBELL_4));
373 380
374 if(aac_init_adapter(dev) == NULL) 381 if(aac_init_adapter(dev) == NULL)
375 goto error_irq; 382 goto error_irq;
383 if (request_irq(dev->scsi_host_ptr->irq, dev->a_ops.adapter_intr,
384 IRQF_SHARED|IRQF_DISABLED,
385 "aacraid", (void *)dev ) < 0) {
386 printk(KERN_WARNING "%s%d: Interrupt unavailable.\n",
387 name, instance);
388 goto error_iounmap;
389 }
390 aac_adapter_enable_int(dev);
376 391
377 /* 392 /*
 378 * Tell the adapter that all is configured, and it can start 393 * Tell the adapter that all is configured, and it can start
@@ -382,7 +397,7 @@ int aac_sa_init(struct aac_dev *dev)
382 return 0; 397 return 0;
383 398
384error_irq: 399error_irq:
385 sa_writew(dev, SaDbCSR.PRISETIRQMASK, 0xffff); 400 aac_sa_disable_interrupt(dev);
386 free_irq(dev->scsi_host_ptr->irq, (void *)dev); 401 free_irq(dev->scsi_host_ptr->irq, (void *)dev);
387 402
388error_iounmap: 403error_iounmap:
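
Note: both the sa.c and rx.c init paths now request the IRQ only after aac_init_adapter() has chosen the handler through the dispatch table, and unmask adapter interrupts only once the handler is registered. A minimal sketch of that ordering, with an illustrative wrapper name that is not part of the driver:

#include <linux/interrupt.h>

static int aac_bringup_irq(unsigned int irq,
                           irqreturn_t (*handler)(int, void *), void *dev)
{
        int err;

        /* register the handler picked via a_ops.adapter_intr first... */
        err = request_irq(irq, handler, IRQF_SHARED | IRQF_DISABLED,
                          "aacraid", dev);
        if (err)
                return err;
        /* ...only then is it safe to unmask the adapter's interrupt sources */
        return 0;
}
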
diff --git a/drivers/scsi/aic94xx/aic94xx_dev.c b/drivers/scsi/aic94xx/aic94xx_dev.c
index 6f8901b748f7..c520e5b41fb5 100644
--- a/drivers/scsi/aic94xx/aic94xx_dev.c
+++ b/drivers/scsi/aic94xx/aic94xx_dev.c
@@ -37,18 +37,14 @@
37 37
38static inline int asd_get_ddb(struct asd_ha_struct *asd_ha) 38static inline int asd_get_ddb(struct asd_ha_struct *asd_ha)
39{ 39{
40 unsigned long flags;
41 int ddb, i; 40 int ddb, i;
42 41
43 spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags);
44 ddb = FIND_FREE_DDB(asd_ha); 42 ddb = FIND_FREE_DDB(asd_ha);
45 if (ddb >= asd_ha->hw_prof.max_ddbs) { 43 if (ddb >= asd_ha->hw_prof.max_ddbs) {
46 ddb = -ENOMEM; 44 ddb = -ENOMEM;
47 spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags);
48 goto out; 45 goto out;
49 } 46 }
50 SET_DDB(ddb, asd_ha); 47 SET_DDB(ddb, asd_ha);
51 spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags);
52 48
53 for (i = 0; i < sizeof(struct asd_ddb_ssp_smp_target_port); i+= 4) 49 for (i = 0; i < sizeof(struct asd_ddb_ssp_smp_target_port); i+= 4)
54 asd_ddbsite_write_dword(asd_ha, ddb, i, 0); 50 asd_ddbsite_write_dword(asd_ha, ddb, i, 0);
@@ -77,14 +73,10 @@ out:
77 73
78static inline void asd_free_ddb(struct asd_ha_struct *asd_ha, int ddb) 74static inline void asd_free_ddb(struct asd_ha_struct *asd_ha, int ddb)
79{ 75{
80 unsigned long flags;
81
82 if (!ddb || ddb >= 0xFFFF) 76 if (!ddb || ddb >= 0xFFFF)
83 return; 77 return;
84 asd_ddbsite_write_byte(asd_ha, ddb, DDB_TYPE, DDB_TYPE_UNUSED); 78 asd_ddbsite_write_byte(asd_ha, ddb, DDB_TYPE, DDB_TYPE_UNUSED);
85 spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags);
86 CLEAR_DDB(ddb, asd_ha); 79 CLEAR_DDB(ddb, asd_ha);
87 spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags);
88} 80}
89 81
90static inline void asd_set_ddb_type(struct domain_device *dev) 82static inline void asd_set_ddb_type(struct domain_device *dev)
@@ -320,8 +312,11 @@ out:
320 312
321int asd_dev_found(struct domain_device *dev) 313int asd_dev_found(struct domain_device *dev)
322{ 314{
315 unsigned long flags;
323 int res = 0; 316 int res = 0;
317 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
324 318
319 spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags);
325 switch (dev->dev_type) { 320 switch (dev->dev_type) {
326 case SATA_PM: 321 case SATA_PM:
327 res = asd_init_sata_pm_ddb(dev); 322 res = asd_init_sata_pm_ddb(dev);
@@ -335,14 +330,18 @@ int asd_dev_found(struct domain_device *dev)
335 else 330 else
336 res = asd_init_initiator_ddb(dev); 331 res = asd_init_initiator_ddb(dev);
337 } 332 }
333 spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags);
334
338 return res; 335 return res;
339} 336}
340 337
341void asd_dev_gone(struct domain_device *dev) 338void asd_dev_gone(struct domain_device *dev)
342{ 339{
343 int ddb, sister_ddb; 340 int ddb, sister_ddb;
341 unsigned long flags;
344 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; 342 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
345 343
344 spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags);
346 ddb = (int) (unsigned long) dev->lldd_dev; 345 ddb = (int) (unsigned long) dev->lldd_dev;
347 sister_ddb = asd_ddbsite_read_word(asd_ha, ddb, SISTER_DDB); 346 sister_ddb = asd_ddbsite_read_word(asd_ha, ddb, SISTER_DDB);
348 347
@@ -350,4 +349,5 @@ void asd_dev_gone(struct domain_device *dev)
350 asd_free_ddb(asd_ha, sister_ddb); 349 asd_free_ddb(asd_ha, sister_ddb);
351 asd_free_ddb(asd_ha, ddb); 350 asd_free_ddb(asd_ha, ddb);
352 dev->lldd_dev = NULL; 351 dev->lldd_dev = NULL;
352 spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags);
353} 353}
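
Note: in aic94xx_dev.c the ddb_lock moves out of the small get/free helpers and up into asd_dev_found()/asd_dev_gone(), so the free-DDB search, the bitmap update and the DDB-site initialisation happen as one atomic unit. A stripped-down sketch of the lock-at-the-caller pattern (names are illustrative):

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/errno.h>

#define MAX_DDBS 128

struct ddb_pool {
        spinlock_t lock;
        DECLARE_BITMAP(map, MAX_DDBS);
};

/* caller holds pool->lock */
static int ddb_get_locked(struct ddb_pool *pool)
{
        int ddb = find_first_zero_bit(pool->map, MAX_DDBS);

        if (ddb >= MAX_DDBS)
                return -ENOMEM;
        set_bit(ddb, pool->map);
        return ddb;
}

static int ddb_setup(struct ddb_pool *pool)
{
        unsigned long flags;
        int ddb;

        spin_lock_irqsave(&pool->lock, flags);
        ddb = ddb_get_locked(pool);
        /* ...initialise the DDB site here, still under the lock... */
        spin_unlock_irqrestore(&pool->lock, flags);
        return ddb;
}
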
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index fbc82b00a418..3aa568fbdbf7 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -38,7 +38,7 @@
38#include "aic94xx_seq.h" 38#include "aic94xx_seq.h"
39 39
40/* The format is "version.release.patchlevel" */ 40/* The format is "version.release.patchlevel" */
41#define ASD_DRIVER_VERSION "1.0.2" 41#define ASD_DRIVER_VERSION "1.0.3"
42 42
43static int use_msi = 0; 43static int use_msi = 0;
44module_param_named(use_msi, use_msi, int, S_IRUGO); 44module_param_named(use_msi, use_msi, int, S_IRUGO);
@@ -57,6 +57,8 @@ MODULE_PARM_DESC(collector, "\n"
57char sas_addr_str[2*SAS_ADDR_SIZE + 1] = ""; 57char sas_addr_str[2*SAS_ADDR_SIZE + 1] = "";
58 58
59static struct scsi_transport_template *aic94xx_transport_template; 59static struct scsi_transport_template *aic94xx_transport_template;
60static int asd_scan_finished(struct Scsi_Host *, unsigned long);
61static void asd_scan_start(struct Scsi_Host *);
60 62
61static struct scsi_host_template aic94xx_sht = { 63static struct scsi_host_template aic94xx_sht = {
62 .module = THIS_MODULE, 64 .module = THIS_MODULE,
@@ -66,6 +68,8 @@ static struct scsi_host_template aic94xx_sht = {
66 .target_alloc = sas_target_alloc, 68 .target_alloc = sas_target_alloc,
67 .slave_configure = sas_slave_configure, 69 .slave_configure = sas_slave_configure,
68 .slave_destroy = sas_slave_destroy, 70 .slave_destroy = sas_slave_destroy,
71 .scan_finished = asd_scan_finished,
72 .scan_start = asd_scan_start,
69 .change_queue_depth = sas_change_queue_depth, 73 .change_queue_depth = sas_change_queue_depth,
70 .change_queue_type = sas_change_queue_type, 74 .change_queue_type = sas_change_queue_type,
71 .bios_param = sas_bios_param, 75 .bios_param = sas_bios_param,
@@ -75,6 +79,7 @@ static struct scsi_host_template aic94xx_sht = {
75 .sg_tablesize = SG_ALL, 79 .sg_tablesize = SG_ALL,
76 .max_sectors = SCSI_DEFAULT_MAX_SECTORS, 80 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
77 .use_clustering = ENABLE_CLUSTERING, 81 .use_clustering = ENABLE_CLUSTERING,
82 .eh_device_reset_handler = sas_eh_device_reset_handler,
78}; 83};
79 84
80static int __devinit asd_map_memio(struct asd_ha_struct *asd_ha) 85static int __devinit asd_map_memio(struct asd_ha_struct *asd_ha)
@@ -234,7 +239,7 @@ static int __devinit asd_common_setup(struct asd_ha_struct *asd_ha)
234 } 239 }
235 /* Provide some sane default values. */ 240 /* Provide some sane default values. */
236 asd_ha->hw_prof.max_scbs = 512; 241 asd_ha->hw_prof.max_scbs = 512;
237 asd_ha->hw_prof.max_ddbs = 128; 242 asd_ha->hw_prof.max_ddbs = ASD_MAX_DDBS;
238 asd_ha->hw_prof.num_phys = ASD_MAX_PHYS; 243 asd_ha->hw_prof.num_phys = ASD_MAX_PHYS;
239 /* All phys are enabled, by default. */ 244 /* All phys are enabled, by default. */
240 asd_ha->hw_prof.enabled_phys = 0xFF; 245 asd_ha->hw_prof.enabled_phys = 0xFF;
@@ -526,6 +531,7 @@ static int asd_register_sas_ha(struct asd_ha_struct *asd_ha)
526 asd_ha->sas_ha.num_phys= ASD_MAX_PHYS; 531 asd_ha->sas_ha.num_phys= ASD_MAX_PHYS;
527 532
528 asd_ha->sas_ha.lldd_queue_size = asd_ha->seq.can_queue; 533 asd_ha->sas_ha.lldd_queue_size = asd_ha->seq.can_queue;
534 asd_ha->sas_ha.lldd_max_execute_num = lldd_max_execute_num;
529 535
530 return sas_register_ha(&asd_ha->sas_ha); 536 return sas_register_ha(&asd_ha->sas_ha);
531} 537}
@@ -671,21 +677,10 @@ static int __devinit asd_pci_probe(struct pci_dev *dev,
671 if (err) 677 if (err)
672 goto Err_reg_sas; 678 goto Err_reg_sas;
673 679
674 err = asd_enable_phys(asd_ha, asd_ha->hw_prof.enabled_phys); 680 scsi_scan_host(shost);
675 if (err) {
676 asd_printk("coudln't enable phys, err:%d\n", err);
677 goto Err_en_phys;
678 }
679 ASD_DPRINTK("enabled phys\n");
680 /* give the phy enabling interrupt event time to come in (1s
681 * is empirically about all it takes) */
682 ssleep(1);
683 /* Wait for discovery to finish */
684 scsi_flush_work(asd_ha->sas_ha.core.shost);
685 681
686 return 0; 682 return 0;
687Err_en_phys: 683
688 asd_unregister_sas_ha(asd_ha);
689Err_reg_sas: 684Err_reg_sas:
690 asd_remove_dev_attrs(asd_ha); 685 asd_remove_dev_attrs(asd_ha);
691Err_dev_attrs: 686Err_dev_attrs:
@@ -778,6 +773,28 @@ static void __devexit asd_pci_remove(struct pci_dev *dev)
778 return; 773 return;
779} 774}
780 775
776static void asd_scan_start(struct Scsi_Host *shost)
777{
778 struct asd_ha_struct *asd_ha;
779 int err;
780
781 asd_ha = SHOST_TO_SAS_HA(shost)->lldd_ha;
782 err = asd_enable_phys(asd_ha, asd_ha->hw_prof.enabled_phys);
783 if (err)
784 asd_printk("Couldn't enable phys, err:%d\n", err);
785}
786
787static int asd_scan_finished(struct Scsi_Host *shost, unsigned long time)
788{
789 /* give the phy enabling interrupt event time to come in (1s
790 * is empirically about all it takes) */
791 if (time < HZ)
792 return 0;
793 /* Wait for discovery to finish */
794 scsi_flush_work(shost);
795 return 1;
796}
797
781static ssize_t asd_version_show(struct device_driver *driver, char *buf) 798static ssize_t asd_version_show(struct device_driver *driver, char *buf)
782{ 799{
783 return snprintf(buf, PAGE_SIZE, "%s\n", ASD_DRIVER_VERSION); 800 return snprintf(buf, PAGE_SIZE, "%s\n", ASD_DRIVER_VERSION);
@@ -885,6 +902,7 @@ static void __exit aic94xx_exit(void)
885 asd_remove_driver_attrs(&aic94xx_pci_driver.driver); 902 asd_remove_driver_attrs(&aic94xx_pci_driver.driver);
886 pci_unregister_driver(&aic94xx_pci_driver); 903 pci_unregister_driver(&aic94xx_pci_driver);
887 sas_release_transport(aic94xx_transport_template); 904 sas_release_transport(aic94xx_transport_template);
905 asd_release_firmware();
888 asd_destroy_global_caches(); 906 asd_destroy_global_caches();
889 asd_printk("%s version %s unloaded\n", ASD_DRIVER_DESCRIPTION, 907 asd_printk("%s version %s unloaded\n", ASD_DRIVER_DESCRIPTION,
890 ASD_DRIVER_VERSION); 908 ASD_DRIVER_VERSION);
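
Note: the probe path now defers discovery to the SCSI midlayer's asynchronous scan hooks instead of sleeping in probe: scsi_scan_host() triggers .scan_start, and the midlayer then polls .scan_finished with the time elapsed since the scan began. A rough sketch of that contract, using illustrative function names:

#include <linux/jiffies.h>
#include <scsi/scsi_host.h>

static void my_scan_start(struct Scsi_Host *shost)
{
        /* kick off phy enabling / discovery; must not block for long */
}

static int my_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
        if (time < HZ)          /* give link-up events about a second */
                return 0;       /* not done yet, poll again */
        scsi_flush_work(shost); /* drain queued discovery work */
        return 1;               /* scanning may complete */
}
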
diff --git a/drivers/scsi/aic94xx/aic94xx_sas.h b/drivers/scsi/aic94xx/aic94xx_sas.h
index 9050e93bfd5e..fa7c5290257d 100644
--- a/drivers/scsi/aic94xx/aic94xx_sas.h
+++ b/drivers/scsi/aic94xx/aic94xx_sas.h
@@ -34,6 +34,7 @@
34 * domain that this sequencer can maintain low-level connections for 34 * domain that this sequencer can maintain low-level connections for
 35 * us. They are 64 bytes. 35 * us. They are 64 bytes.
36 */ 36 */
37#define ASD_MAX_DDBS 128
37 38
38struct asd_ddb_ssp_smp_target_port { 39struct asd_ddb_ssp_smp_target_port {
39 u8 conn_type; /* byte 0 */ 40 u8 conn_type; /* byte 0 */
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index 75ed6b0569d1..8f43ff772f23 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -413,40 +413,6 @@ void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id)
413 } 413 }
414} 414}
415 415
416/* hard reset a phy later */
417static void do_phy_reset_later(struct work_struct *work)
418{
419 struct sas_phy *sas_phy =
420 container_of(work, struct sas_phy, reset_work);
421 int error;
422
423 ASD_DPRINTK("%s: About to hard reset phy %d\n", __FUNCTION__,
424 sas_phy->identify.phy_identifier);
425 /* Reset device port */
426 error = sas_phy_reset(sas_phy, 1);
427 if (error)
428 ASD_DPRINTK("%s: Hard reset of phy %d failed (%d).\n",
429 __FUNCTION__, sas_phy->identify.phy_identifier, error);
430}
431
432static void phy_reset_later(struct sas_phy *sas_phy, struct Scsi_Host *shost)
433{
434 INIT_WORK(&sas_phy->reset_work, do_phy_reset_later);
435 queue_work(shost->work_q, &sas_phy->reset_work);
436}
437
438/* start up the ABORT TASK tmf... */
439static void task_kill_later(struct asd_ascb *ascb)
440{
441 struct asd_ha_struct *asd_ha = ascb->ha;
442 struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
443 struct Scsi_Host *shost = sas_ha->core.shost;
444 struct sas_task *task = ascb->uldd_task;
445
446 INIT_WORK(&task->abort_work, sas_task_abort);
447 queue_work(shost->work_q, &task->abort_work);
448}
449
450static void escb_tasklet_complete(struct asd_ascb *ascb, 416static void escb_tasklet_complete(struct asd_ascb *ascb,
451 struct done_list_struct *dl) 417 struct done_list_struct *dl)
452{ 418{
@@ -479,26 +445,55 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
479 case REQ_TASK_ABORT: { 445 case REQ_TASK_ABORT: {
480 struct asd_ascb *a, *b; 446 struct asd_ascb *a, *b;
481 u16 tc_abort; 447 u16 tc_abort;
448 struct domain_device *failed_dev = NULL;
449
450 ASD_DPRINTK("%s: REQ_TASK_ABORT, reason=0x%X\n",
451 __FUNCTION__, dl->status_block[3]);
482 452
453 /*
454 * Find the task that caused the abort and abort it first.
455 * The sequencer won't put anything on the done list until
456 * that happens.
457 */
483 tc_abort = *((u16*)(&dl->status_block[1])); 458 tc_abort = *((u16*)(&dl->status_block[1]));
484 tc_abort = le16_to_cpu(tc_abort); 459 tc_abort = le16_to_cpu(tc_abort);
485 460
486 ASD_DPRINTK("%s: REQ_TASK_ABORT, reason=0x%X\n", 461 list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) {
487 __FUNCTION__, dl->status_block[3]); 462 struct sas_task *task = ascb->uldd_task;
488 463
489 /* Find the pending task and abort it. */ 464 if (task && a->tc_index == tc_abort) {
490 list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) 465 failed_dev = task->dev;
491 if (a->tc_index == tc_abort) { 466 sas_task_abort(task);
492 task_kill_later(a);
493 break; 467 break;
494 } 468 }
469 }
470
471 if (!failed_dev) {
472 ASD_DPRINTK("%s: Can't find task (tc=%d) to abort!\n",
473 __FUNCTION__, tc_abort);
474 goto out;
475 }
476
477 /*
478 * Now abort everything else for that device (hba?) so
479 * that the EH will wake up and do something.
480 */
481 list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) {
482 struct sas_task *task = ascb->uldd_task;
483
484 if (task &&
485 task->dev == failed_dev &&
486 a->tc_index != tc_abort)
487 sas_task_abort(task);
488 }
489
495 goto out; 490 goto out;
496 } 491 }
497 case REQ_DEVICE_RESET: { 492 case REQ_DEVICE_RESET: {
498 struct Scsi_Host *shost = sas_ha->core.shost;
499 struct sas_phy *dev_phy;
500 struct asd_ascb *a; 493 struct asd_ascb *a;
501 u16 conn_handle; 494 u16 conn_handle;
495 unsigned long flags;
496 struct sas_task *last_dev_task = NULL;
502 497
503 conn_handle = *((u16*)(&dl->status_block[1])); 498 conn_handle = *((u16*)(&dl->status_block[1]));
504 conn_handle = le16_to_cpu(conn_handle); 499 conn_handle = le16_to_cpu(conn_handle);
@@ -506,32 +501,47 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
506 ASD_DPRINTK("%s: REQ_DEVICE_RESET, reason=0x%X\n", __FUNCTION__, 501 ASD_DPRINTK("%s: REQ_DEVICE_RESET, reason=0x%X\n", __FUNCTION__,
507 dl->status_block[3]); 502 dl->status_block[3]);
508 503
509 /* Kill all pending tasks and reset the device */ 504 /* Find the last pending task for the device... */
510 dev_phy = NULL;
511 list_for_each_entry(a, &asd_ha->seq.pend_q, list) { 505 list_for_each_entry(a, &asd_ha->seq.pend_q, list) {
512 struct sas_task *task;
513 struct domain_device *dev;
514 u16 x; 506 u16 x;
507 struct domain_device *dev;
508 struct sas_task *task = a->uldd_task;
515 509
516 task = a->uldd_task;
517 if (!task) 510 if (!task)
518 continue; 511 continue;
519 dev = task->dev; 512 dev = task->dev;
520 513
521 x = (unsigned long)dev->lldd_dev; 514 x = (unsigned long)dev->lldd_dev;
522 if (x == conn_handle) { 515 if (x == conn_handle)
523 dev_phy = dev->port->phy; 516 last_dev_task = task;
524 task_kill_later(a);
525 }
526 } 517 }
527 518
528 /* Reset device port */ 519 if (!last_dev_task) {
529 if (!dev_phy) { 520 ASD_DPRINTK("%s: Device reset for idle device %d?\n",
530 ASD_DPRINTK("%s: No pending commands; can't reset.\n", 521 __FUNCTION__, conn_handle);
531 __FUNCTION__);
532 goto out; 522 goto out;
533 } 523 }
534 phy_reset_later(dev_phy, shost); 524
525 /* ...and set the reset flag */
526 spin_lock_irqsave(&last_dev_task->task_state_lock, flags);
527 last_dev_task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
528 spin_unlock_irqrestore(&last_dev_task->task_state_lock, flags);
529
530 /* Kill all pending tasks for the device */
531 list_for_each_entry(a, &asd_ha->seq.pend_q, list) {
532 u16 x;
533 struct domain_device *dev;
534 struct sas_task *task = a->uldd_task;
535
536 if (!task)
537 continue;
538 dev = task->dev;
539
540 x = (unsigned long)dev->lldd_dev;
541 if (x == conn_handle)
542 sas_task_abort(task);
543 }
544
535 goto out; 545 goto out;
536 } 546 }
537 case SIGNAL_NCQ_ERROR: 547 case SIGNAL_NCQ_ERROR:
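
Note: the REQ_TASK_ABORT / REQ_DEVICE_RESET paths above stop queueing their own reset work and instead hand the problem to the SCSI error handler: the affected task is marked under its state lock and then aborted with sas_task_abort(). A condensed sketch of that handoff, assuming the libsas definitions this series introduces:

#include <scsi/libsas.h>

static void flag_task_for_device_reset(struct sas_task *task)
{
        unsigned long flags;

        /* record that the EH should reset the device owning this task */
        spin_lock_irqsave(&task->task_state_lock, flags);
        task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
        spin_unlock_irqrestore(&task->task_state_lock, flags);

        /* aborting the task wakes the error handler, which then sees the flag */
        sas_task_abort(task);
}
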
diff --git a/drivers/scsi/aic94xx/aic94xx_sds.c b/drivers/scsi/aic94xx/aic94xx_sds.c
index e5a0ec37e954..5b0932f61473 100644
--- a/drivers/scsi/aic94xx/aic94xx_sds.c
+++ b/drivers/scsi/aic94xx/aic94xx_sds.c
@@ -427,7 +427,7 @@ struct asd_manuf_sec {
427 427
428struct asd_manuf_phy_desc { 428struct asd_manuf_phy_desc {
429 u8 state; /* low 4 bits */ 429 u8 state; /* low 4 bits */
430#define MS_PHY_STATE_ENABLEABLE 0 430#define MS_PHY_STATE_ENABLED 0
431#define MS_PHY_STATE_REPORTED 1 431#define MS_PHY_STATE_REPORTED 1
432#define MS_PHY_STATE_HIDDEN 2 432#define MS_PHY_STATE_HIDDEN 2
433 u8 phy_id; 433 u8 phy_id;
@@ -756,11 +756,11 @@ static void *asd_find_ll_by_id(void * const start, const u8 id0, const u8 id1)
756 * 756 *
757 * HIDDEN phys do not count in the total count. REPORTED phys cannot 757 * HIDDEN phys do not count in the total count. REPORTED phys cannot
758 * be enabled but are reported and counted towards the total. 758 * be enabled but are reported and counted towards the total.
759 * ENEBLEABLE phys are enabled by default and count towards the total. 759 * ENABLED phys are enabled by default and count towards the total.
760 * The absolute total phy number is ASD_MAX_PHYS. hw_prof->num_phys 760 * The absolute total phy number is ASD_MAX_PHYS. hw_prof->num_phys
761 * merely specifies the number of phys the host adapter decided to 761 * merely specifies the number of phys the host adapter decided to
762 * report. E.g., it is possible for phys 0, 1 and 2 to be HIDDEN, 762 * report. E.g., it is possible for phys 0, 1 and 2 to be HIDDEN,
763 * phys 3, 4 and 5 to be REPORTED and phys 6 and 7 to be ENEBLEABLE. 763 * phys 3, 4 and 5 to be REPORTED and phys 6 and 7 to be ENABLED.
764 * In this case ASD_MAX_PHYS is 8, hw_prof->num_phys is 5, and only 2 764 * In this case ASD_MAX_PHYS is 8, hw_prof->num_phys is 5, and only 2
765 * are actually enabled (enabled by default, max number of phys 765 * are actually enabled (enabled by default, max number of phys
766 * enableable in this case). 766 * enableable in this case).
@@ -816,8 +816,8 @@ static int asd_ms_get_phy_params(struct asd_ha_struct *asd_ha,
816 asd_ha->hw_prof.enabled_phys &= ~(1 << i); 816 asd_ha->hw_prof.enabled_phys &= ~(1 << i);
817 rep_phys++; 817 rep_phys++;
818 continue; 818 continue;
819 case MS_PHY_STATE_ENABLEABLE: 819 case MS_PHY_STATE_ENABLED:
820 ASD_DPRINTK("ms: phy%d: ENEBLEABLE\n", i); 820 ASD_DPRINTK("ms: phy%d: ENABLED\n", i);
821 asd_ha->hw_prof.enabled_phys |= (1 << i); 821 asd_ha->hw_prof.enabled_phys |= (1 << i);
822 en_phys++; 822 en_phys++;
823 break; 823 break;
diff --git a/drivers/scsi/aic94xx/aic94xx_seq.c b/drivers/scsi/aic94xx/aic94xx_seq.c
index 845112539d05..2768fe4d66ba 100644
--- a/drivers/scsi/aic94xx/aic94xx_seq.c
+++ b/drivers/scsi/aic94xx/aic94xx_seq.c
@@ -907,6 +907,16 @@ static void asd_init_scb_sites(struct asd_ha_struct *asd_ha)
907 for (i = 0; i < ASD_SCB_SIZE; i += 4) 907 for (i = 0; i < ASD_SCB_SIZE; i += 4)
908 asd_scbsite_write_dword(asd_ha, site_no, i, 0); 908 asd_scbsite_write_dword(asd_ha, site_no, i, 0);
909 909
910 /* Initialize SCB Site Opcode field to invalid. */
911 asd_scbsite_write_byte(asd_ha, site_no,
912 offsetof(struct scb_header, opcode),
913 0xFF);
914
915 /* Initialize SCB Site Flags field to mean a response
 916 * frame has been received. This causes inadvertently
 917 * received frames to be dropped. */
918 asd_scbsite_write_byte(asd_ha, site_no, 0x49, 0x01);
919
910 /* Workaround needed by SEQ to fix a SATA issue is to exclude 920 /* Workaround needed by SEQ to fix a SATA issue is to exclude
911 * certain SCB sites from the free list. */ 921 * certain SCB sites from the free list. */
912 if (!SCB_SITE_VALID(site_no)) 922 if (!SCB_SITE_VALID(site_no))
@@ -922,16 +932,6 @@ static void asd_init_scb_sites(struct asd_ha_struct *asd_ha)
922 /* Q_NEXT field of the last SCB is invalidated. */ 932 /* Q_NEXT field of the last SCB is invalidated. */
923 asd_scbsite_write_word(asd_ha, site_no, 0, first_scb_site_no); 933 asd_scbsite_write_word(asd_ha, site_no, 0, first_scb_site_no);
924 934
925 /* Initialize SCB Site Opcode field to invalid. */
926 asd_scbsite_write_byte(asd_ha, site_no,
927 offsetof(struct scb_header, opcode),
928 0xFF);
929
930 /* Initialize SCB Site Flags field to mean a response
 931 * frame has been received. This causes inadvertently
 932 * received frames to be dropped. */
933 asd_scbsite_write_byte(asd_ha, site_no, 0x49, 0x01);
934
935 first_scb_site_no = site_no; 935 first_scb_site_no = site_no;
936 max_scbs++; 936 max_scbs++;
937 } 937 }
@@ -1173,6 +1173,16 @@ static void asd_init_ddb_0(struct asd_ha_struct *asd_ha)
1173 set_bit(0, asd_ha->hw_prof.ddb_bitmap); 1173 set_bit(0, asd_ha->hw_prof.ddb_bitmap);
1174} 1174}
1175 1175
1176static void asd_seq_init_ddb_sites(struct asd_ha_struct *asd_ha)
1177{
1178 unsigned int i;
1179 unsigned int ddb_site;
1180
1181 for (ddb_site = 0 ; ddb_site < ASD_MAX_DDBS; ddb_site++)
1182 for (i = 0; i < sizeof(struct asd_ddb_ssp_smp_target_port); i+= 4)
1183 asd_ddbsite_write_dword(asd_ha, ddb_site, i, 0);
1184}
1185
1176/** 1186/**
1177 * asd_seq_setup_seqs -- setup and initialize central and link sequencers 1187 * asd_seq_setup_seqs -- setup and initialize central and link sequencers
1178 * @asd_ha: pointer to host adapter structure 1188 * @asd_ha: pointer to host adapter structure
@@ -1182,6 +1192,9 @@ static void asd_seq_setup_seqs(struct asd_ha_struct *asd_ha)
1182 int lseq; 1192 int lseq;
1183 u8 lseq_mask; 1193 u8 lseq_mask;
1184 1194
1195 /* Initialize DDB sites */
1196 asd_seq_init_ddb_sites(asd_ha);
1197
1185 /* Initialize SCB sites. Done first to compute some values which 1198 /* Initialize SCB sites. Done first to compute some values which
1186 * the rest of the init code depends on. */ 1199 * the rest of the init code depends on. */
1187 asd_init_scb_sites(asd_ha); 1200 asd_init_scb_sites(asd_ha);
@@ -1232,6 +1245,13 @@ static int asd_seq_start_lseq(struct asd_ha_struct *asd_ha, int lseq)
1232 return asd_seq_unpause_lseq(asd_ha, lseq); 1245 return asd_seq_unpause_lseq(asd_ha, lseq);
1233} 1246}
1234 1247
1248int asd_release_firmware(void)
1249{
1250 if (sequencer_fw)
1251 release_firmware(sequencer_fw);
1252 return 0;
1253}
1254
1235static int asd_request_firmware(struct asd_ha_struct *asd_ha) 1255static int asd_request_firmware(struct asd_ha_struct *asd_ha)
1236{ 1256{
1237 int err, i; 1257 int err, i;
@@ -1375,7 +1395,9 @@ void asd_update_port_links(struct asd_ha_struct *asd_ha, struct asd_phy *phy)
1375 u8 phy_is_up; 1395 u8 phy_is_up;
1376 u8 mask; 1396 u8 mask;
1377 int i, err; 1397 int i, err;
1398 unsigned long flags;
1378 1399
1400 spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags);
1379 for_each_phy(phy_mask, mask, i) 1401 for_each_phy(phy_mask, mask, i)
1380 asd_ddbsite_write_byte(asd_ha, 0, 1402 asd_ddbsite_write_byte(asd_ha, 0,
1381 offsetof(struct asd_ddb_seq_shared, 1403 offsetof(struct asd_ddb_seq_shared,
@@ -1395,6 +1417,7 @@ void asd_update_port_links(struct asd_ha_struct *asd_ha, struct asd_phy *phy)
1395 break; 1417 break;
1396 } 1418 }
1397 } 1419 }
1420 spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags);
1398 1421
1399 if (err) 1422 if (err)
1400 asd_printk("couldn't update DDB 0:error:%d\n", err); 1423 asd_printk("couldn't update DDB 0:error:%d\n", err);
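
Note: the new asd_release_firmware() pairs the request_firmware() done on first probe with a release at module exit, since the sequencer image is cached for the module's lifetime. A small sketch of that pairing; the firmware file name below is only an assumption for illustration:

#include <linux/firmware.h>

static const struct firmware *seq_fw;   /* cached for the module lifetime */

static int seq_fw_grab(struct device *dev)
{
        if (seq_fw)
                return 0;               /* already loaded by an earlier probe */
        return request_firmware(&seq_fw, "aic94xx-seq.fw", dev); /* assumed name */
}

static void seq_fw_release(void)
{
        if (seq_fw)
                release_firmware(seq_fw);
        seq_fw = NULL;
}
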
diff --git a/drivers/scsi/aic94xx/aic94xx_seq.h b/drivers/scsi/aic94xx/aic94xx_seq.h
index 9e715e5496af..9437ff0ae3a4 100644
--- a/drivers/scsi/aic94xx/aic94xx_seq.h
+++ b/drivers/scsi/aic94xx/aic94xx_seq.h
@@ -63,6 +63,7 @@ int asd_pause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask);
63int asd_unpause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask); 63int asd_unpause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask);
64int asd_init_seqs(struct asd_ha_struct *asd_ha); 64int asd_init_seqs(struct asd_ha_struct *asd_ha);
65int asd_start_seqs(struct asd_ha_struct *asd_ha); 65int asd_start_seqs(struct asd_ha_struct *asd_ha);
66int asd_release_firmware(void);
66 67
67void asd_update_port_links(struct asd_ha_struct *asd_ha, struct asd_phy *phy); 68void asd_update_port_links(struct asd_ha_struct *asd_ha, struct asd_phy *phy);
68#endif 69#endif
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index d202ed5a6709..e2ad5bed9403 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -349,6 +349,7 @@ Again:
349 349
350 spin_lock_irqsave(&task->task_state_lock, flags); 350 spin_lock_irqsave(&task->task_state_lock, flags);
351 task->task_state_flags &= ~SAS_TASK_STATE_PENDING; 351 task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
352 task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
352 task->task_state_flags |= SAS_TASK_STATE_DONE; 353 task->task_state_flags |= SAS_TASK_STATE_DONE;
353 if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) { 354 if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) {
354 spin_unlock_irqrestore(&task->task_state_lock, flags); 355 spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -557,6 +558,7 @@ int asd_execute_task(struct sas_task *task, const int num,
557 struct sas_task *t = task; 558 struct sas_task *t = task;
558 struct asd_ascb *ascb = NULL, *a; 559 struct asd_ascb *ascb = NULL, *a;
559 struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; 560 struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
561 unsigned long flags;
560 562
561 res = asd_can_queue(asd_ha, num); 563 res = asd_can_queue(asd_ha, num);
562 if (res) 564 if (res)
@@ -599,6 +601,10 @@ int asd_execute_task(struct sas_task *task, const int num,
599 } 601 }
600 if (res) 602 if (res)
601 goto out_err_unmap; 603 goto out_err_unmap;
604
605 spin_lock_irqsave(&t->task_state_lock, flags);
606 t->task_state_flags |= SAS_TASK_AT_INITIATOR;
607 spin_unlock_irqrestore(&t->task_state_lock, flags);
602 } 608 }
603 list_del_init(&alist); 609 list_del_init(&alist);
604 610
@@ -617,6 +623,9 @@ out_err_unmap:
617 if (a == b) 623 if (a == b)
618 break; 624 break;
619 t = a->uldd_task; 625 t = a->uldd_task;
626 spin_lock_irqsave(&t->task_state_lock, flags);
627 t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
628 spin_unlock_irqrestore(&t->task_state_lock, flags);
620 switch (t->task_proto) { 629 switch (t->task_proto) {
621 case SATA_PROTO: 630 case SATA_PROTO:
622 case SAS_PROTO_STP: 631 case SAS_PROTO_STP:
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index 61234384503b..fd5269e086a6 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -566,6 +566,11 @@ static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun,
566 res = TMF_RESP_FUNC_ESUPP; 566 res = TMF_RESP_FUNC_ESUPP;
567 break; 567 break;
568 default: 568 default:
569 if (tmf == TMF_QUERY_TASK) {
570 ASD_DPRINTK("%s: QUERY_SSP_TASK response: 0x%x\n",
571 __FUNCTION__, res);
572 break;
573 }
569 ASD_DPRINTK("%s: converting result 0x%x to TMF_RESP_FUNC_FAILED\n", 574 ASD_DPRINTK("%s: converting result 0x%x to TMF_RESP_FUNC_FAILED\n",
570 __FUNCTION__, res); 575 __FUNCTION__, res);
571 res = TMF_RESP_FUNC_FAILED; 576 res = TMF_RESP_FUNC_FAILED;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index b318500785e5..95045e33710d 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -595,10 +595,8 @@ static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
595{ 595{
596 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX); 596 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
597 597
598 if (pcix_cmd_reg == 0) { 598 if (pcix_cmd_reg == 0)
599 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n"); 599 return 0;
600 return -EIO;
601 }
602 600
603 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD, 601 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
604 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) { 602 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
@@ -627,10 +625,6 @@ static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
627 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n"); 625 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
628 return -EIO; 626 return -EIO;
629 } 627 }
630 } else {
631 dev_err(&ioa_cfg->pdev->dev,
632 "Failed to setup PCI-X command register\n");
633 return -EIO;
634 } 628 }
635 629
636 return 0; 630 return 0;
@@ -6314,7 +6308,6 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
6314 int rc; 6308 int rc;
6315 6309
6316 ENTER; 6310 ENTER;
6317 pci_unblock_user_cfg_access(ioa_cfg->pdev);
6318 rc = pci_restore_state(ioa_cfg->pdev); 6311 rc = pci_restore_state(ioa_cfg->pdev);
6319 6312
6320 if (rc != PCIBIOS_SUCCESSFUL) { 6313 if (rc != PCIBIOS_SUCCESSFUL) {
@@ -6355,6 +6348,24 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
6355} 6348}
6356 6349
6357/** 6350/**
6351 * ipr_reset_bist_done - BIST has completed on the adapter.
6352 * @ipr_cmd: ipr command struct
6353 *
6354 * Description: Unblock config space and resume the reset process.
6355 *
6356 * Return value:
6357 * IPR_RC_JOB_CONTINUE
6358 **/
6359static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
6360{
6361 ENTER;
6362 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
6363 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
6364 LEAVE;
6365 return IPR_RC_JOB_CONTINUE;
6366}
6367
6368/**
6358 * ipr_reset_start_bist - Run BIST on the adapter. 6369 * ipr_reset_start_bist - Run BIST on the adapter.
6359 * @ipr_cmd: ipr command struct 6370 * @ipr_cmd: ipr command struct
6360 * 6371 *
@@ -6376,7 +6387,7 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
6376 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); 6387 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6377 rc = IPR_RC_JOB_CONTINUE; 6388 rc = IPR_RC_JOB_CONTINUE;
6378 } else { 6389 } else {
6379 ipr_cmd->job_step = ipr_reset_restore_cfg_space; 6390 ipr_cmd->job_step = ipr_reset_bist_done;
6380 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT); 6391 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
6381 rc = IPR_RC_JOB_RETURN; 6392 rc = IPR_RC_JOB_RETURN;
6382 } 6393 }
@@ -7166,9 +7177,6 @@ ipr_get_chip_cfg(const struct pci_device_id *dev_id)
7166{ 7177{
7167 int i; 7178 int i;
7168 7179
7169 if (dev_id->driver_data)
7170 return (const struct ipr_chip_cfg_t *)dev_id->driver_data;
7171
7172 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++) 7180 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
7173 if (ipr_chip[i].vendor == dev_id->vendor && 7181 if (ipr_chip[i].vendor == dev_id->vendor &&
7174 ipr_chip[i].device == dev_id->device) 7182 ipr_chip[i].device == dev_id->device)
@@ -7517,65 +7525,43 @@ static void ipr_shutdown(struct pci_dev *pdev)
7517 7525
7518static struct pci_device_id ipr_pci_table[] __devinitdata = { 7526static struct pci_device_id ipr_pci_table[] __devinitdata = {
7519 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, 7527 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7520 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 7528 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
7521 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7522 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, 7529 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7523 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 7530 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
7524 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7525 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, 7531 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7526 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 7532 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
7527 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7528 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, 7533 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7529 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 7534 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
7530 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7531 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, 7535 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7532 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 7536 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
7533 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7534 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, 7537 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7535 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 7538 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
7536 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7537 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, 7539 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7538 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 7540 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
7539 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7540 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, 7541 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7541 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 7542 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0, 0 },
7542 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7543 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, 7543 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7544 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 7544 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
7545 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7546 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, 7545 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7547 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 7546 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 0 },
7548 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7549 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, 7547 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7550 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 7548 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0, 0 },
7551 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7552 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7553 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
7554 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7555 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, 7549 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7556 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 7550 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
7557 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7558 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, 7551 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7559 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 7552 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 0 },
7560 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7561 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, 7553 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7562 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B8, 7554 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0, 0 },
7563 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7564 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, 7555 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7565 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 7556 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0, 0 },
7566 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7567 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, 7557 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
7568 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 7558 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
7569 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
7570 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, 7559 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7571 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 7560 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
7572 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
7573 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, 7561 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7574 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 7562 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0, 0 },
7575 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
7576 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, 7563 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7577 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 7564 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0, 0 },
7578 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
7579 { } 7565 { }
7580}; 7566};
7581MODULE_DEVICE_TABLE(pci, ipr_pci_table); 7567MODULE_DEVICE_TABLE(pci, ipr_pci_table);
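
Note: ipr drives its adapter reset as a chain of job steps: each step stores the next function in job_step and returns CONTINUE to run it immediately or RETURN to wait for a timer or interrupt. The patch inserts ipr_reset_bist_done between starting BIST and restoring config space so user config access is unblocked at the right point. A compact model of that state machine, with generic names rather than ipr's:

enum { RC_JOB_CONTINUE, RC_JOB_RETURN };

struct job {
        int (*job_step)(struct job *);
};

static int step_restore(struct job *j)
{
        /* restore config space, etc.; the reset sequence is finished */
        return RC_JOB_RETURN;
}

static int step_bist_done(struct job *j)
{
        /* unblock config access first, then move straight on */
        j->job_step = step_restore;
        return RC_JOB_CONTINUE;
}

static int step_start_bist(struct job *j)
{
        j->job_step = step_bist_done;   /* resumed later by the BIST timer */
        return RC_JOB_RETURN;
}

/* run steps until one of them asks to wait */
static void run_steps(struct job *j)
{
        while (j->job_step(j) == RC_JOB_CONTINUE)
                ;
}
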
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 9f62a1d4d511..88f285de97bb 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -37,8 +37,8 @@
37/* 37/*
38 * Literals 38 * Literals
39 */ 39 */
40#define IPR_DRIVER_VERSION "2.3.0" 40#define IPR_DRIVER_VERSION "2.3.1"
41#define IPR_DRIVER_DATE "(November 8, 2006)" 41#define IPR_DRIVER_DATE "(January 23, 2007)"
42 42
43/* 43/*
44 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding 44 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
diff --git a/drivers/scsi/lasi700.c b/drivers/scsi/lasi700.c
index f0871c3ac3d9..2aae1b081fcf 100644
--- a/drivers/scsi/lasi700.c
+++ b/drivers/scsi/lasi700.c
@@ -123,6 +123,7 @@ lasi700_probe(struct parisc_device *dev)
123 hostdata->force_le_on_be = 0; 123 hostdata->force_le_on_be = 0;
124 hostdata->chip710 = 1; 124 hostdata->chip710 = 1;
125 hostdata->dmode_extra = DMODE_FC2; 125 hostdata->dmode_extra = DMODE_FC2;
126 hostdata->burst_length = 8;
126 } 127 }
127 128
128 host = NCR_700_detect(&lasi700_template, hostdata, &dev->dev); 129 host = NCR_700_detect(&lasi700_template, hostdata, &dev->dev);
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index fb7df7b75811..a65598b1e536 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -548,7 +548,7 @@ int sas_discover_sata(struct domain_device *dev)
548 548
549 res = sas_notify_lldd_dev_found(dev); 549 res = sas_notify_lldd_dev_found(dev);
550 if (res) 550 if (res)
551 return res; 551 goto out_err2;
552 552
553 switch (dev->dev_type) { 553 switch (dev->dev_type) {
554 case SATA_DEV: 554 case SATA_DEV:
@@ -560,11 +560,23 @@ int sas_discover_sata(struct domain_device *dev)
560 default: 560 default:
561 break; 561 break;
562 } 562 }
563 if (res)
564 goto out_err;
563 565
564 sas_notify_lldd_dev_gone(dev); 566 sas_notify_lldd_dev_gone(dev);
565 if (!res) { 567 res = sas_notify_lldd_dev_found(dev);
566 sas_notify_lldd_dev_found(dev); 568 if (res)
567 } 569 goto out_err2;
570
571 res = sas_rphy_add(dev->rphy);
572 if (res)
573 goto out_err;
574
575 return res;
576
577out_err:
578 sas_notify_lldd_dev_gone(dev);
579out_err2:
568 return res; 580 return res;
569} 581}
570 582
@@ -580,21 +592,17 @@ int sas_discover_end_dev(struct domain_device *dev)
580 592
581 res = sas_notify_lldd_dev_found(dev); 593 res = sas_notify_lldd_dev_found(dev);
582 if (res) 594 if (res)
583 return res; 595 goto out_err2;
584 596
585 res = sas_rphy_add(dev->rphy); 597 res = sas_rphy_add(dev->rphy);
586 if (res) 598 if (res)
587 goto out_err; 599 goto out_err;
588 600
589 /* do this to get the end device port attributes which will have
590 * been scanned in sas_rphy_add */
591 sas_notify_lldd_dev_gone(dev);
592 sas_notify_lldd_dev_found(dev);
593
594 return 0; 601 return 0;
595 602
596out_err: 603out_err:
597 sas_notify_lldd_dev_gone(dev); 604 sas_notify_lldd_dev_gone(dev);
605out_err2:
598 return res; 606 return res;
599} 607}
600 608
@@ -649,6 +657,7 @@ void sas_unregister_domain_devices(struct asd_sas_port *port)
649 */ 657 */
650static void sas_discover_domain(struct work_struct *work) 658static void sas_discover_domain(struct work_struct *work)
651{ 659{
660 struct domain_device *dev;
652 int error = 0; 661 int error = 0;
653 struct sas_discovery_event *ev = 662 struct sas_discovery_event *ev =
654 container_of(work, struct sas_discovery_event, work); 663 container_of(work, struct sas_discovery_event, work);
@@ -658,35 +667,42 @@ static void sas_discover_domain(struct work_struct *work)
658 &port->disc.pending); 667 &port->disc.pending);
659 668
660 if (port->port_dev) 669 if (port->port_dev)
661 return ; 670 return;
662 else { 671
663 error = sas_get_port_device(port); 672 error = sas_get_port_device(port);
664 if (error) 673 if (error)
665 return; 674 return;
666 } 675 dev = port->port_dev;
667 676
668 SAS_DPRINTK("DOING DISCOVERY on port %d, pid:%d\n", port->id, 677 SAS_DPRINTK("DOING DISCOVERY on port %d, pid:%d\n", port->id,
669 current->pid); 678 current->pid);
670 679
671 switch (port->port_dev->dev_type) { 680 switch (dev->dev_type) {
672 case SAS_END_DEV: 681 case SAS_END_DEV:
673 error = sas_discover_end_dev(port->port_dev); 682 error = sas_discover_end_dev(dev);
674 break; 683 break;
675 case EDGE_DEV: 684 case EDGE_DEV:
676 case FANOUT_DEV: 685 case FANOUT_DEV:
677 error = sas_discover_root_expander(port->port_dev); 686 error = sas_discover_root_expander(dev);
678 break; 687 break;
679 case SATA_DEV: 688 case SATA_DEV:
680 case SATA_PM: 689 case SATA_PM:
681 error = sas_discover_sata(port->port_dev); 690 error = sas_discover_sata(dev);
682 break; 691 break;
683 default: 692 default:
684 SAS_DPRINTK("unhandled device %d\n", port->port_dev->dev_type); 693 SAS_DPRINTK("unhandled device %d\n", dev->dev_type);
685 break; 694 break;
686 } 695 }
687 696
688 if (error) { 697 if (error) {
689 kfree(port->port_dev); /* not kobject_register-ed yet */ 698 sas_rphy_free(dev->rphy);
699 dev->rphy = NULL;
700
701 spin_lock(&port->dev_list_lock);
702 list_del_init(&dev->dev_list_node);
703 spin_unlock(&port->dev_list_lock);
704
705 kfree(dev); /* not kobject_register-ed yet */
690 port->port_dev = NULL; 706 port->port_dev = NULL;
691 } 707 }
692 708
@@ -726,7 +742,7 @@ int sas_discover_event(struct asd_sas_port *port, enum discover_event ev)
726 BUG_ON(ev >= DISC_NUM_EVENTS); 742 BUG_ON(ev >= DISC_NUM_EVENTS);
727 743
728 sas_queue_event(ev, &disc->disc_event_lock, &disc->pending, 744 sas_queue_event(ev, &disc->disc_event_lock, &disc->pending,
729 &disc->disc_work[ev].work, port->ha->core.shost); 745 &disc->disc_work[ev].work, port->ha);
730 746
731 return 0; 747 return 0;
732} 748}
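The reworked discovery paths above replace the old notify/un-notify sequence with staged error labels, so each failure point undoes only what was already set up before returning. As a rough stand-alone illustration of that out_err/out_err2 unwinding shape (the resource names below are invented, not libsas symbols):

#include <stdio.h>

static int acquire_first(void)  { return 0; }             /* e.g. "notify dev found" succeeds */
static int acquire_second(void) { return -1; }            /* e.g. "rphy add" fails here */
static void release_first(void) { puts("undoing first step"); }

static int discover_like(void)
{
	int res;

	res = acquire_first();
	if (res)
		goto out_err2;          /* nothing set up yet, plain return */

	res = acquire_second();
	if (res)
		goto out_err;           /* undo only the first step */

	return 0;

out_err:
	release_first();
out_err2:
	return res;
}

int main(void)
{
	printf("discover_like() = %d\n", discover_like());
	return 0;
}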
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index d83392ee6823..9db30fb5caf2 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -31,7 +31,7 @@ static void notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event)
31 BUG_ON(event >= HA_NUM_EVENTS); 31 BUG_ON(event >= HA_NUM_EVENTS);
32 32
33 sas_queue_event(event, &sas_ha->event_lock, &sas_ha->pending, 33 sas_queue_event(event, &sas_ha->event_lock, &sas_ha->pending,
34 &sas_ha->ha_events[event].work, sas_ha->core.shost); 34 &sas_ha->ha_events[event].work, sas_ha);
35} 35}
36 36
37static void notify_port_event(struct asd_sas_phy *phy, enum port_event event) 37static void notify_port_event(struct asd_sas_phy *phy, enum port_event event)
@@ -41,7 +41,7 @@ static void notify_port_event(struct asd_sas_phy *phy, enum port_event event)
41 BUG_ON(event >= PORT_NUM_EVENTS); 41 BUG_ON(event >= PORT_NUM_EVENTS);
42 42
43 sas_queue_event(event, &ha->event_lock, &phy->port_events_pending, 43 sas_queue_event(event, &ha->event_lock, &phy->port_events_pending,
44 &phy->port_events[event].work, ha->core.shost); 44 &phy->port_events[event].work, ha);
45} 45}
46 46
47static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event) 47static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
@@ -51,7 +51,7 @@ static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
51 BUG_ON(event >= PHY_NUM_EVENTS); 51 BUG_ON(event >= PHY_NUM_EVENTS);
52 52
53 sas_queue_event(event, &ha->event_lock, &phy->phy_events_pending, 53 sas_queue_event(event, &ha->event_lock, &phy->phy_events_pending,
54 &phy->phy_events[event].work, ha->core.shost); 54 &phy->phy_events[event].work, ha);
55} 55}
56 56
57int sas_init_events(struct sas_ha_struct *sas_ha) 57int sas_init_events(struct sas_ha_struct *sas_ha)
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index d31e6fa466f7..d9b9a008d36d 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -667,8 +667,9 @@ static struct domain_device *sas_ex_discover_end_dev(
667 return child; 667 return child;
668 668
669 out_list_del: 669 out_list_del:
670 sas_rphy_free(child->rphy);
671 child->rphy = NULL;
670 list_del(&child->dev_list_node); 672 list_del(&child->dev_list_node);
671 sas_rphy_free(rphy);
672 out_free: 673 out_free:
673 sas_port_delete(phy->port); 674 sas_port_delete(phy->port);
674 out_err: 675 out_err:
@@ -1431,13 +1432,22 @@ int sas_discover_root_expander(struct domain_device *dev)
1431 int res; 1432 int res;
1432 struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy); 1433 struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy);
1433 1434
1434 sas_rphy_add(dev->rphy); 1435 res = sas_rphy_add(dev->rphy);
1436 if (res)
1437 goto out_err;
1435 1438
1436 ex->level = dev->port->disc.max_level; /* 0 */ 1439 ex->level = dev->port->disc.max_level; /* 0 */
1437 res = sas_discover_expander(dev); 1440 res = sas_discover_expander(dev);
1438 if (!res) 1441 if (res)
1439 sas_ex_bfs_disc(dev->port); 1442 goto out_err2;
1440 1443
1444 sas_ex_bfs_disc(dev->port);
1445
1446 return res;
1447
1448out_err2:
1449 sas_rphy_remove(dev->rphy);
1450out_err:
1441 return res; 1451 return res;
1442} 1452}
1443 1453
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 2f0c07fc3f48..965698c8b7bf 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -87,6 +87,9 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
87 else if (sas_ha->lldd_queue_size == -1) 87 else if (sas_ha->lldd_queue_size == -1)
88 sas_ha->lldd_queue_size = 128; /* Sanity */ 88 sas_ha->lldd_queue_size = 128; /* Sanity */
89 89
90 sas_ha->state = SAS_HA_REGISTERED;
91 spin_lock_init(&sas_ha->state_lock);
92
90 error = sas_register_phys(sas_ha); 93 error = sas_register_phys(sas_ha);
91 if (error) { 94 if (error) {
92 printk(KERN_NOTICE "couldn't register sas phys:%d\n", error); 95 printk(KERN_NOTICE "couldn't register sas phys:%d\n", error);
@@ -127,12 +130,22 @@ Undo_phys:
127 130
128int sas_unregister_ha(struct sas_ha_struct *sas_ha) 131int sas_unregister_ha(struct sas_ha_struct *sas_ha)
129{ 132{
133 unsigned long flags;
134
135 /* Set the state to unregistered to avoid further
136 * events to be queued */
137 spin_lock_irqsave(&sas_ha->state_lock, flags);
138 sas_ha->state = SAS_HA_UNREGISTERED;
139 spin_unlock_irqrestore(&sas_ha->state_lock, flags);
140 scsi_flush_work(sas_ha->core.shost);
141
142 sas_unregister_ports(sas_ha);
143
130 if (sas_ha->lldd_max_execute_num > 1) { 144 if (sas_ha->lldd_max_execute_num > 1) {
131 sas_shutdown_queue(sas_ha); 145 sas_shutdown_queue(sas_ha);
146 sas_ha->lldd_max_execute_num = 1;
132 } 147 }
133 148
134 sas_unregister_ports(sas_ha);
135
136 return 0; 149 return 0;
137} 150}
138 151
@@ -146,6 +159,36 @@ static int sas_get_linkerrors(struct sas_phy *phy)
146 return sas_smp_get_phy_events(phy); 159 return sas_smp_get_phy_events(phy);
147} 160}
148 161
162int sas_phy_enable(struct sas_phy *phy, int enable)
163{
164 int ret;
165 enum phy_func command;
166
167 if (enable)
168 command = PHY_FUNC_LINK_RESET;
169 else
170 command = PHY_FUNC_DISABLE;
171
172 if (scsi_is_sas_phy_local(phy)) {
173 struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
174 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
175 struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
176 struct sas_internal *i =
177 to_sas_internal(sas_ha->core.shost->transportt);
178
179 if (!enable) {
180 sas_phy_disconnected(asd_phy);
181 sas_ha->notify_phy_event(asd_phy, PHYE_LOSS_OF_SIGNAL);
182 }
183 ret = i->dft->lldd_control_phy(asd_phy, command, NULL);
184 } else {
185 struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
186 struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
187 ret = sas_smp_phy_control(ddev, phy->number, command, NULL);
188 }
189 return ret;
190}
191
149int sas_phy_reset(struct sas_phy *phy, int hard_reset) 192int sas_phy_reset(struct sas_phy *phy, int hard_reset)
150{ 193{
151 int ret; 194 int ret;
@@ -172,8 +215,8 @@ int sas_phy_reset(struct sas_phy *phy, int hard_reset)
172 return ret; 215 return ret;
173} 216}
174 217
175static int sas_set_phy_speed(struct sas_phy *phy, 218int sas_set_phy_speed(struct sas_phy *phy,
176 struct sas_phy_linkrates *rates) 219 struct sas_phy_linkrates *rates)
177{ 220{
178 int ret; 221 int ret;
179 222
@@ -212,6 +255,7 @@ static int sas_set_phy_speed(struct sas_phy *phy,
212} 255}
213 256
214static struct sas_function_template sft = { 257static struct sas_function_template sft = {
258 .phy_enable = sas_phy_enable,
215 .phy_reset = sas_phy_reset, 259 .phy_reset = sas_phy_reset,
216 .set_phy_speed = sas_set_phy_speed, 260 .set_phy_speed = sas_set_phy_speed,
217 .get_linkerrors = sas_get_linkerrors, 261 .get_linkerrors = sas_get_linkerrors,
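The new sas_phy_enable() above maps the enable flag onto PHY_FUNC_LINK_RESET or PHY_FUNC_DISABLE and then routes the request either to the local LLDD control callback or, for expander-attached phys, over SMP phy control. Below is a minimal stand-alone model of that dispatch; the struct, callbacks and main() are made-up stubs for illustration, not the real libsas types.

#include <stdio.h>
#include <stdbool.h>

enum phy_func { PHY_FUNC_LINK_RESET, PHY_FUNC_DISABLE };

struct toy_phy {
	bool is_local;                          /* attached directly to the HA? */
	int (*local_control)(enum phy_func);    /* stands in for lldd_control_phy */
	int (*smp_control)(enum phy_func);      /* stands in for SMP PHY CONTROL  */
};

static int toy_phy_enable(struct toy_phy *phy, int enable)
{
	enum phy_func cmd = enable ? PHY_FUNC_LINK_RESET : PHY_FUNC_DISABLE;

	/* local phys go straight to the driver, expander phys go over SMP */
	return phy->is_local ? phy->local_control(cmd) : phy->smp_control(cmd);
}

static int fake_local(enum phy_func f) { printf("local control %d\n", (int)f); return 0; }
static int fake_smp(enum phy_func f)   { printf("smp control %d\n", (int)f);   return 0; }

int main(void)
{
	struct toy_phy p = { .is_local = false,
			     .local_control = fake_local,
			     .smp_control = fake_smp };

	toy_phy_enable(&p, 0);	/* disable: routed via SMP in this toy setup */
	toy_phy_enable(&p, 1);	/* re-enable: link reset via SMP            */
	return 0;
}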
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 137d7e496b6d..a78638df2018 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -80,7 +80,7 @@ void sas_hae_reset(struct work_struct *work);
80static inline void sas_queue_event(int event, spinlock_t *lock, 80static inline void sas_queue_event(int event, spinlock_t *lock,
81 unsigned long *pending, 81 unsigned long *pending,
82 struct work_struct *work, 82 struct work_struct *work,
83 struct Scsi_Host *shost) 83 struct sas_ha_struct *sas_ha)
84{ 84{
85 unsigned long flags; 85 unsigned long flags;
86 86
@@ -91,7 +91,12 @@ static inline void sas_queue_event(int event, spinlock_t *lock,
91 } 91 }
92 __set_bit(event, pending); 92 __set_bit(event, pending);
93 spin_unlock_irqrestore(lock, flags); 93 spin_unlock_irqrestore(lock, flags);
94 scsi_queue_work(shost, work); 94
95 spin_lock_irqsave(&sas_ha->state_lock, flags);
96 if (sas_ha->state != SAS_HA_UNREGISTERED) {
97 scsi_queue_work(sas_ha->core.shost, work);
98 }
99 spin_unlock_irqrestore(&sas_ha->state_lock, flags);
95} 100}
96 101
97static inline void sas_begin_event(int event, spinlock_t *lock, 102static inline void sas_begin_event(int event, spinlock_t *lock,
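With the change above, sas_queue_event() only queues work while the HA state is not SAS_HA_UNREGISTERED, and the check is made under the new state_lock so it pairs with sas_unregister_ha() setting the state and flushing outstanding work. A small stand-alone sketch of that gate, using a pthread mutex in place of the kernel spinlock and invented type names (build with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>

enum toy_state { TOY_REGISTERED, TOY_UNREGISTERED };

struct toy_ha {
	pthread_mutex_t state_lock;
	enum toy_state state;
};

static struct toy_ha ha = { PTHREAD_MUTEX_INITIALIZER, TOY_REGISTERED };

/* Queue an event only while the host adapter is still registered. */
static bool toy_queue_event(const char *what)
{
	bool queued = false;

	pthread_mutex_lock(&ha.state_lock);
	if (ha.state != TOY_UNREGISTERED) {
		printf("queued: %s\n", what);
		queued = true;
	}
	pthread_mutex_unlock(&ha.state_lock);
	return queued;
}

int main(void)
{
	toy_queue_event("phy event");            /* accepted */

	pthread_mutex_lock(&ha.state_lock);      /* teardown flips the state... */
	ha.state = TOY_UNREGISTERED;
	pthread_mutex_unlock(&ha.state_lock);

	toy_queue_event("late phy event");       /* ...so this one is dropped */
	return 0;
}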
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 971c37ceecb4..e1e2d085c920 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -42,10 +42,11 @@ static void sas_form_port(struct asd_sas_phy *phy)
42 struct asd_sas_port *port = phy->port; 42 struct asd_sas_port *port = phy->port;
43 struct sas_internal *si = 43 struct sas_internal *si =
44 to_sas_internal(sas_ha->core.shost->transportt); 44 to_sas_internal(sas_ha->core.shost->transportt);
45 unsigned long flags;
45 46
46 if (port) { 47 if (port) {
47 if (memcmp(port->attached_sas_addr, phy->attached_sas_addr, 48 if (memcmp(port->attached_sas_addr, phy->attached_sas_addr,
48 SAS_ADDR_SIZE) == 0) 49 SAS_ADDR_SIZE) != 0)
49 sas_deform_port(phy); 50 sas_deform_port(phy);
50 else { 51 else {
51 SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n", 52 SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n",
@@ -56,7 +57,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
56 } 57 }
57 58
58 /* find a port */ 59 /* find a port */
59 spin_lock(&sas_ha->phy_port_lock); 60 spin_lock_irqsave(&sas_ha->phy_port_lock, flags);
60 for (i = 0; i < sas_ha->num_phys; i++) { 61 for (i = 0; i < sas_ha->num_phys; i++) {
61 port = sas_ha->sas_port[i]; 62 port = sas_ha->sas_port[i];
62 spin_lock(&port->phy_list_lock); 63 spin_lock(&port->phy_list_lock);
@@ -78,7 +79,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
78 if (i >= sas_ha->num_phys) { 79 if (i >= sas_ha->num_phys) {
79 printk(KERN_NOTICE "%s: couldn't find a free port, bug?\n", 80 printk(KERN_NOTICE "%s: couldn't find a free port, bug?\n",
80 __FUNCTION__); 81 __FUNCTION__);
81 spin_unlock(&sas_ha->phy_port_lock); 82 spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
82 return; 83 return;
83 } 84 }
84 85
@@ -105,7 +106,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
105 } else 106 } else
106 port->linkrate = max(port->linkrate, phy->linkrate); 107 port->linkrate = max(port->linkrate, phy->linkrate);
107 spin_unlock(&port->phy_list_lock); 108 spin_unlock(&port->phy_list_lock);
108 spin_unlock(&sas_ha->phy_port_lock); 109 spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
109 110
110 if (!port->port) { 111 if (!port->port) {
111 port->port = sas_port_alloc(phy->phy->dev.parent, port->id); 112 port->port = sas_port_alloc(phy->phy->dev.parent, port->id);
@@ -137,6 +138,7 @@ void sas_deform_port(struct asd_sas_phy *phy)
137 struct asd_sas_port *port = phy->port; 138 struct asd_sas_port *port = phy->port;
138 struct sas_internal *si = 139 struct sas_internal *si =
139 to_sas_internal(sas_ha->core.shost->transportt); 140 to_sas_internal(sas_ha->core.shost->transportt);
141 unsigned long flags;
140 142
141 if (!port) 143 if (!port)
142 return; /* done by a phy event */ 144 return; /* done by a phy event */
@@ -155,7 +157,7 @@ void sas_deform_port(struct asd_sas_phy *phy)
155 if (si->dft->lldd_port_deformed) 157 if (si->dft->lldd_port_deformed)
156 si->dft->lldd_port_deformed(phy); 158 si->dft->lldd_port_deformed(phy);
157 159
158 spin_lock(&sas_ha->phy_port_lock); 160 spin_lock_irqsave(&sas_ha->phy_port_lock, flags);
159 spin_lock(&port->phy_list_lock); 161 spin_lock(&port->phy_list_lock);
160 162
161 list_del_init(&phy->port_phy_el); 163 list_del_init(&phy->port_phy_el);
@@ -174,7 +176,7 @@ void sas_deform_port(struct asd_sas_phy *phy)
174 port->phy_mask = 0; 176 port->phy_mask = 0;
175 } 177 }
176 spin_unlock(&port->phy_list_lock); 178 spin_unlock(&port->phy_list_lock);
177 spin_unlock(&sas_ha->phy_port_lock); 179 spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
178 180
179 return; 181 return;
180} 182}
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 22672d54aa27..cd2378010c7e 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -34,6 +34,7 @@
34#include <scsi/scsi_transport_sas.h> 34#include <scsi/scsi_transport_sas.h>
35#include "../scsi_sas_internal.h" 35#include "../scsi_sas_internal.h"
36#include "../scsi_transport_api.h" 36#include "../scsi_transport_api.h"
37#include "../scsi_priv.h"
37 38
38#include <linux/err.h> 39#include <linux/err.h>
39#include <linux/blkdev.h> 40#include <linux/blkdev.h>
@@ -130,7 +131,7 @@ static enum task_attribute sas_scsi_get_task_attr(struct scsi_cmnd *cmd)
130 if (cmd->request && blk_rq_tagged(cmd->request)) { 131 if (cmd->request && blk_rq_tagged(cmd->request)) {
131 if (cmd->device->ordered_tags && 132 if (cmd->device->ordered_tags &&
132 (cmd->request->cmd_flags & REQ_HARDBARRIER)) 133 (cmd->request->cmd_flags & REQ_HARDBARRIER))
133 ta = TASK_ATTR_HOQ; 134 ta = TASK_ATTR_ORDERED;
134 } 135 }
135 return ta; 136 return ta;
136} 137}
@@ -281,6 +282,7 @@ enum task_disposition {
281 TASK_IS_ABORTED, 282 TASK_IS_ABORTED,
282 TASK_IS_AT_LU, 283 TASK_IS_AT_LU,
283 TASK_IS_NOT_AT_LU, 284 TASK_IS_NOT_AT_LU,
285 TASK_ABORT_FAILED,
284}; 286};
285 287
286static enum task_disposition sas_scsi_find_task(struct sas_task *task) 288static enum task_disposition sas_scsi_find_task(struct sas_task *task)
@@ -310,15 +312,6 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
310 spin_unlock_irqrestore(&core->task_queue_lock, flags); 312 spin_unlock_irqrestore(&core->task_queue_lock, flags);
311 } 313 }
312 314
313 spin_lock_irqsave(&task->task_state_lock, flags);
314 if (task->task_state_flags & SAS_TASK_INITIATOR_ABORTED) {
315 spin_unlock_irqrestore(&task->task_state_lock, flags);
316 SAS_DPRINTK("%s: task 0x%p already aborted\n",
317 __FUNCTION__, task);
318 return TASK_IS_ABORTED;
319 }
320 spin_unlock_irqrestore(&task->task_state_lock, flags);
321
322 for (i = 0; i < 5; i++) { 315 for (i = 0; i < 5; i++) {
323 SAS_DPRINTK("%s: aborting task 0x%p\n", __FUNCTION__, task); 316 SAS_DPRINTK("%s: aborting task 0x%p\n", __FUNCTION__, task);
324 res = si->dft->lldd_abort_task(task); 317 res = si->dft->lldd_abort_task(task);
@@ -340,15 +333,21 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
340 SAS_DPRINTK("%s: querying task 0x%p\n", 333 SAS_DPRINTK("%s: querying task 0x%p\n",
341 __FUNCTION__, task); 334 __FUNCTION__, task);
342 res = si->dft->lldd_query_task(task); 335 res = si->dft->lldd_query_task(task);
343 if (res == TMF_RESP_FUNC_SUCC) { 336 switch (res) {
337 case TMF_RESP_FUNC_SUCC:
344 SAS_DPRINTK("%s: task 0x%p at LU\n", 338 SAS_DPRINTK("%s: task 0x%p at LU\n",
345 __FUNCTION__, task); 339 __FUNCTION__, task);
346 return TASK_IS_AT_LU; 340 return TASK_IS_AT_LU;
347 } else if (res == TMF_RESP_FUNC_COMPLETE) { 341 case TMF_RESP_FUNC_COMPLETE:
348 SAS_DPRINTK("%s: task 0x%p not at LU\n", 342 SAS_DPRINTK("%s: task 0x%p not at LU\n",
349 __FUNCTION__, task); 343 __FUNCTION__, task);
350 return TASK_IS_NOT_AT_LU; 344 return TASK_IS_NOT_AT_LU;
351 } 345 case TMF_RESP_FUNC_FAILED:
346 SAS_DPRINTK("%s: task 0x%p failed to abort\n",
347 __FUNCTION__, task);
348 return TASK_ABORT_FAILED;
349 }
350
352 } 351 }
353 } 352 }
354 return res; 353 return res;
@@ -398,35 +397,82 @@ static int sas_recover_I_T(struct domain_device *dev)
398 return res; 397 return res;
399} 398}
400 399
401void sas_scsi_recover_host(struct Scsi_Host *shost) 400/* Find the sas_phy that's attached to this device */
401struct sas_phy *find_local_sas_phy(struct domain_device *dev)
402{
403 struct domain_device *pdev = dev->parent;
404 struct ex_phy *exphy = NULL;
405 int i;
406
407 /* Directly attached device */
408 if (!pdev)
409 return dev->port->phy;
410
411 /* Otherwise look in the expander */
412 for (i = 0; i < pdev->ex_dev.num_phys; i++)
413 if (!memcmp(dev->sas_addr,
414 pdev->ex_dev.ex_phy[i].attached_sas_addr,
415 SAS_ADDR_SIZE)) {
416 exphy = &pdev->ex_dev.ex_phy[i];
417 break;
418 }
419
420 BUG_ON(!exphy);
421 return exphy->phy;
422}
423
424/* Attempt to send a target reset message to a device */
425int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
426{
427 struct domain_device *dev = cmd_to_domain_dev(cmd);
428 struct sas_phy *phy = find_local_sas_phy(dev);
429 int res;
430
431 res = sas_phy_reset(phy, 1);
432 if (res)
433 SAS_DPRINTK("Device reset of %s failed 0x%x\n",
434 phy->dev.kobj.k_name,
435 res);
436 if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
437 return SUCCESS;
438
439 return FAILED;
440}
441
442/* Try to reset a device */
443static int try_to_reset_cmd_device(struct Scsi_Host *shost,
444 struct scsi_cmnd *cmd)
445{
446 if (!shost->hostt->eh_device_reset_handler)
447 return FAILED;
448
449 return shost->hostt->eh_device_reset_handler(cmd);
450}
451
452static int sas_eh_handle_sas_errors(struct Scsi_Host *shost,
453 struct list_head *work_q,
454 struct list_head *done_q)
402{ 455{
403 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
404 unsigned long flags;
405 LIST_HEAD(error_q);
406 struct scsi_cmnd *cmd, *n; 456 struct scsi_cmnd *cmd, *n;
407 enum task_disposition res = TASK_IS_DONE; 457 enum task_disposition res = TASK_IS_DONE;
408 int tmf_resp; 458 int tmf_resp, need_reset;
409 struct sas_internal *i = to_sas_internal(shost->transportt); 459 struct sas_internal *i = to_sas_internal(shost->transportt);
460 unsigned long flags;
461 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
410 462
411 spin_lock_irqsave(shost->host_lock, flags);
412 list_splice_init(&shost->eh_cmd_q, &error_q);
413 spin_unlock_irqrestore(shost->host_lock, flags);
414
415 SAS_DPRINTK("Enter %s\n", __FUNCTION__);
416
417 /* All tasks on this list were marked SAS_TASK_STATE_ABORTED
418 * by sas_scsi_timed_out() callback.
419 */
420Again: 463Again:
421 SAS_DPRINTK("going over list...\n"); 464 list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
422 list_for_each_entry_safe(cmd, n, &error_q, eh_entry) {
423 struct sas_task *task = TO_SAS_TASK(cmd); 465 struct sas_task *task = TO_SAS_TASK(cmd);
424 list_del_init(&cmd->eh_entry);
425 466
426 if (!task) { 467 if (!task)
427 SAS_DPRINTK("%s: taskless cmd?!\n", __FUNCTION__);
428 continue; 468 continue;
429 } 469
470 list_del_init(&cmd->eh_entry);
471
472 spin_lock_irqsave(&task->task_state_lock, flags);
473 need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET;
474 spin_unlock_irqrestore(&task->task_state_lock, flags);
475
430 SAS_DPRINTK("trying to find task 0x%p\n", task); 476 SAS_DPRINTK("trying to find task 0x%p\n", task);
431 res = sas_scsi_find_task(task); 477 res = sas_scsi_find_task(task);
432 478
@@ -437,11 +483,15 @@ Again:
437 SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__, 483 SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__,
438 task); 484 task);
439 task->task_done(task); 485 task->task_done(task);
486 if (need_reset)
487 try_to_reset_cmd_device(shost, cmd);
440 continue; 488 continue;
441 case TASK_IS_ABORTED: 489 case TASK_IS_ABORTED:
442 SAS_DPRINTK("%s: task 0x%p is aborted\n", 490 SAS_DPRINTK("%s: task 0x%p is aborted\n",
443 __FUNCTION__, task); 491 __FUNCTION__, task);
444 task->task_done(task); 492 task->task_done(task);
493 if (need_reset)
494 try_to_reset_cmd_device(shost, cmd);
445 continue; 495 continue;
446 case TASK_IS_AT_LU: 496 case TASK_IS_AT_LU:
447 SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task); 497 SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task);
@@ -452,11 +502,14 @@ Again:
452 SAS_ADDR(task->dev), 502 SAS_ADDR(task->dev),
453 cmd->device->lun); 503 cmd->device->lun);
454 task->task_done(task); 504 task->task_done(task);
455 sas_scsi_clear_queue_lu(&error_q, cmd); 505 if (need_reset)
506 try_to_reset_cmd_device(shost, cmd);
507 sas_scsi_clear_queue_lu(work_q, cmd);
456 goto Again; 508 goto Again;
457 } 509 }
458 /* fallthrough */ 510 /* fallthrough */
459 case TASK_IS_NOT_AT_LU: 511 case TASK_IS_NOT_AT_LU:
512 case TASK_ABORT_FAILED:
460 SAS_DPRINTK("task 0x%p is not at LU: I_T recover\n", 513 SAS_DPRINTK("task 0x%p is not at LU: I_T recover\n",
461 task); 514 task);
462 tmf_resp = sas_recover_I_T(task->dev); 515 tmf_resp = sas_recover_I_T(task->dev);
@@ -464,7 +517,9 @@ Again:
464 SAS_DPRINTK("I_T %016llx recovered\n", 517 SAS_DPRINTK("I_T %016llx recovered\n",
465 SAS_ADDR(task->dev->sas_addr)); 518 SAS_ADDR(task->dev->sas_addr));
466 task->task_done(task); 519 task->task_done(task);
467 sas_scsi_clear_queue_I_T(&error_q, task->dev); 520 if (need_reset)
521 try_to_reset_cmd_device(shost, cmd);
522 sas_scsi_clear_queue_I_T(work_q, task->dev);
468 goto Again; 523 goto Again;
469 } 524 }
470 /* Hammer time :-) */ 525 /* Hammer time :-) */
@@ -477,7 +532,9 @@ Again:
477 SAS_DPRINTK("clear nexus port:%d " 532 SAS_DPRINTK("clear nexus port:%d "
478 "succeeded\n", port->id); 533 "succeeded\n", port->id);
479 task->task_done(task); 534 task->task_done(task);
480 sas_scsi_clear_queue_port(&error_q, 535 if (need_reset)
536 try_to_reset_cmd_device(shost, cmd);
537 sas_scsi_clear_queue_port(work_q,
481 port); 538 port);
482 goto Again; 539 goto Again;
483 } 540 }
@@ -489,6 +546,8 @@ Again:
489 SAS_DPRINTK("clear nexus ha " 546 SAS_DPRINTK("clear nexus ha "
490 "succeeded\n"); 547 "succeeded\n");
491 task->task_done(task); 548 task->task_done(task);
549 if (need_reset)
550 try_to_reset_cmd_device(shost, cmd);
492 goto out; 551 goto out;
493 } 552 }
494 } 553 }
@@ -502,20 +561,54 @@ Again:
502 cmd->device->lun); 561 cmd->device->lun);
503 562
504 task->task_done(task); 563 task->task_done(task);
564 if (need_reset)
565 try_to_reset_cmd_device(shost, cmd);
505 goto clear_q; 566 goto clear_q;
506 } 567 }
507 } 568 }
508out: 569out:
509 scsi_eh_flush_done_q(&ha->eh_done_q); 570 return list_empty(work_q);
510 SAS_DPRINTK("--- Exit %s\n", __FUNCTION__);
511 return;
512clear_q: 571clear_q:
513 SAS_DPRINTK("--- Exit %s -- clear_q\n", __FUNCTION__); 572 SAS_DPRINTK("--- Exit %s -- clear_q\n", __FUNCTION__);
514 list_for_each_entry_safe(cmd, n, &error_q, eh_entry) { 573 list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
515 struct sas_task *task = TO_SAS_TASK(cmd); 574 struct sas_task *task = TO_SAS_TASK(cmd);
516 list_del_init(&cmd->eh_entry); 575 list_del_init(&cmd->eh_entry);
517 task->task_done(task); 576 task->task_done(task);
518 } 577 }
578 return list_empty(work_q);
579}
580
581void sas_scsi_recover_host(struct Scsi_Host *shost)
582{
583 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
584 unsigned long flags;
585 LIST_HEAD(eh_work_q);
586
587 spin_lock_irqsave(shost->host_lock, flags);
588 list_splice_init(&shost->eh_cmd_q, &eh_work_q);
589 spin_unlock_irqrestore(shost->host_lock, flags);
590
591 SAS_DPRINTK("Enter %s\n", __FUNCTION__);
592 /*
593 * Deal with commands that still have SAS tasks (i.e. they didn't
594 * complete via the normal sas_task completion mechanism)
595 */
596 if (sas_eh_handle_sas_errors(shost, &eh_work_q, &ha->eh_done_q))
597 goto out;
598
599 /*
600 * Now deal with SCSI commands that completed ok but have an error
601 * code (and hopefully sense data) attached. This is roughly what
602 * scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any
603 * command we see here has no sas_task and is thus unknown to the HA.
604 */
605 if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q))
606 scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);
607
608out:
609 scsi_eh_flush_done_q(&ha->eh_done_q);
610 SAS_DPRINTK("--- Exit %s\n", __FUNCTION__);
611 return;
519} 612}
520 613
521enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd) 614enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
@@ -524,24 +617,30 @@ enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
524 unsigned long flags; 617 unsigned long flags;
525 618
526 if (!task) { 619 if (!task) {
527 SAS_DPRINTK("command 0x%p, task 0x%p, gone: EH_HANDLED\n", 620 cmd->timeout_per_command /= 2;
528 cmd, task); 621 SAS_DPRINTK("command 0x%p, task 0x%p, gone: %s\n",
529 return EH_HANDLED; 622 cmd, task, (cmd->timeout_per_command ?
623 "EH_RESET_TIMER" : "EH_NOT_HANDLED"));
624 if (!cmd->timeout_per_command)
625 return EH_NOT_HANDLED;
626 return EH_RESET_TIMER;
530 } 627 }
531 628
532 spin_lock_irqsave(&task->task_state_lock, flags); 629 spin_lock_irqsave(&task->task_state_lock, flags);
533 if (task->task_state_flags & SAS_TASK_INITIATOR_ABORTED) { 630 BUG_ON(task->task_state_flags & SAS_TASK_STATE_ABORTED);
534 spin_unlock_irqrestore(&task->task_state_lock, flags);
535 SAS_DPRINTK("command 0x%p, task 0x%p, aborted by initiator: "
536 "EH_NOT_HANDLED\n", cmd, task);
537 return EH_NOT_HANDLED;
538 }
539 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 631 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
540 spin_unlock_irqrestore(&task->task_state_lock, flags); 632 spin_unlock_irqrestore(&task->task_state_lock, flags);
541 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n", 633 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n",
542 cmd, task); 634 cmd, task);
543 return EH_HANDLED; 635 return EH_HANDLED;
544 } 636 }
637 if (!(task->task_state_flags & SAS_TASK_AT_INITIATOR)) {
638 spin_unlock_irqrestore(&task->task_state_lock, flags);
639 SAS_DPRINTK("command 0x%p, task 0x%p, not at initiator: "
640 "EH_RESET_TIMER\n",
641 cmd, task);
642 return EH_RESET_TIMER;
643 }
545 task->task_state_flags |= SAS_TASK_STATE_ABORTED; 644 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
546 spin_unlock_irqrestore(&task->task_state_lock, flags); 645 spin_unlock_irqrestore(&task->task_state_lock, flags);
547 646
@@ -557,8 +656,9 @@ struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy)
557 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); 656 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
558 struct domain_device *found_dev = NULL; 657 struct domain_device *found_dev = NULL;
559 int i; 658 int i;
659 unsigned long flags;
560 660
561 spin_lock(&ha->phy_port_lock); 661 spin_lock_irqsave(&ha->phy_port_lock, flags);
562 for (i = 0; i < ha->num_phys; i++) { 662 for (i = 0; i < ha->num_phys; i++) {
563 struct asd_sas_port *port = ha->sas_port[i]; 663 struct asd_sas_port *port = ha->sas_port[i];
564 struct domain_device *dev; 664 struct domain_device *dev;
@@ -574,7 +674,7 @@ struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy)
574 spin_unlock(&port->dev_list_lock); 674 spin_unlock(&port->dev_list_lock);
575 } 675 }
576 found: 676 found:
577 spin_unlock(&ha->phy_port_lock); 677 spin_unlock_irqrestore(&ha->phy_port_lock, flags);
578 678
579 return found_dev; 679 return found_dev;
580} 680}
@@ -623,6 +723,8 @@ int sas_slave_configure(struct scsi_device *scsi_dev)
623 scsi_deactivate_tcq(scsi_dev, 1); 723 scsi_deactivate_tcq(scsi_dev, 1);
624 } 724 }
625 725
726 scsi_dev->allow_restart = 1;
727
626 return 0; 728 return 0;
627} 729}
628 730
@@ -799,46 +901,42 @@ void sas_shutdown_queue(struct sas_ha_struct *sas_ha)
799 spin_unlock_irqrestore(&core->task_queue_lock, flags); 901 spin_unlock_irqrestore(&core->task_queue_lock, flags);
800} 902}
801 903
802static int do_sas_task_abort(struct sas_task *task) 904/*
905 * Call the LLDD task abort routine directly. This function is intended for
906 * use by upper layers that need to tell the LLDD to abort a task.
907 */
908int __sas_task_abort(struct sas_task *task)
803{ 909{
804 struct scsi_cmnd *sc = task->uldd_task;
805 struct sas_internal *si = 910 struct sas_internal *si =
806 to_sas_internal(task->dev->port->ha->core.shost->transportt); 911 to_sas_internal(task->dev->port->ha->core.shost->transportt);
807 unsigned long flags; 912 unsigned long flags;
808 int res; 913 int res;
809 914
810 spin_lock_irqsave(&task->task_state_lock, flags); 915 spin_lock_irqsave(&task->task_state_lock, flags);
811 if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { 916 if (task->task_state_flags & SAS_TASK_STATE_ABORTED ||
917 task->task_state_flags & SAS_TASK_STATE_DONE) {
812 spin_unlock_irqrestore(&task->task_state_lock, flags); 918 spin_unlock_irqrestore(&task->task_state_lock, flags);
813 SAS_DPRINTK("%s: Task %p already aborted.\n", __FUNCTION__, 919 SAS_DPRINTK("%s: Task %p already finished.\n", __FUNCTION__,
814 task); 920 task);
815 return 0; 921 return 0;
816 } 922 }
817 923 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
818 task->task_state_flags |= SAS_TASK_INITIATOR_ABORTED;
819 if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
820 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
821 spin_unlock_irqrestore(&task->task_state_lock, flags); 924 spin_unlock_irqrestore(&task->task_state_lock, flags);
822 925
823 if (!si->dft->lldd_abort_task) 926 if (!si->dft->lldd_abort_task)
824 return -ENODEV; 927 return -ENODEV;
825 928
826 res = si->dft->lldd_abort_task(task); 929 res = si->dft->lldd_abort_task(task);
930
931 spin_lock_irqsave(&task->task_state_lock, flags);
827 if ((task->task_state_flags & SAS_TASK_STATE_DONE) || 932 if ((task->task_state_flags & SAS_TASK_STATE_DONE) ||
828 (res == TMF_RESP_FUNC_COMPLETE)) 933 (res == TMF_RESP_FUNC_COMPLETE))
829 { 934 {
830 /* SMP commands don't have scsi_cmds(?) */ 935 spin_unlock_irqrestore(&task->task_state_lock, flags);
831 if (!sc) { 936 task->task_done(task);
832 task->task_done(task);
833 return 0;
834 }
835 scsi_req_abort_cmd(sc);
836 scsi_schedule_eh(sc->device->host);
837 return 0; 937 return 0;
838 } 938 }
839 939
840 spin_lock_irqsave(&task->task_state_lock, flags);
841 task->task_state_flags &= ~SAS_TASK_INITIATOR_ABORTED;
842 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) 940 if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
843 task->task_state_flags &= ~SAS_TASK_STATE_ABORTED; 941 task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;
844 spin_unlock_irqrestore(&task->task_state_lock, flags); 942 spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -846,17 +944,24 @@ static int do_sas_task_abort(struct sas_task *task)
846 return -EAGAIN; 944 return -EAGAIN;
847} 945}
848 946
849void sas_task_abort(struct work_struct *work) 947/*
948 * Tell an upper layer that it needs to initiate an abort for a given task.
949 * This should only ever be called by an LLDD.
950 */
951void sas_task_abort(struct sas_task *task)
850{ 952{
851 struct sas_task *task = 953 struct scsi_cmnd *sc = task->uldd_task;
852 container_of(work, struct sas_task, abort_work);
853 int i;
854 954
855 for (i = 0; i < 5; i++) 955 /* Escape for libsas internal commands */
856 if (!do_sas_task_abort(task)) 956 if (!sc) {
957 if (!del_timer(&task->timer))
857 return; 958 return;
959 task->timer.function(task->timer.data);
960 return;
961 }
858 962
859 SAS_DPRINTK("%s: Could not kill task!\n", __FUNCTION__); 963 scsi_req_abort_cmd(sc);
964 scsi_schedule_eh(sc->device->host);
860} 965}
861 966
862EXPORT_SYMBOL_GPL(sas_queuecommand); 967EXPORT_SYMBOL_GPL(sas_queuecommand);
@@ -866,5 +971,8 @@ EXPORT_SYMBOL_GPL(sas_slave_destroy);
866EXPORT_SYMBOL_GPL(sas_change_queue_depth); 971EXPORT_SYMBOL_GPL(sas_change_queue_depth);
867EXPORT_SYMBOL_GPL(sas_change_queue_type); 972EXPORT_SYMBOL_GPL(sas_change_queue_type);
868EXPORT_SYMBOL_GPL(sas_bios_param); 973EXPORT_SYMBOL_GPL(sas_bios_param);
974EXPORT_SYMBOL_GPL(__sas_task_abort);
869EXPORT_SYMBOL_GPL(sas_task_abort); 975EXPORT_SYMBOL_GPL(sas_task_abort);
870EXPORT_SYMBOL_GPL(sas_phy_reset); 976EXPORT_SYMBOL_GPL(sas_phy_reset);
977EXPORT_SYMBOL_GPL(sas_phy_enable);
978EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
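sas_scsi_recover_host() above is now split into two phases: commands that still own a sas_task are recovered first by sas_eh_handle_sas_errors(), and only the leftovers fall through to the generic scsi_eh_get_sense()/scsi_eh_ready_devs() path before the done queue is flushed. A stand-alone sketch of that two-phase draining follows; every name in it is illustrative rather than taken from libsas.

#include <stdio.h>
#include <stdbool.h>

struct toy_cmd {
	const char *name;
	bool has_task;      /* still tied to a driver task? */
	bool done;
};

/* Phase 1: consume commands that still have tasks; report if anything is left. */
static bool handle_task_errors(struct toy_cmd *q, int n)
{
	bool all_done = true;

	for (int i = 0; i < n; i++) {
		if (q[i].done)
			continue;
		if (q[i].has_task) {
			printf("phase 1 recovers %s\n", q[i].name);
			q[i].done = true;
		} else {
			all_done = false;   /* left for phase 2 */
		}
	}
	return all_done;
}

/* Phase 2: generic handling for whatever phase 1 left behind. */
static void handle_generic(struct toy_cmd *q, int n)
{
	for (int i = 0; i < n; i++)
		if (!q[i].done) {
			printf("phase 2 recovers %s\n", q[i].name);
			q[i].done = true;
		}
}

int main(void)
{
	struct toy_cmd q[] = {
		{ "cmd A", true,  false },
		{ "cmd B", false, false },   /* completed, but with sense data */
	};

	if (!handle_task_errors(q, 2))
		handle_generic(q, 2);
	return 0;
}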
diff --git a/drivers/scsi/megaraid/mbox_defs.h b/drivers/scsi/megaraid/mbox_defs.h
index 3052869f51f4..170399ef06f4 100644
--- a/drivers/scsi/megaraid/mbox_defs.h
+++ b/drivers/scsi/megaraid/mbox_defs.h
@@ -748,7 +748,7 @@ typedef struct {
748 748
749 749
750/** 750/**
751 * private_bios_data - bios private data for boot devices 751 * struct private_bios_data - bios private data for boot devices
752 * @geometry : bits 0-3 - BIOS geometry, 0x0001 - 1GB, 0x0010 - 2GB, 752 * @geometry : bits 0-3 - BIOS geometry, 0x0001 - 1GB, 0x0010 - 2GB,
753 * 0x1000 - 8GB, Others values are invalid 753 * 0x1000 - 8GB, Others values are invalid
754 * @unused : bits 4-7 are unused 754 * @unused : bits 4-7 are unused
diff --git a/drivers/scsi/megaraid/mega_common.h b/drivers/scsi/megaraid/mega_common.h
index b50e27e66024..26e1e6c55654 100644
--- a/drivers/scsi/megaraid/mega_common.h
+++ b/drivers/scsi/megaraid/mega_common.h
@@ -46,17 +46,17 @@
46 46
47/** 47/**
48 * scb_t - scsi command control block 48 * scb_t - scsi command control block
49 * @param ccb : command control block for individual driver 49 * @ccb : command control block for individual driver
50 * @param list : list of control blocks 50 * @list : list of control blocks
51 * @param gp : general purpose field for LLDs 51 * @gp : general purpose field for LLDs
52 * @param sno : all SCBs have a serial number 52 * @sno : all SCBs have a serial number
53 * @param scp : associated scsi command 53 * @scp : associated scsi command
54 * @param state : current state of scb 54 * @state : current state of scb
55 * @param dma_dir : direction of data transfer 55 * @dma_dir : direction of data transfer
56 * @param dma_type : transfer with sg list, buffer, or no data transfer 56 * @dma_type : transfer with sg list, buffer, or no data transfer
57 * @param dev_channel : actual channel on the device 57 * @dev_channel : actual channel on the device
58 * @param dev_target : actual target on the device 58 * @dev_target : actual target on the device
59 * @param status : completion status 59 * @status : completion status
60 * 60 *
 61 * This is our central data structure to issue commands to each driver. 61 * This is our central data structure to issue commands to each driver.
62 * Driver specific data structures are maintained in the ccb field. 62 * Driver specific data structures are maintained in the ccb field.
@@ -99,42 +99,42 @@ typedef struct {
99 99
100/** 100/**
101 * struct adapter_t - driver's initialization structure 101 * struct adapter_t - driver's initialization structure
 102 * @param dpc_h : tasklet handle 102 * @dpc_h : tasklet handle
103 * @param pdev : pci configuration pointer for kernel 103 * @pdev : pci configuration pointer for kernel
104 * @param host : pointer to host structure of mid-layer 104 * @host : pointer to host structure of mid-layer
105 * @param lock : synchronization lock for mid-layer and driver 105 * @lock : synchronization lock for mid-layer and driver
106 * @param quiescent : driver is quiescent for now. 106 * @quiescent : driver is quiescent for now.
107 * @param outstanding_cmds : number of commands pending in the driver 107 * @outstanding_cmds : number of commands pending in the driver
108 * @param kscb_list : pointer to the bulk of SCBs pointers for IO 108 * @kscb_list : pointer to the bulk of SCBs pointers for IO
109 * @param kscb_pool : pool of free scbs for IO 109 * @kscb_pool : pool of free scbs for IO
110 * @param kscb_pool_lock : lock for pool of free scbs 110 * @kscb_pool_lock : lock for pool of free scbs
111 * @param pend_list : pending commands list 111 * @pend_list : pending commands list
112 * @param pend_list_lock : exlusion lock for pending commands list 112 * @pend_list_lock : exclusion lock for pending commands list
113 * @param completed_list : list of completed commands 113 * @completed_list : list of completed commands
114 * @param completed_list_lock : exclusion lock for list of completed commands 114 * @completed_list_lock : exclusion lock for list of completed commands
115 * @param sglen : max sg elements supported 115 * @sglen : max sg elements supported
116 * @param device_ids : to convert kernel device addr to our devices. 116 * @device_ids : to convert kernel device addr to our devices.
117 * @param raid_device : raid adapter specific pointer 117 * @raid_device : raid adapter specific pointer
118 * @param max_channel : maximum channel number supported - inclusive 118 * @max_channel : maximum channel number supported - inclusive
119 * @param max_target : max target supported - inclusive 119 * @max_target : max target supported - inclusive
120 * @param max_lun : max lun supported - inclusive 120 * @max_lun : max lun supported - inclusive
121 * @param unique_id : unique identifier for each adapter 121 * @unique_id : unique identifier for each adapter
122 * @param irq : IRQ for this adapter 122 * @irq : IRQ for this adapter
123 * @param ito : internal timeout value, (-1) means no timeout 123 * @ito : internal timeout value, (-1) means no timeout
124 * @param ibuf : buffer to issue internal commands 124 * @ibuf : buffer to issue internal commands
125 * @param ibuf_dma_h : dma handle for the above buffer 125 * @ibuf_dma_h : dma handle for the above buffer
126 * @param uscb_list : SCB pointers for user cmds, common mgmt module 126 * @uscb_list : SCB pointers for user cmds, common mgmt module
127 * @param uscb_pool : pool of SCBs for user commands 127 * @uscb_pool : pool of SCBs for user commands
128 * @param uscb_pool_lock : exclusion lock for these SCBs 128 * @uscb_pool_lock : exclusion lock for these SCBs
129 * @param max_cmds : max outstanding commands 129 * @max_cmds : max outstanding commands
130 * @param fw_version : firmware version 130 * @fw_version : firmware version
131 * @param bios_version : bios version 131 * @bios_version : bios version
132 * @param max_cdb_sz : biggest CDB size supported. 132 * @max_cdb_sz : biggest CDB size supported.
133 * @param ha : is high availability present - clustering 133 * @ha : is high availability present - clustering
134 * @param init_id : initiator ID, the default value should be 7 134 * @init_id : initiator ID, the default value should be 7
135 * @param max_sectors : max sectors per request 135 * @max_sectors : max sectors per request
136 * @param cmd_per_lun : max outstanding commands per LUN 136 * @cmd_per_lun : max outstanding commands per LUN
137 * @param being_detached : set when unloading, no more mgmt calls 137 * @being_detached : set when unloading, no more mgmt calls
138 * 138 *
139 * 139 *
140 * mraid_setup_device_map() can be called anytime after the device map is 140 * mraid_setup_device_map() can be called anytime after the device map is
@@ -211,23 +211,23 @@ typedef struct {
211#define SCP2ADAPTER(scp) (adapter_t *)SCSIHOST2ADAP(SCP2HOST(scp)) 211#define SCP2ADAPTER(scp) (adapter_t *)SCSIHOST2ADAP(SCP2HOST(scp))
212 212
213 213
214/**
215 * MRAID_GET_DEVICE_MAP - device ids
216 * @param adp - Adapter's soft state
217 * @param scp - mid-layer scsi command pointer
218 * @param p_chan - physical channel on the controller
219 * @param target - target id of the device or logical drive number
220 * @param islogical - set if the command is for the logical drive
221 *
222 * Macro to retrieve information about device class, logical or physical and
223 * the corresponding physical channel and target or logical drive number
224 **/
225#define MRAID_IS_LOGICAL(adp, scp) \ 214#define MRAID_IS_LOGICAL(adp, scp) \
226 (SCP2CHANNEL(scp) == (adp)->max_channel) ? 1 : 0 215 (SCP2CHANNEL(scp) == (adp)->max_channel) ? 1 : 0
227 216
228#define MRAID_IS_LOGICAL_SDEV(adp, sdev) \ 217#define MRAID_IS_LOGICAL_SDEV(adp, sdev) \
229 (sdev->channel == (adp)->max_channel) ? 1 : 0 218 (sdev->channel == (adp)->max_channel) ? 1 : 0
230 219
220/**
221 * MRAID_GET_DEVICE_MAP - device ids
222 * @adp : adapter's soft state
223 * @scp : mid-layer scsi command pointer
224 * @p_chan : physical channel on the controller
225 * @target : target id of the device or logical drive number
226 * @islogical : set if the command is for the logical drive
227 *
228 * Macro to retrieve information about device class, logical or physical and
229 * the corresponding physical channel and target or logical drive number
230 */
231#define MRAID_GET_DEVICE_MAP(adp, scp, p_chan, target, islogical) \ 231#define MRAID_GET_DEVICE_MAP(adp, scp, p_chan, target, islogical) \
232 /* \ 232 /* \
233 * Is the request coming for the virtual channel \ 233 * Is the request coming for the virtual channel \
@@ -271,10 +271,10 @@ typedef struct {
271#define ASSERT(expression) 271#define ASSERT(expression)
272#endif 272#endif
273 273
274/* 274/**
275 * struct mraid_pci_blk - structure holds DMA memory block info 275 * struct mraid_pci_blk - structure holds DMA memory block info
276 * @param vaddr : virtual address to a memory block 276 * @vaddr : virtual address to a memory block
277 * @param dma_addr : DMA handle to a memory block 277 * @dma_addr : DMA handle to a memory block
278 * 278 *
 279 * This structure is filled up for the caller. It is the responsibility of the 279 * This structure is filled up for the caller. It is the responsibility of the
280 * caller to allocate this array big enough to store addresses for all 280 * caller to allocate this array big enough to store addresses for all
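The MRAID_IS_LOGICAL / MRAID_GET_DEVICE_MAP documentation above boils down to one rule: commands addressed to the adapter's last (virtual) channel refer to logical drives, everything else to physical channel/target pairs. A simplified stand-alone illustration of that classification, with invented structures and a hypothetical max_channel value rather than the driver's own macros:

#include <stdio.h>

struct toy_adapter { int max_channel; };
struct toy_cmd     { int channel; int target; };

/* Mirrors the MRAID_IS_LOGICAL idea: last channel == virtual/logical channel. */
static int toy_is_logical(const struct toy_adapter *adp, const struct toy_cmd *scp)
{
	return scp->channel == adp->max_channel;
}

int main(void)
{
	struct toy_adapter adp = { .max_channel = 4 };   /* assumed virtual channel */
	struct toy_cmd ld = { .channel = 4, .target = 2 };
	struct toy_cmd pd = { .channel = 1, .target = 6 };

	printf("ld is %s, pd is %s\n",
	       toy_is_logical(&adp, &ld) ? "a logical drive" : "a physical device",
	       toy_is_logical(&adp, &pd) ? "a logical drive" : "a physical device");
	return 0;
}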
diff --git a/drivers/scsi/megaraid/megaraid_ioctl.h b/drivers/scsi/megaraid/megaraid_ioctl.h
index b8aa34202ec3..706fa05a187a 100644
--- a/drivers/scsi/megaraid/megaraid_ioctl.h
+++ b/drivers/scsi/megaraid/megaraid_ioctl.h
@@ -22,23 +22,23 @@
22 22
23#include "mbox_defs.h" 23#include "mbox_defs.h"
24 24
25/*
26 * console messages debug levels
27 */
28#define CL_ANN 0 /* print unconditionally, announcements */
29#define CL_DLEVEL1 1 /* debug level 1, informative */
30#define CL_DLEVEL2 2 /* debug level 2, verbose */
31#define CL_DLEVEL3 3 /* debug level 3, very verbose */
32
25/** 33/**
26 * con_log() - console log routine 34 * con_log() - console log routine
27 * @param level : indicates the severity of the message. 35 * @level : indicates the severity of the message.
28 * @fparam mt : format string 36 * @fmt : format string
29 * 37 *
30 * con_log displays the error messages on the console based on the current 38 * con_log displays the error messages on the console based on the current
31 * debug level. Also it attaches the appropriate kernel severity level with 39 * debug level. Also it attaches the appropriate kernel severity level with
32 * the message. 40 * the message.
33 *
34 *
35 * consolge messages debug levels
36 */ 41 */
37#define CL_ANN 0 /* print unconditionally, announcements */
38#define CL_DLEVEL1 1 /* debug level 1, informative */
39#define CL_DLEVEL2 2 /* debug level 2, verbose */
40#define CL_DLEVEL3 3 /* debug level 3, very verbose */
41
42#define con_log(level, fmt) if (LSI_DBGLVL >= level) printk fmt; 42#define con_log(level, fmt) if (LSI_DBGLVL >= level) printk fmt;
43 43
44/* 44/*
@@ -157,14 +157,14 @@ typedef struct uioc {
157/** 157/**
158 * struct mraid_hba_info - information about the controller 158 * struct mraid_hba_info - information about the controller
159 * 159 *
160 * @param pci_vendor_id : PCI vendor id 160 * @pci_vendor_id : PCI vendor id
161 * @param pci_device_id : PCI device id 161 * @pci_device_id : PCI device id
162 * @param subsystem_vendor_id : PCI subsystem vendor id 162 * @subsystem_vendor_id : PCI subsystem vendor id
163 * @param subsystem_device_id : PCI subsystem device id 163 * @subsystem_device_id : PCI subsystem device id
164 * @param baseport : base port of hba memory 164 * @baseport : base port of hba memory
165 * @param pci_bus : PCI bus 165 * @pci_bus : PCI bus
166 * @param pci_dev_fn : PCI device/function values 166 * @pci_dev_fn : PCI device/function values
167 * @param irq : interrupt vector for the device 167 * @irq : interrupt vector for the device
168 * 168 *
169 * Extended information of 256 bytes about the controller. Align on the single 169 * Extended information of 256 bytes about the controller. Align on the single
170 * byte boundary so that 32-bit applications can be run on 64-bit platform 170 * byte boundary so that 32-bit applications can be run on 64-bit platform
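The con_log() macro above expects its second argument to be a fully parenthesised printk argument list, and only emits the message when LSI_DBGLVL is at or above the given level, which is why later call sites in this patch read con_log(CL_ANN, ("...", args)). A tiny userspace mock of that convention, with printk replaced by printf purely so the sketch builds on its own and an assumed threshold value:

#include <stdio.h>

#define CL_ANN		0	/* announcements, always printed */
#define CL_DLEVEL1	1	/* informative                     */
#define LSI_DBGLVL	CL_ANN	/* current threshold (assumed here) */

#define printk printf
#define con_log(level, fmt) if (LSI_DBGLVL >= level) printk fmt;

int main(void)
{
	int irq = 19;

	/* printed: level CL_ANN is within the threshold */
	con_log(CL_ANN, ("megaraid: Couldn't register IRQ %d!\n", irq));

	/* suppressed: CL_DLEVEL1 is above the current debug level */
	con_log(CL_DLEVEL1, ("megaraid: verbose detail suppressed\n"));
	return 0;
}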
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 7bac86dda88f..04d0b6918c61 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -10,13 +10,13 @@
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 * 11 *
12 * FILE : megaraid_mbox.c 12 * FILE : megaraid_mbox.c
13 * Version : v2.20.4.9 (Jul 16 2006) 13 * Version : v2.20.5.1 (Nov 16 2006)
14 * 14 *
15 * Authors: 15 * Authors:
16 * Atul Mukker <Atul.Mukker@lsil.com> 16 * Atul Mukker <Atul.Mukker@lsi.com>
17 * Sreenivas Bagalkote <Sreenivas.Bagalkote@lsil.com> 17 * Sreenivas Bagalkote <Sreenivas.Bagalkote@lsi.com>
18 * Manoj Jose <Manoj.Jose@lsil.com> 18 * Manoj Jose <Manoj.Jose@lsi.com>
19 * Seokmann Ju <Seokmann.Ju@lsil.com> 19 * Seokmann Ju
20 * 20 *
21 * List of supported controllers 21 * List of supported controllers
22 * 22 *
@@ -107,6 +107,7 @@ static int megaraid_mbox_support_random_del(adapter_t *);
107static int megaraid_mbox_get_max_sg(adapter_t *); 107static int megaraid_mbox_get_max_sg(adapter_t *);
108static void megaraid_mbox_enum_raid_scsi(adapter_t *); 108static void megaraid_mbox_enum_raid_scsi(adapter_t *);
109static void megaraid_mbox_flush_cache(adapter_t *); 109static void megaraid_mbox_flush_cache(adapter_t *);
110static int megaraid_mbox_fire_sync_cmd(adapter_t *);
110 111
111static void megaraid_mbox_display_scb(adapter_t *, scb_t *); 112static void megaraid_mbox_display_scb(adapter_t *, scb_t *);
112static void megaraid_mbox_setup_device_map(adapter_t *); 113static void megaraid_mbox_setup_device_map(adapter_t *);
@@ -137,7 +138,7 @@ static int wait_till_fw_empty(adapter_t *);
137 138
138 139
139 140
140MODULE_AUTHOR("sju@lsil.com"); 141MODULE_AUTHOR("megaraidlinux@lsi.com");
141MODULE_DESCRIPTION("LSI Logic MegaRAID Mailbox Driver"); 142MODULE_DESCRIPTION("LSI Logic MegaRAID Mailbox Driver");
142MODULE_LICENSE("GPL"); 143MODULE_LICENSE("GPL");
143MODULE_VERSION(MEGARAID_VERSION); 144MODULE_VERSION(MEGARAID_VERSION);
@@ -146,7 +147,7 @@ MODULE_VERSION(MEGARAID_VERSION);
146 * ### modules parameters for driver ### 147 * ### modules parameters for driver ###
147 */ 148 */
148 149
149/** 150/*
150 * Set to enable driver to expose unconfigured disk to kernel 151 * Set to enable driver to expose unconfigured disk to kernel
151 */ 152 */
152static int megaraid_expose_unconf_disks = 0; 153static int megaraid_expose_unconf_disks = 0;
@@ -154,7 +155,7 @@ module_param_named(unconf_disks, megaraid_expose_unconf_disks, int, 0);
154MODULE_PARM_DESC(unconf_disks, 155MODULE_PARM_DESC(unconf_disks,
155 "Set to expose unconfigured disks to kernel (default=0)"); 156 "Set to expose unconfigured disks to kernel (default=0)");
156 157
157/** 158/*
158 * driver wait time if the adapter's mailbox is busy 159 * driver wait time if the adapter's mailbox is busy
159 */ 160 */
160static unsigned int max_mbox_busy_wait = MBOX_BUSY_WAIT; 161static unsigned int max_mbox_busy_wait = MBOX_BUSY_WAIT;
@@ -162,7 +163,7 @@ module_param_named(busy_wait, max_mbox_busy_wait, int, 0);
162MODULE_PARM_DESC(busy_wait, 163MODULE_PARM_DESC(busy_wait,
163 "Max wait for mailbox in microseconds if busy (default=10)"); 164 "Max wait for mailbox in microseconds if busy (default=10)");
164 165
165/** 166/*
166 * number of sectors per IO command 167 * number of sectors per IO command
167 */ 168 */
168static unsigned int megaraid_max_sectors = MBOX_MAX_SECTORS; 169static unsigned int megaraid_max_sectors = MBOX_MAX_SECTORS;
@@ -170,7 +171,7 @@ module_param_named(max_sectors, megaraid_max_sectors, int, 0);
170MODULE_PARM_DESC(max_sectors, 171MODULE_PARM_DESC(max_sectors,
171 "Maximum number of sectors per IO command (default=128)"); 172 "Maximum number of sectors per IO command (default=128)");
172 173
173/** 174/*
174 * number of commands per logical unit 175 * number of commands per logical unit
175 */ 176 */
176static unsigned int megaraid_cmd_per_lun = MBOX_DEF_CMD_PER_LUN; 177static unsigned int megaraid_cmd_per_lun = MBOX_DEF_CMD_PER_LUN;
@@ -179,7 +180,7 @@ MODULE_PARM_DESC(cmd_per_lun,
179 "Maximum number of commands per logical unit (default=64)"); 180 "Maximum number of commands per logical unit (default=64)");
180 181
181 182
182/** 183/*
183 * Fast driver load option, skip scanning for physical devices during load. 184 * Fast driver load option, skip scanning for physical devices during load.
184 * This would result in non-disk devices being skipped during driver load 185 * This would result in non-disk devices being skipped during driver load
185 * time. These can be later added though, using /proc/scsi/scsi 186 * time. These can be later added though, using /proc/scsi/scsi
@@ -190,7 +191,7 @@ MODULE_PARM_DESC(fast_load,
190 "Faster loading of the driver, skips physical devices! (default=0)"); 191 "Faster loading of the driver, skips physical devices! (default=0)");
191 192
192 193
193/** 194/*
194 * mraid_debug level - threshold for amount of information to be displayed by 195 * mraid_debug level - threshold for amount of information to be displayed by
195 * the driver. This level can be changed through modules parameters, ioctl or 196 * the driver. This level can be changed through modules parameters, ioctl or
196 * sysfs/proc interface. By default, print the announcement messages only. 197 * sysfs/proc interface. By default, print the announcement messages only.
@@ -337,7 +338,7 @@ static struct device_attribute *megaraid_sdev_attrs[] = {
337 * 338 *
338 * Return value: 339 * Return value:
339 * actual depth set 340 * actual depth set
340 **/ 341 */
341static int megaraid_change_queue_depth(struct scsi_device *sdev, int qdepth) 342static int megaraid_change_queue_depth(struct scsi_device *sdev, int qdepth)
342{ 343{
343 if (qdepth > MBOX_MAX_SCSI_CMDS) 344 if (qdepth > MBOX_MAX_SCSI_CMDS)
@@ -369,8 +370,8 @@ static struct scsi_host_template megaraid_template_g = {
369 * megaraid_init - module load hook 370 * megaraid_init - module load hook
370 * 371 *
371 * We register ourselves as hotplug enabled module and let PCI subsystem 372 * We register ourselves as hotplug enabled module and let PCI subsystem
372 * discover our adaters 373 * discover our adapters.
373 **/ 374 */
374static int __init 375static int __init
375megaraid_init(void) 376megaraid_init(void)
376{ 377{
@@ -405,7 +406,7 @@ megaraid_init(void)
405/** 406/**
406 * megaraid_exit - driver unload entry point 407 * megaraid_exit - driver unload entry point
407 * 408 *
408 * We simply unwrap the megaraid_init routine here 409 * We simply unwrap the megaraid_init routine here.
409 */ 410 */
410static void __exit 411static void __exit
411megaraid_exit(void) 412megaraid_exit(void)
@@ -421,12 +422,12 @@ megaraid_exit(void)
421 422
422/** 423/**
423 * megaraid_probe_one - PCI hotplug entry point 424 * megaraid_probe_one - PCI hotplug entry point
424 * @param pdev : handle to this controller's PCI configuration space 425 * @pdev : handle to this controller's PCI configuration space
425 * @param id : pci device id of the class of controllers 426 * @id : pci device id of the class of controllers
426 * 427 *
427 * This routine should be called whenever a new adapter is detected by the 428 * This routine should be called whenever a new adapter is detected by the
 428 * PCI hotplug subsystem. 429 * PCI hotplug subsystem.
429 **/ 430 */
430static int __devinit 431static int __devinit
431megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) 432megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
432{ 433{
@@ -542,16 +543,15 @@ out_probe_one:
542 543
543 544
544/** 545/**
545 * megaraid_detach_one - release the framework resources and call LLD release 546 * megaraid_detach_one - release framework resources and call LLD release routine
 546 * routine 547 * @pdev : handle for our PCI configuration space
547 * @param pdev : handle for our PCI cofiguration space
548 * 548 *
549 * This routine is called during driver unload. We free all the allocated 549 * This routine is called during driver unload. We free all the allocated
550 * resources and call the corresponding LLD so that it can also release all 550 * resources and call the corresponding LLD so that it can also release all
551 * its resources. 551 * its resources.
552 * 552 *
553 * This routine is also called from the PCI hotplug system 553 * This routine is also called from the PCI hotplug system.
554 **/ 554 */
555static void 555static void
556megaraid_detach_one(struct pci_dev *pdev) 556megaraid_detach_one(struct pci_dev *pdev)
557{ 557{
@@ -615,9 +615,9 @@ megaraid_detach_one(struct pci_dev *pdev)
615 615
616/** 616/**
617 * megaraid_mbox_shutdown - PCI shutdown for megaraid HBA 617 * megaraid_mbox_shutdown - PCI shutdown for megaraid HBA
618 * @param device : generice driver model device 618 * @pdev : generic driver model device
619 * 619 *
620 * Shutdown notification, perform flush cache 620 * Shutdown notification, perform flush cache.
621 */ 621 */
622static void 622static void
623megaraid_mbox_shutdown(struct pci_dev *pdev) 623megaraid_mbox_shutdown(struct pci_dev *pdev)
@@ -643,10 +643,10 @@ megaraid_mbox_shutdown(struct pci_dev *pdev)
643 643
644/** 644/**
645 * megaraid_io_attach - attach a device with the IO subsystem 645 * megaraid_io_attach - attach a device with the IO subsystem
646 * @param adapter : controller's soft state 646 * @adapter : controller's soft state
647 * 647 *
648 * Attach this device with the IO subsystem 648 * Attach this device with the IO subsystem.
649 **/ 649 */
650static int 650static int
651megaraid_io_attach(adapter_t *adapter) 651megaraid_io_attach(adapter_t *adapter)
652{ 652{
@@ -695,10 +695,10 @@ megaraid_io_attach(adapter_t *adapter)
695 695
696/** 696/**
697 * megaraid_io_detach - detach a device from the IO subsystem 697 * megaraid_io_detach - detach a device from the IO subsystem
698 * @param adapter : controller's soft state 698 * @adapter : controller's soft state
699 * 699 *
700 * Detach this device from the IO subsystem 700 * Detach this device from the IO subsystem.
701 **/ 701 */
702static void 702static void
703megaraid_io_detach(adapter_t *adapter) 703megaraid_io_detach(adapter_t *adapter)
704{ 704{
@@ -722,13 +722,13 @@ megaraid_io_detach(adapter_t *adapter)
722 722
723/** 723/**
724 * megaraid_init_mbox - initialize controller 724 * megaraid_init_mbox - initialize controller
725 * @param adapter - our soft state 725 * @adapter : our soft state
726 * 726 *
727 * . Allocate 16-byte aligned mailbox memory for firmware handshake 727 * - Allocate 16-byte aligned mailbox memory for firmware handshake
728 * . Allocate controller's memory resources 728 * - Allocate controller's memory resources
729 * . Find out all initialization data 729 * - Find out all initialization data
730 * . Allocate memory required for all the commands 730 * - Allocate memory required for all the commands
731 * . Use internal library of FW routines, build up complete soft state 731 * - Use internal library of FW routines, build up complete soft state
732 */ 732 */
733static int __devinit 733static int __devinit
734megaraid_init_mbox(adapter_t *adapter) 734megaraid_init_mbox(adapter_t *adapter)
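The first step listed above, 16-byte aligned mailbox memory for the firmware handshake, relies on the usual over-allocate-and-round-up trick. A minimal, self-contained C sketch of that alignment arithmetic (illustrative only, not the driver's DMA allocation calls):

	#include <stdint.h>
	#include <stdlib.h>

	/* Over-allocate by 15 bytes, then round the address up to the next
	 * 16-byte boundary. The unaligned pointer is kept for the later free(). */
	void *alloc_aligned16(size_t size, void **unaligned)
	{
		uintptr_t addr;

		*unaligned = malloc(size + 15);
		if (!*unaligned)
			return NULL;

		addr = ((uintptr_t)*unaligned + 15) & ~(uintptr_t)15;
		return (void *)addr;
	}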
@@ -779,33 +779,39 @@ megaraid_init_mbox(adapter_t *adapter)
779 goto out_release_regions; 779 goto out_release_regions;
780 } 780 }
781 781
782 // 782 /* initialize the mutual exclusion lock for the mailbox */
783 // Setup the rest of the soft state using the library of FW routines 783 spin_lock_init(&raid_dev->mailbox_lock);
784 //
785 784
786 // request IRQ and register the interrupt service routine 785 /* allocate memory required for commands */
786 if (megaraid_alloc_cmd_packets(adapter) != 0)
787 goto out_iounmap;
788
789 /*
790 * Issue SYNC cmd to flush the pending cmds in the adapter
791 * and initialize its internal state
792 */
793
794 if (megaraid_mbox_fire_sync_cmd(adapter))
795 con_log(CL_ANN, ("megaraid: sync cmd failed\n"));
796
797 /*
798 * Setup the rest of the soft state using the library of
799 * FW routines
800 */
801
802 /* request IRQ and register the interrupt service routine */
787 if (request_irq(adapter->irq, megaraid_isr, IRQF_SHARED, "megaraid", 803 if (request_irq(adapter->irq, megaraid_isr, IRQF_SHARED, "megaraid",
788 adapter)) { 804 adapter)) {
789 805
790 con_log(CL_ANN, (KERN_WARNING 806 con_log(CL_ANN, (KERN_WARNING
791 "megaraid: Couldn't register IRQ %d!\n", adapter->irq)); 807 "megaraid: Couldn't register IRQ %d!\n", adapter->irq));
808 goto out_alloc_cmds;
792 809
793 goto out_iounmap;
794 }
795
796
797 // initialize the mutual exclusion lock for the mailbox
798 spin_lock_init(&raid_dev->mailbox_lock);
799
800 // allocate memory required for commands
801 if (megaraid_alloc_cmd_packets(adapter) != 0) {
802 goto out_free_irq;
803 } 810 }
804 811
805 // Product info 812 // Product info
806 if (megaraid_mbox_product_info(adapter) != 0) { 813 if (megaraid_mbox_product_info(adapter) != 0)
807 goto out_alloc_cmds; 814 goto out_free_irq;
808 }
809 815
810 // Do we support extended CDBs 816 // Do we support extended CDBs
811 adapter->max_cdb_sz = 10; 817 adapter->max_cdb_sz = 10;
@@ -874,9 +880,8 @@ megaraid_init_mbox(adapter_t *adapter)
874 * Allocate resources required to issue FW calls, when sysfs is 880 * Allocate resources required to issue FW calls, when sysfs is
875 * accessed 881 * accessed
876 */ 882 */
877 if (megaraid_sysfs_alloc_resources(adapter) != 0) { 883 if (megaraid_sysfs_alloc_resources(adapter) != 0)
878 goto out_alloc_cmds; 884 goto out_free_irq;
879 }
880 885
881 // Set the DMA mask to 64-bit. All supported controllers are capable of 886 // Set the DMA mask to 64-bit. All supported controllers are capable of
882 // DMA in this range 887 // DMA in this range
@@ -920,10 +925,10 @@ megaraid_init_mbox(adapter_t *adapter)
920 925
921out_free_sysfs_res: 926out_free_sysfs_res:
922 megaraid_sysfs_free_resources(adapter); 927 megaraid_sysfs_free_resources(adapter);
923out_alloc_cmds:
924 megaraid_free_cmd_packets(adapter);
925out_free_irq: 928out_free_irq:
926 free_irq(adapter->irq, adapter); 929 free_irq(adapter->irq, adapter);
930out_alloc_cmds:
931 megaraid_free_cmd_packets(adapter);
927out_iounmap: 932out_iounmap:
928 iounmap(raid_dev->baseaddr); 933 iounmap(raid_dev->baseaddr);
929out_release_regions: 934out_release_regions:
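The relabelled error path above follows the standard kernel unwinding idiom: each failure jumps to the label that releases everything acquired so far, and the labels run in reverse order of acquisition (command packets, then the IRQ, then the sysfs resources in the new ordering). A schematic, self-contained sketch of the pattern with stand-in helpers, not the driver's functions:

	#include <stdio.h>

	static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
	static void release(const char *what) { printf("release %s\n", what); }

	static int setup_example(void)
	{
		if (acquire("cmd packets"))
			goto out;
		if (acquire("irq"))
			goto out_free_cmds;
		if (acquire("sysfs resources"))
			goto out_free_irq;

		return 0;

	out_free_irq:
		release("irq");
	out_free_cmds:
		release("cmd packets");
	out:
		return -1;
	}

	int main(void) { return setup_example(); }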
@@ -937,7 +942,7 @@ out_free_raid_dev:
937 942
938/** 943/**
939 * megaraid_fini_mbox - undo controller initialization 944 * megaraid_fini_mbox - undo controller initialization
940 * @param adapter : our soft state 945 * @adapter : our soft state
941 */ 946 */
942static void 947static void
943megaraid_fini_mbox(adapter_t *adapter) 948megaraid_fini_mbox(adapter_t *adapter)
@@ -967,12 +972,12 @@ megaraid_fini_mbox(adapter_t *adapter)
967 972
968/** 973/**
969 * megaraid_alloc_cmd_packets - allocate shared mailbox 974 * megaraid_alloc_cmd_packets - allocate shared mailbox
970 * @param adapter : soft state of the raid controller 975 * @adapter : soft state of the raid controller
971 * 976 *
972 * Allocate and align the shared mailbox. This mailbox is used to issue 977 * Allocate and align the shared mailbox. This mailbox is used to issue
973 * all the commands. For IO based controllers, the mailbox is also registered 978 * all the commands. For IO based controllers, the mailbox is also registered
974 * with the FW. Allocate memory for all commands as well. 979 * with the FW. Allocate memory for all commands as well.
975 * This is our big allocator 980 * This is our big allocator.
976 */ 981 */
977static int 982static int
978megaraid_alloc_cmd_packets(adapter_t *adapter) 983megaraid_alloc_cmd_packets(adapter_t *adapter)
@@ -1132,9 +1137,9 @@ out_free_common_mbox:
1132 1137
1133/** 1138/**
1134 * megaraid_free_cmd_packets - free memory 1139 * megaraid_free_cmd_packets - free memory
1135 * @param adapter : soft state of the raid controller 1140 * @adapter : soft state of the raid controller
1136 * 1141 *
1137 * Release memory resources allocated for commands 1142 * Release memory resources allocated for commands.
1138 */ 1143 */
1139static void 1144static void
1140megaraid_free_cmd_packets(adapter_t *adapter) 1145megaraid_free_cmd_packets(adapter_t *adapter)
@@ -1156,10 +1161,10 @@ megaraid_free_cmd_packets(adapter_t *adapter)
1156 1161
1157/** 1162/**
1158 * megaraid_mbox_setup_dma_pools - setup dma pool for command packets 1163 * megaraid_mbox_setup_dma_pools - setup dma pool for command packets
1159 * @param adapter : HBA soft state 1164 * @adapter : HBA soft state
1160 * 1165 *
1161 * setup the dma pools for mailbox, passthru and extended passthru structures, 1166 * Set up the dma pools for mailbox, passthru and extended passthru structures,
1162 * and scatter-gather lists 1167 * and scatter-gather lists.
1163 */ 1168 */
1164static int 1169static int
1165megaraid_mbox_setup_dma_pools(adapter_t *adapter) 1170megaraid_mbox_setup_dma_pools(adapter_t *adapter)
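The pools described above are per-object DMA pools of fixed-size frames. A hedged kernel-style sketch of one such pool using the generic dma_pool API (the name, object size and alignment here are illustrative, not the driver's actual pci_pool parameters):

	#include <linux/dmapool.h>
	#include <linux/device.h>
	#include <linux/gfp.h>
	#include <linux/errno.h>

	/* Create a pool of fixed-size, 16-byte aligned frames, carve one
	 * object out of it, then give everything back. */
	static int example_mbox_pool_roundtrip(struct device *dev)
	{
		struct dma_pool *pool;
		dma_addr_t dma_handle;
		void *vaddr;

		pool = dma_pool_create("example_mbox", dev, 64, 16, 0);
		if (!pool)
			return -ENOMEM;

		/* dma_handle is the bus address the controller would be given. */
		vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma_handle);
		if (!vaddr) {
			dma_pool_destroy(pool);
			return -ENOMEM;
		}

		dma_pool_free(pool, vaddr, dma_handle);
		dma_pool_destroy(pool);
		return 0;
	}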
@@ -1252,10 +1257,10 @@ fail_setup_dma_pool:
1252 1257
1253/** 1258/**
1254 * megaraid_mbox_teardown_dma_pools - teardown dma pools for command packets 1259 * megaraid_mbox_teardown_dma_pools - teardown dma pools for command packets
1255 * @param adapter : HBA soft state 1260 * @adapter : HBA soft state
1256 * 1261 *
1257 * teardown the dma pool for mailbox, passthru and extended passthru 1262 * Tear down the dma pool for mailbox, passthru and extended passthru
1258 * structures, and scatter-gather lists 1263 * structures, and scatter-gather lists.
1259 */ 1264 */
1260static void 1265static void
1261megaraid_mbox_teardown_dma_pools(adapter_t *adapter) 1266megaraid_mbox_teardown_dma_pools(adapter_t *adapter)
@@ -1300,10 +1305,11 @@ megaraid_mbox_teardown_dma_pools(adapter_t *adapter)
1300/** 1305/**
1301 * megaraid_alloc_scb - detach and return a scb from the free list 1306 * megaraid_alloc_scb - detach and return a scb from the free list
1302 * @adapter : controller's soft state 1307 * @adapter : controller's soft state
1308 * @scp : pointer to the scsi command to be executed
1303 * 1309 *
1304 * return the scb from the head of the free list. NULL if there are none 1310 * Return the scb from the head of the free list. %NULL if there are none
1305 * available 1311 * available.
1306 **/ 1312 */
1307static scb_t * 1313static scb_t *
1308megaraid_alloc_scb(adapter_t *adapter, struct scsi_cmnd *scp) 1314megaraid_alloc_scb(adapter_t *adapter, struct scsi_cmnd *scp)
1309{ 1315{
@@ -1337,11 +1343,11 @@ megaraid_alloc_scb(adapter_t *adapter, struct scsi_cmnd *scp)
1337 * @adapter : controller's soft state 1343 * @adapter : controller's soft state
1338 * @scb : scb to be freed 1344 * @scb : scb to be freed
1339 * 1345 *
1340 * return the scb back to the free list of scbs. The caller must 'flush' the 1346 * Return the scb back to the free list of scbs. The caller must 'flush' the
1341 * SCB before calling us. E.g., performing pci_unmap and/or pci_sync etc. 1347 * SCB before calling us. E.g., performing pci_unmap and/or pci_sync etc.
1342 * NOTE NOTE: Make sure the scb is not on any list before calling this 1348 * NOTE NOTE: Make sure the scb is not on any list before calling this
1343 * routine. 1349 * routine.
1344 **/ 1350 */
1345static inline void 1351static inline void
1346megaraid_dealloc_scb(adapter_t *adapter, scb_t *scb) 1352megaraid_dealloc_scb(adapter_t *adapter, scb_t *scb)
1347{ 1353{
@@ -1362,10 +1368,10 @@ megaraid_dealloc_scb(adapter_t *adapter, scb_t *scb)
1362 1368
1363/** 1369/**
1364 * megaraid_mbox_mksgl - make the scatter-gather list 1370 * megaraid_mbox_mksgl - make the scatter-gather list
1365 * @adapter - controller's soft state 1371 * @adapter : controller's soft state
1366 * @scb - scsi control block 1372 * @scb : scsi control block
1367 * 1373 *
1368 * prepare the scatter-gather list 1374 * Prepare the scatter-gather list.
1369 */ 1375 */
1370static int 1376static int
1371megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb) 1377megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb)
@@ -1435,10 +1441,10 @@ megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb)
1435 1441
1436/** 1442/**
1437 * mbox_post_cmd - issue a mailbox command 1443 * mbox_post_cmd - issue a mailbox command
1438 * @adapter - controller's soft state 1444 * @adapter : controller's soft state
1439 * @scb - command to be issued 1445 * @scb : command to be issued
1440 * 1446 *
1441 * post the command to the controller if mailbox is availble. 1447 * Post the command to the controller if mailbox is available.
1442 */ 1448 */
1443static int 1449static int
1444mbox_post_cmd(adapter_t *adapter, scb_t *scb) 1450mbox_post_cmd(adapter_t *adapter, scb_t *scb)
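The general shape of posting through a shared mailbox is: bail out if the busy flag is still set, fill in the command, publish it with a write barrier, then ring the inbound doorbell. A simplified sketch of that sequence; the structure layout, register offset and the 0x1 "mailbox valid" bit are placeholders, not the real WRINDOOR semantics:

	#include <linux/io.h>
	#include <linux/kernel.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	struct example_mbox {
		u8 cmd;
		u8 busy;
		u8 status;
	};

	#define EXAMPLE_IDOOR_OFF 0x20	/* hypothetical inbound doorbell offset */

	static int example_post_cmd(struct example_mbox *mbox, void __iomem *regs,
				    u32 mbox_dma, u8 cmd)
	{
		if (mbox->busy)
			return -EBUSY;	/* mailbox not free; caller retries later */

		mbox->cmd = cmd;
		mbox->status = 0;
		mbox->busy = 1;

		wmb();			/* make the mailbox visible before the doorbell */
		writel(mbox_dma | 0x1, regs + EXAMPLE_IDOOR_OFF);
		return 0;
	}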
@@ -1518,7 +1524,7 @@ mbox_post_cmd(adapter_t *adapter, scb_t *scb)
1518 * Queue entry point for mailbox based controllers. 1524 * Queue entry point for mailbox based controllers.
1519 */ 1525 */
1520static int 1526static int
1521megaraid_queue_command(struct scsi_cmnd *scp, void (* done)(struct scsi_cmnd *)) 1527megaraid_queue_command(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *))
1522{ 1528{
1523 adapter_t *adapter; 1529 adapter_t *adapter;
1524 scb_t *scb; 1530 scb_t *scb;
@@ -1548,15 +1554,15 @@ megaraid_queue_command(struct scsi_cmnd *scp, void (* done)(struct scsi_cmnd *))
1548} 1554}
1549 1555
1550/** 1556/**
1551 * megaraid_mbox_build_cmd - transform the mid-layer scsi command to megaraid 1557 * megaraid_mbox_build_cmd - transform the mid-layer scsi commands
1552 * firmware lingua 1558 * @adapter : controller's soft state
1553 * @adapter - controller's soft state 1559 * @scp : mid-layer scsi command pointer
1554 * @scp - mid-layer scsi command pointer 1560 * @busy : set if request could not be completed because of lack of
1555 * @busy - set if request could not be completed because of lack of
1556 * resources 1561 * resources
1557 * 1562 *
1558 * convert the command issued by mid-layer to format understood by megaraid 1563 * Transform the mid-layer scsi command to megaraid firmware lingua.
1559 * firmware. We also complete certain command without sending them to firmware 1564 * Convert the command issued by the mid-layer to the format understood by megaraid
1565 * firmware. We also complete certain commands without sending them to firmware.
1560 */ 1566 */
1561static scb_t * 1567static scb_t *
1562megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy) 1568megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
@@ -1937,9 +1943,9 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
1937/** 1943/**
1938 * megaraid_mbox_runpendq - execute commands queued in the pending queue 1944 * megaraid_mbox_runpendq - execute commands queued in the pending queue
1939 * @adapter : controller's soft state 1945 * @adapter : controller's soft state
1940 * @scb : SCB to be queued in the pending list 1946 * @scb_q : SCB to be queued in the pending list
1941 * 1947 *
1942 * scan the pending list for commands which are not yet issued and try to 1948 * Scan the pending list for commands which are not yet issued and try to
1943 * post to the controller. The SCB can be a null pointer, which would indicate 1949 * post to the controller. The SCB can be a null pointer, which would indicate
1944 * no SCB to be queued, just try to execute the ones in the pending list. 1950 * no SCB to be queued, just try to execute the ones in the pending list.
1945 * 1951 *
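Scanning a pending queue like this is a plain list walk: pull SCBs off the head while the controller keeps accepting them, and stop as soon as a post fails so the remainder stays queued for the next run. A schematic sketch with the usual list helpers (the types and the post routine are stand-ins, not the driver's):

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct example_scb {
		struct list_head list;
		int id;
	};

	/* Returns 0 if the controller accepted the command, non-zero if busy. */
	static int example_post(struct example_scb *scb) { return 0; }

	static void example_runpendq(struct list_head *pending, spinlock_t *lock)
	{
		struct example_scb *scb, *tmp;
		unsigned long flags;

		spin_lock_irqsave(lock, flags);
		list_for_each_entry_safe(scb, tmp, pending, list) {
			if (example_post(scb))
				break;		/* controller busy; try again later */
			list_del_init(&scb->list);
		}
		spin_unlock_irqrestore(lock, flags);
	}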
@@ -2012,11 +2018,11 @@ megaraid_mbox_runpendq(adapter_t *adapter, scb_t *scb_q)
2012 2018
2013/** 2019/**
2014 * megaraid_mbox_prepare_pthru - prepare a command for physical devices 2020 * megaraid_mbox_prepare_pthru - prepare a command for physical devices
2015 * @adapter - pointer to controller's soft state 2021 * @adapter : pointer to controller's soft state
2016 * @scb - scsi control block 2022 * @scb : scsi control block
2017 * @scp - scsi command from the mid-layer 2023 * @scp : scsi command from the mid-layer
2018 * 2024 *
2019 * prepare a command for the scsi physical devices 2025 * Prepare a command for the scsi physical devices.
2020 */ 2026 */
2021static void 2027static void
2022megaraid_mbox_prepare_pthru(adapter_t *adapter, scb_t *scb, 2028megaraid_mbox_prepare_pthru(adapter_t *adapter, scb_t *scb,
@@ -2060,12 +2066,12 @@ megaraid_mbox_prepare_pthru(adapter_t *adapter, scb_t *scb,
2060 2066
2061/** 2067/**
2062 * megaraid_mbox_prepare_epthru - prepare a command for physical devices 2068 * megaraid_mbox_prepare_epthru - prepare a command for physical devices
2063 * @adapter - pointer to controller's soft state 2069 * @adapter : pointer to controller's soft state
2064 * @scb - scsi control block 2070 * @scb : scsi control block
2065 * @scp - scsi command from the mid-layer 2071 * @scp : scsi command from the mid-layer
2066 * 2072 *
2067 * prepare a command for the scsi physical devices. This rountine prepares 2073 * Prepare a command for the scsi physical devices. This routine prepares
2068 * commands for devices which can take extended CDBs (>10 bytes) 2074 * commands for devices which can take extended CDBs (>10 bytes).
2069 */ 2075 */
2070static void 2076static void
2071megaraid_mbox_prepare_epthru(adapter_t *adapter, scb_t *scb, 2077megaraid_mbox_prepare_epthru(adapter_t *adapter, scb_t *scb,
@@ -2109,9 +2115,9 @@ megaraid_mbox_prepare_epthru(adapter_t *adapter, scb_t *scb,
2109 2115
2110/** 2116/**
2111 * megaraid_ack_sequence - interrupt ack sequence for memory mapped HBAs 2117 * megaraid_ack_sequence - interrupt ack sequence for memory mapped HBAs
2112 * @adapter - controller's soft state 2118 * @adapter : controller's soft state
2113 * 2119 *
2114 * Interrupt ackrowledgement sequence for memory mapped HBAs. Find out the 2120 * Interrupt acknowledgement sequence for memory mapped HBAs. Find out the
2115 * completed commands and put them on the completed list for later processing. 2121 * completed commands and put them on the completed list for later processing.
2116 * 2122 *
2117 * Returns: 1 if the interrupt is valid, 0 otherwise 2123 * Returns: 1 if the interrupt is valid, 0 otherwise
@@ -2224,9 +2230,8 @@ megaraid_ack_sequence(adapter_t *adapter)
2224 2230
2225/** 2231/**
2226 * megaraid_isr - isr for memory based mailbox based controllers 2232 * megaraid_isr - isr for memory based mailbox based controllers
2227 * @irq - irq 2233 * @irq : irq
2228 * @devp - pointer to our soft state 2234 * @devp : pointer to our soft state
2229 * @regs - unused
2230 * 2235 *
2231 * Interrupt service routine for memory-mapped mailbox controllers. 2236 * Interrupt service routine for memory-mapped mailbox controllers.
2232 */ 2237 */
@@ -2671,7 +2676,7 @@ megaraid_abort_handler(struct scsi_cmnd *scp)
2671 * the FW is still live, in which case the outstanding commands counter must go 2676 * the FW is still live, in which case the outstanding commands counter must go
2672 * down to 0. If that happens, also issue the reservation reset command to 2677 * down to 0. If that happens, also issue the reservation reset command to
2673 * relinquish (possible) reservations on the logical drives connected to this 2678 * relinquish (possible) reservations on the logical drives connected to this
2674 * host 2679 * host.
2675 **/ 2680 **/
2676static int 2681static int
2677megaraid_reset_handler(struct scsi_cmnd *scp) 2682megaraid_reset_handler(struct scsi_cmnd *scp)
@@ -2823,11 +2828,11 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
2823 2828
2824/** 2829/**
2825 * mbox_post_sync_cmd() - blocking command to the mailbox based controllers 2830 * mbox_post_sync_cmd() - blocking command to the mailbox based controllers
2826 * @adapter - controller's soft state 2831 * @adapter : controller's soft state
2827 * @raw_mbox - the mailbox 2832 * @raw_mbox : the mailbox
2828 * 2833 *
2829 * Issue a scb in synchronous and non-interrupt mode for mailbox based 2834 * Issue a scb in synchronous and non-interrupt mode for mailbox based
2830 * controllers 2835 * controllers.
2831 */ 2836 */
2832static int 2837static int
2833mbox_post_sync_cmd(adapter_t *adapter, uint8_t raw_mbox[]) 2838mbox_post_sync_cmd(adapter_t *adapter, uint8_t raw_mbox[])
@@ -2955,12 +2960,12 @@ blocked_mailbox:
2955 2960
2956/** 2961/**
2957 * mbox_post_sync_cmd_fast - blocking command to the mailbox based controllers 2962 * mbox_post_sync_cmd_fast - blocking command to the mailbox based controllers
2958 * @adapter - controller's soft state 2963 * @adapter : controller's soft state
2959 * @raw_mbox - the mailbox 2964 * @raw_mbox : the mailbox
2960 * 2965 *
2961 * Issue a scb in synchronous and non-interrupt mode for mailbox based 2966 * Issue a scb in synchronous and non-interrupt mode for mailbox based
2962 * controllers. This is a faster version of the synchronous command and 2967 * controllers. This is a faster version of the synchronous command and
2963 * therefore can be called in interrupt-context as well 2968 * therefore can be called in interrupt-context as well.
2964 */ 2969 */
2965static int 2970static int
2966mbox_post_sync_cmd_fast(adapter_t *adapter, uint8_t raw_mbox[]) 2971mbox_post_sync_cmd_fast(adapter_t *adapter, uint8_t raw_mbox[])
@@ -3008,10 +3013,10 @@ mbox_post_sync_cmd_fast(adapter_t *adapter, uint8_t raw_mbox[])
3008 3013
3009/** 3014/**
3010 * megaraid_busywait_mbox() - Wait until the controller's mailbox is available 3015 * megaraid_busywait_mbox() - Wait until the controller's mailbox is available
3011 * @raid_dev - RAID device (HBA) soft state 3016 * @raid_dev : RAID device (HBA) soft state
3012 * 3017 *
3013 * wait until the controller's mailbox is available to accept more commands. 3018 * Wait until the controller's mailbox is available to accept more commands.
3014 * wait for at most 1 second 3019 * Wait for at most 1 second.
3015 */ 3020 */
3016static int 3021static int
3017megaraid_busywait_mbox(mraid_device_t *raid_dev) 3022megaraid_busywait_mbox(mraid_device_t *raid_dev)
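A bounded wait like the one documented above is a fixed-budget poll: re-check the busy flag at a small interval and give up once roughly one second has elapsed. A minimal sketch (the flag type and the 1 ms granularity are illustrative, not the driver's exact loop):

	#include <linux/delay.h>
	#include <linux/types.h>

	/* Poll a busy flag for at most ~1 second in 1 ms steps.
	 * Returns 0 when the flag clears, -1 on timeout. */
	static int example_busywait(volatile u8 *busy)
	{
		int i;

		for (i = 0; i < 1000; i++) {
			if (!*busy)
				return 0;
			mdelay(1);
		}
		return -1;
	}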
@@ -3032,9 +3037,9 @@ megaraid_busywait_mbox(mraid_device_t *raid_dev)
3032 3037
3033/** 3038/**
3034 * megaraid_mbox_product_info - some static information about the controller 3039 * megaraid_mbox_product_info - some static information about the controller
3035 * @adapter - our soft state 3040 * @adapter : our soft state
3036 * 3041 *
3037 * issue commands to the controller to grab some parameters required by our 3042 * Issue commands to the controller to grab some parameters required by our
3038 * caller. 3043 * caller.
3039 */ 3044 */
3040static int 3045static int
@@ -3157,10 +3162,10 @@ megaraid_mbox_product_info(adapter_t *adapter)
3157 3162
3158/** 3163/**
3159 * megaraid_mbox_extended_cdb - check for support for extended CDBs 3164 * megaraid_mbox_extended_cdb - check for support for extended CDBs
3160 * @adapter - soft state for the controller 3165 * @adapter : soft state for the controller
3161 * 3166 *
3162 * this routine check whether the controller in question supports extended 3167 * This routine checks whether the controller in question supports extended
3163 * ( > 10 bytes ) CDBs 3168 * ( > 10 bytes ) CDBs.
3164 */ 3169 */
3165static int 3170static int
3166megaraid_mbox_extended_cdb(adapter_t *adapter) 3171megaraid_mbox_extended_cdb(adapter_t *adapter)
@@ -3193,8 +3198,8 @@ megaraid_mbox_extended_cdb(adapter_t *adapter)
3193 3198
3194/** 3199/**
3195 * megaraid_mbox_support_ha - Do we support clustering 3200 * megaraid_mbox_support_ha - Do we support clustering
3196 * @adapter - soft state for the controller 3201 * @adapter : soft state for the controller
3197 * @init_id - ID of the initiator 3202 * @init_id : ID of the initiator
3198 * 3203 *
3199 * Determine if the firmware supports clustering and the ID of the initiator. 3204 * Determine if the firmware supports clustering and the ID of the initiator.
3200 */ 3205 */
@@ -3236,9 +3241,9 @@ megaraid_mbox_support_ha(adapter_t *adapter, uint16_t *init_id)
3236 3241
3237/** 3242/**
3238 * megaraid_mbox_support_random_del - Do we support random deletion 3243 * megaraid_mbox_support_random_del - Do we support random deletion
3239 * @adapter - soft state for the controller 3244 * @adapter : soft state for the controller
3240 * 3245 *
3241 * Determine if the firmware supports random deletion 3246 * Determine if the firmware supports random deletion.
3242 * Return: 1 if operation is supported, 0 otherwise 3247 * Return: 1 if operation is supported, 0 otherwise
3243 */ 3248 */
3244static int 3249static int
@@ -3271,10 +3276,10 @@ megaraid_mbox_support_random_del(adapter_t *adapter)
3271 3276
3272/** 3277/**
3273 * megaraid_mbox_get_max_sg - maximum sg elements supported by the firmware 3278 * megaraid_mbox_get_max_sg - maximum sg elements supported by the firmware
3274 * @adapter - soft state for the controller 3279 * @adapter : soft state for the controller
3275 * 3280 *
3276 * Find out the maximum number of scatter-gather elements supported by the 3281 * Find out the maximum number of scatter-gather elements supported by the
3277 * firmware 3282 * firmware.
3278 */ 3283 */
3279static int 3284static int
3280megaraid_mbox_get_max_sg(adapter_t *adapter) 3285megaraid_mbox_get_max_sg(adapter_t *adapter)
@@ -3311,10 +3316,10 @@ megaraid_mbox_get_max_sg(adapter_t *adapter)
3311 3316
3312/** 3317/**
3313 * megaraid_mbox_enum_raid_scsi - enumerate the RAID and SCSI channels 3318 * megaraid_mbox_enum_raid_scsi - enumerate the RAID and SCSI channels
3314 * @adapter - soft state for the controller 3319 * @adapter : soft state for the controller
3315 * 3320 *
3316 * Enumerate the RAID and SCSI channels for ROMB platoforms so that channels 3321 * Enumerate the RAID and SCSI channels for ROMB platforms so that channels
3317 * can be exported as regular SCSI channels 3322 * can be exported as regular SCSI channels.
3318 */ 3323 */
3319static void 3324static void
3320megaraid_mbox_enum_raid_scsi(adapter_t *adapter) 3325megaraid_mbox_enum_raid_scsi(adapter_t *adapter)
@@ -3348,9 +3353,9 @@ megaraid_mbox_enum_raid_scsi(adapter_t *adapter)
3348 3353
3349/** 3354/**
3350 * megaraid_mbox_flush_cache - flush adapter and disks cache 3355 * megaraid_mbox_flush_cache - flush adapter and disks cache
3351 * @param adapter : soft state for the controller 3356 * @adapter : soft state for the controller
3352 * 3357 *
3353 * Flush adapter cache followed by disks cache 3358 * Flush adapter cache followed by disks cache.
3354 */ 3359 */
3355static void 3360static void
3356megaraid_mbox_flush_cache(adapter_t *adapter) 3361megaraid_mbox_flush_cache(adapter_t *adapter)
@@ -3380,13 +3385,91 @@ megaraid_mbox_flush_cache(adapter_t *adapter)
3380 3385
3381 3386
3382/** 3387/**
3388 * megaraid_mbox_fire_sync_cmd - fire the sync cmd
3389 * @adapter : soft state for the controller
3390 *
3391 * Clears the pending cmds in FW and reinits its RAID structs.
3392 */
3393static int
3394megaraid_mbox_fire_sync_cmd(adapter_t *adapter)
3395{
3396 mbox_t *mbox;
3397 uint8_t raw_mbox[sizeof(mbox_t)];
3398 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
3399 mbox64_t *mbox64;
3400 int status = 0;
3401 int i;
3402 uint32_t dword;
3403
3404 mbox = (mbox_t *)raw_mbox;
3405
3406 memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
3407
3408 raw_mbox[0] = 0xFF;
3409
3410 mbox64 = raid_dev->mbox64;
3411 mbox = raid_dev->mbox;
3412
3413 /* Wait until mailbox is free */
3414 if (megaraid_busywait_mbox(raid_dev) != 0) {
3415 status = 1;
3416 goto blocked_mailbox;
3417 }
3418
3419 /* Copy mailbox data into host structure */
3420 memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 16);
3421 mbox->cmdid = 0xFE;
3422 mbox->busy = 1;
3423 mbox->poll = 0;
3424 mbox->ack = 0;
3425 mbox->numstatus = 0;
3426 mbox->status = 0;
3427
3428 wmb();
3429 WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
3430
3431 /* Wait for a maximum of 1 minute for the status to post.
3432 * If the firmware supports the above command,
3433 * mbox->cmd will be set to 0;
3434 * otherwise
3435 * the firmware will reject the command with
3436 * mbox->numstatus set to 1.
3437 */
3438
3439 i = 0;
3440 status = 0;
3441 while (!mbox->numstatus && mbox->cmd == 0xFF) {
3442 rmb();
3443 msleep(1);
3444 i++;
3445 if (i > 1000 * 60) {
3446 status = 1;
3447 break;
3448 }
3449 }
3450 if (mbox->numstatus == 1)
3451 status = 1; /* cmd not supported */
3452
3453 /* Check for interrupt line */
3454 dword = RDOUTDOOR(raid_dev);
3455 WROUTDOOR(raid_dev, dword);
3456 WRINDOOR(raid_dev, 2);
3457
3458 return status;
3459
3460blocked_mailbox:
3461 con_log(CL_ANN, (KERN_WARNING "megaraid: blocked mailbox\n"));
3462 return status;
3463}
3464
3465/**
3383 * megaraid_mbox_display_scb - display SCB information, mostly debug purposes 3466 * megaraid_mbox_display_scb - display SCB information, mostly debug purposes
3384 * @param adapter : controllers' soft state 3467 * @adapter : controller's soft state
3385 * @param scb : SCB to be displayed 3468 * @scb : SCB to be displayed
3386 * @param level : debug level for console print 3469 * @level : debug level for console print
3387 * 3470 *
3388 * Display information about the given SCB iff the current debug level is 3471 * Display information about the given SCB iff the current debug level is
3389 * verbose 3472 * verbose.
3390 */ 3473 */
3391static void 3474static void
3392megaraid_mbox_display_scb(adapter_t *adapter, scb_t *scb) 3475megaraid_mbox_display_scb(adapter_t *adapter, scb_t *scb)
@@ -3434,7 +3517,7 @@ megaraid_mbox_display_scb(adapter_t *adapter, scb_t *scb)
3434 * scsi addresses and megaraid scsi and logical drive addresses. We export 3517 * scsi addresses and megaraid scsi and logical drive addresses. We export
3435 * scsi devices on their actual addresses, whereas the logical drives are 3518 * scsi devices on their actual addresses, whereas the logical drives are
3436 * exported on a virtual scsi channel. 3519 * exported on a virtual scsi channel.
3437 **/ 3520 */
3438static void 3521static void
3439megaraid_mbox_setup_device_map(adapter_t *adapter) 3522megaraid_mbox_setup_device_map(adapter_t *adapter)
3440{ 3523{
@@ -3472,7 +3555,7 @@ megaraid_mbox_setup_device_map(adapter_t *adapter)
3472 3555
3473/** 3556/**
3474 * megaraid_cmm_register - register with the management module 3557 * megaraid_cmm_register - register with the management module
3475 * @param adapter : HBA soft state 3558 * @adapter : HBA soft state
3476 * 3559 *
3477 * Register with the management module, which allows applications to issue 3560 * Register with the management module, which allows applications to issue
3478 * ioctl calls to the drivers. This interface is used by the management module 3561 * ioctl calls to the drivers. This interface is used by the management module
@@ -3562,11 +3645,11 @@ megaraid_cmm_register(adapter_t *adapter)
3562 3645
3563/** 3646/**
3564 * megaraid_cmm_unregister - un-register with the management module 3647 * megaraid_cmm_unregister - un-register with the management module
3565 * @param adapter : HBA soft state 3648 * @adapter : HBA soft state
3566 * 3649 *
3567 * Un-register with the management module. 3650 * Un-register with the management module.
3568 * FIXME: mgmt module must return failure for unregister if it has pending 3651 * FIXME: mgmt module must return failure for unregister if it has pending
3569 * commands in LLD 3652 * commands in LLD.
3570 */ 3653 */
3571static int 3654static int
3572megaraid_cmm_unregister(adapter_t *adapter) 3655megaraid_cmm_unregister(adapter_t *adapter)
@@ -3579,9 +3662,9 @@ megaraid_cmm_unregister(adapter_t *adapter)
3579 3662
3580/** 3663/**
3581 * megaraid_mbox_mm_handler - interface for CMM to issue commands to LLD 3664 * megaraid_mbox_mm_handler - interface for CMM to issue commands to LLD
3582 * @param drvr_data : LLD specific data 3665 * @drvr_data : LLD specific data
3583 * @param kioc : CMM interface packet 3666 * @kioc : CMM interface packet
3584 * @param action : command action 3667 * @action : command action
3585 * 3668 *
3586 * This routine is invoked whenever the Common Management Module (CMM) has a 3669 * This routine is invoked whenever the Common Management Module (CMM) has a
3587 * command for us. The 'action' parameter specifies if this is a new command 3670 * command for us. The 'action' parameter specifies if this is a new command
@@ -3634,8 +3717,8 @@ megaraid_mbox_mm_handler(unsigned long drvr_data, uioc_t *kioc, uint32_t action)
3634 3717
3635/** 3718/**
3636 * megaraid_mbox_mm_command - issues commands routed through CMM 3719 * megaraid_mbox_mm_command - issues commands routed through CMM
3637 * @param adapter : HBA soft state 3720 * @adapter : HBA soft state
3638 * @param kioc : management command packet 3721 * @kioc : management command packet
3639 * 3722 *
3640 * Issues commands, which are routed through the management module. 3723 * Issues commands, which are routed through the management module.
3641 */ 3724 */
@@ -3804,8 +3887,8 @@ megaraid_mbox_mm_done(adapter_t *adapter, scb_t *scb)
3804 3887
3805/** 3888/**
3806 * gather_hbainfo - HBA characteristics for the applications 3889 * gather_hbainfo - HBA characteristics for the applications
3807 * @param adapter : HBA soft state 3890 * @adapter : HBA soft state
3808 * @param hinfo : pointer to the caller's host info strucuture 3891 * @hinfo : pointer to the caller's host info structure
3809 */ 3892 */
3810static int 3893static int
3811gather_hbainfo(adapter_t *adapter, mraid_hba_info_t *hinfo) 3894gather_hbainfo(adapter_t *adapter, mraid_hba_info_t *hinfo)
@@ -3839,16 +3922,15 @@ gather_hbainfo(adapter_t *adapter, mraid_hba_info_t *hinfo)
3839 3922
3840/** 3923/**
3841 * megaraid_sysfs_alloc_resources - allocate sysfs related resources 3924 * megaraid_sysfs_alloc_resources - allocate sysfs related resources
3925 * @adapter : controller's soft state
3842 * 3926 *
3843 * Allocate packets required to issue FW calls whenever the sysfs attributes 3927 * Allocate packets required to issue FW calls whenever the sysfs attributes
3844 * are read. These attributes would require up-to-date information from the 3928 * are read. These attributes would require up-to-date information from the
3845 * FW. Also set up resources for mutual exclusion to share these resources and 3929 * FW. Also set up resources for mutual exclusion to share these resources and
3846 * the wait queue. 3930 * the wait queue.
3847 * 3931 *
3848 * @param adapter : controller's soft state 3932 * Return 0 on success.
3849 * 3933 * Return -ERROR_CODE on failure.
3850 * @return 0 on success
3851 * @return -ERROR_CODE on failure
3852 */ 3934 */
3853static int 3935static int
3854megaraid_sysfs_alloc_resources(adapter_t *adapter) 3936megaraid_sysfs_alloc_resources(adapter_t *adapter)
@@ -3885,10 +3967,9 @@ megaraid_sysfs_alloc_resources(adapter_t *adapter)
3885 3967
3886/** 3968/**
3887 * megaraid_sysfs_free_resources - free sysfs related resources 3969 * megaraid_sysfs_free_resources - free sysfs related resources
3970 * @adapter : controller's soft state
3888 * 3971 *
3889 * Free packets allocated for sysfs FW commands 3972 * Free packets allocated for sysfs FW commands
3890 *
3891 * @param adapter : controller's soft state
3892 */ 3973 */
3893static void 3974static void
3894megaraid_sysfs_free_resources(adapter_t *adapter) 3975megaraid_sysfs_free_resources(adapter_t *adapter)
@@ -3907,10 +3988,9 @@ megaraid_sysfs_free_resources(adapter_t *adapter)
3907 3988
3908/** 3989/**
3909 * megaraid_sysfs_get_ldmap_done - callback for get ldmap 3990 * megaraid_sysfs_get_ldmap_done - callback for get ldmap
3991 * @uioc : completed packet
3910 * 3992 *
3911 * Callback routine called in the ISR/tasklet context for get ldmap call 3993 * Callback routine called in the ISR/tasklet context for get ldmap call
3912 *
3913 * @param uioc : completed packet
3914 */ 3994 */
3915static void 3995static void
3916megaraid_sysfs_get_ldmap_done(uioc_t *uioc) 3996megaraid_sysfs_get_ldmap_done(uioc_t *uioc)
@@ -3926,12 +4006,11 @@ megaraid_sysfs_get_ldmap_done(uioc_t *uioc)
3926 4006
3927/** 4007/**
3928 * megaraid_sysfs_get_ldmap_timeout - timeout handling for get ldmap 4008 * megaraid_sysfs_get_ldmap_timeout - timeout handling for get ldmap
4009 * @data : timed out packet
3929 * 4010 *
3930 * Timeout routine to recover and return to application, in case the adapter 4011 * Timeout routine to recover and return to application, in case the adapter
3931 * has stopped responding. A timeout of 60 seconds for this command seem like 4012 * has stopped responding. A timeout of 60 seconds for this command seems like
3932 * a good value 4013 * a good value.
3933 *
3934 * @param uioc : timed out packet
3935 */ 4014 */
3936static void 4015static void
3937megaraid_sysfs_get_ldmap_timeout(unsigned long data) 4016megaraid_sysfs_get_ldmap_timeout(unsigned long data)
@@ -3948,6 +4027,7 @@ megaraid_sysfs_get_ldmap_timeout(unsigned long data)
3948 4027
3949/** 4028/**
3950 * megaraid_sysfs_get_ldmap - get update logical drive map 4029 * megaraid_sysfs_get_ldmap - get update logical drive map
4030 * @adapter : controller's soft state
3951 * 4031 *
3952 * This routine will be called whenever the user reads the logical drive 4032 * This routine will be called whenever the user reads the logical drive
3953 * attributes, go get the current logical drive mapping table from the 4033 * attributes, go get the current logical drive mapping table from the
@@ -3959,10 +4039,8 @@ megaraid_sysfs_get_ldmap_timeout(unsigned long data)
3959 * standalone library. For now, this should suffice since there is no other 4039 * standalone library. For now, this should suffice since there is no other
3960 * user of this interface. 4040 * user of this interface.
3961 * 4041 *
3962 * @param adapter : controller's soft state 4042 * Return 0 on success.
3963 * 4043 * Return -1 on failure.
3964 * @return 0 on success
3965 * @return -1 on failure
3966 */ 4044 */
3967static int 4045static int
3968megaraid_sysfs_get_ldmap(adapter_t *adapter) 4046megaraid_sysfs_get_ldmap(adapter_t *adapter)
@@ -4064,13 +4142,12 @@ megaraid_sysfs_get_ldmap(adapter_t *adapter)
4064 4142
4065/** 4143/**
4066 * megaraid_sysfs_show_app_hndl - display application handle for this adapter 4144 * megaraid_sysfs_show_app_hndl - display application handle for this adapter
4145 * @cdev : class device object representation for the host
4146 * @buf : buffer to send data to
4067 * 4147 *
4068 * Display the handle used by the applications while executing management 4148 * Display the handle used by the applications while executing management
4069 * tasks on the adapter. We invoke a management module API to get the adapter 4149 * tasks on the adapter. We invoke a management module API to get the adapter
4070 * handle, since we do not interface with applications directly. 4150 * handle, since we do not interface with applications directly.
4071 *
4072 * @param cdev : class device object representation for the host
4073 * @param buf : buffer to send data to
4074 */ 4151 */
4075static ssize_t 4152static ssize_t
4076megaraid_sysfs_show_app_hndl(struct class_device *cdev, char *buf) 4153megaraid_sysfs_show_app_hndl(struct class_device *cdev, char *buf)
@@ -4087,16 +4164,18 @@ megaraid_sysfs_show_app_hndl(struct class_device *cdev, char *buf)
4087 4164
4088/** 4165/**
4089 * megaraid_sysfs_show_ldnum - display the logical drive number for this device 4166 * megaraid_sysfs_show_ldnum - display the logical drive number for this device
4167 * @dev : device object representation for the scsi device
4168 * @attr : device attribute to show
4169 * @buf : buffer to send data to
4090 * 4170 *
4091 * Display the logical drive number for the device in question, if it is a valid 4171 * Display the logical drive number for the device in question, if it is a valid
4092 * logical drive. For physical devices, "-1" is returned 4172 * logical drive. For physical devices, "-1" is returned.
4093 * The logical drive number is displayed in following format 4173 *
4174 * The logical drive number is displayed in the following format:
4094 * 4175 *
4095 * <SCSI ID> <LD NUM> <LD STICKY ID> <APP ADAPTER HANDLE> 4176 * <SCSI ID> <LD NUM> <LD STICKY ID> <APP ADAPTER HANDLE>
4096 * <int> <int> <int> <int>
4097 * 4177 *
4098 * @param dev : device object representation for the scsi device 4178 * <int> <int> <int> <int>
4099 * @param buf : buffer to send data to
4100 */ 4179 */
4101static ssize_t 4180static ssize_t
4102megaraid_sysfs_show_ldnum(struct device *dev, struct device_attribute *attr, char *buf) 4181megaraid_sysfs_show_ldnum(struct device *dev, struct device_attribute *attr, char *buf)
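The four-column format documented above is produced the usual way for a sysfs show routine: a single snprintf into the PAGE_SIZE buffer handed in by the core. A hedged sketch with made-up values (the real routine would look these up from the adapter's device state):

	#include <linux/device.h>

	static ssize_t example_show_ldnum(struct device *dev,
					  struct device_attribute *attr, char *buf)
	{
		int scsi_id = 0, ld_num = -1, sticky_id = -1, app_hndl = 0;

		/* <SCSI ID> <LD NUM> <LD STICKY ID> <APP ADAPTER HANDLE> */
		return snprintf(buf, PAGE_SIZE, "%d %d %d %d\n",
				scsi_id, ld_num, sticky_id, app_hndl);
	}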
diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h
index 2b5a3285f799..9de803cebd4b 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.h
+++ b/drivers/scsi/megaraid/megaraid_mbox.h
@@ -21,8 +21,8 @@
21#include "megaraid_ioctl.h" 21#include "megaraid_ioctl.h"
22 22
23 23
24#define MEGARAID_VERSION "2.20.4.9" 24#define MEGARAID_VERSION "2.20.5.1"
25#define MEGARAID_EXT_VERSION "(Release Date: Sun Jul 16 12:27:22 EST 2006)" 25#define MEGARAID_EXT_VERSION "(Release Date: Thu Nov 16 15:32:35 EST 2006)"
26 26
27 27
28/* 28/*
@@ -146,27 +146,27 @@ typedef struct {
146 146
147/** 147/**
148 * mraid_device_t - adapter soft state structure for mailbox controllers 148 * mraid_device_t - adapter soft state structure for mailbox controllers
149 * @param una_mbox64 : 64-bit mbox - unaligned 149 * @una_mbox64 : 64-bit mbox - unaligned
150 * @param una_mbox64_dma : mbox dma addr - unaligned 150 * @una_mbox64_dma : mbox dma addr - unaligned
151 * @param mbox : 32-bit mbox - aligned 151 * @mbox : 32-bit mbox - aligned
152 * @param mbox64 : 64-bit mbox - aligned 152 * @mbox64 : 64-bit mbox - aligned
153 * @param mbox_dma : mbox dma addr - aligned 153 * @mbox_dma : mbox dma addr - aligned
154 * @param mailbox_lock : exclusion lock for the mailbox 154 * @mailbox_lock : exclusion lock for the mailbox
155 * @param baseport : base port of hba memory 155 * @baseport : base port of hba memory
156 * @param baseaddr : mapped addr of hba memory 156 * @baseaddr : mapped addr of hba memory
157 * @param mbox_pool : pool of mailboxes 157 * @mbox_pool : pool of mailboxes
158 * @param mbox_pool_handle : handle for the mailbox pool memory 158 * @mbox_pool_handle : handle for the mailbox pool memory
159 * @param epthru_pool : a pool for extended passthru commands 159 * @epthru_pool : a pool for extended passthru commands
160 * @param epthru_pool_handle : handle to the pool above 160 * @epthru_pool_handle : handle to the pool above
161 * @param sg_pool : pool of scatter-gather lists for this driver 161 * @sg_pool : pool of scatter-gather lists for this driver
162 * @param sg_pool_handle : handle to the pool above 162 * @sg_pool_handle : handle to the pool above
163 * @param ccb_list : list of our command control blocks 163 * @ccb_list : list of our command control blocks
164 * @param uccb_list : list of cmd control blocks for mgmt module 164 * @uccb_list : list of cmd control blocks for mgmt module
165 * @param umbox64 : array of mailbox for user commands (cmm) 165 * @umbox64 : array of mailbox for user commands (cmm)
166 * @param pdrv_state : array for state of each physical drive. 166 * @pdrv_state : array for state of each physical drive.
167 * @param last_disp : flag used to show device scanning 167 * @last_disp : flag used to show device scanning
168 * @param hw_error : set if FW not responding 168 * @hw_error : set if FW not responding
169 * @param fast_load : If set, skip physical device scanning 169 * @fast_load : If set, skip physical device scanning
170 * @channel_class : channel class, RAID or SCSI 170 * @channel_class : channel class, RAID or SCSI
171 * @sysfs_sem : semaphore to serialize access to sysfs res. 171 * @sysfs_sem : semaphore to serialize access to sysfs res.
172 * @sysfs_uioc : management packet to issue FW calls from sysfs 172 * @sysfs_uioc : management packet to issue FW calls from sysfs
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index d85b9a8f1b8d..c1ff20c4747d 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -78,10 +78,10 @@ static struct file_operations lsi_fops = {
78 78
79/** 79/**
80 * mraid_mm_open - open routine for char node interface 80 * mraid_mm_open - open routine for char node interface
81 * @inod : unused 81 * @inode : unused
82 * @filep : unused 82 * @filep : unused
83 * 83 *
84 * allow ioctl operations by apps only if they superuser privilege 84 * Allow ioctl operations by apps only if they have superuser privilege.
85 */ 85 */
86static int 86static int
87mraid_mm_open(struct inode *inode, struct file *filep) 87mraid_mm_open(struct inode *inode, struct file *filep)
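A superuser gate of this kind is normally the standard capability test at open() time, so unprivileged processes never get a file descriptor on which to issue ioctls. A minimal sketch (the function name and the exact errno are illustrative, not necessarily what this driver returns):

	#include <linux/capability.h>
	#include <linux/fs.h>
	#include <linux/errno.h>

	static int example_chardev_open(struct inode *inode, struct file *filep)
	{
		/* Only processes with CAP_SYS_ADMIN may use the ioctl node. */
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;

		return 0;
	}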
@@ -214,7 +214,9 @@ mraid_mm_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
214/** 214/**
215 * mraid_mm_get_adapter - Returns corresponding adapters for the mimd packet 215 * mraid_mm_get_adapter - Returns corresponding adapters for the mimd packet
216 * @umimd : User space mimd_t ioctl packet 216 * @umimd : User space mimd_t ioctl packet
217 * @adapter : pointer to the adapter (OUT) 217 * @rval : returned success/error status
218 *
219 * The function return value is a pointer to the located @adapter.
218 */ 220 */
219static mraid_mmadp_t * 221static mraid_mmadp_t *
220mraid_mm_get_adapter(mimd_t __user *umimd, int *rval) 222mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
@@ -252,11 +254,11 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
252 return adapter; 254 return adapter;
253} 255}
254 256
255/* 257/**
256 * handle_drvrcmd - This routine checks if the opcode is a driver 258 * handle_drvrcmd - Checks if the opcode is a driver cmd and if it is, handles it.
257 * cmd and if it is, handles it.
258 * @arg : packet sent by the user app 259 * @arg : packet sent by the user app
259 * @old_ioctl : mimd if 1; uioc otherwise 260 * @old_ioctl : mimd if 1; uioc otherwise
261 * @rval : pointer for command's returned value (not function status)
260 */ 262 */
261static int 263static int
262handle_drvrcmd(void __user *arg, uint8_t old_ioctl, int *rval) 264handle_drvrcmd(void __user *arg, uint8_t old_ioctl, int *rval)
@@ -322,8 +324,8 @@ old_packet:
322 324
323/** 325/**
324 * mimd_to_kioc - Converter from old to new ioctl format 326 * mimd_to_kioc - Converter from old to new ioctl format
325 *
326 * @umimd : user space old MIMD IOCTL 327 * @umimd : user space old MIMD IOCTL
328 * @adp : adapter softstate
327 * @kioc : kernel space new format IOCTL 329 * @kioc : kernel space new format IOCTL
328 * 330 *
329 * Routine to convert MIMD interface IOCTL to new interface IOCTL packet. The 331 * Routine to convert MIMD interface IOCTL to new interface IOCTL packet. The
@@ -474,7 +476,6 @@ mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc)
474 476
475/** 477/**
476 * mraid_mm_attch_buf - Attach a free dma buffer for required size 478 * mraid_mm_attch_buf - Attach a free dma buffer for required size
477 *
478 * @adp : Adapter softstate 479 * @adp : Adapter softstate
479 * @kioc : kioc that the buffer needs to be attached to 480 * @kioc : kioc that the buffer needs to be attached to
480 * @xferlen : required length for buffer 481 * @xferlen : required length for buffer
@@ -607,7 +608,6 @@ mraid_mm_alloc_kioc(mraid_mmadp_t *adp)
607 608
608/** 609/**
609 * mraid_mm_dealloc_kioc - Return kioc to free pool 610 * mraid_mm_dealloc_kioc - Return kioc to free pool
610 *
611 * @adp : Adapter softstate 611 * @adp : Adapter softstate
612 * @kioc : uioc_t node to be returned to free pool 612 * @kioc : uioc_t node to be returned to free pool
613 */ 613 */
@@ -652,7 +652,6 @@ mraid_mm_dealloc_kioc(mraid_mmadp_t *adp, uioc_t *kioc)
652 652
653/** 653/**
654 * lld_ioctl - Routine to issue ioctl to low level drvr 654 * lld_ioctl - Routine to issue ioctl to low level drvr
655 *
656 * @adp : The adapter handle 655 * @adp : The adapter handle
657 * @kioc : The ioctl packet with kernel addresses 656 * @kioc : The ioctl packet with kernel addresses
658 */ 657 */
@@ -705,7 +704,6 @@ lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc)
705 704
706/** 705/**
707 * ioctl_done - callback from the low level driver 706 * ioctl_done - callback from the low level driver
708 *
709 * @kioc : completed ioctl packet 707 * @kioc : completed ioctl packet
710 */ 708 */
711static void 709static void
@@ -756,9 +754,8 @@ ioctl_done(uioc_t *kioc)
756} 754}
757 755
758 756
759/* 757/**
760 * lld_timedout : callback from the expired timer 758 * lld_timedout - callback from the expired timer
761 *
762 * @ptr : ioctl packet that timed out 759 * @ptr : ioctl packet that timed out
763 */ 760 */
764static void 761static void
@@ -776,8 +773,7 @@ lld_timedout(unsigned long ptr)
776 773
777 774
778/** 775/**
779 * kioc_to_mimd : Converter from new back to old format 776 * kioc_to_mimd - Converter from new back to old format
780 *
781 * @kioc : Kernel space IOCTL packet (successfully issued) 777 * @kioc : Kernel space IOCTL packet (successfully issued)
782 * @mimd : User space MIMD packet 778 * @mimd : User space MIMD packet
783 */ 779 */
@@ -855,7 +851,6 @@ kioc_to_mimd(uioc_t *kioc, mimd_t __user *mimd)
855 851
856/** 852/**
857 * hinfo_to_cinfo - Convert new format hba info into old format 853 * hinfo_to_cinfo - Convert new format hba info into old format
858 *
859 * @hinfo : New format, more comprehensive adapter info 854 * @hinfo : New format, more comprehensive adapter info
860 * @cinfo : Old format adapter info to support mimd_t apps 855 * @cinfo : Old format adapter info to support mimd_t apps
861 */ 856 */
@@ -878,10 +873,9 @@ hinfo_to_cinfo(mraid_hba_info_t *hinfo, mcontroller_t *cinfo)
878} 873}
879 874
880 875
881/* 876/**
882 * mraid_mm_register_adp - Registration routine for low level drvrs 877 * mraid_mm_register_adp - Registration routine for low level drivers
883 * 878 * @lld_adp : Adapter object
884 * @adp : Adapter objejct
885 */ 879 */
886int 880int
887mraid_mm_register_adp(mraid_mmadp_t *lld_adp) 881mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
@@ -1007,15 +1001,14 @@ memalloc_error:
1007 1001
1008/** 1002/**
1009 * mraid_mm_adapter_app_handle - return the application handle for this adapter 1003 * mraid_mm_adapter_app_handle - return the application handle for this adapter
1004 * @unique_id : adapter unique identifier
1010 * 1005 *
1011 * For the given driver data, locate the adadpter in our global list and 1006 * For the given driver data, locate the adapter in our global list and
1012 * return the corresponding handle, which is also used by applications to 1007 * return the corresponding handle, which is also used by applications to
1013 * uniquely identify an adapter. 1008 * uniquely identify an adapter.
1014 * 1009 *
1015 * @param unique_id : adapter unique identifier 1010 * Return adapter handle if found in the list.
1016 * 1011 * Return 0 if adapter could not be located, should never happen though.
1017 * @return adapter handle if found in the list
1018 * @return 0 if adapter could not be located, should never happen though
1019 */ 1012 */
1020uint32_t 1013uint32_t
1021mraid_mm_adapter_app_handle(uint32_t unique_id) 1014mraid_mm_adapter_app_handle(uint32_t unique_id)
@@ -1040,7 +1033,6 @@ mraid_mm_adapter_app_handle(uint32_t unique_id)
1040 1033
1041/** 1034/**
1042 * mraid_mm_setup_dma_pools - Set up dma buffer pools per adapter 1035 * mraid_mm_setup_dma_pools - Set up dma buffer pools per adapter
1043 *
1044 * @adp : Adapter softstate 1036 * @adp : Adapter softstate
1045 * 1037 *
1046 * We maintain a pool of dma buffers per each adapter. Each pool has one 1038 * We maintain a pool of dma buffers per each adapter. Each pool has one
@@ -1093,11 +1085,11 @@ dma_pool_setup_error:
1093} 1085}
1094 1086
1095 1087
1096/* 1088/**
1097 * mraid_mm_unregister_adp - Unregister routine for low level drivers 1089 * mraid_mm_unregister_adp - Unregister routine for low level drivers
1098 * Assume no outstanding ioctls to llds.
1099 *
1100 * @unique_id : UID of the adapter 1090 * @unique_id : UID of the adapter
1091 *
1092 * Assumes no outstanding ioctls to llds.
1101 */ 1093 */
1102int 1094int
1103mraid_mm_unregister_adp(uint32_t unique_id) 1095mraid_mm_unregister_adp(uint32_t unique_id)
@@ -1131,7 +1123,6 @@ mraid_mm_unregister_adp(uint32_t unique_id)
1131 1123
1132/** 1124/**
1133 * mraid_mm_free_adp_resources - Free adapter softstate 1125 * mraid_mm_free_adp_resources - Free adapter softstate
1134 *
1135 * @adp : Adapter softstate 1126 * @adp : Adapter softstate
1136 */ 1127 */
1137static void 1128static void
@@ -1162,7 +1153,6 @@ mraid_mm_free_adp_resources(mraid_mmadp_t *adp)
1162 1153
1163/** 1154/**
1164 * mraid_mm_teardown_dma_pools - Free all per adapter dma buffers 1155 * mraid_mm_teardown_dma_pools - Free all per adapter dma buffers
1165 *
1166 * @adp : Adapter softstate 1156 * @adp : Adapter softstate
1167 */ 1157 */
1168static void 1158static void
@@ -1190,7 +1180,7 @@ mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp)
1190} 1180}
1191 1181
1192/** 1182/**
1193 * mraid_mm_init : Module entry point 1183 * mraid_mm_init - Module entry point
1194 */ 1184 */
1195static int __init 1185static int __init
1196mraid_mm_init(void) 1186mraid_mm_init(void)
@@ -1214,10 +1204,13 @@ mraid_mm_init(void)
1214} 1204}
1215 1205
1216 1206
1207#ifdef CONFIG_COMPAT
1217/** 1208/**
1218 * mraid_mm_compat_ioctl : 32bit to 64bit ioctl conversion routine 1209 * mraid_mm_compat_ioctl - 32bit to 64bit ioctl conversion routine
1210 * @filep : file operations pointer (ignored)
1211 * @cmd : ioctl command
1212 * @arg : user ioctl packet
1219 */ 1213 */
1220#ifdef CONFIG_COMPAT
1221static long 1214static long
1222mraid_mm_compat_ioctl(struct file *filep, unsigned int cmd, 1215mraid_mm_compat_ioctl(struct file *filep, unsigned int cmd,
1223 unsigned long arg) 1216 unsigned long arg)
@@ -1231,7 +1224,7 @@ mraid_mm_compat_ioctl(struct file *filep, unsigned int cmd,
1231#endif 1224#endif
1232 1225
1233/** 1226/**
1234 * mraid_mm_exit : Module exit point 1227 * mraid_mm_exit - Module exit point
1235 */ 1228 */
1236static void __exit 1229static void __exit
1237mraid_mm_exit(void) 1230mraid_mm_exit(void)
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 55eddcf8eb15..cacb3ad92527 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -15,7 +15,7 @@
15#ifndef LSI_MEGARAID_SAS_H 15#ifndef LSI_MEGARAID_SAS_H
16#define LSI_MEGARAID_SAS_H 16#define LSI_MEGARAID_SAS_H
17 17
18/** 18/*
19 * MegaRAID SAS Driver meta data 19 * MegaRAID SAS Driver meta data
20 */ 20 */
21#define MEGASAS_VERSION "00.00.03.05" 21#define MEGASAS_VERSION "00.00.03.05"
@@ -40,7 +40,7 @@
40 * "message frames" 40 * "message frames"
41 */ 41 */
42 42
43/** 43/*
44 * FW posts its state in upper 4 bits of outbound_msg_0 register 44 * FW posts its state in upper 4 bits of outbound_msg_0 register
45 */ 45 */
46#define MFI_STATE_MASK 0xF0000000 46#define MFI_STATE_MASK 0xF0000000
@@ -58,7 +58,7 @@
58 58
59#define MEGAMFI_FRAME_SIZE 64 59#define MEGAMFI_FRAME_SIZE 64
60 60
61/** 61/*
62 * During FW init, clear pending cmds & reset state using inbound_msg_0 62 * During FW init, clear pending cmds & reset state using inbound_msg_0
63 * 63 *
64 * ABORT : Abort all pending cmds 64 * ABORT : Abort all pending cmds
@@ -78,7 +78,7 @@
78 MFI_INIT_MFIMODE| \ 78 MFI_INIT_MFIMODE| \
79 MFI_INIT_ABORT 79 MFI_INIT_ABORT
80 80
81/** 81/*
82 * MFI frame flags 82 * MFI frame flags
83 */ 83 */
84#define MFI_FRAME_POST_IN_REPLY_QUEUE 0x0000 84#define MFI_FRAME_POST_IN_REPLY_QUEUE 0x0000
@@ -92,12 +92,12 @@
92#define MFI_FRAME_DIR_READ 0x0010 92#define MFI_FRAME_DIR_READ 0x0010
93#define MFI_FRAME_DIR_BOTH 0x0018 93#define MFI_FRAME_DIR_BOTH 0x0018
94 94
95/** 95/*
96 * Definition for cmd_status 96 * Definition for cmd_status
97 */ 97 */
98#define MFI_CMD_STATUS_POLL_MODE 0xFF 98#define MFI_CMD_STATUS_POLL_MODE 0xFF
99 99
100/** 100/*
101 * MFI command opcodes 101 * MFI command opcodes
102 */ 102 */
103#define MFI_CMD_INIT 0x00 103#define MFI_CMD_INIT 0x00
@@ -128,7 +128,7 @@
128#define MR_DCMD_CLUSTER_RESET_ALL 0x08010100 128#define MR_DCMD_CLUSTER_RESET_ALL 0x08010100
129#define MR_DCMD_CLUSTER_RESET_LD 0x08010200 129#define MR_DCMD_CLUSTER_RESET_LD 0x08010200
130 130
131/** 131/*
132 * MFI command completion codes 132 * MFI command completion codes
133 */ 133 */
134enum MFI_STAT { 134enum MFI_STAT {
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 7b18a6c7b7eb..8081b637d97e 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -140,6 +140,8 @@ qla2x00_sysfs_write_nvram(struct kobject *kobj, char *buf, loff_t off,
140 ha->isp_ops.write_nvram(ha, (uint8_t *)buf, ha->nvram_base, count); 140 ha->isp_ops.write_nvram(ha, (uint8_t *)buf, ha->nvram_base, count);
141 spin_unlock_irqrestore(&ha->hardware_lock, flags); 141 spin_unlock_irqrestore(&ha->hardware_lock, flags);
142 142
143 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
144
143 return (count); 145 return (count);
144} 146}
145 147
@@ -653,6 +655,43 @@ qla2x00_beacon_store(struct class_device *cdev, const char *buf,
653 return count; 655 return count;
654} 656}
655 657
658static ssize_t
659qla2x00_optrom_bios_version_show(struct class_device *cdev, char *buf)
660{
661 scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
662
663 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
664 ha->bios_revision[0]);
665}
666
667static ssize_t
668qla2x00_optrom_efi_version_show(struct class_device *cdev, char *buf)
669{
670 scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
671
672 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
673 ha->efi_revision[0]);
674}
675
676static ssize_t
677qla2x00_optrom_fcode_version_show(struct class_device *cdev, char *buf)
678{
679 scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
680
681 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
682 ha->fcode_revision[0]);
683}
684
685static ssize_t
686qla2x00_optrom_fw_version_show(struct class_device *cdev, char *buf)
687{
688 scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
689
690 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
691 ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
692 ha->fw_revision[3]);
693}
694
656static CLASS_DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, 695static CLASS_DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show,
657 NULL); 696 NULL);
658static CLASS_DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); 697static CLASS_DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
@@ -669,6 +708,14 @@ static CLASS_DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
669 qla2x00_zio_timer_store); 708 qla2x00_zio_timer_store);
670static CLASS_DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show, 709static CLASS_DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
671 qla2x00_beacon_store); 710 qla2x00_beacon_store);
711static CLASS_DEVICE_ATTR(optrom_bios_version, S_IRUGO,
712 qla2x00_optrom_bios_version_show, NULL);
713static CLASS_DEVICE_ATTR(optrom_efi_version, S_IRUGO,
714 qla2x00_optrom_efi_version_show, NULL);
715static CLASS_DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
716 qla2x00_optrom_fcode_version_show, NULL);
717static CLASS_DEVICE_ATTR(optrom_fw_version, S_IRUGO,
718 qla2x00_optrom_fw_version_show, NULL);
672 719
673struct class_device_attribute *qla2x00_host_attrs[] = { 720struct class_device_attribute *qla2x00_host_attrs[] = {
674 &class_device_attr_driver_version, 721 &class_device_attr_driver_version,
@@ -683,6 +730,10 @@ struct class_device_attribute *qla2x00_host_attrs[] = {
683 &class_device_attr_zio, 730 &class_device_attr_zio,
684 &class_device_attr_zio_timer, 731 &class_device_attr_zio_timer,
685 &class_device_attr_beacon, 732 &class_device_attr_beacon,
733 &class_device_attr_optrom_bios_version,
734 &class_device_attr_optrom_efi_version,
735 &class_device_attr_optrom_fcode_version,
736 &class_device_attr_optrom_fw_version,
686 NULL, 737 NULL,
687}; 738};
688 739
@@ -836,21 +887,24 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
836 link_stat_t stat_buf; 887 link_stat_t stat_buf;
837 struct fc_host_statistics *pfc_host_stat; 888 struct fc_host_statistics *pfc_host_stat;
838 889
890 rval = QLA_FUNCTION_FAILED;
839 pfc_host_stat = &ha->fc_host_stat; 891 pfc_host_stat = &ha->fc_host_stat;
840 memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics)); 892 memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
841 893
842 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 894 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
843 rval = qla24xx_get_isp_stats(ha, (uint32_t *)&stat_buf, 895 rval = qla24xx_get_isp_stats(ha, (uint32_t *)&stat_buf,
844 sizeof(stat_buf) / 4, mb_stat); 896 sizeof(stat_buf) / 4, mb_stat);
845 } else { 897 } else if (atomic_read(&ha->loop_state) == LOOP_READY &&
898 !test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags) &&
899 !test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) &&
900 !ha->dpc_active) {
901 /* Must be in a 'READY' state for statistics retrieval. */
846 rval = qla2x00_get_link_status(ha, ha->loop_id, &stat_buf, 902 rval = qla2x00_get_link_status(ha, ha->loop_id, &stat_buf,
847 mb_stat); 903 mb_stat);
848 } 904 }
849 if (rval != 0) { 905
850 qla_printk(KERN_WARNING, ha, 906 if (rval != QLA_SUCCESS)
851 "Unable to retrieve host statistics (%d).\n", mb_stat[0]); 907 goto done;
852 return pfc_host_stat;
853 }
854 908
855 pfc_host_stat->link_failure_count = stat_buf.link_fail_cnt; 909 pfc_host_stat->link_failure_count = stat_buf.link_fail_cnt;
856 pfc_host_stat->loss_of_sync_count = stat_buf.loss_sync_cnt; 910 pfc_host_stat->loss_of_sync_count = stat_buf.loss_sync_cnt;
@@ -858,7 +912,7 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
858 pfc_host_stat->prim_seq_protocol_err_count = stat_buf.prim_seq_err_cnt; 912 pfc_host_stat->prim_seq_protocol_err_count = stat_buf.prim_seq_err_cnt;
859 pfc_host_stat->invalid_tx_word_count = stat_buf.inval_xmit_word_cnt; 913 pfc_host_stat->invalid_tx_word_count = stat_buf.inval_xmit_word_cnt;
860 pfc_host_stat->invalid_crc_count = stat_buf.inval_crc_cnt; 914 pfc_host_stat->invalid_crc_count = stat_buf.inval_crc_cnt;
861 915done:
862 return pfc_host_stat; 916 return pfc_host_stat;
863} 917}
864 918
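Editor's note: the four new read-only attributes added above surface through the scsi_host class device, so they can be read from user space once the driver is loaded. A minimal sketch of reading one of them; the host number and file presence depend on the actual system:

#include <stdio.h>

/* Read one of the new option-ROM version attributes from sysfs.
 * "host0" is a placeholder; one file appears per qla2xxx host. */
int main(void)
{
    char line[64];
    FILE *f = fopen("/sys/class/scsi_host/host0/optrom_bios_version", "r");

    if (!f)
        return 1;
    if (fgets(line, sizeof(line), f))
        printf("BIOS option ROM: %s", line);
    fclose(f);
    return 0;
}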
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 2c10130d9e03..05f4f2a378eb 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2045,6 +2045,29 @@ struct isp_operations {
2045 uint32_t, uint32_t); 2045 uint32_t, uint32_t);
2046 int (*write_optrom) (struct scsi_qla_host *, uint8_t *, uint32_t, 2046 int (*write_optrom) (struct scsi_qla_host *, uint8_t *, uint32_t,
2047 uint32_t); 2047 uint32_t);
2048
2049 int (*get_flash_version) (struct scsi_qla_host *, void *);
2050};
2051
2052/* MSI-X Support *************************************************************/
2053
2054#define QLA_MSIX_CHIP_REV_24XX 3
2055#define QLA_MSIX_FW_MODE(m) (((m) & (BIT_7|BIT_8|BIT_9)) >> 7)
2056#define QLA_MSIX_FW_MODE_1(m) (QLA_MSIX_FW_MODE(m) == 1)
2057
2058#define QLA_MSIX_DEFAULT 0x00
2059#define QLA_MSIX_RSP_Q 0x01
2060
2061#define QLA_MSIX_ENTRIES 2
2062#define QLA_MIDX_DEFAULT 0
2063#define QLA_MIDX_RSP_Q 1
2064
2065struct scsi_qla_host;
2066
2067struct qla_msix_entry {
2068 int have_irq;
2069 uint16_t msix_vector;
2070 uint16_t msix_entry;
2048}; 2071};
2049 2072
2050/* 2073/*
@@ -2077,6 +2100,7 @@ typedef struct scsi_qla_host {
2077 uint32_t enable_lip_full_login :1; 2100 uint32_t enable_lip_full_login :1;
2078 uint32_t enable_target_reset :1; 2101 uint32_t enable_target_reset :1;
2079 uint32_t enable_led_scheme :1; 2102 uint32_t enable_led_scheme :1;
2103 uint32_t inta_enabled :1;
2080 uint32_t msi_enabled :1; 2104 uint32_t msi_enabled :1;
2081 uint32_t msix_enabled :1; 2105 uint32_t msix_enabled :1;
2082 uint32_t disable_serdes :1; 2106 uint32_t disable_serdes :1;
@@ -2316,8 +2340,6 @@ typedef struct scsi_qla_host {
2316#define MBX_INTR_WAIT 2 2340#define MBX_INTR_WAIT 2
2317#define MBX_UPDATE_FLASH_ACTIVE 3 2341#define MBX_UPDATE_FLASH_ACTIVE 3
2318 2342
2319 spinlock_t mbx_reg_lock; /* Mbx Cmd Register Lock */
2320
 2321 	struct semaphore mbx_cmd_sem;	/* Serialize mbx access */ 2343 	struct semaphore mbx_cmd_sem;	/* Serialize mbx access */
2322 struct semaphore mbx_intr_sem; /* Used for completion notification */ 2344 struct semaphore mbx_intr_sem; /* Used for completion notification */
2323 2345
@@ -2358,6 +2380,7 @@ typedef struct scsi_qla_host {
2358 2380
2359 uint8_t host_str[16]; 2381 uint8_t host_str[16];
2360 uint32_t pci_attr; 2382 uint32_t pci_attr;
2383 uint16_t chip_revision;
2361 2384
2362 uint16_t product_id[4]; 2385 uint16_t product_id[4];
2363 2386
@@ -2379,6 +2402,15 @@ typedef struct scsi_qla_host {
2379#define QLA_SREADING 1 2402#define QLA_SREADING 1
2380#define QLA_SWRITING 2 2403#define QLA_SWRITING 2
2381 2404
2405 /* PCI expansion ROM image information. */
2406#define ROM_CODE_TYPE_BIOS 0
2407#define ROM_CODE_TYPE_FCODE 1
2408#define ROM_CODE_TYPE_EFI 3
2409 uint8_t bios_revision[2];
2410 uint8_t efi_revision[2];
2411 uint8_t fcode_revision[16];
2412 uint32_t fw_revision[4];
2413
2382 /* Needed for BEACON */ 2414 /* Needed for BEACON */
2383 uint16_t beacon_blink_led; 2415 uint16_t beacon_blink_led;
2384 uint8_t beacon_color_state; 2416 uint8_t beacon_color_state;
@@ -2391,6 +2423,8 @@ typedef struct scsi_qla_host {
2391 uint16_t zio_mode; 2423 uint16_t zio_mode;
2392 uint16_t zio_timer; 2424 uint16_t zio_timer;
2393 struct fc_host_statistics fc_host_stat; 2425 struct fc_host_statistics fc_host_stat;
2426
2427 struct qla_msix_entry msix_entries[QLA_MSIX_ENTRIES];
2394} scsi_qla_host_t; 2428} scsi_qla_host_t;
2395 2429
2396 2430
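Editor's note: the MSI-X support introduced above is gated on a 3-bit "multiqueue mode" field packed into the firmware attributes word (bits 7-9), which QLA_MSIX_FW_MODE() extracts and QLA_MSIX_FW_MODE_1() tests. A minimal stand-alone C sketch of that extraction, using a made-up attributes value purely for illustration:

#include <stdio.h>
#include <stdint.h>

/* Local copies of the qla_def.h macros; BIT_n is (1 << n) in the driver. */
#define BIT_7 (1u << 7)
#define BIT_8 (1u << 8)
#define BIT_9 (1u << 9)
#define QLA_MSIX_FW_MODE(m)   (((m) & (BIT_7 | BIT_8 | BIT_9)) >> 7)
#define QLA_MSIX_FW_MODE_1(m) (QLA_MSIX_FW_MODE(m) == 1)

int main(void)
{
    uint16_t fw_attributes = 0x0080;  /* hypothetical value: bits 7-9 hold 001 */

    printf("fw multiqueue mode = %u, MSI-X eligible = %s\n",
        (unsigned)QLA_MSIX_FW_MODE(fw_attributes),
        QLA_MSIX_FW_MODE_1(fw_attributes) ? "yes" : "no");
    return 0;
}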
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index e4dd12f4b80e..74544ae4b0e2 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -224,6 +224,9 @@ extern irqreturn_t qla24xx_intr_handler(int, void *);
224extern void qla2x00_process_response_queue(struct scsi_qla_host *); 224extern void qla2x00_process_response_queue(struct scsi_qla_host *);
225extern void qla24xx_process_response_queue(struct scsi_qla_host *); 225extern void qla24xx_process_response_queue(struct scsi_qla_host *);
226 226
227extern int qla2x00_request_irqs(scsi_qla_host_t *);
228extern void qla2x00_free_irqs(scsi_qla_host_t *);
229
227/* 230/*
228 * Global Function Prototypes in qla_sup.c source file. 231 * Global Function Prototypes in qla_sup.c source file.
229 */ 232 */
@@ -259,6 +262,9 @@ extern uint8_t *qla24xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
259extern int qla24xx_write_optrom_data(struct scsi_qla_host *, uint8_t *, 262extern int qla24xx_write_optrom_data(struct scsi_qla_host *, uint8_t *,
260 uint32_t, uint32_t); 263 uint32_t, uint32_t);
261 264
265extern int qla2x00_get_flash_version(scsi_qla_host_t *, void *);
266extern int qla24xx_get_flash_version(scsi_qla_host_t *, void *);
267
262/* 268/*
263 * Global Function Prototypes in qla_dbg.c source file. 269 * Global Function Prototypes in qla_dbg.c source file.
264 */ 270 */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index b3dac26ddba3..98c01cd5e1a8 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -65,7 +65,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
65 ha->flags.reset_active = 0; 65 ha->flags.reset_active = 0;
66 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 66 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
67 atomic_set(&ha->loop_state, LOOP_DOWN); 67 atomic_set(&ha->loop_state, LOOP_DOWN);
68 ha->device_flags = 0; 68 ha->device_flags = DFLG_NO_CABLE;
69 ha->dpc_flags = 0; 69 ha->dpc_flags = 0;
70 ha->flags.management_server_logged_in = 0; 70 ha->flags.management_server_logged_in = 0;
71 ha->marker_needed = 0; 71 ha->marker_needed = 0;
@@ -77,16 +77,23 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
77 qla_printk(KERN_INFO, ha, "Configuring PCI space...\n"); 77 qla_printk(KERN_INFO, ha, "Configuring PCI space...\n");
78 rval = ha->isp_ops.pci_config(ha); 78 rval = ha->isp_ops.pci_config(ha);
79 if (rval) { 79 if (rval) {
80 DEBUG2(printk("scsi(%ld): Unable to configure PCI space=n", 80 DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n",
81 ha->host_no)); 81 ha->host_no));
82 return (rval); 82 return (rval);
83 } 83 }
84 84
85 ha->isp_ops.reset_chip(ha); 85 ha->isp_ops.reset_chip(ha);
86 86
87 ha->isp_ops.get_flash_version(ha, ha->request_ring);
88
87 qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n"); 89 qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");
88 90
89 ha->isp_ops.nvram_config(ha); 91 rval = ha->isp_ops.nvram_config(ha);
92 if (rval) {
93 DEBUG2(printk("scsi(%ld): Unable to verify NVRAM data.\n",
94 ha->host_no));
95 return rval;
96 }
90 97
91 if (ha->flags.disable_serdes) { 98 if (ha->flags.disable_serdes) {
92 /* Mask HBA via NVRAM settings? */ 99 /* Mask HBA via NVRAM settings? */
@@ -293,6 +300,8 @@ qla24xx_pci_config(scsi_qla_host_t *ha)
293 d &= ~PCI_ROM_ADDRESS_ENABLE; 300 d &= ~PCI_ROM_ADDRESS_ENABLE;
294 pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d); 301 pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d);
295 302
303 pci_read_config_word(ha->pdev, PCI_REVISION_ID, &ha->chip_revision);
304
296 /* Get PCI bus information. */ 305 /* Get PCI bus information. */
297 spin_lock_irqsave(&ha->hardware_lock, flags); 306 spin_lock_irqsave(&ha->hardware_lock, flags);
298 ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status); 307 ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
@@ -1351,6 +1360,39 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1351 return(rval); 1360 return(rval);
1352} 1361}
1353 1362
1363static inline void
1364qla2x00_set_model_info(scsi_qla_host_t *ha, uint8_t *model, size_t len, char *def)
1365{
1366 char *st, *en;
1367 uint16_t index;
1368
1369 if (memcmp(model, BINZERO, len) != 0) {
1370 strncpy(ha->model_number, model, len);
1371 st = en = ha->model_number;
1372 en += len - 1;
1373 while (en > st) {
1374 if (*en != 0x20 && *en != 0x00)
1375 break;
1376 *en-- = '\0';
1377 }
1378
1379 index = (ha->pdev->subsystem_device & 0xff);
1380 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
1381 index < QLA_MODEL_NAMES)
1382 ha->model_desc = qla2x00_model_name[index * 2 + 1];
1383 } else {
1384 index = (ha->pdev->subsystem_device & 0xff);
1385 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
1386 index < QLA_MODEL_NAMES) {
1387 strcpy(ha->model_number,
1388 qla2x00_model_name[index * 2]);
1389 ha->model_desc = qla2x00_model_name[index * 2 + 1];
1390 } else {
1391 strcpy(ha->model_number, def);
1392 }
1393 }
1394}
1395
1354/* 1396/*
1355* NVRAM configuration for ISP 2xxx 1397* NVRAM configuration for ISP 2xxx
1356* 1398*
@@ -1367,7 +1409,6 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1367int 1409int
1368qla2x00_nvram_config(scsi_qla_host_t *ha) 1410qla2x00_nvram_config(scsi_qla_host_t *ha)
1369{ 1411{
1370 int rval;
1371 uint8_t chksum = 0; 1412 uint8_t chksum = 0;
1372 uint16_t cnt; 1413 uint16_t cnt;
1373 uint8_t *dptr1, *dptr2; 1414 uint8_t *dptr1, *dptr2;
@@ -1376,8 +1417,6 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1376 uint8_t *ptr = (uint8_t *)ha->request_ring; 1417 uint8_t *ptr = (uint8_t *)ha->request_ring;
1377 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1418 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1378 1419
1379 rval = QLA_SUCCESS;
1380
1381 /* Determine NVRAM starting address. */ 1420 /* Determine NVRAM starting address. */
1382 ha->nvram_size = sizeof(nvram_t); 1421 ha->nvram_size = sizeof(nvram_t);
1383 ha->nvram_base = 0; 1422 ha->nvram_base = 0;
@@ -1401,55 +1440,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1401 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: " 1440 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
1402 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0], 1441 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
1403 nv->nvram_version); 1442 nv->nvram_version);
1404 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet " 1443 return QLA_FUNCTION_FAILED;
1405 "invalid -- WWPN) defaults.\n");
1406
1407 /*
1408 * Set default initialization control block.
1409 */
1410 memset(nv, 0, ha->nvram_size);
1411 nv->parameter_block_version = ICB_VERSION;
1412
1413 if (IS_QLA23XX(ha)) {
1414 nv->firmware_options[0] = BIT_2 | BIT_1;
1415 nv->firmware_options[1] = BIT_7 | BIT_5;
1416 nv->add_firmware_options[0] = BIT_5;
1417 nv->add_firmware_options[1] = BIT_5 | BIT_4;
1418 nv->frame_payload_size = __constant_cpu_to_le16(2048);
1419 nv->special_options[1] = BIT_7;
1420 } else if (IS_QLA2200(ha)) {
1421 nv->firmware_options[0] = BIT_2 | BIT_1;
1422 nv->firmware_options[1] = BIT_7 | BIT_5;
1423 nv->add_firmware_options[0] = BIT_5;
1424 nv->add_firmware_options[1] = BIT_5 | BIT_4;
1425 nv->frame_payload_size = __constant_cpu_to_le16(1024);
1426 } else if (IS_QLA2100(ha)) {
1427 nv->firmware_options[0] = BIT_3 | BIT_1;
1428 nv->firmware_options[1] = BIT_5;
1429 nv->frame_payload_size = __constant_cpu_to_le16(1024);
1430 }
1431
1432 nv->max_iocb_allocation = __constant_cpu_to_le16(256);
1433 nv->execution_throttle = __constant_cpu_to_le16(16);
1434 nv->retry_count = 8;
1435 nv->retry_delay = 1;
1436
1437 nv->port_name[0] = 33;
1438 nv->port_name[3] = 224;
1439 nv->port_name[4] = 139;
1440
1441 nv->login_timeout = 4;
1442
1443 /*
1444 * Set default host adapter parameters
1445 */
1446 nv->host_p[1] = BIT_2;
1447 nv->reset_delay = 5;
1448 nv->port_down_retry_count = 8;
1449 nv->max_luns_per_target = __constant_cpu_to_le16(8);
1450 nv->link_down_timeout = 60;
1451
1452 rval = 1;
1453 } 1444 }
1454 1445
1455#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) 1446#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
@@ -1489,33 +1480,8 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1489 strcpy(ha->model_number, "QLA2300"); 1480 strcpy(ha->model_number, "QLA2300");
1490 } 1481 }
1491 } else { 1482 } else {
1492 if (rval == 0 && 1483 qla2x00_set_model_info(ha, nv->model_number,
1493 memcmp(nv->model_number, BINZERO, 1484 sizeof(nv->model_number), "QLA23xx");
1494 sizeof(nv->model_number)) != 0) {
1495 char *st, *en;
1496
1497 strncpy(ha->model_number, nv->model_number,
1498 sizeof(nv->model_number));
1499 st = en = ha->model_number;
1500 en += sizeof(nv->model_number) - 1;
1501 while (en > st) {
1502 if (*en != 0x20 && *en != 0x00)
1503 break;
1504 *en-- = '\0';
1505 }
1506 } else {
1507 uint16_t index;
1508
1509 index = (ha->pdev->subsystem_device & 0xff);
1510 if (index < QLA_MODEL_NAMES) {
1511 strcpy(ha->model_number,
1512 qla2x00_model_name[index * 2]);
1513 ha->model_desc =
1514 qla2x00_model_name[index * 2 + 1];
1515 } else {
1516 strcpy(ha->model_number, "QLA23xx");
1517 }
1518 }
1519 } 1485 }
1520 } else if (IS_QLA2200(ha)) { 1486 } else if (IS_QLA2200(ha)) {
1521 nv->firmware_options[0] |= BIT_2; 1487 nv->firmware_options[0] |= BIT_2;
@@ -1687,11 +1653,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1687 } 1653 }
1688 } 1654 }
1689 1655
1690 if (rval) { 1656 return QLA_SUCCESS;
1691 DEBUG2_3(printk(KERN_WARNING
1692 "scsi(%ld): NVRAM configuration failed!\n", ha->host_no));
1693 }
1694 return (rval);
1695} 1657}
1696 1658
1697static void 1659static void
@@ -3107,7 +3069,11 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3107 } 3069 }
3108 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3070 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3109 3071
3110 ha->isp_ops.nvram_config(ha); 3072 ha->isp_ops.get_flash_version(ha, ha->request_ring);
3073
3074 rval = ha->isp_ops.nvram_config(ha);
3075 if (rval)
3076 goto isp_abort_retry;
3111 3077
3112 if (!qla2x00_restart_isp(ha)) { 3078 if (!qla2x00_restart_isp(ha)) {
3113 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 3079 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
@@ -3137,6 +3103,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3137 } 3103 }
3138 } 3104 }
3139 } else { /* failed the ISP abort */ 3105 } else { /* failed the ISP abort */
3106isp_abort_retry:
3140 ha->flags.online = 1; 3107 ha->flags.online = 1;
3141 if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) { 3108 if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) {
3142 if (ha->isp_abort_cnt == 0) { 3109 if (ha->isp_abort_cnt == 0) {
@@ -3326,7 +3293,6 @@ qla24xx_reset_adapter(scsi_qla_host_t *ha)
3326int 3293int
3327qla24xx_nvram_config(scsi_qla_host_t *ha) 3294qla24xx_nvram_config(scsi_qla_host_t *ha)
3328{ 3295{
3329 int rval;
3330 struct init_cb_24xx *icb; 3296 struct init_cb_24xx *icb;
3331 struct nvram_24xx *nv; 3297 struct nvram_24xx *nv;
3332 uint32_t *dptr; 3298 uint32_t *dptr;
@@ -3334,7 +3300,6 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3334 uint32_t chksum; 3300 uint32_t chksum;
3335 uint16_t cnt; 3301 uint16_t cnt;
3336 3302
3337 rval = QLA_SUCCESS;
3338 icb = (struct init_cb_24xx *)ha->init_cb; 3303 icb = (struct init_cb_24xx *)ha->init_cb;
3339 nv = (struct nvram_24xx *)ha->request_ring; 3304 nv = (struct nvram_24xx *)ha->request_ring;
3340 3305
@@ -3367,51 +3332,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3367 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: " 3332 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
3368 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0], 3333 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
3369 le16_to_cpu(nv->nvram_version)); 3334 le16_to_cpu(nv->nvram_version));
3370 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet " 3335 return QLA_FUNCTION_FAILED;
3371 "invalid -- WWPN) defaults.\n");
3372
3373 /*
3374 * Set default initialization control block.
3375 */
3376 memset(nv, 0, ha->nvram_size);
3377 nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
3378 nv->version = __constant_cpu_to_le16(ICB_VERSION);
3379 nv->frame_payload_size = __constant_cpu_to_le16(2048);
3380 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
3381 nv->exchange_count = __constant_cpu_to_le16(0);
3382 nv->hard_address = __constant_cpu_to_le16(124);
3383 nv->port_name[0] = 0x21;
3384 nv->port_name[1] = 0x00 + PCI_FUNC(ha->pdev->devfn);
3385 nv->port_name[2] = 0x00;
3386 nv->port_name[3] = 0xe0;
3387 nv->port_name[4] = 0x8b;
3388 nv->port_name[5] = 0x1c;
3389 nv->port_name[6] = 0x55;
3390 nv->port_name[7] = 0x86;
3391 nv->node_name[0] = 0x20;
3392 nv->node_name[1] = 0x00;
3393 nv->node_name[2] = 0x00;
3394 nv->node_name[3] = 0xe0;
3395 nv->node_name[4] = 0x8b;
3396 nv->node_name[5] = 0x1c;
3397 nv->node_name[6] = 0x55;
3398 nv->node_name[7] = 0x86;
3399 nv->login_retry_count = __constant_cpu_to_le16(8);
3400 nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
3401 nv->login_timeout = __constant_cpu_to_le16(0);
3402 nv->firmware_options_1 =
3403 __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
3404 nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
3405 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
3406 nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
3407 nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
3408 nv->efi_parameters = __constant_cpu_to_le32(0);
3409 nv->reset_delay = 5;
3410 nv->max_luns_per_target = __constant_cpu_to_le16(128);
3411 nv->port_down_retry_count = __constant_cpu_to_le16(30);
3412 nv->link_down_timeout = __constant_cpu_to_le16(30);
3413
3414 rval = 1;
3415 } 3336 }
3416 3337
3417 /* Reset Initialization control block */ 3338 /* Reset Initialization control block */
@@ -3438,25 +3359,8 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3438 /* 3359 /*
3439 * Setup driver NVRAM options. 3360 * Setup driver NVRAM options.
3440 */ 3361 */
3441 if (memcmp(nv->model_name, BINZERO, sizeof(nv->model_name)) != 0) { 3362 qla2x00_set_model_info(ha, nv->model_name, sizeof(nv->model_name),
3442 char *st, *en; 3363 "QLA2462");
3443 uint16_t index;
3444
3445 strncpy(ha->model_number, nv->model_name,
3446 sizeof(nv->model_name));
3447 st = en = ha->model_number;
3448 en += sizeof(nv->model_name) - 1;
3449 while (en > st) {
3450 if (*en != 0x20 && *en != 0x00)
3451 break;
3452 *en-- = '\0';
3453 }
3454
3455 index = (ha->pdev->subsystem_device & 0xff);
3456 if (index < QLA_MODEL_NAMES)
3457 ha->model_desc = qla2x00_model_name[index * 2 + 1];
3458 } else
3459 strcpy(ha->model_number, "QLA2462");
3460 3364
3461 /* Use alternate WWN? */ 3365 /* Use alternate WWN? */
3462 if (nv->host_p & __constant_cpu_to_le32(BIT_15)) { 3366 if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
@@ -3575,11 +3479,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3575 ha->flags.process_response_queue = 1; 3479 ha->flags.process_response_queue = 1;
3576 } 3480 }
3577 3481
3578 if (rval) { 3482 return QLA_SUCCESS;
3579 DEBUG2_3(printk(KERN_WARNING
3580 "scsi(%ld): NVRAM configuration failed!\n", ha->host_no));
3581 }
3582 return (rval);
3583} 3483}
3584 3484
3585static int 3485static int
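Editor's note: qla2x00_set_model_info() above centralizes the NVRAM model handling: copy the fixed-width, space-padded field, strip trailing spaces and NULs, then fall back to the qla2x00_model_name table keyed by the PCI subsystem ID. A stand-alone sketch of just the trimming step, with illustrative buffer sizes:

#include <string.h>
#include <stdio.h>

/* Right-trim a fixed-width, space/NUL-padded NVRAM field into a C string.
 * Mirrors the trailing-space/NUL loop in qla2x00_set_model_info(). */
static void trim_model(char *dst, size_t dstsz, const unsigned char *src, size_t len)
{
    char *en;

    if (len >= dstsz)
        len = dstsz - 1;
    memcpy(dst, src, len);
    dst[len] = '\0';
    for (en = dst + len; en > dst; ) {
        en--;
        if (*en != ' ' && *en != '\0')
            break;
        *en = '\0';
    }
}

int main(void)
{
    const unsigned char nvram_field[16] = "QLA2462";  /* remainder zero-padded */
    char model[17];

    trim_model(model, sizeof(model), nvram_field, sizeof(nvram_field));
    printf("model: '%s'\n", model);
    return 0;
}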
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 39fd17b05be5..d4885616cd39 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -86,12 +86,8 @@ qla2100_intr_handler(int irq, void *dev_id)
86 86
87 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 87 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
88 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 88 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
89 spin_lock_irqsave(&ha->mbx_reg_lock, flags);
90
91 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 89 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
92 up(&ha->mbx_intr_sem); 90 up(&ha->mbx_intr_sem);
93
94 spin_unlock_irqrestore(&ha->mbx_reg_lock, flags);
95 } 91 }
96 92
97 return (IRQ_HANDLED); 93 return (IRQ_HANDLED);
@@ -199,12 +195,8 @@ qla2300_intr_handler(int irq, void *dev_id)
199 195
200 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 196 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
201 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 197 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
202 spin_lock_irqsave(&ha->mbx_reg_lock, flags);
203
204 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 198 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
205 up(&ha->mbx_intr_sem); 199 up(&ha->mbx_intr_sem);
206
207 spin_unlock_irqrestore(&ha->mbx_reg_lock, flags);
208 } 200 }
209 201
210 return (IRQ_HANDLED); 202 return (IRQ_HANDLED);
@@ -654,10 +646,8 @@ qla2x00_ramp_up_queue_depth(scsi_qla_host_t *ha, srb_t *sp)
654 fcport->last_queue_full + ql2xqfullrampup * HZ)) 646 fcport->last_queue_full + ql2xqfullrampup * HZ))
655 return; 647 return;
656 648
657 spin_unlock_irq(&ha->hardware_lock);
658 starget_for_each_device(sdev->sdev_target, fcport, 649 starget_for_each_device(sdev->sdev_target, fcport,
659 qla2x00_adjust_sdev_qdepth_up); 650 qla2x00_adjust_sdev_qdepth_up);
660 spin_lock_irq(&ha->hardware_lock);
661} 651}
662 652
663/** 653/**
@@ -927,10 +917,8 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
927 917
928 /* Adjust queue depth for all luns on the port. */ 918 /* Adjust queue depth for all luns on the port. */
929 fcport->last_queue_full = jiffies; 919 fcport->last_queue_full = jiffies;
930 spin_unlock_irq(&ha->hardware_lock);
931 starget_for_each_device(cp->device->sdev_target, 920 starget_for_each_device(cp->device->sdev_target,
932 fcport, qla2x00_adjust_sdev_qdepth_down); 921 fcport, qla2x00_adjust_sdev_qdepth_down);
933 spin_lock_irq(&ha->hardware_lock);
934 break; 922 break;
935 } 923 }
936 if (lscsi_status != SS_CHECK_CONDITION) 924 if (lscsi_status != SS_CHECK_CONDITION)
@@ -995,6 +983,22 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
995 if (lscsi_status != 0) { 983 if (lscsi_status != 0) {
996 cp->result = DID_OK << 16 | lscsi_status; 984 cp->result = DID_OK << 16 | lscsi_status;
997 985
986 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
987 DEBUG2(printk(KERN_INFO
988 "scsi(%ld): QUEUE FULL status detected "
989 "0x%x-0x%x.\n", ha->host_no, comp_status,
990 scsi_status));
991
992 /*
993 * Adjust queue depth for all luns on the
994 * port.
995 */
996 fcport->last_queue_full = jiffies;
997 starget_for_each_device(
998 cp->device->sdev_target, fcport,
999 qla2x00_adjust_sdev_qdepth_down);
1000 break;
1001 }
998 if (lscsi_status != SS_CHECK_CONDITION) 1002 if (lscsi_status != SS_CHECK_CONDITION)
999 break; 1003 break;
1000 1004
@@ -1482,12 +1486,8 @@ qla24xx_intr_handler(int irq, void *dev_id)
1482 1486
1483 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 1487 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
1484 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 1488 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1485 spin_lock_irqsave(&ha->mbx_reg_lock, flags);
1486
1487 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 1489 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1488 up(&ha->mbx_intr_sem); 1490 up(&ha->mbx_intr_sem);
1489
1490 spin_unlock_irqrestore(&ha->mbx_reg_lock, flags);
1491 } 1491 }
1492 1492
1493 return IRQ_HANDLED; 1493 return IRQ_HANDLED;
@@ -1536,3 +1536,216 @@ qla24xx_ms_entry(scsi_qla_host_t *ha, struct ct_entry_24xx *pkt)
1536 qla2x00_sp_compl(ha, sp); 1536 qla2x00_sp_compl(ha, sp);
1537} 1537}
1538 1538
1539static irqreturn_t
1540qla24xx_msix_rsp_q(int irq, void *dev_id)
1541{
1542 scsi_qla_host_t *ha;
1543 struct device_reg_24xx __iomem *reg;
1544 unsigned long flags;
1545
1546 ha = dev_id;
1547 reg = &ha->iobase->isp24;
1548
1549 spin_lock_irqsave(&ha->hardware_lock, flags);
1550
1551 qla24xx_process_response_queue(ha);
1552
1553 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1554 RD_REG_DWORD_RELAXED(&reg->hccr);
1555
1556 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1557
1558 return IRQ_HANDLED;
1559}
1560
1561static irqreturn_t
1562qla24xx_msix_default(int irq, void *dev_id)
1563{
1564 scsi_qla_host_t *ha;
1565 struct device_reg_24xx __iomem *reg;
1566 int status;
1567 unsigned long flags;
1568 unsigned long iter;
1569 uint32_t stat;
1570 uint32_t hccr;
1571 uint16_t mb[4];
1572
1573 ha = dev_id;
1574 reg = &ha->iobase->isp24;
1575 status = 0;
1576
1577 spin_lock_irqsave(&ha->hardware_lock, flags);
1578 for (iter = 50; iter--; ) {
1579 stat = RD_REG_DWORD(&reg->host_status);
1580 if (stat & HSRX_RISC_PAUSED) {
1581 hccr = RD_REG_DWORD(&reg->hccr);
1582
1583 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1584 "Dumping firmware!\n", hccr);
1585 ha->isp_ops.fw_dump(ha, 1);
1586 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
1587 break;
1588 } else if ((stat & HSRX_RISC_INT) == 0)
1589 break;
1590
1591 switch (stat & 0xff) {
1592 case 0x1:
1593 case 0x2:
1594 case 0x10:
1595 case 0x11:
1596 qla24xx_mbx_completion(ha, MSW(stat));
1597 status |= MBX_INTERRUPT;
1598
1599 break;
1600 case 0x12:
1601 mb[0] = MSW(stat);
1602 mb[1] = RD_REG_WORD(&reg->mailbox1);
1603 mb[2] = RD_REG_WORD(&reg->mailbox2);
1604 mb[3] = RD_REG_WORD(&reg->mailbox3);
1605 qla2x00_async_event(ha, mb);
1606 break;
1607 case 0x13:
1608 qla24xx_process_response_queue(ha);
1609 break;
1610 default:
1611 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1612 "(%d).\n",
1613 ha->host_no, stat & 0xff));
1614 break;
1615 }
1616 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1617 RD_REG_DWORD_RELAXED(&reg->hccr);
1618 }
1619 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1620
1621 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
1622 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1623 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1624 up(&ha->mbx_intr_sem);
1625 }
1626
1627 return IRQ_HANDLED;
1628}
1629
1630/* Interrupt handling helpers. */
1631
1632struct qla_init_msix_entry {
1633 uint16_t entry;
1634 uint16_t index;
1635 const char *name;
1636 irqreturn_t (*handler)(int, void *);
1637};
1638
1639static struct qla_init_msix_entry imsix_entries[QLA_MSIX_ENTRIES] = {
1640 { QLA_MSIX_DEFAULT, QLA_MIDX_DEFAULT,
1641 "qla2xxx (default)", qla24xx_msix_default },
1642
1643 { QLA_MSIX_RSP_Q, QLA_MIDX_RSP_Q,
1644 "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
1645};
1646
1647static void
1648qla24xx_disable_msix(scsi_qla_host_t *ha)
1649{
1650 int i;
1651 struct qla_msix_entry *qentry;
1652
1653 for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
1654 qentry = &ha->msix_entries[imsix_entries[i].index];
1655 if (qentry->have_irq)
1656 free_irq(qentry->msix_vector, ha);
1657 }
1658 pci_disable_msix(ha->pdev);
1659}
1660
1661static int
1662qla24xx_enable_msix(scsi_qla_host_t *ha)
1663{
1664 int i, ret;
1665 struct msix_entry entries[QLA_MSIX_ENTRIES];
1666 struct qla_msix_entry *qentry;
1667
1668 for (i = 0; i < QLA_MSIX_ENTRIES; i++)
1669 entries[i].entry = imsix_entries[i].entry;
1670
1671 ret = pci_enable_msix(ha->pdev, entries, ARRAY_SIZE(entries));
1672 if (ret) {
1673 qla_printk(KERN_WARNING, ha,
1674 "MSI-X: Failed to enable support -- %d/%d\n",
1675 QLA_MSIX_ENTRIES, ret);
1676 goto msix_out;
1677 }
1678 ha->flags.msix_enabled = 1;
1679
1680 for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
1681 qentry = &ha->msix_entries[imsix_entries[i].index];
1682 qentry->msix_vector = entries[i].vector;
1683 qentry->msix_entry = entries[i].entry;
1684 qentry->have_irq = 0;
1685 ret = request_irq(qentry->msix_vector,
1686 imsix_entries[i].handler, 0, imsix_entries[i].name, ha);
1687 if (ret) {
1688 qla_printk(KERN_WARNING, ha,
1689 "MSI-X: Unable to register handler -- %x/%d.\n",
1690 imsix_entries[i].index, ret);
1691 qla24xx_disable_msix(ha);
1692 goto msix_out;
1693 }
1694 qentry->have_irq = 1;
1695 }
1696
1697msix_out:
1698 return ret;
1699}
1700
1701int
1702qla2x00_request_irqs(scsi_qla_host_t *ha)
1703{
1704 int ret;
1705
1706 /* If possible, enable MSI-X. */
1707 if (!IS_QLA2432(ha))
1708 goto skip_msix;
1709
1710 if (ha->chip_revision < QLA_MSIX_CHIP_REV_24XX ||
1711 !QLA_MSIX_FW_MODE_1(ha->fw_attributes)) {
1712 DEBUG2(qla_printk(KERN_WARNING, ha,
1713 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
1714 ha->chip_revision, ha->fw_attributes));
1715
1716 goto skip_msix;
1717 }
1718
1719 ret = qla24xx_enable_msix(ha);
1720 if (!ret) {
1721 DEBUG2(qla_printk(KERN_INFO, ha,
1722 "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
1723 ha->fw_attributes));
1724 return ret;
1725 }
1726 qla_printk(KERN_WARNING, ha,
 1727 	    "MSI-X: Falling back to INTa mode -- %d.\n", ret);
1728skip_msix:
1729 ret = request_irq(ha->pdev->irq, ha->isp_ops.intr_handler,
1730 IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha);
1731 if (!ret) {
1732 ha->flags.inta_enabled = 1;
1733 ha->host->irq = ha->pdev->irq;
1734 } else {
1735 qla_printk(KERN_WARNING, ha,
1736 "Failed to reserve interrupt %d already in use.\n",
1737 ha->pdev->irq);
1738 }
1739
1740 return ret;
1741}
1742
1743void
1744qla2x00_free_irqs(scsi_qla_host_t *ha)
1745{
1746
1747 if (ha->flags.msix_enabled)
1748 qla24xx_disable_msix(ha);
1749 else if (ha->flags.inta_enabled)
1750 free_irq(ha->host->irq, ha);
1751}
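Editor's note: qla2x00_request_irqs() above follows the common "try MSI-X, otherwise fall back to a shared legacy interrupt" pattern. A condensed sketch of that pattern against the 2.6-era PCI/IRQ interfaces used in this hunk; the device name, handler, and vector count here are placeholders, not driver symbols:

#include <linux/pci.h>
#include <linux/interrupt.h>

#define MY_NVECS 2

static struct msix_entry my_msix[MY_NVECS];   /* .entry filled in, .vector returned */

static int my_setup_irqs(struct pci_dev *pdev, irq_handler_t handler, void *ctx)
{
    int i, ret;

    for (i = 0; i < MY_NVECS; i++)
        my_msix[i].entry = i;

    /* Ask for all vectors at once; a non-zero return means MSI-X is unusable here. */
    ret = pci_enable_msix(pdev, my_msix, MY_NVECS);
    if (ret)
        goto legacy;

    for (i = 0; i < MY_NVECS; i++) {
        ret = request_irq(my_msix[i].vector, handler, 0, "my_drv", ctx);
        if (ret) {
            while (--i >= 0)
                free_irq(my_msix[i].vector, ctx);
            pci_disable_msix(pdev);
            goto legacy;
        }
    }
    return 0;

legacy:
    /* Single shared INTx interrupt, as the skip_msix: path above does. */
    return request_irq(pdev->irq, handler, IRQF_SHARED, "my_drv", ctx);
}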
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 077e5789beeb..83376f6ac3db 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -55,7 +55,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
55 uint16_t __iomem *optr; 55 uint16_t __iomem *optr;
56 uint32_t cnt; 56 uint32_t cnt;
57 uint32_t mboxes; 57 uint32_t mboxes;
58 unsigned long mbx_flags = 0;
59 unsigned long wait_time; 58 unsigned long wait_time;
60 59
61 rval = QLA_SUCCESS; 60 rval = QLA_SUCCESS;
@@ -81,10 +80,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
81 /* Save mailbox command for debug */ 80 /* Save mailbox command for debug */
82 ha->mcp = mcp; 81 ha->mcp = mcp;
83 82
84 /* Try to get mailbox register access */
85 if (!abort_active)
86 spin_lock_irqsave(&ha->mbx_reg_lock, mbx_flags);
87
88 DEBUG11(printk("scsi(%ld): prepare to issue mbox cmd=0x%x.\n", 83 DEBUG11(printk("scsi(%ld): prepare to issue mbox cmd=0x%x.\n",
89 ha->host_no, mcp->mb[0])); 84 ha->host_no, mcp->mb[0]));
90 85
@@ -161,9 +156,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
161 WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT); 156 WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
162 spin_unlock_irqrestore(&ha->hardware_lock, flags); 157 spin_unlock_irqrestore(&ha->hardware_lock, flags);
163 158
164 if (!abort_active)
165 spin_unlock_irqrestore(&ha->mbx_reg_lock, mbx_flags);
166
167 /* Wait for either the timer to expire 159 /* Wait for either the timer to expire
168 * or the mbox completion interrupt 160 * or the mbox completion interrupt
169 */ 161 */
@@ -184,8 +176,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
184 else 176 else
185 WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT); 177 WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
186 spin_unlock_irqrestore(&ha->hardware_lock, flags); 178 spin_unlock_irqrestore(&ha->hardware_lock, flags);
187 if (!abort_active)
188 spin_unlock_irqrestore(&ha->mbx_reg_lock, mbx_flags);
189 179
190 wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */ 180 wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
191 while (!ha->flags.mbox_int) { 181 while (!ha->flags.mbox_int) {
@@ -201,9 +191,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
201 } /* while */ 191 } /* while */
202 } 192 }
203 193
204 if (!abort_active)
205 spin_lock_irqsave(&ha->mbx_reg_lock, mbx_flags);
206
207 /* Check whether we timed out */ 194 /* Check whether we timed out */
208 if (ha->flags.mbox_int) { 195 if (ha->flags.mbox_int) {
209 uint16_t *iptr2; 196 uint16_t *iptr2;
@@ -256,9 +243,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
256 rval = QLA_FUNCTION_TIMEOUT; 243 rval = QLA_FUNCTION_TIMEOUT;
257 } 244 }
258 245
259 if (!abort_active)
260 spin_unlock_irqrestore(&ha->mbx_reg_lock, mbx_flags);
261
262 ha->flags.mbox_busy = 0; 246 ha->flags.mbox_busy = 0;
263 247
264 /* Clean up */ 248 /* Clean up */
@@ -1713,7 +1697,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1713 lg->entry_count = 1; 1697 lg->entry_count = 1;
1714 lg->nport_handle = cpu_to_le16(loop_id); 1698 lg->nport_handle = cpu_to_le16(loop_id);
1715 lg->control_flags = 1699 lg->control_flags =
1716 __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_EXPL_LOGO); 1700 __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1717 lg->port_id[0] = al_pa; 1701 lg->port_id[0] = al_pa;
1718 lg->port_id[1] = area; 1702 lg->port_id[1] = area;
1719 lg->port_id[2] = domain; 1703 lg->port_id[2] = domain;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index d6445ae841ba..68f5d24b938b 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1485,6 +1485,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1485 ha->isp_ops.fw_dump = qla2100_fw_dump; 1485 ha->isp_ops.fw_dump = qla2100_fw_dump;
1486 ha->isp_ops.read_optrom = qla2x00_read_optrom_data; 1486 ha->isp_ops.read_optrom = qla2x00_read_optrom_data;
1487 ha->isp_ops.write_optrom = qla2x00_write_optrom_data; 1487 ha->isp_ops.write_optrom = qla2x00_write_optrom_data;
1488 ha->isp_ops.get_flash_version = qla2x00_get_flash_version;
1488 if (IS_QLA2100(ha)) { 1489 if (IS_QLA2100(ha)) {
1489 host->max_id = MAX_TARGETS_2100; 1490 host->max_id = MAX_TARGETS_2100;
1490 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100; 1491 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
@@ -1550,6 +1551,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1550 ha->isp_ops.beacon_on = qla24xx_beacon_on; 1551 ha->isp_ops.beacon_on = qla24xx_beacon_on;
1551 ha->isp_ops.beacon_off = qla24xx_beacon_off; 1552 ha->isp_ops.beacon_off = qla24xx_beacon_off;
1552 ha->isp_ops.beacon_blink = qla24xx_beacon_blink; 1553 ha->isp_ops.beacon_blink = qla24xx_beacon_blink;
1554 ha->isp_ops.get_flash_version = qla24xx_get_flash_version;
1553 ha->gid_list_info_size = 8; 1555 ha->gid_list_info_size = 8;
1554 ha->optrom_size = OPTROM_SIZE_24XX; 1556 ha->optrom_size = OPTROM_SIZE_24XX;
1555 } 1557 }
@@ -1564,14 +1566,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1564 INIT_LIST_HEAD(&ha->list); 1566 INIT_LIST_HEAD(&ha->list);
1565 INIT_LIST_HEAD(&ha->fcports); 1567 INIT_LIST_HEAD(&ha->fcports);
1566 1568
1567 /*
1568 * These locks are used to prevent more than one CPU
1569 * from modifying the queue at the same time. The
1570 * higher level "host_lock" will reduce most
1571 * contention for these locks.
1572 */
1573 spin_lock_init(&ha->mbx_reg_lock);
1574
1575 qla2x00_config_dma_addressing(ha); 1569 qla2x00_config_dma_addressing(ha);
1576 if (qla2x00_mem_alloc(ha)) { 1570 if (qla2x00_mem_alloc(ha)) {
1577 qla_printk(KERN_WARNING, ha, 1571 qla_printk(KERN_WARNING, ha,
@@ -1615,15 +1609,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1615 host->max_lun = MAX_LUNS; 1609 host->max_lun = MAX_LUNS;
1616 host->transportt = qla2xxx_transport_template; 1610 host->transportt = qla2xxx_transport_template;
1617 1611
1618 ret = request_irq(pdev->irq, ha->isp_ops.intr_handler, 1612 ret = qla2x00_request_irqs(ha);
1619 IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha); 1613 if (ret)
1620 if (ret) {
1621 qla_printk(KERN_WARNING, ha,
1622 "Failed to reserve interrupt %d already in use.\n",
1623 pdev->irq);
1624 goto probe_failed; 1614 goto probe_failed;
1625 }
1626 host->irq = pdev->irq;
1627 1615
1628 /* Initialized the timer */ 1616 /* Initialized the timer */
1629 qla2x00_start_timer(ha, qla2x00_timer, WATCH_INTERVAL); 1617 qla2x00_start_timer(ha, qla2x00_timer, WATCH_INTERVAL);
@@ -1753,9 +1741,7 @@ qla2x00_free_device(scsi_qla_host_t *ha)
1753 1741
1754 qla2x00_mem_free(ha); 1742 qla2x00_mem_free(ha);
1755 1743
1756 /* Detach interrupts */ 1744 qla2x00_free_irqs(ha);
1757 if (ha->host->irq)
1758 free_irq(ha->host->irq, ha);
1759 1745
1760 /* release io space registers */ 1746 /* release io space registers */
1761 if (ha->iobase) 1747 if (ha->iobase)
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 15390ad87456..ff1dd4175a7f 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -611,7 +611,6 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
611 flash_conf_to_access_addr(0x0339), 611 flash_conf_to_access_addr(0x0339),
612 (fdata & 0xff00) | ((fdata << 16) & 612 (fdata & 0xff00) | ((fdata << 16) &
613 0xff0000) | ((fdata >> 16) & 0xff)); 613 0xff0000) | ((fdata >> 16) & 0xff));
614 fdata = (faddr & sec_mask) << 2;
615 ret = qla24xx_write_flash_dword(ha, conf_addr, 614 ret = qla24xx_write_flash_dword(ha, conf_addr,
616 (fdata & 0xff00) |((fdata << 16) & 615 (fdata & 0xff00) |((fdata << 16) &
617 0xff0000) | ((fdata >> 16) & 0xff)); 616 0xff0000) | ((fdata >> 16) & 0xff));
@@ -1383,6 +1382,29 @@ qla2x00_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
1383 qla2x00_write_flash_byte(ha, 0x5555, 0xf0); 1382 qla2x00_write_flash_byte(ha, 0x5555, 0xf0);
1384} 1383}
1385 1384
1385static void
1386qla2x00_read_flash_data(scsi_qla_host_t *ha, uint8_t *tmp_buf, uint32_t saddr,
1387 uint32_t length)
1388{
1389 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1390 uint32_t midpoint, ilength;
1391 uint8_t data;
1392
1393 midpoint = length / 2;
1394
1395 WRT_REG_WORD(&reg->nvram, 0);
1396 RD_REG_WORD(&reg->nvram);
1397 for (ilength = 0; ilength < length; saddr++, ilength++, tmp_buf++) {
1398 if (ilength == midpoint) {
1399 WRT_REG_WORD(&reg->nvram, NVR_SELECT);
1400 RD_REG_WORD(&reg->nvram);
1401 }
1402 data = qla2x00_read_flash_byte(ha, saddr);
1403 if (saddr % 100)
1404 udelay(10);
1405 *tmp_buf = data;
1406 }
1407}
1386 1408
1387static inline void 1409static inline void
1388qla2x00_suspend_hba(struct scsi_qla_host *ha) 1410qla2x00_suspend_hba(struct scsi_qla_host *ha)
@@ -1722,3 +1744,327 @@ qla24xx_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1722 1744
1723 return rval; 1745 return rval;
1724} 1746}
1747
1748/**
1749 * qla2x00_get_fcode_version() - Determine an FCODE image's version.
1750 * @ha: HA context
1751 * @pcids: Pointer to the FCODE PCI data structure
1752 *
1753 * The process of retrieving the FCODE version information is at best
1754 * described as interesting.
1755 *
1756 * Within the first 100h bytes of the image an ASCII string is present
1757 * which contains several pieces of information including the FCODE
1758 * version. Unfortunately it seems the only reliable way to retrieve
1759 * the version is by scanning for another sentinel within the string,
1760 * the FCODE build date:
1761 *
1762 * ... 2.00.02 10/17/02 ...
1763 *
1764 * Returns QLA_SUCCESS on successful retrieval of version.
1765 */
1766static void
1767qla2x00_get_fcode_version(scsi_qla_host_t *ha, uint32_t pcids)
1768{
1769 int ret = QLA_FUNCTION_FAILED;
1770 uint32_t istart, iend, iter, vend;
1771 uint8_t do_next, rbyte, *vbyte;
1772
1773 memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision));
1774
1775 /* Skip the PCI data structure. */
1776 istart = pcids +
1777 ((qla2x00_read_flash_byte(ha, pcids + 0x0B) << 8) |
1778 qla2x00_read_flash_byte(ha, pcids + 0x0A));
1779 iend = istart + 0x100;
1780 do {
1781 /* Scan for the sentinel date string...eeewww. */
1782 do_next = 0;
1783 iter = istart;
1784 while ((iter < iend) && !do_next) {
1785 iter++;
1786 if (qla2x00_read_flash_byte(ha, iter) == '/') {
1787 if (qla2x00_read_flash_byte(ha, iter + 2) ==
1788 '/')
1789 do_next++;
1790 else if (qla2x00_read_flash_byte(ha,
1791 iter + 3) == '/')
1792 do_next++;
1793 }
1794 }
1795 if (!do_next)
1796 break;
1797
1798 /* Backtrack to previous ' ' (space). */
1799 do_next = 0;
1800 while ((iter > istart) && !do_next) {
1801 iter--;
1802 if (qla2x00_read_flash_byte(ha, iter) == ' ')
1803 do_next++;
1804 }
1805 if (!do_next)
1806 break;
1807
1808 /*
1809 * Mark end of version tag, and find previous ' ' (space) or
1810 * string length (recent FCODE images -- major hack ahead!!!).
1811 */
1812 vend = iter - 1;
1813 do_next = 0;
1814 while ((iter > istart) && !do_next) {
1815 iter--;
1816 rbyte = qla2x00_read_flash_byte(ha, iter);
1817 if (rbyte == ' ' || rbyte == 0xd || rbyte == 0x10)
1818 do_next++;
1819 }
1820 if (!do_next)
1821 break;
1822
1823 /* Mark beginning of version tag, and copy data. */
1824 iter++;
1825 if ((vend - iter) &&
1826 ((vend - iter) < sizeof(ha->fcode_revision))) {
1827 vbyte = ha->fcode_revision;
1828 while (iter <= vend) {
1829 *vbyte++ = qla2x00_read_flash_byte(ha, iter);
1830 iter++;
1831 }
1832 ret = QLA_SUCCESS;
1833 }
1834 } while (0);
1835
1836 if (ret != QLA_SUCCESS)
1837 memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision));
1838}
1839
1840int
1841qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
1842{
1843 int ret = QLA_SUCCESS;
1844 uint8_t code_type, last_image;
1845 uint32_t pcihdr, pcids;
1846 uint8_t *dbyte;
1847 uint16_t *dcode;
1848
1849 if (!ha->pio_address || !mbuf)
1850 return QLA_FUNCTION_FAILED;
1851
1852 memset(ha->bios_revision, 0, sizeof(ha->bios_revision));
1853 memset(ha->efi_revision, 0, sizeof(ha->efi_revision));
1854 memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision));
1855 memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
1856
1857 qla2x00_flash_enable(ha);
1858
1859 /* Begin with first PCI expansion ROM header. */
1860 pcihdr = 0;
1861 last_image = 1;
1862 do {
1863 /* Verify PCI expansion ROM header. */
1864 if (qla2x00_read_flash_byte(ha, pcihdr) != 0x55 ||
1865 qla2x00_read_flash_byte(ha, pcihdr + 0x01) != 0xaa) {
1866 /* No signature */
1867 DEBUG2(printk("scsi(%ld): No matching ROM "
1868 "signature.\n", ha->host_no));
1869 ret = QLA_FUNCTION_FAILED;
1870 break;
1871 }
1872
1873 /* Locate PCI data structure. */
1874 pcids = pcihdr +
1875 ((qla2x00_read_flash_byte(ha, pcihdr + 0x19) << 8) |
1876 qla2x00_read_flash_byte(ha, pcihdr + 0x18));
1877
1878 /* Validate signature of PCI data structure. */
1879 if (qla2x00_read_flash_byte(ha, pcids) != 'P' ||
1880 qla2x00_read_flash_byte(ha, pcids + 0x1) != 'C' ||
1881 qla2x00_read_flash_byte(ha, pcids + 0x2) != 'I' ||
1882 qla2x00_read_flash_byte(ha, pcids + 0x3) != 'R') {
1883 /* Incorrect header. */
1884 DEBUG2(printk("%s(): PCI data struct not found "
1885 "pcir_adr=%x.\n", __func__, pcids));
1886 ret = QLA_FUNCTION_FAILED;
1887 break;
1888 }
1889
1890 /* Read version */
1891 code_type = qla2x00_read_flash_byte(ha, pcids + 0x14);
1892 switch (code_type) {
1893 case ROM_CODE_TYPE_BIOS:
1894 /* Intel x86, PC-AT compatible. */
1895 ha->bios_revision[0] =
1896 qla2x00_read_flash_byte(ha, pcids + 0x12);
1897 ha->bios_revision[1] =
1898 qla2x00_read_flash_byte(ha, pcids + 0x13);
1899 DEBUG3(printk("%s(): read BIOS %d.%d.\n", __func__,
1900 ha->bios_revision[1], ha->bios_revision[0]));
1901 break;
1902 case ROM_CODE_TYPE_FCODE:
1903 /* Open Firmware standard for PCI (FCode). */
1904 /* Eeeewww... */
1905 qla2x00_get_fcode_version(ha, pcids);
1906 break;
1907 case ROM_CODE_TYPE_EFI:
1908 /* Extensible Firmware Interface (EFI). */
1909 ha->efi_revision[0] =
1910 qla2x00_read_flash_byte(ha, pcids + 0x12);
1911 ha->efi_revision[1] =
1912 qla2x00_read_flash_byte(ha, pcids + 0x13);
1913 DEBUG3(printk("%s(): read EFI %d.%d.\n", __func__,
1914 ha->efi_revision[1], ha->efi_revision[0]));
1915 break;
1916 default:
1917 DEBUG2(printk("%s(): Unrecognized code type %x at "
1918 "pcids %x.\n", __func__, code_type, pcids));
1919 break;
1920 }
1921
1922 last_image = qla2x00_read_flash_byte(ha, pcids + 0x15) & BIT_7;
1923
1924 /* Locate next PCI expansion ROM. */
1925 pcihdr += ((qla2x00_read_flash_byte(ha, pcids + 0x11) << 8) |
1926 qla2x00_read_flash_byte(ha, pcids + 0x10)) * 512;
1927 } while (!last_image);
1928
1929 if (IS_QLA2322(ha)) {
1930 /* Read firmware image information. */
1931 memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
1932 dbyte = mbuf;
1933 memset(dbyte, 0, 8);
1934 dcode = (uint16_t *)dbyte;
1935
1936 qla2x00_read_flash_data(ha, dbyte, FA_RISC_CODE_ADDR * 4 + 10,
1937 8);
1938 DEBUG3(printk("%s(%ld): dumping fw ver from flash:\n",
1939 __func__, ha->host_no));
1940 DEBUG3(qla2x00_dump_buffer((uint8_t *)dbyte, 8));
1941
1942 if ((dcode[0] == 0xffff && dcode[1] == 0xffff &&
1943 dcode[2] == 0xffff && dcode[3] == 0xffff) ||
1944 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
1945 dcode[3] == 0)) {
1946 DEBUG2(printk("%s(): Unrecognized fw revision at "
1947 "%x.\n", __func__, FA_RISC_CODE_ADDR * 4));
1948 } else {
1949 /* values are in big endian */
1950 ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1];
1951 ha->fw_revision[1] = dbyte[2] << 16 | dbyte[3];
1952 ha->fw_revision[2] = dbyte[4] << 16 | dbyte[5];
1953 }
1954 }
1955
1956 qla2x00_flash_disable(ha);
1957
1958 return ret;
1959}
1960
1961int
1962qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
1963{
1964 int ret = QLA_SUCCESS;
1965 uint32_t pcihdr, pcids;
1966 uint32_t *dcode;
1967 uint8_t *bcode;
1968 uint8_t code_type, last_image;
1969 int i;
1970
1971 if (!mbuf)
1972 return QLA_FUNCTION_FAILED;
1973
1974 memset(ha->bios_revision, 0, sizeof(ha->bios_revision));
1975 memset(ha->efi_revision, 0, sizeof(ha->efi_revision));
1976 memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision));
1977 memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
1978
1979 dcode = mbuf;
1980
1981 /* Begin with first PCI expansion ROM header. */
1982 pcihdr = 0;
1983 last_image = 1;
1984 do {
1985 /* Verify PCI expansion ROM header. */
1986 qla24xx_read_flash_data(ha, dcode, pcihdr >> 2, 0x20);
1987 bcode = mbuf + (pcihdr % 4);
1988 if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) {
1989 /* No signature */
1990 DEBUG2(printk("scsi(%ld): No matching ROM "
1991 "signature.\n", ha->host_no));
1992 ret = QLA_FUNCTION_FAILED;
1993 break;
1994 }
1995
1996 /* Locate PCI data structure. */
1997 pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
1998
1999 qla24xx_read_flash_data(ha, dcode, pcids >> 2, 0x20);
2000 bcode = mbuf + (pcihdr % 4);
2001
2002 /* Validate signature of PCI data structure. */
2003 if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
2004 bcode[0x2] != 'I' || bcode[0x3] != 'R') {
2005 /* Incorrect header. */
2006 DEBUG2(printk("%s(): PCI data struct not found "
2007 "pcir_adr=%x.\n", __func__, pcids));
2008 ret = QLA_FUNCTION_FAILED;
2009 break;
2010 }
2011
2012 /* Read version */
2013 code_type = bcode[0x14];
2014 switch (code_type) {
2015 case ROM_CODE_TYPE_BIOS:
2016 /* Intel x86, PC-AT compatible. */
2017 ha->bios_revision[0] = bcode[0x12];
2018 ha->bios_revision[1] = bcode[0x13];
2019 DEBUG3(printk("%s(): read BIOS %d.%d.\n", __func__,
2020 ha->bios_revision[1], ha->bios_revision[0]));
2021 break;
2022 case ROM_CODE_TYPE_FCODE:
2023 /* Open Firmware standard for PCI (FCode). */
2024 ha->fcode_revision[0] = bcode[0x12];
2025 ha->fcode_revision[1] = bcode[0x13];
2026 DEBUG3(printk("%s(): read FCODE %d.%d.\n", __func__,
2027 ha->fcode_revision[1], ha->fcode_revision[0]));
2028 break;
2029 case ROM_CODE_TYPE_EFI:
2030 /* Extensible Firmware Interface (EFI). */
2031 ha->efi_revision[0] = bcode[0x12];
2032 ha->efi_revision[1] = bcode[0x13];
2033 DEBUG3(printk("%s(): read EFI %d.%d.\n", __func__,
2034 ha->efi_revision[1], ha->efi_revision[0]));
2035 break;
2036 default:
2037 DEBUG2(printk("%s(): Unrecognized code type %x at "
2038 "pcids %x.\n", __func__, code_type, pcids));
2039 break;
2040 }
2041
2042 last_image = bcode[0x15] & BIT_7;
2043
2044 /* Locate next PCI expansion ROM. */
2045 pcihdr += ((bcode[0x11] << 8) | bcode[0x10]) * 512;
2046 } while (!last_image);
2047
2048 /* Read firmware image information. */
2049 memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
2050 dcode = mbuf;
2051
2052 qla24xx_read_flash_data(ha, dcode, FA_RISC_CODE_ADDR + 4, 4);
2053 for (i = 0; i < 4; i++)
2054 dcode[i] = be32_to_cpu(dcode[i]);
2055
2056 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
2057 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
2058 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
2059 dcode[3] == 0)) {
2060 DEBUG2(printk("%s(): Unrecognized fw version at %x.\n",
2061 __func__, FA_RISC_CODE_ADDR));
2062 } else {
2063 ha->fw_revision[0] = dcode[0];
2064 ha->fw_revision[1] = dcode[1];
2065 ha->fw_revision[2] = dcode[2];
2066 ha->fw_revision[3] = dcode[3];
2067 }
2068
2069 return ret;
2070}
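Editor's note: both get_flash_version routines above walk the standard PCI expansion ROM chain: each image begins with a 0x55 0xAA signature, offsets 0x18/0x19 point to a 'PCIR' data structure, PCIR+0x12/0x13 hold the code revision, +0x14 the code type, +0x10/0x11 the image length in 512-byte units, and bit 7 of +0x15 flags the last image. A stand-alone host-side sketch of the same walk over a ROM dump loaded from a file (the file name is illustrative):

#include <stdio.h>
#include <stdint.h>

/* Walk PCI expansion ROM images in a flat buffer, printing type and revision. */
static void walk_rom_images(const uint8_t *rom, size_t len)
{
    size_t hdr = 0;

    for (;;) {
        const uint8_t *p;
        size_t pcir;
        int last;

        if (hdr + 0x1a > len || rom[hdr] != 0x55 || rom[hdr + 1] != 0xaa)
            break;                              /* no (further) ROM signature */

        pcir = hdr + ((rom[hdr + 0x19] << 8) | rom[hdr + 0x18]);
        if (pcir + 0x18 > len)
            break;
        p = rom + pcir;
        if (p[0] != 'P' || p[1] != 'C' || p[2] != 'I' || p[3] != 'R')
            break;                              /* malformed PCI data structure */

        printf("image @0x%zx: code type %d, revision %d.%02d\n",
            hdr, p[0x14], p[0x13], p[0x12]);

        last = p[0x15] & 0x80;                  /* bit 7: last-image indicator */
        hdr += (size_t)((p[0x11] << 8) | p[0x10]) * 512;
        if (last)
            break;
    }
}

int main(void)
{
    FILE *f = fopen("rom.bin", "rb");           /* hypothetical ROM dump */
    static uint8_t rom[1 << 20];
    size_t len;

    if (!f)
        return 1;
    len = fread(rom, 1, sizeof(rom), f);
    fclose(f);
    walk_rom_images(rom, len);
    return 0;
}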
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 24cffd98ee63..f33e2eb9f1b9 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -673,27 +673,6 @@ void __scsi_done(struct scsi_cmnd *cmd)
673} 673}
674 674
675/* 675/*
676 * Function: scsi_retry_command
677 *
678 * Purpose: Send a command back to the low level to be retried.
679 *
680 * Notes: This command is always executed in the context of the
681 * bottom half handler, or the error handler thread. Low
682 * level drivers should not become re-entrant as a result of
683 * this.
684 */
685int scsi_retry_command(struct scsi_cmnd *cmd)
686{
687 /*
688 * Zero the sense information from the last time we tried
689 * this command.
690 */
691 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
692
693 return scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
694}
695
696/*
697 * Function: scsi_finish_command 676 * Function: scsi_finish_command
698 * 677 *
699 * Purpose: Pass command off to upper layer for finishing of I/O 678 * Purpose: Pass command off to upper layer for finishing of I/O
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 30ee3d72c021..5adbbeedec38 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -51,10 +51,10 @@
51#include "scsi_logging.h" 51#include "scsi_logging.h"
52#include "scsi_debug.h" 52#include "scsi_debug.h"
53 53
54#define SCSI_DEBUG_VERSION "1.80" 54#define SCSI_DEBUG_VERSION "1.81"
55static const char * scsi_debug_version_date = "20061018"; 55static const char * scsi_debug_version_date = "20070104";
56 56
57/* Additional Sense Code (ASC) used */ 57/* Additional Sense Code (ASC) */
58#define NO_ADDITIONAL_SENSE 0x0 58#define NO_ADDITIONAL_SENSE 0x0
59#define LOGICAL_UNIT_NOT_READY 0x4 59#define LOGICAL_UNIT_NOT_READY 0x4
60#define UNRECOVERED_READ_ERR 0x11 60#define UNRECOVERED_READ_ERR 0x11
@@ -65,9 +65,13 @@ static const char * scsi_debug_version_date = "20061018";
65#define INVALID_FIELD_IN_PARAM_LIST 0x26 65#define INVALID_FIELD_IN_PARAM_LIST 0x26
66#define POWERON_RESET 0x29 66#define POWERON_RESET 0x29
67#define SAVING_PARAMS_UNSUP 0x39 67#define SAVING_PARAMS_UNSUP 0x39
68#define TRANSPORT_PROBLEM 0x4b
68#define THRESHOLD_EXCEEDED 0x5d 69#define THRESHOLD_EXCEEDED 0x5d
69#define LOW_POWER_COND_ON 0x5e 70#define LOW_POWER_COND_ON 0x5e
70 71
72/* Additional Sense Code Qualifier (ASCQ) */
73#define ACK_NAK_TO 0x3
74
71#define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */ 75#define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
72 76
73/* Default values for driver parameters */ 77/* Default values for driver parameters */
@@ -95,15 +99,20 @@ static const char * scsi_debug_version_date = "20061018";
95#define SCSI_DEBUG_OPT_MEDIUM_ERR 2 99#define SCSI_DEBUG_OPT_MEDIUM_ERR 2
96#define SCSI_DEBUG_OPT_TIMEOUT 4 100#define SCSI_DEBUG_OPT_TIMEOUT 4
97#define SCSI_DEBUG_OPT_RECOVERED_ERR 8 101#define SCSI_DEBUG_OPT_RECOVERED_ERR 8
102#define SCSI_DEBUG_OPT_TRANSPORT_ERR 16
98/* When "every_nth" > 0 then modulo "every_nth" commands: 103/* When "every_nth" > 0 then modulo "every_nth" commands:
99 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set 104 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
100 * - a RECOVERED_ERROR is simulated on successful read and write 105 * - a RECOVERED_ERROR is simulated on successful read and write
101 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set. 106 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
107 * - a TRANSPORT_ERROR is simulated on successful read and write
108 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
102 * 109 *
103 * When "every_nth" < 0 then after "- every_nth" commands: 110 * When "every_nth" < 0 then after "- every_nth" commands:
104 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set 111 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
105 * - a RECOVERED_ERROR is simulated on successful read and write 112 * - a RECOVERED_ERROR is simulated on successful read and write
106 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set. 113 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
114 * - a TRANSPORT_ERROR is simulated on successful read and write
115 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
107 * This will continue until some other action occurs (e.g. the user 116 * This will continue until some other action occurs (e.g. the user
108 * writing a new value (other than -1 or 1) to every_nth via sysfs). 117 * writing a new value (other than -1 or 1) to every_nth via sysfs).
109 */ 118 */
@@ -315,6 +324,7 @@ int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done)
315 int target = SCpnt->device->id; 324 int target = SCpnt->device->id;
316 struct sdebug_dev_info * devip = NULL; 325 struct sdebug_dev_info * devip = NULL;
317 int inj_recovered = 0; 326 int inj_recovered = 0;
327 int inj_transport = 0;
318 int delay_override = 0; 328 int delay_override = 0;
319 329
320 if (done == NULL) 330 if (done == NULL)
@@ -352,6 +362,8 @@ int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done)
352 return 0; /* ignore command causing timeout */ 362 return 0; /* ignore command causing timeout */
353 else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts) 363 else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
354 inj_recovered = 1; /* to reads and writes below */ 364 inj_recovered = 1; /* to reads and writes below */
365 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
366 inj_transport = 1; /* to reads and writes below */
355 } 367 }
356 368
357 if (devip->wlun) { 369 if (devip->wlun) {
@@ -468,7 +480,11 @@ int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done)
468 mk_sense_buffer(devip, RECOVERED_ERROR, 480 mk_sense_buffer(devip, RECOVERED_ERROR,
469 THRESHOLD_EXCEEDED, 0); 481 THRESHOLD_EXCEEDED, 0);
470 errsts = check_condition_result; 482 errsts = check_condition_result;
471 } 483 } else if (inj_transport && (0 == errsts)) {
484 mk_sense_buffer(devip, ABORTED_COMMAND,
485 TRANSPORT_PROBLEM, ACK_NAK_TO);
486 errsts = check_condition_result;
487 }
472 break; 488 break;
473 case REPORT_LUNS: /* mandatory, ignore unit attention */ 489 case REPORT_LUNS: /* mandatory, ignore unit attention */
474 delay_override = 1; 490 delay_override = 1;
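
When the transport fault fires, the command completes with CHECK CONDITION and sense data of ABORTED COMMAND (key 0x0b), ASC 0x4b (TRANSPORT PROBLEM), ASCQ 0x03 (ACK/NAK TIMEOUT), which is what mk_sense_buffer() is asked to build above. For illustration only, here is a standalone sketch of the equivalent fixed-format sense bytes; this helper is not part of the driver.

#include <linux/string.h>

/* Sketch: fixed-format sense data equivalent to
 * mk_sense_buffer(devip, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO). */
static void fill_aborted_transport_sense(unsigned char *sb, int sb_len)
{
	memset(sb, 0, sb_len);
	sb[0] = 0x70;		/* current error, fixed format */
	sb[2] = 0x0b;		/* sense key: ABORTED COMMAND */
	sb[7] = 0x0a;		/* additional sense length */
	sb[12] = 0x4b;		/* ASC: TRANSPORT PROBLEM */
	sb[13] = 0x03;		/* ASCQ: ACK/NAK TIMEOUT */
}
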
@@ -531,6 +547,9 @@ int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done)
531 delay_override = 1; 547 delay_override = 1;
532 errsts = check_readiness(SCpnt, 0, devip); 548 errsts = check_readiness(SCpnt, 0, devip);
533 break; 549 break;
550 case WRITE_BUFFER:
551 errsts = check_readiness(SCpnt, 1, devip);
552 break;
534 default: 553 default:
535 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) 554 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
536 printk(KERN_INFO "scsi_debug: Opcode: 0x%x not " 555 printk(KERN_INFO "scsi_debug: Opcode: 0x%x not "
@@ -954,7 +973,9 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
954 int alloc_len, n, ret; 973 int alloc_len, n, ret;
955 974
956 alloc_len = (cmd[3] << 8) + cmd[4]; 975 alloc_len = (cmd[3] << 8) + cmd[4];
957 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_KERNEL); 976 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
977 if (! arr)
978 return DID_REQUEUE << 16;
958 if (devip->wlun) 979 if (devip->wlun)
959 pq_pdt = 0x1e; /* present, wlun */ 980 pq_pdt = 0x1e; /* present, wlun */
960 else if (scsi_debug_no_lun_0 && (0 == devip->lun)) 981 else if (scsi_debug_no_lun_0 && (0 == devip->lun))
@@ -1217,7 +1238,9 @@ static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1217 alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8) 1238 alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
1218 + cmd[9]); 1239 + cmd[9]);
1219 1240
1220 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_KERNEL); 1241 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1242 if (! arr)
1243 return DID_REQUEUE << 16;
1221 /* 1244 /*
1222 * EVPD page 0x88 states we have two ports, one 1245 * EVPD page 0x88 states we have two ports, one
1223 * real and a fake port with no device connected. 1246 * real and a fake port with no device connected.
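
The response builders now allocate with GFP_ATOMIC instead of GFP_KERNEL because queuecommand() can be entered in atomic context with the host lock held, so a sleeping allocation is not allowed there; if the allocation fails, returning DID_REQUEUE in the host byte asks the midlayer to retry the command later instead of failing it. A minimal sketch of that pattern follows; the function name is a stand-in, not driver code.

#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

/* Sketch: allocate atomically or ask the midlayer to requeue the command. */
static int resp_example(struct scsi_cmnd *scp, int arr_sz)
{
	unsigned char *arr;

	arr = kzalloc(arr_sz, GFP_ATOMIC);	/* may not sleep here */
	if (!arr)
		return DID_REQUEUE << 16;	/* midlayer retries the command */
	/* ... build the response in arr and copy it into scp's buffer ... */
	kfree(arr);
	return 0;
}
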
@@ -1996,6 +2019,8 @@ static int scsi_debug_slave_configure(struct scsi_device * sdp)
1996 if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN) 2019 if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
1997 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN; 2020 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
1998 devip = devInfoReg(sdp); 2021 devip = devInfoReg(sdp);
2022 if (NULL == devip)
2023 return 1; /* no resources, will be marked offline */
1999 sdp->hostdata = devip; 2024 sdp->hostdata = devip;
2000 if (sdp->host->cmd_per_lun) 2025 if (sdp->host->cmd_per_lun)
2001 scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING, 2026 scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING,
@@ -2044,7 +2069,7 @@ static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
2044 } 2069 }
2045 } 2070 }
2046 if (NULL == open_devip) { /* try and make a new one */ 2071 if (NULL == open_devip) { /* try and make a new one */
2047 open_devip = kzalloc(sizeof(*open_devip),GFP_KERNEL); 2072 open_devip = kzalloc(sizeof(*open_devip),GFP_ATOMIC);
2048 if (NULL == open_devip) { 2073 if (NULL == open_devip) {
2049 printk(KERN_ERR "%s: out of memory at line %d\n", 2074 printk(KERN_ERR "%s: out of memory at line %d\n",
2050 __FUNCTION__, __LINE__); 2075 __FUNCTION__, __LINE__);
@@ -2388,7 +2413,7 @@ MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
2388MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)"); 2413MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
2389MODULE_PARM_DESC(num_parts, "number of partitions(def=0)"); 2414MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
2390MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)"); 2415MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
2391MODULE_PARM_DESC(opts, "1->noise, 2->medium_error, 4->... (def=0)"); 2416MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
2392MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])"); 2417MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
2393MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])"); 2418MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
2394MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)"); 2419MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
@@ -2943,7 +2968,6 @@ static int sdebug_add_adapter(void)
2943 struct list_head *lh, *lh_sf; 2968 struct list_head *lh, *lh_sf;
2944 2969
2945 sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL); 2970 sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
2946
2947 if (NULL == sdbg_host) { 2971 if (NULL == sdbg_host) {
2948 printk(KERN_ERR "%s: out of memory at line %d\n", 2972 printk(KERN_ERR "%s: out of memory at line %d\n",
2949 __FUNCTION__, __LINE__); 2973 __FUNCTION__, __LINE__);
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 2ecb6ff42444..8e5011d13a18 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -672,8 +672,8 @@ EXPORT_SYMBOL(scsi_eh_finish_cmd);
672 * XXX: Long term this code should go away, but that needs an audit of 672 * XXX: Long term this code should go away, but that needs an audit of
673 * all LLDDs first. 673 * all LLDDs first.
674 **/ 674 **/
675static int scsi_eh_get_sense(struct list_head *work_q, 675int scsi_eh_get_sense(struct list_head *work_q,
676 struct list_head *done_q) 676 struct list_head *done_q)
677{ 677{
678 struct scsi_cmnd *scmd, *next; 678 struct scsi_cmnd *scmd, *next;
679 int rtn; 679 int rtn;
@@ -715,6 +715,7 @@ static int scsi_eh_get_sense(struct list_head *work_q,
715 715
716 return list_empty(work_q); 716 return list_empty(work_q);
717} 717}
718EXPORT_SYMBOL_GPL(scsi_eh_get_sense);
718 719
719/** 720/**
720 * scsi_try_to_abort_cmd - Ask host to abort a running command. 721 * scsi_try_to_abort_cmd - Ask host to abort a running command.
@@ -1411,9 +1412,9 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
1411 * @eh_done_q: list_head for processed commands. 1412 * @eh_done_q: list_head for processed commands.
1412 * 1413 *
1413 **/ 1414 **/
1414static void scsi_eh_ready_devs(struct Scsi_Host *shost, 1415void scsi_eh_ready_devs(struct Scsi_Host *shost,
1415 struct list_head *work_q, 1416 struct list_head *work_q,
1416 struct list_head *done_q) 1417 struct list_head *done_q)
1417{ 1418{
1418 if (!scsi_eh_stu(shost, work_q, done_q)) 1419 if (!scsi_eh_stu(shost, work_q, done_q))
1419 if (!scsi_eh_bus_device_reset(shost, work_q, done_q)) 1420 if (!scsi_eh_bus_device_reset(shost, work_q, done_q))
@@ -1421,6 +1422,7 @@ static void scsi_eh_ready_devs(struct Scsi_Host *shost,
1421 if (!scsi_eh_host_reset(work_q, done_q)) 1422 if (!scsi_eh_host_reset(work_q, done_q))
1422 scsi_eh_offline_sdevs(work_q, done_q); 1423 scsi_eh_offline_sdevs(work_q, done_q);
1423} 1424}
1425EXPORT_SYMBOL_GPL(scsi_eh_ready_devs);
1424 1426
1425/** 1427/**
1426 * scsi_eh_flush_done_q - finish processed commands or retry them. 1428 * scsi_eh_flush_done_q - finish processed commands or retry them.
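
scsi_eh_get_sense() and scsi_eh_ready_devs() lose their static qualifier and are exported (GPL), with declarations added to scsi_priv.h below, so a transport class inside drivers/scsi can assemble its own recovery strategy from the standard building blocks instead of duplicating them. A hedged sketch of such a routine, in the spirit of what libsas needs; this is a simplified skeleton, not an existing function.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include "scsi_priv.h"	/* scsi_eh_get_sense(), scsi_eh_ready_devs() */

/* Sketch: a custom host recovery pass built from the exported helpers. */
static void my_recover_host(struct Scsi_Host *shost)
{
	LIST_HEAD(eh_work_q);
	LIST_HEAD(eh_done_q);

	spin_lock_irq(shost->host_lock);
	list_splice_init(&shost->eh_cmd_q, &eh_work_q);
	spin_unlock_irq(shost->host_lock);

	/* collect sense data first, then fall back to the generic escalation
	 * (start unit, device reset, bus reset, host reset, offline) */
	if (!scsi_eh_get_sense(&eh_work_q, &eh_done_q))
		scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q);

	scsi_eh_flush_done_q(&eh_done_q);
}
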
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f02f48a882a9..503f09c2f05f 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1400,7 +1400,7 @@ static void scsi_softirq_done(struct request *rq)
1400 scsi_finish_command(cmd); 1400 scsi_finish_command(cmd);
1401 break; 1401 break;
1402 case NEEDS_RETRY: 1402 case NEEDS_RETRY:
1403 scsi_retry_command(cmd); 1403 scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
1404 break; 1404 break;
1405 case ADD_TO_MLQUEUE: 1405 case ADD_TO_MLQUEUE:
1406 scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY); 1406 scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index f458c2f686d2..ee8efe849bf4 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -28,7 +28,6 @@ extern int scsi_dispatch_cmd(struct scsi_cmnd *cmd);
28extern int scsi_setup_command_freelist(struct Scsi_Host *shost); 28extern int scsi_setup_command_freelist(struct Scsi_Host *shost);
29extern void scsi_destroy_command_freelist(struct Scsi_Host *shost); 29extern void scsi_destroy_command_freelist(struct Scsi_Host *shost);
30extern void __scsi_done(struct scsi_cmnd *cmd); 30extern void __scsi_done(struct scsi_cmnd *cmd);
31extern int scsi_retry_command(struct scsi_cmnd *cmd);
32#ifdef CONFIG_SCSI_LOGGING 31#ifdef CONFIG_SCSI_LOGGING
33void scsi_log_send(struct scsi_cmnd *cmd); 32void scsi_log_send(struct scsi_cmnd *cmd);
34void scsi_log_completion(struct scsi_cmnd *cmd, int disposition); 33void scsi_log_completion(struct scsi_cmnd *cmd, int disposition);
@@ -58,6 +57,11 @@ extern int scsi_error_handler(void *host);
58extern int scsi_decide_disposition(struct scsi_cmnd *cmd); 57extern int scsi_decide_disposition(struct scsi_cmnd *cmd);
59extern void scsi_eh_wakeup(struct Scsi_Host *shost); 58extern void scsi_eh_wakeup(struct Scsi_Host *shost);
60extern int scsi_eh_scmd_add(struct scsi_cmnd *, int); 59extern int scsi_eh_scmd_add(struct scsi_cmnd *, int);
60void scsi_eh_ready_devs(struct Scsi_Host *shost,
61 struct list_head *work_q,
62 struct list_head *done_q);
63int scsi_eh_get_sense(struct list_head *work_q,
64 struct list_head *done_q);
61 65
62/* scsi_lib.c */ 66/* scsi_lib.c */
63extern int scsi_maybe_unblock_host(struct scsi_device *sdev); 67extern int scsi_maybe_unblock_host(struct scsi_device *sdev);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index b83d03c4deef..8160c00d1092 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1029,7 +1029,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
1029 1029
1030 sdev_printk(KERN_INFO, sdev, 1030 sdev_printk(KERN_INFO, sdev,
1031 "scsi scan: consider passing scsi_mod." 1031 "scsi scan: consider passing scsi_mod."
1032 "dev_flags=%s:%s:0x240 or 0x800240\n", 1032 "dev_flags=%s:%s:0x240 or 0x1000240\n",
1033 scsi_inq_str(vend, result, 8, 16), 1033 scsi_inq_str(vend, result, 8, 16),
1034 scsi_inq_str(mod, result, 16, 32)); 1034 scsi_inq_str(mod, result, 16, 32));
1035 }); 1035 });
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 5c0b75bbfa10..6d39150e205b 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -336,6 +336,51 @@ show_sas_device_type(struct class_device *cdev, char *buf)
336} 336}
337static CLASS_DEVICE_ATTR(device_type, S_IRUGO, show_sas_device_type, NULL); 337static CLASS_DEVICE_ATTR(device_type, S_IRUGO, show_sas_device_type, NULL);
338 338
339static ssize_t do_sas_phy_enable(struct class_device *cdev,
340 size_t count, int enable)
341{
342 struct sas_phy *phy = transport_class_to_phy(cdev);
343 struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
344 struct sas_internal *i = to_sas_internal(shost->transportt);
345 int error;
346
347 error = i->f->phy_enable(phy, enable);
348 if (error)
349 return error;
350 phy->enabled = enable;
351 return count;
352};
353
354static ssize_t store_sas_phy_enable(struct class_device *cdev,
355 const char *buf, size_t count)
356{
357 if (count < 1)
358 return -EINVAL;
359
360 switch (buf[0]) {
361 case '0':
362 do_sas_phy_enable(cdev, count, 0);
363 break;
364 case '1':
365 do_sas_phy_enable(cdev, count, 1);
366 break;
367 default:
368 return -EINVAL;
369 }
370
371 return count;
372}
373
374static ssize_t show_sas_phy_enable(struct class_device *cdev, char *buf)
375{
376 struct sas_phy *phy = transport_class_to_phy(cdev);
377
378 return snprintf(buf, 20, "%d", phy->enabled);
379}
380
381static CLASS_DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, show_sas_phy_enable,
382 store_sas_phy_enable);
383
339static ssize_t do_sas_phy_reset(struct class_device *cdev, 384static ssize_t do_sas_phy_reset(struct class_device *cdev,
340 size_t count, int hard_reset) 385 size_t count, int hard_reset)
341{ 386{
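
The new enable attribute is created only when the LLDD provides a phy_enable method in its sas_function_template (wired up via the SETUP_OPTIONAL_PHY_ATTRIBUTE_RW macro added further down). Here is a sketch of the driver side; the driver name and stub body are hypothetical, and a real implementation would program the controller.

#include <linux/kernel.h>
#include <scsi/scsi_transport_sas.h>

/* Sketch: LLDD-side phy_enable handler; return 0 on success, -errno on error. */
static int my_phy_enable(struct sas_phy *phy, int enable)
{
	printk(KERN_INFO "my_sas: phy%d %s\n", phy->number,
	       enable ? "enabled" : "disabled");
	return 0;
}

static struct sas_function_template my_sas_transport_ops = {
	/* ... other transport methods ... */
	.phy_enable	= my_phy_enable,
};

With that in place, writing 0 or 1 to the phy's enable file under /sys/class/sas_phy/ reaches the handler through store_sas_phy_enable() above, and reads report phy->enabled.
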
@@ -435,6 +480,7 @@ struct sas_phy *sas_phy_alloc(struct device *parent, int number)
435 return NULL; 480 return NULL;
436 481
437 phy->number = number; 482 phy->number = number;
483 phy->enabled = 1;
438 484
439 device_initialize(&phy->dev); 485 device_initialize(&phy->dev);
440 phy->dev.parent = get_device(parent); 486 phy->dev.parent = get_device(parent);
@@ -579,8 +625,19 @@ static void sas_port_release(struct device *dev)
579static void sas_port_create_link(struct sas_port *port, 625static void sas_port_create_link(struct sas_port *port,
580 struct sas_phy *phy) 626 struct sas_phy *phy)
581{ 627{
582 sysfs_create_link(&port->dev.kobj, &phy->dev.kobj, phy->dev.bus_id); 628 int res;
583 sysfs_create_link(&phy->dev.kobj, &port->dev.kobj, "port"); 629
630 res = sysfs_create_link(&port->dev.kobj, &phy->dev.kobj,
631 phy->dev.bus_id);
632 if (res)
633 goto err;
634 res = sysfs_create_link(&phy->dev.kobj, &port->dev.kobj, "port");
635 if (res)
636 goto err;
637 return;
638err:
639 printk(KERN_ERR "%s: Cannot create port links, err=%d\n",
640 __FUNCTION__, res);
584} 641}
585 642
586static void sas_port_delete_link(struct sas_port *port, 643static void sas_port_delete_link(struct sas_port *port,
@@ -818,13 +875,20 @@ EXPORT_SYMBOL(sas_port_delete_phy);
818 875
819void sas_port_mark_backlink(struct sas_port *port) 876void sas_port_mark_backlink(struct sas_port *port)
820{ 877{
878 int res;
821 struct device *parent = port->dev.parent->parent->parent; 879 struct device *parent = port->dev.parent->parent->parent;
822 880
823 if (port->is_backlink) 881 if (port->is_backlink)
824 return; 882 return;
825 port->is_backlink = 1; 883 port->is_backlink = 1;
826 sysfs_create_link(&port->dev.kobj, &parent->kobj, 884 res = sysfs_create_link(&port->dev.kobj, &parent->kobj,
827 parent->bus_id); 885 parent->bus_id);
886 if (res)
887 goto err;
888 return;
889err:
890 printk(KERN_ERR "%s: Cannot create port backlink, err=%d\n",
891 __FUNCTION__, res);
828 892
829} 893}
830EXPORT_SYMBOL(sas_port_mark_backlink); 894EXPORT_SYMBOL(sas_port_mark_backlink);
@@ -1237,7 +1301,7 @@ int sas_rphy_add(struct sas_rphy *rphy)
1237 if (identify->device_type == SAS_END_DEVICE && 1301 if (identify->device_type == SAS_END_DEVICE &&
1238 rphy->scsi_target_id != -1) { 1302 rphy->scsi_target_id != -1) {
1239 scsi_scan_target(&rphy->dev, 0, 1303 scsi_scan_target(&rphy->dev, 0,
1240 rphy->scsi_target_id, ~0, 0); 1304 rphy->scsi_target_id, SCAN_WILD_CARD, 0);
1241 } 1305 }
1242 1306
1243 return 0; 1307 return 0;
@@ -1253,7 +1317,7 @@ EXPORT_SYMBOL(sas_rphy_add);
1253 * Note: 1317 * Note:
1254 * This function must only be called on a remote 1318 * This function must only be called on a remote
1255 * PHY that has not successfully been added using 1319 * PHY that has not successfully been added using
1256 * sas_rphy_add(). 1320 * sas_rphy_add() (or has been sas_rphy_remove()'d)
1257 */ 1321 */
1258void sas_rphy_free(struct sas_rphy *rphy) 1322void sas_rphy_free(struct sas_rphy *rphy)
1259{ 1323{
@@ -1272,18 +1336,30 @@ void sas_rphy_free(struct sas_rphy *rphy)
1272EXPORT_SYMBOL(sas_rphy_free); 1336EXPORT_SYMBOL(sas_rphy_free);
1273 1337
1274/** 1338/**
1275 * sas_rphy_delete -- remove SAS remote PHY 1339 * sas_rphy_delete -- remove and free SAS remote PHY
1276 * @rphy: SAS remote PHY to remove 1340 * @rphy: SAS remote PHY to remove and free
1277 * 1341 *
1278 * Removes the specified SAS remote PHY. 1342 * Removes the specified SAS remote PHY and frees it.
1279 */ 1343 */
1280void 1344void
1281sas_rphy_delete(struct sas_rphy *rphy) 1345sas_rphy_delete(struct sas_rphy *rphy)
1282{ 1346{
1347 sas_rphy_remove(rphy);
1348 sas_rphy_free(rphy);
1349}
1350EXPORT_SYMBOL(sas_rphy_delete);
1351
1352/**
1353 * sas_rphy_remove -- remove SAS remote PHY
1354 * @rphy: SAS remote phy to remove
1355 *
1356 * Removes the specified SAS remote PHY.
1357 */
1358void
1359sas_rphy_remove(struct sas_rphy *rphy)
1360{
1283 struct device *dev = &rphy->dev; 1361 struct device *dev = &rphy->dev;
1284 struct sas_port *parent = dev_to_sas_port(dev->parent); 1362 struct sas_port *parent = dev_to_sas_port(dev->parent);
1285 struct Scsi_Host *shost = dev_to_shost(parent->dev.parent);
1286 struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
1287 1363
1288 switch (rphy->identify.device_type) { 1364 switch (rphy->identify.device_type) {
1289 case SAS_END_DEVICE: 1365 case SAS_END_DEVICE:
@@ -1299,17 +1375,10 @@ sas_rphy_delete(struct sas_rphy *rphy)
1299 1375
1300 transport_remove_device(dev); 1376 transport_remove_device(dev);
1301 device_del(dev); 1377 device_del(dev);
1302 transport_destroy_device(dev);
1303
1304 mutex_lock(&sas_host->lock);
1305 list_del(&rphy->list);
1306 mutex_unlock(&sas_host->lock);
1307 1378
1308 parent->rphy = NULL; 1379 parent->rphy = NULL;
1309
1310 put_device(dev);
1311} 1380}
1312EXPORT_SYMBOL(sas_rphy_delete); 1381EXPORT_SYMBOL(sas_rphy_remove);
1313 1382
1314/** 1383/**
1315 * scsi_is_sas_rphy -- check if a struct device represents a SAS remote PHY 1384 * scsi_is_sas_rphy -- check if a struct device represents a SAS remote PHY
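
sas_rphy_delete() is now a thin wrapper that first unregisters the remote PHY with sas_rphy_remove() and then drops it with sas_rphy_free(), so callers that need the two stages separately can use them directly. A hedged sketch follows, assuming the new sas_rphy_remove() declaration sits next to sas_rphy_free() in scsi_transport_sas.h; the cleanup step in the middle is hypothetical.

#include <scsi/scsi_transport_sas.h>

/* Sketch: two-stage teardown for callers that cannot use sas_rphy_delete(). */
static void my_unplug_end_device(struct sas_rphy *rphy)
{
	sas_rphy_remove(rphy);	/* unregister from the transport class and SCSI */
	/* ... driver-private cleanup that may still look at rphy ... */
	sas_rphy_free(rphy);	/* then drop the device reference and free it */
}
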
@@ -1389,6 +1458,10 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel,
1389 SETUP_TEMPLATE_RW(phy_attrs, field, S_IRUGO | S_IWUSR, 1, \ 1458 SETUP_TEMPLATE_RW(phy_attrs, field, S_IRUGO | S_IWUSR, 1, \
1390 !i->f->set_phy_speed, S_IRUGO) 1459 !i->f->set_phy_speed, S_IRUGO)
1391 1460
1461#define SETUP_OPTIONAL_PHY_ATTRIBUTE_RW(field, func) \
1462 SETUP_TEMPLATE_RW(phy_attrs, field, S_IRUGO | S_IWUSR, 1, \
1463 !i->f->func, S_IRUGO)
1464
1392#define SETUP_PORT_ATTRIBUTE(field) \ 1465#define SETUP_PORT_ATTRIBUTE(field) \
1393 SETUP_TEMPLATE(port_attrs, field, S_IRUGO, 1) 1466 SETUP_TEMPLATE(port_attrs, field, S_IRUGO, 1)
1394 1467
@@ -1396,10 +1469,10 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel,
1396 SETUP_TEMPLATE(phy_attrs, field, S_IRUGO, i->f->func) 1469 SETUP_TEMPLATE(phy_attrs, field, S_IRUGO, i->f->func)
1397 1470
1398#define SETUP_PHY_ATTRIBUTE_WRONLY(field) \ 1471#define SETUP_PHY_ATTRIBUTE_WRONLY(field) \
1399 SETUP_TEMPLATE(phy_attrs, field, S_IWUGO, 1) 1472 SETUP_TEMPLATE(phy_attrs, field, S_IWUSR, 1)
1400 1473
1401#define SETUP_OPTIONAL_PHY_ATTRIBUTE_WRONLY(field, func) \ 1474#define SETUP_OPTIONAL_PHY_ATTRIBUTE_WRONLY(field, func) \
1402 SETUP_TEMPLATE(phy_attrs, field, S_IWUGO, i->f->func) 1475 SETUP_TEMPLATE(phy_attrs, field, S_IWUSR, i->f->func)
1403 1476
1404#define SETUP_END_DEV_ATTRIBUTE(field) \ 1477#define SETUP_END_DEV_ATTRIBUTE(field) \
1405 SETUP_TEMPLATE(end_dev_attrs, field, S_IRUGO, 1) 1478 SETUP_TEMPLATE(end_dev_attrs, field, S_IRUGO, 1)
@@ -1479,6 +1552,7 @@ sas_attach_transport(struct sas_function_template *ft)
1479 SETUP_PHY_ATTRIBUTE(phy_reset_problem_count); 1552 SETUP_PHY_ATTRIBUTE(phy_reset_problem_count);
1480 SETUP_OPTIONAL_PHY_ATTRIBUTE_WRONLY(link_reset, phy_reset); 1553 SETUP_OPTIONAL_PHY_ATTRIBUTE_WRONLY(link_reset, phy_reset);
1481 SETUP_OPTIONAL_PHY_ATTRIBUTE_WRONLY(hard_reset, phy_reset); 1554 SETUP_OPTIONAL_PHY_ATTRIBUTE_WRONLY(hard_reset, phy_reset);
1555 SETUP_OPTIONAL_PHY_ATTRIBUTE_RW(enable, phy_enable);
1482 i->phy_attrs[count] = NULL; 1556 i->phy_attrs[count] = NULL;
1483 1557
1484 count = 0; 1558 count = 0;
@@ -1587,7 +1661,7 @@ static void __exit sas_transport_exit(void)
1587} 1661}
1588 1662
1589MODULE_AUTHOR("Christoph Hellwig"); 1663MODULE_AUTHOR("Christoph Hellwig");
1590MODULE_DESCRIPTION("SAS Transphy Attributes"); 1664MODULE_DESCRIPTION("SAS Transport Attributes");
1591MODULE_LICENSE("GPL"); 1665MODULE_LICENSE("GPL");
1592 1666
1593module_init(sas_transport_init); 1667module_init(sas_transport_init);
diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c
index 551baccec523..018c65f73ac4 100644
--- a/drivers/scsi/sim710.c
+++ b/drivers/scsi/sim710.c
@@ -123,6 +123,7 @@ sim710_probe_common(struct device *dev, unsigned long base_addr,
123 hostdata->differential = differential; 123 hostdata->differential = differential;
124 hostdata->clock = clock; 124 hostdata->clock = clock;
125 hostdata->chip710 = 1; 125 hostdata->chip710 = 1;
126 hostdata->burst_length = 8;
126 127
127 /* and register the chip */ 128 /* and register the chip */
128 if((host = NCR_700_detect(&sim710_driver_template, hostdata, dev)) 129 if((host = NCR_700_detect(&sim710_driver_template, hostdata, dev))
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
new file mode 100644
index 000000000000..6bc505115841
--- /dev/null
+++ b/drivers/scsi/sni_53c710.c
@@ -0,0 +1,159 @@
1/* -*- mode: c; c-basic-offset: 8 -*- */
2
3/* SNI RM driver
4 *
5 * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
6**-----------------------------------------------------------------------------
7**
8** This program is free software; you can redistribute it and/or modify
9** it under the terms of the GNU General Public License as published by
10** the Free Software Foundation; either version 2 of the License, or
11** (at your option) any later version.
12**
13** This program is distributed in the hope that it will be useful,
14** but WITHOUT ANY WARRANTY; without even the implied warranty of
15** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16** GNU General Public License for more details.
17**
18** You should have received a copy of the GNU General Public License
19** along with this program; if not, write to the Free Software
20** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21**
22**-----------------------------------------------------------------------------
23 */
24
25/*
26 * Based on lasi700.c
27 */
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/init.h>
32#include <linux/types.h>
33#include <linux/stat.h>
34#include <linux/mm.h>
35#include <linux/blkdev.h>
36#include <linux/sched.h>
37#include <linux/ioport.h>
38#include <linux/dma-mapping.h>
39#include <linux/platform_device.h>
40
41#include <asm/page.h>
42#include <asm/pgtable.h>
43#include <asm/irq.h>
44#include <asm/delay.h>
45
46#include <scsi/scsi_host.h>
47#include <scsi/scsi_device.h>
48#include <scsi/scsi_transport.h>
49#include <scsi/scsi_transport_spi.h>
50
51#include "53c700.h"
52
53MODULE_AUTHOR("Thomas Bogendörfer");
54MODULE_DESCRIPTION("SNI RM 53c710 SCSI Driver");
55MODULE_LICENSE("GPL");
56
57#define SNIRM710_CLOCK 32
58
59static struct scsi_host_template snirm710_template = {
60 .name = "SNI RM SCSI 53c710",
61 .proc_name = "snirm_53c710",
62 .this_id = 7,
63 .module = THIS_MODULE,
64};
65
66static int __init snirm710_probe(struct platform_device *dev)
67{
68 unsigned long base;
69 struct NCR_700_Host_Parameters *hostdata;
70 struct Scsi_Host *host;
71 struct resource *res;
72
73 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
74 if (!res)
75 return -ENODEV;
76
77 base = res->start;
78 hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
79 if (!hostdata) {
80 printk(KERN_ERR "%s: Failed to allocate host data\n",
81 dev->dev.bus_id);
82 return -ENOMEM;
83 }
84
85 hostdata->dev = &dev->dev;
86 dma_set_mask(&dev->dev, DMA_32BIT_MASK);
87 hostdata->base = ioremap_nocache(CPHYSADDR(base), 0x100);
88 hostdata->differential = 0;
89
90 hostdata->clock = SNIRM710_CLOCK;
91 hostdata->force_le_on_be = 1;
92 hostdata->chip710 = 1;
93 hostdata->burst_length = 4;
94
95 host = NCR_700_detect(&snirm710_template, hostdata, &dev->dev);
96 if (!host)
97 goto out_kfree;
98 host->this_id = 7;
99 host->base = base;
100 host->irq = platform_get_irq(dev, 0);
101 if(request_irq(host->irq, NCR_700_intr, SA_SHIRQ, "snirm710", host)) {
102 printk(KERN_ERR "snirm710: request_irq failed!\n");
103 goto out_put_host;
104 }
105
106 dev_set_drvdata(&dev->dev, host);
107 scsi_scan_host(host);
108
109 return 0;
110
111 out_put_host:
112 scsi_host_put(host);
113 out_kfree:
114 iounmap(hostdata->base);
115 kfree(hostdata);
116 return -ENODEV;
117}
118
119static int __exit snirm710_driver_remove(struct platform_device *dev)
120{
121 struct Scsi_Host *host = dev_get_drvdata(&dev->dev);
122 struct NCR_700_Host_Parameters *hostdata =
123 (struct NCR_700_Host_Parameters *)host->hostdata[0];
124
125 scsi_remove_host(host);
126 NCR_700_release(host);
127 free_irq(host->irq, host);
128 iounmap(hostdata->base);
129 kfree(hostdata);
130
131 return 0;
132}
133
134static struct platform_driver snirm710_driver = {
135 .probe = snirm710_probe,
136 .remove = __devexit_p(snirm710_driver_remove),
137 .driver = {
138 .name = "snirm_53c710",
139 },
140};
141
142static int __init snirm710_init(void)
143{
144 int err;
145
146 if ((err = platform_driver_register(&snirm710_driver))) {
147 printk(KERN_ERR "Driver registration failed\n");
148 return err;
149 }
150 return 0;
151}
152
153static void __exit snirm710_exit(void)
154{
155 platform_driver_unregister(&snirm710_driver);
156}
157
158module_init(snirm710_init);
159module_exit(snirm710_exit);
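
The new sni_53c710 driver binds purely by platform-bus name, so nothing probes until the SNI RM platform code registers a matching "snirm_53c710" device carrying an MMIO resource and an IRQ. A hedged sketch of such a registration follows; the base address and IRQ number are placeholders, not real RM firmware values.

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* Sketch only: board-side registration the snirm_53c710 driver would bind to. */
static struct resource snirm710_rsrc[] = {
	{
		.start	= 0xb9000000,	/* placeholder MMIO base */
		.end	= 0xb90000ff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 19,		/* placeholder IRQ line */
		.end	= 19,
		.flags	= IORESOURCE_IRQ,
	},
};

static int __init snirm710_board_setup(void)
{
	struct platform_device *pdev;

	pdev = platform_device_register_simple("snirm_53c710", -1,
					       snirm710_rsrc,
					       ARRAY_SIZE(snirm710_rsrc));
	return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
}
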