author    Linus Torvalds <torvalds@linux-foundation.org>  2013-02-28 15:43:43 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-02-28 15:43:43 -0500
commit    21f3b24da9328415792efc780f50b9f434c12465 (patch)
tree      446ad6d2154e0f05bcb079cb99a144102c682eb9
parent    2a7d2b96d5cba7568139d9ab157a0e97ab32440f (diff)
parent    2b4df6ea53d05625e9ca2dd73bc0e831976e009d (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull first round of SCSI updates from James Bottomley:
 "The patch set is mostly driver updates (bnx2fc, ipr, lpfc, qla4) and
  a few bug fixes"

Pull delayed because google hates James, and sneakily considers his
pull requests spam.  Why, google, why?

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (60 commits)
  [SCSI] aacraid: 1024 max outstanding command support for Series 7 and above
  [SCSI] bnx2fc: adjust duplicate test
  [SCSI] qla4xxx: Update driver version to 5.03.00-k4
  [SCSI] qla4xxx: Fix return code for qla4xxx_session_get_param.
  [SCSI] qla4xxx: wait for boot target login response during probe.
  [SCSI] qla4xxx: Added support for force firmware dump
  [SCSI] qla4xxx: Re-register IRQ handler while retrying initialize of adapter
  [SCSI] qla4xxx: Throttle active IOCBs to firmware limits
  [SCSI] qla4xxx: Remove unnecessary code from qla4xxx_init_local_data
  [SCSI] qla4xxx: Quiesce driver activities while loopback
  [SCSI] qla4xxx: Rename MBOX_ASTS_IDC_NOTIFY to MBOX_ASTS_IDC_REQUEST_NOTIFICATION
  [SCSI] qla4xxx: Add spurious interrupt messages under debug level 2
  [SCSI] cxgb4i: Remove the scsi host device when removing device
  [SCSI] bfa: fix strncpy() limiter in bfad_start_ops()
  [SCSI] qla4xxx: Update driver version to 5.03.00-k3
  [SCSI] qla4xxx: Correct the validation to check in get_sys_info mailbox
  [SCSI] qla4xxx: Pass correct function param to qla4_8xxx_rd_direct
  [SCSI] lpfc 8.3.37: Update lpfc version for 8.3.37 driver release
  [SCSI] lpfc 8.3.37: Fixed infinite loop in lpfc_sli4_fcf_rr_next_index_get.
  [SCSI] lpfc 8.3.37: Fixed crash due to SLI Port invalid resource count
  ...
 MAINTAINERS                                  |    7
 drivers/scsi/aacraid/aacraid.h               |    8
 drivers/scsi/aacraid/comminit.c              |   11
 drivers/scsi/bfa/bfad.c                      |    2
 drivers/scsi/bnx2fc/bnx2fc.h                 |   27
 drivers/scsi/bnx2fc/bnx2fc_fcoe.c            |   21
 drivers/scsi/bnx2fc/bnx2fc_hwi.c             |   29
 drivers/scsi/bnx2fc/bnx2fc_io.c              |    8
 drivers/scsi/bnx2fc/bnx2fc_tgt.c             |   95
 drivers/scsi/bnx2i/bnx2i_hwi.c               |    2
 drivers/scsi/csiostor/csio_hw.c              |   15
 drivers/scsi/csiostor/csio_init.c            |    9
 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c           |    1
 drivers/scsi/fnic/fnic_fcs.c                 |    6
 drivers/scsi/gdth.c                          |   10
 drivers/scsi/ipr.c                           | 1296
 drivers/scsi/ipr.h                           |  101
 drivers/scsi/lpfc/lpfc.h                     |   17
 drivers/scsi/lpfc/lpfc_bsg.c                 |   51
 drivers/scsi/lpfc/lpfc_crtn.h                |    4
 drivers/scsi/lpfc/lpfc_ct.c                  |   37
 drivers/scsi/lpfc/lpfc_els.c                 |   25
 drivers/scsi/lpfc/lpfc_hw.h                  |    2
 drivers/scsi/lpfc/lpfc_hw4.h                 |  176
 drivers/scsi/lpfc/lpfc_init.c                |  251
 drivers/scsi/lpfc/lpfc_nportdisc.c           |   10
 drivers/scsi/lpfc/lpfc_scsi.c                |   24
 drivers/scsi/lpfc/lpfc_sli.c                 |  378
 drivers/scsi/lpfc/lpfc_sli4.h                |    8
 drivers/scsi/lpfc/lpfc_version.h             |    2
 drivers/scsi/mpt2sas/mpt2sas_base.c          |   19
 drivers/scsi/mpt2sas/mpt2sas_base.h          |    1
 drivers/scsi/mpt3sas/mpt3sas_base.c          |    5
 drivers/scsi/mpt3sas/mpt3sas_config.c        |    1
 drivers/scsi/mpt3sas/mpt3sas_ctl.c           |    3
 drivers/scsi/mpt3sas/mpt3sas_scsih.c         |    3
 drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c  |    1
 drivers/scsi/qla4xxx/ql4_83xx.c              |   55
 drivers/scsi/qla4xxx/ql4_attr.c              |   16
 drivers/scsi/qla4xxx/ql4_def.h               |   10
 drivers/scsi/qla4xxx/ql4_fw.h                |    6
 drivers/scsi/qla4xxx/ql4_glbl.h              |    6
 drivers/scsi/qla4xxx/ql4_init.c              |   17
 drivers/scsi/qla4xxx/ql4_iocb.c              |    2
 drivers/scsi/qla4xxx/ql4_isr.c               |   68
 drivers/scsi/qla4xxx/ql4_mbx.c               |   61
 drivers/scsi/qla4xxx/ql4_nx.c                |   14
 drivers/scsi/qla4xxx/ql4_os.c                |  160
 drivers/scsi/qla4xxx/ql4_version.h           |    2
 drivers/scsi/scsi_transport_iscsi.c          |   12
 50 files changed, 2169 insertions(+), 926 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 0b4bb157a482..0ade02d5ee6d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1800,7 +1800,8 @@ F: drivers/bcma/
 F:	include/linux/bcma/
 
 BROCADE BFA FC SCSI DRIVER
-M:	Krishna C Gudipati <kgudipat@brocade.com>
+M:	Anil Gurumurthy <agurumur@brocade.com>
+M:	Vijaya Mohan Guvva <vmohan@brocade.com>
 L:	linux-scsi@vger.kernel.org
 S:	Supported
 F:	drivers/scsi/bfa/
@@ -2074,8 +2075,8 @@ S: Maintained
 F:	include/linux/clk.h
 
 CISCO FCOE HBA DRIVER
-M:	Abhijeet Joglekar <abjoglek@cisco.com>
-M:	Venkata Siva Vijayendra Bhamidipati <vbhamidi@cisco.com>
+M:	Hiral Patel <hiralpat@cisco.com>
+M:	Suma Ramars <sramars@cisco.com>
 M:	Brian Uchino <buchino@cisco.com>
 L:	linux-scsi@vger.kernel.org
 S:	Supported
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 742f5d7eb0f5..a6f7190c09a4 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,13 +12,13 @@
  *----------------------------------------------------------------------------*/
 
 #ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 29801
+# define AAC_DRIVER_BUILD 30000
 # define AAC_DRIVER_BRANCH "-ms"
 #endif
 #define MAXIMUM_NUM_CONTAINERS	32
 
 #define AAC_NUM_MGT_FIB		8
-#define AAC_NUM_IO_FIB		(512 - AAC_NUM_MGT_FIB)
+#define AAC_NUM_IO_FIB		(1024 - AAC_NUM_MGT_FIB)
 #define AAC_NUM_FIB		(AAC_NUM_IO_FIB + AAC_NUM_MGT_FIB)
 
 #define AAC_MAX_LUN		(8)
@@ -36,6 +36,10 @@
 #define CONTAINER_TO_ID(cont)		(cont)
 #define CONTAINER_TO_LUN(cont)		(0)
 
+#define PMC_DEVICE_S7	0x28c
+#define PMC_DEVICE_S8	0x28d
+#define PMC_DEVICE_S9	0x28f
+
 #define aac_phys_to_logical(x)  ((x)+1)
 #define aac_logical_to_phys(x)  ((x)?(x)-1:0)
 
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 8e5d3be16127..3f759957f4b4 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -404,7 +404,13 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
 		dev->max_fib_size = status[1] & 0xFFE0;
 		host->sg_tablesize = status[2] >> 16;
 		dev->sg_tablesize = status[2] & 0xFFFF;
-		host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB;
+		if (dev->pdev->device == PMC_DEVICE_S7 ||
+		    dev->pdev->device == PMC_DEVICE_S8 ||
+		    dev->pdev->device == PMC_DEVICE_S9)
+			host->can_queue = ((status[3] >> 16) ? (status[3] >> 16) :
+				(status[3] & 0xFFFF)) - AAC_NUM_MGT_FIB;
+		else
+			host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB;
 		dev->max_num_aif = status[4] & 0xFFFF;
 		/*
 		 *	NOTE:
@@ -452,6 +458,9 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
 		}
 	}
 
+	if (host->can_queue > AAC_NUM_IO_FIB)
+		host->can_queue = AAC_NUM_IO_FIB;
+
 	/*
 	 *	Ok now init the communication subsystem
 	 */
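The two comminit.c hunks above are the "1024 max outstanding command support for Series 7 and above" commit from the pull: on PMC Series 7/8/9 adapters the INIT reply may carry an extended queue depth in the high 16 bits of status[3], with the low word as a fallback, and the result is then clamped to AAC_NUM_IO_FIB (now 1024 - 8). A minimal userspace sketch of that selection, with made-up status values:

#include <stdio.h>
#include <stdint.h>

#define AAC_NUM_MGT_FIB	8
#define AAC_NUM_IO_FIB	(1024 - AAC_NUM_MGT_FIB)

/* Mirror of the Series 7+ queue-depth selection: prefer the high 16
 * bits of status[3] when the firmware fills them, else fall back to
 * the low 16 bits, then reserve the management FIBs. */
static int can_queue_from_status(uint32_t status3, int is_series7_plus)
{
	int can_queue;

	if (is_series7_plus)
		can_queue = ((status3 >> 16) ? (status3 >> 16)
					     : (status3 & 0xFFFF)) - AAC_NUM_MGT_FIB;
	else
		can_queue = (status3 & 0xFFFF) - AAC_NUM_MGT_FIB;

	if (can_queue > AAC_NUM_IO_FIB)	/* same cap aac_init_adapter applies */
		can_queue = AAC_NUM_IO_FIB;
	return can_queue;
}

int main(void)
{
	/* hypothetical INIT replies: high word populated vs. legacy low word */
	printf("%d\n", can_queue_from_status(0x04000200, 1));	/* 1016 (capped) */
	printf("%d\n", can_queue_from_status(0x00000200, 0));	/* 504 */
	return 0;
}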
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index e6bf12675db8..a5f7690e819e 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -1034,7 +1034,7 @@ bfad_start_ops(struct bfad_s *bfad) {
 			sizeof(driver_info.host_os_patch) - 1);
 
 	strncpy(driver_info.os_device_name, bfad->pci_name,
-		sizeof(driver_info.os_device_name - 1));
+		sizeof(driver_info.os_device_name) - 1);
 
 	/* FCS driver info init */
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
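The bfad.c fix ("fix strncpy() limiter in bfad_start_ops()") moves one parenthesis: `sizeof(driver_info.os_device_name - 1)` measures a pointer expression (the array decays before the subtraction), so the old limiter was the size of a pointer, not of the buffer. A standalone demonstration (buffer size and PCI name are made up):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char os_device_name[128];
	const char *pci_name = "0000:03:00.0";

	/* Buggy form: the array decays to a pointer inside the arithmetic,
	 * so this is sizeof(char *) -- 8 on most 64-bit ABIs, not 128. */
	printf("sizeof(os_device_name - 1) = %zu\n", sizeof(os_device_name - 1));

	/* Fixed form: size of the array, minus one byte kept for the NUL. */
	printf("sizeof(os_device_name) - 1 = %zu\n", sizeof(os_device_name) - 1);

	strncpy(os_device_name, pci_name, sizeof(os_device_name) - 1);
	os_device_name[sizeof(os_device_name) - 1] = '\0';
	printf("%s\n", os_device_name);
	return 0;
}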
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 3486845ba301..50fcd018d14b 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -64,7 +64,7 @@
 #include "bnx2fc_constants.h"
 
 #define BNX2FC_NAME		"bnx2fc"
-#define BNX2FC_VERSION		"1.0.12"
+#define BNX2FC_VERSION		"1.0.13"
 
 #define PFX			"bnx2fc: "
 
@@ -156,6 +156,18 @@
 #define BNX2FC_RELOGIN_WAIT_TIME	200
 #define BNX2FC_RELOGIN_WAIT_CNT		10
 
+#define BNX2FC_STATS(hba, stat, cnt)					\
+	do {								\
+		u32 val;						\
+									\
+		val = fw_stats->stat.cnt;				\
+		if (hba->prev_stats.stat.cnt <= val)			\
+			val -= hba->prev_stats.stat.cnt;		\
+		else							\
+			val += (0xfffffff - hba->prev_stats.stat.cnt);	\
+		hba->bfw_stats.cnt += val;				\
+	} while (0)
+
 /* bnx2fc driver uses only one instance of fcoe_percpu_s */
 extern struct fcoe_percpu_s bnx2fc_global;
 
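BNX2FC_STATS() folds a wrapping firmware counter into a 64-bit running total by diffing against the snapshot saved at the previous query (hba->prev_stats, refreshed via memcpy() in bnx2fc_get_host_stats() further down); a reading below the snapshot is treated as one wrap. A self-contained sketch of the same arithmetic -- note the macro's 0xfffffff constant is kept here even though a strict mod-2^28 delta would use 0x10000000:

#include <stdio.h>
#include <stdint.h>

/* Fold one reading of a wrapping firmware counter into a 64-bit total,
 * mirroring BNX2FC_STATS(): if the reading dropped below the previous
 * snapshot, assume a single wrap of the counter. */
static void accumulate(uint64_t *total, uint32_t *prev, uint32_t cur)
{
	uint32_t val = cur;

	if (*prev <= val)
		val -= *prev;
	else
		val += 0xfffffff - *prev;
	*total += val;
	*prev = cur;	/* snapshot for the next query, like prev_stats */
}

int main(void)
{
	uint64_t tx_pkts = 0;
	uint32_t prev = 0;

	accumulate(&tx_pkts, &prev, 0xffffff0);	/* normal growth */
	accumulate(&tx_pkts, &prev, 0x0000010);	/* counter wrapped */
	printf("total = %llu\n", (unsigned long long)tx_pkts);
	return 0;
}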
@@ -167,6 +179,14 @@ struct bnx2fc_percpu_s {
 	spinlock_t fp_work_lock;
 };
 
+struct bnx2fc_fw_stats {
+	u64	fc_crc_cnt;
+	u64	fcoe_tx_pkt_cnt;
+	u64	fcoe_rx_pkt_cnt;
+	u64	fcoe_tx_byte_cnt;
+	u64	fcoe_rx_byte_cnt;
+};
+
 struct bnx2fc_hba {
 	struct list_head list;
 	struct cnic_dev *cnic;
@@ -207,6 +227,8 @@ struct bnx2fc_hba {
 	struct bnx2fc_rport **tgt_ofld_list;
 
 	/* statistics */
+	struct bnx2fc_fw_stats bfw_stats;
+	struct fcoe_statistics_params prev_stats;
 	struct fcoe_statistics_params *stats_buffer;
 	dma_addr_t stats_buf_dma;
 	struct completion stat_req_done;
@@ -280,6 +302,7 @@ struct bnx2fc_rport {
 #define BNX2FC_FLAG_UPLD_REQ_COMPL	0x7
 #define BNX2FC_FLAG_EXPL_LOGO		0x8
 #define BNX2FC_FLAG_DISABLE_FAILED	0x9
+#define BNX2FC_FLAG_ENABLED		0xa
 
 	u8 src_addr[ETH_ALEN];
 	u32 max_sqes;
@@ -468,6 +491,8 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba);
 int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba);
 int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
 					struct bnx2fc_rport *tgt);
+int bnx2fc_send_session_enable_req(struct fcoe_port *port,
+					struct bnx2fc_rport *tgt);
 int bnx2fc_send_session_disable_req(struct fcoe_port *port,
 					struct bnx2fc_rport *tgt);
 int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 70ecd953a579..6401db494ef5 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
 
 #define DRV_MODULE_NAME		"bnx2fc"
 #define DRV_MODULE_VERSION	BNX2FC_VERSION
-#define DRV_MODULE_RELDATE	"Jun 04, 2012"
+#define DRV_MODULE_RELDATE	"Dec 21, 2012"
 
 
 static char version[] =
@@ -687,11 +687,16 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
 		BNX2FC_HBA_DBG(lport, "FW stat req timed out\n");
 		return bnx2fc_stats;
 	}
-	bnx2fc_stats->invalid_crc_count += fw_stats->rx_stat2.fc_crc_cnt;
-	bnx2fc_stats->tx_frames += fw_stats->tx_stat.fcoe_tx_pkt_cnt;
-	bnx2fc_stats->tx_words += (fw_stats->tx_stat.fcoe_tx_byte_cnt) / 4;
-	bnx2fc_stats->rx_frames += fw_stats->rx_stat0.fcoe_rx_pkt_cnt;
-	bnx2fc_stats->rx_words += (fw_stats->rx_stat0.fcoe_rx_byte_cnt) / 4;
+	BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt);
+	bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt;
+	BNX2FC_STATS(hba, tx_stat, fcoe_tx_pkt_cnt);
+	bnx2fc_stats->tx_frames += hba->bfw_stats.fcoe_tx_pkt_cnt;
+	BNX2FC_STATS(hba, tx_stat, fcoe_tx_byte_cnt);
+	bnx2fc_stats->tx_words += ((hba->bfw_stats.fcoe_tx_byte_cnt) / 4);
+	BNX2FC_STATS(hba, rx_stat0, fcoe_rx_pkt_cnt);
+	bnx2fc_stats->rx_frames += hba->bfw_stats.fcoe_rx_pkt_cnt;
+	BNX2FC_STATS(hba, rx_stat0, fcoe_rx_byte_cnt);
+	bnx2fc_stats->rx_words += ((hba->bfw_stats.fcoe_rx_byte_cnt) / 4);
 
 	bnx2fc_stats->dumped_frames = 0;
 	bnx2fc_stats->lip_count = 0;
@@ -700,6 +705,8 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
 	bnx2fc_stats->loss_of_signal_count = 0;
 	bnx2fc_stats->prim_seq_protocol_err_count = 0;
 
+	memcpy(&hba->prev_stats, hba->stats_buffer,
+	       sizeof(struct fcoe_statistics_params));
 	return bnx2fc_stats;
 }
 
@@ -2660,7 +2667,7 @@ static struct scsi_host_template bnx2fc_shost_template = {
 	.can_queue		= BNX2FC_CAN_QUEUE,
 	.use_clustering		= ENABLE_CLUSTERING,
 	.sg_tablesize		= BNX2FC_MAX_BDS_PER_CMD,
-	.max_sectors		= 512,
+	.max_sectors		= 1024,
 };
 
 static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index ef60afa94d0e..85ea98a80f40 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -347,7 +347,7 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
  * @port:	port structure pointer
  * @tgt:	bnx2fc_rport structure pointer
  */
-static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
+int bnx2fc_send_session_enable_req(struct fcoe_port *port,
 					struct bnx2fc_rport *tgt)
 {
 	struct kwqe *kwqe_arr[2];
@@ -759,8 +759,6 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
 	case FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET:
 		BNX2FC_TGT_DBG(tgt, "REC TOV popped for xid - 0x%x\n",
 			   xid);
-		memset(&io_req->err_entry, 0,
-		       sizeof(struct fcoe_err_report_entry));
 		memcpy(&io_req->err_entry, err_entry,
 		       sizeof(struct fcoe_err_report_entry));
 		if (!test_bit(BNX2FC_FLAG_SRR_SENT,
@@ -847,8 +845,6 @@ ret_err_rqe:
 		goto ret_warn_rqe;
 	}
 
-	memset(&io_req->err_entry, 0,
-	       sizeof(struct fcoe_err_report_entry));
 	memcpy(&io_req->err_entry, err_entry,
 	       sizeof(struct fcoe_err_report_entry));
 
@@ -1124,7 +1120,6 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
 	struct bnx2fc_interface *interface;
 	u32 conn_id;
 	u32 context_id;
-	int rc;
 
 	conn_id = ofld_kcqe->fcoe_conn_id;
 	context_id = ofld_kcqe->fcoe_conn_context_id;
@@ -1153,17 +1148,10 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
1153 "resources\n"); 1148 "resources\n");
1154 set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags); 1149 set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
1155 } 1150 }
1156 goto ofld_cmpl_err;
1157 } else { 1151 } else {
1158 1152 /* FW offload request successfully completed */
1159 /* now enable the session */ 1153 set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
1160 rc = bnx2fc_send_session_enable_req(port, tgt);
1161 if (rc) {
1162 printk(KERN_ERR PFX "enable session failed\n");
1163 goto ofld_cmpl_err;
1164 }
1165 } 1154 }
1166 return;
1167ofld_cmpl_err: 1155ofld_cmpl_err:
1168 set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); 1156 set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
1169 wake_up_interruptible(&tgt->ofld_wait); 1157 wake_up_interruptible(&tgt->ofld_wait);
@@ -1210,15 +1198,9 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
 		printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
 		goto enbl_cmpl_err;
 	}
-	if (ofld_kcqe->completion_status)
-		goto enbl_cmpl_err;
-	else {
+	if (!ofld_kcqe->completion_status)
 		/* enable successful - rport ready for issuing IOs */
-		set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
-		set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
-		wake_up_interruptible(&tgt->ofld_wait);
-	}
-	return;
+		set_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
 
 enbl_cmpl_err:
 	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
@@ -1251,6 +1233,7 @@ static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
 		/* disable successful */
 		BNX2FC_TGT_DBG(tgt, "disable successful\n");
 		clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+		clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
 		set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
 		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
 		wake_up_interruptible(&tgt->upld_wait);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 8d4626c07a12..60798e829de6 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -654,7 +654,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
 	mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
 						 &mp_req->mp_resp_bd_dma,
 						 GFP_ATOMIC);
-	if (!mp_req->mp_req_bd) {
+	if (!mp_req->mp_resp_bd) {
 		printk(KERN_ERR PFX "unable to alloc MP resp bd\n");
 		bnx2fc_free_mp_resc(io_req);
 		return FAILED;
@@ -685,8 +685,8 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
 static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
 {
 	struct fc_lport *lport;
-	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
-	struct fc_rport_libfc_priv *rp = rport->dd_data;
+	struct fc_rport *rport;
+	struct fc_rport_libfc_priv *rp;
 	struct fcoe_port *port;
 	struct bnx2fc_interface *interface;
 	struct bnx2fc_rport *tgt;
@@ -704,6 +704,7 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
 	unsigned long start = jiffies;
 
 	lport = shost_priv(host);
+	rport = starget_to_rport(scsi_target(sc_cmd->device));
 	port = lport_priv(lport);
 	interface = port->priv;
 
@@ -712,6 +713,7 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
 		rc = FAILED;
 		goto tmf_err;
 	}
+	rp = rport->dd_data;
 
 	rc = fc_block_scsi_eh(sc_cmd);
 	if (rc)
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index b9d0d9cb17f9..c57a3bb8a9fb 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -33,6 +33,7 @@ static void bnx2fc_upld_timer(unsigned long data)
 	BNX2FC_TGT_DBG(tgt, "upld_timer - Upload compl not received!!\n");
 	/* fake upload completion */
 	clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+	clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
 	set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
 	wake_up_interruptible(&tgt->upld_wait);
 }
@@ -55,10 +56,25 @@ static void bnx2fc_ofld_timer(unsigned long data)
 	 * resources are freed up in bnx2fc_offload_session
 	 */
 	clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+	clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
 	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
 	wake_up_interruptible(&tgt->ofld_wait);
 }
 
+static void bnx2fc_ofld_wait(struct bnx2fc_rport *tgt)
+{
+	setup_timer(&tgt->ofld_timer, bnx2fc_ofld_timer, (unsigned long)tgt);
+	mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT);
+
+	wait_event_interruptible(tgt->ofld_wait,
+				 (test_bit(
+				  BNX2FC_FLAG_OFLD_REQ_COMPL,
+				  &tgt->flags)));
+	if (signal_pending(current))
+		flush_signals(current);
+	del_timer_sync(&tgt->ofld_timer);
+}
+
 static void bnx2fc_offload_session(struct fcoe_port *port,
 					struct bnx2fc_rport *tgt,
 					struct fc_rport_priv *rdata)
@@ -103,17 +119,7 @@ retry_ofld:
 	 * wait for the session is offloaded and enabled. 3 Secs
 	 * should be ample time for this process to complete.
 	 */
-	setup_timer(&tgt->ofld_timer, bnx2fc_ofld_timer, (unsigned long)tgt);
-	mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT);
-
-	wait_event_interruptible(tgt->ofld_wait,
-				 (test_bit(
-				  BNX2FC_FLAG_OFLD_REQ_CMPL,
-				  &tgt->flags)));
-	if (signal_pending(current))
-		flush_signals(current);
-
-	del_timer_sync(&tgt->ofld_timer);
+	bnx2fc_ofld_wait(tgt);
 
 	if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
 		if (test_and_clear_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE,
@@ -131,14 +137,23 @@ retry_ofld:
 	}
 	if (bnx2fc_map_doorbell(tgt)) {
 		printk(KERN_ERR PFX "map doorbell failed - no mem\n");
-		/* upload will take care of cleaning up sess resc */
-		lport->tt.rport_logoff(rdata);
+		goto ofld_err;
 	}
+	clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+	rval = bnx2fc_send_session_enable_req(port, tgt);
+	if (rval) {
+		pr_err(PFX "enable session failed\n");
+		goto ofld_err;
+	}
+	bnx2fc_ofld_wait(tgt);
+	if (!(test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)))
+		goto ofld_err;
 	return;
 
 ofld_err:
 	/* couldn't offload the session. log off from this rport */
 	BNX2FC_TGT_DBG(tgt, "bnx2fc_offload_session - offload error\n");
+	clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
 	/* Free session resources */
 	bnx2fc_free_session_resc(hba, tgt);
 tgt_init_err:
@@ -259,6 +274,19 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
 	spin_unlock_bh(&tgt->tgt_lock);
 }
 
+static void bnx2fc_upld_wait(struct bnx2fc_rport *tgt)
+{
+	setup_timer(&tgt->upld_timer, bnx2fc_upld_timer, (unsigned long)tgt);
+	mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
+	wait_event_interruptible(tgt->upld_wait,
+				 (test_bit(
+				  BNX2FC_FLAG_UPLD_REQ_COMPL,
+				  &tgt->flags)));
+	if (signal_pending(current))
+		flush_signals(current);
+	del_timer_sync(&tgt->upld_timer);
+}
+
 static void bnx2fc_upload_session(struct fcoe_port *port,
 					struct bnx2fc_rport *tgt)
 {
@@ -279,19 +307,8 @@ static void bnx2fc_upload_session(struct fcoe_port *port,
 	 * wait for upload to complete. 3 Secs
 	 * should be sufficient time for this process to complete.
 	 */
-	setup_timer(&tgt->upld_timer, bnx2fc_upld_timer, (unsigned long)tgt);
-	mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
-
 	BNX2FC_TGT_DBG(tgt, "waiting for disable compl\n");
-	wait_event_interruptible(tgt->upld_wait,
-				 (test_bit(
-				  BNX2FC_FLAG_UPLD_REQ_COMPL,
-				  &tgt->flags)));
-
-	if (signal_pending(current))
-		flush_signals(current);
-
-	del_timer_sync(&tgt->upld_timer);
+	bnx2fc_upld_wait(tgt);
 
 	/*
 	 * traverse thru the active_q and tmf_q and cleanup
@@ -308,24 +325,13 @@ static void bnx2fc_upload_session(struct fcoe_port *port,
 		bnx2fc_send_session_destroy_req(hba, tgt);
 
 		/* wait for destroy to complete */
-		setup_timer(&tgt->upld_timer,
-			    bnx2fc_upld_timer, (unsigned long)tgt);
-		mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
-
-		wait_event_interruptible(tgt->upld_wait,
-					 (test_bit(
-					  BNX2FC_FLAG_UPLD_REQ_COMPL,
-					  &tgt->flags)));
+		bnx2fc_upld_wait(tgt);
 
 		if (!(test_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags)))
 			printk(KERN_ERR PFX "ERROR!! destroy timed out\n");
 
 		BNX2FC_TGT_DBG(tgt, "destroy wait complete flags = 0x%lx\n",
 			tgt->flags);
-		if (signal_pending(current))
-			flush_signals(current);
-
-		del_timer_sync(&tgt->upld_timer);
 
 	} else if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags)) {
 		printk(KERN_ERR PFX "ERROR!! DISABLE req failed, destroy"
@@ -381,7 +387,9 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
 	tgt->rq_cons_idx = 0;
 	atomic_set(&tgt->num_active_ios, 0);
 
-	if (rdata->flags & FC_RP_FLAGS_RETRY) {
+	if (rdata->flags & FC_RP_FLAGS_RETRY &&
+	    rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET &&
+	    !(rdata->ids.roles & FC_RPORT_ROLE_FCP_INITIATOR)) {
 		tgt->dev_type = TYPE_TAPE;
 		tgt->io_timeout = 0; /* use default ULP timeout */
 	} else {
@@ -479,7 +487,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
 		tgt = (struct bnx2fc_rport *)&rp[1];
 
 		/* This can happen when ADISC finds the same target */
-		if (test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) {
+		if (test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)) {
 			BNX2FC_TGT_DBG(tgt, "already offloaded\n");
 			mutex_unlock(&hba->hba_mutex);
 			return;
@@ -494,11 +502,8 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
 		BNX2FC_TGT_DBG(tgt, "OFFLOAD num_ofld_sess = %d\n",
 			hba->num_ofld_sess);
 
-		if (test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) {
-			/*
-			 * Session is offloaded and enabled. Map
-			 * doorbell register for this target
-			 */
+		if (test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)) {
+			/* Session is offloaded and enabled. */
 			BNX2FC_TGT_DBG(tgt, "sess offloaded\n");
 			/* This counter is protected with hba mutex */
 			hba->num_ofld_sess++;
@@ -535,7 +540,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
 		 */
 		tgt = (struct bnx2fc_rport *)&rp[1];
 
-		if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
+		if (!(test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags))) {
 			mutex_unlock(&hba->hba_mutex);
 			break;
 		}
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 91eec60252ee..a28b03e5a5f6 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1317,7 +1317,7 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
 		  (1ULL << ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN));
 	if (error_mask1) {
 		iscsi_init2.error_bit_map[0] = error_mask1;
-		mask64 &= (u32)(~mask64);
+		mask64 ^= (u32)(mask64);
 		mask64 |= error_mask1;
 	} else
 		iscsi_init2.error_bit_map[0] = (u32) mask64;
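The one-line bnx2i change deserves a closer look. The intent is to zero only the low 32 bits of the 64-bit error mask before OR-ing in the override: the old `mask64 &= (u32)(~mask64)` AND-ed the whole value with a zero-extended 32-bit operand, wiping the high word as well, while the new `mask64 ^= (u32)(mask64)` XORs the value with its own truncated low word, which clears exactly those bits. A quick demonstration (mask values are made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t mask64 = 0x00000003ffffffffULL;	/* hypothetical error mask */
	uint64_t old_way = mask64, new_way = mask64;
	uint32_t error_mask1 = 0x00c0ffee;		/* hypothetical override */

	/* Old code: the u32 cast zero-extends, so the AND clears the
	 * high 32 bits too -- the whole original mask is lost. */
	old_way &= (uint32_t)(~old_way);
	old_way |= error_mask1;

	/* New code: XOR-ing a value with its own low 32 bits zeroes just
	 * the low word and leaves the high word intact. */
	new_way ^= (uint32_t)(new_way);
	new_way |= error_mask1;

	printf("old: 0x%016llx\n", (unsigned long long)old_way); /* 0x0000000000c0ffee */
	printf("new: 0x%016llx\n", (unsigned long long)new_way); /* 0x0000000300c0ffee */
	return 0;
}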
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index 8ecdb94a59f4..bdd78fb4fc70 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -2131,13 +2131,16 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
 	value_to_add = 4 - (cf->size % 4);
 
 	cfg_data = kzalloc(cf->size+value_to_add, GFP_KERNEL);
-	if (cfg_data == NULL)
-		return -ENOMEM;
+	if (cfg_data == NULL) {
+		ret = -ENOMEM;
+		goto leave;
+	}
 
 	memcpy((void *)cfg_data, (const void *)cf->data, cf->size);
-
-	if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0)
-		return -EINVAL;
+	if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0) {
+		ret = -EINVAL;
+		goto leave;
+	}
 
 	mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param);
 	maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16;
@@ -2149,9 +2152,9 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
 		strncpy(path, "/lib/firmware/" CSIO_CF_FNAME, 64);
 	}
 
+leave:
 	kfree(cfg_data);
 	release_firmware(cf);
-
 	return ret;
 }
 
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index c323b2030afa..0604b5ff3638 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -60,13 +60,6 @@ static struct scsi_transport_template *csio_fcoe_transport_vport;
 /*
  * debugfs support
  */
-static int
-csio_mem_open(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
 static ssize_t
 csio_mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 {
@@ -110,7 +103,7 @@ csio_mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 
 static const struct file_operations csio_mem_debugfs_fops = {
 	.owner   = THIS_MODULE,
-	.open    = csio_mem_open,
+	.open    = simple_open,
 	.read    = csio_mem_read,
 	.llseek  = default_llseek,
 };
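The deleted csio_mem_open() duplicated a generic helper the driver now points at instead. For reference, simple_open() in fs/libfs.c is, in this generation of the kernel, essentially the same few lines:

/* from fs/libfs.c (paraphrased): stash the inode's private pointer on
 * the file, so debugfs read handlers can find their device context */
int simple_open(struct inode *inode, struct file *file)
{
	if (inode->i_private)
		file->private_data = inode->i_private;
	return 0;
}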
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index f924b3c3720e..3fecf35ba292 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1564,6 +1564,7 @@ static int t4_uld_state_change(void *handle, enum cxgb4_state state)
 		break;
 	case CXGB4_STATE_DETACH:
 		pr_info("cdev 0x%p, DETACH.\n", cdev);
+		cxgbi_device_unregister(cdev);
 		break;
 	default:
 		pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 3c53c3478ee7..483eb9dbe663 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -495,7 +495,8 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
 	}
 
 	fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
-			       fnic->vlan_hw_insert, fnic->vlan_id, 1);
+			       0 /* hw inserts cos value */,
+			       fnic->vlan_id, 1);
 	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
 }
 
@@ -563,7 +564,8 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
 	}
 
 	fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
-			   fnic->vlan_hw_insert, fnic->vlan_id, 1, 1, 1);
+			   0 /* hw inserts cos value */,
+			   fnic->vlan_id, 1, 1, 1);
 fnic_send_frame_end:
 	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
 
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 599790e41a98..59bceac51a4c 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -1107,14 +1107,8 @@ static int gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
 	pci_read_config_word(pdev, PCI_COMMAND, &command);
 	command |= 6;
 	pci_write_config_word(pdev, PCI_COMMAND, command);
-	if (pci_resource_start(pdev, 8) == 1UL)
-		pci_resource_start(pdev, 8) = 0UL;
-	i = 0xFEFF0001UL;
-	pci_write_config_dword(pdev, PCI_ROM_ADDRESS, i);
-	gdth_delay(1);
-	pci_write_config_dword(pdev, PCI_ROM_ADDRESS,
-			       pci_resource_start(pdev, 8));
-
+	gdth_delay(1);
+
 	dp6m_ptr = ha->brd;
 
 	/* Ensure that it is safe to access the non HW portions of DPMEM.
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 1d7da3f41ebb..8fa79b83f2d3 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -98,6 +98,7 @@ static unsigned int ipr_transop_timeout = 0;
 static unsigned int ipr_debug = 0;
 static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
 static unsigned int ipr_dual_ioa_raid = 1;
+static unsigned int ipr_number_of_msix = 2;
 static DEFINE_SPINLOCK(ipr_driver_lock);
 
 /* This table describes the differences between DMA controller chips */
@@ -107,6 +108,7 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
 		.max_cmds = 100,
 		.cache_line_size = 0x20,
 		.clear_isr = 1,
+		.iopoll_weight = 0,
 		{
 			.set_interrupt_mask_reg = 0x0022C,
 			.clr_interrupt_mask_reg = 0x00230,
@@ -131,6 +133,7 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
 		.max_cmds = 100,
 		.cache_line_size = 0x20,
 		.clear_isr = 1,
+		.iopoll_weight = 0,
 		{
 			.set_interrupt_mask_reg = 0x00288,
 			.clr_interrupt_mask_reg = 0x0028C,
@@ -155,6 +158,7 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
 		.max_cmds = 1000,
 		.cache_line_size = 0x20,
 		.clear_isr = 0,
+		.iopoll_weight = 64,
 		{
 			.set_interrupt_mask_reg = 0x00010,
 			.clr_interrupt_mask_reg = 0x00018,
@@ -215,6 +219,8 @@ MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to e
 module_param_named(max_devs, ipr_max_devs, int, 0);
 MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
 		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
+module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
+MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 5). (default:2)");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(IPR_DRIVER_VERSION);
 
@@ -549,7 +555,8 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
 	struct ipr_trace_entry *trace_entry;
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 
-	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
+	trace_entry = &ioa_cfg->trace[atomic_add_return
+			(1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
 	trace_entry->time = jiffies;
 	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
 	trace_entry->type = type;
@@ -560,6 +567,7 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
 	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
 	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
 	trace_entry->u.add_data = add_data;
+	wmb();
 }
 #else
 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
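With several HRRQs completing commands concurrently, the trace index above can no longer be a plain `trace_index++`: the two hunks switch it to `atomic_add_return(...) % IPR_NUM_TRACE_ENTRIES` so each completion claims a distinct ring slot, and add a `wmb()` so the entry's fields are ordered before they become visible. A C11 sketch of the same slot-reservation scheme (entry layout and sizes are made up):

#include <stdio.h>
#include <stdatomic.h>

#define NUM_TRACE_ENTRIES 8	/* stand-in for IPR_NUM_TRACE_ENTRIES */

struct trace_entry { int op_code; };

static struct trace_entry trace[NUM_TRACE_ENTRIES];
static atomic_int trace_index = -1;

/* Each caller atomically claims the next counter value; the modulo
 * turns the monotonically increasing counter into a ring position,
 * so two CPUs tracing at once never scribble on the same entry. */
static void trc_hook(int op_code)
{
	int slot = atomic_fetch_add(&trace_index, 1) + 1;	/* ~ atomic_add_return */

	trace[slot % NUM_TRACE_ENTRIES].op_code = op_code;
	/* the kernel follows the stores with wmb(); with C11 atomics a
	 * release fence provides comparable store ordering */
	atomic_thread_fence(memory_order_release);
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		trc_hook(i);
	for (int i = 0; i < NUM_TRACE_ENTRIES; i++)
		printf("slot %d: op %d\n", i, trace[i].op_code);
	return 0;
}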
@@ -595,8 +603,11 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
 	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
 	dma_addr_t dma_addr = ipr_cmd->dma_addr;
+	int hrrq_id;
 
+	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
 	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
+	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
 	ioarcb->data_transfer_length = 0;
 	ioarcb->read_data_transfer_length = 0;
 	ioarcb->ioadl_len = 0;
@@ -646,12 +657,16 @@ static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
  *	pointer to ipr command struct
  **/
 static
-struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
+struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
 {
-	struct ipr_cmnd *ipr_cmd;
+	struct ipr_cmnd *ipr_cmd = NULL;
+
+	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
+		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
+			struct ipr_cmnd, queue);
+		list_del(&ipr_cmd->queue);
+	}
 
-	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
-	list_del(&ipr_cmd->queue);
 
 	return ipr_cmd;
 }
@@ -666,7 +681,8 @@ struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
 static
 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
 {
-	struct ipr_cmnd *ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
+	struct ipr_cmnd *ipr_cmd =
+		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
 	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
 	return ipr_cmd;
 }
@@ -686,9 +702,15 @@ static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
 					  u32 clr_ints)
 {
 	volatile u32 int_reg;
+	int i;
 
 	/* Stop new interrupts */
-	ioa_cfg->allow_interrupts = 0;
+	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+		spin_lock(&ioa_cfg->hrrq[i]._lock);
+		ioa_cfg->hrrq[i].allow_interrupts = 0;
+		spin_unlock(&ioa_cfg->hrrq[i]._lock);
+	}
+	wmb();
 
 	/* Set interrupt mask to stop all new interrupts */
 	if (ioa_cfg->sis64)
@@ -761,13 +783,12 @@ static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
  **/
 static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
 {
-	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 	struct ata_queued_cmd *qc = ipr_cmd->qc;
 	struct ipr_sata_port *sata_port = qc->ap->private_data;
 
 	qc->err_mask |= AC_ERR_OTHER;
 	sata_port->ioasa.status |= ATA_BUSY;
-	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 	ata_qc_complete(qc);
 }
 
@@ -783,14 +804,13 @@ static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
  **/
 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
 {
-	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
 
 	scsi_cmd->result |= (DID_ERROR << 16);
 
 	scsi_dma_unmap(ipr_cmd->scsi_cmd);
 	scsi_cmd->scsi_done(scsi_cmd);
-	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 }
 
 /**
@@ -805,24 +825,32 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
 {
 	struct ipr_cmnd *ipr_cmd, *temp;
+	struct ipr_hrr_queue *hrrq;
 
 	ENTER;
-	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
-		list_del(&ipr_cmd->queue);
+	for_each_hrrq(hrrq, ioa_cfg) {
+		spin_lock(&hrrq->_lock);
+		list_for_each_entry_safe(ipr_cmd,
+					temp, &hrrq->hrrq_pending_q, queue) {
+			list_del(&ipr_cmd->queue);
 
-		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
-		ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID);
+			ipr_cmd->s.ioasa.hdr.ioasc =
+				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
+			ipr_cmd->s.ioasa.hdr.ilid =
+				cpu_to_be32(IPR_DRIVER_ILID);
 
-		if (ipr_cmd->scsi_cmd)
-			ipr_cmd->done = ipr_scsi_eh_done;
-		else if (ipr_cmd->qc)
-			ipr_cmd->done = ipr_sata_eh_done;
+			if (ipr_cmd->scsi_cmd)
+				ipr_cmd->done = ipr_scsi_eh_done;
+			else if (ipr_cmd->qc)
+				ipr_cmd->done = ipr_sata_eh_done;
 
-		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
-		del_timer(&ipr_cmd->timer);
-		ipr_cmd->done(ipr_cmd);
+			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
+				     IPR_IOASC_IOA_WAS_RESET);
+			del_timer(&ipr_cmd->timer);
+			ipr_cmd->done(ipr_cmd);
+		}
+		spin_unlock(&hrrq->_lock);
 	}
-
 	LEAVE;
 }
 
@@ -872,9 +900,7 @@ static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
 		       void (*done) (struct ipr_cmnd *),
 		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
 {
-	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
-
-	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
 
 	ipr_cmd->done = done;
 
@@ -975,6 +1001,14 @@ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
 	spin_lock_irq(ioa_cfg->host->host_lock);
 }
 
+static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
+{
+	if (ioa_cfg->hrrq_num == 1)
+		return 0;
+	else
+		return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
+}
+
 /**
  * ipr_send_hcam - Send an HCAM to the adapter.
  * @ioa_cfg:	ioa config struct
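ipr_get_hrrq_index() is the queue-selection policy for the new multi-HRRQ mode: queue 0 (IPR_INIT_HRRQ) is reserved for internal and initialization commands, and ordinary I/O is round-robined across the remaining hrrq_num - 1 queues with an atomic counter. A standalone model of the same distribution:

#include <stdio.h>
#include <stdatomic.h>

/* Model of ipr_get_hrrq_index(): index 0 is reserved for internal
 * commands, normal I/O rotates over queues 1..hrrq_num-1. */
struct ioa_cfg {
	int hrrq_num;
	atomic_uint hrrq_index;
};

static int get_hrrq_index(struct ioa_cfg *ioa)
{
	if (ioa->hrrq_num == 1)
		return 0;	/* single-queue (no MSI-X) mode */
	return (atomic_fetch_add(&ioa->hrrq_index, 1) + 1)
		% (ioa->hrrq_num - 1) + 1;
}

int main(void)
{
	struct ioa_cfg ioa = { .hrrq_num = 4 };	/* hypothetical: 1 init + 3 I/O queues */

	for (int i = 0; i < 8; i++)
		printf("%d ", get_hrrq_index(&ioa));	/* cycles over 1..3, never 0 */
	printf("\n");
	return 0;
}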
@@ -994,9 +1028,9 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
 	struct ipr_cmnd *ipr_cmd;
 	struct ipr_ioarcb *ioarcb;
 
-	if (ioa_cfg->allow_cmds) {
+	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
 		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
-		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
 		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
 
 		ipr_cmd->u.hostrcb = hostrcb;
@@ -1166,14 +1200,15 @@ static int ipr_is_same_device(struct ipr_resource_entry *res,
 }
 
 /**
- * ipr_format_res_path - Format the resource path for printing.
+ * __ipr_format_res_path - Format the resource path for printing.
  * @res_path:	resource path
  * @buf:	buffer
+ * @len:	length of buffer provided
  *
  * Return value:
  * 	pointer to buffer
  **/
-static char *ipr_format_res_path(u8 *res_path, char *buffer, int len)
+static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
 {
 	int i;
 	char *p = buffer;
1187} 1222}
1188 1223
1189/** 1224/**
1225 * ipr_format_res_path - Format the resource path for printing.
1226 * @ioa_cfg: ioa config struct
1227 * @res_path: resource path
1228 * @buf: buffer
1229 * @len: length of buffer provided
1230 *
1231 * Return value:
1232 * pointer to buffer
1233 **/
1234static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1235 u8 *res_path, char *buffer, int len)
1236{
1237 char *p = buffer;
1238
1239 *p = '\0';
1240 p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1241 __ipr_format_res_path(res_path, p, len - (buffer - p));
1242 return buffer;
1243}
1244
1245/**
1190 * ipr_update_res_entry - Update the resource entry. 1246 * ipr_update_res_entry - Update the resource entry.
1191 * @res: resource entry struct 1247 * @res: resource entry struct
1192 * @cfgtew: config table entry wrapper struct 1248 * @cfgtew: config table entry wrapper struct
@@ -1226,8 +1282,8 @@ static void ipr_update_res_entry(struct ipr_resource_entry *res,
 
 		if (res->sdev && new_path)
 			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
-				    ipr_format_res_path(res->res_path, buffer,
-					sizeof(buffer)));
+				    ipr_format_res_path(res->ioa_cfg,
+					res->res_path, buffer, sizeof(buffer)));
 	} else {
 		res->flags = cfgtew->u.cfgte->flags;
 		if (res->flags & IPR_IS_IOA_RESOURCE)
@@ -1363,7 +1419,7 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
 
 	list_del(&hostrcb->queue);
-	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 
 	if (ioasc) {
 		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
@@ -1613,8 +1669,8 @@ static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
 		ipr_err_separator;
 
 		ipr_err("Device %d : %s", i + 1,
-			ipr_format_res_path(dev_entry->res_path, buffer,
-					    sizeof(buffer)));
+			__ipr_format_res_path(dev_entry->res_path,
+					      buffer, sizeof(buffer)));
 		ipr_log_ext_vpd(&dev_entry->vpd);
 
 		ipr_err("-----New Device Information-----\n");
@@ -1960,14 +2016,16 @@ static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
 
 			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
 				     path_active_desc[i].desc, path_state_desc[j].desc,
-				     ipr_format_res_path(fabric->res_path, buffer,
-							 sizeof(buffer)));
+				     ipr_format_res_path(hostrcb->ioa_cfg,
+							 fabric->res_path,
+							 buffer, sizeof(buffer)));
 			return;
 		}
 	}
 
 	ipr_err("Path state=%02X Resource Path=%s\n", path_state,
-		ipr_format_res_path(fabric->res_path, buffer, sizeof(buffer)));
+		ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
+			buffer, sizeof(buffer)));
 }
 
 static const struct {
@@ -2108,18 +2166,20 @@ static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
 
 			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
 				     path_status_desc[j].desc, path_type_desc[i].desc,
-				     ipr_format_res_path(cfg->res_path, buffer,
-							 sizeof(buffer)),
+				     ipr_format_res_path(hostrcb->ioa_cfg,
+							 cfg->res_path, buffer, sizeof(buffer)),
 				     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
-				     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+				     be32_to_cpu(cfg->wwid[0]),
+				     be32_to_cpu(cfg->wwid[1]));
 			return;
 		}
 	}
 	ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
 		     "WWN=%08X%08X\n", cfg->type_status,
-		     ipr_format_res_path(cfg->res_path, buffer, sizeof(buffer)),
-		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
-		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+		     ipr_format_res_path(hostrcb->ioa_cfg,
+			cfg->res_path, buffer, sizeof(buffer)),
+		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
 }
 
 /**
@@ -2182,7 +2242,8 @@ static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
 
 	ipr_err("RAID %s Array Configuration: %s\n",
 		error->protection_level,
-		ipr_format_res_path(error->last_res_path, buffer, sizeof(buffer)));
+		ipr_format_res_path(ioa_cfg, error->last_res_path,
+				    buffer, sizeof(buffer)));
 
 	ipr_err_separator;
 
@@ -2203,11 +2264,12 @@ static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
 		ipr_err("Array Member %d:\n", i);
 		ipr_log_ext_vpd(&array_entry->vpd);
 		ipr_err("Current Location: %s\n",
-			 ipr_format_res_path(array_entry->res_path, buffer,
-				sizeof(buffer)));
+			 ipr_format_res_path(ioa_cfg, array_entry->res_path,
+				buffer, sizeof(buffer)));
 		ipr_err("Expected Location: %s\n",
-			 ipr_format_res_path(array_entry->expected_res_path,
-				buffer, sizeof(buffer)));
+			 ipr_format_res_path(ioa_cfg,
+				array_entry->expected_res_path,
+				buffer, sizeof(buffer)));
 
 		ipr_err_separator;
 	}
@@ -2409,7 +2471,7 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2409 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); 2471 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2410 2472
2411 list_del(&hostrcb->queue); 2473 list_del(&hostrcb->queue);
2412 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 2474 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2413 2475
2414 if (!ioasc) { 2476 if (!ioasc) {
2415 ipr_handle_log_data(ioa_cfg, hostrcb); 2477 ipr_handle_log_data(ioa_cfg, hostrcb);
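From this hunk on, commands retire to a per-queue free list instead of the single ioa_cfg->free_q. For reference, here are the struct ipr_hrr_queue members the rest of the diff touches, collected in one place; the member names all appear in these hunks, but the types and ordering are illustrative:

    struct ipr_hrr_queue {
    	__be32 *host_rrq;		/* response ring shared with the IOA */
    	dma_addr_t host_rrq_dma;
    	__be32 *hrrq_start, *hrrq_end, *hrrq_curr;
    	u32 toggle_bit;			/* expected phase of fresh entries */
    	unsigned int size;		/* ring entries */
    	unsigned int min_cmd_id, max_cmd_id;
    	spinlock_t _lock;		/* guards per-queue state and lists */
    	spinlock_t *lock;		/* what the irqsave paths lock through */
    	struct list_head hrrq_free_q;
    	struct list_head hrrq_pending_q;
    	u8 allow_interrupts, allow_cmds, ioa_is_dead;
    	struct blk_iopoll iopoll;
    	struct ipr_ioa_cfg *ioa_cfg;
    };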
@@ -2491,36 +2553,6 @@ static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2491} 2553}
2492 2554
2493/** 2555/**
2494 * ipr_reset_reload - Reset/Reload the IOA
2495 * @ioa_cfg: ioa config struct
2496 * @shutdown_type: shutdown type
2497 *
2498 * This function resets the adapter and re-initializes it.
2499 * This function assumes that all new host commands have been stopped.
2500 * Return value:
2501 * SUCCESS / FAILED
2502 **/
2503static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
2504 enum ipr_shutdown_type shutdown_type)
2505{
2506 if (!ioa_cfg->in_reset_reload)
2507 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
2508
2509 spin_unlock_irq(ioa_cfg->host->host_lock);
2510 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2511 spin_lock_irq(ioa_cfg->host->host_lock);
2512
2513 /* If we got hit with a host reset while we were already resetting
2514 the adapter for some reason, and the reset failed. */
2515 if (ioa_cfg->ioa_is_dead) {
2516 ipr_trace;
2517 return FAILED;
2518 }
2519
2520 return SUCCESS;
2521}
2522
2523/**
2524 * ipr_find_ses_entry - Find matching SES in SES table 2556 * ipr_find_ses_entry - Find matching SES in SES table
2525 * @res: resource entry struct of SES 2557 * @res: resource entry struct of SES
2526 * 2558 *
@@ -3153,7 +3185,8 @@ static void ipr_worker_thread(struct work_struct *work)
3153restart: 3185restart:
3154 do { 3186 do {
3155 did_work = 0; 3187 did_work = 0;
3156 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) { 3188 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
3189 !ioa_cfg->allow_ml_add_del) {
3157 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3190 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3158 return; 3191 return;
3159 } 3192 }
@@ -3401,7 +3434,7 @@ static ssize_t ipr_show_adapter_state(struct device *dev,
3401 int len; 3434 int len;
3402 3435
3403 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3436 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3404 if (ioa_cfg->ioa_is_dead) 3437 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3405 len = snprintf(buf, PAGE_SIZE, "offline\n"); 3438 len = snprintf(buf, PAGE_SIZE, "offline\n");
3406 else 3439 else
3407 len = snprintf(buf, PAGE_SIZE, "online\n"); 3440 len = snprintf(buf, PAGE_SIZE, "online\n");
@@ -3427,14 +3460,20 @@ static ssize_t ipr_store_adapter_state(struct device *dev,
3427 struct Scsi_Host *shost = class_to_shost(dev); 3460 struct Scsi_Host *shost = class_to_shost(dev);
3428 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3461 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3429 unsigned long lock_flags; 3462 unsigned long lock_flags;
3430 int result = count; 3463 int result = count, i;
3431 3464
3432 if (!capable(CAP_SYS_ADMIN)) 3465 if (!capable(CAP_SYS_ADMIN))
3433 return -EACCES; 3466 return -EACCES;
3434 3467
3435 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3468 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3436 if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) { 3469 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3437 ioa_cfg->ioa_is_dead = 0; 3470 !strncmp(buf, "online", 6)) {
3471 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3472 spin_lock(&ioa_cfg->hrrq[i]._lock);
3473 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3474 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3475 }
3476 wmb();
3438 ioa_cfg->reset_retries = 0; 3477 ioa_cfg->reset_retries = 0;
3439 ioa_cfg->in_ioa_bringdown = 0; 3478 ioa_cfg->in_ioa_bringdown = 0;
3440 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 3479 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
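The online transition above is the first of several places where a formerly global flag is fanned out across every queue under its _lock and then published with a wmb(). The diff open-codes the pattern each time; a hypothetical helper capturing it:

    /* Illustrative only -- the driver repeats this inline. */
    static void ipr_set_all_hrrq_dead(struct ipr_ioa_cfg *ioa_cfg, u8 dead)
    {
    	int i;

    	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
    		spin_lock(&ioa_cfg->hrrq[i]._lock);
    		ioa_cfg->hrrq[i].ioa_is_dead = dead;
    		spin_unlock(&ioa_cfg->hrrq[i]._lock);
    	}
    	wmb();	/* publish before lockless readers test the flag */
    }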
@@ -3494,6 +3533,95 @@ static struct device_attribute ipr_ioa_reset_attr = {
3494 .store = ipr_store_reset_adapter 3533 .store = ipr_store_reset_adapter
3495}; 3534};
3496 3535
3536static int ipr_iopoll(struct blk_iopoll *iop, int budget);
3537/**
3538 * ipr_show_iopoll_weight - Show ipr polling mode
3539 * @dev: class device struct
3540 * @buf: buffer
3541 *
3542 * Return value:
3543 * number of bytes printed to buffer
3544 **/
3545static ssize_t ipr_show_iopoll_weight(struct device *dev,
3546 struct device_attribute *attr, char *buf)
3547{
3548 struct Scsi_Host *shost = class_to_shost(dev);
3549 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3550 unsigned long lock_flags = 0;
3551 int len;
3552
3553 spin_lock_irqsave(shost->host_lock, lock_flags);
3554 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3555 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3556
3557 return len;
3558}
3559
3560/**
3561 * ipr_store_iopoll_weight - Change the adapter's polling mode
3562 * @dev: class device struct
3563 * @buf: buffer
3564 *
3565 * Return value:
3566 * number of bytes consumed from buffer
3567 **/
3568static ssize_t ipr_store_iopoll_weight(struct device *dev,
3569 struct device_attribute *attr,
3570 const char *buf, size_t count)
3571{
3572 struct Scsi_Host *shost = class_to_shost(dev);
3573 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3574 unsigned long user_iopoll_weight;
3575 unsigned long lock_flags = 0;
3576 int i;
3577
3578 if (!ioa_cfg->sis64) {
3579 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
3580 return -EINVAL;
3581 }
3582 if (kstrtoul(buf, 10, &user_iopoll_weight))
3583 return -EINVAL;
3584
3585 if (user_iopoll_weight > 256) {
3586 dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must not exceed 256\n");
3587 return -EINVAL;
3588 }
3589
3590 if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3591 dev_info(&ioa_cfg->pdev->dev, "Requested blk-iopoll weight matches the current setting\n");
3592 return strlen(buf);
3593 }
3594
3595 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
3596 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3597 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3598 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
3599 }
3600
3601 spin_lock_irqsave(shost->host_lock, lock_flags);
3602 ioa_cfg->iopoll_weight = user_iopoll_weight;
3603 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
3604 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3605 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3606 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
3607 ioa_cfg->iopoll_weight, ipr_iopoll);
3608 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
3609 }
3610 }
3611 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3612
3613 return strlen(buf);
3614}
3615
3616static struct device_attribute ipr_iopoll_weight_attr = {
3617 .attr = {
3618 .name = "iopoll_weight",
3619 .mode = S_IRUGO | S_IWUSR,
3620 },
3621 .show = ipr_show_iopoll_weight,
3622 .store = ipr_store_iopoll_weight
3623};
3624
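The new attribute is plain text: writing 0 disables polling, while a nonzero weight re-initializes blk-iopoll on every queue past the first. A self-contained userspace sketch (the sysfs path with hostN is hypothetical; substitute the real host number):

    /* Equivalent to: echo 64 > /sys/class/scsi_host/host0/iopoll_weight
     * The kernel side parses the value with kstrtoul(), as shown above. */
    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    static int set_iopoll_weight(const char *attr_path, const char *weight)
    {
    	int fd = open(attr_path, O_WRONLY);
    	ssize_t n;

    	if (fd < 0)
    		return -1;
    	n = write(fd, weight, strlen(weight));
    	close(fd);
    	return n < 0 ? -1 : 0;
    }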
3497/** 3625/**
3498 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer 3626 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3499 * @buf_len: buffer length 3627 * @buf_len: buffer length
@@ -3862,6 +3990,7 @@ static struct device_attribute *ipr_ioa_attrs[] = {
3862 &ipr_ioa_reset_attr, 3990 &ipr_ioa_reset_attr,
3863 &ipr_update_fw_attr, 3991 &ipr_update_fw_attr,
3864 &ipr_ioa_fw_type_attr, 3992 &ipr_ioa_fw_type_attr,
3993 &ipr_iopoll_weight_attr,
3865 NULL, 3994 NULL,
3866}; 3995};
3867 3996
@@ -4014,7 +4143,7 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4014 4143
4015 ioa_cfg->dump = dump; 4144 ioa_cfg->dump = dump;
4016 ioa_cfg->sdt_state = WAIT_FOR_DUMP; 4145 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4017 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) { 4146 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4018 ioa_cfg->dump_taken = 1; 4147 ioa_cfg->dump_taken = 1;
4019 schedule_work(&ioa_cfg->work_q); 4148 schedule_work(&ioa_cfg->work_q);
4020 } 4149 }
@@ -4227,8 +4356,8 @@ static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribut
4227 res = (struct ipr_resource_entry *)sdev->hostdata; 4356 res = (struct ipr_resource_entry *)sdev->hostdata;
4228 if (res && ioa_cfg->sis64) 4357 if (res && ioa_cfg->sis64)
4229 len = snprintf(buf, PAGE_SIZE, "%s\n", 4358 len = snprintf(buf, PAGE_SIZE, "%s\n",
4230 ipr_format_res_path(res->res_path, buffer, 4359 __ipr_format_res_path(res->res_path, buffer,
4231 sizeof(buffer))); 4360 sizeof(buffer)));
4232 else if (res) 4361 else if (res)
4233 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no, 4362 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4234 res->bus, res->target, res->lun); 4363 res->bus, res->target, res->lun);
@@ -4556,8 +4685,8 @@ static int ipr_slave_configure(struct scsi_device *sdev)
4556 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun); 4685 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
4557 if (ioa_cfg->sis64) 4686 if (ioa_cfg->sis64)
4558 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n", 4687 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4559 ipr_format_res_path(res->res_path, buffer, 4688 ipr_format_res_path(ioa_cfg,
4560 sizeof(buffer))); 4689 res->res_path, buffer, sizeof(buffer)));
4561 return 0; 4690 return 0;
4562 } 4691 }
4563 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4692 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -4638,22 +4767,18 @@ static int ipr_slave_alloc(struct scsi_device *sdev)
4638 return rc; 4767 return rc;
4639} 4768}
4640 4769
4641/** 4770static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4642 * ipr_eh_host_reset - Reset the host adapter
4643 * @scsi_cmd: scsi command struct
4644 *
4645 * Return value:
4646 * SUCCESS / FAILED
4647 **/
4648static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
4649{ 4771{
4650 struct ipr_ioa_cfg *ioa_cfg; 4772 struct ipr_ioa_cfg *ioa_cfg;
4651 int rc; 4773 unsigned long lock_flags = 0;
4774 int rc = SUCCESS;
4652 4775
4653 ENTER; 4776 ENTER;
4654 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; 4777 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4778 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4655 4779
4656 if (!ioa_cfg->in_reset_reload) { 4780 if (!ioa_cfg->in_reset_reload) {
4781 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4657 dev_err(&ioa_cfg->pdev->dev, 4782 dev_err(&ioa_cfg->pdev->dev,
4658 "Adapter being reset as a result of error recovery.\n"); 4783 "Adapter being reset as a result of error recovery.\n");
4659 4784
@@ -4661,20 +4786,19 @@ static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
4661 ioa_cfg->sdt_state = GET_DUMP; 4786 ioa_cfg->sdt_state = GET_DUMP;
4662 } 4787 }
4663 4788
4664 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV); 4789 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4665 4790 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4666 LEAVE; 4791 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4667 return rc;
4668}
4669
4670static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4671{
4672 int rc;
4673 4792
4674 spin_lock_irq(cmd->device->host->host_lock); 4793 /* If a host reset hit while we were already resetting the
4675 rc = __ipr_eh_host_reset(cmd); 4794 adapter for some reason and that reset failed, fail this one too. */
4676 spin_unlock_irq(cmd->device->host->host_lock); 4795 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4796 ipr_trace;
4797 rc = FAILED;
4798 }
4677 4799
4800 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4801 LEAVE;
4678 return rc; 4802 return rc;
4679} 4803}
4680 4804
@@ -4723,7 +4847,7 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4723 4847
4724 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); 4848 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4725 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 4849 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4726 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 4850 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
4727 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) { 4851 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4728 if (ipr_cmd->ioa_cfg->sis64) 4852 if (ipr_cmd->ioa_cfg->sis64)
4729 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata, 4853 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
@@ -4793,6 +4917,7 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
4793 struct ipr_resource_entry *res; 4917 struct ipr_resource_entry *res;
4794 struct ata_port *ap; 4918 struct ata_port *ap;
4795 int rc = 0; 4919 int rc = 0;
4920 struct ipr_hrr_queue *hrrq;
4796 4921
4797 ENTER; 4922 ENTER;
4798 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; 4923 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
@@ -4808,22 +4933,26 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
4808 */ 4933 */
4809 if (ioa_cfg->in_reset_reload) 4934 if (ioa_cfg->in_reset_reload)
4810 return FAILED; 4935 return FAILED;
4811 if (ioa_cfg->ioa_is_dead) 4936 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
4812 return FAILED; 4937 return FAILED;
4813 4938
4814 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) { 4939 for_each_hrrq(hrrq, ioa_cfg) {
4815 if (ipr_cmd->ioarcb.res_handle == res->res_handle) { 4940 spin_lock(&hrrq->_lock);
4816 if (ipr_cmd->scsi_cmd) 4941 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4817 ipr_cmd->done = ipr_scsi_eh_done; 4942 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4818 if (ipr_cmd->qc) 4943 if (ipr_cmd->scsi_cmd)
4819 ipr_cmd->done = ipr_sata_eh_done; 4944 ipr_cmd->done = ipr_scsi_eh_done;
4820 if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) { 4945 if (ipr_cmd->qc)
4821 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT; 4946 ipr_cmd->done = ipr_sata_eh_done;
4822 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED; 4947 if (ipr_cmd->qc &&
4948 !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
4949 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
4950 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
4951 }
4823 } 4952 }
4824 } 4953 }
4954 spin_unlock(&hrrq->_lock);
4825 } 4955 }
4826
4827 res->resetting_device = 1; 4956 res->resetting_device = 1;
4828 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n"); 4957 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
4829 4958
@@ -4833,11 +4962,17 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
4833 ata_std_error_handler(ap); 4962 ata_std_error_handler(ap);
4834 spin_lock_irq(scsi_cmd->device->host->host_lock); 4963 spin_lock_irq(scsi_cmd->device->host->host_lock);
4835 4964
4836 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) { 4965 for_each_hrrq(hrrq, ioa_cfg) {
4837 if (ipr_cmd->ioarcb.res_handle == res->res_handle) { 4966 spin_lock(&hrrq->_lock);
4838 rc = -EIO; 4967 list_for_each_entry(ipr_cmd,
4839 break; 4968 &hrrq->hrrq_pending_q, queue) {
4969 if (ipr_cmd->ioarcb.res_handle ==
4970 res->res_handle) {
4971 rc = -EIO;
4972 break;
4973 }
4840 } 4974 }
4975 spin_unlock(&hrrq->_lock);
4841 } 4976 }
4842 } else 4977 } else
4843 rc = ipr_device_reset(ioa_cfg, res); 4978 rc = ipr_device_reset(ioa_cfg, res);
@@ -4890,7 +5025,7 @@ static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
4890 else 5025 else
4891 ipr_cmd->sibling->done(ipr_cmd->sibling); 5026 ipr_cmd->sibling->done(ipr_cmd->sibling);
4892 5027
4893 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 5028 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
4894 LEAVE; 5029 LEAVE;
4895} 5030}
4896 5031
@@ -4951,6 +5086,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
4951 struct ipr_cmd_pkt *cmd_pkt; 5086 struct ipr_cmd_pkt *cmd_pkt;
4952 u32 ioasc, int_reg; 5087 u32 ioasc, int_reg;
4953 int op_found = 0; 5088 int op_found = 0;
5089 struct ipr_hrr_queue *hrrq;
4954 5090
4955 ENTER; 5091 ENTER;
4956 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata; 5092 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
@@ -4960,7 +5096,8 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
4960 * This will force the mid-layer to call ipr_eh_host_reset, 5096 * This will force the mid-layer to call ipr_eh_host_reset,
4961 * which will then go to sleep and wait for the reset to complete 5097 * which will then go to sleep and wait for the reset to complete
4962 */ 5098 */
4963 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead) 5099 if (ioa_cfg->in_reset_reload ||
5100 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
4964 return FAILED; 5101 return FAILED;
4965 if (!res) 5102 if (!res)
4966 return FAILED; 5103 return FAILED;
@@ -4975,12 +5112,16 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
4975 if (!ipr_is_gscsi(res)) 5112 if (!ipr_is_gscsi(res))
4976 return FAILED; 5113 return FAILED;
4977 5114
4978 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) { 5115 for_each_hrrq(hrrq, ioa_cfg) {
4979 if (ipr_cmd->scsi_cmd == scsi_cmd) { 5116 spin_lock(&hrrq->_lock);
4980 ipr_cmd->done = ipr_scsi_eh_done; 5117 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4981 op_found = 1; 5118 if (ipr_cmd->scsi_cmd == scsi_cmd) {
4982 break; 5119 ipr_cmd->done = ipr_scsi_eh_done;
5120 op_found = 1;
5121 break;
5122 }
4983 } 5123 }
5124 spin_unlock(&hrrq->_lock);
4984 } 5125 }
4985 5126
4986 if (!op_found) 5127 if (!op_found)
@@ -5007,7 +5148,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5007 ipr_trace; 5148 ipr_trace;
5008 } 5149 }
5009 5150
5010 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 5151 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
5011 if (!ipr_is_naca_model(res)) 5152 if (!ipr_is_naca_model(res))
5012 res->needs_sync_complete = 1; 5153 res->needs_sync_complete = 1;
5013 5154
@@ -5099,6 +5240,9 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5099 } else { 5240 } else {
5100 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED) 5241 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5101 ioa_cfg->ioa_unit_checked = 1; 5242 ioa_cfg->ioa_unit_checked = 1;
5243 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5244 dev_err(&ioa_cfg->pdev->dev,
5245 "No Host RRQ. 0x%08X\n", int_reg);
5102 else 5246 else
5103 dev_err(&ioa_cfg->pdev->dev, 5247 dev_err(&ioa_cfg->pdev->dev,
5104 "Permanent IOA failure. 0x%08X\n", int_reg); 5248 "Permanent IOA failure. 0x%08X\n", int_reg);
@@ -5121,10 +5265,10 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5121 * Return value: 5265 * Return value:
5122 * none 5266 * none
5123 **/ 5267 **/
5124static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg) 5268static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5125{ 5269{
5126 ioa_cfg->errors_logged++; 5270 ioa_cfg->errors_logged++;
5127 dev_err(&ioa_cfg->pdev->dev, "%s\n", msg); 5271 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5128 5272
5129 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) 5273 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5130 ioa_cfg->sdt_state = GET_DUMP; 5274 ioa_cfg->sdt_state = GET_DUMP;
@@ -5132,6 +5276,83 @@ static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
5132 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 5276 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5133} 5277}
5134 5278
5279static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5280 struct list_head *doneq)
5281{
5282 u32 ioasc;
5283 u16 cmd_index;
5284 struct ipr_cmnd *ipr_cmd;
5285 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5286 int num_hrrq = 0;
5287
5288 /* If interrupts are disabled, ignore the interrupt */
5289 if (!hrr_queue->allow_interrupts)
5290 return 0;
5291
5292 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5293 hrr_queue->toggle_bit) {
5294
5295 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5296 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5297 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5298
5299 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5300 cmd_index < hrr_queue->min_cmd_id)) {
5301 ipr_isr_eh(ioa_cfg,
5302 "Invalid response handle from IOA: ",
5303 cmd_index);
5304 break;
5305 }
5306
5307 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5308 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5309
5310 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5311
5312 list_move_tail(&ipr_cmd->queue, doneq);
5313
5314 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5315 hrr_queue->hrrq_curr++;
5316 } else {
5317 hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5318 hrr_queue->toggle_bit ^= 1u;
5319 }
5320 num_hrrq++;
5321 if (budget > 0 && num_hrrq >= budget)
5322 break;
5323 }
5324
5325 return num_hrrq;
5326}
5327
5328static int ipr_iopoll(struct blk_iopoll *iop, int budget)
5329{
5330 struct ipr_ioa_cfg *ioa_cfg;
5331 struct ipr_hrr_queue *hrrq;
5332 struct ipr_cmnd *ipr_cmd, *temp;
5333 unsigned long hrrq_flags;
5334 int completed_ops;
5335 LIST_HEAD(doneq);
5336
5337 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5338 ioa_cfg = hrrq->ioa_cfg;
5339
5340 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5341 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5342
5343 if (completed_ops < budget)
5344 blk_iopoll_complete(iop);
5345 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5346
5347 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5348 list_del(&ipr_cmd->queue);
5349 del_timer(&ipr_cmd->timer);
5350 ipr_cmd->fast_done(ipr_cmd);
5351 }
5352
5353 return completed_ops;
5354}
5355
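ipr_process_hrrq() is now the single consumer for both completion paths: the hard-IRQ handler passes budget = -1 to drain the ring, while blk-iopoll passes its weight and stops early. Completed commands are collected on a local doneq under the queue lock and their fast_done callbacks run only after it drops. Freshness of a ring entry is decided by a phase (toggle) bit, decoded as above; a condensed sketch:

    /* One 32-bit ring entry.  It is new only while its toggle bit matches
     * the host's expectation; wrapping the ring flips hrrq->toggle_bit,
     * so entries left over from the previous lap can never match. */
    u32 entry = be32_to_cpu(*hrrq->hrrq_curr);
    bool fresh = (entry & IPR_HRRQ_TOGGLE_BIT) == hrrq->toggle_bit;
    u16 cmd_index = (entry & IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
    		IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;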
5135/** 5356/**
5136 * ipr_isr - Interrupt service routine 5357 * ipr_isr - Interrupt service routine
5137 * @irq: irq number 5358 * @irq: irq number
@@ -5142,78 +5363,48 @@ static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
5142 **/ 5363 **/
5143static irqreturn_t ipr_isr(int irq, void *devp) 5364static irqreturn_t ipr_isr(int irq, void *devp)
5144{ 5365{
5145 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp; 5366 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5146 unsigned long lock_flags = 0; 5367 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5368 unsigned long hrrq_flags = 0;
5147 u32 int_reg = 0; 5369 u32 int_reg = 0;
5148 u32 ioasc;
5149 u16 cmd_index;
5150 int num_hrrq = 0; 5370 int num_hrrq = 0;
5151 int irq_none = 0; 5371 int irq_none = 0;
5152 struct ipr_cmnd *ipr_cmd, *temp; 5372 struct ipr_cmnd *ipr_cmd, *temp;
5153 irqreturn_t rc = IRQ_NONE; 5373 irqreturn_t rc = IRQ_NONE;
5154 LIST_HEAD(doneq); 5374 LIST_HEAD(doneq);
5155 5375
5156 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 5376 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5157
5158 /* If interrupts are disabled, ignore the interrupt */ 5377 /* If interrupts are disabled, ignore the interrupt */
5159 if (!ioa_cfg->allow_interrupts) { 5378 if (!hrrq->allow_interrupts) {
5160 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 5379 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5161 return IRQ_NONE; 5380 return IRQ_NONE;
5162 } 5381 }
5163 5382
5164 while (1) { 5383 while (1) {
5165 ipr_cmd = NULL; 5384 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5166 5385 rc = IRQ_HANDLED;
5167 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5168 ioa_cfg->toggle_bit) {
5169
5170 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
5171 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5172
5173 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
5174 ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
5175 rc = IRQ_HANDLED;
5176 goto unlock_out;
5177 }
5178
5179 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5180
5181 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5182 5386
5183 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc); 5387 if (!ioa_cfg->clear_isr)
5184 5388 break;
5185 list_move_tail(&ipr_cmd->queue, &doneq);
5186
5187 rc = IRQ_HANDLED;
5188
5189 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
5190 ioa_cfg->hrrq_curr++;
5191 } else {
5192 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
5193 ioa_cfg->toggle_bit ^= 1u;
5194 }
5195 }
5196
5197 if (ipr_cmd && !ioa_cfg->clear_isr)
5198 break;
5199 5389
5200 if (ipr_cmd != NULL) {
5201 /* Clear the PCI interrupt */ 5390 /* Clear the PCI interrupt */
5202 num_hrrq = 0; 5391 num_hrrq = 0;
5203 do { 5392 do {
5204 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); 5393 writel(IPR_PCII_HRRQ_UPDATED,
5394 ioa_cfg->regs.clr_interrupt_reg32);
5205 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); 5395 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5206 } while (int_reg & IPR_PCII_HRRQ_UPDATED && 5396 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5207 num_hrrq++ < IPR_MAX_HRRQ_RETRIES); 5397 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5208 5398
5209 } else if (rc == IRQ_NONE && irq_none == 0) { 5399 } else if (rc == IRQ_NONE && irq_none == 0) {
5210 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); 5400 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5211 irq_none++; 5401 irq_none++;
5212 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES && 5402 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5213 int_reg & IPR_PCII_HRRQ_UPDATED) { 5403 int_reg & IPR_PCII_HRRQ_UPDATED) {
5214 ipr_isr_eh(ioa_cfg, "Error clearing HRRQ"); 5404 ipr_isr_eh(ioa_cfg,
5405 "Error clearing HRRQ: ", num_hrrq);
5215 rc = IRQ_HANDLED; 5406 rc = IRQ_HANDLED;
5216 goto unlock_out; 5407 break;
5217 } else 5408 } else
5218 break; 5409 break;
5219 } 5410 }
@@ -5221,14 +5412,64 @@ static irqreturn_t ipr_isr(int irq, void *devp)
5221 if (unlikely(rc == IRQ_NONE)) 5412 if (unlikely(rc == IRQ_NONE))
5222 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg); 5413 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5223 5414
5224unlock_out: 5415 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5225 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5226 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) { 5416 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5227 list_del(&ipr_cmd->queue); 5417 list_del(&ipr_cmd->queue);
5228 del_timer(&ipr_cmd->timer); 5418 del_timer(&ipr_cmd->timer);
5229 ipr_cmd->fast_done(ipr_cmd); 5419 ipr_cmd->fast_done(ipr_cmd);
5230 } 5420 }
5421 return rc;
5422}
5423
5424/**
5425 * ipr_isr_mhrrq - Interrupt service routine
5426 * @irq: irq number
5427 * @devp: pointer to ioa config struct
5428 *
5429 * Return value:
5430 * IRQ_NONE / IRQ_HANDLED
5431 **/
5432static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5433{
5434 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5435 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5436 unsigned long hrrq_flags = 0;
5437 struct ipr_cmnd *ipr_cmd, *temp;
5438 irqreturn_t rc = IRQ_NONE;
5439 LIST_HEAD(doneq);
5440
5441 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5442
5443 /* If interrupts are disabled, ignore the interrupt */
5444 if (!hrrq->allow_interrupts) {
5445 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5446 return IRQ_NONE;
5447 }
5448
5449 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
5450 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5451 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5452 hrrq->toggle_bit) {
5453 if (!blk_iopoll_sched_prep(&hrrq->iopoll))
5454 blk_iopoll_sched(&hrrq->iopoll);
5455 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5456 return IRQ_HANDLED;
5457 }
5458 } else {
5459 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5460 hrrq->toggle_bit)
5461
5462 if (ipr_process_hrrq(hrrq, -1, &doneq))
5463 rc = IRQ_HANDLED;
5464 }
5231 5465
5466 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5467
5468 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5469 list_del(&ipr_cmd->queue);
5470 del_timer(&ipr_cmd->timer);
5471 ipr_cmd->fast_done(ipr_cmd);
5472 }
5232 return rc; 5473 return rc;
5233} 5474}
5234 5475
@@ -5388,7 +5629,6 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5388{ 5629{
5389 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 5630 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5390 struct ipr_resource_entry *res = scsi_cmd->device->hostdata; 5631 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5391 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5392 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 5632 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5393 5633
5394 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) { 5634 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
@@ -5406,7 +5646,7 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5406 res->in_erp = 0; 5646 res->in_erp = 0;
5407 } 5647 }
5408 scsi_dma_unmap(ipr_cmd->scsi_cmd); 5648 scsi_dma_unmap(ipr_cmd->scsi_cmd);
5409 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 5649 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5410 scsi_cmd->scsi_done(scsi_cmd); 5650 scsi_cmd->scsi_done(scsi_cmd);
5411} 5651}
5412 5652
@@ -5790,7 +6030,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5790 } 6030 }
5791 6031
5792 scsi_dma_unmap(ipr_cmd->scsi_cmd); 6032 scsi_dma_unmap(ipr_cmd->scsi_cmd);
5793 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 6033 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5794 scsi_cmd->scsi_done(scsi_cmd); 6034 scsi_cmd->scsi_done(scsi_cmd);
5795} 6035}
5796 6036
@@ -5809,21 +6049,21 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
5809 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6049 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5810 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 6050 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5811 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 6051 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5812 unsigned long lock_flags; 6052 unsigned long hrrq_flags;
5813 6053
5814 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len)); 6054 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
5815 6055
5816 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) { 6056 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
5817 scsi_dma_unmap(scsi_cmd); 6057 scsi_dma_unmap(scsi_cmd);
5818 6058
5819 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 6059 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
5820 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 6060 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5821 scsi_cmd->scsi_done(scsi_cmd); 6061 scsi_cmd->scsi_done(scsi_cmd);
5822 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 6062 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
5823 } else { 6063 } else {
5824 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 6064 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
5825 ipr_erp_start(ioa_cfg, ipr_cmd); 6065 ipr_erp_start(ioa_cfg, ipr_cmd);
5826 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 6066 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
5827 } 6067 }
5828} 6068}
5829 6069
@@ -5846,22 +6086,34 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
5846 struct ipr_resource_entry *res; 6086 struct ipr_resource_entry *res;
5847 struct ipr_ioarcb *ioarcb; 6087 struct ipr_ioarcb *ioarcb;
5848 struct ipr_cmnd *ipr_cmd; 6088 struct ipr_cmnd *ipr_cmd;
5849 unsigned long lock_flags; 6089 unsigned long hrrq_flags, lock_flags;
5850 int rc; 6090 int rc;
6091 struct ipr_hrr_queue *hrrq;
6092 int hrrq_id;
5851 6093
5852 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 6094 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
5853 6095
5854 spin_lock_irqsave(shost->host_lock, lock_flags);
5855 scsi_cmd->result = (DID_OK << 16); 6096 scsi_cmd->result = (DID_OK << 16);
5856 res = scsi_cmd->device->hostdata; 6097 res = scsi_cmd->device->hostdata;
5857 6098
6099 if (ipr_is_gata(res) && res->sata_port) {
6100 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6101 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6102 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6103 return rc;
6104 }
6105
6106 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6107 hrrq = &ioa_cfg->hrrq[hrrq_id];
6108
6109 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5858 /* 6110 /*
5859 * We are currently blocking all devices due to a host reset 6111 * We are currently blocking all devices due to a host reset
5860 * We have told the host to stop giving us new requests, but 6112 * We have told the host to stop giving us new requests, but
5861 * ERP ops don't count. FIXME 6113 * ERP ops don't count. FIXME
5862 */ 6114 */
5863 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead)) { 6115 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead)) {
5864 spin_unlock_irqrestore(shost->host_lock, lock_flags); 6116 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5865 return SCSI_MLQUEUE_HOST_BUSY; 6117 return SCSI_MLQUEUE_HOST_BUSY;
5866 } 6118 }
5867 6119
@@ -5869,19 +6121,17 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
5869 * FIXME - Create scsi_set_host_offline interface 6121 * FIXME - Create scsi_set_host_offline interface
5870 * and the ioa_is_dead check can be removed 6122 * and the ioa_is_dead check can be removed
5871 */ 6123 */
5872 if (unlikely(ioa_cfg->ioa_is_dead || !res)) { 6124 if (unlikely(hrrq->ioa_is_dead || !res)) {
5873 spin_unlock_irqrestore(shost->host_lock, lock_flags); 6125 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5874 goto err_nodev; 6126 goto err_nodev;
5875 } 6127 }
5876 6128
5877 if (ipr_is_gata(res) && res->sata_port) { 6129 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
5878 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap); 6130 if (ipr_cmd == NULL) {
5879 spin_unlock_irqrestore(shost->host_lock, lock_flags); 6131 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5880 return rc; 6132 return SCSI_MLQUEUE_HOST_BUSY;
5881 } 6133 }
5882 6134 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5883 ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
5884 spin_unlock_irqrestore(shost->host_lock, lock_flags);
5885 6135
5886 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done); 6136 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
5887 ioarcb = &ipr_cmd->ioarcb; 6137 ioarcb = &ipr_cmd->ioarcb;
@@ -5902,26 +6152,27 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
5902 } 6152 }
5903 6153
5904 if (scsi_cmd->cmnd[0] >= 0xC0 && 6154 if (scsi_cmd->cmnd[0] >= 0xC0 &&
5905 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) 6155 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
5906 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 6156 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6157 }
5907 6158
5908 if (ioa_cfg->sis64) 6159 if (ioa_cfg->sis64)
5909 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd); 6160 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
5910 else 6161 else
5911 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd); 6162 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
5912 6163
5913 spin_lock_irqsave(shost->host_lock, lock_flags); 6164 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5914 if (unlikely(rc || (!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))) { 6165 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
5915 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 6166 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
5916 spin_unlock_irqrestore(shost->host_lock, lock_flags); 6167 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5917 if (!rc) 6168 if (!rc)
5918 scsi_dma_unmap(scsi_cmd); 6169 scsi_dma_unmap(scsi_cmd);
5919 return SCSI_MLQUEUE_HOST_BUSY; 6170 return SCSI_MLQUEUE_HOST_BUSY;
5920 } 6171 }
5921 6172
5922 if (unlikely(ioa_cfg->ioa_is_dead)) { 6173 if (unlikely(hrrq->ioa_is_dead)) {
5923 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 6174 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
5924 spin_unlock_irqrestore(shost->host_lock, lock_flags); 6175 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5925 scsi_dma_unmap(scsi_cmd); 6176 scsi_dma_unmap(scsi_cmd);
5926 goto err_nodev; 6177 goto err_nodev;
5927 } 6178 }
@@ -5931,18 +6182,18 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
5931 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE; 6182 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
5932 res->needs_sync_complete = 0; 6183 res->needs_sync_complete = 0;
5933 } 6184 }
5934 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q); 6185 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
5935 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res)); 6186 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
5936 ipr_send_command(ipr_cmd); 6187 ipr_send_command(ipr_cmd);
5937 spin_unlock_irqrestore(shost->host_lock, lock_flags); 6188 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5938 return 0; 6189 return 0;
5939 6190
5940err_nodev: 6191err_nodev:
5941 spin_lock_irqsave(shost->host_lock, lock_flags); 6192 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5942 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 6193 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
5943 scsi_cmd->result = (DID_NO_CONNECT << 16); 6194 scsi_cmd->result = (DID_NO_CONNECT << 16);
5944 scsi_cmd->scsi_done(scsi_cmd); 6195 scsi_cmd->scsi_done(scsi_cmd);
5945 spin_unlock_irqrestore(shost->host_lock, lock_flags); 6196 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5946 return 0; 6197 return 0;
5947} 6198}
5948 6199
@@ -6040,7 +6291,7 @@ static void ipr_ata_phy_reset(struct ata_port *ap)
6040 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 6291 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6041 } 6292 }
6042 6293
6043 if (!ioa_cfg->allow_cmds) 6294 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6044 goto out_unlock; 6295 goto out_unlock;
6045 6296
6046 rc = ipr_device_reset(ioa_cfg, res); 6297 rc = ipr_device_reset(ioa_cfg, res);
@@ -6071,6 +6322,7 @@ static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6071 struct ipr_sata_port *sata_port = qc->ap->private_data; 6322 struct ipr_sata_port *sata_port = qc->ap->private_data;
6072 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; 6323 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6073 struct ipr_cmnd *ipr_cmd; 6324 struct ipr_cmnd *ipr_cmd;
6325 struct ipr_hrr_queue *hrrq;
6074 unsigned long flags; 6326 unsigned long flags;
6075 6327
6076 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 6328 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
@@ -6080,11 +6332,15 @@ static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6080 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 6332 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6081 } 6333 }
6082 6334
6083 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) { 6335 for_each_hrrq(hrrq, ioa_cfg) {
6084 if (ipr_cmd->qc == qc) { 6336 spin_lock(&hrrq->_lock);
6085 ipr_device_reset(ioa_cfg, sata_port->res); 6337 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6086 break; 6338 if (ipr_cmd->qc == qc) {
6339 ipr_device_reset(ioa_cfg, sata_port->res);
6340 break;
6341 }
6087 } 6342 }
6343 spin_unlock(&hrrq->_lock);
6088 } 6344 }
6089 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 6345 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6090} 6346}
@@ -6133,6 +6389,7 @@ static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6133 struct ipr_resource_entry *res = sata_port->res; 6389 struct ipr_resource_entry *res = sata_port->res;
6134 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 6390 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6135 6391
6392 spin_lock(&ipr_cmd->hrrq->_lock);
6136 if (ipr_cmd->ioa_cfg->sis64) 6393 if (ipr_cmd->ioa_cfg->sis64)
6137 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata, 6394 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6138 sizeof(struct ipr_ioasa_gata)); 6395 sizeof(struct ipr_ioasa_gata));
@@ -6148,7 +6405,8 @@ static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6148 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status); 6405 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6149 else 6406 else
6150 qc->err_mask |= ac_err_mask(sata_port->ioasa.status); 6407 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6151 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 6408 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6409 spin_unlock(&ipr_cmd->hrrq->_lock);
6152 ata_qc_complete(qc); 6410 ata_qc_complete(qc);
6153} 6411}
6154 6412
@@ -6244,6 +6502,48 @@ static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6244} 6502}
6245 6503
6246/** 6504/**
6505 * ipr_qc_defer - Get a free ipr_cmd
6506 * @qc: queued command
6507 *
6508 * Return value:
6509 * 0 on success (or when the IOA is dead) / ATA_DEFER_LINK if no command block is free
6510 **/
6511static int ipr_qc_defer(struct ata_queued_cmd *qc)
6512{
6513 struct ata_port *ap = qc->ap;
6514 struct ipr_sata_port *sata_port = ap->private_data;
6515 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6516 struct ipr_cmnd *ipr_cmd;
6517 struct ipr_hrr_queue *hrrq;
6518 int hrrq_id;
6519
6520 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6521 hrrq = &ioa_cfg->hrrq[hrrq_id];
6522
6523 qc->lldd_task = NULL;
6524 spin_lock(&hrrq->_lock);
6525 if (unlikely(hrrq->ioa_is_dead)) {
6526 spin_unlock(&hrrq->_lock);
6527 return 0;
6528 }
6529
6530 if (unlikely(!hrrq->allow_cmds)) {
6531 spin_unlock(&hrrq->_lock);
6532 return ATA_DEFER_LINK;
6533 }
6534
6535 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6536 if (ipr_cmd == NULL) {
6537 spin_unlock(&hrrq->_lock);
6538 return ATA_DEFER_LINK;
6539 }
6540
6541 qc->lldd_task = ipr_cmd;
6542 spin_unlock(&hrrq->_lock);
6543 return 0;
6544}
6545
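ipr_qc_defer() runs before issue and stashes the reserved command block in qc->lldd_task, which the reworked ipr_qc_issue() in the next hunk consumes. Note the dead-IOA case deliberately returns 0 with no block, so the command fails fast with AC_ERR_SYSTEM at issue time instead of deferring forever. The handshake between the two hunks, condensed:

    if (qc->lldd_task == NULL)	/* no block reserved yet */
    	ipr_qc_defer(qc);	/* may still leave it NULL: dead IOA */
    ipr_cmd = qc->lldd_task;
    if (ipr_cmd == NULL)
    	return AC_ERR_SYSTEM;	/* fail rather than defer */
    qc->lldd_task = NULL;	/* ownership moves to the issue path */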
6546/**
6247 * ipr_qc_issue - Issue a SATA qc to a device 6547 * ipr_qc_issue - Issue a SATA qc to a device
6248 * @qc: queued command 6548 * @qc: queued command
6249 * 6549 *
@@ -6260,10 +6560,23 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6260 struct ipr_ioarcb *ioarcb; 6560 struct ipr_ioarcb *ioarcb;
6261 struct ipr_ioarcb_ata_regs *regs; 6561 struct ipr_ioarcb_ata_regs *regs;
6262 6562
6263 if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead)) 6563 if (qc->lldd_task == NULL)
6564 ipr_qc_defer(qc);
6565
6566 ipr_cmd = qc->lldd_task;
6567 if (ipr_cmd == NULL)
6264 return AC_ERR_SYSTEM; 6568 return AC_ERR_SYSTEM;
6265 6569
6266 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 6570 qc->lldd_task = NULL;
6571 spin_lock(&ipr_cmd->hrrq->_lock);
6572 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6573 ipr_cmd->hrrq->ioa_is_dead)) {
6574 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6575 spin_unlock(&ipr_cmd->hrrq->_lock);
6576 return AC_ERR_SYSTEM;
6577 }
6578
6579 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
6267 ioarcb = &ipr_cmd->ioarcb; 6580 ioarcb = &ipr_cmd->ioarcb;
6268 6581
6269 if (ioa_cfg->sis64) { 6582 if (ioa_cfg->sis64) {
@@ -6275,7 +6588,7 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6275 memset(regs, 0, sizeof(*regs)); 6588 memset(regs, 0, sizeof(*regs));
6276 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs)); 6589 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6277 6590
6278 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q); 6591 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
6279 ipr_cmd->qc = qc; 6592 ipr_cmd->qc = qc;
6280 ipr_cmd->done = ipr_sata_done; 6593 ipr_cmd->done = ipr_sata_done;
6281 ipr_cmd->ioarcb.res_handle = res->res_handle; 6594 ipr_cmd->ioarcb.res_handle = res->res_handle;
@@ -6315,10 +6628,12 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6315 6628
6316 default: 6629 default:
6317 WARN_ON(1); 6630 WARN_ON(1);
6631 spin_unlock(&ipr_cmd->hrrq->_lock);
6318 return AC_ERR_INVALID; 6632 return AC_ERR_INVALID;
6319 } 6633 }
6320 6634
6321 ipr_send_command(ipr_cmd); 6635 ipr_send_command(ipr_cmd);
6636 spin_unlock(&ipr_cmd->hrrq->_lock);
6322 6637
6323 return 0; 6638 return 0;
6324} 6639}
@@ -6357,6 +6672,7 @@ static struct ata_port_operations ipr_sata_ops = {
6357 .hardreset = ipr_sata_reset, 6672 .hardreset = ipr_sata_reset,
6358 .post_internal_cmd = ipr_ata_post_internal, 6673 .post_internal_cmd = ipr_ata_post_internal,
6359 .qc_prep = ata_noop_qc_prep, 6674 .qc_prep = ata_noop_qc_prep,
6675 .qc_defer = ipr_qc_defer,
6360 .qc_issue = ipr_qc_issue, 6676 .qc_issue = ipr_qc_issue,
6361 .qc_fill_rtf = ipr_qc_fill_rtf, 6677 .qc_fill_rtf = ipr_qc_fill_rtf,
6362 .port_start = ata_sas_port_start, 6678 .port_start = ata_sas_port_start,
@@ -6427,7 +6743,7 @@ static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6427 ENTER; 6743 ENTER;
6428 ioa_cfg->in_reset_reload = 0; 6744 ioa_cfg->in_reset_reload = 0;
6429 ioa_cfg->reset_retries = 0; 6745 ioa_cfg->reset_retries = 0;
6430 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 6746 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6431 wake_up_all(&ioa_cfg->reset_wait_q); 6747 wake_up_all(&ioa_cfg->reset_wait_q);
6432 6748
6433 spin_unlock_irq(ioa_cfg->host->host_lock); 6749 spin_unlock_irq(ioa_cfg->host->host_lock);
@@ -6454,11 +6770,16 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6454 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6770 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6455 struct ipr_resource_entry *res; 6771 struct ipr_resource_entry *res;
6456 struct ipr_hostrcb *hostrcb, *temp; 6772 struct ipr_hostrcb *hostrcb, *temp;
6457 int i = 0; 6773 int i = 0, j;
6458 6774
6459 ENTER; 6775 ENTER;
6460 ioa_cfg->in_reset_reload = 0; 6776 ioa_cfg->in_reset_reload = 0;
6461 ioa_cfg->allow_cmds = 1; 6777 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
6778 spin_lock(&ioa_cfg->hrrq[j]._lock);
6779 ioa_cfg->hrrq[j].allow_cmds = 1;
6780 spin_unlock(&ioa_cfg->hrrq[j]._lock);
6781 }
6782 wmb();
6462 ioa_cfg->reset_cmd = NULL; 6783 ioa_cfg->reset_cmd = NULL;
6463 ioa_cfg->doorbell |= IPR_RUNTIME_RESET; 6784 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6464 6785
@@ -6482,14 +6803,14 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6482 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n"); 6803 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6483 6804
6484 ioa_cfg->reset_retries = 0; 6805 ioa_cfg->reset_retries = 0;
6485 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 6806 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6486 wake_up_all(&ioa_cfg->reset_wait_q); 6807 wake_up_all(&ioa_cfg->reset_wait_q);
6487 6808
6488 spin_unlock(ioa_cfg->host->host_lock); 6809 spin_unlock(ioa_cfg->host->host_lock);
6489 scsi_unblock_requests(ioa_cfg->host); 6810 scsi_unblock_requests(ioa_cfg->host);
6490 spin_lock(ioa_cfg->host->host_lock); 6811 spin_lock(ioa_cfg->host->host_lock);
6491 6812
6492 if (!ioa_cfg->allow_cmds) 6813 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6493 scsi_block_requests(ioa_cfg->host); 6814 scsi_block_requests(ioa_cfg->host);
6494 6815
6495 LEAVE; 6816 LEAVE;
@@ -6560,9 +6881,11 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6560 6881
6561 if (!ioa_cfg->sis64) 6882 if (!ioa_cfg->sis64)
6562 ipr_cmd->job_step = ipr_set_supported_devs; 6883 ipr_cmd->job_step = ipr_set_supported_devs;
6884 LEAVE;
6563 return IPR_RC_JOB_RETURN; 6885 return IPR_RC_JOB_RETURN;
6564 } 6886 }
6565 6887
6888 LEAVE;
6566 return IPR_RC_JOB_CONTINUE; 6889 return IPR_RC_JOB_CONTINUE;
6567} 6890}
6568 6891
@@ -6820,7 +7143,7 @@ static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6820 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc); 7143 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
6821 7144
6822 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 7145 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6823 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 7146 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6824 return IPR_RC_JOB_RETURN; 7147 return IPR_RC_JOB_RETURN;
6825} 7148}
6826 7149
@@ -7278,46 +7601,71 @@ static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7278{ 7601{
7279 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7602 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7280 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 7603 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7604 struct ipr_hrr_queue *hrrq;
7281 7605
7282 ENTER; 7606 ENTER;
7607 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7283 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n"); 7608 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7284 7609
7285 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q; 7610 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7286 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 7611 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
7287 7612
7288 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 7613 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7289 if (ioa_cfg->sis64) 7614 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7290 ioarcb->cmd_pkt.cdb[1] = 0x1;
7291 ioarcb->cmd_pkt.cdb[2] =
7292 ((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
7293 ioarcb->cmd_pkt.cdb[3] =
7294 ((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
7295 ioarcb->cmd_pkt.cdb[4] =
7296 ((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
7297 ioarcb->cmd_pkt.cdb[5] =
7298 ((u64) ioa_cfg->host_rrq_dma) & 0xff;
7299 ioarcb->cmd_pkt.cdb[7] =
7300 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
7301 ioarcb->cmd_pkt.cdb[8] =
7302 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
7303 7615
7304 if (ioa_cfg->sis64) { 7616 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7305 ioarcb->cmd_pkt.cdb[10] = 7617 if (ioa_cfg->sis64)
7306 ((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff; 7618 ioarcb->cmd_pkt.cdb[1] = 0x1;
7307 ioarcb->cmd_pkt.cdb[11] =
7308 ((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
7309 ioarcb->cmd_pkt.cdb[12] =
7310 ((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
7311 ioarcb->cmd_pkt.cdb[13] =
7312 ((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
7313 }
7314 7619
7315 ipr_cmd->job_step = ipr_ioafp_std_inquiry; 7620 if (ioa_cfg->nvectors == 1)
7621 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7622 else
7623 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7624
7625 ioarcb->cmd_pkt.cdb[2] =
7626 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7627 ioarcb->cmd_pkt.cdb[3] =
7628 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7629 ioarcb->cmd_pkt.cdb[4] =
7630 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7631 ioarcb->cmd_pkt.cdb[5] =
7632 ((u64) hrrq->host_rrq_dma) & 0xff;
7633 ioarcb->cmd_pkt.cdb[7] =
7634 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7635 ioarcb->cmd_pkt.cdb[8] =
7636 (sizeof(u32) * hrrq->size) & 0xff;
7637
7638 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7639 ioarcb->cmd_pkt.cdb[9] =
7640 ioa_cfg->identify_hrrq_index;
7316 7641
7317 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 7642 if (ioa_cfg->sis64) {
7643 ioarcb->cmd_pkt.cdb[10] =
7644 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7645 ioarcb->cmd_pkt.cdb[11] =
7646 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7647 ioarcb->cmd_pkt.cdb[12] =
7648 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7649 ioarcb->cmd_pkt.cdb[13] =
7650 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7651 }
7652
7653 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7654 ioarcb->cmd_pkt.cdb[14] =
7655 ioa_cfg->identify_hrrq_index;
7656
7657 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7658 IPR_INTERNAL_TIMEOUT);
7659
7660 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7661 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7662
7663 LEAVE;
7664 return IPR_RC_JOB_RETURN;
7665 }
7318 7666
7319 LEAVE; 7667 LEAVE;
7320 return IPR_RC_JOB_RETURN; 7668 return IPR_RC_JOB_CONTINUE;
7321} 7669}
7322 7670
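Identify Host RRQ is now sent once per queue: the job step re-arms itself until identify_hrrq_index reaches hrrq_num, then falls through to the standard inquiry. The CDB the hunk assembles, gathered in one place for reference (fields exactly as set above):

    /*
     * IPR_ID_HOST_RR_Q CDB, built once per queue:
     *   cdb[0]       IPR_ID_HOST_RR_Q
     *   cdb[1]       0x1 on SIS-64; IPR_ID_HRRQ_SELE_ENABLE or'ed in
     *                when nvectors > 1, cleared otherwise
     *   cdb[2..5]    hrrq->host_rrq_dma, bits 31..0
     *   cdb[7..8]    ring size in bytes (sizeof(u32) * hrrq->size)
     *   cdb[9]       identify_hrrq_index (only with SELE enabled)
     *   cdb[10..13]  hrrq->host_rrq_dma, bits 63..32 (SIS-64 only)
     *   cdb[14]      identify_hrrq_index (only with SELE enabled)
     */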
7323/** 7671/**
@@ -7365,7 +7713,9 @@ static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7365static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd, 7713static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7366 unsigned long timeout) 7714 unsigned long timeout)
7367{ 7715{
7368 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q); 7716
7717 ENTER;
7718 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7369 ipr_cmd->done = ipr_reset_ioa_job; 7719 ipr_cmd->done = ipr_reset_ioa_job;
7370 7720
7371 ipr_cmd->timer.data = (unsigned long) ipr_cmd; 7721 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
@@ -7383,13 +7733,26 @@ static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7383 **/ 7733 **/
7384static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg) 7734static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7385{ 7735{
7386 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS); 7736 struct ipr_hrr_queue *hrrq;
7737
7738 for_each_hrrq(hrrq, ioa_cfg) {
7739 spin_lock(&hrrq->_lock);
7740 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7741
7742 /* Initialize Host RRQ pointers */
7743 hrrq->hrrq_start = hrrq->host_rrq;
7744 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7745 hrrq->hrrq_curr = hrrq->hrrq_start;
7746 hrrq->toggle_bit = 1;
7747 spin_unlock(&hrrq->_lock);
7748 }
7749 wmb();
7387 7750
7388 /* Initialize Host RRQ pointers */ 7751 ioa_cfg->identify_hrrq_index = 0;
7389 ioa_cfg->hrrq_start = ioa_cfg->host_rrq; 7752 if (ioa_cfg->hrrq_num == 1)
7390 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1]; 7753 atomic_set(&ioa_cfg->hrrq_index, 0);
7391 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start; 7754 else
7392 ioa_cfg->toggle_bit = 1; 7755 atomic_set(&ioa_cfg->hrrq_index, 1);
7393 7756
7394 /* Zero out config table */ 7757 /* Zero out config table */
7395 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size); 7758 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
@@ -7446,7 +7809,8 @@ static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7446 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout; 7809 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7447 ipr_cmd->done = ipr_reset_ioa_job; 7810 ipr_cmd->done = ipr_reset_ioa_job;
7448 add_timer(&ipr_cmd->timer); 7811 add_timer(&ipr_cmd->timer);
7449 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q); 7812
7813 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7450 7814
7451 return IPR_RC_JOB_RETURN; 7815 return IPR_RC_JOB_RETURN;
7452} 7816}
@@ -7466,12 +7830,18 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7466 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7830 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7467 volatile u32 int_reg; 7831 volatile u32 int_reg;
7468 volatile u64 maskval; 7832 volatile u64 maskval;
7833 int i;
7469 7834
7470 ENTER; 7835 ENTER;
7471 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; 7836 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7472 ipr_init_ioa_mem(ioa_cfg); 7837 ipr_init_ioa_mem(ioa_cfg);
7473 7838
7474 ioa_cfg->allow_interrupts = 1; 7839 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7840 spin_lock(&ioa_cfg->hrrq[i]._lock);
7841 ioa_cfg->hrrq[i].allow_interrupts = 1;
7842 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7843 }
7844 wmb();
7475 if (ioa_cfg->sis64) { 7845 if (ioa_cfg->sis64) {
7476 /* Set the adapter to the correct endian mode. */ 7846 /* Set the adapter to the correct endian mode. */
7477 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); 7847 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
@@ -7511,7 +7881,7 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7511 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout; 7881 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7512 ipr_cmd->done = ipr_reset_ioa_job; 7882 ipr_cmd->done = ipr_reset_ioa_job;
7513 add_timer(&ipr_cmd->timer); 7883 add_timer(&ipr_cmd->timer);
7514 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q); 7884 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7515 7885
7516 LEAVE; 7886 LEAVE;
7517 return IPR_RC_JOB_RETURN; 7887 return IPR_RC_JOB_RETURN;
@@ -8030,7 +8400,8 @@ static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8030 int rc = IPR_RC_JOB_CONTINUE; 8400 int rc = IPR_RC_JOB_CONTINUE;
8031 8401
8032 ENTER; 8402 ENTER;
8033 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) { 8403 if (shutdown_type != IPR_SHUTDOWN_NONE &&
8404 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8034 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 8405 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8035 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 8406 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8036 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN; 8407 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
@@ -8078,7 +8449,8 @@ static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8078 * We are doing nested adapter resets and this is 8449 * We are doing nested adapter resets and this is
8079 * not the current reset job. 8450 * not the current reset job.
8080 */ 8451 */
8081 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 8452 list_add_tail(&ipr_cmd->queue,
8453 &ipr_cmd->hrrq->hrrq_free_q);
8082 return; 8454 return;
8083 } 8455 }
8084 8456
@@ -8113,9 +8485,15 @@ static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
 			       enum ipr_shutdown_type shutdown_type)
 {
 	struct ipr_cmnd *ipr_cmd;
+	int i;
 
 	ioa_cfg->in_reset_reload = 1;
-	ioa_cfg->allow_cmds = 0;
+	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+		spin_lock(&ioa_cfg->hrrq[i]._lock);
+		ioa_cfg->hrrq[i].allow_cmds = 0;
+		spin_unlock(&ioa_cfg->hrrq[i]._lock);
+	}
+	wmb();
 	scsi_block_requests(ioa_cfg->host);
 
 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
@@ -8141,7 +8519,9 @@ static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
 				   enum ipr_shutdown_type shutdown_type)
 {
-	if (ioa_cfg->ioa_is_dead)
+	int i;
+
+	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
 		return;
 
 	if (ioa_cfg->in_reset_reload) {
@@ -8156,7 +8536,12 @@ static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8156 "IOA taken offline - error recovery failed\n"); 8536 "IOA taken offline - error recovery failed\n");
8157 8537
8158 ioa_cfg->reset_retries = 0; 8538 ioa_cfg->reset_retries = 0;
8159 ioa_cfg->ioa_is_dead = 1; 8539 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8540 spin_lock(&ioa_cfg->hrrq[i]._lock);
8541 ioa_cfg->hrrq[i].ioa_is_dead = 1;
8542 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8543 }
8544 wmb();
8160 8545
8161 if (ioa_cfg->in_ioa_bringdown) { 8546 if (ioa_cfg->in_ioa_bringdown) {
8162 ioa_cfg->reset_cmd = NULL; 8547 ioa_cfg->reset_cmd = NULL;
@@ -8188,9 +8573,17 @@ static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
  */
 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
 {
+	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+	int i;
+
 	/* Disallow new interrupts, avoid loop */
-	ipr_cmd->ioa_cfg->allow_interrupts = 0;
-	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
+	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+		spin_lock(&ioa_cfg->hrrq[i]._lock);
+		ioa_cfg->hrrq[i].allow_interrupts = 0;
+		spin_unlock(&ioa_cfg->hrrq[i]._lock);
+	}
+	wmb();
+	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
 	ipr_cmd->done = ipr_reset_ioa_job;
 	return IPR_RC_JOB_RETURN;
 }
@@ -8247,13 +8640,19 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev)
 {
 	unsigned long flags = 0;
 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
+	int i;
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
 		ioa_cfg->sdt_state = ABORT_DUMP;
 	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
 	ioa_cfg->in_ioa_bringdown = 1;
-	ioa_cfg->allow_cmds = 0;
+	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+		spin_lock(&ioa_cfg->hrrq[i]._lock);
+		ioa_cfg->hrrq[i].allow_cmds = 0;
+		spin_unlock(&ioa_cfg->hrrq[i]._lock);
+	}
+	wmb();
 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 }
@@ -8310,12 +8709,11 @@ static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
 	} else
 		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
 					IPR_SHUTDOWN_NONE);
-
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
 
-	if (ioa_cfg->ioa_is_dead) {
+	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
 		rc = -EIO;
 	} else if (ipr_invalid_adapter(ioa_cfg)) {
 		if (!ipr_testmode)
@@ -8376,8 +8774,13 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
 	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
 			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
 	ipr_free_cmd_blks(ioa_cfg);
-	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
-			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
+
+	for (i = 0; i < ioa_cfg->hrrq_num; i++)
+		pci_free_consistent(ioa_cfg->pdev,
+				sizeof(u32) * ioa_cfg->hrrq[i].size,
+				ioa_cfg->hrrq[i].host_rrq,
+				ioa_cfg->hrrq[i].host_rrq_dma);
+
 	pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
 			    ioa_cfg->u.cfg_table,
 			    ioa_cfg->cfg_table_dma);
@@ -8408,8 +8811,23 @@ static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
 	struct pci_dev *pdev = ioa_cfg->pdev;
 
 	ENTER;
-	free_irq(pdev->irq, ioa_cfg);
-	pci_disable_msi(pdev);
+	if (ioa_cfg->intr_flag == IPR_USE_MSI ||
+	    ioa_cfg->intr_flag == IPR_USE_MSIX) {
+		int i;
+		for (i = 0; i < ioa_cfg->nvectors; i++)
+			free_irq(ioa_cfg->vectors_info[i].vec,
+				&ioa_cfg->hrrq[i]);
+	} else
+		free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
+
+	if (ioa_cfg->intr_flag == IPR_USE_MSI) {
+		pci_disable_msi(pdev);
+		ioa_cfg->intr_flag &= ~IPR_USE_MSI;
+	} else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
+		pci_disable_msix(pdev);
+		ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
+	}
+
 	iounmap(ioa_cfg->hdw_dma_regs);
 	pci_release_regions(pdev);
 	ipr_free_mem(ioa_cfg);
@@ -8430,7 +8848,7 @@ static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
 	struct ipr_cmnd *ipr_cmd;
 	struct ipr_ioarcb *ioarcb;
 	dma_addr_t dma_addr;
-	int i;
+	int i, entries_each_hrrq, hrrq_id = 0;
 
 	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
 						sizeof(struct ipr_cmnd), 512, 0);
@@ -8446,6 +8864,41 @@ static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
 		return -ENOMEM;
 	}
 
+	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+		if (ioa_cfg->hrrq_num > 1) {
+			if (i == 0) {
+				entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
+				ioa_cfg->hrrq[i].min_cmd_id = 0;
+				ioa_cfg->hrrq[i].max_cmd_id =
+					(entries_each_hrrq - 1);
+			} else {
+				entries_each_hrrq =
+					IPR_NUM_BASE_CMD_BLKS/
+					(ioa_cfg->hrrq_num - 1);
+				ioa_cfg->hrrq[i].min_cmd_id =
+					IPR_NUM_INTERNAL_CMD_BLKS +
+					(i - 1) * entries_each_hrrq;
+				ioa_cfg->hrrq[i].max_cmd_id =
+					(IPR_NUM_INTERNAL_CMD_BLKS +
+					i * entries_each_hrrq - 1);
+			}
+		} else {
+			entries_each_hrrq = IPR_NUM_CMD_BLKS;
+			ioa_cfg->hrrq[i].min_cmd_id = 0;
+			ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
+		}
+		ioa_cfg->hrrq[i].size = entries_each_hrrq;
+	}
+
+	BUG_ON(ioa_cfg->hrrq_num == 0);
+
+	i = IPR_NUM_CMD_BLKS -
+		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
+	if (i > 0) {
+		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
+		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
+	}
+
 	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
 		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
 
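
The block above carves IPR_NUM_CMD_BLKS command ids into per-queue ranges: queue 0 keeps the internal command blocks, the remaining ids are split evenly across the other queues, and the division remainder is folded into the last queue. The arithmetic can be checked in isolation with the stand-alone sketch below; the two constants are placeholders, not the driver's real values:

#include <assert.h>
#include <stdio.h>

/* Placeholder values; the real literals live in ipr.h and differ. */
#define NUM_INTERNAL_CMD_BLKS	8
#define NUM_BASE_CMD_BLKS	100
#define NUM_CMD_BLKS		(NUM_BASE_CMD_BLKS + NUM_INTERNAL_CMD_BLKS)

struct range { unsigned int min, max, size; };

/* Mirror the carve-up: queue 0 keeps the internal/reset commands, the
 * rest are split evenly, and any remainder is folded into the last
 * queue so every id in [0, NUM_CMD_BLKS) is owned exactly once. */
static void partition(struct range *q, int hrrq_num)
{
	assert(hrrq_num > 0);
	for (int i = 0; i < hrrq_num; i++) {
		unsigned int entries;
		if (hrrq_num > 1) {
			if (i == 0) {
				entries = NUM_INTERNAL_CMD_BLKS;
				q[i].min = 0;
				q[i].max = entries - 1;
			} else {
				entries = NUM_BASE_CMD_BLKS / (hrrq_num - 1);
				q[i].min = NUM_INTERNAL_CMD_BLKS +
					   (i - 1) * entries;
				q[i].max = NUM_INTERNAL_CMD_BLKS +
					   i * entries - 1;
			}
		} else {
			entries = NUM_CMD_BLKS;
			q[i].min = 0;
			q[i].max = entries - 1;
		}
		q[i].size = entries;
	}
	unsigned int left = NUM_CMD_BLKS - q[hrrq_num - 1].max - 1;
	q[hrrq_num - 1].size += left;
	q[hrrq_num - 1].max += left;
}

int main(void)
{
	struct range q[4];

	partition(q, 4);
	for (int i = 0; i < 4; i++)
		printf("hrrq[%d]: ids %u..%u (%u entries)\n",
		       i, q[i].min, q[i].max, q[i].size);
	return 0;
}

With four queues and the placeholder constants this prints 0..7, 8..40, 41..73 and 74..107, covering all 108 ids exactly once.
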
@@ -8484,7 +8937,11 @@ static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
 		ipr_cmd->sense_buffer_dma = dma_addr +
 			offsetof(struct ipr_cmnd, sense_buffer);
 
-		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+		ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
+		ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
+		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+		if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
+			hrrq_id++;
 	}
 
 	return 0;
@@ -8516,6 +8973,10 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
 			      BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
 		ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
 				      BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
+
+		if (!ioa_cfg->target_ids || !ioa_cfg->array_ids
+			|| !ioa_cfg->vset_ids)
+			goto out_free_res_entries;
 	}
 
 	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
@@ -8530,15 +8991,34 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
 	if (!ioa_cfg->vpd_cbs)
 		goto out_free_res_entries;
 
+	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
+		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
+		spin_lock_init(&ioa_cfg->hrrq[i]._lock);
+		if (i == 0)
+			ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
+		else
+			ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
+	}
+
 	if (ipr_alloc_cmd_blks(ioa_cfg))
 		goto out_free_vpd_cbs;
 
-	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
-						 sizeof(u32) * IPR_NUM_CMD_BLKS,
-						 &ioa_cfg->host_rrq_dma);
-
-	if (!ioa_cfg->host_rrq)
-		goto out_ipr_free_cmd_blocks;
+	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+		ioa_cfg->hrrq[i].host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
+					sizeof(u32) * ioa_cfg->hrrq[i].size,
+					&ioa_cfg->hrrq[i].host_rrq_dma);
+
+		if (!ioa_cfg->hrrq[i].host_rrq)  {
+			while (--i > 0)
+				pci_free_consistent(pdev,
+					sizeof(u32) * ioa_cfg->hrrq[i].size,
+					ioa_cfg->hrrq[i].host_rrq,
+					ioa_cfg->hrrq[i].host_rrq_dma);
+			goto out_ipr_free_cmd_blocks;
+		}
+		ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
+	}
 
 	ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
 						    ioa_cfg->cfg_table_size,
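
Each HRRQ now owns a host_rrq ring sized to its share of the command blocks, so a failure in the middle of the allocation loop has to release the rings already obtained. A user-space sketch of that alloc/unwind shape, with calloc/free standing in for pci_alloc_consistent/pci_free_consistent; note that the sketch unwinds with --i >= 0 so that index 0 is released as well:

#include <stdlib.h>

struct rrq_model {
	unsigned int *ring;
	unsigned int size;	/* entries, like hrrq[i].size */
};

/* Allocate one ring per queue; on failure, free everything already
 * allocated before reporting the error to the caller. */
static int alloc_rings(struct rrq_model *q, int n)
{
	for (int i = 0; i < n; i++) {
		q[i].ring = calloc(q[i].size, sizeof(*q[i].ring));
		if (!q[i].ring) {
			while (--i >= 0) {	/* unwind down to index 0 */
				free(q[i].ring);
				q[i].ring = NULL;
			}
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	struct rrq_model q[4] = { {0, 8}, {0, 33}, {0, 33}, {0, 34} };

	return alloc_rings(q, 4);
}
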
@@ -8582,8 +9062,12 @@ out_free_hostrcb_dma:
 			    ioa_cfg->u.cfg_table,
 			    ioa_cfg->cfg_table_dma);
 out_free_host_rrq:
-	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
-			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
+	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+		pci_free_consistent(pdev,
+				sizeof(u32) * ioa_cfg->hrrq[i].size,
+				ioa_cfg->hrrq[i].host_rrq,
+				ioa_cfg->hrrq[i].host_rrq_dma);
+	}
 out_ipr_free_cmd_blocks:
 	ipr_free_cmd_blks(ioa_cfg);
 out_free_vpd_cbs:
@@ -8591,6 +9075,9 @@ out_free_vpd_cbs:
 			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
 out_free_res_entries:
 	kfree(ioa_cfg->res_entries);
+	kfree(ioa_cfg->target_ids);
+	kfree(ioa_cfg->array_ids);
+	kfree(ioa_cfg->vset_ids);
 	goto out;
 }
 
@@ -8638,15 +9125,11 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
 	ioa_cfg->doorbell = IPR_DOORBELL;
 	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
 	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
-	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
-	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
 	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
 	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
 	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
 	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
 
-	INIT_LIST_HEAD(&ioa_cfg->free_q);
-	INIT_LIST_HEAD(&ioa_cfg->pending_q);
 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
 	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
@@ -8724,6 +9207,88 @@ ipr_get_chip_info(const struct pci_device_id *dev_id)
 	return NULL;
 }
 
+static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
+{
+	struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
+	int i, err, vectors;
+
+	for (i = 0; i < ARRAY_SIZE(entries); ++i)
+		entries[i].entry = i;
+
+	vectors = ipr_number_of_msix;
+
+	while ((err = pci_enable_msix(ioa_cfg->pdev, entries, vectors)) > 0)
+		vectors = err;
+
+	if (err < 0) {
+		pci_disable_msix(ioa_cfg->pdev);
+		return err;
+	}
+
+	if (!err) {
+		for (i = 0; i < vectors; i++)
+			ioa_cfg->vectors_info[i].vec = entries[i].vector;
+		ioa_cfg->nvectors = vectors;
+	}
+
+	return err;
+}
+
+static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
+{
+	int i, err, vectors;
+
+	vectors = ipr_number_of_msix;
+
+	while ((err = pci_enable_msi_block(ioa_cfg->pdev, vectors)) > 0)
+		vectors = err;
+
+	if (err < 0) {
+		pci_disable_msi(ioa_cfg->pdev);
+		return err;
+	}
+
+	if (!err) {
+		for (i = 0; i < vectors; i++)
+			ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
+		ioa_cfg->nvectors = vectors;
+	}
+
+	return err;
+}
+
+static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
+{
+	int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
+
+	for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
+		snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
+			 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
+		ioa_cfg->vectors_info[vec_idx].
+			desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
+	}
+}
+
+static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
+{
+	int i, rc;
+
+	for (i = 1; i < ioa_cfg->nvectors; i++) {
+		rc = request_irq(ioa_cfg->vectors_info[i].vec,
+			ipr_isr_mhrrq,
+			0,
+			ioa_cfg->vectors_info[i].desc,
+			&ioa_cfg->hrrq[i]);
+		if (rc) {
+			while (--i >= 0)
+				free_irq(ioa_cfg->vectors_info[i].vec,
+					&ioa_cfg->hrrq[i]);
+			return rc;
+		}
+	}
+	return 0;
+}
+
 /**
  * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
  * @pdev: PCI device struct
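
ipr_enable_msix()/ipr_enable_msi() lean on the contract of the old pci_enable_msix()/pci_enable_msi_block() interfaces: a positive return value means "only this many vectors are available, ask again", so the while loop shrinks the request until it either succeeds (0) or hard-fails (negative). A self-contained model of that negotiation, with fake_enable_msix() standing in for the PCI call:

#include <stdio.h>

/* Stand-in for the old pci_enable_msix() contract: 0 on success, the
 * number of available vectors when the request was too large, negative
 * on hard failure. */
static int fake_enable_msix(int requested, int available)
{
	if (available <= 0)
		return -1;
	return requested <= available ? 0 : available;
}

static int negotiate(int want, int available, int *granted)
{
	int err;

	while ((err = fake_enable_msix(want, available)) > 0)
		want = err;	/* retry with what the device offers */
	if (err < 0)
		return err;
	*granted = want;
	return 0;
}

int main(void)
{
	int got;

	if (negotiate(16, 5, &got) == 0)
		printf("granted %d vectors\n", got);	/* prints 5 */
	return 0;
}
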
@@ -8740,6 +9305,7 @@ static irqreturn_t ipr_test_intr(int irq, void *devp)
 	unsigned long lock_flags = 0;
 	irqreturn_t rc = IRQ_HANDLED;
 
+	dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 
 	ioa_cfg->msi_received = 1;
@@ -8787,9 +9353,9 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
 	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
+	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
 
-	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 	if (!ioa_cfg->msi_received) {
 		/* MSI test failed */
 		dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
@@ -8806,8 +9372,7 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
 	return rc;
 }
 
-/**
- * ipr_probe_ioa - Allocates memory and does first stage of initialization
+ /* ipr_probe_ioa - Allocates memory and does first stage of initialization
  * @pdev: PCI device struct
  * @dev_id: PCI device id struct
  *
@@ -8823,6 +9388,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
 	void __iomem *ipr_regs;
 	int rc = PCIBIOS_SUCCESSFUL;
 	volatile u32 mask, uproc, interrupts;
+	unsigned long lock_flags;
 
 	ENTER;
 
@@ -8918,17 +9484,56 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
 		goto cleanup_nomem;
 	}
 
-	/* Enable MSI style interrupts if they are supported. */
-	if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
+	if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
+		dev_err(&pdev->dev, "The max number of MSIX is %d\n",
+			IPR_MAX_MSIX_VECTORS);
+		ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
+	}
+
+	if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
+			ipr_enable_msix(ioa_cfg) == 0)
+		ioa_cfg->intr_flag = IPR_USE_MSIX;
+	else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
+			ipr_enable_msi(ioa_cfg) == 0)
+		ioa_cfg->intr_flag = IPR_USE_MSI;
+	else {
+		ioa_cfg->intr_flag = IPR_USE_LSI;
+		ioa_cfg->nvectors = 1;
+		dev_info(&pdev->dev, "Cannot enable MSI.\n");
+	}
+
+	if (ioa_cfg->intr_flag == IPR_USE_MSI ||
+	    ioa_cfg->intr_flag == IPR_USE_MSIX) {
 		rc = ipr_test_msi(ioa_cfg, pdev);
-		if (rc == -EOPNOTSUPP)
-			pci_disable_msi(pdev);
+		if (rc == -EOPNOTSUPP) {
+			if (ioa_cfg->intr_flag == IPR_USE_MSI) {
+				ioa_cfg->intr_flag &= ~IPR_USE_MSI;
+				pci_disable_msi(pdev);
+			} else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
+				ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
+				pci_disable_msix(pdev);
+			}
+
+			ioa_cfg->intr_flag = IPR_USE_LSI;
+			ioa_cfg->nvectors = 1;
+		}
 		else if (rc)
 			goto out_msi_disable;
-		else
-			dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
-	} else if (ipr_debug)
-		dev_info(&pdev->dev, "Cannot enable MSI.\n");
+		else {
+			if (ioa_cfg->intr_flag == IPR_USE_MSI)
+				dev_info(&pdev->dev,
+					"Request for %d MSIs succeeded with starting IRQ: %d\n",
+					ioa_cfg->nvectors, pdev->irq);
+			else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
+				dev_info(&pdev->dev,
+					"Request for %d MSIXs succeeded.",
+					ioa_cfg->nvectors);
+		}
+	}
+
+	ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
+				(unsigned int)num_online_cpus(),
+				(unsigned int)IPR_MAX_HRRQ_NUM);
 
 	/* Save away PCI config space for use following IOA reset */
 	rc = pci_save_state(pdev);
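
hrrq_num ends up as the smallest of three independent limits: vectors actually granted, online CPUs, and the IPR_MAX_HRRQ_NUM cap, which is all min3() computes. The sketch below pairs that with one plausible dispatch rule suggested by the atomic hrrq_index added in ipr.h (queue 0 reserved for internal commands, I/O rotated over the rest); the helper is hypothetical, not the driver's own:

#include <stdatomic.h>
#include <stdio.h>

#define MAX_HRRQ_NUM 16		/* mirrors IPR_MAX_HRRQ_NUM (0x10) */

static unsigned int min3u(unsigned int a, unsigned int b, unsigned int c)
{
	unsigned int m = a < b ? a : b;

	return m < c ? m : c;
}

static atomic_uint hrrq_index;	/* models ioa_cfg->hrrq_index */

/* Queue 0 stays reserved; I/O rotates over queues 1..hrrq_num-1. */
static unsigned int pick_hrrq(unsigned int hrrq_num)
{
	if (hrrq_num == 1)
		return 0;
	return 1 + atomic_fetch_add(&hrrq_index, 1) % (hrrq_num - 1);
}

int main(void)
{
	unsigned int nvectors = 5, cpus = 8;
	unsigned int hrrq_num = min3u(nvectors, cpus, MAX_HRRQ_NUM);

	for (int i = 0; i < 6; i++)
		printf("cmd %d -> hrrq %u of %u\n",
		       i, pick_hrrq(hrrq_num), hrrq_num);
	return 0;
}
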
@@ -8975,11 +9580,24 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
 	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
 		ioa_cfg->ioa_unit_checked = 1;
 
+	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
-	rc = request_irq(pdev->irq, ipr_isr,
-			 ioa_cfg->msi_received ? 0 : IRQF_SHARED,
-			 IPR_NAME, ioa_cfg);
+	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 
+	if (ioa_cfg->intr_flag == IPR_USE_MSI
+			|| ioa_cfg->intr_flag == IPR_USE_MSIX) {
+		name_msi_vectors(ioa_cfg);
+		rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
+			0,
+			ioa_cfg->vectors_info[0].desc,
+			&ioa_cfg->hrrq[0]);
+		if (!rc)
+			rc = ipr_request_other_msi_irqs(ioa_cfg);
+	} else {
+		rc = request_irq(pdev->irq, ipr_isr,
+			IRQF_SHARED,
+			IPR_NAME, &ioa_cfg->hrrq[0]);
+	}
 	if (rc) {
 		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
 			pdev->irq, rc);
@@ -9004,7 +9622,10 @@ out:
 cleanup_nolog:
 	ipr_free_mem(ioa_cfg);
 out_msi_disable:
-	pci_disable_msi(pdev);
+	if (ioa_cfg->intr_flag == IPR_USE_MSI)
+		pci_disable_msi(pdev);
+	else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
+		pci_disable_msix(pdev);
 cleanup_nomem:
 	iounmap(ipr_regs);
 out_release_regions:
@@ -9138,7 +9759,7 @@ static void ipr_remove(struct pci_dev *pdev)
 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
 {
 	struct ipr_ioa_cfg *ioa_cfg;
-	int rc;
+	int rc, i;
 
 	rc = ipr_probe_ioa(pdev, dev_id);
 
@@ -9185,6 +9806,17 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
 	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
 	ioa_cfg->allow_ml_add_del = 1;
 	ioa_cfg->host->max_channel = IPR_VSET_BUS;
+	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
+
+	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
+			blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
+					ioa_cfg->iopoll_weight, ipr_iopoll);
+			blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
+		}
+	}
+
 	schedule_work(&ioa_cfg->work_q);
 	return 0;
 }
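
blk_iopoll gives each extra HRRQ a budgeted poller: one pass may complete at most iopoll_weight entries, and the real driver re-enables the queue's interrupt only after a pass drains the ring within budget. A user-space model of that contract (the ring type and the "completion" are invented for the sketch):

#include <stdio.h>

struct ring_model { int pending; };

/* Consume at most 'budget' completions; report how many were handled
 * and whether the ring is now empty (only then would the real driver
 * call blk_iopoll_complete() and unmask the queue's interrupt). */
static int iopoll_once(struct ring_model *r, int budget, int *done)
{
	int handled = 0;

	while (r->pending > 0 && handled < budget) {
		r->pending--;
		handled++;
	}
	*done = (r->pending == 0);
	return handled;
}

int main(void)
{
	struct ring_model r = { .pending = 10 };
	int done;

	for (;;) {
		int n = iopoll_once(&r, 4, &done);

		printf("handled %d, %s\n", n,
		       done ? "drained" : "rescheduled");
		if (done)
			break;
	}
	return 0;
}
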
@@ -9203,8 +9835,16 @@ static void ipr_shutdown(struct pci_dev *pdev)
 {
 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
 	unsigned long lock_flags = 0;
+	int i;
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+		ioa_cfg->iopoll_weight = 0;
+		for (i = 1; i < ioa_cfg->hrrq_num; i++)
+			blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
+	}
+
 	while (ioa_cfg->in_reset_reload) {
 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
@@ -9277,6 +9917,8 @@ static struct pci_device_id ipr_pci_table[] = {
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
@@ -9290,6 +9932,14 @@ static struct pci_device_id ipr_pci_table[] = {
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
 	{ }
 };
 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
@@ -9316,9 +9966,7 @@ static struct pci_driver ipr_driver = {
  **/
 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
 {
-	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
-
-	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 }
 
 /**
@@ -9340,7 +9988,7 @@ static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
 
 	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
 		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
-		if (!ioa_cfg->allow_cmds) {
+		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 			continue;
 		}
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index c8a137f83bb1..1a9a246932ae 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -32,14 +32,15 @@
 #include <linux/libata.h>
 #include <linux/list.h>
 #include <linux/kref.h>
+#include <linux/blk-iopoll.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
 
 /*
  * Literals
  */
-#define IPR_DRIVER_VERSION "2.5.4"
-#define IPR_DRIVER_DATE "(July 11, 2012)"
+#define IPR_DRIVER_VERSION "2.6.0"
+#define IPR_DRIVER_DATE "(November 16, 2012)"
 
 /*
  * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -82,6 +83,7 @@
 
 #define IPR_SUBS_DEV_ID_57B4	0x033B
 #define IPR_SUBS_DEV_ID_57B2	0x035F
+#define IPR_SUBS_DEV_ID_57C0	0x0352
 #define IPR_SUBS_DEV_ID_57C3	0x0353
 #define IPR_SUBS_DEV_ID_57C4	0x0354
 #define IPR_SUBS_DEV_ID_57C6	0x0357
@@ -94,6 +96,10 @@
 #define IPR_SUBS_DEV_ID_574D	0x0356
 #define IPR_SUBS_DEV_ID_57C8	0x035D
 
+#define IPR_SUBS_DEV_ID_57D5	0x03FB
+#define IPR_SUBS_DEV_ID_57D6	0x03FC
+#define IPR_SUBS_DEV_ID_57D7	0x03FF
+#define IPR_SUBS_DEV_ID_57D8	0x03FE
 #define IPR_NAME	"ipr"
 
 /*
@@ -298,6 +304,9 @@ IPR_PCII_NO_HOST_RRQ | IPR_PCII_IOARRIN_LOST | IPR_PCII_MMIO_ERROR)
  * Misc literals
  */
 #define IPR_NUM_IOADL_ENTRIES	IPR_MAX_SGLIST
+#define IPR_MAX_MSIX_VECTORS	0x5
+#define IPR_MAX_HRRQ_NUM	0x10
+#define IPR_INIT_HRRQ		0x0
 
 /*
  * Adapter interface types
@@ -404,7 +413,7 @@ struct ipr_config_table_entry64 {
 	__be64 dev_id;
 	__be64 lun;
 	__be64 lun_wwn[2];
-#define IPR_MAX_RES_PATH_LENGTH		24
+#define IPR_MAX_RES_PATH_LENGTH		48
 	__be64 res_path;
 	struct ipr_std_inq_data std_inq_data;
 	u8 reserved2[4];
@@ -459,9 +468,39 @@ struct ipr_supported_device {
 	u8 reserved2[16];
 }__attribute__((packed, aligned (4)));
 
+struct ipr_hrr_queue {
+	struct ipr_ioa_cfg *ioa_cfg;
+	__be32 *host_rrq;
+	dma_addr_t host_rrq_dma;
+#define IPR_HRRQ_REQ_RESP_HANDLE_MASK	0xfffffffc
+#define IPR_HRRQ_RESP_BIT_SET		0x00000002
+#define IPR_HRRQ_TOGGLE_BIT		0x00000001
+#define IPR_HRRQ_REQ_RESP_HANDLE_SHIFT	2
+#define IPR_ID_HRRQ_SELE_ENABLE		0x02
+	volatile __be32 *hrrq_start;
+	volatile __be32 *hrrq_end;
+	volatile __be32 *hrrq_curr;
+
+	struct list_head hrrq_free_q;
+	struct list_head hrrq_pending_q;
+	spinlock_t _lock;
+	spinlock_t *lock;
+
+	volatile u32 toggle_bit;
+	u32 size;
+	u32 min_cmd_id;
+	u32 max_cmd_id;
+	u8 allow_interrupts:1;
+	u8 ioa_is_dead:1;
+	u8 allow_cmds:1;
+
+	struct blk_iopoll iopoll;
+};
+
 /* Command packet structure */
 struct ipr_cmd_pkt {
-	__be16 reserved;	/* Reserved by IOA */
+	u8 reserved;		/* Reserved by IOA */
+	u8 hrrq_id;
 	u8 request_type;
 #define IPR_RQTYPE_SCSICDB	0x00
 #define IPR_RQTYPE_IOACMD	0x01
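
struct ipr_hrr_queue keeps the classic toggle-bit response ring: an entry is a fresh completion only while its low toggle bit matches the consumer's expected value, and the expectation flips on every wrap so last lap's entries read as stale. A stand-alone model built from the mask/shift literals above (the byte-swapping of the real __be32 entries is left out):

#include <stdint.h>
#include <stdio.h>

#define RRQ_HANDLE_MASK		0xfffffffcu /* IPR_HRRQ_REQ_RESP_HANDLE_MASK */
#define RRQ_HANDLE_SHIFT	2	    /* IPR_HRRQ_REQ_RESP_HANDLE_SHIFT */
#define RRQ_TOGGLE_BIT		0x00000001u /* IPR_HRRQ_TOGGLE_BIT */

struct rrq_model {
	uint32_t *start, *end, *curr;
	uint32_t toggle;
};

/* Pop one completion if the current slot belongs to this lap; on wrap,
 * flip the expected toggle bit so last lap's entries look stale. */
static int rrq_pop(struct rrq_model *q, uint32_t *handle)
{
	uint32_t e = *q->curr;

	if ((e & RRQ_TOGGLE_BIT) != q->toggle)
		return 0;
	*handle = (e & RRQ_HANDLE_MASK) >> RRQ_HANDLE_SHIFT;
	if (++q->curr == q->end) {
		q->curr = q->start;
		q->toggle ^= RRQ_TOGGLE_BIT;
	}
	return 1;
}

int main(void)
{
	uint32_t ring[4] = { (7 << RRQ_HANDLE_SHIFT) | 1, 0, 0, 0 };
	struct rrq_model q = { ring, ring + 4, ring, 1 };
	uint32_t h;

	while (rrq_pop(&q, &h))
		printf("completed handle %u\n", h);	/* prints 7 */
	return 0;
}
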
@@ -1022,6 +1061,10 @@ struct ipr_hostrcb64_fabric_desc {
 	struct ipr_hostrcb64_config_element elem[1];
 }__attribute__((packed, aligned (8)));
 
+#define for_each_hrrq(hrrq, ioa_cfg) \
+		for (hrrq = (ioa_cfg)->hrrq; \
+			hrrq < ((ioa_cfg)->hrrq + (ioa_cfg)->hrrq_num); hrrq++)
+
 #define for_each_fabric_cfg(fabric, cfg) \
 	for (cfg = (fabric)->elem; \
 	     cfg < ((fabric)->elem + be16_to_cpu((fabric)->num_entries)); \
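
for_each_hrrq() is a plain pointer walk over the embedded array, bounded by hrrq_num rather than the array size. The free-standing equivalent below shows the shape (all types invented for the sketch):

struct hrrq_model { int id; };
struct cfg_model {
	struct hrrq_model hrrq[16];
	unsigned int hrrq_num;
};

#define for_each_hrrq_model(h, cfg) \
	for ((h) = (cfg)->hrrq; \
	     (h) < ((cfg)->hrrq + (cfg)->hrrq_num); (h)++)

int main(void)
{
	struct cfg_model cfg = { .hrrq_num = 4 };
	struct hrrq_model *h;
	int n = 0;

	for_each_hrrq_model(h, &cfg)
		n++;
	return n == 4 ? 0 : 1;	/* visits exactly hrrq_num queues */
}
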
@@ -1308,6 +1351,7 @@ struct ipr_chip_cfg_t {
 	u16 max_cmds;
 	u8 cache_line_size;
 	u8 clear_isr;
+	u32 iopoll_weight;
 	struct ipr_interrupt_offsets regs;
 };
 
@@ -1317,6 +1361,7 @@ struct ipr_chip_t {
 	u16 intr_type;
 #define IPR_USE_LSI		0x00
 #define IPR_USE_MSI		0x01
+#define IPR_USE_MSIX		0x02
 	u16 sis_type;
 #define IPR_SIS32		0x00
 #define IPR_SIS64		0x01
@@ -1375,13 +1420,10 @@ struct ipr_ioa_cfg {
 
 	struct list_head queue;
 
-	u8 allow_interrupts:1;
 	u8 in_reset_reload:1;
 	u8 in_ioa_bringdown:1;
 	u8 ioa_unit_checked:1;
-	u8 ioa_is_dead:1;
 	u8 dump_taken:1;
-	u8 allow_cmds:1;
 	u8 allow_ml_add_del:1;
 	u8 needs_hard_reset:1;
 	u8 dual_raid:1;
@@ -1413,21 +1455,7 @@ struct ipr_ioa_cfg {
 	char trace_start[8];
 #define IPR_TRACE_START_LABEL		"trace"
 	struct ipr_trace_entry *trace;
-	u32 trace_index:IPR_NUM_TRACE_INDEX_BITS;
-
-	/*
-	 * Queue for free command blocks
-	 */
-	char ipr_free_label[8];
-#define IPR_FREEQ_LABEL			"free-q"
-	struct list_head free_q;
-
-	/*
-	 * Queue for command blocks outstanding to the adapter
-	 */
-	char ipr_pending_label[8];
-#define IPR_PENDQ_LABEL			"pend-q"
-	struct list_head pending_q;
+	atomic_t trace_index;
 
 	char cfg_table_start[8];
 #define IPR_CFG_TBL_START		"cfg"
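
With several HRRQs logging concurrently, the old trace_index bitfield would race; an atomic fetch-and-add plus a power-of-two mask gives every caller a private slot and wraps for free, which is presumably what the atomic_t conversion is for. A sketch of the idea (the entry type and size are placeholders):

#include <stdatomic.h>

#define TRACE_ENTRIES 256	/* placeholder; must stay a power of two */

struct trace_entry_model { unsigned int op; };

static struct trace_entry_model trace_buf[TRACE_ENTRIES];
static atomic_uint trace_index;

/* fetch-and-add hands every logger a private slot; the power-of-two
 * mask supplies the wraparound the old bitfield provided implicitly. */
static struct trace_entry_model *next_trace_slot(void)
{
	unsigned int idx = atomic_fetch_add(&trace_index, 1)
			   & (TRACE_ENTRIES - 1);

	return &trace_buf[idx];
}

int main(void)
{
	next_trace_slot()->op = 1;	/* slot 0 */
	next_trace_slot()->op = 2;	/* slot 1 */
	return 0;
}
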
@@ -1452,16 +1480,10 @@ struct ipr_ioa_cfg {
 	struct list_head hostrcb_free_q;
 	struct list_head hostrcb_pending_q;
 
-	__be32 *host_rrq;
-	dma_addr_t host_rrq_dma;
-#define IPR_HRRQ_REQ_RESP_HANDLE_MASK	0xfffffffc
-#define IPR_HRRQ_RESP_BIT_SET		0x00000002
-#define IPR_HRRQ_TOGGLE_BIT		0x00000001
-#define IPR_HRRQ_REQ_RESP_HANDLE_SHIFT	2
-	volatile __be32 *hrrq_start;
-	volatile __be32 *hrrq_end;
-	volatile __be32 *hrrq_curr;
-	volatile u32 toggle_bit;
+	struct ipr_hrr_queue hrrq[IPR_MAX_HRRQ_NUM];
+	u32 hrrq_num;
+	atomic_t  hrrq_index;
+	u16 identify_hrrq_index;
 
 	struct ipr_bus_attributes bus_attr[IPR_MAX_NUM_BUSES];
 
@@ -1507,6 +1529,17 @@ struct ipr_ioa_cfg {
 	u32 max_cmds;
 	struct ipr_cmnd **ipr_cmnd_list;
 	dma_addr_t *ipr_cmnd_list_dma;
+
+	u16 intr_flag;
+	unsigned int nvectors;
+
+	struct {
+		unsigned short vec;
+		char desc[22];
+	} vectors_info[IPR_MAX_MSIX_VECTORS];
+
+	u32 iopoll_weight;
+
 }; /* struct ipr_ioa_cfg */
 
 struct ipr_cmnd {
@@ -1544,6 +1577,7 @@ struct ipr_cmnd {
 		struct scsi_device *sdev;
 	} u;
 
+	struct ipr_hrr_queue *hrrq;
 	struct ipr_ioa_cfg *ioa_cfg;
 };
 
@@ -1717,7 +1751,8 @@ struct ipr_ucode_image_header {
 	if (ipr_is_device(hostrcb)) {					\
 		if ((hostrcb)->ioa_cfg->sis64) {			\
 			printk(KERN_ERR IPR_NAME ": %s: " fmt,		\
-				ipr_format_res_path(hostrcb->hcam.u.error64.fd_res_path, \
+				ipr_format_res_path(hostrcb->ioa_cfg,	\
+					hostrcb->hcam.u.error64.fd_res_path, \
 					hostrcb->rp_buffer,		\
 					sizeof(hostrcb->rp_buffer)),	\
 				__VA_ARGS__);				\
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index df4c13a5534c..7706c99ec8bb 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -466,11 +466,13 @@ enum intr_type_t {
 	MSIX,
 };
 
+#define LPFC_CT_CTX_MAX		64
 struct unsol_rcv_ct_ctx {
 	uint32_t ctxt_id;
 	uint32_t SID;
-	uint32_t flags;
-#define UNSOL_VALID	0x00000001
+	uint32_t valid;
+#define UNSOL_INVALID		0
+#define UNSOL_VALID		1
 	uint16_t oxid;
 	uint16_t rxid;
 };
@@ -750,6 +752,15 @@ struct lpfc_hba {
 	void __iomem *ctrl_regs_memmap_p;/* Kernel memory mapped address for
 					    PCI BAR2 */
 
+	void __iomem *pci_bar0_memmap_p; /* Kernel memory mapped address for
+					    PCI BAR0 with dual-ULP support */
+	void __iomem *pci_bar2_memmap_p; /* Kernel memory mapped address for
+					    PCI BAR2 with dual-ULP support */
+	void __iomem *pci_bar4_memmap_p; /* Kernel memory mapped address for
+					    PCI BAR4 with dual-ULP support */
+#define PCI_64BIT_BAR0	0
+#define PCI_64BIT_BAR2	2
+#define PCI_64BIT_BAR4	4
 	void __iomem *MBslimaddr;	/* virtual address for mbox cmds */
 	void __iomem *HAregaddr;	/* virtual address for host attn reg */
 	void __iomem *CAregaddr;	/* virtual address for chip attn reg */
@@ -938,7 +949,7 @@ struct lpfc_hba {
 
 	spinlock_t ct_ev_lock; /* synchronize access to ct_ev_waiters */
 	struct list_head ct_ev_waiters;
-	struct unsol_rcv_ct_ctx ct_ctx[64];
+	struct unsol_rcv_ct_ctx ct_ctx[LPFC_CT_CTX_MAX];
 	uint32_t ctx_idx;
 
 	uint8_t menlo_flag;	/* menlo generic flags */
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index f7368eb80415..32d5683e6181 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -955,9 +955,9 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 	if (phba->sli_rev == LPFC_SLI_REV4) {
 		evt_dat->immed_dat = phba->ctx_idx;
-		phba->ctx_idx = (phba->ctx_idx + 1) % 64;
+		phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX;
 		/* Provide warning for over-run of the ct_ctx array */
-		if (phba->ct_ctx[evt_dat->immed_dat].flags &
+		if (phba->ct_ctx[evt_dat->immed_dat].valid ==
 		    UNSOL_VALID)
 			lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
 					"2717 CT context array entry "
@@ -973,7 +973,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 			piocbq->iocb.unsli3.rcvsli3.ox_id;
 		phba->ct_ctx[evt_dat->immed_dat].SID =
 			piocbq->iocb.un.rcvels.remoteID;
-		phba->ct_ctx[evt_dat->immed_dat].flags = UNSOL_VALID;
+		phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
 	} else
 		evt_dat->immed_dat = piocbq->iocb.ulpContext;
 
@@ -1013,6 +1013,47 @@ error_ct_unsol_exit:
 }
 
 /**
+ * lpfc_bsg_ct_unsol_abort - handler ct abort to management plane
+ * @phba: Pointer to HBA context object.
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function handles abort to the CT command toward management plane
+ * for SLI4 port.
+ *
+ * If the pending context of a CT command to management plane present, clears
+ * such context and returns 1 for handled; otherwise, it returns 0 indicating
+ * no context exists.
+ **/
+int
+lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
+{
+	struct fc_frame_header fc_hdr;
+	struct fc_frame_header *fc_hdr_ptr = &fc_hdr;
+	int ctx_idx, handled = 0;
+	uint16_t oxid, rxid;
+	uint32_t sid;
+
+	memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
+	sid = sli4_sid_from_fc_hdr(fc_hdr_ptr);
+	oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id);
+	rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id);
+
+	for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) {
+		if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID)
+			continue;
+		if (phba->ct_ctx[ctx_idx].rxid != rxid)
+			continue;
+		if (phba->ct_ctx[ctx_idx].oxid != oxid)
+			continue;
+		if (phba->ct_ctx[ctx_idx].SID != sid)
+			continue;
+		phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID;
+		handled = 1;
+	}
+	return handled;
+}
+
+/**
  * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
  * @job: SET_EVENT fc_bsg_job
  **/
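
The new function scans the fixed-size context array and clears every slot whose (SID, ox_id, rx_id) triple matches the aborted exchange; together with the "% LPFC_CT_CTX_MAX" claim path shown earlier, this is a small circular-table lifecycle. Modeled in stand-alone C (field and helper names are the sketch's own):

#include <stdint.h>

#define CT_CTX_MAX 64		/* mirrors LPFC_CT_CTX_MAX */
enum { CTX_INVALID, CTX_VALID };	/* mirrors UNSOL_INVALID/UNSOL_VALID */

struct ct_ctx_model {
	uint32_t valid;
	uint32_t sid;
	uint16_t oxid, rxid;
};

static struct ct_ctx_model ctx[CT_CTX_MAX];
static uint32_t ctx_idx;

/* Claim the next slot in the circular array -- the same "% CT_CTX_MAX"
 * wrap used when an unsolicited CT event arrives. */
static uint32_t ctx_claim(uint32_t sid, uint16_t oxid, uint16_t rxid)
{
	uint32_t tag = ctx_idx;

	ctx_idx = (ctx_idx + 1) % CT_CTX_MAX;
	ctx[tag] = (struct ct_ctx_model){ CTX_VALID, sid, oxid, rxid };
	return tag;
}

/* The abort path: invalidate every slot whose (sid, oxid, rxid) triple
 * matches the aborted exchange; returns 1 if anything was cleared. */
static int ctx_abort(uint32_t sid, uint16_t oxid, uint16_t rxid)
{
	int handled = 0;

	for (int i = 0; i < CT_CTX_MAX; i++) {
		if (ctx[i].valid != CTX_VALID || ctx[i].sid != sid ||
		    ctx[i].oxid != oxid || ctx[i].rxid != rxid)
			continue;
		ctx[i].valid = CTX_INVALID;
		handled = 1;
	}
	return handled;
}

int main(void)
{
	ctx_claim(0x10300, 0x1234, 0xffff);
	return ctx_abort(0x10300, 0x1234, 0xffff) ? 0 : 1;
}
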
@@ -1318,7 +1359,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
 	icmd->ulpClass = CLASS3;
 	if (phba->sli_rev == LPFC_SLI_REV4) {
 		/* Do not issue unsol response if oxid not marked as valid */
-		if (!(phba->ct_ctx[tag].flags & UNSOL_VALID)) {
+		if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
 			rc = IOCB_ERROR;
 			goto issue_ct_rsp_exit;
 		}
@@ -1352,7 +1393,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
 			phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
 
 		/* The exchange is done, mark the entry as invalid */
-		phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
+		phba->ct_ctx[tag].valid = UNSOL_INVALID;
 	} else
 		icmd->ulpContext = (ushort) tag;
 
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 69d66e3662cb..76ca65dae781 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -164,8 +164,7 @@ void lpfc_hb_timeout_handler(struct lpfc_hba *);
 
 void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
 			 struct lpfc_iocbq *);
-void lpfc_sli4_ct_abort_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
-				    struct lpfc_iocbq *);
+int lpfc_ct_handle_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *);
 int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
 int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int);
 void lpfc_fdmi_tmo(unsigned long);
@@ -427,6 +426,7 @@ int lpfc_bsg_request(struct fc_bsg_job *);
 int lpfc_bsg_timeout(struct fc_bsg_job *);
 int lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
 			    struct lpfc_iocbq *);
+int lpfc_bsg_ct_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *);
 void __lpfc_sli_ringtx_put(struct lpfc_hba *, struct lpfc_sli_ring *,
 			   struct lpfc_iocbq *);
 struct lpfc_iocbq *lpfc_sli_ringtx_get(struct lpfc_hba *,
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 65f9fb6862e6..7bff3a19af56 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -164,37 +164,24 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 }
 
 /**
- * lpfc_sli4_ct_abort_unsol_event - Default handle for sli4 unsol abort
+ * lpfc_ct_handle_unsol_abort - ct upper level protocol abort handler
  * @phba: Pointer to HBA context object.
- * @pring: Pointer to the driver internal I/O ring.
- * @piocbq: Pointer to the IOCBQ.
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
  *
- * This function serves as the default handler for the sli4 unsolicited
- * abort event. It shall be invoked when there is no application interface
- * registered unsolicited abort handler. This handler does nothing but
- * just simply releases the dma buffer used by the unsol abort event.
+ * This function serves as the upper level protocol abort handler for CT
+ * protocol.
+ *
+ * Return 1 if abort has been handled, 0 otherwise.
  **/
-void
-lpfc_sli4_ct_abort_unsol_event(struct lpfc_hba *phba,
-			       struct lpfc_sli_ring *pring,
-			       struct lpfc_iocbq *piocbq)
+int
+lpfc_ct_handle_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
 {
-	IOCB_t *icmd = &piocbq->iocb;
-	struct lpfc_dmabuf *bdeBuf;
-	uint32_t size;
+	int handled;
 
-	/* Forward abort event to any process registered to receive ct event */
-	if (lpfc_bsg_ct_unsol_event(phba, pring, piocbq) == 0)
-		return;
+	/* CT upper level goes through BSG */
+	handled = lpfc_bsg_ct_unsol_abort(phba, dmabuf);
 
-	/* If there is no BDE associated with IOCB, there is nothing to do */
-	if (icmd->ulpBdeCount == 0)
-		return;
-	bdeBuf = piocbq->context2;
-	piocbq->context2 = NULL;
-	size = icmd->un.cont64[0].tus.f.bdeSize;
-	lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf, size);
-	lpfc_in_buf_free(phba, bdeBuf);
+	return handled;
 }
 
 static void
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b9440deaad45..08d156a9094f 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -3122,6 +3122,13 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 
 	case IOERR_SEQUENCE_TIMEOUT:
 	case IOERR_INVALID_RPI:
+		if (cmd == ELS_CMD_PLOGI &&
+		    did == NameServer_DID) {
+			/* Continue forever if plogi to */
+			/* the nameserver fails */
+			maxretry = 0;
+			delay = 100;
+		}
 		retry = 1;
 		break;
 	}
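
maxretry = 0 is the "retry forever" encoding here: the attempt counter can never reach the cap, so a PLOGI to the well-known name server keeps being reissued after the delay instead of giving up. A tiny model of that policy check (names illustrative):

#include <stdio.h>

struct retry_policy {
	unsigned int maxretry;	/* 0 => no cap, keep retrying */
	unsigned int delay_ms;	/* wait between attempts */
};

static int should_retry(const struct retry_policy *p, unsigned int attempts)
{
	return p->maxretry == 0 || attempts < p->maxretry;
}

int main(void)
{
	struct retry_policy ns_plogi = { .maxretry = 0, .delay_ms = 100 };

	printf("retry after 1000 attempts? %s\n",
	       should_retry(&ns_plogi, 1000) ? "yes" : "no");	/* yes */
	return 0;
}
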
@@ -6517,7 +6524,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	struct lpfc_nodelist *ndlp;
 	struct ls_rjt stat;
 	uint32_t *payload;
-	uint32_t cmd, did, newnode, rjt_err = 0;
+	uint32_t cmd, did, newnode;
+	uint8_t rjt_exp, rjt_err = 0;
 	IOCB_t *icmd = &elsiocb->iocb;
 
 	if (!vport || !(elsiocb->context2))
@@ -6606,12 +6614,14 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		/* If Nport discovery is delayed, reject PLOGIs */
 		if (vport->fc_flag & FC_DISC_DELAYED) {
 			rjt_err = LSRJT_UNABLE_TPC;
+			rjt_exp = LSEXP_NOTHING_MORE;
 			break;
 		}
 		if (vport->port_state < LPFC_DISC_AUTH) {
 			if (!(phba->pport->fc_flag & FC_PT2PT) ||
 			    (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
 				rjt_err = LSRJT_UNABLE_TPC;
+				rjt_exp = LSEXP_NOTHING_MORE;
 				break;
 			}
 		/* We get here, and drop thru, if we are PT2PT with
@@ -6648,6 +6658,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		lpfc_send_els_event(vport, ndlp, payload);
 		if (vport->port_state < LPFC_DISC_AUTH) {
 			rjt_err = LSRJT_UNABLE_TPC;
+			rjt_exp = LSEXP_NOTHING_MORE;
 			break;
 		}
 		lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
@@ -6661,6 +6672,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		lpfc_send_els_event(vport, ndlp, payload);
 		if (vport->port_state < LPFC_DISC_AUTH) {
 			rjt_err = LSRJT_UNABLE_TPC;
+			rjt_exp = LSEXP_NOTHING_MORE;
 			break;
 		}
 		lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
@@ -6680,6 +6692,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		phba->fc_stat.elsRcvADISC++;
 		if (vport->port_state < LPFC_DISC_AUTH) {
 			rjt_err = LSRJT_UNABLE_TPC;
+			rjt_exp = LSEXP_NOTHING_MORE;
 			break;
 		}
 		lpfc_disc_state_machine(vport, ndlp, elsiocb,
@@ -6693,6 +6706,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		phba->fc_stat.elsRcvPDISC++;
 		if (vport->port_state < LPFC_DISC_AUTH) {
 			rjt_err = LSRJT_UNABLE_TPC;
+			rjt_exp = LSEXP_NOTHING_MORE;
 			break;
 		}
 		lpfc_disc_state_machine(vport, ndlp, elsiocb,
@@ -6730,6 +6744,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		phba->fc_stat.elsRcvPRLI++;
 		if (vport->port_state < LPFC_DISC_AUTH) {
 			rjt_err = LSRJT_UNABLE_TPC;
+			rjt_exp = LSEXP_NOTHING_MORE;
 			break;
 		}
 		lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
@@ -6813,6 +6828,11 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		if (newnode)
 			lpfc_nlp_put(ndlp);
 		break;
+	case ELS_CMD_REC:
+		/* receive this due to exchange closed */
+		rjt_err = LSRJT_UNABLE_TPC;
+		rjt_exp = LSEXP_INVALID_OX_RX;
+		break;
 	default:
 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
 			"RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
@@ -6820,6 +6840,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 
 		/* Unsupported ELS command, reject */
 		rjt_err = LSRJT_CMD_UNSUPPORTED;
+		rjt_exp = LSEXP_NOTHING_MORE;
 
 		/* Unknown ELS command <elsCmd> received from NPORT <did> */
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
@@ -6834,7 +6855,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	if (rjt_err) {
 		memset(&stat, 0, sizeof(stat));
 		stat.un.b.lsRjtRsnCode = rjt_err;
-		stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+		stat.un.b.lsRjtRsnCodeExp = rjt_exp;
 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
 			NULL);
 	}
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 7398ca862e97..e8c476031703 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -538,6 +538,7 @@ struct fc_vft_header {
 #define ELS_CMD_ECHO      0x10000000
 #define ELS_CMD_TEST      0x11000000
 #define ELS_CMD_RRQ       0x12000000
+#define ELS_CMD_REC       0x13000000
 #define ELS_CMD_PRLI      0x20100014
 #define ELS_CMD_PRLO      0x21100014
 #define ELS_CMD_PRLO_ACC  0x02100014
@@ -574,6 +575,7 @@ struct fc_vft_header {
 #define ELS_CMD_ECHO      0x10
 #define ELS_CMD_TEST      0x11
 #define ELS_CMD_RRQ       0x12
+#define ELS_CMD_REC       0x13
 #define ELS_CMD_PRLI      0x14001020
 #define ELS_CMD_PRLO      0x14001021
 #define ELS_CMD_PRLO_ACC  0x14001002
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index a47cfbdd05f2..6e93b886cd4d 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -106,6 +106,7 @@ struct lpfc_sli_intf {
 
 #define LPFC_SLI4_MB_WORD_COUNT		64
 #define LPFC_MAX_MQ_PAGE		8
+#define LPFC_MAX_WQ_PAGE_V0		4
 #define LPFC_MAX_WQ_PAGE		8
 #define LPFC_MAX_CQ_PAGE		4
 #define LPFC_MAX_EQ_PAGE		8
@@ -703,24 +704,41 @@ struct lpfc_register {
703 * BAR0. The offsets are the same so the driver must account for 704 * BAR0. The offsets are the same so the driver must account for
704 * any base address difference. 705 * any base address difference.
705 */ 706 */
706#define LPFC_RQ_DOORBELL 0x00A0 707#define LPFC_ULP0_RQ_DOORBELL 0x00A0
707#define lpfc_rq_doorbell_num_posted_SHIFT 16 708#define LPFC_ULP1_RQ_DOORBELL 0x00C0
708#define lpfc_rq_doorbell_num_posted_MASK 0x3FFF 709#define lpfc_rq_db_list_fm_num_posted_SHIFT 24
709#define lpfc_rq_doorbell_num_posted_WORD word0 710#define lpfc_rq_db_list_fm_num_posted_MASK 0x00FF
710#define lpfc_rq_doorbell_id_SHIFT 0 711#define lpfc_rq_db_list_fm_num_posted_WORD word0
711#define lpfc_rq_doorbell_id_MASK 0xFFFF 712#define lpfc_rq_db_list_fm_index_SHIFT 16
712#define lpfc_rq_doorbell_id_WORD word0 713#define lpfc_rq_db_list_fm_index_MASK 0x00FF
713 714#define lpfc_rq_db_list_fm_index_WORD word0
714#define LPFC_WQ_DOORBELL 0x0040 715#define lpfc_rq_db_list_fm_id_SHIFT 0
715#define lpfc_wq_doorbell_num_posted_SHIFT 24 716#define lpfc_rq_db_list_fm_id_MASK 0xFFFF
716#define lpfc_wq_doorbell_num_posted_MASK 0x00FF 717#define lpfc_rq_db_list_fm_id_WORD word0
717#define lpfc_wq_doorbell_num_posted_WORD word0 718#define lpfc_rq_db_ring_fm_num_posted_SHIFT 16
718#define lpfc_wq_doorbell_index_SHIFT 16 719#define lpfc_rq_db_ring_fm_num_posted_MASK 0x3FFF
719#define lpfc_wq_doorbell_index_MASK 0x00FF 720#define lpfc_rq_db_ring_fm_num_posted_WORD word0
720#define lpfc_wq_doorbell_index_WORD word0 721#define lpfc_rq_db_ring_fm_id_SHIFT 0
721#define lpfc_wq_doorbell_id_SHIFT 0 722#define lpfc_rq_db_ring_fm_id_MASK 0xFFFF
722#define lpfc_wq_doorbell_id_MASK 0xFFFF 723#define lpfc_rq_db_ring_fm_id_WORD word0
723#define lpfc_wq_doorbell_id_WORD word0 724
725#define LPFC_ULP0_WQ_DOORBELL 0x0040
726#define LPFC_ULP1_WQ_DOORBELL 0x0060
727#define lpfc_wq_db_list_fm_num_posted_SHIFT 24
728#define lpfc_wq_db_list_fm_num_posted_MASK 0x00FF
729#define lpfc_wq_db_list_fm_num_posted_WORD word0
730#define lpfc_wq_db_list_fm_index_SHIFT 16
731#define lpfc_wq_db_list_fm_index_MASK 0x00FF
732#define lpfc_wq_db_list_fm_index_WORD word0
733#define lpfc_wq_db_list_fm_id_SHIFT 0
734#define lpfc_wq_db_list_fm_id_MASK 0xFFFF
735#define lpfc_wq_db_list_fm_id_WORD word0
736#define lpfc_wq_db_ring_fm_num_posted_SHIFT 16
737#define lpfc_wq_db_ring_fm_num_posted_MASK 0x3FFF
738#define lpfc_wq_db_ring_fm_num_posted_WORD word0
739#define lpfc_wq_db_ring_fm_id_SHIFT 0
740#define lpfc_wq_db_ring_fm_id_MASK 0xFFFF
741#define lpfc_wq_db_ring_fm_id_WORD word0
724 742
725#define LPFC_EQCQ_DOORBELL 0x0120 743#define LPFC_EQCQ_DOORBELL 0x0120
726#define lpfc_eqcq_doorbell_se_SHIFT 31 744#define lpfc_eqcq_doorbell_se_SHIFT 31
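The list-format macros above encode three fields into a single 32-bit doorbell word. A minimal stand-alone sketch, assuming nothing beyond the SHIFT/MASK values defined in this hunk, of how such a word is composed (mirroring what the driver's bf_set() helper does):

#include <stdint.h>
#include <stdio.h>

#define lpfc_wq_db_list_fm_num_posted_SHIFT 24
#define lpfc_wq_db_list_fm_num_posted_MASK  0x00FF
#define lpfc_wq_db_list_fm_index_SHIFT      16
#define lpfc_wq_db_list_fm_index_MASK       0x00FF
#define lpfc_wq_db_list_fm_id_SHIFT         0
#define lpfc_wq_db_list_fm_id_MASK          0xFFFF

/* mirrors bf_set(): clear the field, then OR in the masked value */
static uint32_t set_field(uint32_t word, uint32_t val,
			  unsigned shift, uint32_t mask)
{
	return (word & ~(mask << shift)) | ((val & mask) << shift);
}

int main(void)
{
	uint32_t word0 = 0;

	word0 = set_field(word0, 1, lpfc_wq_db_list_fm_num_posted_SHIFT,
			  lpfc_wq_db_list_fm_num_posted_MASK);
	word0 = set_field(word0, 0x2a, lpfc_wq_db_list_fm_index_SHIFT,
			  lpfc_wq_db_list_fm_index_MASK);
	word0 = set_field(word0, 0x0101, lpfc_wq_db_list_fm_id_SHIFT,
			  lpfc_wq_db_list_fm_id_MASK);

	printf("doorbell word0 = 0x%08x\n", (unsigned)word0); /* 0x012a0101 */
	return 0;
}

Composing the whole word first and then writing it with a single writel() keeps the doorbell update atomic from the hardware's point of view.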
@@ -1131,12 +1149,22 @@ struct lpfc_mbx_wq_create {
1131 struct { /* Version 0 Request */ 1149 struct { /* Version 0 Request */
1132 uint32_t word0; 1150 uint32_t word0;
1133#define lpfc_mbx_wq_create_num_pages_SHIFT 0 1151#define lpfc_mbx_wq_create_num_pages_SHIFT 0
1134#define lpfc_mbx_wq_create_num_pages_MASK 0x0000FFFF 1152#define lpfc_mbx_wq_create_num_pages_MASK 0x000000FF
1135#define lpfc_mbx_wq_create_num_pages_WORD word0 1153#define lpfc_mbx_wq_create_num_pages_WORD word0
1154#define lpfc_mbx_wq_create_dua_SHIFT 8
1155#define lpfc_mbx_wq_create_dua_MASK 0x00000001
1156#define lpfc_mbx_wq_create_dua_WORD word0
1136#define lpfc_mbx_wq_create_cq_id_SHIFT 16 1157#define lpfc_mbx_wq_create_cq_id_SHIFT 16
1137#define lpfc_mbx_wq_create_cq_id_MASK 0x0000FFFF 1158#define lpfc_mbx_wq_create_cq_id_MASK 0x0000FFFF
1138#define lpfc_mbx_wq_create_cq_id_WORD word0 1159#define lpfc_mbx_wq_create_cq_id_WORD word0
1139 struct dma_address page[LPFC_MAX_WQ_PAGE]; 1160 struct dma_address page[LPFC_MAX_WQ_PAGE_V0];
1161 uint32_t word9;
1162#define lpfc_mbx_wq_create_bua_SHIFT 0
1163#define lpfc_mbx_wq_create_bua_MASK 0x00000001
1164#define lpfc_mbx_wq_create_bua_WORD word9
1165#define lpfc_mbx_wq_create_ulp_num_SHIFT 8
1166#define lpfc_mbx_wq_create_ulp_num_MASK 0x000000FF
1167#define lpfc_mbx_wq_create_ulp_num_WORD word9
1140 } request; 1168 } request;
1141 struct { /* Version 1 Request */ 1169 struct { /* Version 1 Request */
1142 uint32_t word0; /* Word 0 is the same as in v0 */ 1170 uint32_t word0; /* Word 0 is the same as in v0 */
@@ -1160,6 +1188,17 @@ struct lpfc_mbx_wq_create {
1160#define lpfc_mbx_wq_create_q_id_SHIFT 0 1188#define lpfc_mbx_wq_create_q_id_SHIFT 0
1161#define lpfc_mbx_wq_create_q_id_MASK 0x0000FFFF 1189#define lpfc_mbx_wq_create_q_id_MASK 0x0000FFFF
1162#define lpfc_mbx_wq_create_q_id_WORD word0 1190#define lpfc_mbx_wq_create_q_id_WORD word0
1191 uint32_t doorbell_offset;
1192 uint32_t word2;
1193#define lpfc_mbx_wq_create_bar_set_SHIFT 0
1194#define lpfc_mbx_wq_create_bar_set_MASK 0x0000FFFF
1195#define lpfc_mbx_wq_create_bar_set_WORD word2
1196#define WQ_PCI_BAR_0_AND_1 0x00
1197#define WQ_PCI_BAR_2_AND_3 0x01
1198#define WQ_PCI_BAR_4_AND_5 0x02
1199#define lpfc_mbx_wq_create_db_format_SHIFT 16
1200#define lpfc_mbx_wq_create_db_format_MASK 0x0000FFFF
1201#define lpfc_mbx_wq_create_db_format_WORD word2
1163 } response; 1202 } response;
1164 } u; 1203 } u;
1165}; 1204};
@@ -1223,14 +1262,31 @@ struct lpfc_mbx_rq_create {
1223#define lpfc_mbx_rq_create_num_pages_SHIFT 0 1262#define lpfc_mbx_rq_create_num_pages_SHIFT 0
1224#define lpfc_mbx_rq_create_num_pages_MASK 0x0000FFFF 1263#define lpfc_mbx_rq_create_num_pages_MASK 0x0000FFFF
1225#define lpfc_mbx_rq_create_num_pages_WORD word0 1264#define lpfc_mbx_rq_create_num_pages_WORD word0
1265#define lpfc_mbx_rq_create_dua_SHIFT 16
1266#define lpfc_mbx_rq_create_dua_MASK 0x00000001
1267#define lpfc_mbx_rq_create_dua_WORD word0
1268#define lpfc_mbx_rq_create_bqu_SHIFT 17
1269#define lpfc_mbx_rq_create_bqu_MASK 0x00000001
1270#define lpfc_mbx_rq_create_bqu_WORD word0
1271#define lpfc_mbx_rq_create_ulp_num_SHIFT 24
1272#define lpfc_mbx_rq_create_ulp_num_MASK 0x000000FF
1273#define lpfc_mbx_rq_create_ulp_num_WORD word0
1226 struct rq_context context; 1274 struct rq_context context;
1227 struct dma_address page[LPFC_MAX_WQ_PAGE]; 1275 struct dma_address page[LPFC_MAX_WQ_PAGE];
1228 } request; 1276 } request;
1229 struct { 1277 struct {
1230 uint32_t word0; 1278 uint32_t word0;
1231#define lpfc_mbx_rq_create_q_id_SHIFT 0 1279#define lpfc_mbx_rq_create_q_id_SHIFT 0
1232#define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF 1280#define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF
1233#define lpfc_mbx_rq_create_q_id_WORD word0 1281#define lpfc_mbx_rq_create_q_id_WORD word0
1282 uint32_t doorbell_offset;
1283 uint32_t word2;
1284#define lpfc_mbx_rq_create_bar_set_SHIFT 0
1285#define lpfc_mbx_rq_create_bar_set_MASK 0x0000FFFF
1286#define lpfc_mbx_rq_create_bar_set_WORD word2
1287#define lpfc_mbx_rq_create_db_format_SHIFT 16
1288#define lpfc_mbx_rq_create_db_format_MASK 0x0000FFFF
1289#define lpfc_mbx_rq_create_db_format_WORD word2
1234 } response; 1290 } response;
1235 } u; 1291 } u;
1236}; 1292};
@@ -1388,6 +1444,33 @@ struct lpfc_mbx_get_rsrc_extent_info {
1388 } u; 1444 } u;
1389}; 1445};
1390 1446
1447struct lpfc_mbx_query_fw_config {
1448 struct mbox_header header;
1449 struct {
1450 uint32_t config_number;
1451#define LPFC_FC_FCOE 0x00000007
1452 uint32_t asic_revision;
1453 uint32_t physical_port;
1454 uint32_t function_mode;
1455#define LPFC_FCOE_INI_MODE 0x00000040
1456#define LPFC_FCOE_TGT_MODE 0x00000080
1457#define LPFC_DUA_MODE 0x00000800
1458 uint32_t ulp0_mode;
1459#define LPFC_ULP_FCOE_INIT_MODE 0x00000040
1460#define LPFC_ULP_FCOE_TGT_MODE 0x00000080
1461 uint32_t ulp0_nap_words[12];
1462 uint32_t ulp1_mode;
1463 uint32_t ulp1_nap_words[12];
1464 uint32_t function_capabilities;
1465 uint32_t cqid_base;
1466 uint32_t cqid_tot;
1467 uint32_t eqid_base;
1468 uint32_t eqid_tot;
1469 uint32_t ulp0_nap2_words[2];
1470 uint32_t ulp1_nap2_words[2];
1471 } rsp;
1472};
1473
1391struct lpfc_id_range { 1474struct lpfc_id_range {
1392 uint32_t word5; 1475 uint32_t word5;
1393#define lpfc_mbx_rsrc_id_word4_0_SHIFT 0 1476#define lpfc_mbx_rsrc_id_word4_0_SHIFT 0
@@ -1803,51 +1886,6 @@ struct lpfc_mbx_redisc_fcf_tbl {
1803#define lpfc_mbx_redisc_fcf_index_WORD word12 1886#define lpfc_mbx_redisc_fcf_index_WORD word12
1804}; 1887};
1805 1888
1806struct lpfc_mbx_query_fw_cfg {
1807 struct mbox_header header;
1808 uint32_t config_number;
1809 uint32_t asic_rev;
1810 uint32_t phys_port;
1811 uint32_t function_mode;
1812/* firmware Function Mode */
1813#define lpfc_function_mode_toe_SHIFT 0
1814#define lpfc_function_mode_toe_MASK 0x00000001
1815#define lpfc_function_mode_toe_WORD function_mode
1816#define lpfc_function_mode_nic_SHIFT 1
1817#define lpfc_function_mode_nic_MASK 0x00000001
1818#define lpfc_function_mode_nic_WORD function_mode
1819#define lpfc_function_mode_rdma_SHIFT 2
1820#define lpfc_function_mode_rdma_MASK 0x00000001
1821#define lpfc_function_mode_rdma_WORD function_mode
1822#define lpfc_function_mode_vm_SHIFT 3
1823#define lpfc_function_mode_vm_MASK 0x00000001
1824#define lpfc_function_mode_vm_WORD function_mode
1825#define lpfc_function_mode_iscsi_i_SHIFT 4
1826#define lpfc_function_mode_iscsi_i_MASK 0x00000001
1827#define lpfc_function_mode_iscsi_i_WORD function_mode
1828#define lpfc_function_mode_iscsi_t_SHIFT 5
1829#define lpfc_function_mode_iscsi_t_MASK 0x00000001
1830#define lpfc_function_mode_iscsi_t_WORD function_mode
1831#define lpfc_function_mode_fcoe_i_SHIFT 6
1832#define lpfc_function_mode_fcoe_i_MASK 0x00000001
1833#define lpfc_function_mode_fcoe_i_WORD function_mode
1834#define lpfc_function_mode_fcoe_t_SHIFT 7
1835#define lpfc_function_mode_fcoe_t_MASK 0x00000001
1836#define lpfc_function_mode_fcoe_t_WORD function_mode
1837#define lpfc_function_mode_dal_SHIFT 8
1838#define lpfc_function_mode_dal_MASK 0x00000001
1839#define lpfc_function_mode_dal_WORD function_mode
1840#define lpfc_function_mode_lro_SHIFT 9
1841#define lpfc_function_mode_lro_MASK 0x00000001
1842#define lpfc_function_mode_lro_WORD function_mode
1843#define lpfc_function_mode_flex10_SHIFT 10
1844#define lpfc_function_mode_flex10_MASK 0x00000001
1845#define lpfc_function_mode_flex10_WORD function_mode
1846#define lpfc_function_mode_ncsi_SHIFT 11
1847#define lpfc_function_mode_ncsi_MASK 0x00000001
1848#define lpfc_function_mode_ncsi_WORD function_mode
1849};
1850
1851/* Status field for embedded SLI_CONFIG mailbox command */ 1889/* Status field for embedded SLI_CONFIG mailbox command */
1852#define STATUS_SUCCESS 0x0 1890#define STATUS_SUCCESS 0x0
1853#define STATUS_FAILED 0x1 1891#define STATUS_FAILED 0x1
@@ -2965,7 +3003,7 @@ struct lpfc_mqe {
2965 struct lpfc_mbx_read_config rd_config; 3003 struct lpfc_mbx_read_config rd_config;
2966 struct lpfc_mbx_request_features req_ftrs; 3004 struct lpfc_mbx_request_features req_ftrs;
2967 struct lpfc_mbx_post_hdr_tmpl hdr_tmpl; 3005 struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
2968 struct lpfc_mbx_query_fw_cfg query_fw_cfg; 3006 struct lpfc_mbx_query_fw_config query_fw_cfg;
2969 struct lpfc_mbx_supp_pages supp_pages; 3007 struct lpfc_mbx_supp_pages supp_pages;
2970 struct lpfc_mbx_pc_sli4_params sli4_params; 3008 struct lpfc_mbx_pc_sli4_params sli4_params;
2971 struct lpfc_mbx_get_sli4_parameters get_sli4_parameters; 3009 struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 7de4ef14698f..314b4f61b9e3 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -6229,9 +6229,11 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
6229 phba->sli4_hba.conf_regs_memmap_p + 6229 phba->sli4_hba.conf_regs_memmap_p +
6230 LPFC_CTL_PORT_SEM_OFFSET; 6230 LPFC_CTL_PORT_SEM_OFFSET;
6231 phba->sli4_hba.RQDBregaddr = 6231 phba->sli4_hba.RQDBregaddr =
6232 phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL; 6232 phba->sli4_hba.conf_regs_memmap_p +
6233 LPFC_ULP0_RQ_DOORBELL;
6233 phba->sli4_hba.WQDBregaddr = 6234 phba->sli4_hba.WQDBregaddr =
6234 phba->sli4_hba.conf_regs_memmap_p + LPFC_WQ_DOORBELL; 6235 phba->sli4_hba.conf_regs_memmap_p +
6236 LPFC_ULP0_WQ_DOORBELL;
6235 phba->sli4_hba.EQCQDBregaddr = 6237 phba->sli4_hba.EQCQDBregaddr =
6236 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL; 6238 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
6237 phba->sli4_hba.MQDBregaddr = 6239 phba->sli4_hba.MQDBregaddr =
@@ -6285,9 +6287,11 @@ lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
6285 return -ENODEV; 6287 return -ENODEV;
6286 6288
6287 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 6289 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6288 vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL); 6290 vf * LPFC_VFR_PAGE_SIZE +
6291 LPFC_ULP0_RQ_DOORBELL);
6289 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 6292 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6290 vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL); 6293 vf * LPFC_VFR_PAGE_SIZE +
6294 LPFC_ULP0_WQ_DOORBELL);
6291 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 6295 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6292 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL); 6296 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
6293 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 6297 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
@@ -6983,6 +6987,19 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
6983 phba->sli4_hba.fcp_wq = NULL; 6987 phba->sli4_hba.fcp_wq = NULL;
6984 } 6988 }
6985 6989
6990 if (phba->pci_bar0_memmap_p) {
6991 iounmap(phba->pci_bar0_memmap_p);
6992 phba->pci_bar0_memmap_p = NULL;
6993 }
6994 if (phba->pci_bar2_memmap_p) {
6995 iounmap(phba->pci_bar2_memmap_p);
6996 phba->pci_bar2_memmap_p = NULL;
6997 }
6998 if (phba->pci_bar4_memmap_p) {
6999 iounmap(phba->pci_bar4_memmap_p);
7000 phba->pci_bar4_memmap_p = NULL;
7001 }
7002
6986 /* Release FCP CQ mapping array */ 7003 /* Release FCP CQ mapping array */
6987 if (phba->sli4_hba.fcp_cq_map != NULL) { 7004 if (phba->sli4_hba.fcp_cq_map != NULL) {
6988 kfree(phba->sli4_hba.fcp_cq_map); 7005 kfree(phba->sli4_hba.fcp_cq_map);
@@ -7046,6 +7063,53 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7046 int rc = -ENOMEM; 7063 int rc = -ENOMEM;
7047 int fcp_eqidx, fcp_cqidx, fcp_wqidx; 7064 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
7048 int fcp_cq_index = 0; 7065 int fcp_cq_index = 0;
7066 uint32_t shdr_status, shdr_add_status;
7067 union lpfc_sli4_cfg_shdr *shdr;
7068 LPFC_MBOXQ_t *mboxq;
7069 uint32_t length;
7070
7071 /* Check for dual-ULP support */
7072 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7073 if (!mboxq) {
7074 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7075 "3249 Unable to allocate memory for "
7076 "QUERY_FW_CFG mailbox command\n");
7077 return -ENOMEM;
7078 }
7079 length = (sizeof(struct lpfc_mbx_query_fw_config) -
7080 sizeof(struct lpfc_sli4_cfg_mhdr));
7081 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7082 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
7083 length, LPFC_SLI4_MBX_EMBED);
7084
7085 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7086
7087 shdr = (union lpfc_sli4_cfg_shdr *)
7088 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
7089 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7090 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7091 if (shdr_status || shdr_add_status || rc) {
7092 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7093 "3250 QUERY_FW_CFG mailbox failed with status "
7094 "x%x add_status x%x, mbx status x%x\n",
7095 shdr_status, shdr_add_status, rc);
7096 if (rc != MBX_TIMEOUT)
7097 mempool_free(mboxq, phba->mbox_mem_pool);
7098 rc = -ENXIO;
7099 goto out_error;
7100 }
7101
7102 phba->sli4_hba.fw_func_mode =
7103 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
7104 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
7105 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
7106 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7107 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
7108 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
7109 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
7110
7111 if (rc != MBX_TIMEOUT)
7112 mempool_free(mboxq, phba->mbox_mem_pool);
7049 7113
7050 /* 7114 /*
7051 * Set up HBA Event Queues (EQs) 7115 * Set up HBA Event Queues (EQs)
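The QUERY_FW_CFG block added above follows the usual SLI4 embedded-mailbox pattern: issue the command, read the status and additional status out of the configuration subheader, and return the mailbox to the pool only if the command did not time out (a timed-out mailbox may still be written by a late completion). A stand-alone mock of that decision logic; the helper name check_mbox and struct cfg_shdr_response are illustrative, not driver API:

#include <stdio.h>

enum { MBX_SUCCESS = 0, MBX_TIMEOUT = 5 };

struct cfg_shdr_response { unsigned status, add_status; };

static int check_mbox(int rc, const struct cfg_shdr_response *rsp,
		      int *free_mbox)
{
	*free_mbox = (rc != MBX_TIMEOUT);	/* never free a timed-out box */
	if (rc != MBX_SUCCESS || rsp->status || rsp->add_status)
		return -1;			/* caller maps this to -ENXIO */
	return 0;
}

int main(void)
{
	struct cfg_shdr_response ok = { 0, 0 }, bad = { 0x2, 0x0 };
	int free_mbox, rc;

	rc = check_mbox(MBX_SUCCESS, &ok, &free_mbox);
	printf("ok:  rc=%d free=%d\n", rc, free_mbox);

	rc = check_mbox(MBX_SUCCESS, &bad, &free_mbox);
	printf("bad: rc=%d free=%d\n", rc, free_mbox);
	return 0;
}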
@@ -7660,78 +7724,6 @@ out:
7660} 7724}
7661 7725
7662/** 7726/**
7663 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
7664 * @phba: pointer to lpfc hba data structure.
7665 * @cnt: number of nop mailbox commands to send.
7666 *
7667 * This routine is invoked to send a number @cnt of NOP mailbox command and
7668 * wait for each command to complete.
7669 *
7670 * Return: the number of NOP mailbox command completed.
7671 **/
7672static int
7673lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
7674{
7675 LPFC_MBOXQ_t *mboxq;
7676 int length, cmdsent;
7677 uint32_t mbox_tmo;
7678 uint32_t rc = 0;
7679 uint32_t shdr_status, shdr_add_status;
7680 union lpfc_sli4_cfg_shdr *shdr;
7681
7682 if (cnt == 0) {
7683 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7684 "2518 Requested to send 0 NOP mailbox cmd\n");
7685 return cnt;
7686 }
7687
7688 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7689 if (!mboxq) {
7690 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7691 "2519 Unable to allocate memory for issuing "
7692 "NOP mailbox command\n");
7693 return 0;
7694 }
7695
7696 /* Set up NOP SLI4_CONFIG mailbox-ioctl command */
7697 length = (sizeof(struct lpfc_mbx_nop) -
7698 sizeof(struct lpfc_sli4_cfg_mhdr));
7699
7700 for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
7701 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7702 LPFC_MBOX_OPCODE_NOP, length,
7703 LPFC_SLI4_MBX_EMBED);
7704 if (!phba->sli4_hba.intr_enable)
7705 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7706 else {
7707 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
7708 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
7709 }
7710 if (rc == MBX_TIMEOUT)
7711 break;
7712 /* Check return status */
7713 shdr = (union lpfc_sli4_cfg_shdr *)
7714 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
7715 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7716 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
7717 &shdr->response);
7718 if (shdr_status || shdr_add_status || rc) {
7719 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7720 "2520 NOP mailbox command failed "
7721 "status x%x add_status x%x mbx "
7722 "status x%x\n", shdr_status,
7723 shdr_add_status, rc);
7724 break;
7725 }
7726 }
7727
7728 if (rc != MBX_TIMEOUT)
7729 mempool_free(mboxq, phba->mbox_mem_pool);
7730
7731 return cmdsent;
7732}
7733
7734/**
7735 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 7727 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
7736 * @phba: pointer to lpfc hba data structure. 7728 * @phba: pointer to lpfc hba data structure.
7737 * 7729 *
@@ -8499,37 +8491,6 @@ lpfc_unset_hba(struct lpfc_hba *phba)
8499} 8491}
8500 8492
8501/** 8493/**
8502 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
8503 * @phba: pointer to lpfc hba data structure.
8504 *
8505 * This routine is invoked to unset the HBA device initialization steps to
8506 * a device with SLI-4 interface spec.
8507 **/
8508static void
8509lpfc_sli4_unset_hba(struct lpfc_hba *phba)
8510{
8511 struct lpfc_vport *vport = phba->pport;
8512 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8513
8514 spin_lock_irq(shost->host_lock);
8515 vport->load_flag |= FC_UNLOADING;
8516 spin_unlock_irq(shost->host_lock);
8517
8518 phba->pport->work_port_events = 0;
8519
8520 /* Stop the SLI4 device port */
8521 lpfc_stop_port(phba);
8522
8523 lpfc_sli4_disable_intr(phba);
8524
8525 /* Reset SLI4 HBA FCoE function */
8526 lpfc_pci_function_reset(phba);
8527 lpfc_sli4_queue_destroy(phba);
8528
8529 return;
8530}
8531
8532/**
8533 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy 8494 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
8534 * @phba: Pointer to HBA context object. 8495 * @phba: Pointer to HBA context object.
8535 * 8496 *
@@ -9591,7 +9552,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
9591 struct Scsi_Host *shost = NULL; 9552 struct Scsi_Host *shost = NULL;
9592 int error, ret; 9553 int error, ret;
9593 uint32_t cfg_mode, intr_mode; 9554 uint32_t cfg_mode, intr_mode;
9594 int mcnt;
9595 int adjusted_fcp_io_channel; 9555 int adjusted_fcp_io_channel;
9596 9556
9597 /* Allocate memory for HBA structure */ 9557 /* Allocate memory for HBA structure */
@@ -9680,58 +9640,35 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
9680 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 9640 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
9681 /* Now, trying to enable interrupt and bring up the device */ 9641 /* Now, trying to enable interrupt and bring up the device */
9682 cfg_mode = phba->cfg_use_msi; 9642 cfg_mode = phba->cfg_use_msi;
9683 while (true) {
9684 /* Put device to a known state before enabling interrupt */
9685 lpfc_stop_port(phba);
9686 /* Configure and enable interrupt */
9687 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
9688 if (intr_mode == LPFC_INTR_ERROR) {
9689 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9690 "0426 Failed to enable interrupt.\n");
9691 error = -ENODEV;
9692 goto out_free_sysfs_attr;
9693 }
9694 /* Default to single EQ for non-MSI-X */
9695 if (phba->intr_type != MSIX)
9696 adjusted_fcp_io_channel = 1;
9697 else
9698 adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
9699 phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
9700 /* Set up SLI-4 HBA */
9701 if (lpfc_sli4_hba_setup(phba)) {
9702 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9703 "1421 Failed to set up hba\n");
9704 error = -ENODEV;
9705 goto out_disable_intr;
9706 }
9707 9643
9708 /* Send NOP mbx cmds for non-INTx mode active interrupt test */ 9644 /* Put device to a known state before enabling interrupt */
9709 if (intr_mode != 0) 9645 lpfc_stop_port(phba);
9710 mcnt = lpfc_sli4_send_nop_mbox_cmds(phba, 9646 /* Configure and enable interrupt */
9711 LPFC_ACT_INTR_CNT); 9647 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
9712 9648 if (intr_mode == LPFC_INTR_ERROR) {
9713 /* Check active interrupts received only for MSI/MSI-X */ 9649 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9714 if (intr_mode == 0 || 9650 "0426 Failed to enable interrupt.\n");
9715 phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) { 9651 error = -ENODEV;
9716 /* Log the current active interrupt mode */ 9652 goto out_free_sysfs_attr;
9717 phba->intr_mode = intr_mode; 9653 }
9718 lpfc_log_intr_mode(phba, intr_mode); 9654 /* Default to single EQ for non-MSI-X */
9719 break; 9655 if (phba->intr_type != MSIX)
9720 } 9656 adjusted_fcp_io_channel = 1;
9721 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9657 else
9722 "0451 Configure interrupt mode (%d) " 9658 adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
9723 "failed active interrupt test.\n", 9659 phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
9724 intr_mode); 9660 /* Set up SLI-4 HBA */
9725 /* Unset the previous SLI-4 HBA setup. */ 9661 if (lpfc_sli4_hba_setup(phba)) {
9726 /* 9662 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9727 * TODO: Is this operation compatible with IF TYPE 2 9663 "1421 Failed to set up hba\n");
9728 * devices? All port state is deleted and cleared. 9664 error = -ENODEV;
9729 */ 9665 goto out_disable_intr;
9730 lpfc_sli4_unset_hba(phba);
9731 /* Try next level of interrupt mode */
9732 cfg_mode = --intr_mode;
9733 } 9666 }
9734 9667
9668 /* Log the current active interrupt mode */
9669 phba->intr_mode = intr_mode;
9670 lpfc_log_intr_mode(phba, intr_mode);
9671
9735 /* Perform post initialization setup */ 9672 /* Perform post initialization setup */
9736 lpfc_post_init_setup(phba); 9673 lpfc_post_init_setup(phba);
9737 9674
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index d8fadcb2db73..46128c679202 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1115,6 +1115,13 @@ out:
1115 "0261 Cannot Register NameServer login\n"); 1115 "0261 Cannot Register NameServer login\n");
1116 } 1116 }
1117 1117
1118 /*
1119 * In case the node reference counter does not go to zero, ensure that
1120 * the stale state for the node is not processed.
1121 */
1122
1123 ndlp->nlp_prev_state = ndlp->nlp_state;
1124 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1118 spin_lock_irq(shost->host_lock); 1125 spin_lock_irq(shost->host_lock);
1119 ndlp->nlp_flag |= NLP_DEFER_RM; 1126 ndlp->nlp_flag |= NLP_DEFER_RM;
1120 spin_unlock_irq(shost->host_lock); 1127 spin_unlock_irq(shost->host_lock);
@@ -2159,13 +2166,16 @@ lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2159{ 2166{
2160 struct lpfc_iocbq *cmdiocb, *rspiocb; 2167 struct lpfc_iocbq *cmdiocb, *rspiocb;
2161 IOCB_t *irsp; 2168 IOCB_t *irsp;
2169 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2162 2170
2163 cmdiocb = (struct lpfc_iocbq *) arg; 2171 cmdiocb = (struct lpfc_iocbq *) arg;
2164 rspiocb = cmdiocb->context_un.rsp_iocb; 2172 rspiocb = cmdiocb->context_un.rsp_iocb;
2165 2173
2166 irsp = &rspiocb->iocb; 2174 irsp = &rspiocb->iocb;
2167 if (irsp->ulpStatus) { 2175 if (irsp->ulpStatus) {
2176 spin_lock_irq(shost->host_lock);
2168 ndlp->nlp_flag |= NLP_DEFER_RM; 2177 ndlp->nlp_flag |= NLP_DEFER_RM;
2178 spin_unlock_irq(shost->host_lock);
2169 return NLP_STE_FREED_NODE; 2179 return NLP_STE_FREED_NODE;
2170 } 2180 }
2171 return ndlp->nlp_state; 2181 return ndlp->nlp_state;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 60e5a177644c..98af07c6e300 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -288,6 +288,26 @@ lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
288} 288}
289 289
290/** 290/**
291 * lpfc_change_queue_type() - Change a device's scsi tag queuing type
292 * @sdev: Pointer the scsi device whose queue depth is to change
293 * @tag_type: Identifier for queue tag type
294 */
295static int
296lpfc_change_queue_type(struct scsi_device *sdev, int tag_type)
297{
298 if (sdev->tagged_supported) {
299 scsi_set_tag_type(sdev, tag_type);
300 if (tag_type)
301 scsi_activate_tcq(sdev, sdev->queue_depth);
302 else
303 scsi_deactivate_tcq(sdev, sdev->queue_depth);
304 } else
305 tag_type = 0;
306
307 return tag_type;
308}
309
310/**
291 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread 311 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
292 * @phba: The Hba for which this call is being executed. 312 * @phba: The Hba for which this call is being executed.
293 * 313 *
@@ -3972,7 +3992,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
3972 break; 3992 break;
3973 } 3993 }
3974 } else 3994 } else
3975 fcp_cmnd->fcpCntl1 = 0; 3995 fcp_cmnd->fcpCntl1 = SIMPLE_Q;
3976 3996
3977 sli4 = (phba->sli_rev == LPFC_SLI_REV4); 3997 sli4 = (phba->sli_rev == LPFC_SLI_REV4);
3978 3998
@@ -5150,6 +5170,7 @@ struct scsi_host_template lpfc_template = {
5150 .max_sectors = 0xFFFF, 5170 .max_sectors = 0xFFFF,
5151 .vendor_id = LPFC_NL_VENDOR_ID, 5171 .vendor_id = LPFC_NL_VENDOR_ID,
5152 .change_queue_depth = lpfc_change_queue_depth, 5172 .change_queue_depth = lpfc_change_queue_depth,
5173 .change_queue_type = lpfc_change_queue_type,
5153}; 5174};
5154 5175
5155struct scsi_host_template lpfc_vport_template = { 5176struct scsi_host_template lpfc_vport_template = {
@@ -5172,4 +5193,5 @@ struct scsi_host_template lpfc_vport_template = {
5172 .shost_attrs = lpfc_vport_attrs, 5193 .shost_attrs = lpfc_vport_attrs,
5173 .max_sectors = 0xFFFF, 5194 .max_sectors = 0xFFFF,
5174 .change_queue_depth = lpfc_change_queue_depth, 5195 .change_queue_depth = lpfc_change_queue_depth,
5196 .change_queue_type = lpfc_change_queue_type,
5175}; 5197};
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 624eab370396..55b6fc83ad71 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -124,10 +124,17 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
124 124
125 /* Ring Doorbell */ 125 /* Ring Doorbell */
126 doorbell.word0 = 0; 126 doorbell.word0 = 0;
127 bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1); 127 if (q->db_format == LPFC_DB_LIST_FORMAT) {
128 bf_set(lpfc_wq_doorbell_index, &doorbell, host_index); 128 bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
129 bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id); 129 bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
130 writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr); 130 bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
131 } else if (q->db_format == LPFC_DB_RING_FORMAT) {
132 bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
133 bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
134 } else {
135 return -EINVAL;
136 }
137 writel(doorbell.word0, q->db_regaddr);
131 138
132 return 0; 139 return 0;
133} 140}
@@ -456,10 +463,20 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
456 /* Ring The Header Receive Queue Doorbell */ 463 /* Ring The Header Receive Queue Doorbell */
457 if (!(hq->host_index % hq->entry_repost)) { 464 if (!(hq->host_index % hq->entry_repost)) {
458 doorbell.word0 = 0; 465 doorbell.word0 = 0;
459 bf_set(lpfc_rq_doorbell_num_posted, &doorbell, 466 if (hq->db_format == LPFC_DB_RING_FORMAT) {
460 hq->entry_repost); 467 bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
461 bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id); 468 hq->entry_repost);
462 writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr); 469 bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
470 } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
471 bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
472 hq->entry_repost);
473 bf_set(lpfc_rq_db_list_fm_index, &doorbell,
474 hq->host_index);
475 bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
476 } else {
477 return -EINVAL;
478 }
479 writel(doorbell.word0, hq->db_regaddr);
463 } 480 }
464 return put_index; 481 return put_index;
465} 482}
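Note the batching in the RQ put path above: the doorbell is rung only when host_index is a multiple of entry_repost, so a single register write covers a whole batch of posted receive entries. A stand-alone sketch with assumed queue sizes:

#include <stdio.h>

int main(void)
{
	const int entry_count = 64;
	const int entry_repost = 8;	/* assumed batch size */
	int host_index = 0, rings = 0;

	for (int posted = 0; posted < 100; posted++) {
		host_index = (host_index + 1) % entry_count;
		if (!(host_index % entry_repost))
			rings++;	/* one doorbell write per batch */
	}
	printf("posted 100 entries with %d doorbell writes\n", rings);
	return 0;
}

Batching the doorbell writes reduces MMIO traffic on the fast path, at the cost of slightly delayed visibility of newly posted entries to the hardware.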
@@ -4939,7 +4956,7 @@ out_free_mboxq:
4939static void 4956static void
4940lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) 4957lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4941{ 4958{
4942 uint8_t fcp_eqidx; 4959 int fcp_eqidx;
4943 4960
4944 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM); 4961 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
4945 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); 4962 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
@@ -5622,6 +5639,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5622 } 5639 }
5623 /* RPIs. */ 5640 /* RPIs. */
5624 count = phba->sli4_hba.max_cfg_param.max_rpi; 5641 count = phba->sli4_hba.max_cfg_param.max_rpi;
5642 if (count <= 0) {
5643 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5644 "3279 Invalid provisioning of "
5645 "rpi:%d\n", count);
5646 rc = -EINVAL;
5647 goto err_exit;
5648 }
5625 base = phba->sli4_hba.max_cfg_param.rpi_base; 5649 base = phba->sli4_hba.max_cfg_param.rpi_base;
5626 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 5650 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5627 phba->sli4_hba.rpi_bmask = kzalloc(longs * 5651 phba->sli4_hba.rpi_bmask = kzalloc(longs *
@@ -5644,6 +5668,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5644 5668
5645 /* VPIs. */ 5669 /* VPIs. */
5646 count = phba->sli4_hba.max_cfg_param.max_vpi; 5670 count = phba->sli4_hba.max_cfg_param.max_vpi;
5671 if (count <= 0) {
5672 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5673 "3280 Invalid provisioning of "
5674 "vpi:%d\n", count);
5675 rc = -EINVAL;
5676 goto free_rpi_ids;
5677 }
5647 base = phba->sli4_hba.max_cfg_param.vpi_base; 5678 base = phba->sli4_hba.max_cfg_param.vpi_base;
5648 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 5679 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5649 phba->vpi_bmask = kzalloc(longs * 5680 phba->vpi_bmask = kzalloc(longs *
@@ -5666,6 +5697,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5666 5697
5667 /* XRIs. */ 5698 /* XRIs. */
5668 count = phba->sli4_hba.max_cfg_param.max_xri; 5699 count = phba->sli4_hba.max_cfg_param.max_xri;
5700 if (count <= 0) {
5701 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5702 "3281 Invalid provisioning of "
5703 "xri:%d\n", count);
5704 rc = -EINVAL;
5705 goto free_vpi_ids;
5706 }
5669 base = phba->sli4_hba.max_cfg_param.xri_base; 5707 base = phba->sli4_hba.max_cfg_param.xri_base;
5670 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 5708 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5671 phba->sli4_hba.xri_bmask = kzalloc(longs * 5709 phba->sli4_hba.xri_bmask = kzalloc(longs *
@@ -5689,6 +5727,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5689 5727
5690 /* VFIs. */ 5728 /* VFIs. */
5691 count = phba->sli4_hba.max_cfg_param.max_vfi; 5729 count = phba->sli4_hba.max_cfg_param.max_vfi;
5730 if (count <= 0) {
5731 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5732 "3282 Invalid provisioning of "
5733 "vfi:%d\n", count);
5734 rc = -EINVAL;
5735 goto free_xri_ids;
5736 }
5692 base = phba->sli4_hba.max_cfg_param.vfi_base; 5737 base = phba->sli4_hba.max_cfg_param.vfi_base;
5693 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 5738 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5694 phba->sli4_hba.vfi_bmask = kzalloc(longs * 5739 phba->sli4_hba.vfi_bmask = kzalloc(longs *
@@ -8370,7 +8415,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
8370 * This is a continuation of a command (CX), so this 8415
8371 * sglq is on the active list 8416 * sglq is on the active list
8372 */ 8417 */
8373 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag); 8418 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
8374 if (!sglq) 8419 if (!sglq)
8375 return IOCB_ERROR; 8420 return IOCB_ERROR;
8376 } 8421 }
@@ -8855,12 +8900,6 @@ lpfc_sli_setup(struct lpfc_hba *phba)
8855 pring->prt[3].type = FC_TYPE_CT; 8900 pring->prt[3].type = FC_TYPE_CT;
8856 pring->prt[3].lpfc_sli_rcv_unsol_event = 8901 pring->prt[3].lpfc_sli_rcv_unsol_event =
8857 lpfc_ct_unsol_event; 8902 lpfc_ct_unsol_event;
8858 /* abort unsolicited sequence */
8859 pring->prt[4].profile = 0; /* Mask 4 */
8860 pring->prt[4].rctl = FC_RCTL_BA_ABTS;
8861 pring->prt[4].type = FC_TYPE_BLS;
8862 pring->prt[4].lpfc_sli_rcv_unsol_event =
8863 lpfc_sli4_ct_abort_unsol_event;
8864 break; 8903 break;
8865 } 8904 }
8866 totiocbsize += (pring->sli.sli3.numCiocb * 8905 totiocbsize += (pring->sli.sli3.numCiocb *
@@ -11873,7 +11912,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
11873 struct lpfc_eqe *eqe; 11912 struct lpfc_eqe *eqe;
11874 unsigned long iflag; 11913 unsigned long iflag;
11875 int ecount = 0; 11914 int ecount = 0;
11876 uint32_t fcp_eqidx; 11915 int fcp_eqidx;
11877 11916
11878 /* Get the driver's phba structure from the dev_id */ 11917 /* Get the driver's phba structure from the dev_id */
11879 fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id; 11918 fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
@@ -11975,7 +12014,7 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
11975 struct lpfc_hba *phba; 12014 struct lpfc_hba *phba;
11976 irqreturn_t hba_irq_rc; 12015 irqreturn_t hba_irq_rc;
11977 bool hba_handled = false; 12016 bool hba_handled = false;
11978 uint32_t fcp_eqidx; 12017 int fcp_eqidx;
11979 12018
11980 /* Get the driver's phba structure from the dev_id */ 12019 /* Get the driver's phba structure from the dev_id */
11981 phba = (struct lpfc_hba *)dev_id; 12020 phba = (struct lpfc_hba *)dev_id;
@@ -12097,6 +12136,54 @@ out_fail:
12097} 12136}
12098 12137
12099/** 12138/**
12139 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
12140 * @phba: pointer to lpfc hba data structure.
12141 * @pci_barset: PCI BAR set flag.
12142 *
12143 * This function iomaps the specified PCI BAR set into host memory, if not
12144 * already done, and returns the mapped address. The returned host
12145 * memory address can be NULL.
12146 */
12147static void __iomem *
12148lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
12149{
12150 struct pci_dev *pdev;
12151 unsigned long bar_map, bar_map_len;
12152
12153 if (!phba->pcidev)
12154 return NULL;
12155 else
12156 pdev = phba->pcidev;
12157
12158 switch (pci_barset) {
12159 case WQ_PCI_BAR_0_AND_1:
12160 if (!phba->pci_bar0_memmap_p) {
12161 bar_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
12162 bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
12163 phba->pci_bar0_memmap_p = ioremap(bar_map, bar_map_len);
12164 }
12165 return phba->pci_bar0_memmap_p;
12166 case WQ_PCI_BAR_2_AND_3:
12167 if (!phba->pci_bar2_memmap_p) {
12168 bar_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
12169 bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
12170 phba->pci_bar2_memmap_p = ioremap(bar_map, bar_map_len);
12171 }
12172 return phba->pci_bar2_memmap_p;
12173 case WQ_PCI_BAR_4_AND_5:
12174 if (!phba->pci_bar4_memmap_p) {
12175 bar_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
12176 bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
12177 phba->pci_bar4_memmap_p = ioremap(bar_map, bar_map_len);
12178 }
12179 return phba->pci_bar4_memmap_p;
12180 default:
12181 break;
12182 }
12183 return NULL;
12184}
12185
12186/**
12100 * lpfc_modify_fcp_eq_delay - Modify Delay Multiplier on FCP EQs 12187 * lpfc_modify_fcp_eq_delay - Modify Delay Multiplier on FCP EQs
12101 * @phba: HBA structure that indicates port to create a queue on. 12188 * @phba: HBA structure that indicates port to create a queue on.
12102 * @startq: The starting FCP EQ to modify 12189 * @startq: The starting FCP EQ to modify
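lpfc_dual_chute_pci_bar_map() above lazily maps each BAR set on first use, caches the pointer in the phba, and hands back the cached mapping on later calls; the matching iounmap() calls were added to lpfc_sli4_queue_destroy() earlier in this patch. A stand-alone model of that caching idiom, with malloc() standing in for ioremap() and an assumed barset count:

#include <stdio.h>
#include <stdlib.h>

#define NUM_BARSETS 3	/* assumed: BAR 0/1, 2/3, 4/5 */

static void *bar_cache[NUM_BARSETS];

static void *map_barset(int barset)
{
	if (barset < 0 || barset >= NUM_BARSETS)
		return NULL;			  /* unknown barset */
	if (!bar_cache[barset])
		bar_cache[barset] = malloc(4096); /* stands in for ioremap() */
	return bar_cache[barset];
}

int main(void)
{
	void *a = map_barset(1), *b = map_barset(1);

	printf("same mapping reused: %s\n", a == b ? "yes" : "no");
	return 0;
}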
@@ -12673,6 +12760,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
12673 union lpfc_sli4_cfg_shdr *shdr; 12760 union lpfc_sli4_cfg_shdr *shdr;
12674 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 12761 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12675 struct dma_address *page; 12762 struct dma_address *page;
12763 void __iomem *bar_memmap_p;
12764 uint32_t db_offset;
12765 uint16_t pci_barset;
12676 12766
12677 /* sanity check on queue memory */ 12767 /* sanity check on queue memory */
12678 if (!wq || !cq) 12768 if (!wq || !cq)
@@ -12696,6 +12786,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
12696 cq->queue_id); 12786 cq->queue_id);
12697 bf_set(lpfc_mbox_hdr_version, &shdr->request, 12787 bf_set(lpfc_mbox_hdr_version, &shdr->request,
12698 phba->sli4_hba.pc_sli4_params.wqv); 12788 phba->sli4_hba.pc_sli4_params.wqv);
12789
12699 if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) { 12790 if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) {
12700 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, 12791 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
12701 wq->entry_count); 12792 wq->entry_count);
@@ -12723,6 +12814,10 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
12723 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); 12814 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
12724 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys); 12815 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
12725 } 12816 }
12817
12818 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
12819 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
12820
12726 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12821 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12727 /* The IOCTL status is embedded in the mailbox subheader. */ 12822 /* The IOCTL status is embedded in the mailbox subheader. */
12728 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12823 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
@@ -12740,6 +12835,47 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
12740 status = -ENXIO; 12835 status = -ENXIO;
12741 goto out; 12836 goto out;
12742 } 12837 }
12838 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
12839 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
12840 &wq_create->u.response);
12841 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
12842 (wq->db_format != LPFC_DB_RING_FORMAT)) {
12843 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12844 "3265 WQ[%d] doorbell format not "
12845 "supported: x%x\n", wq->queue_id,
12846 wq->db_format);
12847 status = -EINVAL;
12848 goto out;
12849 }
12850 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
12851 &wq_create->u.response);
12852 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
12853 if (!bar_memmap_p) {
12854 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12855 "3263 WQ[%d] failed to memmap pci "
12856 "barset:x%x\n", wq->queue_id,
12857 pci_barset);
12858 status = -ENOMEM;
12859 goto out;
12860 }
12861 db_offset = wq_create->u.response.doorbell_offset;
12862 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
12863 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
12864 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12865 "3252 WQ[%d] doorbell offset not "
12866 "supported: x%x\n", wq->queue_id,
12867 db_offset);
12868 status = -EINVAL;
12869 goto out;
12870 }
12871 wq->db_regaddr = bar_memmap_p + db_offset;
12872 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12873 "3264 WQ[%d]: barset:x%x, offset:x%x\n",
12874 wq->queue_id, pci_barset, db_offset);
12875 } else {
12876 wq->db_format = LPFC_DB_LIST_FORMAT;
12877 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
12878 }
12743 wq->type = LPFC_WQ; 12879 wq->type = LPFC_WQ;
12744 wq->assoc_qid = cq->queue_id; 12880 wq->assoc_qid = cq->queue_id;
12745 wq->subtype = subtype; 12881 wq->subtype = subtype;
@@ -12816,6 +12952,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
12816 uint32_t shdr_status, shdr_add_status; 12952 uint32_t shdr_status, shdr_add_status;
12817 union lpfc_sli4_cfg_shdr *shdr; 12953 union lpfc_sli4_cfg_shdr *shdr;
12818 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 12954 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12955 void __iomem *bar_memmap_p;
12956 uint32_t db_offset;
12957 uint16_t pci_barset;
12819 12958
12820 /* sanity check on queue memory */ 12959 /* sanity check on queue memory */
12821 if (!hrq || !drq || !cq) 12960 if (!hrq || !drq || !cq)
@@ -12894,6 +13033,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
12894 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 13033 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
12895 putPaddrHigh(dmabuf->phys); 13034 putPaddrHigh(dmabuf->phys);
12896 } 13035 }
13036 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
13037 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
13038
12897 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13039 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12898 /* The IOCTL status is embedded in the mailbox subheader. */ 13040 /* The IOCTL status is embedded in the mailbox subheader. */
12899 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13041 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
@@ -12911,6 +13053,50 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
12911 status = -ENXIO; 13053 status = -ENXIO;
12912 goto out; 13054 goto out;
12913 } 13055 }
13056
13057 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
13058 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
13059 &rq_create->u.response);
13060 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
13061 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
13062 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13063 "3262 RQ [%d] doorbell format not "
13064 "supported: x%x\n", hrq->queue_id,
13065 hrq->db_format);
13066 status = -EINVAL;
13067 goto out;
13068 }
13069
13070 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
13071 &rq_create->u.response);
13072 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
13073 if (!bar_memmap_p) {
13074 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13075 "3269 RQ[%d] failed to memmap pci "
13076 "barset:x%x\n", hrq->queue_id,
13077 pci_barset);
13078 status = -ENOMEM;
13079 goto out;
13080 }
13081
13082 db_offset = rq_create->u.response.doorbell_offset;
13083 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
13084 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
13085 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13086 "3270 RQ[%d] doorbell offset not "
13087 "supported: x%x\n", hrq->queue_id,
13088 db_offset);
13089 status = -EINVAL;
13090 goto out;
13091 }
13092 hrq->db_regaddr = bar_memmap_p + db_offset;
13093 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13094 "3266 RQ[qid:%d]: barset:x%x, offset:x%x\n",
13095 hrq->queue_id, pci_barset, db_offset);
13096 } else {
13097 hrq->db_format = LPFC_DB_RING_FORMAT;
13098 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
13099 }
12914 hrq->type = LPFC_HRQ; 13100 hrq->type = LPFC_HRQ;
12915 hrq->assoc_qid = cq->queue_id; 13101 hrq->assoc_qid = cq->queue_id;
12916 hrq->subtype = subtype; 13102 hrq->subtype = subtype;
@@ -12976,6 +13162,8 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
12976 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 13162 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
12977 putPaddrHigh(dmabuf->phys); 13163 putPaddrHigh(dmabuf->phys);
12978 } 13164 }
13165 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
13166 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
12979 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13167 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12980 /* The IOCTL status is embedded in the mailbox subheader. */ 13168 /* The IOCTL status is embedded in the mailbox subheader. */
12981 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 13169 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
@@ -14063,6 +14251,40 @@ lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
14063} 14251}
14064 14252
14065/** 14253/**
14254 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
14255 * @vport: pointer to a virtual port
14256 * @dmabuf: pointer to a dmabuf that describes the FC sequence
14257 *
14258 * This function tries to abort the assembled sequence at the upper level
14259 * protocol, as described by the information in the basic abort @dmabuf. It
14260 * checks whether a matching pending context exists at the upper level
14261 * protocol and, if so, cleans it up.
14262 *
14263 * Return
14264 * true -- if a matching pending context of the sequence was found and
14265 * cleaned up at the ulp;
14266 * false -- if no matching pending context of the sequence is present
14267 * at the ulp.
14268 **/
14269static bool
14270lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
14271{
14272 struct lpfc_hba *phba = vport->phba;
14273 int handled;
14274
14275 /* Accepting abort at ulp with SLI4 only */
14276 if (phba->sli_rev < LPFC_SLI_REV4)
14277 return false;
14278
14279 /* Let all interested upper level protocols handle the abort */
14280 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
14281 if (handled)
14282 return true;
14283
14284 return false;
14285}
14286
14287/**
14066 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler 14288 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
14067 * @phba: Pointer to HBA context object. 14289 * @phba: Pointer to HBA context object.
14068 * @cmd_iocbq: pointer to the command iocbq structure. 14290 * @cmd_iocbq: pointer to the command iocbq structure.
@@ -14077,8 +14299,14 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
14077 struct lpfc_iocbq *cmd_iocbq, 14299 struct lpfc_iocbq *cmd_iocbq,
14078 struct lpfc_iocbq *rsp_iocbq) 14300 struct lpfc_iocbq *rsp_iocbq)
14079{ 14301{
14080 if (cmd_iocbq) 14302 struct lpfc_nodelist *ndlp;
14303
14304 if (cmd_iocbq) {
14305 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
14306 lpfc_nlp_put(ndlp);
14307 lpfc_nlp_not_used(ndlp);
14081 lpfc_sli_release_iocbq(phba, cmd_iocbq); 14308 lpfc_sli_release_iocbq(phba, cmd_iocbq);
14309 }
14082 14310
14083 /* Failure means BLS ABORT RSP did not get delivered to remote node*/ 14311 /* Failure means BLS ABORT RSP did not get delivered to remote node*/
14084 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus) 14312 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
@@ -14118,9 +14346,10 @@ lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
14118 * event after aborting the sequence handling. 14346 * event after aborting the sequence handling.
14119 **/ 14347 **/
14120static void 14348static void
14121lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba, 14349lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
14122 struct fc_frame_header *fc_hdr) 14350 struct fc_frame_header *fc_hdr, bool aborted)
14123{ 14351{
14352 struct lpfc_hba *phba = vport->phba;
14124 struct lpfc_iocbq *ctiocb = NULL; 14353 struct lpfc_iocbq *ctiocb = NULL;
14125 struct lpfc_nodelist *ndlp; 14354 struct lpfc_nodelist *ndlp;
14126 uint16_t oxid, rxid, xri, lxri; 14355 uint16_t oxid, rxid, xri, lxri;
@@ -14135,12 +14364,27 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
14135 oxid = be16_to_cpu(fc_hdr->fh_ox_id); 14364 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
14136 rxid = be16_to_cpu(fc_hdr->fh_rx_id); 14365 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
14137 14366
14138 ndlp = lpfc_findnode_did(phba->pport, sid); 14367 ndlp = lpfc_findnode_did(vport, sid);
14139 if (!ndlp) { 14368 if (!ndlp) {
14140 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 14369 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
14141 "1268 Find ndlp returned NULL for oxid:x%x " 14370 if (!ndlp) {
14142 "SID:x%x\n", oxid, sid); 14371 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
14143 return; 14372 "1268 Failed to allocate ndlp for "
14373 "oxid:x%x SID:x%x\n", oxid, sid);
14374 return;
14375 }
14376 lpfc_nlp_init(vport, ndlp, sid);
14377 /* Put ndlp onto pport node list */
14378 lpfc_enqueue_node(vport, ndlp);
14379 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
14380 /* re-setup ndlp without removing from node list */
14381 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
14382 if (!ndlp) {
14383 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
14384 "3275 Failed to active ndlp found "
14385 "for oxid:x%x SID:x%x\n", oxid, sid);
14386 return;
14387 }
14144 } 14388 }
14145 14389
14146 /* Allocate buffer for rsp iocb */ 14390 /* Allocate buffer for rsp iocb */
@@ -14164,7 +14408,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
14164 icmd->ulpLe = 1; 14408 icmd->ulpLe = 1;
14165 icmd->ulpClass = CLASS3; 14409 icmd->ulpClass = CLASS3;
14166 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 14410 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
14167 ctiocb->context1 = ndlp; 14411 ctiocb->context1 = lpfc_nlp_get(ndlp);
14168 14412
14169 ctiocb->iocb_cmpl = NULL; 14413 ctiocb->iocb_cmpl = NULL;
14170 ctiocb->vport = phba->pport; 14414 ctiocb->vport = phba->pport;
@@ -14183,14 +14427,24 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
14183 if (lxri != NO_XRI) 14427 if (lxri != NO_XRI)
14184 lpfc_set_rrq_active(phba, ndlp, lxri, 14428 lpfc_set_rrq_active(phba, ndlp, lxri,
14185 (xri == oxid) ? rxid : oxid, 0); 14429 (xri == oxid) ? rxid : oxid, 0);
14186 /* If the oxid maps to the FCP XRI range or if it is out of range, 14430 /* For BA_ABTS from exchange responder, if the logical xri with
14187 * send a BLS_RJT. The driver no longer has that exchange. 14431 * the oxid maps to the FCP XRI range, the port no longer has
14188 * Override the IOCB for a BA_RJT. 14432 * that exchange context, send a BLS_RJT. Override the IOCB for
14433 * a BA_RJT.
14189 */ 14434 */
14190 if (xri > (phba->sli4_hba.max_cfg_param.max_xri + 14435 if ((fctl & FC_FC_EX_CTX) &&
14191 phba->sli4_hba.max_cfg_param.xri_base) || 14436 (lxri > lpfc_sli4_get_els_iocb_cnt(phba))) {
14192 xri > (lpfc_sli4_get_els_iocb_cnt(phba) + 14437 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
14193 phba->sli4_hba.max_cfg_param.xri_base)) { 14438 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
14439 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
14440 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
14441 }
14442
14443 /* If BA_ABTS failed to abort a partially assembled receive sequence,
14444 * the driver no longer has that exchange, send a BLS_RJT. Override
14445 * the IOCB for a BA_RJT.
14446 */
14447 if (aborted == false) {
14194 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; 14448 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
14195 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); 14449 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
14196 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); 14450 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
@@ -14214,17 +14468,19 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
14214 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid); 14468 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
14215 14469
14216 /* Xmit CT abts response on exchange <xid> */ 14470 /* Xmit CT abts response on exchange <xid> */
14217 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 14471 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
14218 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n", 14472 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
14219 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state); 14473 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
14220 14474
14221 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); 14475 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
14222 if (rc == IOCB_ERROR) { 14476 if (rc == IOCB_ERROR) {
14223 lpfc_printf_log(phba, KERN_ERR, LOG_ELS, 14477 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
14224 "2925 Failed to issue CT ABTS RSP x%x on " 14478 "2925 Failed to issue CT ABTS RSP x%x on "
14225 "xri x%x, Data x%x\n", 14479 "xri x%x, Data x%x\n",
14226 icmd->un.xseq64.w5.hcsw.Rctl, oxid, 14480 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
14227 phba->link_state); 14481 phba->link_state);
14482 lpfc_nlp_put(ndlp);
14483 ctiocb->context1 = NULL;
14228 lpfc_sli_release_iocbq(phba, ctiocb); 14484 lpfc_sli_release_iocbq(phba, ctiocb);
14229 } 14485 }
14230} 14486}
@@ -14249,32 +14505,25 @@ lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
14249 struct lpfc_hba *phba = vport->phba; 14505 struct lpfc_hba *phba = vport->phba;
14250 struct fc_frame_header fc_hdr; 14506 struct fc_frame_header fc_hdr;
14251 uint32_t fctl; 14507 uint32_t fctl;
14252 bool abts_par; 14508 bool aborted;
14253 14509
14254 /* Make a copy of fc_hdr before the dmabuf being released */ 14510 /* Make a copy of fc_hdr before the dmabuf being released */
14255 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header)); 14511 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
14256 fctl = sli4_fctl_from_fc_hdr(&fc_hdr); 14512 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
14257 14513
14258 if (fctl & FC_FC_EX_CTX) { 14514 if (fctl & FC_FC_EX_CTX) {
14259 /* 14515 /* ABTS by responder to exchange, no cleanup needed */
14260 * ABTS sent by responder to exchange, just free the buffer 14516 aborted = true;
14261 */
14262 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14263 } else { 14517 } else {
14264 /* 14518 /* ABTS by initiator to exchange, need to do cleanup */
14265 * ABTS sent by initiator to exchange, need to do cleanup 14519 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
14266 */ 14520 if (aborted == false)
14267 /* Try to abort partially assembled seq */ 14521 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
14268 abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf);
14269
14270 /* Send abort to ULP if partially seq abort failed */
14271 if (abts_par == false)
14272 lpfc_sli4_send_seq_to_ulp(vport, dmabuf);
14273 else
14274 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14275 } 14522 }
14276 /* Send basic accept (BA_ACC) to the abort requester */ 14523 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14277 lpfc_sli4_seq_abort_rsp(phba, &fc_hdr); 14524
14525 /* Respond with BA_ACC or BA_RJT accordingly */
14526 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
14278} 14527}
14279 14528
14280/** 14529/**
@@ -15307,10 +15556,13 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
15307{ 15556{
15308 uint16_t next_fcf_index; 15557 uint16_t next_fcf_index;
15309 15558
15559initial_priority:
15310 /* Search start from next bit of currently registered FCF index */ 15560 /* Search start from next bit of currently registered FCF index */
15561 next_fcf_index = phba->fcf.current_rec.fcf_indx;
15562
15311next_priority: 15563next_priority:
15312 next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) % 15564 /* Determine the next fcf index to check */
15313 LPFC_SLI4_FCF_TBL_INDX_MAX; 15565 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
15314 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 15566 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
15315 LPFC_SLI4_FCF_TBL_INDX_MAX, 15567 LPFC_SLI4_FCF_TBL_INDX_MAX,
15316 next_fcf_index); 15568 next_fcf_index);
@@ -15337,7 +15589,7 @@ next_priority:
15337 * at that level and continue the selection process. 15589 * at that level and continue the selection process.
15338 */ 15590 */
15339 if (lpfc_check_next_fcf_pri_level(phba)) 15591 if (lpfc_check_next_fcf_pri_level(phba))
15340 goto next_priority; 15592 goto initial_priority;
15341 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 15593 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
15342 "2844 No roundrobin failover FCF available\n"); 15594 "2844 No roundrobin failover FCF available\n");
15343 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) 15595 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
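The change above restarts the roundrobin scan from the currently registered FCF index after a priority-level change, instead of re-deriving the starting point inside next_priority on every pass. A stand-alone sketch of the intended wrap-around search over the eligibility bitmap (table size assumed, find_next_bit replaced by a plain loop):

#include <stdio.h>

#define FCF_TBL_INDX_MAX 32	/* assumed table size */

static int next_fcf_index(unsigned int bmask, int current_indx)
{
	int idx = (current_indx + 1) % FCF_TBL_INDX_MAX;

	/* scan forward from idx, wrapping once through the whole table */
	for (int probed = 0; probed < FCF_TBL_INDX_MAX; probed++) {
		int candidate = (idx + probed) % FCF_TBL_INDX_MAX;

		if (bmask & (1u << candidate))
			return candidate;
	}
	return -1;	/* no roundrobin failover FCF available */
}

int main(void)
{
	unsigned int bmask = (1u << 3) | (1u << 17);

	printf("after 3  -> %d\n", next_fcf_index(bmask, 3));  /* 17 */
	printf("after 17 -> %d\n", next_fcf_index(bmask, 17)); /* 3, wraps */
	return 0;
}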
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 44c427a45d66..be02b59ea279 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -139,6 +139,10 @@ struct lpfc_queue {
139 139
140 struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */ 140 struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */
141 141
142 uint16_t db_format;
143#define LPFC_DB_RING_FORMAT 0x01
144#define LPFC_DB_LIST_FORMAT 0x02
145 void __iomem *db_regaddr;
142 /* For q stats */ 146 /* For q stats */
143 uint32_t q_cnt_1; 147 uint32_t q_cnt_1;
144 uint32_t q_cnt_2; 148 uint32_t q_cnt_2;
@@ -508,6 +512,10 @@ struct lpfc_sli4_hba {
508 struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */ 512 struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
509 struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */ 513 struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
510 514
515 uint8_t fw_func_mode; /* FW function protocol mode */
516 uint32_t ulp0_mode; /* ULP0 protocol mode */
517 uint32_t ulp1_mode; /* ULP1 protocol mode */
518
511 /* Setup information for various queue parameters */ 519 /* Setup information for various queue parameters */
512 int eq_esize; 520 int eq_esize;
513 int eq_ecount; 521 int eq_ecount;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index ba596e854bbc..f3b7795a296b 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.36" 21#define LPFC_DRIVER_VERSION "8.3.37"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23 23
24/* Used for SLI 2/3 */ 24/* Used for SLI 2/3 */
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index ffd85c511c8e..5e24e7e73714 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -155,7 +155,7 @@ _base_fault_reset_work(struct work_struct *work)
155 struct task_struct *p; 155 struct task_struct *p;
156 156
157 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); 157 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
158 if (ioc->shost_recovery) 158 if (ioc->shost_recovery || ioc->pci_error_recovery)
159 goto rearm_timer; 159 goto rearm_timer;
160 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); 160 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
161 161
@@ -164,6 +164,20 @@ _base_fault_reset_work(struct work_struct *work)
164 printk(MPT2SAS_INFO_FMT "%s : SAS host is non-operational !!!!\n", 164 printk(MPT2SAS_INFO_FMT "%s : SAS host is non-operational !!!!\n",
165 ioc->name, __func__); 165 ioc->name, __func__);
166 166
 167 /* It may be possible that EEH recovery can resolve some of the
 168 * pci bus failure issues rather than removing the dead ioc
 169 * function by treating the controller as non-operational. So
 170 * priority is given to EEH recovery here. If it does not
 171 * resolve the issue, the mpt2sas driver will move this
 172 * controller to the non-operational state and remove the dead
 173 * ioc function.
 174 */
175 if (ioc->non_operational_loop++ < 5) {
176 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
177 flags);
178 goto rearm_timer;
179 }
180
167 /* 181 /*
168 * Call _scsih_flush_pending_cmds callback so that we flush all 182 * Call _scsih_flush_pending_cmds callback so that we flush all
 169 * pending commands back to OS. This call is required to avoid 183 * pending commands back to OS. This call is required to avoid
@@ -193,6 +207,8 @@ _base_fault_reset_work(struct work_struct *work)
193 return; /* don't rearm timer */ 207 return; /* don't rearm timer */
194 } 208 }
195 209
210 ioc->non_operational_loop = 0;
211
196 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { 212 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
197 rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, 213 rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
198 FORCE_BIG_HAMMER); 214 FORCE_BIG_HAMMER);
@@ -4386,6 +4402,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
4386 if (missing_delay[0] != -1 && missing_delay[1] != -1) 4402 if (missing_delay[0] != -1 && missing_delay[1] != -1)
4387 _base_update_missing_delay(ioc, missing_delay[0], 4403 _base_update_missing_delay(ioc, missing_delay[0],
4388 missing_delay[1]); 4404 missing_delay[1]);
4405 ioc->non_operational_loop = 0;
4389 4406
4390 return 0; 4407 return 0;
4391 4408
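
The new `non_operational_loop` counter turns the dead-IOC decision into a bounded retry: the fault poller re-arms up to five times so EEH/PCI error recovery gets a chance to bring the bus back before the driver tears the controller down, and any healthy poll resets the budget. A standalone model of the pattern (the threshold of 5 matches the patch; everything else is simplified):

    #include <stdbool.h>
    #include <stdio.h>

    #define NON_OP_RETRIES 5

    struct ioc_model {
        unsigned int non_operational_loop;
    };

    /* Returns true if the caller should re-arm the timer and retry,
     * false once the controller should be treated as dead. */
    static bool handle_non_operational(struct ioc_model *ioc)
    {
        if (ioc->non_operational_loop++ < NON_OP_RETRIES) {
            printf("non-operational, retry %u of %d (waiting on EEH)\n",
                   ioc->non_operational_loop, NON_OP_RETRIES);
            return true;    /* rearm timer, let EEH recovery try */
        }
        return false;       /* give up: remove the dead ioc function */
    }

    /* On any healthy poll the counter is cleared, so a later glitch
     * gets the full retry budget again. */
    static void handle_operational(struct ioc_model *ioc)
    {
        ioc->non_operational_loop = 0;
    }
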
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 543d8d637479..c6ee7aad7501 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -835,6 +835,7 @@ struct MPT2SAS_ADAPTER {
835 u16 cpu_msix_table_sz; 835 u16 cpu_msix_table_sz;
836 u32 ioc_reset_count; 836 u32 ioc_reset_count;
837 MPT2SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds; 837 MPT2SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
838 u32 non_operational_loop;
838 839
839 /* internal commands, callback index */ 840 /* internal commands, callback index */
840 u8 scsi_io_cb_idx; 841 u8 scsi_io_cb_idx;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 04f8010f0770..18360032a520 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -42,7 +42,6 @@
42 * USA. 42 * USA.
43 */ 43 */
44 44
45#include <linux/version.h>
46#include <linux/kernel.h> 45#include <linux/kernel.h>
47#include <linux/module.h> 46#include <linux/module.h>
48#include <linux/errno.h> 47#include <linux/errno.h>
@@ -1310,7 +1309,6 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
1310 void *sg_local, *chain; 1309 void *sg_local, *chain;
1311 u32 chain_offset; 1310 u32 chain_offset;
1312 u32 chain_length; 1311 u32 chain_length;
1313 u32 chain_flags;
1314 int sges_left; 1312 int sges_left;
1315 u32 sges_in_segment; 1313 u32 sges_in_segment;
1316 u8 simple_sgl_flags; 1314 u8 simple_sgl_flags;
@@ -1356,8 +1354,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
1356 sges_in_segment--; 1354 sges_in_segment--;
1357 } 1355 }
1358 1356
1359 /* initializing the chain flags and pointers */ 1357 /* initializing the pointers */
1360 chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
1361 chain_req = _base_get_chain_buffer_tracker(ioc, smid); 1358 chain_req = _base_get_chain_buffer_tracker(ioc, smid);
1362 if (!chain_req) 1359 if (!chain_req)
1363 return -1; 1360 return -1;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index ce7e59b2fc08..1df9ed4f371d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -41,7 +41,6 @@
41 * USA. 41 * USA.
42 */ 42 */
43 43
44#include <linux/version.h>
45#include <linux/module.h> 44#include <linux/module.h>
46#include <linux/kernel.h> 45#include <linux/kernel.h>
47#include <linux/init.h> 46#include <linux/init.h>
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 8af944d7d13d..054d5231c974 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -42,7 +42,6 @@
42 * USA. 42 * USA.
43 */ 43 */
44 44
45#include <linux/version.h>
46#include <linux/kernel.h> 45#include <linux/kernel.h>
47#include <linux/module.h> 46#include <linux/module.h>
48#include <linux/errno.h> 47#include <linux/errno.h>
@@ -3136,7 +3135,7 @@ _ctl_diag_trigger_mpi_store(struct device *cdev,
3136 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3135 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3137 sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count); 3136 sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count);
3138 memset(&ioc->diag_trigger_mpi, 0, 3137 memset(&ioc->diag_trigger_mpi, 0,
3139 sizeof(struct SL_WH_EVENT_TRIGGERS_T)); 3138 sizeof(ioc->diag_trigger_mpi));
3140 memcpy(&ioc->diag_trigger_mpi, buf, sz); 3139 memcpy(&ioc->diag_trigger_mpi, buf, sz);
3141 if (ioc->diag_trigger_mpi.ValidEntries > NUM_VALID_ENTRIES) 3140 if (ioc->diag_trigger_mpi.ValidEntries > NUM_VALID_ENTRIES)
3142 ioc->diag_trigger_mpi.ValidEntries = NUM_VALID_ENTRIES; 3141 ioc->diag_trigger_mpi.ValidEntries = NUM_VALID_ENTRIES;
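
The `_ctl_diag_trigger_mpi_store` fix is a classic `memset` sizing bug: the buffer is a `SL_WH_MPI_TRIGGERS_T` but was cleared with `sizeof(struct SL_WH_EVENT_TRIGGERS_T)`, leaving the tail stale. Taking the size from the object itself makes the call immune to such mismatches; a minimal illustration with placeholder types:

    #include <string.h>

    struct event_triggers { int v[4]; };
    struct mpi_triggers   { int v[32]; };   /* larger than event_triggers */

    void reset_mpi(struct mpi_triggers *t)
    {
        /* Wrong: memset(t, 0, sizeof(struct event_triggers));
         * would leave most of *t holding stale bytes. */
        memset(t, 0, sizeof(*t));   /* size always tracks the object */
    }
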
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 6421a06c4ce2..dcbf7c880cb2 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -41,7 +41,6 @@
41 * USA. 41 * USA.
42 */ 42 */
43 43
44#include <linux/version.h>
45#include <linux/module.h> 44#include <linux/module.h>
46#include <linux/kernel.h> 45#include <linux/kernel.h>
47#include <linux/init.h> 46#include <linux/init.h>
@@ -2755,13 +2754,11 @@ _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
2755 int i; 2754 int i;
2756 u16 handle; 2755 u16 handle;
2757 u16 reason_code; 2756 u16 reason_code;
2758 u8 phy_number;
2759 2757
2760 for (i = 0; i < event_data->NumEntries; i++) { 2758 for (i = 0; i < event_data->NumEntries; i++) {
2761 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); 2759 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
2762 if (!handle) 2760 if (!handle)
2763 continue; 2761 continue;
2764 phy_number = event_data->StartPhyNum + i;
2765 reason_code = event_data->PHY[i].PhyStatus & 2762 reason_code = event_data->PHY[i].PhyStatus &
2766 MPI2_EVENT_SAS_TOPO_RC_MASK; 2763 MPI2_EVENT_SAS_TOPO_RC_MASK;
2767 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING) 2764 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
index da6c5f25749c..6f8d6213040b 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
@@ -42,7 +42,6 @@
42 * USA. 42 * USA.
43 */ 43 */
44 44
45#include <linux/version.h>
46#include <linux/kernel.h> 45#include <linux/kernel.h>
47#include <linux/module.h> 46#include <linux/module.h>
48#include <linux/errno.h> 47#include <linux/errno.h>
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c b/drivers/scsi/qla4xxx/ql4_83xx.c
index 6e9af20be12f..5d8fe4f75650 100644
--- a/drivers/scsi/qla4xxx/ql4_83xx.c
+++ b/drivers/scsi/qla4xxx/ql4_83xx.c
@@ -538,7 +538,7 @@ struct device_info {
538 int port_num; 538 int port_num;
539}; 539};
540 540
541static int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha) 541int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha)
542{ 542{
543 uint32_t drv_active; 543 uint32_t drv_active;
544 uint32_t dev_part, dev_part1, dev_part2; 544 uint32_t dev_part, dev_part1, dev_part2;
@@ -1351,31 +1351,58 @@ exit_start_fw:
1351 1351
1352/*----------------------Interrupt Related functions ---------------------*/ 1352/*----------------------Interrupt Related functions ---------------------*/
1353 1353
1354void qla4_83xx_disable_intrs(struct scsi_qla_host *ha) 1354static void qla4_83xx_disable_iocb_intrs(struct scsi_qla_host *ha)
1355{
1356 if (test_and_clear_bit(AF_83XX_IOCB_INTR_ON, &ha->flags))
1357 qla4_8xxx_intr_disable(ha);
1358}
1359
1360static void qla4_83xx_disable_mbox_intrs(struct scsi_qla_host *ha)
1355{ 1361{
1356 uint32_t mb_int, ret; 1362 uint32_t mb_int, ret;
1357 1363
1358 if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags)) 1364 if (test_and_clear_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) {
1359 qla4_8xxx_mbx_intr_disable(ha); 1365 ret = readl(&ha->qla4_83xx_reg->mbox_int);
1366 mb_int = ret & ~INT_ENABLE_FW_MB;
1367 writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
1368 writel(1, &ha->qla4_83xx_reg->leg_int_mask);
1369 }
1370}
1360 1371
1361 ret = readl(&ha->qla4_83xx_reg->mbox_int); 1372void qla4_83xx_disable_intrs(struct scsi_qla_host *ha)
1362 mb_int = ret & ~INT_ENABLE_FW_MB; 1373{
1363 writel(mb_int, &ha->qla4_83xx_reg->mbox_int); 1374 qla4_83xx_disable_mbox_intrs(ha);
1364 writel(1, &ha->qla4_83xx_reg->leg_int_mask); 1375 qla4_83xx_disable_iocb_intrs(ha);
1365} 1376}
1366 1377
1367void qla4_83xx_enable_intrs(struct scsi_qla_host *ha) 1378static void qla4_83xx_enable_iocb_intrs(struct scsi_qla_host *ha)
1379{
1380 if (!test_bit(AF_83XX_IOCB_INTR_ON, &ha->flags)) {
1381 qla4_8xxx_intr_enable(ha);
1382 set_bit(AF_83XX_IOCB_INTR_ON, &ha->flags);
1383 }
1384}
1385
1386void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha)
1368{ 1387{
1369 uint32_t mb_int; 1388 uint32_t mb_int;
1370 1389
1371 qla4_8xxx_mbx_intr_enable(ha); 1390 if (!test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) {
1372 mb_int = INT_ENABLE_FW_MB; 1391 mb_int = INT_ENABLE_FW_MB;
1373 writel(mb_int, &ha->qla4_83xx_reg->mbox_int); 1392 writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
1374 writel(0, &ha->qla4_83xx_reg->leg_int_mask); 1393 writel(0, &ha->qla4_83xx_reg->leg_int_mask);
1394 set_bit(AF_83XX_MBOX_INTR_ON, &ha->flags);
1395 }
1396}
1375 1397
1376 set_bit(AF_INTERRUPTS_ON, &ha->flags); 1398
1399void qla4_83xx_enable_intrs(struct scsi_qla_host *ha)
1400{
1401 qla4_83xx_enable_mbox_intrs(ha);
1402 qla4_83xx_enable_iocb_intrs(ha);
1377} 1403}
1378 1404
1405
1379void qla4_83xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd, 1406void qla4_83xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
1380 int incount) 1407 int incount)
1381{ 1408{
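
For ISP83xx the patch splits interrupt control into two idempotent halves, each tracked by its own flag bit (`AF_83XX_MBOX_INTR_ON`, `AF_83XX_IOCB_INTR_ON`), so mailbox interrupts can be switched on early during init while IOCB interrupts follow later through `isp_ops->enable_intrs`. A compact model of the flag-guarded composition (register and mailbox programming elided):

    #include <stdbool.h>

    struct ha_model {
        bool mbox_intr_on;   /* stands in for AF_83XX_MBOX_INTR_ON */
        bool iocb_intr_on;   /* stands in for AF_83XX_IOCB_INTR_ON */
    };

    static void enable_mbox_intrs(struct ha_model *ha)
    {
        if (!ha->mbox_intr_on) {
            /* program mbox_int / leg_int_mask registers here */
            ha->mbox_intr_on = true;
        }
    }

    static void enable_iocb_intrs(struct ha_model *ha)
    {
        if (!ha->iocb_intr_on) {
            /* issue the firmware intr-enable mailbox command here */
            ha->iocb_intr_on = true;
        }
    }

    /* The public entry point composes the two idempotent halves, so
     * mailbox interrupts can also be enabled early on their own. */
    static void enable_intrs(struct ha_model *ha)
    {
        enable_mbox_intrs(ha);
        enable_iocb_intrs(ha);
    }
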
diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c
index 76819b71ada7..19ee55a6226c 100644
--- a/drivers/scsi/qla4xxx/ql4_attr.c
+++ b/drivers/scsi/qla4xxx/ql4_attr.c
@@ -74,16 +74,22 @@ qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
74 } 74 }
75 break; 75 break;
76 case 2: 76 case 2:
77 /* Reset HBA */ 77 /* Reset HBA and collect FW dump */
78 ha->isp_ops->idc_lock(ha); 78 ha->isp_ops->idc_lock(ha);
79 dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE); 79 dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
80 if (dev_state == QLA8XXX_DEV_READY) { 80 if (dev_state == QLA8XXX_DEV_READY) {
81 ql4_printk(KERN_INFO, ha, 81 ql4_printk(KERN_INFO, ha, "%s: Setting Need reset\n",
82 "%s: Setting Need reset, reset_owner is 0x%x.\n", 82 __func__);
83 __func__, ha->func_num);
84 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 83 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
85 QLA8XXX_DEV_NEED_RESET); 84 QLA8XXX_DEV_NEED_RESET);
86 set_bit(AF_8XXX_RST_OWNER, &ha->flags); 85 if (is_qla8022(ha) ||
86 (is_qla8032(ha) &&
87 qla4_83xx_can_perform_reset(ha))) {
88 set_bit(AF_8XXX_RST_OWNER, &ha->flags);
89 set_bit(AF_FW_RECOVERY, &ha->flags);
90 ql4_printk(KERN_INFO, ha, "%s: Reset owner is 0x%x\n",
91 __func__, ha->func_num);
92 }
87 } else 93 } else
88 ql4_printk(KERN_INFO, ha, 94 ql4_printk(KERN_INFO, ha,
89 "%s: Reset not performed as device state is 0x%x\n", 95 "%s: Reset not performed as device state is 0x%x\n",
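
The sysfs-triggered reset now claims reset ownership only when this function may actually perform it: unconditionally on ISP8022, and on ISP8032 only if `qla4_83xx_can_perform_reset()` (newly exported for this purpose) agrees; `AF_FW_RECOVERY` is set alongside so outstanding I/O is flushed. The decision reduces to a short predicate, modeled here:

    #include <stdbool.h>

    /* Model of the new ownership test in the fw_dump reset path. */
    static bool should_own_reset(bool is_8022, bool is_8032,
                                 bool (*can_perform_reset)(void))
    {
        return is_8022 || (is_8032 && can_perform_reset());
    }
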
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 329d553eae94..129f5dd02822 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -136,6 +136,7 @@
136#define RESPONSE_QUEUE_DEPTH 64 136#define RESPONSE_QUEUE_DEPTH 64
137#define QUEUE_SIZE 64 137#define QUEUE_SIZE 64
138#define DMA_BUFFER_SIZE 512 138#define DMA_BUFFER_SIZE 512
139#define IOCB_HIWAT_CUSHION 4
139 140
140/* 141/*
141 * Misc 142 * Misc
@@ -180,6 +181,7 @@
180#define DISABLE_ACB_TOV 30 181#define DISABLE_ACB_TOV 30
181#define IP_CONFIG_TOV 30 182#define IP_CONFIG_TOV 30
182#define LOGIN_TOV 12 183#define LOGIN_TOV 12
184#define BOOT_LOGIN_RESP_TOV 60
183 185
184#define MAX_RESET_HA_RETRIES 2 186#define MAX_RESET_HA_RETRIES 2
185#define FW_ALIVE_WAIT_TOV 3 187#define FW_ALIVE_WAIT_TOV 3
@@ -314,6 +316,7 @@ struct ql4_tuple_ddb {
314 * DDB flags. 316 * DDB flags.
315 */ 317 */
316#define DF_RELOGIN 0 /* Relogin to device */ 318#define DF_RELOGIN 0 /* Relogin to device */
319#define DF_BOOT_TGT 1 /* Boot target entry */
317#define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */ 320#define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */
318#define DF_FO_MASKED 3 321#define DF_FO_MASKED 3
319 322
@@ -501,6 +504,7 @@ struct scsi_qla_host {
501#define AF_INTERRUPTS_ON 6 /* 0x00000040 */ 504#define AF_INTERRUPTS_ON 6 /* 0x00000040 */
502#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */ 505#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */
503#define AF_LINK_UP 8 /* 0x00000100 */ 506#define AF_LINK_UP 8 /* 0x00000100 */
507#define AF_LOOPBACK 9 /* 0x00000200 */
504#define AF_IRQ_ATTACHED 10 /* 0x00000400 */ 508#define AF_IRQ_ATTACHED 10 /* 0x00000400 */
505#define AF_DISABLE_ACB_COMPLETE 11 /* 0x00000800 */ 509#define AF_DISABLE_ACB_COMPLETE 11 /* 0x00000800 */
506#define AF_HA_REMOVAL 12 /* 0x00001000 */ 510#define AF_HA_REMOVAL 12 /* 0x00001000 */
@@ -516,6 +520,8 @@ struct scsi_qla_host {
516#define AF_8XXX_RST_OWNER 25 /* 0x02000000 */ 520#define AF_8XXX_RST_OWNER 25 /* 0x02000000 */
517#define AF_82XX_DUMP_READING 26 /* 0x04000000 */ 521#define AF_82XX_DUMP_READING 26 /* 0x04000000 */
518#define AF_83XX_NO_FW_DUMP 27 /* 0x08000000 */ 522#define AF_83XX_NO_FW_DUMP 27 /* 0x08000000 */
523#define AF_83XX_IOCB_INTR_ON 28 /* 0x10000000 */
524#define AF_83XX_MBOX_INTR_ON 29 /* 0x20000000 */
519 525
520 unsigned long dpc_flags; 526 unsigned long dpc_flags;
521 527
@@ -537,6 +543,7 @@ struct scsi_qla_host {
537 uint32_t tot_ddbs; 543 uint32_t tot_ddbs;
538 544
539 uint16_t iocb_cnt; 545 uint16_t iocb_cnt;
546 uint16_t iocb_hiwat;
540 547
541 /* SRB cache. */ 548 /* SRB cache. */
542#define SRB_MIN_REQ 128 549#define SRB_MIN_REQ 128
@@ -838,7 +845,8 @@ static inline int is_aer_supported(struct scsi_qla_host *ha)
838static inline int adapter_up(struct scsi_qla_host *ha) 845static inline int adapter_up(struct scsi_qla_host *ha)
839{ 846{
840 return (test_bit(AF_ONLINE, &ha->flags) != 0) && 847 return (test_bit(AF_ONLINE, &ha->flags) != 0) &&
841 (test_bit(AF_LINK_UP, &ha->flags) != 0); 848 (test_bit(AF_LINK_UP, &ha->flags) != 0) &&
849 (!test_bit(AF_LOOPBACK, &ha->flags));
842} 850}
843 851
844static inline struct scsi_qla_host* to_qla_host(struct Scsi_Host *shost) 852static inline struct scsi_qla_host* to_qla_host(struct Scsi_Host *shost)
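
The new `AF_LOOPBACK` flag quiesces normal driver activity while loopback diagnostics run: `adapter_up()` now reports the adapter down, the queuecommand path returns host-busy, and DPC link-change handling is skipped (both changes appear later in this series, in ql4_os.c). The inline reduces to three conditions, modeled here with plain booleans:

    #include <stdbool.h>

    struct ha_flags {
        bool online;
        bool link_up;
        bool loopback;   /* AF_LOOPBACK: diagnostics in progress */
    };

    /* Loopback now counts as "not up": normal I/O and link-change
     * handling stay quiesced until diagnostics finish. */
    static bool adapter_up(const struct ha_flags *f)
    {
        return f->online && f->link_up && !f->loopback;
    }
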
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 1c4795020357..ad9d2e2d370f 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -495,7 +495,7 @@ struct qla_flt_region {
495#define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED 0x802D 495#define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED 0x802D
496#define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD 0x802E 496#define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD 0x802E
497#define MBOX_ASTS_IDC_COMPLETE 0x8100 497#define MBOX_ASTS_IDC_COMPLETE 0x8100
498#define MBOX_ASTS_IDC_NOTIFY 0x8101 498#define MBOX_ASTS_IDC_REQUEST_NOTIFICATION 0x8101
499#define MBOX_ASTS_TXSCVR_INSERTED 0x8130 499#define MBOX_ASTS_TXSCVR_INSERTED 0x8130
500#define MBOX_ASTS_TXSCVR_REMOVED 0x8131 500#define MBOX_ASTS_TXSCVR_REMOVED 0x8131
501 501
@@ -522,6 +522,10 @@ struct qla_flt_region {
522#define FLASH_OPT_COMMIT 2 522#define FLASH_OPT_COMMIT 2
523#define FLASH_OPT_RMW_COMMIT 3 523#define FLASH_OPT_RMW_COMMIT 3
524 524
525/* Loopback type */
526#define ENABLE_INTERNAL_LOOPBACK 0x04
527#define ENABLE_EXTERNAL_LOOPBACK 0x08
528
525/*************************************************************************/ 529/*************************************************************************/
526 530
527/* Host Adapter Initialization Control Block (from host) */ 531/* Host Adapter Initialization Control Block (from host) */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 57a5a3cf5770..982293edf02c 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -253,12 +253,14 @@ void qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha);
253void qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha); 253void qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha);
254int qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha); 254int qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha);
255void qla4_8xxx_get_minidump(struct scsi_qla_host *ha); 255void qla4_8xxx_get_minidump(struct scsi_qla_host *ha);
256int qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha); 256int qla4_8xxx_intr_disable(struct scsi_qla_host *ha);
257int qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha); 257int qla4_8xxx_intr_enable(struct scsi_qla_host *ha);
258int qla4_8xxx_set_param(struct scsi_qla_host *ha, int param); 258int qla4_8xxx_set_param(struct scsi_qla_host *ha, int param);
259int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha); 259int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha);
260int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha); 260int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha);
261void qla4_83xx_disable_pause(struct scsi_qla_host *ha); 261void qla4_83xx_disable_pause(struct scsi_qla_host *ha);
262void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha);
263int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha);
262 264
263extern int ql4xextended_error_logging; 265extern int ql4xextended_error_logging;
264extern int ql4xdontresethba; 266extern int ql4xdontresethba;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 1aca1b4f70b8..8fc8548ba4ba 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -195,12 +195,10 @@ exit_get_sys_info_no_free:
195 * @ha: pointer to host adapter structure. 195 * @ha: pointer to host adapter structure.
196 * 196 *
197 **/ 197 **/
198static int qla4xxx_init_local_data(struct scsi_qla_host *ha) 198static void qla4xxx_init_local_data(struct scsi_qla_host *ha)
199{ 199{
200 /* Initialize aen queue */ 200 /* Initialize aen queue */
201 ha->aen_q_count = MAX_AEN_ENTRIES; 201 ha->aen_q_count = MAX_AEN_ENTRIES;
202
203 return qla4xxx_get_firmware_status(ha);
204} 202}
205 203
206static uint8_t 204static uint8_t
@@ -935,14 +933,23 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset)
935 if (ha->isp_ops->start_firmware(ha) == QLA_ERROR) 933 if (ha->isp_ops->start_firmware(ha) == QLA_ERROR)
936 goto exit_init_hba; 934 goto exit_init_hba;
937 935
936 /*
937 * For ISP83XX, mailbox and IOCB interrupts are enabled separately.
938 * Mailbox interrupts must be enabled prior to issuing any mailbox
939 * command in order to prevent the possibility of losing interrupts
940 * while switching from polling to interrupt mode. IOCB interrupts are
941 * enabled via isp_ops->enable_intrs.
942 */
943 if (is_qla8032(ha))
944 qla4_83xx_enable_mbox_intrs(ha);
945
938 if (qla4xxx_about_firmware(ha) == QLA_ERROR) 946 if (qla4xxx_about_firmware(ha) == QLA_ERROR)
939 goto exit_init_hba; 947 goto exit_init_hba;
940 948
941 if (ha->isp_ops->get_sys_info(ha) == QLA_ERROR) 949 if (ha->isp_ops->get_sys_info(ha) == QLA_ERROR)
942 goto exit_init_hba; 950 goto exit_init_hba;
943 951
944 if (qla4xxx_init_local_data(ha) == QLA_ERROR) 952 qla4xxx_init_local_data(ha);
945 goto exit_init_hba;
946 953
947 status = qla4xxx_init_firmware(ha); 954 status = qla4xxx_init_firmware(ha);
948 if (status == QLA_ERROR) 955 if (status == QLA_ERROR)
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index f48f37a281d1..14fec976f634 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -316,7 +316,7 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
316 goto queuing_error; 316 goto queuing_error;
317 317
318 /* total iocbs active */ 318 /* total iocbs active */
319 if ((ha->iocb_cnt + req_cnt) >= REQUEST_QUEUE_DEPTH) 319 if ((ha->iocb_cnt + req_cnt) >= ha->iocb_hiwat)
320 goto queuing_error; 320 goto queuing_error;
321 321
322 /* Build command packet */ 322 /* Build command packet */
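
With `iocb_hiwat` in place, the send path stops throttling against the compile-time `REQUEST_QUEUE_DEPTH` and instead enforces the limit the firmware actually reported (minus a small cushion, set up in ql4_mbx.c below). The admission check is a one-liner, modeled here:

    #include <stdbool.h>
    #include <stdint.h>

    struct iocb_accounting {
        uint16_t iocb_cnt;     /* IOCBs currently outstanding */
        uint16_t iocb_hiwat;   /* firmware limit minus the cushion */
    };

    /* Admit a request only if it fits under the firmware's limit. */
    static bool can_queue(const struct iocb_accounting *a, uint16_t req_cnt)
    {
        return (uint32_t)a->iocb_cnt + req_cnt < a->iocb_hiwat;
    }
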
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 15ea81465ce4..1b83dc283d2e 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -582,6 +582,33 @@ exit_prq_error:
582} 582}
583 583
584/** 584/**
585 * qla4_83xx_loopback_in_progress: Is loopback in progress?
586 * @ha: Pointer to host adapter structure.
587 * @ret: 1 = loopback in progress, 0 = loopback not in progress
588 **/
589static int qla4_83xx_loopback_in_progress(struct scsi_qla_host *ha)
590{
591 int rval = 1;
592
593 if (is_qla8032(ha)) {
594 if ((ha->idc_info.info2 & ENABLE_INTERNAL_LOOPBACK) ||
595 (ha->idc_info.info2 & ENABLE_EXTERNAL_LOOPBACK)) {
596 DEBUG2(ql4_printk(KERN_INFO, ha,
597 "%s: Loopback diagnostics in progress\n",
598 __func__));
599 rval = 1;
600 } else {
601 DEBUG2(ql4_printk(KERN_INFO, ha,
602 "%s: Loopback diagnostics not in progress\n",
603 __func__));
604 rval = 0;
605 }
606 }
607
608 return rval;
609}
610
611/**
585 * qla4xxx_isr_decode_mailbox - decodes mailbox status 612 * qla4xxx_isr_decode_mailbox - decodes mailbox status
586 * @ha: Pointer to host adapter structure. 613 * @ha: Pointer to host adapter structure.
587 * @mailbox_status: Mailbox status. 614 * @mailbox_status: Mailbox status.
@@ -676,8 +703,10 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
676 703
677 case MBOX_ASTS_LINK_DOWN: 704 case MBOX_ASTS_LINK_DOWN:
678 clear_bit(AF_LINK_UP, &ha->flags); 705 clear_bit(AF_LINK_UP, &ha->flags);
679 if (test_bit(AF_INIT_DONE, &ha->flags)) 706 if (test_bit(AF_INIT_DONE, &ha->flags)) {
680 set_bit(DPC_LINK_CHANGED, &ha->dpc_flags); 707 set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
708 qla4xxx_wake_dpc(ha);
709 }
681 710
682 ql4_printk(KERN_INFO, ha, "%s: LINK DOWN\n", __func__); 711 ql4_printk(KERN_INFO, ha, "%s: LINK DOWN\n", __func__);
683 qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKDOWN, 712 qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKDOWN,
@@ -806,7 +835,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
806 " removed\n", ha->host_no, mbox_sts[0])); 835 " removed\n", ha->host_no, mbox_sts[0]));
807 break; 836 break;
808 837
809 case MBOX_ASTS_IDC_NOTIFY: 838 case MBOX_ASTS_IDC_REQUEST_NOTIFICATION:
810 { 839 {
811 uint32_t opcode; 840 uint32_t opcode;
812 if (is_qla8032(ha)) { 841 if (is_qla8032(ha)) {
@@ -840,6 +869,11 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
840 DEBUG2(ql4_printk(KERN_INFO, ha, 869 DEBUG2(ql4_printk(KERN_INFO, ha,
841 "scsi:%ld: AEN %04x IDC Complete notification\n", 870 "scsi:%ld: AEN %04x IDC Complete notification\n",
842 ha->host_no, mbox_sts[0])); 871 ha->host_no, mbox_sts[0]));
872
873 if (qla4_83xx_loopback_in_progress(ha))
874 set_bit(AF_LOOPBACK, &ha->flags);
875 else
876 clear_bit(AF_LOOPBACK, &ha->flags);
843 } 877 }
844 break; 878 break;
845 879
@@ -1124,17 +1158,18 @@ irqreturn_t qla4_83xx_intr_handler(int irq, void *dev_id)
1124 1158
1125 /* Legacy interrupt is valid if bit31 of leg_int_ptr is set */ 1159 /* Legacy interrupt is valid if bit31 of leg_int_ptr is set */
1126 if (!(leg_int_ptr & LEG_INT_PTR_B31)) { 1160 if (!(leg_int_ptr & LEG_INT_PTR_B31)) {
1127 ql4_printk(KERN_ERR, ha, 1161 DEBUG2(ql4_printk(KERN_ERR, ha,
1128 "%s: Legacy Interrupt Bit 31 not set, spurious interrupt!\n", 1162 "%s: Legacy Interrupt Bit 31 not set, spurious interrupt!\n",
1129 __func__); 1163 __func__));
1130 return IRQ_NONE; 1164 return IRQ_NONE;
1131 } 1165 }
1132 1166
1133 /* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */ 1167 /* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */
1134 if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit) { 1168 if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit) {
1135 ql4_printk(KERN_ERR, ha, 1169 DEBUG2(ql4_printk(KERN_ERR, ha,
1136 "%s: Incorrect function ID 0x%x in legacy interrupt register, ha->pf_bit = 0x%x\n", 1170 "%s: Incorrect function ID 0x%x in legacy interrupt register, ha->pf_bit = 0x%x\n",
1137 __func__, (leg_int_ptr & PF_BITS_MASK), ha->pf_bit); 1171 __func__, (leg_int_ptr & PF_BITS_MASK),
1172 ha->pf_bit));
1138 return IRQ_NONE; 1173 return IRQ_NONE;
1139 } 1174 }
1140 1175
@@ -1437,11 +1472,14 @@ irq_not_attached:
1437 1472
1438void qla4xxx_free_irqs(struct scsi_qla_host *ha) 1473void qla4xxx_free_irqs(struct scsi_qla_host *ha)
1439{ 1474{
1440 if (test_bit(AF_MSIX_ENABLED, &ha->flags)) 1475 if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags)) {
1441 qla4_8xxx_disable_msix(ha); 1476 if (test_bit(AF_MSIX_ENABLED, &ha->flags)) {
1442 else if (test_and_clear_bit(AF_MSI_ENABLED, &ha->flags)) { 1477 qla4_8xxx_disable_msix(ha);
1443 free_irq(ha->pdev->irq, ha); 1478 } else if (test_and_clear_bit(AF_MSI_ENABLED, &ha->flags)) {
1444 pci_disable_msi(ha->pdev); 1479 free_irq(ha->pdev->irq, ha);
1445 } else if (test_and_clear_bit(AF_INTx_ENABLED, &ha->flags)) 1480 pci_disable_msi(ha->pdev);
1446 free_irq(ha->pdev->irq, ha); 1481 } else if (test_and_clear_bit(AF_INTx_ENABLED, &ha->flags)) {
1482 free_irq(ha->pdev->irq, ha);
1483 }
1484 }
1447} 1485}
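
Two behavioral notes from this file: a LINK_DOWN AST now wakes the DPC thread immediately instead of waiting for the next timer tick, and the IDC-complete AST re-derives `AF_LOOPBACK` from the IDC info word so I/O stays quiesced for the duration of the diagnostic. The loopback test reduces to a mask check, sketched here with the constants added in ql4_fw.h:

    #include <stdbool.h>

    /* Loopback type bits from ql4_fw.h */
    #define ENABLE_INTERNAL_LOOPBACK 0x04
    #define ENABLE_EXTERNAL_LOOPBACK 0x08

    /* On IDC-complete, AF_LOOPBACK is set or cleared from this test. */
    static bool loopback_requested(unsigned int idc_info2)
    {
        return idc_info2 &
               (ENABLE_INTERNAL_LOOPBACK | ENABLE_EXTERNAL_LOOPBACK);
    }
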
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 3d41034191f0..81e738d61ec0 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -44,6 +44,30 @@ void qla4xxx_process_mbox_intr(struct scsi_qla_host *ha, int out_count)
44} 44}
45 45
46/** 46/**
 47 * qla4xxx_is_intr_poll_mode - Are we allowed to poll for interrupts?
48 * @ha: Pointer to host adapter structure.
49 * @ret: 1=polling mode, 0=non-polling mode
50 **/
51static int qla4xxx_is_intr_poll_mode(struct scsi_qla_host *ha)
52{
53 int rval = 1;
54
55 if (is_qla8032(ha)) {
56 if (test_bit(AF_IRQ_ATTACHED, &ha->flags) &&
57 test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags))
58 rval = 0;
59 } else {
60 if (test_bit(AF_IRQ_ATTACHED, &ha->flags) &&
61 test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
62 test_bit(AF_ONLINE, &ha->flags) &&
63 !test_bit(AF_HA_REMOVAL, &ha->flags))
64 rval = 0;
65 }
66
67 return rval;
68}
69
70/**
47 * qla4xxx_mailbox_command - issues mailbox commands 71 * qla4xxx_mailbox_command - issues mailbox commands
48 * @ha: Pointer to host adapter structure. 72 * @ha: Pointer to host adapter structure.
49 * @inCount: number of mailbox registers to load. 73 * @inCount: number of mailbox registers to load.
@@ -153,33 +177,28 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
153 /* 177 /*
154 * Wait for completion: Poll or completion queue 178 * Wait for completion: Poll or completion queue
155 */ 179 */
156 if (test_bit(AF_IRQ_ATTACHED, &ha->flags) && 180 if (qla4xxx_is_intr_poll_mode(ha)) {
157 test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
158 test_bit(AF_ONLINE, &ha->flags) &&
159 !test_bit(AF_HA_REMOVAL, &ha->flags)) {
160 /* Do not poll for completion. Use completion queue */
161 set_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
162 wait_for_completion_timeout(&ha->mbx_intr_comp, MBOX_TOV * HZ);
163 clear_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
164 } else {
165 /* Poll for command to complete */ 181 /* Poll for command to complete */
166 wait_count = jiffies + MBOX_TOV * HZ; 182 wait_count = jiffies + MBOX_TOV * HZ;
167 while (test_bit(AF_MBOX_COMMAND_DONE, &ha->flags) == 0) { 183 while (test_bit(AF_MBOX_COMMAND_DONE, &ha->flags) == 0) {
168 if (time_after_eq(jiffies, wait_count)) 184 if (time_after_eq(jiffies, wait_count))
169 break; 185 break;
170
171 /* 186 /*
172 * Service the interrupt. 187 * Service the interrupt.
173 * The ISR will save the mailbox status registers 188 * The ISR will save the mailbox status registers
174 * to a temporary storage location in the adapter 189 * to a temporary storage location in the adapter
175 * structure. 190 * structure.
176 */ 191 */
177
178 spin_lock_irqsave(&ha->hardware_lock, flags); 192 spin_lock_irqsave(&ha->hardware_lock, flags);
179 ha->isp_ops->process_mailbox_interrupt(ha, outCount); 193 ha->isp_ops->process_mailbox_interrupt(ha, outCount);
180 spin_unlock_irqrestore(&ha->hardware_lock, flags); 194 spin_unlock_irqrestore(&ha->hardware_lock, flags);
181 msleep(10); 195 msleep(10);
182 } 196 }
197 } else {
198 /* Do not poll for completion. Use completion queue */
199 set_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
200 wait_for_completion_timeout(&ha->mbx_intr_comp, MBOX_TOV * HZ);
201 clear_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
183 } 202 }
184 203
185 /* Check for mailbox timeout. */ 204 /* Check for mailbox timeout. */
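
`qla4xxx_mailbox_command()` now delegates the poll-or-wait decision to `qla4xxx_is_intr_poll_mode()`: on ISP8032 it keys off `AF_83XX_MBOX_INTR_ON`, so mailbox commands can use the completion queue even before IOCB interrupts are enabled, while other chips keep the original four-flag test. A boolean model of that predicate:

    #include <stdbool.h>

    struct mbx_flags {
        bool irq_attached;
        bool mbox_intr_on;     /* 83xx: AF_83XX_MBOX_INTR_ON */
        bool intrs_on;         /* legacy: AF_INTERRUPTS_ON */
        bool online;
        bool removal_pending;
    };

    /* true = poll for the mailbox completion, false = sleep on the
     * completion queue and let the ISR wake us. */
    static bool intr_poll_mode(const struct mbx_flags *f, bool is_8032)
    {
        if (is_8032)
            return !(f->irq_attached && f->mbox_intr_on);
        return !(f->irq_attached && f->intrs_on &&
                 f->online && !f->removal_pending);
    }
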
@@ -678,8 +697,24 @@ int qla4xxx_get_firmware_status(struct scsi_qla_host * ha)
678 return QLA_ERROR; 697 return QLA_ERROR;
679 } 698 }
680 699
681 ql4_printk(KERN_INFO, ha, "%ld firmware IOCBs available (%d).\n", 700 /* High-water mark of IOCBs */
682 ha->host_no, mbox_sts[2]); 701 ha->iocb_hiwat = mbox_sts[2];
702 DEBUG2(ql4_printk(KERN_INFO, ha,
703 "%s: firmware IOCBs available = %d\n", __func__,
704 ha->iocb_hiwat));
705
706 if (ha->iocb_hiwat > IOCB_HIWAT_CUSHION)
707 ha->iocb_hiwat -= IOCB_HIWAT_CUSHION;
708
709 /* Ideally, we should not enter this code, as the # of firmware
710 * IOCBs is hard-coded in the firmware. We set a default
711 * iocb_hiwat here just in case */
712 if (ha->iocb_hiwat == 0) {
713 ha->iocb_hiwat = REQUEST_QUEUE_DEPTH / 4;
714 DEBUG2(ql4_printk(KERN_WARNING, ha,
 715 "%s: Setting iocb_hiwat to %d\n", __func__,
716 ha->iocb_hiwat));
717 }
683 718
684 return QLA_SUCCESS; 719 return QLA_SUCCESS;
685} 720}
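
The firmware-status hunk derives `iocb_hiwat` from the firmware's reported IOCB count: subtract a 4-IOCB cushion (e.g. 1024 reported becomes 1020 usable), and fall back to a fraction of the request queue depth if the firmware ever reports zero. A standalone version of that computation (the fallback constant's value here is illustrative):

    #include <stdint.h>

    #define IOCB_HIWAT_CUSHION  4
    #define REQUEST_QUEUE_DEPTH 128   /* illustrative value */

    /* Derive the usable IOCB high-water mark from the firmware count:
     * 1024 reported -> 1020 usable; 0 reported -> a defensive default. */
    static uint16_t compute_iocb_hiwat(uint16_t fw_iocb_count)
    {
        uint16_t hiwat = fw_iocb_count;

        if (hiwat > IOCB_HIWAT_CUSHION)
            hiwat -= IOCB_HIWAT_CUSHION;
        if (hiwat == 0)
            hiwat = REQUEST_QUEUE_DEPTH / 4;
        return hiwat;
    }
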
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 499a92db1cf6..71d3d234f526 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -2986,7 +2986,7 @@ int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
2986 2986
2987 retval = qla4_8xxx_device_state_handler(ha); 2987 retval = qla4_8xxx_device_state_handler(ha);
2988 2988
2989 if (retval == QLA_SUCCESS && !test_bit(AF_INIT_DONE, &ha->flags)) 2989 if (retval == QLA_SUCCESS && !test_bit(AF_IRQ_ATTACHED, &ha->flags))
2990 retval = qla4xxx_request_irqs(ha); 2990 retval = qla4xxx_request_irqs(ha);
2991 2991
2992 return retval; 2992 return retval;
@@ -3427,11 +3427,11 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
3427 } 3427 }
3428 3428
3429 /* Make sure we receive the minimum required data to cache internally */ 3429 /* Make sure we receive the minimum required data to cache internally */
3430 if (mbox_sts[4] < offsetof(struct mbx_sys_info, reserved)) { 3430 if ((is_qla8032(ha) ? mbox_sts[3] : mbox_sts[4]) <
3431 offsetof(struct mbx_sys_info, reserved)) {
3431 DEBUG2(printk("scsi%ld: %s: GET_SYS_INFO data receive" 3432 DEBUG2(printk("scsi%ld: %s: GET_SYS_INFO data receive"
3432 " error (%x)\n", ha->host_no, __func__, mbox_sts[4])); 3433 " error (%x)\n", ha->host_no, __func__, mbox_sts[4]));
3433 goto exit_validate_mac82; 3434 goto exit_validate_mac82;
3434
3435 } 3435 }
3436 3436
3437 /* Save M.A.C. address & serial_number */ 3437 /* Save M.A.C. address & serial_number */
@@ -3463,7 +3463,7 @@ exit_validate_mac82:
3463 3463
3464/* Interrupt handling helpers. */ 3464/* Interrupt handling helpers. */
3465 3465
3466int qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha) 3466int qla4_8xxx_intr_enable(struct scsi_qla_host *ha)
3467{ 3467{
3468 uint32_t mbox_cmd[MBOX_REG_COUNT]; 3468 uint32_t mbox_cmd[MBOX_REG_COUNT];
3469 uint32_t mbox_sts[MBOX_REG_COUNT]; 3469 uint32_t mbox_sts[MBOX_REG_COUNT];
@@ -3484,7 +3484,7 @@ int qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha)
3484 return QLA_SUCCESS; 3484 return QLA_SUCCESS;
3485} 3485}
3486 3486
3487int qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha) 3487int qla4_8xxx_intr_disable(struct scsi_qla_host *ha)
3488{ 3488{
3489 uint32_t mbox_cmd[MBOX_REG_COUNT]; 3489 uint32_t mbox_cmd[MBOX_REG_COUNT];
3490 uint32_t mbox_sts[MBOX_REG_COUNT]; 3490 uint32_t mbox_sts[MBOX_REG_COUNT];
@@ -3509,7 +3509,7 @@ int qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha)
3509void 3509void
3510qla4_82xx_enable_intrs(struct scsi_qla_host *ha) 3510qla4_82xx_enable_intrs(struct scsi_qla_host *ha)
3511{ 3511{
3512 qla4_8xxx_mbx_intr_enable(ha); 3512 qla4_8xxx_intr_enable(ha);
3513 3513
3514 spin_lock_irq(&ha->hardware_lock); 3514 spin_lock_irq(&ha->hardware_lock);
3515 /* BIT 10 - reset */ 3515 /* BIT 10 - reset */
@@ -3522,7 +3522,7 @@ void
3522qla4_82xx_disable_intrs(struct scsi_qla_host *ha) 3522qla4_82xx_disable_intrs(struct scsi_qla_host *ha)
3523{ 3523{
3524 if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags)) 3524 if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags))
3525 qla4_8xxx_mbx_intr_disable(ha); 3525 qla4_8xxx_intr_disable(ha);
3526 3526
3527 spin_lock_irq(&ha->hardware_lock); 3527 spin_lock_irq(&ha->hardware_lock);
3528 /* BIT 10 - set */ 3528 /* BIT 10 - set */
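
Besides the mechanical `mbx_intr_*` to `intr_*` renames (the mailbox command controls more than mailbox interrupts on these chips) and the IRQ-request condition now keying off `AF_IRQ_ATTACHED`, the GET_SYS_INFO fix matters: ISP8032 returns the byte count in `mbox_sts[3]`, not `mbox_sts[4]`, so the minimum-length check must pick the register per chip before comparing against `offsetof(struct mbx_sys_info, reserved)`. A small model of that validation (the struct layout here is illustrative):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct mbx_sys_info_model {
        uint8_t mac[6];
        uint8_t serial[16];
        uint8_t reserved[32];   /* everything past here is optional */
    };

    /* Validate that the firmware returned at least the mandatory part. */
    static bool sys_info_len_ok(bool is_8032, const uint32_t *mbox_sts)
    {
        uint32_t len = is_8032 ? mbox_sts[3] : mbox_sts[4];

        return len >= offsetof(struct mbx_sys_info_model, reserved);
    }
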
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 4cec123a6a6a..6142729167f4 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -1337,18 +1337,18 @@ static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
1337 sess->password_in, BIDI_CHAP, 1337 sess->password_in, BIDI_CHAP,
1338 &idx); 1338 &idx);
1339 if (rval) 1339 if (rval)
1340 return -EINVAL; 1340 len = sprintf(buf, "\n");
1341 1341 else
1342 len = sprintf(buf, "%hu\n", idx); 1342 len = sprintf(buf, "%hu\n", idx);
1343 break; 1343 break;
1344 case ISCSI_PARAM_CHAP_OUT_IDX: 1344 case ISCSI_PARAM_CHAP_OUT_IDX:
1345 rval = qla4xxx_get_chap_index(ha, sess->username, 1345 rval = qla4xxx_get_chap_index(ha, sess->username,
1346 sess->password, LOCAL_CHAP, 1346 sess->password, LOCAL_CHAP,
1347 &idx); 1347 &idx);
1348 if (rval) 1348 if (rval)
1349 return -EINVAL; 1349 len = sprintf(buf, "\n");
1350 1350 else
1351 len = sprintf(buf, "%hu\n", idx); 1351 len = sprintf(buf, "%hu\n", idx);
1352 break; 1352 break;
1353 default: 1353 default:
1354 return iscsi_session_get_param(cls_sess, param, buf); 1354 return iscsi_session_get_param(cls_sess, param, buf);
@@ -2242,6 +2242,7 @@ static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2242 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) || 2242 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
2243 !test_bit(AF_ONLINE, &ha->flags) || 2243 !test_bit(AF_ONLINE, &ha->flags) ||
2244 !test_bit(AF_LINK_UP, &ha->flags) || 2244 !test_bit(AF_LINK_UP, &ha->flags) ||
2245 test_bit(AF_LOOPBACK, &ha->flags) ||
2245 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) 2246 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
2246 goto qc_host_busy; 2247 goto qc_host_busy;
2247 2248
@@ -2978,6 +2979,7 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
2978 if (status == QLA_SUCCESS) { 2979 if (status == QLA_SUCCESS) {
2979 if (!test_bit(AF_FW_RECOVERY, &ha->flags)) 2980 if (!test_bit(AF_FW_RECOVERY, &ha->flags))
2980 qla4xxx_cmd_wait(ha); 2981 qla4xxx_cmd_wait(ha);
2982
2981 ha->isp_ops->disable_intrs(ha); 2983 ha->isp_ops->disable_intrs(ha);
2982 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 2984 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2983 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 2985 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
@@ -3479,7 +3481,8 @@ dpc_post_reset_ha:
3479 } 3481 }
3480 3482
3481 /* ---- link change? --- */ 3483 /* ---- link change? --- */
3482 if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) { 3484 if (!test_bit(AF_LOOPBACK, &ha->flags) &&
3485 test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
3483 if (!test_bit(AF_LINK_UP, &ha->flags)) { 3486 if (!test_bit(AF_LINK_UP, &ha->flags)) {
3484 /* ---- link down? --- */ 3487 /* ---- link down? --- */
3485 qla4xxx_mark_all_devices_missing(ha); 3488 qla4xxx_mark_all_devices_missing(ha);
@@ -3508,10 +3511,8 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
3508{ 3511{
3509 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); 3512 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
3510 3513
3511 if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) { 3514 /* Turn-off interrupts on the card. */
3512 /* Turn-off interrupts on the card. */ 3515 ha->isp_ops->disable_intrs(ha);
3513 ha->isp_ops->disable_intrs(ha);
3514 }
3515 3516
3516 if (is_qla40XX(ha)) { 3517 if (is_qla40XX(ha)) {
3517 writel(set_rmask(CSR_SCSI_PROCESSOR_INTR), 3518 writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
@@ -3547,8 +3548,7 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
3547 } 3548 }
3548 3549
3549 /* Detach interrupts */ 3550 /* Detach interrupts */
3550 if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags)) 3551 qla4xxx_free_irqs(ha);
3551 qla4xxx_free_irqs(ha);
3552 3552
3553 /* free extra memory */ 3553 /* free extra memory */
3554 qla4xxx_mem_free(ha); 3554 qla4xxx_mem_free(ha);
@@ -4687,7 +4687,8 @@ static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
4687 struct iscsi_endpoint *ep; 4687 struct iscsi_endpoint *ep;
4688 struct sockaddr_in *addr; 4688 struct sockaddr_in *addr;
4689 struct sockaddr_in6 *addr6; 4689 struct sockaddr_in6 *addr6;
4690 struct sockaddr *dst_addr; 4690 struct sockaddr *t_addr;
4691 struct sockaddr_storage *dst_addr;
4691 char *ip; 4692 char *ip;
4692 4693
4693 /* TODO: need to destroy on unload iscsi_endpoint*/ 4694 /* TODO: need to destroy on unload iscsi_endpoint*/
@@ -4696,21 +4697,23 @@ static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
4696 return NULL; 4697 return NULL;
4697 4698
4698 if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) { 4699 if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
4699 dst_addr->sa_family = AF_INET6; 4700 t_addr = (struct sockaddr *)dst_addr;
4701 t_addr->sa_family = AF_INET6;
4700 addr6 = (struct sockaddr_in6 *)dst_addr; 4702 addr6 = (struct sockaddr_in6 *)dst_addr;
4701 ip = (char *)&addr6->sin6_addr; 4703 ip = (char *)&addr6->sin6_addr;
4702 memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN); 4704 memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
4703 addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port)); 4705 addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
4704 4706
4705 } else { 4707 } else {
4706 dst_addr->sa_family = AF_INET; 4708 t_addr = (struct sockaddr *)dst_addr;
4709 t_addr->sa_family = AF_INET;
4707 addr = (struct sockaddr_in *)dst_addr; 4710 addr = (struct sockaddr_in *)dst_addr;
4708 ip = (char *)&addr->sin_addr; 4711 ip = (char *)&addr->sin_addr;
4709 memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN); 4712 memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
4710 addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port)); 4713 addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
4711 } 4714 }
4712 4715
4713 ep = qla4xxx_ep_connect(ha->host, dst_addr, 0); 4716 ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0);
4714 vfree(dst_addr); 4717 vfree(dst_addr);
4715 return ep; 4718 return ep;
4716} 4719}
@@ -4725,7 +4728,8 @@ static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
4725} 4728}
4726 4729
4727static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha, 4730static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
4728 struct ddb_entry *ddb_entry) 4731 struct ddb_entry *ddb_entry,
4732 uint16_t idx)
4729{ 4733{
4730 uint16_t def_timeout; 4734 uint16_t def_timeout;
4731 4735
@@ -4745,6 +4749,10 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
4745 def_timeout : LOGIN_TOV; 4749 def_timeout : LOGIN_TOV;
4746 ddb_entry->default_time2wait = 4750 ddb_entry->default_time2wait =
4747 le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait); 4751 le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
4752
4753 if (ql4xdisablesysfsboot &&
4754 (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx))
4755 set_bit(DF_BOOT_TGT, &ddb_entry->flags);
4748} 4756}
4749 4757
4750static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha) 4758static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
@@ -4881,7 +4889,7 @@ static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
4881 4889
4882static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha, 4890static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
4883 struct dev_db_entry *fw_ddb_entry, 4891 struct dev_db_entry *fw_ddb_entry,
4884 int is_reset) 4892 int is_reset, uint16_t idx)
4885{ 4893{
4886 struct iscsi_cls_session *cls_sess; 4894 struct iscsi_cls_session *cls_sess;
4887 struct iscsi_session *sess; 4895 struct iscsi_session *sess;
@@ -4919,7 +4927,7 @@ static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
4919 memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry, 4927 memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
4920 sizeof(struct dev_db_entry)); 4928 sizeof(struct dev_db_entry));
4921 4929
4922 qla4xxx_setup_flash_ddb_entry(ha, ddb_entry); 4930 qla4xxx_setup_flash_ddb_entry(ha, ddb_entry, idx);
4923 4931
4924 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id); 4932 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);
4925 4933
@@ -5036,7 +5044,7 @@ static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
5036 goto continue_next_nt; 5044 goto continue_next_nt;
5037 } 5045 }
5038 5046
5039 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset); 5047 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx);
5040 if (ret == QLA_ERROR) 5048 if (ret == QLA_ERROR)
5041 goto exit_nt_list; 5049 goto exit_nt_list;
5042 5050
@@ -5116,6 +5124,78 @@ void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
5116} 5124}
5117 5125
5118/** 5126/**
5127 * qla4xxx_wait_login_resp_boot_tgt - Wait for iSCSI boot target login
5128 * response.
5129 * @ha: pointer to adapter structure
5130 *
 5131 * When the boot entry is a normal iSCSI target, the DF_BOOT_TGT flag will
 5132 * be set in the DDB and we will wait for the login response of the boot
 5133 * targets during probe.
5134 **/
5135static void qla4xxx_wait_login_resp_boot_tgt(struct scsi_qla_host *ha)
5136{
5137 struct ddb_entry *ddb_entry;
5138 struct dev_db_entry *fw_ddb_entry = NULL;
5139 dma_addr_t fw_ddb_entry_dma;
5140 unsigned long wtime;
5141 uint32_t ddb_state;
5142 int max_ddbs, idx, ret;
5143
5144 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
5145 MAX_DEV_DB_ENTRIES;
5146
5147 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
5148 &fw_ddb_entry_dma, GFP_KERNEL);
5149 if (!fw_ddb_entry) {
5150 ql4_printk(KERN_ERR, ha,
5151 "%s: Unable to allocate dma buffer\n", __func__);
5152 goto exit_login_resp;
5153 }
5154
5155 wtime = jiffies + (HZ * BOOT_LOGIN_RESP_TOV);
5156
5157 for (idx = 0; idx < max_ddbs; idx++) {
5158 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
5159 if (ddb_entry == NULL)
5160 continue;
5161
5162 if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
5163 DEBUG2(ql4_printk(KERN_INFO, ha,
5164 "%s: DDB index [%d]\n", __func__,
5165 ddb_entry->fw_ddb_index));
5166 do {
5167 ret = qla4xxx_get_fwddb_entry(ha,
5168 ddb_entry->fw_ddb_index,
5169 fw_ddb_entry, fw_ddb_entry_dma,
5170 NULL, NULL, &ddb_state, NULL,
5171 NULL, NULL);
5172 if (ret == QLA_ERROR)
5173 goto exit_login_resp;
5174
5175 if ((ddb_state == DDB_DS_SESSION_ACTIVE) ||
5176 (ddb_state == DDB_DS_SESSION_FAILED))
5177 break;
5178
5179 schedule_timeout_uninterruptible(HZ);
5180
5181 } while ((time_after(wtime, jiffies)));
5182
5183 if (!time_after(wtime, jiffies)) {
5184 DEBUG2(ql4_printk(KERN_INFO, ha,
5185 "%s: Login response wait timer expired\n",
5186 __func__));
5187 goto exit_login_resp;
5188 }
5189 }
5190 }
5191
5192exit_login_resp:
5193 if (fw_ddb_entry)
5194 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
5195 fw_ddb_entry, fw_ddb_entry_dma);
5196}
5197
5198/**
5119 * qla4xxx_probe_adapter - callback function to probe HBA 5199 * qla4xxx_probe_adapter - callback function to probe HBA
5120 * @pdev: pointer to pci_dev structure 5200 * @pdev: pointer to pci_dev structure
5121 * @pci_device_id: pointer to pci_device entry 5201 * @pci_device_id: pointer to pci_device entry
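
`qla4xxx_wait_login_resp_boot_tgt()` shares one deadline (`BOOT_LOGIN_RESP_TOV`, 60 seconds) across every boot-target DDB, polling firmware state once a second until each session goes ACTIVE or FAILED. A standalone model of that deadline-shared polling loop, using wall-clock time in place of jiffies:

    #include <stdbool.h>
    #include <time.h>
    #include <unistd.h>

    #define BOOT_LOGIN_RESP_TOV 60   /* seconds, per ql4_def.h */

    /* Poll 'done' once a second until it reports completion or the
     * shared deadline passes; mirrors the jiffies/time_after pattern. */
    static bool wait_logged_in(bool (*done)(void))
    {
        time_t deadline = time(NULL) + BOOT_LOGIN_RESP_TOV;

        while (time(NULL) < deadline) {
            if (done())
                return true;
            sleep(1);   /* schedule_timeout_uninterruptible(HZ) analogue */
        }
        return false;   /* login response wait timer expired */
    }
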
@@ -5270,7 +5350,7 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev,
5270 if (is_qla80XX(ha)) { 5350 if (is_qla80XX(ha)) {
5271 ha->isp_ops->idc_lock(ha); 5351 ha->isp_ops->idc_lock(ha);
5272 dev_state = qla4_8xxx_rd_direct(ha, 5352 dev_state = qla4_8xxx_rd_direct(ha,
5273 QLA82XX_CRB_DEV_STATE); 5353 QLA8XXX_CRB_DEV_STATE);
5274 ha->isp_ops->idc_unlock(ha); 5354 ha->isp_ops->idc_unlock(ha);
5275 if (dev_state == QLA8XXX_DEV_FAILED) { 5355 if (dev_state == QLA8XXX_DEV_FAILED) {
5276 ql4_printk(KERN_WARNING, ha, "%s: don't retry " 5356 ql4_printk(KERN_WARNING, ha, "%s: don't retry "
@@ -5368,6 +5448,7 @@ skip_retry_init:
5368 /* Perform the build ddb list and login to each */ 5448 /* Perform the build ddb list and login to each */
5369 qla4xxx_build_ddb_list(ha, INIT_ADAPTER); 5449 qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
5370 iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb); 5450 iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
5451 qla4xxx_wait_login_resp_boot_tgt(ha);
5371 5452
5372 qla4xxx_create_chap_list(ha); 5453 qla4xxx_create_chap_list(ha);
5373 5454
@@ -6008,14 +6089,6 @@ static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
6008 goto exit_host_reset; 6089 goto exit_host_reset;
6009 } 6090 }
6010 6091
6011 rval = qla4xxx_wait_for_hba_online(ha);
6012 if (rval != QLA_SUCCESS) {
6013 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unable to reset host "
6014 "adapter\n", __func__));
6015 rval = -EIO;
6016 goto exit_host_reset;
6017 }
6018
6019 if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) 6092 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
6020 goto recover_adapter; 6093 goto recover_adapter;
6021 6094
@@ -6115,7 +6188,6 @@ qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
6115static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha) 6188static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
6116{ 6189{
6117 uint32_t rval = QLA_ERROR; 6190 uint32_t rval = QLA_ERROR;
6118 uint32_t ret = 0;
6119 int fn; 6191 int fn;
6120 struct pci_dev *other_pdev = NULL; 6192 struct pci_dev *other_pdev = NULL;
6121 6193
@@ -6201,16 +6273,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
6201 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0); 6273 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0);
6202 qla4_8xxx_set_drv_active(ha); 6274 qla4_8xxx_set_drv_active(ha);
6203 ha->isp_ops->idc_unlock(ha); 6275 ha->isp_ops->idc_unlock(ha);
6204 ret = qla4xxx_request_irqs(ha); 6276 ha->isp_ops->enable_intrs(ha);
6205 if (ret) {
6206 ql4_printk(KERN_WARNING, ha, "Failed to "
6207 "reserve interrupt %d already in use.\n",
6208 ha->pdev->irq);
6209 rval = QLA_ERROR;
6210 } else {
6211 ha->isp_ops->enable_intrs(ha);
6212 rval = QLA_SUCCESS;
6213 }
6214 } 6277 }
6215 } else { 6278 } else {
6216 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not " 6279 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
@@ -6220,18 +6283,9 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
6220 QLA8XXX_DEV_READY)) { 6283 QLA8XXX_DEV_READY)) {
6221 clear_bit(AF_FW_RECOVERY, &ha->flags); 6284 clear_bit(AF_FW_RECOVERY, &ha->flags);
6222 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER); 6285 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
6223 if (rval == QLA_SUCCESS) { 6286 if (rval == QLA_SUCCESS)
6224 ret = qla4xxx_request_irqs(ha); 6287 ha->isp_ops->enable_intrs(ha);
6225 if (ret) { 6288
6226 ql4_printk(KERN_WARNING, ha, "Failed to"
6227 " reserve interrupt %d already in"
6228 " use.\n", ha->pdev->irq);
6229 rval = QLA_ERROR;
6230 } else {
6231 ha->isp_ops->enable_intrs(ha);
6232 rval = QLA_SUCCESS;
6233 }
6234 }
6235 ha->isp_ops->idc_lock(ha); 6289 ha->isp_ops->idc_lock(ha);
6236 qla4_8xxx_set_drv_active(ha); 6290 qla4_8xxx_set_drv_active(ha);
6237 ha->isp_ops->idc_unlock(ha); 6291 ha->isp_ops->idc_unlock(ha);
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index f6df2ea91ab5..6775a45af315 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7 7
8#define QLA4XXX_DRIVER_VERSION "5.03.00-k1" 8#define QLA4XXX_DRIVER_VERSION "5.03.00-k4"
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 59d427bf08e2..0a74b975efdf 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2503,6 +2503,15 @@ show_priv_session_creator(struct device *dev, struct device_attribute *attr,
2503} 2503}
2504static ISCSI_CLASS_ATTR(priv_sess, creator, S_IRUGO, show_priv_session_creator, 2504static ISCSI_CLASS_ATTR(priv_sess, creator, S_IRUGO, show_priv_session_creator,
2505 NULL); 2505 NULL);
2506static ssize_t
2507show_priv_session_target_id(struct device *dev, struct device_attribute *attr,
2508 char *buf)
2509{
2510 struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
2511 return sprintf(buf, "%d\n", session->target_id);
2512}
2513static ISCSI_CLASS_ATTR(priv_sess, target_id, S_IRUGO,
2514 show_priv_session_target_id, NULL);
2506 2515
2507#define iscsi_priv_session_attr_show(field, format) \ 2516#define iscsi_priv_session_attr_show(field, format) \
2508static ssize_t \ 2517static ssize_t \
@@ -2575,6 +2584,7 @@ static struct attribute *iscsi_session_attrs[] = {
2575 &dev_attr_priv_sess_creator.attr, 2584 &dev_attr_priv_sess_creator.attr,
2576 &dev_attr_sess_chap_out_idx.attr, 2585 &dev_attr_sess_chap_out_idx.attr,
2577 &dev_attr_sess_chap_in_idx.attr, 2586 &dev_attr_sess_chap_in_idx.attr,
2587 &dev_attr_priv_sess_target_id.attr,
2578 NULL, 2588 NULL,
2579}; 2589};
2580 2590
@@ -2638,6 +2648,8 @@ static umode_t iscsi_session_attr_is_visible(struct kobject *kobj,
2638 return S_IRUGO; 2648 return S_IRUGO;
2639 else if (attr == &dev_attr_priv_sess_creator.attr) 2649 else if (attr == &dev_attr_priv_sess_creator.attr)
2640 return S_IRUGO; 2650 return S_IRUGO;
2651 else if (attr == &dev_attr_priv_sess_target_id.attr)
2652 return S_IRUGO;
2641 else { 2653 else {
2642 WARN_ONCE(1, "Invalid session attr"); 2654 WARN_ONCE(1, "Invalid session attr");
2643 return 0; 2655 return 0;
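
The transport-class addition follows the established `priv_sess` recipe: a show function, an `ISCSI_CLASS_ATTR` declaration, an entry in `iscsi_session_attrs[]`, and a read-only case in `iscsi_session_attr_is_visible()`; userspace then reads the value from the session's sysfs directory. The show hook itself reduces to formatting one integer, modeled here outside the kernel:

    #include <stdio.h>

    /* Userspace model of the read-only attribute's show hook: format
     * one integer followed by a newline into the caller's buffer. */
    struct session_model { int target_id; };

    static int show_target_id(const struct session_model *s,
                              char *buf, size_t len)
    {
        return snprintf(buf, len, "%d\n", s->target_id);
    }
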