-rw-r--r--	Documentation/scsi/ChangeLog.megaraid_sas | 8
-rw-r--r--	MAINTAINERS | 9
-rw-r--r--	drivers/scsi/Kconfig | 1
-rw-r--r--	drivers/scsi/aacraid/src.c | 2
-rw-r--r--	drivers/scsi/be2iscsi/be.h | 4
-rw-r--r--	drivers/scsi/be2iscsi/be_cmds.c | 2
-rw-r--r--	drivers/scsi/be2iscsi/be_cmds.h | 154
-rw-r--r--	drivers/scsi/be2iscsi/be_iscsi.c | 493
-rw-r--r--	drivers/scsi/be2iscsi/be_iscsi.h | 15
-rw-r--r--	drivers/scsi/be2iscsi/be_main.c | 447
-rw-r--r--	drivers/scsi/be2iscsi/be_main.h | 17
-rw-r--r--	drivers/scsi/be2iscsi/be_mgmt.c | 522
-rw-r--r--	drivers/scsi/be2iscsi/be_mgmt.h | 50
-rw-r--r--	drivers/scsi/bfa/bfa_fcs.h | 3
-rw-r--r--	drivers/scsi/bfa/bfa_fcs_lport.c | 32
-rw-r--r--	drivers/scsi/bfa/bfad.c | 17
-rw-r--r--	drivers/scsi/bfa/bfad_attr.c | 20
-rw-r--r--	drivers/scsi/bnx2i/57xx_iscsi_constants.h | 2
-rw-r--r--	drivers/scsi/bnx2i/57xx_iscsi_hsi.h | 2
-rw-r--r--	drivers/scsi/bnx2i/bnx2i.h | 2
-rw-r--r--	drivers/scsi/bnx2i/bnx2i_hwi.c | 2
-rw-r--r--	drivers/scsi/bnx2i/bnx2i_init.c | 6
-rw-r--r--	drivers/scsi/bnx2i/bnx2i_iscsi.c | 3
-rw-r--r--	drivers/scsi/bnx2i/bnx2i_sysfs.c | 2
-rw-r--r--	drivers/scsi/device_handler/scsi_dh_alua.c | 70
-rw-r--r--	drivers/scsi/fcoe/fcoe.c | 41
-rw-r--r--	drivers/scsi/fcoe/fcoe.h | 4
-rw-r--r--	drivers/scsi/fcoe/fcoe_ctlr.c | 8
-rw-r--r--	drivers/scsi/hpsa.c | 683
-rw-r--r--	drivers/scsi/hpsa.h | 85
-rw-r--r--	drivers/scsi/hpsa_cmd.h | 37
-rw-r--r--	drivers/scsi/isci/host.c | 703
-rw-r--r--	drivers/scsi/isci/host.h | 124
-rw-r--r--	drivers/scsi/isci/init.c | 212
-rw-r--r--	drivers/scsi/isci/phy.c | 76
-rw-r--r--	drivers/scsi/isci/phy.h | 9
-rw-r--r--	drivers/scsi/isci/port.c | 68
-rw-r--r--	drivers/scsi/isci/port.h | 11
-rw-r--r--	drivers/scsi/isci/port_config.c | 18
-rw-r--r--	drivers/scsi/isci/probe_roms.c | 12
-rw-r--r--	drivers/scsi/isci/probe_roms.h | 2
-rw-r--r--	drivers/scsi/isci/registers.h | 8
-rw-r--r--	drivers/scsi/isci/remote_device.c | 576
-rw-r--r--	drivers/scsi/isci/remote_device.h | 63
-rw-r--r--	drivers/scsi/isci/remote_node_context.c | 393
-rw-r--r--	drivers/scsi/isci/remote_node_context.h | 43
-rw-r--r--	drivers/scsi/isci/request.c | 715
-rw-r--r--	drivers/scsi/isci/request.h | 125
-rw-r--r--	drivers/scsi/isci/scu_completion_codes.h | 2
-rw-r--r--	drivers/scsi/isci/task.c | 800
-rw-r--r--	drivers/scsi/isci/task.h | 132
-rw-r--r--	drivers/scsi/isci/unsolicited_frame_control.c | 30
-rw-r--r--	drivers/scsi/isci/unsolicited_frame_control.h | 6
-rw-r--r--	drivers/scsi/libfc/fc_lport.c | 2
-rw-r--r--	drivers/scsi/lpfc/lpfc.h | 4
-rw-r--r--	drivers/scsi/lpfc/lpfc_bsg.c | 4
-rw-r--r--	drivers/scsi/lpfc/lpfc_bsg.h | 3
-rw-r--r--	drivers/scsi/lpfc/lpfc_crtn.h | 8
-rw-r--r--	drivers/scsi/lpfc/lpfc_debugfs.c | 46
-rw-r--r--	drivers/scsi/lpfc/lpfc_debugfs.h | 418
-rw-r--r--	drivers/scsi/lpfc/lpfc_els.c | 141
-rw-r--r--	drivers/scsi/lpfc/lpfc_hbadisc.c | 18
-rw-r--r--	drivers/scsi/lpfc/lpfc_hw.h | 3
-rw-r--r--	drivers/scsi/lpfc/lpfc_hw4.h | 18
-rw-r--r--	drivers/scsi/lpfc/lpfc_init.c | 377
-rw-r--r--	drivers/scsi/lpfc/lpfc_nportdisc.c | 9
-rw-r--r--	drivers/scsi/lpfc/lpfc_scsi.c | 414
-rw-r--r--	drivers/scsi/lpfc/lpfc_sli.c | 785
-rw-r--r--	drivers/scsi/lpfc/lpfc_sli.h | 2
-rw-r--r--	drivers/scsi/lpfc/lpfc_sli4.h | 17
-rw-r--r--	drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--	drivers/scsi/megaraid/megaraid_sas.h | 6
-rw-r--r--	drivers/scsi/megaraid/megaraid_sas_base.c | 2
-rw-r--r--	drivers/scsi/megaraid/megaraid_sas_fp.c | 21
-rw-r--r--	drivers/scsi/megaraid/megaraid_sas_fusion.c | 4
-rw-r--r--	drivers/scsi/mpt2sas/mpi/mpi2.h | 7
-rw-r--r--	drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h | 68
-rw-r--r--	drivers/scsi/mpt2sas/mpt2sas_base.c | 38
-rw-r--r--	drivers/scsi/mpt2sas/mpt2sas_base.h | 10
-rw-r--r--	drivers/scsi/mpt2sas/mpt2sas_ctl.c | 312
-rw-r--r--	drivers/scsi/mpt2sas/mpt2sas_scsih.c | 597
-rw-r--r--	drivers/scsi/mpt2sas/mpt2sas_transport.c | 243
-rw-r--r--	drivers/scsi/pm8001/pm8001_defs.h | 3
-rw-r--r--	drivers/scsi/pm8001/pm8001_hwi.c | 23
-rw-r--r--	drivers/scsi/pm8001/pm8001_hwi.h | 2
-rw-r--r--	drivers/scsi/pm8001/pm8001_init.c | 10
-rw-r--r--	drivers/scsi/scsi.c | 6
-rw-r--r--	drivers/scsi/scsi_lib.c | 10
-rw-r--r--	drivers/scsi/scsi_pm.c | 2
-rw-r--r--	drivers/scsi/scsi_priv.h | 2
-rw-r--r--	drivers/scsi/scsi_transport_fc.c | 24
-rw-r--r--	drivers/scsi/scsi_transport_spi.c | 4
-rw-r--r--	drivers/scsi/sd.c | 5
-rw-r--r--	drivers/scsi/sg.c | 183
-rw-r--r--	drivers/scsi/st.h | 2
-rw-r--r--	drivers/scsi/storvsc_drv.c | 20
-rw-r--r--	drivers/scsi/ufs/ufshcd.c | 8
-rw-r--r--	include/scsi/iscsi_proto.h | 2
-rw-r--r--	include/scsi/sas.h | 1
99 files changed, 6500 insertions, 4257 deletions
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index 83f8ea8b79eb..80441ab608e4 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,11 @@
+Release Date : Mon. Mar 19, 2012 17:00:00 PST 2012 -
+			(emaild-id:megaraidlinux@lsi.com)
+			Adam Radford
+Current Version : 00.00.06.15-rc1
+Old Version     : 00.00.06.14-rc1
+    1. Optimize HostMSIxVectors setting.
+    2. Add fpRead/WriteCapable, fpRead/WriteAcrossStripe checks.
+-------------------------------------------------------------------------------
 Release Date : Fri. Jan 6, 2012 17:00:00 PST 2010 -
 			(emaild-id:megaraidlinux@lsi.com)
 			Adam Radford
diff --git a/MAINTAINERS b/MAINTAINERS
index 7560921a4e15..fddf29c057a1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1599,6 +1599,7 @@ F: include/linux/bcma/
 
 BROCADE BFA FC SCSI DRIVER
 M:	Jing Huang <huangj@brocade.com>
+M:	Krishna C Gudipati <kgudipat@brocade.com>
 L:	linux-scsi@vger.kernel.org
 S:	Supported
 F:	drivers/scsi/bfa/
@@ -6882,6 +6883,14 @@ F: Documentation/cdrom/
 F:	drivers/cdrom/cdrom.c
 F:	include/linux/cdrom.h
 
+UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER
+M:	Vinayak Holikatti <vinholikatti@gmail.com>
+M:	Santosh Y <santoshsy@gmail.com>
+L:	linux-scsi@vger.kernel.org
+S:	Supported
+F:	Documentation/scsi/ufs.txt
+F:	drivers/scsi/ufs/
+
 UNSORTED BLOCK IMAGES (UBI)
 M:	Artem Bityutskiy <dedekind1@gmail.com>
 W:	http://www.linux-mtd.infradead.org/
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 29684c8142b0..bea04e5d3b51 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -408,6 +408,7 @@ config BLK_DEV_3W_XXXX_RAID
 config SCSI_HPSA
 	tristate "HP Smart Array SCSI driver"
 	depends on PCI && SCSI
+	select CHECK_SIGNATURE
 	help
 	  This driver supports HP Smart Array Controllers (circa 2009).
 	  It is a SCSI alternative to the cciss driver, which is a block
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 2bee51506a91..762820636304 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -424,6 +424,8 @@ static int aac_src_deliver_message(struct fib *fib)
 static int aac_src_ioremap(struct aac_dev *dev, u32 size)
 {
 	if (!size) {
+		iounmap(dev->regs.src.bar1);
+		dev->regs.src.bar1 = NULL;
 		iounmap(dev->regs.src.bar0);
 		dev->base = dev->regs.src.bar0 = NULL;
 		return 0;
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index 1d7b976c850f..a50b6a9030e8 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -132,10 +132,6 @@ struct be_ctrl_info {
 	((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \
 			(size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))
 
-/* Byte offset into the page corresponding to given address */
-#define OFFSET_IN_PAGE(addr) \
-	((size_t)(addr) & (PAGE_SIZE_4K-1))
-
 /* Returns bit offset within a DWORD of a bitfield */
 #define AMAP_BIT_OFFSET(_struct, field) \
 	(((size_t)&(((_struct *)0)->field))%32)
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index cdb15364bc69..d2e9e933f7a3 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -15,6 +15,8 @@
  * Costa Mesa, CA 92626
  */
 
+#include <scsi/iscsi_proto.h>
+
 #include "be.h"
 #include "be_mgmt.h"
 #include "be_main.h"
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 8b40a5b4366c..b0b36c6a145f 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -23,7 +23,7 @@
  * firmware in the BE. These requests are communicated to the processor
  * using Work Request Blocks (WRBs) submitted to the MCC-WRB ring or via one
  * WRB inside a MAILBOX.
- * The commands are serviced by the ARM processor in the BladeEngine's MPU.
+ * The commands are serviced by the ARM processor in the OneConnect's MPU.
  */
 struct be_sge {
 	u32 pa_lo;
@@ -163,7 +163,8 @@ struct be_mcc_mailbox {
 #define OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES 3
 #define OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG 7
 #define OPCODE_COMMON_ISCSI_NTWK_SET_VLAN 14
-#define OPCODE_COMMON_ISCSI_NTWK_CONFIGURE_STATELESS_IP_ADDR 17
+#define OPCODE_COMMON_ISCSI_NTWK_CONFIG_STATELESS_IP_ADDR 17
+#define OPCODE_COMMON_ISCSI_NTWK_REL_STATELESS_IP_ADDR 18
 #define OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR 21
 #define OPCODE_COMMON_ISCSI_NTWK_GET_DEFAULT_GATEWAY 22
 #define OPCODE_COMMON_ISCSI_NTWK_MODIFY_DEFAULT_GATEWAY 23
@@ -274,15 +275,15 @@ struct mgmt_conn_login_options {
 	struct mgmt_auth_method_format auth_data;
 } __packed;
 
-struct ip_address_format {
+struct ip_addr_format {
 	u16 size_of_structure;
 	u8 reserved;
 	u8 ip_type;
-	u8 ip_address[16];
+	u8 addr[16];
 	u32 rsvd0;
 } __packed;
 
 struct mgmt_conn_info {
 	u32 connection_handle;
 	u32 connection_status;
 	u16 src_port;
@@ -290,9 +291,9 @@ struct mgmt_conn_info {
 	u16 dest_port_redirected;
 	u16 cid;
 	u32 estimated_throughput;
-	struct ip_address_format src_ipaddr;
-	struct ip_address_format dest_ipaddr;
-	struct ip_address_format dest_ipaddr_redirected;
+	struct ip_addr_format src_ipaddr;
+	struct ip_addr_format dest_ipaddr;
+	struct ip_addr_format dest_ipaddr_redirected;
 	struct mgmt_conn_login_options negotiated_login_options;
 } __packed;
 
@@ -322,43 +323,115 @@ struct mgmt_session_info {
 	struct mgmt_conn_info conn_list[1];
 } __packed;
 
-struct be_cmd_req_get_session {
+struct be_cmd_get_session_req {
 	struct be_cmd_req_hdr hdr;
 	u32 session_handle;
 } __packed;
 
-struct be_cmd_resp_get_session {
+struct be_cmd_get_session_resp {
 	struct be_cmd_resp_hdr hdr;
 	struct mgmt_session_info session_info;
 } __packed;
 
 struct mac_addr {
-	u16 size_of_struct;
+	u16 size_of_structure;
 	u8 addr[ETH_ALEN];
 } __packed;
 
-struct be_cmd_req_get_boot_target {
+struct be_cmd_get_boot_target_req {
 	struct be_cmd_req_hdr hdr;
 } __packed;
 
-struct be_cmd_resp_get_boot_target {
+struct be_cmd_get_boot_target_resp {
 	struct be_cmd_resp_hdr hdr;
 	u32 boot_session_count;
 	int boot_session_handle;
 };
 
-struct be_cmd_req_mac_query {
+struct be_cmd_mac_query_req {
 	struct be_cmd_req_hdr hdr;
 	u8 type;
 	u8 permanent;
 	u16 if_id;
 } __packed;
 
-struct be_cmd_resp_mac_query {
+struct be_cmd_get_mac_resp {
 	struct be_cmd_resp_hdr hdr;
 	struct mac_addr mac;
 };
 
+struct be_ip_addr_subnet_format {
+	u16 size_of_structure;
+	u8 ip_type;
+	u8 ipv6_prefix_length;
+	u8 addr[16];
+	u8 subnet_mask[16];
+	u32 rsvd0;
+} __packed;
+
+struct be_cmd_get_if_info_req {
+	struct be_cmd_req_hdr hdr;
+	u32 interface_hndl;
+	u32 ip_type;
+} __packed;
+
+struct be_cmd_get_if_info_resp {
+	struct be_cmd_req_hdr hdr;
+	u32 interface_hndl;
+	u32 vlan_priority;
+	u32 ip_addr_count;
+	u32 dhcp_state;
+	struct be_ip_addr_subnet_format ip_addr;
+} __packed;
+
+struct be_ip_addr_record {
+	u32 action;
+	u32 interface_hndl;
+	struct be_ip_addr_subnet_format ip_addr;
+	u32 status;
+} __packed;
+
+struct be_ip_addr_record_params {
+	u32 record_entry_count;
+	struct be_ip_addr_record ip_record;
+} __packed;
+
+struct be_cmd_set_ip_addr_req {
+	struct be_cmd_req_hdr hdr;
+	struct be_ip_addr_record_params ip_params;
+} __packed;
+
+
+struct be_cmd_set_dhcp_req {
+	struct be_cmd_req_hdr hdr;
+	u32 interface_hndl;
+	u32 ip_type;
+	u32 flags;
+	u32 retry_count;
+} __packed;
+
+struct be_cmd_rel_dhcp_req {
+	struct be_cmd_req_hdr hdr;
+	u32 interface_hndl;
+	u32 ip_type;
+} __packed;
+
+struct be_cmd_set_def_gateway_req {
+	struct be_cmd_req_hdr hdr;
+	u32 action;
+	struct ip_addr_format ip_addr;
+} __packed;
+
+struct be_cmd_get_def_gateway_req {
+	struct be_cmd_req_hdr hdr;
+	u32 ip_type;
+} __packed;
+
+struct be_cmd_get_def_gateway_resp {
+	struct be_cmd_req_hdr hdr;
+	struct ip_addr_format ip_addr;
+} __packed;
+
 /******************** Create CQ ***************************/
 /**
  * Pseudo amap definition in which each bit of the actual structure is defined
@@ -489,7 +562,7 @@ struct be_cmd_req_modify_eq_delay {
 
 #define ETH_ALEN	6
 
-struct be_cmd_req_get_mac_addr {
+struct be_cmd_get_nic_conf_req {
 	struct be_cmd_req_hdr hdr;
 	u32 nic_port_count;
 	u32 speed;
@@ -501,7 +574,7 @@ struct be_cmd_req_get_mac_addr {
 	u32 rsvd[23];
 };
 
-struct be_cmd_resp_get_mac_addr {
+struct be_cmd_get_nic_conf_resp {
 	struct be_cmd_resp_hdr hdr;
 	u32 nic_port_count;
 	u32 speed;
@@ -513,6 +586,39 @@ struct be_cmd_resp_get_mac_addr {
 	u32 rsvd[23];
 };
 
+#define BEISCSI_ALIAS_LEN 32
+
+struct be_cmd_hba_name {
+	struct be_cmd_req_hdr hdr;
+	u16 flags;
+	u16 rsvd0;
+	u8 initiator_name[ISCSI_NAME_LEN];
+	u8 initiator_alias[BEISCSI_ALIAS_LEN];
+} __packed;
+
+struct be_cmd_ntwk_link_status_req {
+	struct be_cmd_req_hdr hdr;
+	u32 rsvd0;
+} __packed;
+
+/*** Port Speed Values ***/
+#define BE2ISCSI_LINK_SPEED_ZERO	0x00
+#define BE2ISCSI_LINK_SPEED_10MBPS	0x01
+#define BE2ISCSI_LINK_SPEED_100MBPS	0x02
+#define BE2ISCSI_LINK_SPEED_1GBPS	0x03
+#define BE2ISCSI_LINK_SPEED_10GBPS	0x04
+struct be_cmd_ntwk_link_status_resp {
+	struct be_cmd_resp_hdr hdr;
+	u8 phys_port;
+	u8 mac_duplex;
+	u8 mac_speed;
+	u8 mac_fault;
+	u8 mgmt_mac_duplex;
+	u8 mgmt_mac_speed;
+	u16 qos_link_speed;
+	u32 logical_link_speed;
+} __packed;
+
 int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
 			  struct be_queue_info *eq, int eq_delay);
 
@@ -530,11 +636,8 @@ int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
 int be_poll_mcc(struct be_ctrl_info *ctrl);
 int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
 			    struct beiscsi_hba *phba);
-unsigned int be_cmd_get_mac_addr(struct beiscsi_hba *phba);
-unsigned int beiscsi_get_boot_target(struct beiscsi_hba *phba);
-unsigned int beiscsi_get_session_info(struct beiscsi_hba *phba,
-				      u32 boot_session_handle,
-				      struct be_dma_mem *nonemb_cmd);
+unsigned int be_cmd_get_initname(struct beiscsi_hba *phba);
+unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba);
 
 void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag);
 /*ISCSI Functuions */
@@ -715,7 +818,7 @@ struct be_eq_delay_params_in {
 
 struct tcp_connect_and_offload_in {
 	struct be_cmd_req_hdr hdr;
-	struct ip_address_format ip_address;
+	struct ip_addr_format ip_address;
 	u16 tcp_port;
 	u16 cid;
 	u16 cq_id;
@@ -792,13 +895,14 @@ struct be_fw_cfg {
 	u32 function_caps;
 } __packed;
 
-struct be_all_if_id {
+struct be_cmd_get_all_if_id_req {
 	struct be_cmd_req_hdr hdr;
 	u32 if_count;
 	u32 if_hndl_list[1];
 } __packed;
 
 #define ISCSI_OPCODE_SCSI_DATA_OUT 5
+#define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY 5
 #define OPCODE_COMMON_MODIFY_EQ_DELAY 41
 #define OPCODE_COMMON_ISCSI_CLEANUP 59
 #define OPCODE_COMMON_TCP_UPLOAD 56
@@ -810,6 +914,8 @@ struct be_all_if_id {
 #define OPCODE_ISCSI_INI_DRIVER_OFFLOAD_SESSION 41
 #define OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION 42
 #define OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET 52
+#define OPCODE_COMMON_WRITE_FLASH 96
+#define OPCODE_COMMON_READ_FLASH 97
 
 /* --- CMD_ISCSI_INVALIDATE_CONNECTION_TYPE --- */
 #define CMD_ISCSI_COMMAND_INVALIDATE 1
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 33c8f09c7ac1..43f35034585d 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -23,6 +23,8 @@
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
+#include <scsi/scsi_netlink.h>
+#include <net/netlink.h>
 #include <scsi/scsi.h>
 
 #include "be_iscsi.h"
@@ -207,6 +209,301 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
 	return beiscsi_bindconn_cid(phba, beiscsi_conn, beiscsi_ep->ep_cid);
 }
 
+static int beiscsi_create_ipv4_iface(struct beiscsi_hba *phba)
+{
+	if (phba->ipv4_iface)
+		return 0;
+
+	phba->ipv4_iface = iscsi_create_iface(phba->shost,
+					      &beiscsi_iscsi_transport,
+					      ISCSI_IFACE_TYPE_IPV4,
+					      0, 0);
+	if (!phba->ipv4_iface) {
+		shost_printk(KERN_ERR, phba->shost, "Could not "
+			     "create default IPv4 address.\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int beiscsi_create_ipv6_iface(struct beiscsi_hba *phba)
+{
+	if (phba->ipv6_iface)
+		return 0;
+
+	phba->ipv6_iface = iscsi_create_iface(phba->shost,
+					      &beiscsi_iscsi_transport,
+					      ISCSI_IFACE_TYPE_IPV6,
+					      0, 0);
+	if (!phba->ipv6_iface) {
+		shost_printk(KERN_ERR, phba->shost, "Could not "
+			     "create default IPv6 address.\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+void beiscsi_create_def_ifaces(struct beiscsi_hba *phba)
+{
+	struct be_cmd_get_if_info_resp if_info;
+
+	if (!mgmt_get_if_info(phba, BE2_IPV4, &if_info))
+		beiscsi_create_ipv4_iface(phba);
+
+	if (!mgmt_get_if_info(phba, BE2_IPV6, &if_info))
+		beiscsi_create_ipv6_iface(phba);
+}
+
+void beiscsi_destroy_def_ifaces(struct beiscsi_hba *phba)
+{
+	if (phba->ipv6_iface)
+		iscsi_destroy_iface(phba->ipv6_iface);
+	if (phba->ipv4_iface)
+		iscsi_destroy_iface(phba->ipv4_iface);
+}
+
+static int
+beiscsi_set_static_ip(struct Scsi_Host *shost,
+		struct iscsi_iface_param_info *iface_param,
+		void *data, uint32_t dt_len)
+{
+	struct beiscsi_hba *phba = iscsi_host_priv(shost);
+	struct iscsi_iface_param_info *iface_ip = NULL;
+	struct iscsi_iface_param_info *iface_subnet = NULL;
+	struct nlattr *nla;
+	int ret;
+
+
+	switch (iface_param->param) {
+	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
+		nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_ADDR);
+		if (nla)
+			iface_ip = nla_data(nla);
+
+		nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_SUBNET);
+		if (nla)
+			iface_subnet = nla_data(nla);
+		break;
+	case ISCSI_NET_PARAM_IPV4_ADDR:
+		iface_ip = iface_param;
+		nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_SUBNET);
+		if (nla)
+			iface_subnet = nla_data(nla);
+		break;
+	case ISCSI_NET_PARAM_IPV4_SUBNET:
+		iface_subnet = iface_param;
+		nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_ADDR);
+		if (nla)
+			iface_ip = nla_data(nla);
+		break;
+	default:
+		shost_printk(KERN_ERR, shost, "Unsupported param %d\n",
+			     iface_param->param);
+	}
+
+	if (!iface_ip || !iface_subnet) {
+		shost_printk(KERN_ERR, shost, "IP and Subnet Mask required\n");
+		return -EINVAL;
+	}
+
+	ret = mgmt_set_ip(phba, iface_ip, iface_subnet,
+			ISCSI_BOOTPROTO_STATIC);
+
+	return ret;
+}
+
+static int
+beiscsi_set_ipv4(struct Scsi_Host *shost,
+		struct iscsi_iface_param_info *iface_param,
+		void *data, uint32_t dt_len)
+{
+	struct beiscsi_hba *phba = iscsi_host_priv(shost);
+	int ret = 0;
+
+	/* Check the param */
+	switch (iface_param->param) {
+	case ISCSI_NET_PARAM_IPV4_GW:
+		ret = mgmt_set_gateway(phba, iface_param);
+		break;
+	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
+		if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
+			ret = mgmt_set_ip(phba, iface_param,
+					NULL, ISCSI_BOOTPROTO_DHCP);
+		else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
+			ret = beiscsi_set_static_ip(shost, iface_param,
+						    data, dt_len);
+		else
+			shost_printk(KERN_ERR, shost, "Invalid BOOTPROTO: %d\n",
+					iface_param->value[0]);
+		break;
+	case ISCSI_NET_PARAM_IFACE_ENABLE:
+		if (iface_param->value[0] == ISCSI_IFACE_ENABLE)
+			ret = beiscsi_create_ipv4_iface(phba);
+		else
+			iscsi_destroy_iface(phba->ipv4_iface);
+		break;
+	case ISCSI_NET_PARAM_IPV4_SUBNET:
+	case ISCSI_NET_PARAM_IPV4_ADDR:
+		ret = beiscsi_set_static_ip(shost, iface_param,
+					    data, dt_len);
+		break;
+	default:
+		shost_printk(KERN_ERR, shost, "Param %d not supported\n",
+			     iface_param->param);
+	}
+
+	return ret;
+}
+
+static int
+beiscsi_set_ipv6(struct Scsi_Host *shost,
+		struct iscsi_iface_param_info *iface_param,
+		void *data, uint32_t dt_len)
+{
+	struct beiscsi_hba *phba = iscsi_host_priv(shost);
+	int ret = 0;
+
+	switch (iface_param->param) {
+	case ISCSI_NET_PARAM_IFACE_ENABLE:
+		if (iface_param->value[0] == ISCSI_IFACE_ENABLE)
+			ret = beiscsi_create_ipv6_iface(phba);
+		else {
+			iscsi_destroy_iface(phba->ipv6_iface);
+			ret = 0;
+		}
+		break;
+	case ISCSI_NET_PARAM_IPV6_ADDR:
+		ret = mgmt_set_ip(phba, iface_param, NULL,
+				  ISCSI_BOOTPROTO_STATIC);
+		break;
+	default:
+		shost_printk(KERN_ERR, shost, "Param %d not supported\n",
+			     iface_param->param);
+	}
+
+	return ret;
+}
+
+int be2iscsi_iface_set_param(struct Scsi_Host *shost,
+		void *data, uint32_t dt_len)
+{
+	struct iscsi_iface_param_info *iface_param = NULL;
+	struct nlattr *attrib;
+	uint32_t rm_len = dt_len;
+	int ret = 0 ;
+
+	nla_for_each_attr(attrib, data, dt_len, rm_len) {
+		iface_param = nla_data(attrib);
+
+		if (iface_param->param_type != ISCSI_NET_PARAM)
+			continue;
+
+		/*
+		 * BE2ISCSI only supports 1 interface
+		 */
+		if (iface_param->iface_num) {
+			shost_printk(KERN_ERR, shost, "Invalid iface_num %d."
+				     "Only iface_num 0 is supported.\n",
+				     iface_param->iface_num);
+			return -EINVAL;
+		}
+
+		switch (iface_param->iface_type) {
+		case ISCSI_IFACE_TYPE_IPV4:
+			ret = beiscsi_set_ipv4(shost, iface_param,
+					       data, dt_len);
+			break;
+		case ISCSI_IFACE_TYPE_IPV6:
+			ret = beiscsi_set_ipv6(shost, iface_param,
+					       data, dt_len);
+			break;
+		default:
+			shost_printk(KERN_ERR, shost,
+				     "Invalid iface type :%d passed\n",
+				     iface_param->iface_type);
+			break;
+		}
+
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+static int be2iscsi_get_if_param(struct beiscsi_hba *phba,
+		struct iscsi_iface *iface, int param,
+		char *buf)
+{
+	struct be_cmd_get_if_info_resp if_info;
+	int len, ip_type = BE2_IPV4;
+
+	memset(&if_info, 0, sizeof(if_info));
+
+	if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
+		ip_type = BE2_IPV6;
+
+	len = mgmt_get_if_info(phba, ip_type, &if_info);
+	if (len)
+		return len;
+
+	switch (param) {
+	case ISCSI_NET_PARAM_IPV4_ADDR:
+		len = sprintf(buf, "%pI4\n", &if_info.ip_addr.addr);
+		break;
+	case ISCSI_NET_PARAM_IPV6_ADDR:
+		len = sprintf(buf, "%pI6\n", &if_info.ip_addr.addr);
+		break;
+	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
+		if (!if_info.dhcp_state)
+			len = sprintf(buf, "static");
+		else
+			len = sprintf(buf, "dhcp");
+		break;
+	case ISCSI_NET_PARAM_IPV4_SUBNET:
+		len = sprintf(buf, "%pI4\n", &if_info.ip_addr.subnet_mask);
+		break;
+	default:
+		WARN_ON(1);
+	}
+
+	return len;
+}
+
+int be2iscsi_iface_get_param(struct iscsi_iface *iface,
+		enum iscsi_param_type param_type,
+		int param, char *buf)
+{
+	struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
+	struct beiscsi_hba *phba = iscsi_host_priv(shost);
+	struct be_cmd_get_def_gateway_resp gateway;
+	int len = -ENOSYS;
+
+	switch (param) {
+	case ISCSI_NET_PARAM_IPV4_ADDR:
+	case ISCSI_NET_PARAM_IPV4_SUBNET:
+	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
+	case ISCSI_NET_PARAM_IPV6_ADDR:
+		len = be2iscsi_get_if_param(phba, iface, param, buf);
+		break;
+	case ISCSI_NET_PARAM_IFACE_ENABLE:
+		len = sprintf(buf, "enabled");
+		break;
+	case ISCSI_NET_PARAM_IPV4_GW:
+		memset(&gateway, 0, sizeof(gateway));
+		len = mgmt_get_gateway(phba, BE2_IPV4, &gateway);
+		if (!len)
+			len = sprintf(buf, "%pI4\n", &gateway.ip_addr.addr);
+		break;
+	default:
+		len = -ENOSYS;
+	}
+
+	return len;
+}
+
 /**
  * beiscsi_ep_get_param - get the iscsi parameter
  * @ep: pointer to iscsi ep
@@ -221,7 +518,7 @@ int beiscsi_ep_get_param(struct iscsi_endpoint *ep,
 	struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
 	int len = 0;
 
-	SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_get_param, param= %d\n", param);
+	SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_get_param, param= %d\n", param);
 
 	switch (param) {
 	case ISCSI_PARAM_CONN_PORT:
@@ -279,6 +576,121 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
 }
 
 /**
+ * beiscsi_get_initname - Read Initiator Name from flash
+ * @buf: buffer bointer
+ * @phba: The device priv structure instance
+ *
+ * returns number of bytes
+ */
+static int beiscsi_get_initname(char *buf, struct beiscsi_hba *phba)
+{
+	int rc;
+	unsigned int tag, wrb_num;
+	unsigned short status, extd_status;
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_hba_name *resp;
+	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+
+	tag = be_cmd_get_initname(phba);
+	if (!tag) {
+		SE_DEBUG(DBG_LVL_1, "Getting Initiator Name Failed\n");
+		return -EBUSY;
+	} else
+		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+				phba->ctrl.mcc_numtag[tag]);
+
+	wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
+	extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
+	status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
+
+	if (status || extd_status) {
+		SE_DEBUG(DBG_LVL_1, "MailBox Command Failed with "
+				"status = %d extd_status = %d\n",
+				status, extd_status);
+		free_mcc_tag(&phba->ctrl, tag);
+		return -EAGAIN;
+	}
+	wrb = queue_get_wrb(mccq, wrb_num);
+	free_mcc_tag(&phba->ctrl, tag);
+	resp = embedded_payload(wrb);
+	rc = sprintf(buf, "%s\n", resp->initiator_name);
+	return rc;
+}
+
+/**
+ * beiscsi_get_port_state - Get the Port State
+ * @shost : pointer to scsi_host structure
+ *
+ * returns number of bytes
+ */
+static void beiscsi_get_port_state(struct Scsi_Host *shost)
+{
+	struct beiscsi_hba *phba = iscsi_host_priv(shost);
+	struct iscsi_cls_host *ihost = shost->shost_data;
+
+	ihost->port_state = (phba->state == BE_ADAPTER_UP) ?
+		ISCSI_PORT_STATE_UP : ISCSI_PORT_STATE_DOWN;
+}
+
+/**
+ * beiscsi_get_port_speed - Get the Port Speed from Adapter
+ * @shost : pointer to scsi_host structure
+ *
+ * returns Success/Failure
+ */
+static int beiscsi_get_port_speed(struct Scsi_Host *shost)
+{
+	unsigned int tag, wrb_num;
+	unsigned short status, extd_status;
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_ntwk_link_status_resp *resp;
+	struct beiscsi_hba *phba = iscsi_host_priv(shost);
+	struct iscsi_cls_host *ihost = shost->shost_data;
+	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+
+	tag = be_cmd_get_port_speed(phba);
+	if (!tag) {
+		SE_DEBUG(DBG_LVL_1, "Getting Port Speed Failed\n");
+		return -EBUSY;
+	} else
+		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+				phba->ctrl.mcc_numtag[tag]);
+
+	wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
+	extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
+	status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
+
+	if (status || extd_status) {
+		SE_DEBUG(DBG_LVL_1, "MailBox Command Failed with "
+				"status = %d extd_status = %d\n",
+				status, extd_status);
+		free_mcc_tag(&phba->ctrl, tag);
+		return -EAGAIN;
+	}
+	wrb = queue_get_wrb(mccq, wrb_num);
+	free_mcc_tag(&phba->ctrl, tag);
+	resp = embedded_payload(wrb);
+
+	switch (resp->mac_speed) {
+	case BE2ISCSI_LINK_SPEED_10MBPS:
+		ihost->port_speed = ISCSI_PORT_SPEED_10MBPS;
+		break;
+	case BE2ISCSI_LINK_SPEED_100MBPS:
+		ihost->port_speed = BE2ISCSI_LINK_SPEED_100MBPS;
+		break;
+	case BE2ISCSI_LINK_SPEED_1GBPS:
+		ihost->port_speed = ISCSI_PORT_SPEED_1GBPS;
+		break;
+	case BE2ISCSI_LINK_SPEED_10GBPS:
+		ihost->port_speed = ISCSI_PORT_SPEED_10GBPS;
+		break;
+	default:
+		ihost->port_speed = ISCSI_PORT_SPEED_UNKNOWN;
+	}
+	return 0;
+}
+
+/**
  * beiscsi_get_host_param - get the iscsi parameter
  * @shost: pointer to scsi_host structure
  * @param: parameter type identifier
@@ -301,6 +713,27 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
 			return status;
 		}
 		break;
+	case ISCSI_HOST_PARAM_INITIATOR_NAME:
+		status = beiscsi_get_initname(buf, phba);
+		if (status < 0) {
+			SE_DEBUG(DBG_LVL_1,
+					"Retreiving Initiator Name Failed\n");
+			return status;
+		}
+		break;
+	case ISCSI_HOST_PARAM_PORT_STATE:
+		beiscsi_get_port_state(shost);
+		status = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost));
+		break;
+	case ISCSI_HOST_PARAM_PORT_SPEED:
+		status = beiscsi_get_port_speed(shost);
+		if (status) {
+			SE_DEBUG(DBG_LVL_1,
+					"Retreiving Port Speed Failed\n");
+			return status;
+		}
+		status = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
+		break;
 	default:
 		return iscsi_host_get_param(shost, param, buf);
 	}
@@ -309,46 +742,21 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
 
 int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba)
 {
-	struct be_cmd_resp_get_mac_addr *resp;
-	struct be_mcc_wrb *wrb;
-	unsigned int tag, wrb_num;
-	unsigned short status, extd_status;
-	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+	struct be_cmd_get_nic_conf_resp resp;
 	int rc;
 
-	if (phba->read_mac_address)
-		return sysfs_format_mac(buf, phba->mac_address,
-					ETH_ALEN);
+	if (strlen(phba->mac_address))
+		return strlcpy(buf, phba->mac_address, PAGE_SIZE);
 
-	tag = be_cmd_get_mac_addr(phba);
-	if (!tag) {
-		SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed\n");
-		return -EBUSY;
-	} else
-		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
-					 phba->ctrl.mcc_numtag[tag]);
+	memset(&resp, 0, sizeof(resp));
+	rc = mgmt_get_nic_conf(phba, &resp);
+	if (rc)
+		return rc;
 
-	wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
-	extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
-	status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
-	if (status || extd_status) {
-		SE_DEBUG(DBG_LVL_1, "Failed to get be_cmd_get_mac_addr"
-			 " status = %d extd_status = %d\n",
-			 status, extd_status);
-		free_mcc_tag(&phba->ctrl, tag);
-		return -EAGAIN;
-	}
-	wrb = queue_get_wrb(mccq, wrb_num);
-	free_mcc_tag(&phba->ctrl, tag);
-	resp = embedded_payload(wrb);
-	memcpy(phba->mac_address, resp->mac_address, ETH_ALEN);
-	rc = sysfs_format_mac(buf, phba->mac_address,
-			      ETH_ALEN);
-	phba->read_mac_address = 1;
-	return rc;
+	memcpy(phba->mac_address, resp.mac_address, ETH_ALEN);
+	return sysfs_format_mac(buf, phba->mac_address, ETH_ALEN);
 }
 
-
 /**
  * beiscsi_conn_get_stats - get the iscsi stats
  * @cls_conn: pointer to iscsi cls conn
@@ -736,11 +1144,24 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
 umode_t be2iscsi_attr_is_visible(int param_type, int param)
 {
 	switch (param_type) {
+	case ISCSI_NET_PARAM:
+		switch (param) {
+		case ISCSI_NET_PARAM_IFACE_ENABLE:
+		case ISCSI_NET_PARAM_IPV4_ADDR:
+		case ISCSI_NET_PARAM_IPV4_SUBNET:
+		case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
+		case ISCSI_NET_PARAM_IPV4_GW:
+		case ISCSI_NET_PARAM_IPV6_ADDR:
+			return S_IRUGO;
+		default:
+			return 0;
+		}
 	case ISCSI_HOST_PARAM:
 		switch (param) {
 		case ISCSI_HOST_PARAM_HWADDRESS:
-		case ISCSI_HOST_PARAM_IPADDRESS:
 		case ISCSI_HOST_PARAM_INITIATOR_NAME:
+		case ISCSI_HOST_PARAM_PORT_STATE:
+		case ISCSI_HOST_PARAM_PORT_SPEED:
 			return S_IRUGO;
 		default:
 			return 0;
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index 5c45be134501..8b826fc06bcc 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -25,6 +25,21 @@
 
 #define BE2_IPV4  0x1
 #define BE2_IPV6  0x10
+#define BE2_DHCP_V4 0x05
+
+#define NON_BLOCKING 0x0
+#define BLOCKING 0x1
+
+void beiscsi_create_def_ifaces(struct beiscsi_hba *phba);
+
+void beiscsi_destroy_def_ifaces(struct beiscsi_hba *phba);
+
+int be2iscsi_iface_get_param(struct iscsi_iface *iface,
+			     enum iscsi_param_type param_type,
+			     int param, char *buf);
+
+int be2iscsi_iface_set_param(struct Scsi_Host *shost,
+			     void *data, uint32_t count);
 
 umode_t be2iscsi_attr_is_visible(int param_type, int param);
 
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 375756fa95cf..0b1d99c99fd2 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -28,8 +28,11 @@
 #include <linux/semaphore.h>
 #include <linux/iscsi_boot_sysfs.h>
 #include <linux/module.h>
+#include <linux/bsg-lib.h>
 
 #include <scsi/libiscsi.h>
+#include <scsi/scsi_bsg_iscsi.h>
+#include <scsi/scsi_netlink.h>
 #include <scsi/scsi_transport_iscsi.h>
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_cmnd.h>
@@ -48,7 +51,8 @@ static unsigned int num_hba = 0;
 
 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
 MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
-MODULE_AUTHOR("ServerEngines Corporation");
+MODULE_VERSION(BUILD_STR);
+MODULE_AUTHOR("Emulex Corporation");
 MODULE_LICENSE("GPL");
 module_param(be_iopoll_budget, int, 0);
 module_param(enable_msix, int, 0);
@@ -147,15 +151,15 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
 	struct invalidate_command_table *inv_tbl;
 	struct be_dma_mem nonemb_cmd;
 	unsigned int cid, tag, i, num_invalidate;
-	int rc = FAILED;
 
 	/* invalidate iocbs */
 	cls_session = starget_to_session(scsi_target(sc->device));
 	session = cls_session->dd_data;
 	spin_lock_bh(&session->lock);
-	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
-		goto unlock;
-
+	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
+		spin_unlock_bh(&session->lock);
+		return FAILED;
+	}
 	conn = session->leadconn;
 	beiscsi_conn = conn->dd_data;
 	phba = beiscsi_conn->phba;
@@ -208,9 +212,6 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
 	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
 			    nonemb_cmd.va, nonemb_cmd.dma);
 	return iscsi_eh_device_reset(sc);
-unlock:
-	spin_unlock_bh(&session->lock);
-	return rc;
 }
 
 static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
@@ -230,10 +231,10 @@ static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
 	case ISCSI_BOOT_TGT_IP_ADDR:
 		if (boot_conn->dest_ipaddr.ip_type == 0x1)
 			rc = sprintf(buf, "%pI4\n",
-				(char *)&boot_conn->dest_ipaddr.ip_address);
+				(char *)&boot_conn->dest_ipaddr.addr);
 		else
 			rc = sprintf(str, "%pI6\n",
-				(char *)&boot_conn->dest_ipaddr.ip_address);
+				(char *)&boot_conn->dest_ipaddr.addr);
 		break;
 	case ISCSI_BOOT_TGT_PORT:
 		rc = sprintf(str, "%d\n", boot_conn->dest_port);
@@ -311,12 +312,8 @@ static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
 		rc = sprintf(str, "0\n");
 		break;
 	case ISCSI_BOOT_ETH_MAC:
-		rc = beiscsi_get_macaddr(buf, phba);
-		if (rc < 0) {
-			SE_DEBUG(DBG_LVL_1, "beiscsi_get_macaddr Failed\n");
-			return rc;
-		}
-		break;
+		rc  = beiscsi_get_macaddr(str, phba);
+		break;
 	default:
 		rc = -ENOSYS;
 		break;
@@ -394,7 +391,7 @@ MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
 
 static struct scsi_host_template beiscsi_sht = {
 	.module = THIS_MODULE,
-	.name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
+	.name = "Emulex 10Gbe open-iscsi Initiator Driver",
 	.proc_name = DRV_NAME,
 	.queuecommand = iscsi_queuecommand,
 	.change_queue_depth = iscsi_change_queue_depth,
@@ -409,6 +406,8 @@ static struct scsi_host_template beiscsi_sht = {
 	.max_sectors = BEISCSI_MAX_SECTORS,
 	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
 	.use_clustering = ENABLE_CLUSTERING,
+	.vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,
+
 };
 
 static struct scsi_transport_template *beiscsi_scsi_transport;
@@ -435,6 +434,7 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
 	phba->shost = shost;
 	phba->pcidev = pci_dev_get(pcidev);
 	pci_set_drvdata(pcidev, phba);
+	phba->interface_handle = 0xFFFFFFFF;
 
 	if (iscsi_host_add(shost, &phba->pcidev->dev))
 		goto free_devices;
@@ -544,8 +544,7 @@ static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
 					 &mbox_mem_alloc->dma);
 	if (!mbox_mem_alloc->va) {
 		beiscsi_unmap_pci_function(phba);
-		status = -ENOMEM;
-		return status;
+		return -ENOMEM;
 	}
 
 	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
@@ -1252,9 +1251,9 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
 	task = pwrb_handle->pio_handle;
 
 	io_task = task->dd_data;
-	spin_lock(&phba->mgmt_sgl_lock);
+	spin_lock_bh(&phba->mgmt_sgl_lock);
 	free_mgmt_sgl_handle(phba, io_task->psgl_handle);
-	spin_unlock(&phba->mgmt_sgl_lock);
+	spin_unlock_bh(&phba->mgmt_sgl_lock);
 	spin_lock_bh(&session->lock);
 	free_wrb_handle(phba, pwrb_context, pwrb_handle);
 	spin_unlock_bh(&session->lock);
@@ -1370,8 +1369,6 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
 	struct be_bus_address phys_addr;
 	struct list_head *pbusy_list;
 	struct async_pdu_handle *pasync_handle = NULL;
-	int buffer_len = 0;
-	unsigned char buffer_index = -1;
 	unsigned char is_header = 0;
 
 	phys_addr.u.a32.address_lo =
@@ -1392,22 +1389,11 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
 		pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
 			(pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
 			index) / 32] & PDUCQE_INDEX_MASK));
-
-		buffer_len = (unsigned int)(phys_addr.u.a64.address -
-			pasync_ctx->async_header.pa_base.u.a64.address);
-
-		buffer_index = buffer_len /
-			pasync_ctx->async_header.buffer_size;
-
 		break;
 	case UNSOL_DATA_NOTIFY:
 		pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
 			dw[offsetof(struct amap_i_t_dpdu_cqe,
 			index) / 32] & PDUCQE_INDEX_MASK));
-		buffer_len = (unsigned long)(phys_addr.u.a64.address -
-			pasync_ctx->async_data.pa_base.u.
-			a64.address);
-		buffer_index = buffer_len / pasync_ctx->async_data.buffer_size;
 		break;
 	default:
 		pbusy_list = NULL;
@@ -1418,11 +1404,9 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
 		return NULL;
 	}
 
-	WARN_ON(!(buffer_index <= pasync_ctx->async_data.num_entries));
 	WARN_ON(list_empty(pbusy_list));
 	list_for_each_entry(pasync_handle, pbusy_list, link) {
-		WARN_ON(pasync_handle->consumed);
-		if (pasync_handle->index == buffer_index)
+		if (pasync_handle->pa.u.a64.address == phys_addr.u.a64.address)
 			break;
 	}
 
@@ -1449,15 +1433,13 @@ hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
 	unsigned int num_entries, writables = 0;
 	unsigned int *pep_read_ptr, *pwritables;
 
-
+	num_entries = pasync_ctx->num_entries;
 	if (is_header) {
 		pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
 		pwritables = &pasync_ctx->async_header.writables;
-		num_entries = pasync_ctx->async_header.num_entries;
 	} else {
 		pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
 		pwritables = &pasync_ctx->async_data.writables;
-		num_entries = pasync_ctx->async_data.num_entries;
 	}
 
 	while ((*pep_read_ptr) != cq_index) {
@@ -1491,14 +1473,13 @@ hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
 	return 0;
 }
 
-static unsigned int hwi_free_async_msg(struct beiscsi_hba *phba,
+static void hwi_free_async_msg(struct beiscsi_hba *phba,
 				       unsigned int cri)
 {
 	struct hwi_controller *phwi_ctrlr;
 	struct hwi_async_pdu_context *pasync_ctx;
 	struct async_pdu_handle *pasync_handle, *tmp_handle;
 	struct list_head *plist;
-	unsigned int i = 0;
 
 	phwi_ctrlr = phba->phwi_ctrlr;
 	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
@@ -1508,23 +1489,20 @@ static unsigned int hwi_free_async_msg(struct beiscsi_hba *phba,
 	list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
 		list_del(&pasync_handle->link);
 
-		if (i == 0) {
+		if (pasync_handle->is_header) {
 			list_add_tail(&pasync_handle->link,
 				      &pasync_ctx->async_header.free_list);
 			pasync_ctx->async_header.free_entries++;
-			i++;
 		} else {
 			list_add_tail(&pasync_handle->link,
 				      &pasync_ctx->async_data.free_list);
 			pasync_ctx->async_data.free_entries++;
-			i++;
 		}
 	}
 
 	INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
 	pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
 	pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
-	return 0;
 }
 
 static struct phys_addr *
@@ -1557,16 +1535,15 @@ static void hwi_post_async_buffers(struct beiscsi_hba *phba,
 
 	phwi_ctrlr = phba->phwi_ctrlr;
 	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
+	num_entries = pasync_ctx->num_entries;
 
 	if (is_header) {
-		num_entries = pasync_ctx->async_header.num_entries;
 		writables = min(pasync_ctx->async_header.writables,
 				pasync_ctx->async_header.free_entries);
 		pfree_link = pasync_ctx->async_header.free_list.next;
 		host_write_num = pasync_ctx->async_header.host_write_ptr;
 		ring_id = phwi_ctrlr->default_pdu_hdr.id;
 	} else {
-		num_entries = pasync_ctx->async_data.num_entries;
 		writables = min(pasync_ctx->async_data.writables,
 				pasync_ctx->async_data.free_entries);
 		pfree_link = pasync_ctx->async_data.free_list.next;
@@ -1673,7 +1650,7 @@ hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
 			}
 			memcpy(pfirst_buffer + offset,
 			       pasync_handle->pbuffer, buf_len);
-			offset = buf_len;
+			offset += buf_len;
 		}
 		index++;
 	}
@@ -1682,10 +1659,9 @@ hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
 			(beiscsi_conn->beiscsi_conn_cid -
 			phba->fw_config.iscsi_cid_start),
 			phdr, hdr_len, pfirst_buffer,
-			buf_len);
+			offset);
 
-	if (status == 0)
-		hwi_free_async_msg(phba, cri);
+	hwi_free_async_msg(phba, cri);
 	return 0;
 }
 
@@ -2229,7 +2205,7 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
 	struct mem_array *mem_arr, *mem_arr_orig;
 	unsigned int i, j, alloc_size, curr_alloc_size;
 
-	phba->phwi_ctrlr = kmalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
+	phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
 	if (!phba->phwi_ctrlr)
 		return -ENOMEM;
 
@@ -2349,27 +2325,21 @@ static void iscsi_init_global_templates(struct beiscsi_hba *phba)
 	AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
 }
 
-static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
+static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
 {
 	struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
-	struct wrb_handle *pwrb_handle;
+	struct wrb_handle *pwrb_handle = NULL;
 	struct hwi_controller *phwi_ctrlr;
 	struct hwi_wrb_context *pwrb_context;
-	struct iscsi_wrb *pwrb;
-	unsigned int num_cxn_wrbh;
-	unsigned int num_cxn_wrb, j, idx, index;
+	struct iscsi_wrb *pwrb = NULL;
+	unsigned int num_cxn_wrbh = 0;
+	unsigned int num_cxn_wrb = 0, j, idx = 0, index;
 
 	mem_descr_wrbh = phba->init_mem;
 	mem_descr_wrbh += HWI_MEM_WRBH;
 
 	mem_descr_wrb = phba->init_mem;
 	mem_descr_wrb += HWI_MEM_WRB;
-
-	idx = 0;
-	pwrb_handle = mem_descr_wrbh->mem_array[idx].virtual_address;
-	num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
-			((sizeof(struct wrb_handle)) *
-			phba->params.wrbs_per_cxn));
 	phwi_ctrlr = phba->phwi_ctrlr;
 
 	for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
@@ -2377,12 +2347,32 @@ static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
 		pwrb_context->pwrb_handle_base =
 				kzalloc(sizeof(struct wrb_handle *) *
 					phba->params.wrbs_per_cxn, GFP_KERNEL);
+		if (!pwrb_context->pwrb_handle_base) {
+			shost_printk(KERN_ERR, phba->shost,
+					"Mem Alloc Failed. Failing to load\n");
+			goto init_wrb_hndl_failed;
+		}
 		pwrb_context->pwrb_handle_basestd =
 				kzalloc(sizeof(struct wrb_handle *) *
 					phba->params.wrbs_per_cxn, GFP_KERNEL);
+		if (!pwrb_context->pwrb_handle_basestd) {
+			shost_printk(KERN_ERR, phba->shost,
+					"Mem Alloc Failed. Failing to load\n");
+			goto init_wrb_hndl_failed;
+		}
+		if (!num_cxn_wrbh) {
+			pwrb_handle =
+				mem_descr_wrbh->mem_array[idx].virtual_address;
+			num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
+					((sizeof(struct wrb_handle)) *
+					 phba->params.wrbs_per_cxn));
+			idx++;
+		}
+		pwrb_context->alloc_index = 0;
+		pwrb_context->wrb_handles_available = 0;
+		pwrb_context->free_index = 0;
+
 		if (num_cxn_wrbh) {
-			pwrb_context->alloc_index = 0;
-			pwrb_context->wrb_handles_available = 0;
 			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
 				pwrb_context->pwrb_handle_base[j] = pwrb_handle;
 				pwrb_context->pwrb_handle_basestd[j] =
@@ -2391,49 +2381,21 @@ static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
 				pwrb_handle->wrb_index = j;
 				pwrb_handle++;
 			}
-			pwrb_context->free_index = 0;
-			num_cxn_wrbh--;
-		} else {
-			idx++;
-			pwrb_handle =
-			    mem_descr_wrbh->mem_array[idx].virtual_address;
-			num_cxn_wrbh =
-			    ((mem_descr_wrbh->mem_array[idx].size) /
-			     ((sizeof(struct wrb_handle)) *
-			      phba->params.wrbs_per_cxn));
-			pwrb_context->alloc_index = 0;
-			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
-				pwrb_context->pwrb_handle_base[j] = pwrb_handle;
-				pwrb_context->pwrb_handle_basestd[j] =
-				    pwrb_handle;
-				pwrb_context->wrb_handles_available++;
-				pwrb_handle->wrb_index = j;
-				pwrb_handle++;
-			}
-			pwrb_context->free_index = 0;
 			num_cxn_wrbh--;
 		}
 	}
 	idx = 0;
-	pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
-	num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
-		      ((sizeof(struct iscsi_wrb) *
-			phba->params.wrbs_per_cxn));
 	for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
 		pwrb_context = &phwi_ctrlr->wrb_context[index];
-		if (num_cxn_wrb) {
-			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
-				pwrb_handle = pwrb_context->pwrb_handle_base[j];
-				pwrb_handle->pwrb = pwrb;
-				pwrb++;
-			}
-			num_cxn_wrb--;
-		} else {
-			idx++;
+		if (!num_cxn_wrb) {
 			pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
 			num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
 				      ((sizeof(struct iscsi_wrb) *
 					phba->params.wrbs_per_cxn));
+			idx++;
+		}
+
+		if (num_cxn_wrb) {
 			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
 				pwrb_handle = pwrb_context->pwrb_handle_base[j];
 				pwrb_handle->pwrb = pwrb;
@@ -2442,6 +2404,14 @@ static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
 			num_cxn_wrb--;
 		}
 	}
+	return 0;
+init_wrb_hndl_failed:
+	for (j = index; j > 0; j--) {
+		pwrb_context = &phwi_ctrlr->wrb_context[j];
+		kfree(pwrb_context->pwrb_handle_base);
+		kfree(pwrb_context->pwrb_handle_basestd);
+	}
+	return -ENOMEM;
 }
 
 static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
@@ -2450,7 +2420,7 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 	struct hba_parameters *p = &phba->params;
 	struct hwi_async_pdu_context *pasync_ctx;
 	struct async_pdu_handle *pasync_header_h, *pasync_data_h;
-	unsigned int index;
+	unsigned int index, idx, num_per_mem, num_async_data;
 	struct be_mem_descriptor *mem_descr;
 
 	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
@@ -2462,10 +2432,8 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 	pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
 	memset(pasync_ctx, 0, sizeof(*pasync_ctx));
 
-	pasync_ctx->async_header.num_entries = p->asyncpdus_per_ctrl;
-	pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
-	pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
-	pasync_ctx->async_data.num_entries = p->asyncpdus_per_ctrl;
+	pasync_ctx->num_entries = p->asyncpdus_per_ctrl;
+	pasync_ctx->buffer_size = p->defpdu_hdr_sz;
 
 	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
 	mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
@@ -2510,19 +2478,6 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 	pasync_ctx->async_header.writables = 0;
 	INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
 
-	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
-	mem_descr += HWI_MEM_ASYNC_DATA_BUF;
-	if (mem_descr->mem_array[0].virtual_address) {
-		SE_DEBUG(DBG_LVL_8,
-			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
-			 "va=%p\n", mem_descr->mem_array[0].virtual_address);
-	} else
-		shost_printk(KERN_WARNING, phba->shost,
-			     "No Virtual address\n");
-	pasync_ctx->async_data.va_base =
-		mem_descr->mem_array[0].virtual_address;
-	pasync_ctx->async_data.pa_base.u.a64.address =
-		mem_descr->mem_array[0].bus_address.u.a64.address;
 
 	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
 	mem_descr += HWI_MEM_ASYNC_DATA_RING;
@@ -2553,6 +2508,25 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 	pasync_data_h =
 	    (struct async_pdu_handle *)pasync_ctx->async_data.handle_base;
 
+	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+	mem_descr += HWI_MEM_ASYNC_DATA_BUF;
+	if (mem_descr->mem_array[0].virtual_address) {
+		SE_DEBUG(DBG_LVL_8,
+			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
+			 "va=%p\n", mem_descr->mem_array[0].virtual_address);
+	} else
+		shost_printk(KERN_WARNING, phba->shost,
+			     "No Virtual address\n");
+	idx = 0;
+	pasync_ctx->async_data.va_base =
+		mem_descr->mem_array[idx].virtual_address;
+	pasync_ctx->async_data.pa_base.u.a64.address =
+		mem_descr->mem_array[idx].bus_address.u.a64.address;
+
+	num_async_data = ((mem_descr->mem_array[idx].size) /
+			phba->params.defpdu_data_sz);
+	num_per_mem = 0;
+
 	for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
 		pasync_header_h->cri = -1;
 		pasync_header_h->index = (char)index;
@@ -2578,14 +2552,29 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 		pasync_data_h->cri = -1;
 		pasync_data_h->index = (char)index;
 		INIT_LIST_HEAD(&pasync_data_h->link);
+
+		if (!num_async_data) {
+			num_per_mem = 0;
+			idx++;
+			pasync_ctx->async_data.va_base =
+				mem_descr->mem_array[idx].virtual_address;
+			pasync_ctx->async_data.pa_base.u.a64.address =
+				mem_descr->mem_array[idx].
+				bus_address.u.a64.address;
+
+			num_async_data = ((mem_descr->mem_array[idx].size) /
+					phba->params.defpdu_data_sz);
+		}
 		pasync_data_h->pbuffer =
 			(void *)((unsigned long)
 			(pasync_ctx->async_data.va_base) +
-			(p->defpdu_data_sz * index));
+			(p->defpdu_data_sz * num_per_mem));
 
 		pasync_data_h->pa.u.a64.address =
 		    pasync_ctx->async_data.pa_base.u.a64.address +
-		    (p->defpdu_data_sz * index);
+		    (p->defpdu_data_sz * num_per_mem);
+		num_per_mem++;
+		num_async_data--;
 
 		list_add_tail(&pasync_data_h->link,
 			      &pasync_ctx->async_data.free_list);
@@ -2913,9 +2902,11 @@ beiscsi_post_pages(struct beiscsi_hba *phba)
 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
 {
 	struct be_dma_mem *mem = &q->dma_mem;
-	if (mem->va)
+	if (mem->va) {
 		pci_free_consistent(phba->pcidev, mem->size,
 			mem->va, mem->dma);
+		mem->va = NULL;
+	}
 }
 
 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
@@ -3215,7 +3206,7 @@ static int hwi_init_port(struct beiscsi_hba *phba)
 error:
 	shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed");
 	hwi_cleanup(phba);
-	return -ENOMEM;
+	return status;
 }
 
 static int hwi_init_controller(struct beiscsi_hba *phba)
@@ -3236,7 +3227,9 @@ static int hwi_init_controller(struct beiscsi_hba *phba)
 	}
 
 	iscsi_init_global_templates(phba);
-	beiscsi_init_wrb_handle(phba);
+	if (beiscsi_init_wrb_handle(phba))
+		return -ENOMEM;
+
 	hwi_init_async_pdu_ctx(phba);
 	if (hwi_init_port(phba) != 0) {
 		shost_printk(KERN_ERR, phba->shost,
@@ -3288,7 +3281,7 @@ static int beiscsi_init_controller(struct beiscsi_hba *phba)
 
 free_init:
 	beiscsi_free_mem(phba);
-	return -ENOMEM;
+	return ret;
 }
 
 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
@@ -3475,8 +3468,8 @@ static void hwi_disable_intr(struct beiscsi_hba *phba)
 
 static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
 {
-	struct be_cmd_resp_get_boot_target *boot_resp;
-	struct be_cmd_resp_get_session *session_resp;
+	struct be_cmd_get_boot_target_resp *boot_resp;
+	struct be_cmd_get_session_resp *session_resp;
 	struct be_mcc_wrb *wrb;
 	struct be_dma_mem nonemb_cmd;
 	unsigned int tag, wrb_num;
@@ -3484,9 +3477,9 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
 	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
 	int ret = -ENOMEM;
 
-	tag = beiscsi_get_boot_target(phba);
+	tag = mgmt_get_boot_target(phba);
 	if (!tag) {
-		SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed\n");
+		SE_DEBUG(DBG_LVL_1, "beiscsi_get_boot_info Failed\n");
 		return -EAGAIN;
 	} else
 		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
@@ -3496,7 +3489,7 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
 	extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
 	status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
 	if (status || extd_status) {
-		SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed"
+		SE_DEBUG(DBG_LVL_1, "beiscsi_get_boot_info Failed"
 			 " status = %d extd_status = %d\n",
 			 status, extd_status);
 		free_mcc_tag(&phba->ctrl, tag);
@@ -3522,8 +3515,8 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
 	}
 
 	memset(nonemb_cmd.va, 0, sizeof(*session_resp));
-	tag = beiscsi_get_session_info(phba,
-			boot_resp->boot_session_handle, &nonemb_cmd);
+	tag = mgmt_get_session_info(phba, boot_resp->boot_session_handle,
+				    &nonemb_cmd);
 	if (!tag) {
 		SE_DEBUG(DBG_LVL_1, "beiscsi_get_session_info"
 			 " Failed\n");
@@ -3696,6 +3689,57 @@ static void beiscsi_clean_port(struct beiscsi_hba *phba)
 	kfree(phba->ep_array);
 }
 
+static void beiscsi_cleanup_task(struct iscsi_task *task)
+{
+	struct beiscsi_io_task *io_task = task->dd_data;
+	struct iscsi_conn *conn = task->conn;
+	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
+	struct beiscsi_hba *phba = beiscsi_conn->phba;
+	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
+	struct hwi_wrb_context *pwrb_context;
+	struct hwi_controller *phwi_ctrlr;
+
+	phwi_ctrlr = phba->phwi_ctrlr;
+	pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
+			- phba->fw_config.iscsi_cid_start];
+
+	if (io_task->cmd_bhs) {
+		pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
+			      io_task->bhs_pa.u.a64.address);
+		io_task->cmd_bhs = NULL;
+	}
+
+	if (task->sc) {
+		if (io_task->pwrb_handle) {
+			free_wrb_handle(phba, pwrb_context,
+					io_task->pwrb_handle);
+			io_task->pwrb_handle = NULL;
+		}
+
+		if (io_task->psgl_handle) {
+			spin_lock(&phba->io_sgl_lock);
+			free_io_sgl_handle(phba, io_task->psgl_handle);
+			spin_unlock(&phba->io_sgl_lock);
+			io_task->psgl_handle = NULL;
+		}
+	} else {
+		if (!beiscsi_conn->login_in_progress) {
+			if (io_task->pwrb_handle) {
+				free_wrb_handle(phba, pwrb_context,
+						io_task->pwrb_handle);
+				io_task->pwrb_handle = NULL;
+			}
+			if (io_task->psgl_handle) {
+				spin_lock(&phba->mgmt_sgl_lock);
+				free_mgmt_sgl_handle(phba,
+						     io_task->psgl_handle);
+				spin_unlock(&phba->mgmt_sgl_lock);
+				io_task->psgl_handle = NULL;
+			}
+		}
+	}
+}
+
 void
 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
 			struct beiscsi_offload_params *params)
@@ -3704,12 +3748,19 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
 	struct iscsi_target_context_update_wrb *pwrb = NULL;
 	struct be_mem_descriptor *mem_descr;
 	struct beiscsi_hba *phba = beiscsi_conn->phba;
+	struct iscsi_task *task = beiscsi_conn->task;
+	struct iscsi_session *session = task->conn->session;
 	u32 doorbell = 0;
 
 	/*
 	 * We can always use 0 here because it is reserved by libiscsi for
 	 * login/startup related tasks.
 	 */
+	beiscsi_conn->login_in_progress = 0;
+	spin_lock_bh(&session->lock);
+	beiscsi_cleanup_task(task);
+	spin_unlock_bh(&session->lock);
+
 	pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
 				       phba->fw_config.iscsi_cid_start));
 	pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
@@ -3823,7 +3874,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
 	task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
 	task->hdr_max = sizeof(struct be_cmd_bhs);
 	io_task->psgl_handle = NULL;
-	io_task->psgl_handle = NULL;
+	io_task->pwrb_handle = NULL;
 
 	if (task->sc) {
 		spin_lock(&phba->io_sgl_lock);
@@ -3865,6 +3916,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
 				io_task->pwrb_handle =
 					beiscsi_conn->plogin_wrb_handle;
 			}
+			beiscsi_conn->task = task;
 		} else {
 			spin_lock(&phba->mgmt_sgl_lock);
 			io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
@@ -3907,53 +3959,11 @@ free_hndls:
 	io_task->pwrb_handle = NULL;
 	pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
 		      io_task->bhs_pa.u.a64.address);
+	io_task->cmd_bhs = NULL;
 	SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed\n");
 	return -ENOMEM;
 }
 
-static void beiscsi_cleanup_task(struct iscsi_task *task)
-{
-	struct beiscsi_io_task *io_task = task->dd_data;
-	struct iscsi_conn *conn = task->conn;
-	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
-	struct beiscsi_hba *phba = beiscsi_conn->phba;
-	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
-	struct hwi_wrb_context *pwrb_context;
-	struct hwi_controller *phwi_ctrlr;
-
-	phwi_ctrlr = phba->phwi_ctrlr;
-	pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
-			- phba->fw_config.iscsi_cid_start];
-	if (io_task->pwrb_handle) {
-		free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
-		io_task->pwrb_handle = NULL;
-	}
-
-	if (io_task->cmd_bhs) {
-		pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
-			      io_task->bhs_pa.u.a64.address);
-	}
-
-	if (task->sc) {
-		if (io_task->psgl_handle) {
-			spin_lock(&phba->io_sgl_lock);
-			free_io_sgl_handle(phba, io_task->psgl_handle);
-			spin_unlock(&phba->io_sgl_lock);
-			io_task->psgl_handle = NULL;
-		}
-	} else {
-		if (task->hdr &&
-		   ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN))
-			return;
-		if (io_task->psgl_handle) {
-			spin_lock(&phba->mgmt_sgl_lock);
-			free_mgmt_sgl_handle(phba, io_task->psgl_handle);
-			spin_unlock(&phba->mgmt_sgl_lock);
-			io_task->psgl_handle = NULL;
-		}
-	}
-}
-
 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
 			  unsigned int num_sg, unsigned int xferlen,
 			  unsigned int writedir)
@@ -3993,7 +4003,8 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
 	       &io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));
 
 	AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
-		cpu_to_be16(*(unsigned short *)&io_task->cmd_bhs->iscsi_hdr.lun));
+		      cpu_to_be16(*(unsigned short *)
+				  &io_task->cmd_bhs->iscsi_hdr.lun));
 	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
 	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
 		      io_task->pwrb_handle->wrb_index);
@@ -4126,6 +4137,76 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
 	return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
 }
 
+/**
+ * beiscsi_bsg_request - handle bsg request from ISCSI transport
+ * @job: job to handle
+ */
+static int beiscsi_bsg_request(struct bsg_job *job)
+{
+	struct Scsi_Host *shost;
+	struct beiscsi_hba *phba;
+	struct iscsi_bsg_request *bsg_req = job->request;
+	int rc = -EINVAL;
+	unsigned int tag;
+	struct be_dma_mem nonemb_cmd;
+	struct be_cmd_resp_hdr *resp;
+	struct iscsi_bsg_reply *bsg_reply = job->reply;
+	unsigned short status, extd_status;
+
+	shost = iscsi_job_to_shost(job);
+	phba = iscsi_host_priv(shost);
+
+	switch (bsg_req->msgcode) {
+	case ISCSI_BSG_HST_VENDOR:
+		nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
+					job->request_payload.payload_len,
+					&nonemb_cmd.dma);
+		if (nonemb_cmd.va == NULL) {
+			SE_DEBUG(DBG_LVL_1, "Failed to allocate memory for "
+				 "beiscsi_bsg_request\n");
+			return -EIO;
+		}
+		tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job,
+						  &nonemb_cmd);
+		if (!tag) {
+			SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed\n");
+			pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+					    nonemb_cmd.va, nonemb_cmd.dma);
+			return -EAGAIN;
+		} else
+			wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+						 phba->ctrl.mcc_numtag[tag]);
+		extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
+		status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
+		free_mcc_tag(&phba->ctrl, tag);
+		resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va;
+		sg_copy_from_buffer(job->reply_payload.sg_list,
+				    job->reply_payload.sg_cnt,
+				    nonemb_cmd.va, (resp->response_length
+				    + sizeof(*resp)));
+		bsg_reply->reply_payload_rcv_len = resp->response_length;
+		bsg_reply->result = status;
+		bsg_job_done(job, bsg_reply->result,
+			     bsg_reply->reply_payload_rcv_len);
+		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+				    nonemb_cmd.va, nonemb_cmd.dma);
+		if (status || extd_status) {
+			SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed"
+				 " status = %d extd_status = %d\n",
+				 status, extd_status);
+			return -EIO;
+		}
+		break;
+
+	default:
+		SE_DEBUG(DBG_LVL_1, "Unsupported bsg command: 0x%x\n",
+			 bsg_req->msgcode);
+		break;
+	}
+
+	return rc;
+}
+
 static void beiscsi_quiesce(struct beiscsi_hba *phba)
 {
 	struct hwi_controller *phwi_ctrlr;
@@ -4183,6 +4264,7 @@ static void beiscsi_remove(struct pci_dev *pcidev)
 		return;
 	}
 
+	beiscsi_destroy_def_ifaces(phba);
 	beiscsi_quiesce(phba);
 	iscsi_boot_destroy_kset(phba->boot_kset);
 	iscsi_host_remove(phba->shost);
@@ -4267,8 +4349,11 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
 	phba->num_cpus = num_cpus;
 	SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", phba->num_cpus);
 
-	if (enable_msix)
+	if (enable_msix) {
 		beiscsi_msix_enable(phba);
+		if (!phba->msix_enabled)
+			phba->num_cpus = 1;
+	}
 	ret = be_ctrl_init(phba, pcidev);
 	if (ret) {
 		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
@@ -4366,8 +4451,9 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
 	 * iscsi boot.
 	 */
 		shost_printk(KERN_ERR, phba->shost, "Could not set up "
-			     "iSCSI boot info.");
+			     "iSCSI boot info.\n");
 
+	beiscsi_create_def_ifaces(phba);
 	SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED\n\n\n");
 	return 0;
 
@@ -4418,6 +4504,8 @@ struct iscsi_transport beiscsi_iscsi_transport = {
 	.bind_conn = beiscsi_conn_bind,
 	.destroy_conn = iscsi_conn_teardown,
 	.attr_is_visible = be2iscsi_attr_is_visible,
+	.set_iface_param = be2iscsi_iface_set_param,
+	.get_iface_param = be2iscsi_iface_get_param,
 	.set_param = beiscsi_set_param,
 	.get_conn_param = iscsi_conn_get_param,
 	.get_session_param = iscsi_session_get_param,
@@ -4435,6 +4523,7 @@ struct iscsi_transport beiscsi_iscsi_transport = {
 	.ep_poll = beiscsi_ep_poll,
 	.ep_disconnect = beiscsi_ep_disconnect,
 	.session_recovery_timedout = iscsi_session_recovery_timedout,
+	.bsg_request = beiscsi_bsg_request,
 };
 
 static struct pci_driver beiscsi_pci_driver = {
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index b4a06d5e5f9e..40fea6ec879c 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -34,9 +34,9 @@
 
 #include "be.h"
 #define DRV_NAME		"be2iscsi"
-#define BUILD_STR		"4.1.239.0"
-#define BE_NAME			"ServerEngines BladeEngine2" \
-				"Linux iSCSI Driver version" BUILD_STR
+#define BUILD_STR		"4.2.162.0"
+#define BE_NAME			"Emulex OneConnect" \
+				"Open-iSCSI Driver version" BUILD_STR
 #define DRV_DESC		BE_NAME " " "Driver"
 
 #define BE_VENDOR_ID		0x19A2
@@ -316,6 +316,8 @@ struct beiscsi_hba {
 	struct iscsi_endpoint **ep_array;
 	struct iscsi_boot_kset *boot_kset;
 	struct Scsi_Host *shost;
+	struct iscsi_iface *ipv4_iface;
+	struct iscsi_iface *ipv6_iface;
 	struct {
 		/**
 		 * group together since they are used most frequently
@@ -345,7 +347,7 @@ struct beiscsi_hba {
 	struct work_struct work_cqs;	/* The work being queued */
 	struct be_ctrl_info ctrl;
 	unsigned int generation;
-	unsigned int read_mac_address;
+	unsigned int interface_handle;
 	struct mgmt_session_info boot_sess;
 	struct invalidate_command_table inv_tbl[128];
 
@@ -525,8 +527,6 @@ struct hwi_async_pdu_context {
 
 		unsigned int free_entries;
 		unsigned int busy_entries;
-		unsigned int buffer_size;
-		unsigned int num_entries;
 
 		struct list_head free_list;
 	} async_header;
@@ -543,11 +543,12 @@ struct hwi_async_pdu_context {
 
 		unsigned int free_entries;
 		unsigned int busy_entries;
-		unsigned int buffer_size;
 		struct list_head free_list;
-		unsigned int num_entries;
 	} async_data;
 
+	unsigned int buffer_size;
+	unsigned int num_entries;
+
 	/**
 	 * This is a varying size list! Do not add anything
 	 * after this entry!!
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 44762cfa3e12..01bb04cd9e75 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -17,15 +17,17 @@
  * Costa Mesa, CA 92626
  */
 
+#include <linux/bsg-lib.h>
+#include <scsi/scsi_transport_iscsi.h>
+#include <scsi/scsi_bsg_iscsi.h>
 #include "be_mgmt.h"
 #include "be_iscsi.h"
-#include <scsi/scsi_transport_iscsi.h>
 
-unsigned int beiscsi_get_boot_target(struct beiscsi_hba *phba)
+unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba)
 {
 	struct be_ctrl_info *ctrl = &phba->ctrl;
 	struct be_mcc_wrb *wrb;
-	struct be_cmd_req_get_mac_addr *req;
+	struct be_cmd_get_boot_target_req *req;
 	unsigned int tag = 0;
 
 	SE_DEBUG(DBG_LVL_8, "In bescsi_get_boot_target\n");
@@ -42,22 +44,22 @@ unsigned int beiscsi_get_boot_target(struct beiscsi_hba *phba)
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
 			   OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET,
-			   sizeof(*req));
+			   sizeof(struct be_cmd_get_boot_target_resp));
 
 	be_mcc_notify(phba);
 	spin_unlock(&ctrl->mbox_lock);
 	return tag;
 }
 
-unsigned int beiscsi_get_session_info(struct beiscsi_hba *phba,
+unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
 				   u32 boot_session_handle,
 				   struct be_dma_mem *nonemb_cmd)
 {
 	struct be_ctrl_info *ctrl = &phba->ctrl;
 	struct be_mcc_wrb *wrb;
 	unsigned int tag = 0;
-	struct be_cmd_req_get_session *req;
-	struct be_cmd_resp_get_session *resp;
+	struct be_cmd_get_session_req *req;
+	struct be_cmd_get_session_resp *resp;
 	struct be_sge *sge;
 
 	SE_DEBUG(DBG_LVL_8, "In beiscsi_get_session_info\n");
@@ -187,6 +189,72 @@ int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
 	return status;
 }
 
+unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
+					 struct beiscsi_hba *phba,
+					 struct bsg_job *job,
+					 struct be_dma_mem *nonemb_cmd)
+{
+	struct be_cmd_resp_hdr *resp;
+	struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
+	struct be_sge *mcc_sge = nonembedded_sgl(wrb);
+	unsigned int tag = 0;
+	struct iscsi_bsg_request *bsg_req = job->request;
+	struct be_bsg_vendor_cmd *req = nonemb_cmd->va;
+	unsigned short region, sector_size, sector, offset;
+
+	nonemb_cmd->size = job->request_payload.payload_len;
+	memset(nonemb_cmd->va, 0, nonemb_cmd->size);
+	resp = nonemb_cmd->va;
+	region = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+	sector_size = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
+	sector = bsg_req->rqst_data.h_vendor.vendor_cmd[3];
+	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[4];
+	req->region = region;
+	req->sector = sector;
+	req->offset = offset;
+	spin_lock(&ctrl->mbox_lock);
+	memset(wrb, 0, sizeof(*wrb));
+
+	switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) {
+	case BEISCSI_WRITE_FLASH:
+		offset = sector * sector_size + offset;
+		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+				   OPCODE_COMMON_WRITE_FLASH, sizeof(*req));
+		sg_copy_to_buffer(job->request_payload.sg_list,
+				  job->request_payload.sg_cnt,
+				  nonemb_cmd->va + offset, job->request_len);
+		break;
+	case BEISCSI_READ_FLASH:
+		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+				   OPCODE_COMMON_READ_FLASH, sizeof(*req));
+		break;
+	default:
+		shost_printk(KERN_WARNING, phba->shost,
+			     "Unsupported cmd = 0x%x\n\n", bsg_req->rqst_data.
+			     h_vendor.vendor_cmd[0]);
+		spin_unlock(&ctrl->mbox_lock);
+		return -ENOSYS;
+	}
+
+	tag = alloc_mcc_tag(phba);
+	if (!tag) {
+		spin_unlock(&ctrl->mbox_lock);
+		return tag;
+	}
+
+	be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false,
+			   job->request_payload.sg_cnt);
+	mcc_sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
+	mcc_sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
+	mcc_sge->len = cpu_to_le32(nonemb_cmd->size);
+	wrb->tag0 |= tag;
+
+	be_mcc_notify(phba);
+
+	spin_unlock(&ctrl->mbox_lock);
+	return tag;
+}
+
 int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
 {
 	struct be_ctrl_info *ctrl = &phba->ctrl;
@@ -328,7 +396,6 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
 			struct sockaddr *dst_addr,
 			struct beiscsi_endpoint *beiscsi_ep,
 			struct be_dma_mem *nonemb_cmd)
-
 {
 	struct hwi_controller *phwi_ctrlr;
 	struct hwi_context_memory *phwi_context;
@@ -374,17 +441,17 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
 	if (dst_addr->sa_family == PF_INET) {
 		__be32 s_addr = daddr_in->sin_addr.s_addr;
 		req->ip_address.ip_type = BE2_IPV4;
-		req->ip_address.ip_address[0] = s_addr & 0x000000ff;
-		req->ip_address.ip_address[1] = (s_addr & 0x0000ff00) >> 8;
-		req->ip_address.ip_address[2] = (s_addr & 0x00ff0000) >> 16;
-		req->ip_address.ip_address[3] = (s_addr & 0xff000000) >> 24;
+		req->ip_address.addr[0] = s_addr & 0x000000ff;
+		req->ip_address.addr[1] = (s_addr & 0x0000ff00) >> 8;
+		req->ip_address.addr[2] = (s_addr & 0x00ff0000) >> 16;
+		req->ip_address.addr[3] = (s_addr & 0xff000000) >> 24;
 		req->tcp_port = ntohs(daddr_in->sin_port);
 		beiscsi_ep->dst_addr = daddr_in->sin_addr.s_addr;
 		beiscsi_ep->dst_tcpport = ntohs(daddr_in->sin_port);
 		beiscsi_ep->ip_type = BE2_IPV4;
 	} else if (dst_addr->sa_family == PF_INET6) {
 		req->ip_address.ip_type = BE2_IPV6;
-		memcpy(&req->ip_address.ip_address,
+		memcpy(&req->ip_address.addr,
 		       &daddr_in6->sin6_addr.in6_u.u6_addr8, 16);
 		req->tcp_port = ntohs(daddr_in6->sin6_port);
 		beiscsi_ep->dst_tcpport = ntohs(daddr_in6->sin6_port);
@@ -419,14 +486,399 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
 	return tag;
 }
 
-unsigned int be_cmd_get_mac_addr(struct beiscsi_hba *phba)
+unsigned int mgmt_get_all_if_id(struct beiscsi_hba *phba)
 {
 	struct be_ctrl_info *ctrl = &phba->ctrl;
-	struct be_mcc_wrb *wrb;
-	struct be_cmd_req_get_mac_addr *req;
+	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+	struct be_cmd_get_all_if_id_req *req = embedded_payload(wrb);
+	struct be_cmd_get_all_if_id_req *pbe_allid = req;
+	int status = 0;
+
+	memset(wrb, 0, sizeof(*wrb));
+
+	spin_lock(&ctrl->mbox_lock);
+
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+			   OPCODE_COMMON_ISCSI_NTWK_GET_ALL_IF_ID,
+			   sizeof(*req));
+	status = be_mbox_notify(ctrl);
+	if (!status)
+		phba->interface_handle = pbe_allid->if_hndl_list[0];
+	else {
+		shost_printk(KERN_WARNING, phba->shost,
+			     "Failed in mgmt_get_all_if_id\n");
+	}
+	spin_unlock(&ctrl->mbox_lock);
+
+	return status;
+}
+
+static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
+				struct be_dma_mem *nonemb_cmd, void *resp_buf,
+				int resp_buf_len)
+{
+	struct be_ctrl_info *ctrl = &phba->ctrl;
+	struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
+	unsigned short status, extd_status;
+	struct be_sge *sge;
+	unsigned int tag;
+	int rc = 0;
+
+	spin_lock(&ctrl->mbox_lock);
+	tag = alloc_mcc_tag(phba);
+	if (!tag) {
+		spin_unlock(&ctrl->mbox_lock);
+		rc = -ENOMEM;
+		goto free_cmd;
+	}
+	memset(wrb, 0, sizeof(*wrb));
+	wrb->tag0 |= tag;
+	sge = nonembedded_sgl(wrb);
+
+	be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1);
+	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
+	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
+	sge->len = cpu_to_le32(nonemb_cmd->size);
+
+	be_mcc_notify(phba);
+	spin_unlock(&ctrl->mbox_lock);
+
+	wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+				 phba->ctrl.mcc_numtag[tag]);
+
+	extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
+	status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
+	if (status || extd_status) {
+		SE_DEBUG(DBG_LVL_1,
+			 "mgmt_exec_nonemb_cmd Failed status = %d"
+			 "extd_status = %d\n", status, extd_status);
+		rc = -EIO;
+		goto free_tag;
+	}
+
+	if (resp_buf)
+		memcpy(resp_buf, nonemb_cmd->va, resp_buf_len);
+
+free_tag:
+	free_mcc_tag(&phba->ctrl, tag);
+free_cmd:
+	pci_free_consistent(ctrl->pdev, nonemb_cmd->size,
+			    nonemb_cmd->va, nonemb_cmd->dma);
+	return rc;
+}
+
+static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd,
+			       int iscsi_cmd, int size)
+{
+	cmd->va = pci_alloc_consistent(phba->ctrl.pdev, sizeof(size),
+				       &cmd->dma);
+	if (!cmd->va) {
+		SE_DEBUG(DBG_LVL_1, "Failed to allocate memory for if info\n");
+		return -ENOMEM;
+	}
+	memset(cmd->va, 0, sizeof(size));
+	cmd->size = size;
+	be_cmd_hdr_prepare(cmd->va, CMD_SUBSYSTEM_ISCSI, iscsi_cmd, size);
+	return 0;
+}
+
+static int
+mgmt_static_ip_modify(struct beiscsi_hba *phba,
+		      struct be_cmd_get_if_info_resp *if_info,
+		      struct iscsi_iface_param_info *ip_param,
+		      struct iscsi_iface_param_info *subnet_param,
+		      uint32_t ip_action)
+{
+	struct be_cmd_set_ip_addr_req *req;
+	struct be_dma_mem nonemb_cmd;
+	uint32_t ip_type;
+	int rc;
+
+	rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+				 OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR,
+				 sizeof(*req));
+	if (rc)
+		return rc;
+
+	ip_type = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ?
+		BE2_IPV6 : BE2_IPV4 ;
+
+	req = nonemb_cmd.va;
+	req->ip_params.record_entry_count = 1;
+	req->ip_params.ip_record.action = ip_action;
+	req->ip_params.ip_record.interface_hndl =
+		phba->interface_handle;
+	req->ip_params.ip_record.ip_addr.size_of_structure =
+		sizeof(struct be_ip_addr_subnet_format);
+	req->ip_params.ip_record.ip_addr.ip_type = ip_type;
+
+	if (ip_action == IP_ACTION_ADD) {
+		memcpy(req->ip_params.ip_record.ip_addr.addr, ip_param->value,
+		       ip_param->len);
+
+		if (subnet_param)
+			memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
+			       subnet_param->value, subnet_param->len);
+	} else {
+		memcpy(req->ip_params.ip_record.ip_addr.addr,
+		       if_info->ip_addr.addr, ip_param->len);
+
+		memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
+		       if_info->ip_addr.subnet_mask, ip_param->len);
+	}
+
+	rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+	if (rc < 0)
+		shost_printk(KERN_WARNING, phba->shost,
+			     "Failed to Modify existing IP Address\n");
+	return rc;
+}
+
+static int mgmt_modify_gateway(struct beiscsi_hba *phba, uint8_t *gt_addr,
+			       uint32_t gtway_action, uint32_t param_len)
+{
+	struct be_cmd_set_def_gateway_req *req;
+	struct be_dma_mem nonemb_cmd;
+	int rt_val;
+
+
+	rt_val = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+				OPCODE_COMMON_ISCSI_NTWK_MODIFY_DEFAULT_GATEWAY,
+				sizeof(*req));
+	if (rt_val)
+		return rt_val;
+
+	req = nonemb_cmd.va;
+	req->action = gtway_action;
+	req->ip_addr.ip_type = BE2_IPV4;
+
+	memcpy(req->ip_addr.addr, gt_addr, param_len);
+
+	return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+}
+
+int mgmt_set_ip(struct beiscsi_hba *phba,
+		struct iscsi_iface_param_info *ip_param,
+		struct iscsi_iface_param_info *subnet_param,
+		uint32_t boot_proto)
+{
+	struct be_cmd_get_def_gateway_resp gtway_addr_set;
+	struct be_cmd_get_if_info_resp if_info;
+	struct be_cmd_set_dhcp_req *dhcpreq;
+	struct be_cmd_rel_dhcp_req *reldhcp;
+	struct be_dma_mem nonemb_cmd;
+	uint8_t *gtway_addr;
+	uint32_t ip_type;
+	int rc;
+
+	if (mgmt_get_all_if_id(phba))
+		return -EIO;
+
+	memset(&if_info, 0, sizeof(if_info));
+	ip_type = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ?
+		BE2_IPV6 : BE2_IPV4 ;
+
+	rc = mgmt_get_if_info(phba, ip_type, &if_info);
+	if (rc)
+		return rc;
+
+	if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
+		if (if_info.dhcp_state) {
+			shost_printk(KERN_WARNING, phba->shost,
+				     "DHCP Already Enabled\n");
+			return 0;
+		}
+		/* The ip_param->len is 1 in DHCP case. Setting
+		   proper IP len as this it is used while
+		   freeing the Static IP.
+		 */
+		ip_param->len = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ?
+				IP_V6_LEN : IP_V4_LEN;
+
+	} else {
+		if (if_info.dhcp_state) {
+
+			memset(&if_info, 0, sizeof(if_info));
+			rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+				OPCODE_COMMON_ISCSI_NTWK_REL_STATELESS_IP_ADDR,
+				sizeof(*reldhcp));
+
+			if (rc)
+				return rc;
+
+			reldhcp = nonemb_cmd.va;
+			reldhcp->interface_hndl = phba->interface_handle;
+			reldhcp->ip_type = ip_type;
+
+			rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+			if (rc < 0) {
+				shost_printk(KERN_WARNING, phba->shost,
+					     "Failed to Delete existing dhcp\n");
+				return rc;
+			}
+		}
+	}
+
+	/* Delete the Static IP Set */
+	if (if_info.ip_addr.addr[0]) {
+		rc = mgmt_static_ip_modify(phba, &if_info, ip_param, NULL,
+					   IP_ACTION_DEL);
+		if (rc)
+			return rc;
+	}
+
+	/* Delete the Gateway settings if mode change is to DHCP */
+	if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
+		memset(&gtway_addr_set, 0, sizeof(gtway_addr_set));
+		rc = mgmt_get_gateway(phba, BE2_IPV4, &gtway_addr_set);
+		if (rc) {
+			shost_printk(KERN_WARNING, phba->shost,
+				     "Failed to Get Gateway Addr\n");
+			return rc;
+		}
+
+		if (gtway_addr_set.ip_addr.addr[0]) {
+			gtway_addr = (uint8_t *)&gtway_addr_set.ip_addr.addr;
+			rc = mgmt_modify_gateway(phba, gtway_addr,
+						 IP_ACTION_DEL, IP_V4_LEN);
+
+			if (rc) {
+				shost_printk(KERN_WARNING, phba->shost,
+					     "Failed to clear Gateway Addr Set\n");
+				return rc;
+			}
+		}
+	}
+
+	/* Set Adapter to DHCP/Static Mode */
+	if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
+		rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+			OPCODE_COMMON_ISCSI_NTWK_CONFIG_STATELESS_IP_ADDR,
+			sizeof(*dhcpreq));
+		if (rc)
+			return rc;
+
+		dhcpreq = nonemb_cmd.va;
+		dhcpreq->flags = BLOCKING;
+		dhcpreq->retry_count = 1;
+		dhcpreq->interface_hndl = phba->interface_handle;
+		dhcpreq->ip_type = BE2_DHCP_V4;
+
+		return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+	} else {
+		return mgmt_static_ip_modify(phba, &if_info, ip_param,
+					     subnet_param, IP_ACTION_ADD);
+	}
+
+	return rc;
+}
+
+int mgmt_set_gateway(struct beiscsi_hba *phba,
+		     struct iscsi_iface_param_info *gateway_param)
+{
+	struct be_cmd_get_def_gateway_resp gtway_addr_set;
+	uint8_t *gtway_addr;
+	int rt_val;
+
+	memset(&gtway_addr_set, 0, sizeof(gtway_addr_set));
+	rt_val = mgmt_get_gateway(phba, BE2_IPV4, &gtway_addr_set);
+	if (rt_val) {
+		shost_printk(KERN_WARNING, phba->shost,
+			     "Failed to Get Gateway Addr\n");
+		return rt_val;
+	}
+
+	if (gtway_addr_set.ip_addr.addr[0]) {
+		gtway_addr = (uint8_t *)&gtway_addr_set.ip_addr.addr;
+		rt_val = mgmt_modify_gateway(phba, gtway_addr, IP_ACTION_DEL,
+					     gateway_param->len);
+		if (rt_val) {
+			shost_printk(KERN_WARNING, phba->shost,
+				     "Failed to clear Gateway Addr Set\n");
+			return rt_val;
+		}
+	}
+
+	gtway_addr = (uint8_t *)&gateway_param->value;
+	rt_val = mgmt_modify_gateway(phba, gtway_addr, IP_ACTION_ADD,
+				     gateway_param->len);
+
+	if (rt_val)
+		shost_printk(KERN_WARNING, phba->shost,
+			     "Failed to Set Gateway Addr\n");
+
+	return rt_val;
+}
+
+int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type,
+		     struct be_cmd_get_def_gateway_resp *gateway)
+{
+	struct be_cmd_get_def_gateway_req *req;
+	struct be_dma_mem nonemb_cmd;
+	int rc;
+
+	rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+				 OPCODE_COMMON_ISCSI_NTWK_GET_DEFAULT_GATEWAY,
+				 sizeof(*gateway));
+	if (rc)
+		return rc;
+
+	req = nonemb_cmd.va;
+	req->ip_type = ip_type;
+
+	return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, gateway,
+				    sizeof(*gateway));
+}
+
+int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type,
+		     struct be_cmd_get_if_info_resp *if_info)
+{
+	struct be_cmd_get_if_info_req *req;
+	struct be_dma_mem nonemb_cmd;
+	int rc;
+
+	if (mgmt_get_all_if_id(phba))
+		return -EIO;
+
+	rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+				 OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO,
+				 sizeof(*if_info));
+	if (rc)
+		return rc;
+
+	req = nonemb_cmd.va;
+	req->interface_hndl = phba->interface_handle;
+	req->ip_type = ip_type;
+
+	return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, if_info,
+				    sizeof(*if_info));
+}
+
+int mgmt_get_nic_conf(struct beiscsi_hba *phba,
+		      struct be_cmd_get_nic_conf_resp *nic)
+{
+	struct be_dma_mem nonemb_cmd;
+	int rc;
+
+	rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+				 OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG,
+				 sizeof(*nic));
+	if (rc)
+		return rc;
+
+	return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, nic, sizeof(*nic));
+}
+
+
+
+unsigned int be_cmd_get_initname(struct beiscsi_hba *phba)
+{
 	unsigned int tag = 0;
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_hba_name *req;
+	struct be_ctrl_info *ctrl = &phba->ctrl;
 
-	SE_DEBUG(DBG_LVL_8, "In be_cmd_get_mac_addr\n");
 	spin_lock(&ctrl->mbox_lock);
 	tag = alloc_mcc_tag(phba);
 	if (!tag) {
@@ -438,12 +890,38 @@ unsigned int be_cmd_get_mac_addr(struct beiscsi_hba *phba)
 	req = embedded_payload(wrb);
 	wrb->tag0 |= tag;
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
-	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
-			   OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG,
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
+			   OPCODE_ISCSI_INI_CFG_GET_HBA_NAME,
 			   sizeof(*req));
 
 	be_mcc_notify(phba);
 	spin_unlock(&ctrl->mbox_lock);
 	return tag;
 }
 
+unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba)
+{
+	unsigned int tag = 0;
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_ntwk_link_status_req *req;
+	struct be_ctrl_info *ctrl = &phba->ctrl;
+
+	spin_lock(&ctrl->mbox_lock);
+	tag = alloc_mcc_tag(phba);
+	if (!tag) {
+		spin_unlock(&ctrl->mbox_lock);
+		return tag;
+	}
+
+	wrb = wrb_from_mccq(phba);
+	req = embedded_payload(wrb);
+	wrb->tag0 |= tag;
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+			   OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
+			   sizeof(*req));
+
+	be_mcc_notify(phba);
+	spin_unlock(&ctrl->mbox_lock);
+	return tag;
+}
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index 08428824ace2..5c2e37693ca8 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -20,11 +20,16 @@
 #ifndef _BEISCSI_MGMT_
 #define _BEISCSI_MGMT_
 
-#include <linux/types.h>
-#include <linux/list.h>
+#include <scsi/scsi_bsg_iscsi.h>
 #include "be_iscsi.h"
 #include "be_main.h"
 
+#define IP_ACTION_ADD	0x01
+#define IP_ACTION_DEL	0x02
+
+#define IP_V6_LEN	16
+#define IP_V4_LEN	4
+
 /**
  * Pseudo amap definition in which each bit of the actual structure is defined
  * as a byte: used to calculate offset/shift/mask of each field
@@ -98,6 +103,10 @@ unsigned int mgmt_invalidate_icds(struct beiscsi_hba *phba,
 				  struct invalidate_command_table *inv_tbl,
 				  unsigned int num_invalidate, unsigned int cid,
 				  struct be_dma_mem *nonemb_cmd);
+unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
+					 struct beiscsi_hba *phba,
+					 struct bsg_job *job,
+					 struct be_dma_mem *nonemb_cmd);
 
 struct iscsi_invalidate_connection_params_in {
 	struct be_cmd_req_hdr hdr;
@@ -204,6 +213,13 @@ struct be_mgmt_controller_attributes_resp {
 	struct mgmt_controller_attributes params;
 } __packed;
 
+struct be_bsg_vendor_cmd {
+	struct be_cmd_req_hdr hdr;
+	unsigned short region;
+	unsigned short offset;
+	unsigned short sector;
+} __packed;
+
 /* configuration management */
 
 #define GET_MGMT_CONTROLLER_WS(phba)	(phba->pmgmt_ws)
@@ -219,12 +235,15 @@ struct be_mgmt_controller_attributes_resp {
 	/* the CMD_RESPONSE_HEADER */
 
 #define ISCSI_GET_PDU_TEMPLATE_ADDRESS(pc, pa) {\
 	pa->lo = phba->init_mem[ISCSI_MEM_GLOBAL_HEADER].mem_array[0].\
 					bus_address.u.a32.address_lo; \
 	pa->hi = phba->init_mem[ISCSI_MEM_GLOBAL_HEADER].mem_array[0].\
 					bus_address.u.a32.address_hi; \
 }
 
+#define BEISCSI_WRITE_FLASH	0
+#define BEISCSI_READ_FLASH	1
+
 struct beiscsi_endpoint {
 	struct beiscsi_hba *phba;
 	struct beiscsi_sess *sess;
@@ -248,4 +267,27 @@ unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba,
 					 unsigned short issue_reset,
 					 unsigned short savecfg_flag);
 
+int mgmt_set_ip(struct beiscsi_hba *phba,
+		struct iscsi_iface_param_info *ip_param,
+		struct iscsi_iface_param_info *subnet_param,
+		uint32_t boot_proto);
+
+unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba);
+
+unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
+				   u32 boot_session_handle,
+				   struct be_dma_mem *nonemb_cmd);
+
+int mgmt_get_nic_conf(struct beiscsi_hba *phba,
+		      struct be_cmd_get_nic_conf_resp *mac);
+
+int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type,
+		     struct be_cmd_get_if_info_resp *if_info);
+
+int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type,
+		     struct be_cmd_get_def_gateway_resp *gateway);
+
+int mgmt_set_gateway(struct beiscsi_hba *phba,
+		     struct iscsi_iface_param_info *gateway_param);
+
 #endif
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
index e75e07d25915..51c9e1345719 100644
--- a/drivers/scsi/bfa/bfa_fcs.h
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -799,9 +799,6 @@ struct bfad_port_s *bfa_fcb_lport_new(struct bfad_s *bfad,
 				      enum bfa_lport_role roles,
 				      struct bfad_vf_s *vf_drv,
 				      struct bfad_vport_s *vp_drv);
-void bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles,
-			  struct bfad_vf_s *vf_drv,
-			  struct bfad_vport_s *vp_drv);
 
 /*
  * vport callbacks
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 5d2a1307e5ce..937000db62a8 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -616,7 +616,7 @@ bfa_fcs_lport_online_actions(struct bfa_fcs_lport_s *port)
 	__port_action[port->fabric->fab_type].online(port);
 
 	wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
-	BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+	BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
 		"Logical port online: WWN = %s Role = %s\n",
 		lpwwn_buf, "Initiator");
 	bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_ONLINE);
@@ -639,12 +639,12 @@ bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port)
 	wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
 	if (bfa_sm_cmp_state(port->fabric,
 			bfa_fcs_fabric_sm_online) == BFA_TRUE) {
-		BFA_LOG(KERN_ERR, bfad, bfa_log_level,
+		BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
 		"Logical port lost fabric connectivity: WWN = %s Role = %s\n",
 		lpwwn_buf, "Initiator");
 		bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_DISCONNECT);
 	} else {
-		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+		BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
 		"Logical port taken offline: WWN = %s Role = %s\n",
 		lpwwn_buf, "Initiator");
 		bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_OFFLINE);
@@ -709,14 +709,10 @@ bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port)
 	bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_DELETE);
 
 	/* Base port will be deleted by the OS driver */
-	if (port->vport) {
-		bfa_fcb_lport_delete(port->fcs->bfad, port->port_cfg.roles,
-				port->fabric->vf_drv,
-				port->vport ? port->vport->vport_drv : NULL);
+	if (port->vport)
 		bfa_fcs_vport_delete_comp(port->vport);
-	} else {
+	else
 		bfa_wc_down(&port->fabric->wc);
-	}
 }
 
 
@@ -5714,17 +5710,23 @@ bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport)
 		(struct bfad_vport_s *)vport->vport_drv;
 
 	bfa_fcs_fabric_delvport(__vport_fabric(vport), vport);
+	bfa_lps_delete(vport->lps);
 
-	if (vport_drv->comp_del)
+	if (vport_drv->comp_del) {
 		complete(vport_drv->comp_del);
-	else
-		kfree(vport_drv);
-
-	bfa_lps_delete(vport->lps);
+		return;
+	}
+
+	/*
+	 * We queue the vport delete work to the IM work_q from here.
+	 * The memory for the bfad_vport_s is freed from the FC function
+	 * template vport_delete entry point.
+	 */
+	if (vport_drv)
+		bfad_im_port_delete(vport_drv->drv_port.bfad,
+				    &vport_drv->drv_port);
 }
 
-
-
 /*
  * fcs_vport_public FCS virtual port public interfaces
  */
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 404fd10ddb21..2e4b0be14a20 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -456,23 +456,6 @@ bfa_fcb_lport_new(struct bfad_s *bfad, struct bfa_fcs_lport_s *port,
 	return port_drv;
 }

-void
-bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles,
-		     struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv)
-{
-	struct bfad_port_s *port_drv;
-
-	/* this will be only called from rmmod context */
-	if (vp_drv && !vp_drv->comp_del) {
-		port_drv = (vp_drv) ? (&(vp_drv)->drv_port) :
-				((vf_drv) ? (&(vf_drv)->base_port) :
-				(&(bfad)->pport));
-		bfa_trc(bfad, roles);
-		if (roles & BFA_LPORT_ROLE_FCP_IM)
-			bfad_im_port_delete(bfad, port_drv);
-	}
-}
-
 /*
  * FCS RPORT alloc callback, after successful PLOGI by FCS
  */
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 7b1ecd2b3ffe..8b6c6bf7837e 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -497,6 +497,7 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
 	if (im_port->flags & BFAD_PORT_DELETE) {
 		bfad_scsi_host_free(bfad, im_port);
 		list_del(&vport->list_entry);
+		kfree(vport);
 		return 0;
 	}

@@ -758,25 +759,10 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
 	else if (!strcmp(model, "Brocade-804"))
 		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
 			"Brocade 8Gbps FC HBA for HP Bladesystem C-class");
-	else if (!strcmp(model, "Brocade-902") ||
-		!strcmp(model, "Brocade-1741"))
+	else if (!strcmp(model, "Brocade-1741"))
 		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
 			"Brocade 10Gbps CNA for Dell M-Series Blade Servers");
-	else if (strstr(model, "Brocade-1560")) {
-		if (nports == 1)
-			snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
-				"Brocade 16Gbps PCIe single port FC HBA");
-		else
-			snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
-				"Brocade 16Gbps PCIe dual port FC HBA");
-	} else if (strstr(model, "Brocade-1710")) {
-		if (nports == 1)
-			snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
-				"Brocade 10Gbps single port CNA");
-		else
-			snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
-				"Brocade 10Gbps dual port CNA");
-	} else if (strstr(model, "Brocade-1860")) {
+	else if (strstr(model, "Brocade-1860")) {
 		if (nports == 1 && bfa_ioc_is_cna(&bfad->bfa.ioc))
 			snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
 				"Brocade 10Gbps single port CNA");
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
index 495a841645f9..25093a04123b 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_constants.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
@@ -1,6 +1,6 @@
 /* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI
  *
- * Copyright (c) 2006 - 2011 Broadcom Corporation
+ * Copyright (c) 2006 - 2012 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
index 72118db89a20..dc0a08e69c82 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
@@ -1,6 +1,6 @@
 /* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI.
  *
- * Copyright (c) 2006 - 2011 Broadcom Corporation
+ * Copyright (c) 2006 - 2012 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 0bd70e80efe4..0c53c28dc3d3 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -1,6 +1,6 @@
 /* bnx2i.h: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2006 - 2011 Broadcom Corporation
+ * Copyright (c) 2006 - 2012 Broadcom Corporation
  * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
  * Copyright (c) 2007, 2008 Mike Christie
  *
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index f9d6f4129093..ece47e502282 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1,6 +1,6 @@
 /* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2006 - 2011 Broadcom Corporation
+ * Copyright (c) 2006 - 2012 Broadcom Corporation
  * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
  * Copyright (c) 2007, 2008 Mike Christie
  *
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 4927cca733d3..8b6816706ee5 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -1,6 +1,6 @@
 /* bnx2i.c: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2006 - 2011 Broadcom Corporation
+ * Copyright (c) 2006 - 2012 Broadcom Corporation
  * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
  * Copyright (c) 2007, 2008 Mike Christie
  *
@@ -18,8 +18,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
 static u32 adapter_count;

 #define DRV_MODULE_NAME		"bnx2i"
-#define DRV_MODULE_VERSION	"2.7.0.3"
-#define DRV_MODULE_RELDATE	"Jun 15, 2011"
+#define DRV_MODULE_VERSION	"2.7.2.2"
+#define DRV_MODULE_RELDATE	"Apr 25, 2012"

 static char version[] __devinitdata =
 		"Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 1a44b45e7bef..f8d516b53161 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1,7 +1,7 @@
 /*
  * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2006 - 2011 Broadcom Corporation
+ * Copyright (c) 2006 - 2012 Broadcom Corporation
  * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
  * Copyright (c) 2007, 2008 Mike Christie
  *
@@ -2244,6 +2244,7 @@ static struct scsi_host_template bnx2i_host_template = {
 	.eh_device_reset_handler = iscsi_eh_device_reset,
 	.eh_target_reset_handler = iscsi_eh_recover_target,
 	.change_queue_depth	= iscsi_change_queue_depth,
+	.target_alloc		= iscsi_target_alloc,
 	.can_queue		= 2048,
 	.max_sectors		= 127,
 	.cmd_per_lun		= 128,
diff --git a/drivers/scsi/bnx2i/bnx2i_sysfs.c b/drivers/scsi/bnx2i/bnx2i_sysfs.c
index 83a77f7244d2..c61cf7a43658 100644
--- a/drivers/scsi/bnx2i/bnx2i_sysfs.c
+++ b/drivers/scsi/bnx2i/bnx2i_sysfs.c
@@ -1,6 +1,6 @@
 /* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2004 - 2011 Broadcom Corporation
+ * Copyright (c) 2004 - 2012 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 04c5cea47a22..fda9cdea0e60 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -55,11 +55,16 @@
 #define ALUA_FAILOVER_TIMEOUT		(60 * HZ)
 #define ALUA_FAILOVER_RETRIES		5

+/* flags passed from user level */
+#define ALUA_OPTIMIZE_STPG		1
+
 struct alua_dh_data {
 	int			group_id;
 	int			rel_port;
 	int			tpgs;
 	int			state;
+	int			pref;
+	unsigned		flags; /* used for optimizing STPG */
 	unsigned char		inq[ALUA_INQUIRY_SIZE];
 	unsigned char		*buff;
 	int			bufflen;
@@ -554,14 +559,16 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
 	for (k = 4, ucp = h->buff + 4; k < len; k += off, ucp += off) {
 		if (h->group_id == (ucp[2] << 8) + ucp[3]) {
 			h->state = ucp[0] & 0x0f;
+			h->pref = ucp[0] >> 7;
 			valid_states = ucp[1];
 		}
 		off = 8 + (ucp[7] * 4);
 	}

 	sdev_printk(KERN_INFO, sdev,
-		    "%s: port group %02x state %c supports %c%c%c%c%c%c%c\n",
+		    "%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
 		    ALUA_DH_NAME, h->group_id, print_alua_state(h->state),
+		    h->pref ? "preferred" : "non-preferred",
 		    valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
 		    valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
 		    valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
@@ -621,6 +628,37 @@ static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)
 out:
 	return err;
 }
+/*
+ * alua_set_params - set/unset the optimize flag
+ * @sdev: device on the path to be activated
+ * params - parameters in the following format
+ *      "no_of_params\0param1\0param2\0param3\0...\0"
+ * For example, to set the flag pass the following parameters
+ * from multipath.conf
+ *     hardware_handler        "2 alua 1"
+ */
+static int alua_set_params(struct scsi_device *sdev, const char *params)
+{
+	struct alua_dh_data *h = get_alua_data(sdev);
+	unsigned int optimize = 0, argc;
+	const char *p = params;
+	int result = SCSI_DH_OK;
+
+	if ((sscanf(params, "%u", &argc) != 1) || (argc != 1))
+		return -EINVAL;
+
+	while (*p++)
+		;
+	if ((sscanf(p, "%u", &optimize) != 1) || (optimize > 1))
+		return -EINVAL;
+
+	if (optimize)
+		h->flags |= ALUA_OPTIMIZE_STPG;
+	else
+		h->flags &= ~ALUA_OPTIMIZE_STPG;
+
+	return result;
+}

 /*
  * alua_activate - activate a path
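The NUL-separated format documented in the comment above is easy to get wrong, so a minimal user-space sketch of the same walk may help; parse_optimize() and the sample buffer below are illustrative only, not driver API:

#include <stdio.h>

/* Walk "count\0flag\0" the way alua_set_params() does. */
static int parse_optimize(const char *params)
{
	unsigned int argc, optimize;
	const char *p = params;

	if (sscanf(params, "%u", &argc) != 1 || argc != 1)
		return -1;
	while (*p++)		/* skip the count and its NUL */
		;
	if (sscanf(p, "%u", &optimize) != 1 || optimize > 1)
		return -1;
	return (int)optimize;
}

int main(void)
{
	/* "1", NUL, "1": one parameter, value 1 (set the flag) */
	const char params[] = "1\0001";

	printf("optimize = %d\n", parse_optimize(params));
	return 0;
}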
@@ -637,14 +675,37 @@ static int alua_activate(struct scsi_device *sdev,
 {
 	struct alua_dh_data *h = get_alua_data(sdev);
 	int err = SCSI_DH_OK;
+	int stpg = 0;

 	err = alua_rtpg(sdev, h);
 	if (err != SCSI_DH_OK)
 		goto out;

-	if (h->tpgs & TPGS_MODE_EXPLICIT &&
-	    h->state != TPGS_STATE_OPTIMIZED &&
-	    h->state != TPGS_STATE_LBA_DEPENDENT) {
+	if (h->tpgs & TPGS_MODE_EXPLICIT) {
+		switch (h->state) {
+		case TPGS_STATE_NONOPTIMIZED:
+			stpg = 1;
+			if ((h->flags & ALUA_OPTIMIZE_STPG) &&
+			    (!h->pref) &&
+			    (h->tpgs & TPGS_MODE_IMPLICIT))
+				stpg = 0;
+			break;
+		case TPGS_STATE_STANDBY:
+			stpg = 1;
+			break;
+		case TPGS_STATE_UNAVAILABLE:
+		case TPGS_STATE_OFFLINE:
+			err = SCSI_DH_IO;
+			break;
+		case TPGS_STATE_TRANSITIONING:
+			err = SCSI_DH_RETRY;
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (stpg) {
 		h->callback_fn = fn;
 		h->callback_data = data;
 		err = submit_stpg(h);
@@ -698,6 +759,7 @@ static struct scsi_device_handler alua_dh = {
 	.prep_fn		= alua_prep_fn,
 	.check_sense		= alua_check_sense,
 	.activate		= alua_activate,
+	.set_params		= alua_set_params,
 	.match			= alua_match,
 };

diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 335e85192807..76e3d0b5bfa6 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -411,20 +411,18 @@ out:
 }

 /**
- * fcoe_interface_cleanup() - Clean up a FCoE interface
+ * fcoe_interface_remove() - remove FCoE interface from netdev
  * @fcoe: The FCoE interface to be cleaned up
  *
  * Caller must be holding the RTNL mutex
  */
-static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
+static void fcoe_interface_remove(struct fcoe_interface *fcoe)
 {
 	struct net_device *netdev = fcoe->netdev;
 	struct fcoe_ctlr *fip = &fcoe->ctlr;
 	u8 flogi_maddr[ETH_ALEN];
 	const struct net_device_ops *ops;

-	rtnl_lock();
-
 	/*
 	 * Don't listen for Ethernet packets anymore.
 	 * synchronize_net() ensures that the packet handlers are not running
@@ -453,12 +451,28 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
 			FCOE_NETDEV_DBG(netdev, "Failed to disable FCoE"
 					" specific feature for LLD.\n");
 	}
+	fcoe->removed = 1;
+}
+
+
+/**
+ * fcoe_interface_cleanup() - Clean up a FCoE interface
+ * @fcoe: The FCoE interface to be cleaned up
+ */
+static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
+{
+	struct net_device *netdev = fcoe->netdev;
+	struct fcoe_ctlr *fip = &fcoe->ctlr;

+	rtnl_lock();
+	if (!fcoe->removed)
+		fcoe_interface_remove(fcoe);
 	rtnl_unlock();

 	/* Release the self-reference taken during fcoe_interface_create() */
 	/* tear-down the FCoE controller */
 	fcoe_ctlr_destroy(fip);
+	scsi_host_put(fcoe->ctlr.lp->host);
 	kfree(fcoe);
 	dev_put(netdev);
 	module_put(THIS_MODULE);
@@ -522,13 +536,11 @@ static void fcoe_update_src_mac(struct fc_lport *lport, u8 *addr)
 	struct fcoe_port *port = lport_priv(lport);
 	struct fcoe_interface *fcoe = port->priv;

-	rtnl_lock();
 	if (!is_zero_ether_addr(port->data_src_addr))
 		dev_uc_del(fcoe->netdev, port->data_src_addr);
 	if (!is_zero_ether_addr(addr))
 		dev_uc_add(fcoe->netdev, addr);
 	memcpy(port->data_src_addr, addr, ETH_ALEN);
-	rtnl_unlock();
 }

 /**
533 545
534/** 546/**
@@ -941,6 +953,10 @@ static void fcoe_if_destroy(struct fc_lport *lport)
 	rtnl_lock();
 	if (!is_zero_ether_addr(port->data_src_addr))
 		dev_uc_del(netdev, port->data_src_addr);
+	if (lport->vport)
+		synchronize_net();
+	else
+		fcoe_interface_remove(fcoe);
 	rtnl_unlock();

 	/* Free queued packets for the per-CPU receive threads */
@@ -959,8 +975,12 @@ static void fcoe_if_destroy(struct fc_lport *lport)
 	/* Free memory used by statistical counters */
 	fc_lport_free_stats(lport);

-	/* Release the Scsi_Host */
-	scsi_host_put(lport->host);
+	/*
+	 * Release the Scsi_Host for a vport, but hold on to the master
+	 * lport's until its fcoe interface is fully cleaned up.
+	 */
+	if (lport->vport)
+		scsi_host_put(lport->host);
 }

 /**
@@ -2274,10 +2294,9 @@ static void fcoe_percpu_clean(struct fc_lport *lport)
 			continue;

 		skb = dev_alloc_skb(0);
-		if (!skb) {
-			spin_unlock_bh(&pp->fcoe_rx_list.lock);
+		if (!skb)
 			continue;
-		}
+
 		skb->destructor = fcoe_percpu_flush_done;

 		spin_lock_bh(&pp->fcoe_rx_list.lock);
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 3c2733a12aa1..96ac938d39cc 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -71,7 +71,8 @@ do { \
  * @ctlr:    The FCoE controller (for FIP)
  * @oem:     The offload exchange manager for all local port
  *           instances associated with this port
- * This structure is 1:1 with a net devive.
+ * @removed: Indicates fcoe interface removed from net device
+ * This structure is 1:1 with a net device.
  */
 struct fcoe_interface {
 	struct list_head   list;
@@ -81,6 +82,7 @@ struct fcoe_interface {
 	struct packet_type fip_packet_type;
 	struct fcoe_ctlr   ctlr;
 	struct fc_exch_mgr *oem;
+	u8	removed;
 };

 #define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr)
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 249a106888d9..5a4c7250aa77 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -1883,7 +1883,13 @@ static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip,
 	frame = (struct fip_frame *)skb->data;
 	memset(frame, 0, len);
 	memcpy(frame->eth.h_dest, dest, ETH_ALEN);
-	memcpy(frame->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
+
+	if (sub == FIP_SC_VN_BEACON) {
+		hton24(frame->eth.h_source, FIP_VN_FC_MAP);
+		hton24(frame->eth.h_source + 3, fip->port_id);
+	} else {
+		memcpy(frame->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
+	}
 	frame->eth.h_proto = htons(ETH_P_FIP);

 	frame->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
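The beacon path above builds the source MAC from a 24-bit FC-MAP plus the 24-bit local port ID instead of using the controller address. A standalone sketch of that byte layout; the FIP_VN_FC_MAP value 0x0efd00 used here is the usual FC-BB default and is assumed for illustration only:

#include <stdint.h>
#include <stdio.h>

#define FIP_VN_FC_MAP 0x0efd00

/* Store a 24-bit value most-significant byte first, like hton24(). */
static void hton24(uint8_t *p, uint32_t v)
{
	p[0] = (v >> 16) & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = v & 0xff;
}

int main(void)
{
	uint8_t mac[6];
	uint32_t port_id = 0x012345;	/* hypothetical local port ID */

	hton24(mac, FIP_VN_FC_MAP);	/* upper three bytes: FC-MAP */
	hton24(mac + 3, port_id);	/* lower three bytes: port ID */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}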
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 500e20dd56ec..796482badf13 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -159,6 +159,7 @@ static int hpsa_change_queue_depth(struct scsi_device *sdev,
 	int qdepth, int reason);

 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
+static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
 static int hpsa_slave_alloc(struct scsi_device *sdev);
 static void hpsa_slave_destroy(struct scsi_device *sdev);

@@ -171,7 +172,7 @@ static void check_ioctl_unit_attention(struct ctlr_info *h,
 static void calc_bucket_map(int *bucket, int num_buckets,
 	int nsgs, int *bucket_map);
 static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
-static inline u32 next_command(struct ctlr_info *h);
+static inline u32 next_command(struct ctlr_info *h, u8 q);
 static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
 	void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
 	u64 *cfg_offset);
@@ -180,6 +181,7 @@ static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
 static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
 static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
 	void __iomem *vaddr, int wait_for_ready);
+static inline void finish_cmd(struct CommandList *c);
 #define BOARD_NOT_READY 0
 #define BOARD_READY 1

@@ -234,6 +236,16 @@ static int check_for_unit_attention(struct ctlr_info *h,
 	return 1;
 }

+static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
+{
+	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
+		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
+		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
+		return 0;
+	dev_warn(&h->pdev->dev, HPSA "device busy");
+	return 1;
+}
+
 static ssize_t host_store_rescan(struct device *dev,
 				 struct device_attribute *attr,
 				 const char *buf, size_t count)
@@ -368,7 +380,7 @@ static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
 }

 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
-	"UNKNOWN"
+	"1(ADM)", "UNKNOWN"
 };
 #define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)

@@ -497,6 +509,7 @@ static struct scsi_host_template hpsa_driver_template = {
 	.change_queue_depth	= hpsa_change_queue_depth,
 	.this_id		= -1,
 	.use_clustering		= ENABLE_CLUSTERING,
+	.eh_abort_handler	= hpsa_eh_abort_handler,
 	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
 	.ioctl			= hpsa_ioctl,
 	.slave_alloc		= hpsa_slave_alloc,
@@ -516,24 +529,28 @@ static inline void addQ(struct list_head *list, struct CommandList *c)
 	list_add_tail(&c->list, list);
 }

-static inline u32 next_command(struct ctlr_info *h)
+static inline u32 next_command(struct ctlr_info *h, u8 q)
 {
 	u32 a;
+	struct reply_pool *rq = &h->reply_queue[q];
+	unsigned long flags;

 	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
-		return h->access.command_completed(h);
+		return h->access.command_completed(h, q);

-	if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
-		a = *(h->reply_pool_head); /* Next cmd in ring buffer */
-		(h->reply_pool_head)++;
+	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
+		a = rq->head[rq->current_entry];
+		rq->current_entry++;
+		spin_lock_irqsave(&h->lock, flags);
 		h->commands_outstanding--;
+		spin_unlock_irqrestore(&h->lock, flags);
 	} else {
 		a = FIFO_EMPTY;
 	}
 	/* Check for wraparound */
-	if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
-		h->reply_pool_head = h->reply_pool;
-		h->reply_pool_wraparound ^= 1;
+	if (rq->current_entry == h->max_commands) {
+		rq->current_entry = 0;
+		rq->wraparound ^= 1;
 	}
 	return a;
 }
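The reworked next_command() consumes a "toggling cycle bit" ring: an entry is fresh only while its low bit matches the consumer's expected wraparound bit, which flips on each pass around the ring. A user-space model of just that mechanism (names, sizes, and sample data below are illustrative, not driver API):

#include <stdint.h>
#include <stdio.h>

#define QDEPTH 4
#define EMPTY  0xffffffffu

struct reply_ring {
	uint32_t head[QDEPTH];	/* bit 0 = producer's cycle bit */
	unsigned current_entry;
	unsigned wraparound;	/* consumer's expected cycle bit */
};

static uint32_t consume(struct reply_ring *rq)
{
	uint32_t a = EMPTY;

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
	}
	/* on wrap, the expected cycle bit flips */
	if (rq->current_entry == QDEPTH) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}

int main(void)
{
	/* the producer wrote tags 8 and 12 on pass 0 (cycle bit 0);
	 * entries 2 and 3 still hold stale odd values from pass 1 */
	struct reply_ring rq = { { 8, 12, 13, 9 }, 0, 0 };

	printf("%u\n", consume(&rq));	/* 8 */
	printf("%u\n", consume(&rq));	/* 12 */
	printf("%u\n", consume(&rq));	/* EMPTY: entry 2 has bit 0 set */
	return 0;
}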
@@ -544,8 +561,41 @@ static inline u32 next_command(struct ctlr_info *h)
  */
 static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
 {
-	if (likely(h->transMethod & CFGTBL_Trans_Performant))
+	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
 		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
+		if (likely(h->msix_vector))
+			c->Header.ReplyQueue =
+				smp_processor_id() % h->nreply_queues;
+	}
+}
+
+static int is_firmware_flash_cmd(u8 *cdb)
+{
+	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
+}
+
+/*
+ * During firmware flash, the heartbeat register may not update as frequently
+ * as it should, so we dial down lockup detection during firmware flash and
+ * dial it back up when firmware flash completes.
+ */
+#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
+#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
+static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
+		struct CommandList *c)
+{
+	if (!is_firmware_flash_cmd(c->Request.CDB))
+		return;
+	atomic_inc(&h->firmware_flash_in_progress);
+	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
+}
+
+static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
+		struct CommandList *c)
+{
+	if (is_firmware_flash_cmd(c->Request.CDB) &&
+		atomic_dec_and_test(&h->firmware_flash_in_progress))
+		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
 }

 static void enqueue_cmd_and_start_io(struct ctlr_info *h,
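The dial-down/dial-up pair relies on an atomic counter so the sample interval is restored only when the last in-flight flash command completes. A minimal model using C11 atomics in place of the kernel's atomic_t (all names and values below are illustrative):

#include <stdatomic.h>
#include <stdio.h>

#define SLOW 240
#define FAST 30

static atomic_int flash_in_progress;
static int sample_interval = FAST;

static void dial_down(void)
{
	atomic_fetch_add(&flash_in_progress, 1);
	sample_interval = SLOW;
}

static void dial_up(void)
{
	/* atomic_dec_and_test() equivalent: previous value 1 means
	 * the count just reached zero */
	if (atomic_fetch_sub(&flash_in_progress, 1) == 1)
		sample_interval = FAST;
}

int main(void)
{
	dial_down();
	dial_down();		/* two flash writes in flight */
	dial_up();
	printf("%d\n", sample_interval);	/* still 240 */
	dial_up();
	printf("%d\n", sample_interval);	/* back to 30 */
	return 0;
}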
@@ -554,11 +604,12 @@ static void enqueue_cmd_and_start_io(struct ctlr_info *h,
 	unsigned long flags;

 	set_performant_mode(h, c);
+	dial_down_lockup_detection_during_fw_flash(h, c);
 	spin_lock_irqsave(&h->lock, flags);
 	addQ(&h->reqQ, c);
 	h->Qdepth++;
-	start_io(h);
 	spin_unlock_irqrestore(&h->lock, flags);
+	start_io(h);
 }

 static inline void removeQ(struct CommandList *c)
@@ -1193,7 +1244,7 @@ static void complete_scsi_command(struct CommandList *cp)
 		break;
 	}
 	/* Must be some other type of check condition */
-	dev_warn(&h->pdev->dev, "cp %p has check condition: "
+	dev_dbg(&h->pdev->dev, "cp %p has check condition: "
 			"unknown type: "
 			"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
 			"Returning result: 0x%x, "
@@ -1370,16 +1421,24 @@ static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
 	}
 }

+#define MAX_DRIVER_CMD_RETRIES 25
 static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
 	struct CommandList *c, int data_direction)
 {
-	int retry_count = 0;
+	int backoff_time = 10, retry_count = 0;

 	do {
 		memset(c->err_info, 0, sizeof(*c->err_info));
 		hpsa_scsi_do_simple_cmd_core(h, c);
 		retry_count++;
-	} while (check_for_unit_attention(h, c) && retry_count <= 3);
+		if (retry_count > 3) {
+			msleep(backoff_time);
+			if (backoff_time < 1000)
+				backoff_time *= 2;
+		}
+	} while ((check_for_unit_attention(h, c) ||
+			check_for_busy(h, c)) &&
+			retry_count <= MAX_DRIVER_CMD_RETRIES);
 	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
 }

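The retry loop above now sleeps 10, 20, 40, ... ms once past the first three attempts, with the doubling stopping once the delay reaches a second. A sketch of just the backoff schedule (the print stands in for msleep()):

#include <stdio.h>

int main(void)
{
	int backoff_time = 10, retry_count = 0;

	while (++retry_count <= 25) {	/* MAX_DRIVER_CMD_RETRIES */
		if (retry_count > 3) {
			printf("retry %d: sleep %d ms\n",
			       retry_count, backoff_time);
			if (backoff_time < 1000)
				backoff_time *= 2;
		}
	}
	return 0;
}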
@@ -2065,9 +2124,8 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
 		done(cmd);
 		return 0;
 	}
-	/* Need a lock as this is being allocated from the pool */
-	c = cmd_alloc(h);
 	spin_unlock_irqrestore(&h->lock, flags);
+	c = cmd_alloc(h);
 	if (c == NULL) {			/* trouble... */
 		dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
 		return SCSI_MLQUEUE_HOST_BUSY;
@@ -2334,6 +2392,261 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
 	return FAILED;
 }

+static void swizzle_abort_tag(u8 *tag)
+{
+	u8 original_tag[8];
+
+	memcpy(original_tag, tag, 8);
+	tag[0] = original_tag[3];
+	tag[1] = original_tag[2];
+	tag[2] = original_tag[1];
+	tag[3] = original_tag[0];
+	tag[4] = original_tag[7];
+	tag[5] = original_tag[6];
+	tag[6] = original_tag[5];
+	tag[7] = original_tag[4];
+}
+
+static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
+	struct CommandList *abort, int swizzle)
+{
+	int rc = IO_OK;
+	struct CommandList *c;
+	struct ErrorInfo *ei;
+
+	c = cmd_special_alloc(h);
+	if (c == NULL) {	/* trouble... */
+		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+		return -ENOMEM;
+	}
+
+	fill_cmd(c, HPSA_ABORT_MSG, h, abort, 0, 0, scsi3addr, TYPE_MSG);
+	if (swizzle)
+		swizzle_abort_tag(&c->Request.CDB[4]);
+	hpsa_scsi_do_simple_cmd_core(h, c);
+	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n",
+		__func__, abort->Header.Tag.upper, abort->Header.Tag.lower);
+	/* no unmap needed here because no data xfer. */
+
+	ei = c->err_info;
+	switch (ei->CommandStatus) {
+	case CMD_SUCCESS:
+		break;
+	case CMD_UNABORTABLE: /* Very common, don't make noise. */
+		rc = -1;
+		break;
+	default:
+		dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
+			__func__, abort->Header.Tag.upper,
+			abort->Header.Tag.lower);
+		hpsa_scsi_interpret_error(c);
+		rc = -1;
+		break;
+	}
+	cmd_special_free(h, c);
+	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
+		abort->Header.Tag.upper, abort->Header.Tag.lower);
+	return rc;
+}
+
+/*
+ * hpsa_find_cmd_in_queue
+ *
+ * Used to determine whether a command (find) is still present
+ * in queue_head.  Optionally excludes the last element of queue_head.
+ *
+ * This is used to avoid unnecessary aborts.  Commands in h->reqQ have
+ * not yet been submitted, and so can be aborted by the driver without
+ * sending an abort to the hardware.
+ *
+ * Returns pointer to command if found in queue, NULL otherwise.
+ */
+static struct CommandList *hpsa_find_cmd_in_queue(struct ctlr_info *h,
+			struct scsi_cmnd *find, struct list_head *queue_head)
+{
+	unsigned long flags;
+	struct CommandList *c = NULL;	/* ptr into cmpQ */
+
+	if (!find)
+		return 0;
+	spin_lock_irqsave(&h->lock, flags);
+	list_for_each_entry(c, queue_head, list) {
+		if (c->scsi_cmd == NULL) /* e.g.: passthru ioctl */
+			continue;
+		if (c->scsi_cmd == find) {
+			spin_unlock_irqrestore(&h->lock, flags);
+			return c;
+		}
+	}
+	spin_unlock_irqrestore(&h->lock, flags);
+	return NULL;
+}
+
+static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h,
+					u8 *tag, struct list_head *queue_head)
+{
+	unsigned long flags;
+	struct CommandList *c;
+
+	spin_lock_irqsave(&h->lock, flags);
+	list_for_each_entry(c, queue_head, list) {
+		if (memcmp(&c->Header.Tag, tag, 8) != 0)
+			continue;
+		spin_unlock_irqrestore(&h->lock, flags);
+		return c;
+	}
+	spin_unlock_irqrestore(&h->lock, flags);
+	return NULL;
+}
+
+/* Some Smart Arrays need the abort tag swizzled, and some don't.  It's hard to
+ * tell which kind we're dealing with, so we send the abort both ways.  There
+ * shouldn't be any collisions between swizzled and unswizzled tags due to the
+ * way we construct our tags but we check anyway in case the assumptions which
+ * make this true someday become false.
+ */
+static int hpsa_send_abort_both_ways(struct ctlr_info *h,
+	unsigned char *scsi3addr, struct CommandList *abort)
+{
+	u8 swizzled_tag[8];
+	struct CommandList *c;
+	int rc = 0, rc2 = 0;
+
+	/* we do not expect to find the swizzled tag in our queue, but
+	 * check anyway just to be sure the assumptions which make this
+	 * the case haven't become wrong.
+	 */
+	memcpy(swizzled_tag, &abort->Request.CDB[4], 8);
+	swizzle_abort_tag(swizzled_tag);
+	c = hpsa_find_cmd_in_queue_by_tag(h, swizzled_tag, &h->cmpQ);
+	if (c != NULL) {
+		dev_warn(&h->pdev->dev, "Unexpectedly found byte-swapped tag in completion queue.\n");
+		return hpsa_send_abort(h, scsi3addr, abort, 0);
+	}
+	rc = hpsa_send_abort(h, scsi3addr, abort, 0);
+
+	/* if the command is still in our queue, we can't conclude that it was
+	 * aborted (it might have just completed normally) but in any case
+	 * we don't need to try to abort it another way.
+	 */
+	c = hpsa_find_cmd_in_queue(h, abort->scsi_cmd, &h->cmpQ);
+	if (c)
+		rc2 = hpsa_send_abort(h, scsi3addr, abort, 1);
+	return rc && rc2;
+}
+
+/* Send an abort for the specified command.
+ *	If the device and controller support it,
+ *		send a task abort request.
+ */
+static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
+{
+
+	int i, rc;
+	struct ctlr_info *h;
+	struct hpsa_scsi_dev_t *dev;
+	struct CommandList *abort; /* pointer to command to be aborted */
+	struct CommandList *found;
+	struct scsi_cmnd *as;	/* ptr to scsi cmd inside aborted command. */
+	char msg[256];		/* For debug messaging. */
+	int ml = 0;
+
+	/* Find the controller of the command to be aborted */
+	h = sdev_to_hba(sc->device);
+	if (WARN(h == NULL,
+			"ABORT REQUEST FAILED, Controller lookup failed.\n"))
+		return FAILED;
+
+	/* Check that controller supports some kind of task abort */
+	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
+		!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
+		return FAILED;
+
+	memset(msg, 0, sizeof(msg));
+	ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%d ",
+		h->scsi_host->host_no, sc->device->channel,
+		sc->device->id, sc->device->lun);
+
+	/* Find the device of the command to be aborted */
+	dev = sc->device->hostdata;
+	if (!dev) {
+		dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
+				msg);
+		return FAILED;
+	}
+
+	/* Get SCSI command to be aborted */
+	abort = (struct CommandList *) sc->host_scribble;
+	if (abort == NULL) {
+		dev_err(&h->pdev->dev, "%s FAILED, Command to abort is NULL.\n",
+				msg);
+		return FAILED;
+	}
+
+	ml += sprintf(msg+ml, "Tag:0x%08x:%08x ",
+		abort->Header.Tag.upper, abort->Header.Tag.lower);
+	as = (struct scsi_cmnd *) abort->scsi_cmd;
+	if (as != NULL)
+		ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
+			as->cmnd[0], as->serial_number);
+	dev_dbg(&h->pdev->dev, "%s\n", msg);
+	dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n",
+		h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
+
+	/* Search reqQ to see if command is queued but not submitted;
+	 * if so, complete the command with aborted status and remove
+	 * it from the reqQ.
+	 */
+	found = hpsa_find_cmd_in_queue(h, sc, &h->reqQ);
+	if (found) {
+		found->err_info->CommandStatus = CMD_ABORTED;
+		finish_cmd(found);
+		dev_info(&h->pdev->dev, "%s Request SUCCEEDED (driver queue).\n",
+				msg);
+		return SUCCESS;
+	}
+
+	/* not in reqQ, if also not in cmpQ, must have already completed */
+	found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
+	if (!found) {
+		dev_dbg(&h->pdev->dev, "%s Request FAILED (not known to driver).\n",
+				msg);
+		return SUCCESS;
+	}
+
+	/*
+	 * Command is in flight, or possibly already completed
+	 * by the firmware (but not to the scsi mid layer) but we can't
+	 * distinguish which.  Send the abort down.
+	 */
+	rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort);
+	if (rc != 0) {
+		dev_dbg(&h->pdev->dev, "%s Request FAILED.\n", msg);
+		dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n",
+			h->scsi_host->host_no,
+			dev->bus, dev->target, dev->lun);
+		return FAILED;
+	}
+	dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg);
+
+	/* If the abort(s) above completed and actually aborted the
+	 * command, then the command to be aborted should already be
+	 * completed.  If not, wait around a bit more to see if they
+	 * manage to complete normally.
+	 */
+#define ABORT_COMPLETE_WAIT_SECS 30
+	for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) {
+		found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
+		if (!found)
+			return SUCCESS;
+		msleep(100);
+	}
+	dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n",
+		msg, ABORT_COMPLETE_WAIT_SECS);
+	return FAILED;
+}
+
+
 /*
  * For operations that cannot sleep, a command block is allocated at init,
  * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
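The swizzle sent "both ways" above is a per-32-bit-half byte reversal of the 8-byte tag; a standalone sketch showing the resulting byte order:

#include <stdio.h>
#include <string.h>

/* Same byte permutation as the driver's swizzle_abort_tag(). */
static void swizzle_abort_tag(unsigned char *tag)
{
	unsigned char o[8];

	memcpy(o, tag, 8);
	tag[0] = o[3]; tag[1] = o[2]; tag[2] = o[1]; tag[3] = o[0];
	tag[4] = o[7]; tag[5] = o[6]; tag[6] = o[5]; tag[7] = o[4];
}

int main(void)
{
	unsigned char tag[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	int i;

	swizzle_abort_tag(tag);
	for (i = 0; i < 8; i++)
		printf("%d ", tag[i]);	/* 4 3 2 1 8 7 6 5 */
	printf("\n");
	return 0;
}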
@@ -2346,14 +2659,21 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
 	int i;
 	union u64bit temp64;
 	dma_addr_t cmd_dma_handle, err_dma_handle;
+	unsigned long flags;

+	spin_lock_irqsave(&h->lock, flags);
 	do {
 		i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
-		if (i == h->nr_cmds)
+		if (i == h->nr_cmds) {
+			spin_unlock_irqrestore(&h->lock, flags);
 			return NULL;
+		}
 	} while (test_and_set_bit
 		 (i & (BITS_PER_LONG - 1),
 		  h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
+	h->nr_allocs++;
+	spin_unlock_irqrestore(&h->lock, flags);
+
 	c = h->cmd_pool + i;
 	memset(c, 0, sizeof(*c));
 	cmd_dma_handle = h->cmd_pool_dhandle
@@ -2362,7 +2682,6 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
 	memset(c->err_info, 0, sizeof(*c->err_info));
 	err_dma_handle = h->errinfo_pool_dhandle
 	    + i * sizeof(*c->err_info);
-	h->nr_allocs++;

 	c->cmdindex = i;

@@ -2418,11 +2737,14 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
 {
 	int i;
+	unsigned long flags;

 	i = c - h->cmd_pool;
+	spin_lock_irqsave(&h->lock, flags);
 	clear_bit(i & (BITS_PER_LONG - 1),
 		  h->cmd_pool_bits + (i / BITS_PER_LONG));
 	h->nr_frees++;
+	spin_unlock_irqrestore(&h->lock, flags);
 }

 static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
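cmd_alloc()/cmd_free() amount to a lock-protected bitmap pool. A user-space model, with a mutex standing in for h->lock and a linear scan standing in for find_first_zero_bit()/test_and_set_bit() (all names here are illustrative):

#include <pthread.h>
#include <stdio.h>

#define NR_CMDS 16

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long pool_bits;	/* bit i set => slot i in use */

static int cmd_alloc(void)
{
	int i;

	pthread_mutex_lock(&lock);
	for (i = 0; i < NR_CMDS; i++)
		if (!(pool_bits & (1UL << i)))
			break;
	if (i == NR_CMDS) {
		pthread_mutex_unlock(&lock);
		return -1;	/* pool exhausted */
	}
	pool_bits |= 1UL << i;
	pthread_mutex_unlock(&lock);
	return i;
}

static void cmd_free(int i)
{
	pthread_mutex_lock(&lock);
	pool_bits &= ~(1UL << i);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	int a = cmd_alloc(), b = cmd_alloc();

	printf("%d %d\n", a, b);	/* 0 1 */
	cmd_free(a);
	printf("%d\n", cmd_alloc());	/* slot 0 is reused */
	cmd_free(b);
	return 0;
}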
@@ -2866,6 +3188,7 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
 	int cmd_type)
 {
 	int pci_dir = XFER_NONE;
+	struct CommandList *a; /* for commands to be aborted */

 	c->cmd_type = CMD_IOCTL_PEND;
 	c->Header.ReplyQueue = 0;
@@ -2949,8 +3272,35 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
 			c->Request.CDB[5] = 0x00;
 			c->Request.CDB[6] = 0x00;
 			c->Request.CDB[7] = 0x00;
+			break;
+		case HPSA_ABORT_MSG:
+			a = buff;	/* point to command to be aborted */
+			dev_dbg(&h->pdev->dev, "Abort Tag:0x%08x:%08x using request Tag:0x%08x:%08x\n",
+				a->Header.Tag.upper, a->Header.Tag.lower,
+				c->Header.Tag.upper, c->Header.Tag.lower);
+			c->Request.CDBLen = 16;
+			c->Request.Type.Type = TYPE_MSG;
+			c->Request.Type.Attribute = ATTR_SIMPLE;
+			c->Request.Type.Direction = XFER_WRITE;
+			c->Request.Timeout = 0; /* Don't time out */
+			c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
+			c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
+			c->Request.CDB[2] = 0x00; /* reserved */
+			c->Request.CDB[3] = 0x00; /* reserved */
+			/* Tag to abort goes in CDB[4]-CDB[11] */
+			c->Request.CDB[4] = a->Header.Tag.lower & 0xFF;
+			c->Request.CDB[5] = (a->Header.Tag.lower >> 8) & 0xFF;
+			c->Request.CDB[6] = (a->Header.Tag.lower >> 16) & 0xFF;
+			c->Request.CDB[7] = (a->Header.Tag.lower >> 24) & 0xFF;
+			c->Request.CDB[8] = a->Header.Tag.upper & 0xFF;
+			c->Request.CDB[9] = (a->Header.Tag.upper >> 8) & 0xFF;
+			c->Request.CDB[10] = (a->Header.Tag.upper >> 16) & 0xFF;
+			c->Request.CDB[11] = (a->Header.Tag.upper >> 24) & 0xFF;
+			c->Request.CDB[12] = 0x00; /* reserved */
+			c->Request.CDB[13] = 0x00; /* reserved */
+			c->Request.CDB[14] = 0x00; /* reserved */
+			c->Request.CDB[15] = 0x00; /* reserved */
 			break;
-
 		default:
 			dev_warn(&h->pdev->dev, "unknown message type %d\n",
 				cmd);
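The abort CDB above carries the 64-bit tag in bytes 4..11, each 32-bit half least-significant byte first; a sketch of the packing (pack_tag() is a hypothetical helper, not driver API):

#include <stdint.h>
#include <stdio.h>

/* Pack two 32-bit tag halves into CDB[4..11], LSB first per half. */
static void pack_tag(uint8_t *cdb, uint32_t lower, uint32_t upper)
{
	int i;

	for (i = 0; i < 4; i++) {
		cdb[4 + i] = (lower >> (8 * i)) & 0xFF;
		cdb[8 + i] = (upper >> (8 * i)) & 0xFF;
	}
}

int main(void)
{
	uint8_t cdb[16] = { 0 };
	int i;

	pack_tag(cdb, 0x11223344, 0x55667788);
	for (i = 4; i < 12; i++)
		printf("%02x ", cdb[i]);	/* 44 33 22 11 88 77 66 55 */
	printf("\n");
	return 0;
}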
@@ -2998,7 +3348,9 @@ static void __iomem *remap_pci_mem(ulong base, ulong size)
 static void start_io(struct ctlr_info *h)
 {
 	struct CommandList *c;
+	unsigned long flags;

+	spin_lock_irqsave(&h->lock, flags);
 	while (!list_empty(&h->reqQ)) {
 		c = list_entry(h->reqQ.next, struct CommandList, list);
 		/* can't do anything if fifo is full */
@@ -3011,17 +3363,28 @@ static void start_io(struct ctlr_info *h)
 		removeQ(c);
 		h->Qdepth--;

-		/* Tell the controller execute command */
-		h->access.submit_command(h, c);
-
 		/* Put job onto the completed Q */
 		addQ(&h->cmpQ, c);
+
+		/* Must increment commands_outstanding before unlocking
+		 * and submitting to avoid race checking for fifo full
+		 * condition.
+		 */
+		h->commands_outstanding++;
+		if (h->commands_outstanding > h->max_outstanding)
+			h->max_outstanding = h->commands_outstanding;
+
+		/* Tell the controller execute command */
+		spin_unlock_irqrestore(&h->lock, flags);
+		h->access.submit_command(h, c);
+		spin_lock_irqsave(&h->lock, flags);
 	}
+	spin_unlock_irqrestore(&h->lock, flags);
 }

-static inline unsigned long get_next_completion(struct ctlr_info *h)
+static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
 {
-	return h->access.command_completed(h);
+	return h->access.command_completed(h, q);
 }

 static inline bool interrupt_pending(struct ctlr_info *h)
@@ -3045,9 +3408,14 @@ static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
 	return 0;
 }

-static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
+static inline void finish_cmd(struct CommandList *c)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&c->h->lock, flags);
 	removeQ(c);
+	spin_unlock_irqrestore(&c->h->lock, flags);
+	dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
 	if (likely(c->cmd_type == CMD_SCSI))
 		complete_scsi_command(c);
 	else if (c->cmd_type == CMD_IOCTL_PEND)
@@ -3075,36 +3443,38 @@ static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
 }

 /* process completion of an indexed ("direct lookup") command */
-static inline u32 process_indexed_cmd(struct ctlr_info *h,
+static inline void process_indexed_cmd(struct ctlr_info *h,
 	u32 raw_tag)
 {
 	u32 tag_index;
 	struct CommandList *c;

 	tag_index = hpsa_tag_to_index(raw_tag);
-	if (bad_tag(h, tag_index, raw_tag))
-		return next_command(h);
-	c = h->cmd_pool + tag_index;
-	finish_cmd(c, raw_tag);
-	return next_command(h);
+	if (!bad_tag(h, tag_index, raw_tag)) {
+		c = h->cmd_pool + tag_index;
+		finish_cmd(c);
+	}
 }

 /* process completion of a non-indexed command */
-static inline u32 process_nonindexed_cmd(struct ctlr_info *h,
+static inline void process_nonindexed_cmd(struct ctlr_info *h,
 	u32 raw_tag)
 {
 	u32 tag;
 	struct CommandList *c = NULL;
+	unsigned long flags;

 	tag = hpsa_tag_discard_error_bits(h, raw_tag);
+	spin_lock_irqsave(&h->lock, flags);
 	list_for_each_entry(c, &h->cmpQ, list) {
 		if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
-			finish_cmd(c, raw_tag);
-			return next_command(h);
+			spin_unlock_irqrestore(&h->lock, flags);
+			finish_cmd(c);
+			return;
 		}
 	}
+	spin_unlock_irqrestore(&h->lock, flags);
 	bad_tag(h, h->nr_cmds + 1, raw_tag);
-	return next_command(h);
 }

 /* Some controllers, like p400, will give us one interrupt
@@ -3126,10 +3496,20 @@ static int ignore_bogus_interrupt(struct ctlr_info *h)
 	return 1;
 }

-static irqreturn_t hpsa_intx_discard_completions(int irq, void *dev_id)
+/*
+ * Convert &h->q[x] (passed to interrupt handlers) back to h.
+ * Relies on (h->q[x] == x) being true for x such that
+ * 0 <= x < MAX_REPLY_QUEUES.
+ */
+static struct ctlr_info *queue_to_hba(u8 *queue)
 {
-	struct ctlr_info *h = dev_id;
-	unsigned long flags;
+	return container_of((queue - *queue), struct ctlr_info, q[0]);
+}
+
+static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
+{
+	struct ctlr_info *h = queue_to_hba(queue);
+	u8 q = *(u8 *) queue;
 	u32 raw_tag;

 	if (ignore_bogus_interrupt(h))
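The &h->q[x] handle trick above can be modeled in plain C: because q[i] == i, the handler recovers both the queue index (*queue) and, via container_of()-style pointer arithmetic, the owning structure. A self-contained sketch (the struct layout and values are illustrative):

#include <stddef.h>
#include <stdio.h>

#define MAX_REPLY_QUEUES 8

struct ctlr_info {
	int board_id;
	unsigned char q[MAX_REPLY_QUEUES];	/* q[i] == i */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct ctlr_info *queue_to_hba(unsigned char *queue)
{
	/* step back *queue slots to reach q[0], then back to the struct */
	return container_of(queue - *queue, struct ctlr_info, q[0]);
}

int main(void)
{
	struct ctlr_info h = { 42, { 0, 1, 2, 3, 4, 5, 6, 7 } };
	unsigned char *handle = &h.q[5];	/* passed as the irq cookie */

	printf("queue %u of board %d\n", *handle,
	       queue_to_hba(handle)->board_id);
	return 0;
}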
@@ -3137,74 +3517,68 @@ static irqreturn_t hpsa_intx_discard_completions(int irq, void *dev_id)

 	if (interrupt_not_for_us(h))
 		return IRQ_NONE;
-	spin_lock_irqsave(&h->lock, flags);
 	h->last_intr_timestamp = get_jiffies_64();
 	while (interrupt_pending(h)) {
-		raw_tag = get_next_completion(h);
+		raw_tag = get_next_completion(h, q);
 		while (raw_tag != FIFO_EMPTY)
-			raw_tag = next_command(h);
+			raw_tag = next_command(h, q);
 	}
-	spin_unlock_irqrestore(&h->lock, flags);
 	return IRQ_HANDLED;
 }

-static irqreturn_t hpsa_msix_discard_completions(int irq, void *dev_id)
+static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
 {
-	struct ctlr_info *h = dev_id;
-	unsigned long flags;
+	struct ctlr_info *h = queue_to_hba(queue);
 	u32 raw_tag;
+	u8 q = *(u8 *) queue;

 	if (ignore_bogus_interrupt(h))
 		return IRQ_NONE;

-	spin_lock_irqsave(&h->lock, flags);
 	h->last_intr_timestamp = get_jiffies_64();
-	raw_tag = get_next_completion(h);
+	raw_tag = get_next_completion(h, q);
 	while (raw_tag != FIFO_EMPTY)
-		raw_tag = next_command(h);
-	spin_unlock_irqrestore(&h->lock, flags);
+		raw_tag = next_command(h, q);
 	return IRQ_HANDLED;
 }

-static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id)
+static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
 {
-	struct ctlr_info *h = dev_id;
-	unsigned long flags;
+	struct ctlr_info *h = queue_to_hba((u8 *) queue);
 	u32 raw_tag;
+	u8 q = *(u8 *) queue;

 	if (interrupt_not_for_us(h))
 		return IRQ_NONE;
-	spin_lock_irqsave(&h->lock, flags);
 	h->last_intr_timestamp = get_jiffies_64();
 	while (interrupt_pending(h)) {
-		raw_tag = get_next_completion(h);
+		raw_tag = get_next_completion(h, q);
 		while (raw_tag != FIFO_EMPTY) {
-			if (hpsa_tag_contains_index(raw_tag))
-				raw_tag = process_indexed_cmd(h, raw_tag);
+			if (likely(hpsa_tag_contains_index(raw_tag)))
+				process_indexed_cmd(h, raw_tag);
 			else
-				raw_tag = process_nonindexed_cmd(h, raw_tag);
+				process_nonindexed_cmd(h, raw_tag);
+			raw_tag = next_command(h, q);
 		}
 	}
-	spin_unlock_irqrestore(&h->lock, flags);
 	return IRQ_HANDLED;
 }

-static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id)
+static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
 {
-	struct ctlr_info *h = dev_id;
-	unsigned long flags;
+	struct ctlr_info *h = queue_to_hba(queue);
 	u32 raw_tag;
+	u8 q = *(u8 *) queue;

-	spin_lock_irqsave(&h->lock, flags);
 	h->last_intr_timestamp = get_jiffies_64();
-	raw_tag = get_next_completion(h);
+	raw_tag = get_next_completion(h, q);
 	while (raw_tag != FIFO_EMPTY) {
-		if (hpsa_tag_contains_index(raw_tag))
-			raw_tag = process_indexed_cmd(h, raw_tag);
+		if (likely(hpsa_tag_contains_index(raw_tag)))
+			process_indexed_cmd(h, raw_tag);
 		else
-			raw_tag = process_nonindexed_cmd(h, raw_tag);
+			process_nonindexed_cmd(h, raw_tag);
+		raw_tag = next_command(h, q);
 	}
-	spin_unlock_irqrestore(&h->lock, flags);
 	return IRQ_HANDLED;
 }

@@ -3638,10 +4012,13 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
 static void __devinit hpsa_interrupt_mode(struct ctlr_info *h)
 {
 #ifdef CONFIG_PCI_MSI
-	int err;
-	struct msix_entry hpsa_msix_entries[4] = { {0, 0}, {0, 1},
-	{0, 2}, {0, 3}
-	};
+	int err, i;
+	struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
+
+	for (i = 0; i < MAX_REPLY_QUEUES; i++) {
+		hpsa_msix_entries[i].vector = 0;
+		hpsa_msix_entries[i].entry = i;
+	}

 	/* Some boards advertise MSI but don't really support it */
 	if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
@@ -3649,12 +4026,11 @@ static void __devinit hpsa_interrupt_mode(struct ctlr_info *h)
3649 goto default_int_mode; 4026 goto default_int_mode;
3650 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { 4027 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
3651 dev_info(&h->pdev->dev, "MSIX\n"); 4028 dev_info(&h->pdev->dev, "MSIX\n");
3652 err = pci_enable_msix(h->pdev, hpsa_msix_entries, 4); 4029 err = pci_enable_msix(h->pdev, hpsa_msix_entries,
4030 MAX_REPLY_QUEUES);
3653 if (!err) { 4031 if (!err) {
3654 h->intr[0] = hpsa_msix_entries[0].vector; 4032 for (i = 0; i < MAX_REPLY_QUEUES; i++)
3655 h->intr[1] = hpsa_msix_entries[1].vector; 4033 h->intr[i] = hpsa_msix_entries[i].vector;
3656 h->intr[2] = hpsa_msix_entries[2].vector;
3657 h->intr[3] = hpsa_msix_entries[3].vector;
3658 h->msix_vector = 1; 4034 h->msix_vector = 1;
3659 return; 4035 return;
3660 } 4036 }
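For reference, pci_enable_msix() in this kernel generation returns 0 on success, a negative errno on hard failure, or a positive count of vectors actually available. The hunk above treats anything nonzero as failure and falls back (in code not shown here) toward MSI/INTx; a caller wanting graceful degradation could instead retry with the advertised count. A sketch of that pattern, not what this driver does:

        int nvec = MAX_REPLY_QUEUES;

        for (;;) {
                err = pci_enable_msix(h->pdev, hpsa_msix_entries, nvec);
                if (err == 0)
                        break;                  /* all nvec vectors granted */
                if (err < 0)
                        goto default_int_mode;  /* MSI-X unusable */
                nvec = err;                     /* fewer vectors free; retry */
        }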
@@ -3705,14 +4081,6 @@ static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
3705 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */ 4081 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
3706} 4082}
3707 4083
3708static inline bool hpsa_board_disabled(struct pci_dev *pdev)
3709{
3710 u16 command;
3711
3712 (void) pci_read_config_word(pdev, PCI_COMMAND, &command);
3713 return ((command & PCI_COMMAND_MEMORY) == 0);
3714}
3715
3716static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev, 4084static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
3717 unsigned long *memory_bar) 4085 unsigned long *memory_bar)
3718{ 4086{
@@ -3838,14 +4206,14 @@ static void __devinit hpsa_find_board_params(struct ctlr_info *h)
3838 h->maxsgentries = 31; /* default to traditional values */ 4206 h->maxsgentries = 31; /* default to traditional values */
3839 h->chainsize = 0; 4207 h->chainsize = 0;
3840 } 4208 }
4209
 4210 /* Find out which task management functions are supported and cache them */
4211 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
3841} 4212}
3842 4213
3843static inline bool hpsa_CISS_signature_present(struct ctlr_info *h) 4214static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
3844{ 4215{
3845 if ((readb(&h->cfgtable->Signature[0]) != 'C') || 4216 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
3846 (readb(&h->cfgtable->Signature[1]) != 'I') ||
3847 (readb(&h->cfgtable->Signature[2]) != 'S') ||
3848 (readb(&h->cfgtable->Signature[3]) != 'S')) {
3849 dev_warn(&h->pdev->dev, "not a valid CISS config table\n"); 4217 dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
3850 return false; 4218 return false;
3851 } 4219 }
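check_signature() (declared in linux/io.h) performs the same byte-by-byte readb() comparison the four removed lines did, returning true only when all length bytes match, so the open-coded test collapses to one call. Shape of the call in isolation (hypothetical wrapper, not driver code):

static bool looks_like_ciss(const volatile void __iomem *cfgtable)
{
        /* readb()-compares 4 bytes against "CISS"; true on full match */
        return check_signature(cfgtable, "CISS", 4);
}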
@@ -3932,11 +4300,6 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
3932 h->product_name = products[prod_index].product_name; 4300 h->product_name = products[prod_index].product_name;
3933 h->access = *(products[prod_index].access); 4301 h->access = *(products[prod_index].access);
3934 4302
3935 if (hpsa_board_disabled(h->pdev)) {
3936 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
3937 return -ENODEV;
3938 }
3939
3940 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S | 4303 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
3941 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); 4304 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
3942 4305
@@ -3946,6 +4309,9 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
3946 return err; 4309 return err;
3947 } 4310 }
3948 4311
4312 /* Enable bus mastering (pci_disable_device may disable this) */
4313 pci_set_master(h->pdev);
4314
3949 err = pci_request_regions(h->pdev, HPSA); 4315 err = pci_request_regions(h->pdev, HPSA);
3950 if (err) { 4316 if (err) {
3951 dev_err(&h->pdev->dev, 4317 dev_err(&h->pdev->dev,
@@ -3987,10 +4353,7 @@ err_out_free_res:
3987 iounmap(h->cfgtable); 4353 iounmap(h->cfgtable);
3988 if (h->vaddr) 4354 if (h->vaddr)
3989 iounmap(h->vaddr); 4355 iounmap(h->vaddr);
3990 /* 4356 pci_disable_device(h->pdev);
3991 * Deliberately omit pci_disable_device(): it does something nasty to
3992 * Smart Array controllers that pci_enable_device does not undo
3993 */
3994 pci_release_regions(h->pdev); 4357 pci_release_regions(h->pdev);
3995 return err; 4358 return err;
3996} 4359}
@@ -4081,14 +4444,33 @@ static int hpsa_request_irq(struct ctlr_info *h,
4081 irqreturn_t (*msixhandler)(int, void *), 4444 irqreturn_t (*msixhandler)(int, void *),
4082 irqreturn_t (*intxhandler)(int, void *)) 4445 irqreturn_t (*intxhandler)(int, void *))
4083{ 4446{
4084 int rc; 4447 int rc, i;
4085 4448
4086 if (h->msix_vector || h->msi_vector) 4449 /*
4087 rc = request_irq(h->intr[h->intr_mode], msixhandler, 4450 * initialize h->q[x] = x so that interrupt handlers know which
4088 0, h->devname, h); 4451 * queue to process.
4089 else 4452 */
4090 rc = request_irq(h->intr[h->intr_mode], intxhandler, 4453 for (i = 0; i < MAX_REPLY_QUEUES; i++)
4091 IRQF_SHARED, h->devname, h); 4454 h->q[i] = (u8) i;
4455
4456 if (h->intr_mode == PERF_MODE_INT && h->msix_vector) {
4457 /* If performant mode and MSI-X, use multiple reply queues */
4458 for (i = 0; i < MAX_REPLY_QUEUES; i++)
4459 rc = request_irq(h->intr[i], msixhandler,
4460 0, h->devname,
4461 &h->q[i]);
4462 } else {
4463 /* Use single reply pool */
4464 if (h->msix_vector || h->msi_vector) {
4465 rc = request_irq(h->intr[h->intr_mode],
4466 msixhandler, 0, h->devname,
4467 &h->q[h->intr_mode]);
4468 } else {
4469 rc = request_irq(h->intr[h->intr_mode],
4470 intxhandler, IRQF_SHARED, h->devname,
4471 &h->q[h->intr_mode]);
4472 }
4473 }
4092 if (rc) { 4474 if (rc) {
4093 dev_err(&h->pdev->dev, "unable to get irq %d for %s\n", 4475 dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
4094 h->intr[h->intr_mode], h->devname); 4476 h->intr[h->intr_mode], h->devname);
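One thing to note about the multi-queue loop above: rc is overwritten on every iteration, so only the last request_irq() result is checked, and a mid-loop failure would leak the vectors already claimed. A defensive variant (a sketch of the idea, not the driver's code) would unwind on first failure:

        for (i = 0; i < MAX_REPLY_QUEUES; i++) {
                rc = request_irq(h->intr[i], msixhandler, 0,
                                 h->devname, &h->q[i]);
                if (rc) {
                        /* release the vectors already acquired */
                        while (--i >= 0)
                                free_irq(h->intr[i], &h->q[i]);
                        break;
                }
        }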
@@ -4121,15 +4503,38 @@ static int __devinit hpsa_kdump_soft_reset(struct ctlr_info *h)
4121 return 0; 4503 return 0;
4122} 4504}
4123 4505
4124static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h) 4506static void free_irqs(struct ctlr_info *h)
4125{ 4507{
4126 free_irq(h->intr[h->intr_mode], h); 4508 int i;
4509
4510 if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
4511 /* Single reply queue, only one irq to free */
4512 i = h->intr_mode;
4513 free_irq(h->intr[i], &h->q[i]);
4514 return;
4515 }
4516
4517 for (i = 0; i < MAX_REPLY_QUEUES; i++)
4518 free_irq(h->intr[i], &h->q[i]);
4519}
4520
4521static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
4522{
4523 free_irqs(h);
4127#ifdef CONFIG_PCI_MSI 4524#ifdef CONFIG_PCI_MSI
4128 if (h->msix_vector) 4525 if (h->msix_vector) {
4129 pci_disable_msix(h->pdev); 4526 if (h->pdev->msix_enabled)
4130 else if (h->msi_vector) 4527 pci_disable_msix(h->pdev);
4131 pci_disable_msi(h->pdev); 4528 } else if (h->msi_vector) {
4529 if (h->pdev->msi_enabled)
4530 pci_disable_msi(h->pdev);
4531 }
4132#endif /* CONFIG_PCI_MSI */ 4532#endif /* CONFIG_PCI_MSI */
4533}
4534
4535static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
4536{
4537 hpsa_free_irqs_and_disable_msix(h);
4133 hpsa_free_sg_chain_blocks(h); 4538 hpsa_free_sg_chain_blocks(h);
4134 hpsa_free_cmd_pool(h); 4539 hpsa_free_cmd_pool(h);
4135 kfree(h->blockFetchTable); 4540 kfree(h->blockFetchTable);
@@ -4165,7 +4570,7 @@ static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)
4165 while (!list_empty(list)) { 4570 while (!list_empty(list)) {
4166 c = list_entry(list->next, struct CommandList, list); 4571 c = list_entry(list->next, struct CommandList, list);
4167 c->err_info->CommandStatus = CMD_HARDWARE_ERR; 4572 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
4168 finish_cmd(c, c->Header.Tag.lower); 4573 finish_cmd(c);
4169 } 4574 }
4170} 4575}
4171 4576
@@ -4188,9 +4593,6 @@ static void controller_lockup_detected(struct ctlr_info *h)
4188 spin_unlock_irqrestore(&h->lock, flags); 4593 spin_unlock_irqrestore(&h->lock, flags);
4189} 4594}
4190 4595
4191#define HEARTBEAT_SAMPLE_INTERVAL (10 * HZ)
4192#define HEARTBEAT_CHECK_MINIMUM_INTERVAL (HEARTBEAT_SAMPLE_INTERVAL / 2)
4193
4194static void detect_controller_lockup(struct ctlr_info *h) 4596static void detect_controller_lockup(struct ctlr_info *h)
4195{ 4597{
4196 u64 now; 4598 u64 now;
@@ -4201,7 +4603,7 @@ static void detect_controller_lockup(struct ctlr_info *h)
4201 now = get_jiffies_64(); 4603 now = get_jiffies_64();
4202 /* If we've received an interrupt recently, we're ok. */ 4604 /* If we've received an interrupt recently, we're ok. */
4203 if (time_after64(h->last_intr_timestamp + 4605 if (time_after64(h->last_intr_timestamp +
4204 (HEARTBEAT_CHECK_MINIMUM_INTERVAL), now)) 4606 (h->heartbeat_sample_interval), now))
4205 return; 4607 return;
4206 4608
4207 /* 4609 /*
@@ -4210,7 +4612,7 @@ static void detect_controller_lockup(struct ctlr_info *h)
4210 * otherwise don't care about signals in this thread. 4612 * otherwise don't care about signals in this thread.
4211 */ 4613 */
4212 if (time_after64(h->last_heartbeat_timestamp + 4614 if (time_after64(h->last_heartbeat_timestamp +
4213 (HEARTBEAT_CHECK_MINIMUM_INTERVAL), now)) 4615 (h->heartbeat_sample_interval), now))
4214 return; 4616 return;
4215 4617
4216 /* If heartbeat has not changed since we last looked, we're not ok. */ 4618 /* If heartbeat has not changed since we last looked, we're not ok. */
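Both guards above use the same pattern: time_after64(stamp + interval, now) is true while now still lies inside the interval, so the function bails out early and only compares the heartbeat counter once per heartbeat_sample_interval. A self-contained model of that logic, with time_after64() expanded the way jiffies.h defines it:

#include <stdbool.h>
#include <stdint.h>

/* from linux/jiffies.h: true if a is after b, wrap-safe */
#define time_after64(a, b)  ((int64_t)((b) - (a)) < 0)

static bool lockup_check_due(uint64_t now, uint64_t last_intr,
                             uint64_t last_sample, uint64_t interval)
{
        if (time_after64(last_intr + interval, now))
                return false;   /* interrupt seen recently: alive */
        if (time_after64(last_sample + interval, now))
                return false;   /* sampled too recently: wait */
        return true;            /* time to compare the heartbeat */
}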
@@ -4252,6 +4654,7 @@ static void add_ctlr_to_lockup_detector_list(struct ctlr_info *h)
4252{ 4654{
4253 unsigned long flags; 4655 unsigned long flags;
4254 4656
4657 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
4255 spin_lock_irqsave(&lockup_detector_lock, flags); 4658 spin_lock_irqsave(&lockup_detector_lock, flags);
4256 list_add_tail(&h->lockup_list, &hpsa_ctlr_list); 4659 list_add_tail(&h->lockup_list, &hpsa_ctlr_list);
4257 spin_unlock_irqrestore(&lockup_detector_lock, flags); 4660 spin_unlock_irqrestore(&lockup_detector_lock, flags);
@@ -4391,7 +4794,7 @@ reinit_after_soft_reset:
4391 spin_lock_irqsave(&h->lock, flags); 4794 spin_lock_irqsave(&h->lock, flags);
4392 h->access.set_intr_mask(h, HPSA_INTR_OFF); 4795 h->access.set_intr_mask(h, HPSA_INTR_OFF);
4393 spin_unlock_irqrestore(&h->lock, flags); 4796 spin_unlock_irqrestore(&h->lock, flags);
4394 free_irq(h->intr[h->intr_mode], h); 4797 free_irqs(h);
4395 rc = hpsa_request_irq(h, hpsa_msix_discard_completions, 4798 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
4396 hpsa_intx_discard_completions); 4799 hpsa_intx_discard_completions);
4397 if (rc) { 4800 if (rc) {
@@ -4441,7 +4844,7 @@ reinit_after_soft_reset:
4441clean4: 4844clean4:
4442 hpsa_free_sg_chain_blocks(h); 4845 hpsa_free_sg_chain_blocks(h);
4443 hpsa_free_cmd_pool(h); 4846 hpsa_free_cmd_pool(h);
4444 free_irq(h->intr[h->intr_mode], h); 4847 free_irqs(h);
4445clean2: 4848clean2:
4446clean1: 4849clean1:
4447 kfree(h); 4850 kfree(h);
@@ -4484,13 +4887,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
4484 */ 4887 */
4485 hpsa_flush_cache(h); 4888 hpsa_flush_cache(h);
4486 h->access.set_intr_mask(h, HPSA_INTR_OFF); 4889 h->access.set_intr_mask(h, HPSA_INTR_OFF);
4487 free_irq(h->intr[h->intr_mode], h); 4890 hpsa_free_irqs_and_disable_msix(h);
4488#ifdef CONFIG_PCI_MSI
4489 if (h->msix_vector)
4490 pci_disable_msix(h->pdev);
4491 else if (h->msi_vector)
4492 pci_disable_msi(h->pdev);
4493#endif /* CONFIG_PCI_MSI */
4494} 4891}
4495 4892
4496static void __devexit hpsa_free_device_info(struct ctlr_info *h) 4893static void __devexit hpsa_free_device_info(struct ctlr_info *h)
@@ -4529,10 +4926,7 @@ static void __devexit hpsa_remove_one(struct pci_dev *pdev)
4529 kfree(h->cmd_pool_bits); 4926 kfree(h->cmd_pool_bits);
4530 kfree(h->blockFetchTable); 4927 kfree(h->blockFetchTable);
4531 kfree(h->hba_inquiry_data); 4928 kfree(h->hba_inquiry_data);
4532 /* 4929 pci_disable_device(pdev);
4533 * Deliberately omit pci_disable_device(): it does something nasty to
4534 * Smart Array controllers that pci_enable_device does not undo
4535 */
4536 pci_release_regions(pdev); 4930 pci_release_regions(pdev);
4537 pci_set_drvdata(pdev, NULL); 4931 pci_set_drvdata(pdev, NULL);
4538 kfree(h); 4932 kfree(h);
@@ -4627,11 +5021,8 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
4627 * 10 = 6 s/g entry or 24k 5021 * 10 = 6 s/g entry or 24k
4628 */ 5022 */
4629 5023
4630 h->reply_pool_wraparound = 1; /* spec: init to 1 */
4631
4632 /* Controller spec: zero out this buffer. */ 5024 /* Controller spec: zero out this buffer. */
4633 memset(h->reply_pool, 0, h->reply_pool_size); 5025 memset(h->reply_pool, 0, h->reply_pool_size);
4634 h->reply_pool_head = h->reply_pool;
4635 5026
4636 bft[7] = SG_ENTRIES_IN_CMD + 4; 5027 bft[7] = SG_ENTRIES_IN_CMD + 4;
4637 calc_bucket_map(bft, ARRAY_SIZE(bft), 5028 calc_bucket_map(bft, ARRAY_SIZE(bft),
@@ -4641,12 +5032,19 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
4641 5032
4642 /* size of controller ring buffer */ 5033 /* size of controller ring buffer */
4643 writel(h->max_commands, &h->transtable->RepQSize); 5034 writel(h->max_commands, &h->transtable->RepQSize);
4644 writel(1, &h->transtable->RepQCount); 5035 writel(h->nreply_queues, &h->transtable->RepQCount);
4645 writel(0, &h->transtable->RepQCtrAddrLow32); 5036 writel(0, &h->transtable->RepQCtrAddrLow32);
4646 writel(0, &h->transtable->RepQCtrAddrHigh32); 5037 writel(0, &h->transtable->RepQCtrAddrHigh32);
4647 writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32); 5038
4648 writel(0, &h->transtable->RepQAddr0High32); 5039 for (i = 0; i < h->nreply_queues; i++) {
4649 writel(CFGTBL_Trans_Performant | use_short_tags, 5040 writel(0, &h->transtable->RepQAddr[i].upper);
5041 writel(h->reply_pool_dhandle +
5042 (h->max_commands * sizeof(u64) * i),
5043 &h->transtable->RepQAddr[i].lower);
5044 }
5045
5046 writel(CFGTBL_Trans_Performant | use_short_tags |
5047 CFGTBL_Trans_enable_directed_msix,
4650 &(h->cfgtable->HostWrite.TransportRequest)); 5048 &(h->cfgtable->HostWrite.TransportRequest));
4651 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 5049 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
4652 hpsa_wait_for_mode_change_ack(h); 5050 hpsa_wait_for_mode_change_ack(h);
@@ -4664,6 +5062,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
4664static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) 5062static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
4665{ 5063{
4666 u32 trans_support; 5064 u32 trans_support;
5065 int i;
4667 5066
4668 if (hpsa_simple_mode) 5067 if (hpsa_simple_mode)
4669 return; 5068 return;
@@ -4672,12 +5071,20 @@ static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
4672 if (!(trans_support & PERFORMANT_MODE)) 5071 if (!(trans_support & PERFORMANT_MODE))
4673 return; 5072 return;
4674 5073
5074 h->nreply_queues = h->msix_vector ? MAX_REPLY_QUEUES : 1;
4675 hpsa_get_max_perf_mode_cmds(h); 5075 hpsa_get_max_perf_mode_cmds(h);
4676 /* Performant mode ring buffer and supporting data structures */ 5076 /* Performant mode ring buffer and supporting data structures */
4677 h->reply_pool_size = h->max_commands * sizeof(u64); 5077 h->reply_pool_size = h->max_commands * sizeof(u64) * h->nreply_queues;
4678 h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size, 5078 h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
4679 &(h->reply_pool_dhandle)); 5079 &(h->reply_pool_dhandle));
4680 5080
5081 for (i = 0; i < h->nreply_queues; i++) {
5082 h->reply_queue[i].head = &h->reply_pool[h->max_commands * i];
5083 h->reply_queue[i].size = h->max_commands;
5084 h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
5085 h->reply_queue[i].current_entry = 0;
5086 }
5087
4681 /* Need a block fetch table for performant mode */ 5088 /* Need a block fetch table for performant mode */
4682 h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) * 5089 h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
4683 sizeof(u32)), GFP_KERNEL); 5090 sizeof(u32)), GFP_KERNEL);
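The allocation above is one contiguous coherent buffer carved into nreply_queues equal rings: queue i starts max_commands 64-bit entries after queue i-1, matching the RepQAddr[i] programming in the previous hunk. A user-space model of the carve-up (sizes illustrative only):

#include <stdint.h>
#include <stdlib.h>

struct reply_pool {
        uint64_t *head;
        size_t size;
        uint8_t wraparound;
        uint32_t current_entry;
};

int main(void)
{
        const unsigned max_commands = 32, nreply_queues = 8;
        uint64_t *pool = calloc((size_t)max_commands * nreply_queues,
                                sizeof(*pool));
        struct reply_pool rq[8];

        if (!pool)
                return 1;
        for (unsigned i = 0; i < nreply_queues; i++) {
                rq[i].head = &pool[max_commands * i];   /* ring i base */
                rq[i].size = max_commands;
                rq[i].wraparound = 1;                   /* spec: init to 1 */
                rq[i].current_entry = 0;
        }
        free(pool);
        return 0;
}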
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 7b28d54fa878..981647989bfd 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -34,7 +34,7 @@ struct access_method {
34 void (*set_intr_mask)(struct ctlr_info *h, unsigned long val); 34 void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
35 unsigned long (*fifo_full)(struct ctlr_info *h); 35 unsigned long (*fifo_full)(struct ctlr_info *h);
36 bool (*intr_pending)(struct ctlr_info *h); 36 bool (*intr_pending)(struct ctlr_info *h);
37 unsigned long (*command_completed)(struct ctlr_info *h); 37 unsigned long (*command_completed)(struct ctlr_info *h, u8 q);
38}; 38};
39 39
40struct hpsa_scsi_dev_t { 40struct hpsa_scsi_dev_t {
@@ -48,6 +48,13 @@ struct hpsa_scsi_dev_t {
48 unsigned char raid_level; /* from inquiry page 0xC1 */ 48 unsigned char raid_level; /* from inquiry page 0xC1 */
49}; 49};
50 50
51struct reply_pool {
52 u64 *head;
53 size_t size;
54 u8 wraparound;
55 u32 current_entry;
56};
57
51struct ctlr_info { 58struct ctlr_info {
52 int ctlr; 59 int ctlr;
53 char devname[8]; 60 char devname[8];
@@ -68,7 +75,7 @@ struct ctlr_info {
68# define DOORBELL_INT 1 75# define DOORBELL_INT 1
69# define SIMPLE_MODE_INT 2 76# define SIMPLE_MODE_INT 2
70# define MEMQ_MODE_INT 3 77# define MEMQ_MODE_INT 3
71 unsigned int intr[4]; 78 unsigned int intr[MAX_REPLY_QUEUES];
72 unsigned int msix_vector; 79 unsigned int msix_vector;
73 unsigned int msi_vector; 80 unsigned int msi_vector;
74 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */ 81 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
@@ -78,7 +85,6 @@ struct ctlr_info {
78 struct list_head reqQ; 85 struct list_head reqQ;
79 struct list_head cmpQ; 86 struct list_head cmpQ;
80 unsigned int Qdepth; 87 unsigned int Qdepth;
81 unsigned int maxQsinceinit;
82 unsigned int maxSG; 88 unsigned int maxSG;
83 spinlock_t lock; 89 spinlock_t lock;
84 int maxsgentries; 90 int maxsgentries;
@@ -111,20 +117,45 @@ struct ctlr_info {
111 unsigned long transMethod; 117 unsigned long transMethod;
112 118
113 /* 119 /*
114 * Performant mode completion buffer 120 * Performant mode completion buffers
115 */ 121 */
116 u64 *reply_pool; 122 u64 *reply_pool;
117 dma_addr_t reply_pool_dhandle;
118 u64 *reply_pool_head;
119 size_t reply_pool_size; 123 size_t reply_pool_size;
120 unsigned char reply_pool_wraparound; 124 struct reply_pool reply_queue[MAX_REPLY_QUEUES];
125 u8 nreply_queues;
126 dma_addr_t reply_pool_dhandle;
121 u32 *blockFetchTable; 127 u32 *blockFetchTable;
122 unsigned char *hba_inquiry_data; 128 unsigned char *hba_inquiry_data;
123 u64 last_intr_timestamp; 129 u64 last_intr_timestamp;
124 u32 last_heartbeat; 130 u32 last_heartbeat;
125 u64 last_heartbeat_timestamp; 131 u64 last_heartbeat_timestamp;
132 u32 heartbeat_sample_interval;
133 atomic_t firmware_flash_in_progress;
126 u32 lockup_detected; 134 u32 lockup_detected;
127 struct list_head lockup_list; 135 struct list_head lockup_list;
136 /* Address of h->q[x] is passed to intr handler to know which queue */
137 u8 q[MAX_REPLY_QUEUES];
138 u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */
139#define HPSATMF_BITS_SUPPORTED (1 << 0)
140#define HPSATMF_PHYS_LUN_RESET (1 << 1)
141#define HPSATMF_PHYS_NEX_RESET (1 << 2)
142#define HPSATMF_PHYS_TASK_ABORT (1 << 3)
143#define HPSATMF_PHYS_TSET_ABORT (1 << 4)
144#define HPSATMF_PHYS_CLEAR_ACA (1 << 5)
145#define HPSATMF_PHYS_CLEAR_TSET (1 << 6)
146#define HPSATMF_PHYS_QRY_TASK (1 << 7)
147#define HPSATMF_PHYS_QRY_TSET (1 << 8)
148#define HPSATMF_PHYS_QRY_ASYNC (1 << 9)
149#define HPSATMF_MASK_SUPPORTED (1 << 16)
150#define HPSATMF_LOG_LUN_RESET (1 << 17)
151#define HPSATMF_LOG_NEX_RESET (1 << 18)
152#define HPSATMF_LOG_TASK_ABORT (1 << 19)
153#define HPSATMF_LOG_TSET_ABORT (1 << 20)
154#define HPSATMF_LOG_CLEAR_ACA (1 << 21)
155#define HPSATMF_LOG_CLEAR_TSET (1 << 22)
156#define HPSATMF_LOG_QRY_TASK (1 << 23)
157#define HPSATMF_LOG_QRY_TSET (1 << 24)
158#define HPSATMF_LOG_QRY_ASYNC (1 << 25)
128}; 159};
129#define HPSA_ABORT_MSG 0 160#define HPSA_ABORT_MSG 0
130#define HPSA_DEVICE_RESET_MSG 1 161#define HPSA_DEVICE_RESET_MSG 1
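Since TMFSupportFlags is a verbatim copy of the config-table word cached at init, capability tests reduce to single-bit masks. A hypothetical helper (not present in the driver) might look like:

/* sketch: does the firmware advertise logical-unit reset? */
static inline bool hpsa_supports_lun_reset(struct ctlr_info *h)
{
        return (h->TMFSupportFlags & HPSATMF_LOG_LUN_RESET) != 0;
}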
@@ -216,9 +247,6 @@ static void SA5_submit_command(struct ctlr_info *h,
216 c->Header.Tag.lower); 247 c->Header.Tag.lower);
217 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); 248 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
218 (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); 249 (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
219 h->commands_outstanding++;
220 if (h->commands_outstanding > h->max_outstanding)
221 h->max_outstanding = h->commands_outstanding;
222} 250}
223 251
224/* 252/*
@@ -254,16 +282,17 @@ static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
254 } 282 }
255} 283}
256 284
257static unsigned long SA5_performant_completed(struct ctlr_info *h) 285static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
258{ 286{
259 unsigned long register_value = FIFO_EMPTY; 287 struct reply_pool *rq = &h->reply_queue[q];
288 unsigned long flags, register_value = FIFO_EMPTY;
260 289
261 /* flush the controller write of the reply queue by reading
262 * outbound doorbell status register.
263 */
264 register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
265 /* msi auto clears the interrupt pending bit. */ 290 /* msi auto clears the interrupt pending bit. */
266 if (!(h->msi_vector || h->msix_vector)) { 291 if (!(h->msi_vector || h->msix_vector)) {
292 /* flush the controller write of the reply queue by reading
293 * outbound doorbell status register.
294 */
295 register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
267 writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR); 296 writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
268 /* Do a read in order to flush the write to the controller 297 /* Do a read in order to flush the write to the controller
269 * (as per spec.) 298 * (as per spec.)
@@ -271,19 +300,20 @@ static unsigned long SA5_performant_completed(struct ctlr_info *h)
271 register_value = readl(h->vaddr + SA5_OUTDB_STATUS); 300 register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
272 } 301 }
273 302
274 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { 303 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
275 register_value = *(h->reply_pool_head); 304 register_value = rq->head[rq->current_entry];
276 (h->reply_pool_head)++; 305 rq->current_entry++;
306 spin_lock_irqsave(&h->lock, flags);
277 h->commands_outstanding--; 307 h->commands_outstanding--;
308 spin_unlock_irqrestore(&h->lock, flags);
278 } else { 309 } else {
279 register_value = FIFO_EMPTY; 310 register_value = FIFO_EMPTY;
280 } 311 }
281 /* Check for wraparound */ 312 /* Check for wraparound */
282 if (h->reply_pool_head == (h->reply_pool + h->max_commands)) { 313 if (rq->current_entry == h->max_commands) {
283 h->reply_pool_head = h->reply_pool; 314 rq->current_entry = 0;
284 h->reply_pool_wraparound ^= 1; 315 rq->wraparound ^= 1;
285 } 316 }
286
287 return register_value; 317 return register_value;
288} 318}
289 319
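The low bit of each 64-bit reply entry acts as a producer phase bit: the controller writes entries whose low bit equals the consumer's current wraparound value, and the consumer flips its expected value each time it wraps, so no head/tail index ever crosses the PCI bus. A stripped-down simulation of that handshake (entries reduced to bare phase bits; the real entries carry command tags):

#include <stdint.h>
#include <stdio.h>

#define RING_ENTRIES 4
#define FIFO_EMPTY   0xffffffffUL

static uint64_t ring[RING_ENTRIES];
static unsigned cur;            /* consumer index  */
static unsigned wrap = 1;       /* spec: init to 1 */

static unsigned long consume(void)
{
        unsigned long v;

        if ((ring[cur] & 1) != wrap)
                return FIFO_EMPTY;      /* producer hasn't written here yet */
        v = (unsigned long)ring[cur++];
        if (cur == RING_ENTRIES) {      /* wrapped: expect flipped phase */
                cur = 0;
                wrap ^= 1;
        }
        return v;
}

int main(void)
{
        /* producer pass 1 writes phase-1 entries */
        for (unsigned i = 0; i < RING_ENTRIES; i++)
                ring[i] = (i << 1) | 1;
        while (consume() != FIFO_EMPTY)
                ;
        printf("drained; now expecting phase %u\n", wrap);  /* prints 0 */
        return 0;
}

Note also why commands_outstanding-- gained a lock in the hunk above: with one vector per queue, several CPUs can run this path at once on different rings.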
@@ -303,13 +333,18 @@ static unsigned long SA5_fifo_full(struct ctlr_info *h)
303 * returns value read from hardware. 333 * returns value read from hardware.
304 * returns FIFO_EMPTY if there is nothing to read 334 * returns FIFO_EMPTY if there is nothing to read
305 */ 335 */
306static unsigned long SA5_completed(struct ctlr_info *h) 336static unsigned long SA5_completed(struct ctlr_info *h,
337 __attribute__((unused)) u8 q)
307{ 338{
308 unsigned long register_value 339 unsigned long register_value
309 = readl(h->vaddr + SA5_REPLY_PORT_OFFSET); 340 = readl(h->vaddr + SA5_REPLY_PORT_OFFSET);
341 unsigned long flags;
310 342
311 if (register_value != FIFO_EMPTY) 343 if (register_value != FIFO_EMPTY) {
344 spin_lock_irqsave(&h->lock, flags);
312 h->commands_outstanding--; 345 h->commands_outstanding--;
346 spin_unlock_irqrestore(&h->lock, flags);
347 }
313 348
314#ifdef HPSA_DEBUG 349#ifdef HPSA_DEBUG
315 if (register_value != FIFO_EMPTY) 350 if (register_value != FIFO_EMPTY)
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 8049815d8c1e..a894f2eca7ac 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -82,6 +82,29 @@
82#define TYPE_CMD 0x00 82#define TYPE_CMD 0x00
83#define TYPE_MSG 0x01 83#define TYPE_MSG 0x01
84 84
85/* Message Types */
86#define HPSA_TASK_MANAGEMENT 0x00
87#define HPSA_RESET 0x01
88#define HPSA_SCAN 0x02
89#define HPSA_NOOP 0x03
90
91#define HPSA_CTLR_RESET_TYPE 0x00
92#define HPSA_BUS_RESET_TYPE 0x01
93#define HPSA_TARGET_RESET_TYPE 0x03
94#define HPSA_LUN_RESET_TYPE 0x04
95#define HPSA_NEXUS_RESET_TYPE 0x05
96
97/* Task Management Functions */
98#define HPSA_TMF_ABORT_TASK 0x00
99#define HPSA_TMF_ABORT_TASK_SET 0x01
100#define HPSA_TMF_CLEAR_ACA 0x02
101#define HPSA_TMF_CLEAR_TASK_SET 0x03
102#define HPSA_TMF_QUERY_TASK 0x04
103#define HPSA_TMF_QUERY_TASK_SET 0x05
104#define HPSA_TMF_QUERY_ASYNCEVENT 0x06
105
106
107
85/* config space register offsets */ 108/* config space register offsets */
86#define CFG_VENDORID 0x00 109#define CFG_VENDORID 0x00
87#define CFG_DEVICEID 0x02 110#define CFG_DEVICEID 0x02
@@ -106,6 +129,7 @@
106#define CFGTBL_Trans_Simple 0x00000002l 129#define CFGTBL_Trans_Simple 0x00000002l
107#define CFGTBL_Trans_Performant 0x00000004l 130#define CFGTBL_Trans_Performant 0x00000004l
108#define CFGTBL_Trans_use_short_tags 0x20000000l 131#define CFGTBL_Trans_use_short_tags 0x20000000l
132#define CFGTBL_Trans_enable_directed_msix (1 << 30)
109 133
110#define CFGTBL_BusType_Ultra2 0x00000001l 134#define CFGTBL_BusType_Ultra2 0x00000001l
111#define CFGTBL_BusType_Ultra3 0x00000002l 135#define CFGTBL_BusType_Ultra3 0x00000002l
@@ -162,6 +186,7 @@ struct SenseSubsystem_info {
162#define BMIC_WRITE 0x27 186#define BMIC_WRITE 0x27
163#define BMIC_CACHE_FLUSH 0xc2 187#define BMIC_CACHE_FLUSH 0xc2
164#define HPSA_CACHE_FLUSH 0x01 /* C2 was already being used by HPSA */ 188#define HPSA_CACHE_FLUSH 0x01 /* C2 was already being used by HPSA */
189#define BMIC_FLASH_FIRMWARE 0xF7
165 190
166/* Command List Structure */ 191/* Command List Structure */
167union SCSI3Addr { 192union SCSI3Addr {
@@ -337,11 +362,17 @@ struct CfgTable {
337 u32 MaxPhysicalDevices; 362 u32 MaxPhysicalDevices;
338 u32 MaxPhysicalDrivesPerLogicalUnit; 363 u32 MaxPhysicalDrivesPerLogicalUnit;
339 u32 MaxPerformantModeCommands; 364 u32 MaxPerformantModeCommands;
340 u8 reserved[0x78 - 0x58]; 365 u32 MaxBlockFetch;
366 u32 PowerConservationSupport;
367 u32 PowerConservationEnable;
368 u32 TMFSupportFlags;
369 u8 TMFTagMask[8];
370 u8 reserved[0x78 - 0x70];
341 u32 misc_fw_support; /* offset 0x78 */ 371 u32 misc_fw_support; /* offset 0x78 */
342#define MISC_FW_DOORBELL_RESET (0x02) 372#define MISC_FW_DOORBELL_RESET (0x02)
343#define MISC_FW_DOORBELL_RESET2 (0x010) 373#define MISC_FW_DOORBELL_RESET2 (0x010)
344 u8 driver_version[32]; 374 u8 driver_version[32];
375
345}; 376};
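The reserved gap shrinks because the four new u32 fields occupy 0x58 through 0x67 and the 8-byte TMFTagMask occupies 0x68 through 0x6f, leaving reserved[0x78 - 0x70] to keep misc_fw_support pinned at offset 0x78. A compile-time guard one could add (hypothetical, not in the patch):

        /* sketch: keep the config-table layout honest */
        BUILD_BUG_ON(offsetof(struct CfgTable, misc_fw_support) != 0x78);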
346 377
347#define NUM_BLOCKFETCH_ENTRIES 8 378#define NUM_BLOCKFETCH_ENTRIES 8
@@ -351,8 +382,8 @@ struct TransTable_struct {
351 u32 RepQCount; 382 u32 RepQCount;
352 u32 RepQCtrAddrLow32; 383 u32 RepQCtrAddrLow32;
353 u32 RepQCtrAddrHigh32; 384 u32 RepQCtrAddrHigh32;
354 u32 RepQAddr0Low32; 385#define MAX_REPLY_QUEUES 8
355 u32 RepQAddr0High32; 386 struct vals32 RepQAddr[MAX_REPLY_QUEUES];
356}; 387};
357 388
358struct hpsa_pci_info { 389struct hpsa_pci_info {
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index d4bf9c12ecd4..45385f531649 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -192,22 +192,27 @@ static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost)
192 192
193static bool sci_controller_isr(struct isci_host *ihost) 193static bool sci_controller_isr(struct isci_host *ihost)
194{ 194{
195 if (sci_controller_completion_queue_has_entries(ihost)) { 195 if (sci_controller_completion_queue_has_entries(ihost))
196 return true; 196 return true;
197 } else {
198 /*
199 * we have a spurious interrupt; it could be that we have already
200 * emptied the completion queue from a previous interrupt */
201 writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
202 197
203 /* 198 /* we have a spurious interrupt; it could be that we have already
204 * There is a race in the hardware that could cause us not to be notified 199 * emptied the completion queue from a previous interrupt
205 * of an interrupt completion if we do not take this step. We will mask 200 * FIXME: really!?
206 * then unmask the interrupts so if there is another interrupt pending 201 */
207 * the clearing of the interrupt source we get the next interrupt message. */ 202 writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
203
204 /* There is a race in the hardware that could cause us not to be
205 * notified of an interrupt completion if we do not take this
206 * step. We will mask then unmask the interrupts so if there is
207 * another interrupt pending the clearing of the interrupt
208 * source we get the next interrupt message.
209 */
210 spin_lock(&ihost->scic_lock);
211 if (test_bit(IHOST_IRQ_ENABLED, &ihost->flags)) {
208 writel(0xFF000000, &ihost->smu_registers->interrupt_mask); 212 writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
209 writel(0, &ihost->smu_registers->interrupt_mask); 213 writel(0, &ihost->smu_registers->interrupt_mask);
210 } 214 }
215 spin_unlock(&ihost->scic_lock);
211 216
212 return false; 217 return false;
213} 218}
@@ -642,7 +647,6 @@ static void isci_host_start_complete(struct isci_host *ihost, enum sci_status co
642 if (completion_status != SCI_SUCCESS) 647 if (completion_status != SCI_SUCCESS)
643 dev_info(&ihost->pdev->dev, 648 dev_info(&ihost->pdev->dev,
644 "controller start timed out, continuing...\n"); 649 "controller start timed out, continuing...\n");
645 isci_host_change_state(ihost, isci_ready);
646 clear_bit(IHOST_START_PENDING, &ihost->flags); 650 clear_bit(IHOST_START_PENDING, &ihost->flags);
647 wake_up(&ihost->eventq); 651 wake_up(&ihost->eventq);
648} 652}
@@ -657,12 +661,7 @@ int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
657 661
658 sas_drain_work(ha); 662 sas_drain_work(ha);
659 663
660 dev_dbg(&ihost->pdev->dev,
661 "%s: ihost->status = %d, time = %ld\n",
662 __func__, isci_host_get_state(ihost), time);
663
664 return 1; 664 return 1;
665
666} 665}
667 666
668/** 667/**
@@ -704,14 +703,15 @@ static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost)
704 703
705static void sci_controller_enable_interrupts(struct isci_host *ihost) 704static void sci_controller_enable_interrupts(struct isci_host *ihost)
706{ 705{
707 BUG_ON(ihost->smu_registers == NULL); 706 set_bit(IHOST_IRQ_ENABLED, &ihost->flags);
708 writel(0, &ihost->smu_registers->interrupt_mask); 707 writel(0, &ihost->smu_registers->interrupt_mask);
709} 708}
710 709
711void sci_controller_disable_interrupts(struct isci_host *ihost) 710void sci_controller_disable_interrupts(struct isci_host *ihost)
712{ 711{
713 BUG_ON(ihost->smu_registers == NULL); 712 clear_bit(IHOST_IRQ_ENABLED, &ihost->flags);
714 writel(0xffffffff, &ihost->smu_registers->interrupt_mask); 713 writel(0xffffffff, &ihost->smu_registers->interrupt_mask);
714 readl(&ihost->smu_registers->interrupt_mask); /* flush */
715} 715}
716 716
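The added readl() is the usual posted-write flush: MMIO writes can sit buffered in PCI bridges, and a read from the same device cannot complete until prior writes have, so the mask is guaranteed to be in effect when the function returns. The idiom in general form (a sketch):

        /* force a posted MMIO write to reach the device */
        writel(val, reg);
        (void) readl(reg);      /* read-back flushes the write */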
717static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost) 717static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost)
@@ -822,7 +822,7 @@ static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *
822 &ihost->scu_registers->sdma.unsolicited_frame_put_pointer); 822 &ihost->scu_registers->sdma.unsolicited_frame_put_pointer);
823} 823}
824 824
825static void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status) 825void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
826{ 826{
827 if (ihost->sm.current_state_id == SCIC_STARTING) { 827 if (ihost->sm.current_state_id == SCIC_STARTING) {
828 /* 828 /*
@@ -849,6 +849,7 @@ static bool is_phy_starting(struct isci_phy *iphy)
849 case SCI_PHY_SUB_AWAIT_SATA_POWER: 849 case SCI_PHY_SUB_AWAIT_SATA_POWER:
850 case SCI_PHY_SUB_AWAIT_SATA_PHY_EN: 850 case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
851 case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN: 851 case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
852 case SCI_PHY_SUB_AWAIT_OSSP_EN:
852 case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: 853 case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
853 case SCI_PHY_SUB_FINAL: 854 case SCI_PHY_SUB_FINAL:
854 return true; 855 return true;
@@ -857,6 +858,39 @@ static bool is_phy_starting(struct isci_phy *iphy)
857 } 858 }
858} 859}
859 860
861bool is_controller_start_complete(struct isci_host *ihost)
862{
863 int i;
864
865 for (i = 0; i < SCI_MAX_PHYS; i++) {
866 struct isci_phy *iphy = &ihost->phys[i];
867 u32 state = iphy->sm.current_state_id;
868
869 /* in apc mode we need to check every phy, in
870 * mpc mode we only need to check phys that have
871 * been configured into a port
872 */
873 if (is_port_config_apc(ihost))
874 /* pass */;
875 else if (!phy_get_non_dummy_port(iphy))
876 continue;
877
878 /* The controller start operation is complete iff:
879 * - all links have been given an opportunity to start
880 * - have no indication of a connected device
881 * - have an indication of a connected device and it has
882 * finished the link training process.
883 */
884 if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
885 (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
886 (iphy->is_in_link_training == true && is_phy_starting(iphy)) ||
887 (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask))
888 return false;
889 }
890
891 return true;
892}
893
860/** 894/**
861 * sci_controller_start_next_phy - start phy 895 * sci_controller_start_next_phy - start phy
862 * @scic: controller 896 * @scic: controller
@@ -877,36 +911,7 @@ static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
877 return status; 911 return status;
878 912
879 if (ihost->next_phy_to_start >= SCI_MAX_PHYS) { 913 if (ihost->next_phy_to_start >= SCI_MAX_PHYS) {
880 bool is_controller_start_complete = true; 914 if (is_controller_start_complete(ihost)) {
881 u32 state;
882 u8 index;
883
884 for (index = 0; index < SCI_MAX_PHYS; index++) {
885 iphy = &ihost->phys[index];
886 state = iphy->sm.current_state_id;
887
888 if (!phy_get_non_dummy_port(iphy))
889 continue;
890
891 /* The controller start operation is complete iff:
892 * - all links have been given an opportunity to start
893 * - have no indication of a connected device
894 * - have an indication of a connected device and it has
895 * finished the link training process.
896 */
897 if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
898 (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
899 (iphy->is_in_link_training == true && is_phy_starting(iphy)) ||
900 (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask)) {
901 is_controller_start_complete = false;
902 break;
903 }
904 }
905
906 /*
907 * The controller has successfully finished the start process.
908 * Inform the SCI Core user and transition to the READY state. */
909 if (is_controller_start_complete == true) {
910 sci_controller_transition_to_ready(ihost, SCI_SUCCESS); 915 sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
911 sci_del_timer(&ihost->phy_timer); 916 sci_del_timer(&ihost->phy_timer);
912 ihost->phy_startup_timer_pending = false; 917 ihost->phy_startup_timer_pending = false;
@@ -987,9 +992,8 @@ static enum sci_status sci_controller_start(struct isci_host *ihost,
987 u16 index; 992 u16 index;
988 993
989 if (ihost->sm.current_state_id != SCIC_INITIALIZED) { 994 if (ihost->sm.current_state_id != SCIC_INITIALIZED) {
990 dev_warn(&ihost->pdev->dev, 995 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
991 "SCIC Controller start operation requested in " 996 __func__, ihost->sm.current_state_id);
992 "invalid state\n");
993 return SCI_FAILURE_INVALID_STATE; 997 return SCI_FAILURE_INVALID_STATE;
994 } 998 }
995 999
@@ -1053,9 +1057,8 @@ void isci_host_scan_start(struct Scsi_Host *shost)
1053 spin_unlock_irq(&ihost->scic_lock); 1057 spin_unlock_irq(&ihost->scic_lock);
1054} 1058}
1055 1059
1056static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status) 1060static void isci_host_stop_complete(struct isci_host *ihost)
1057{ 1061{
1058 isci_host_change_state(ihost, isci_stopped);
1059 sci_controller_disable_interrupts(ihost); 1062 sci_controller_disable_interrupts(ihost);
1060 clear_bit(IHOST_STOP_PENDING, &ihost->flags); 1063 clear_bit(IHOST_STOP_PENDING, &ihost->flags);
1061 wake_up(&ihost->eventq); 1064 wake_up(&ihost->eventq);
@@ -1074,6 +1077,32 @@ static void sci_controller_completion_handler(struct isci_host *ihost)
1074 writel(0, &ihost->smu_registers->interrupt_mask); 1077 writel(0, &ihost->smu_registers->interrupt_mask);
1075} 1078}
1076 1079
1080void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task)
1081{
1082 task->lldd_task = NULL;
1083 if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags) &&
1084 !(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1085 if (test_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags)) {
1086 /* Normal notification (task_done) */
1087 dev_dbg(&ihost->pdev->dev,
1088 "%s: Normal - ireq/task = %p/%p\n",
1089 __func__, ireq, task);
1090
1091 task->task_done(task);
1092 } else {
1093 dev_dbg(&ihost->pdev->dev,
1094 "%s: Error - ireq/task = %p/%p\n",
1095 __func__, ireq, task);
1096
1097 sas_task_abort(task);
1098 }
1099 }
1100 if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
1101 wake_up_all(&ihost->eventq);
1102
1103 if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags))
1104 isci_free_tag(ihost, ireq->io_tag);
1105}
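ireq_done() replaces the two completion lists the old tasklet walked (see the removed code below) with one decision per request. A summary of its outcomes as a comment block (derived from the code above, not new behavior):

/*
 * ireq_done() decision summary:
 *
 *   abort path active, or task already aborted  ->  no libsas callback
 *   else, completed in target                   ->  task->task_done()
 *   else (error path)                           ->  sas_task_abort()
 *
 * then: wake any abort-path waiters if IREQ_ABORT_PATH_ACTIVE was set,
 * and free the tag unless IREQ_NO_AUTO_FREE_TAG defers it.
 */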
1077/** 1106/**
1078 * isci_host_completion_routine() - This function is the delayed service 1107 * isci_host_completion_routine() - This function is the delayed service
1079 * routine that calls the sci core library's completion handler. It's 1108 * routine that calls the sci core library's completion handler. It's
@@ -1082,107 +1111,15 @@ static void sci_controller_completion_handler(struct isci_host *ihost)
1082 * @data: This parameter specifies the ISCI host object 1111 * @data: This parameter specifies the ISCI host object
1083 * 1112 *
1084 */ 1113 */
1085static void isci_host_completion_routine(unsigned long data) 1114void isci_host_completion_routine(unsigned long data)
1086{ 1115{
1087 struct isci_host *ihost = (struct isci_host *)data; 1116 struct isci_host *ihost = (struct isci_host *)data;
1088 struct list_head completed_request_list;
1089 struct list_head errored_request_list;
1090 struct list_head *current_position;
1091 struct list_head *next_position;
1092 struct isci_request *request;
1093 struct isci_request *next_request;
1094 struct sas_task *task;
1095 u16 active; 1117 u16 active;
1096 1118
1097 INIT_LIST_HEAD(&completed_request_list);
1098 INIT_LIST_HEAD(&errored_request_list);
1099
1100 spin_lock_irq(&ihost->scic_lock); 1119 spin_lock_irq(&ihost->scic_lock);
1101
1102 sci_controller_completion_handler(ihost); 1120 sci_controller_completion_handler(ihost);
1103
1104 /* Take the lists of completed I/Os from the host. */
1105
1106 list_splice_init(&ihost->requests_to_complete,
1107 &completed_request_list);
1108
1109 /* Take the list of errored I/Os from the host. */
1110 list_splice_init(&ihost->requests_to_errorback,
1111 &errored_request_list);
1112
1113 spin_unlock_irq(&ihost->scic_lock); 1121 spin_unlock_irq(&ihost->scic_lock);
1114 1122
1115 /* Process any completions in the lists. */
1116 list_for_each_safe(current_position, next_position,
1117 &completed_request_list) {
1118
1119 request = list_entry(current_position, struct isci_request,
1120 completed_node);
1121 task = isci_request_access_task(request);
1122
1123 /* Normal notification (task_done) */
1124 dev_dbg(&ihost->pdev->dev,
1125 "%s: Normal - request/task = %p/%p\n",
1126 __func__,
1127 request,
1128 task);
1129
1130 /* Return the task to libsas */
1131 if (task != NULL) {
1132
1133 task->lldd_task = NULL;
1134 if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1135
1136 /* If the task is already in the abort path,
1137 * the task_done callback cannot be called.
1138 */
1139 task->task_done(task);
1140 }
1141 }
1142
1143 spin_lock_irq(&ihost->scic_lock);
1144 isci_free_tag(ihost, request->io_tag);
1145 spin_unlock_irq(&ihost->scic_lock);
1146 }
1147 list_for_each_entry_safe(request, next_request, &errored_request_list,
1148 completed_node) {
1149
1150 task = isci_request_access_task(request);
1151
1152 /* Use sas_task_abort */
1153 dev_warn(&ihost->pdev->dev,
1154 "%s: Error - request/task = %p/%p\n",
1155 __func__,
1156 request,
1157 task);
1158
1159 if (task != NULL) {
1160
1161 /* Put the task into the abort path if it's not there
1162 * already.
1163 */
1164 if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED))
1165 sas_task_abort(task);
1166
1167 } else {
1168 /* This is a case where the request has completed with a
1169 * status such that it needed further target servicing,
1170 * but the sas_task reference has already been removed
1171 * from the request. Since it was errored, it was not
1172 * being aborted, so there is nothing to do except free
1173 * it.
1174 */
1175
1176 spin_lock_irq(&ihost->scic_lock);
1177 /* Remove the request from the remote device's list
1178 * of pending requests.
1179 */
1180 list_del_init(&request->dev_node);
1181 isci_free_tag(ihost, request->io_tag);
1182 spin_unlock_irq(&ihost->scic_lock);
1183 }
1184 }
1185
1186 /* the coalescence timeout doubles at each encoding step, so 1123 /* the coalescence timeout doubles at each encoding step, so
1187 * update it based on the ilog2 value of the outstanding requests 1124 * update it based on the ilog2 value of the outstanding requests
1188 */ 1125 */
@@ -1213,9 +1150,8 @@ static void isci_host_completion_routine(unsigned long data)
1213static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout) 1150static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
1214{ 1151{
1215 if (ihost->sm.current_state_id != SCIC_READY) { 1152 if (ihost->sm.current_state_id != SCIC_READY) {
1216 dev_warn(&ihost->pdev->dev, 1153 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
1217 "SCIC Controller stop operation requested in " 1154 __func__, ihost->sm.current_state_id);
1218 "invalid state\n");
1219 return SCI_FAILURE_INVALID_STATE; 1155 return SCI_FAILURE_INVALID_STATE;
1220 } 1156 }
1221 1157
@@ -1241,7 +1177,7 @@ static enum sci_status sci_controller_reset(struct isci_host *ihost)
1241 switch (ihost->sm.current_state_id) { 1177 switch (ihost->sm.current_state_id) {
1242 case SCIC_RESET: 1178 case SCIC_RESET:
1243 case SCIC_READY: 1179 case SCIC_READY:
1244 case SCIC_STOPPED: 1180 case SCIC_STOPPING:
1245 case SCIC_FAILED: 1181 case SCIC_FAILED:
1246 /* 1182 /*
1247 * The reset operation is not a graceful cleanup, just 1183 * The reset operation is not a graceful cleanup, just
@@ -1250,13 +1186,50 @@ static enum sci_status sci_controller_reset(struct isci_host *ihost)
1250 sci_change_state(&ihost->sm, SCIC_RESETTING); 1186 sci_change_state(&ihost->sm, SCIC_RESETTING);
1251 return SCI_SUCCESS; 1187 return SCI_SUCCESS;
1252 default: 1188 default:
1253 dev_warn(&ihost->pdev->dev, 1189 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
1254 "SCIC Controller reset operation requested in " 1190 __func__, ihost->sm.current_state_id);
1255 "invalid state\n");
1256 return SCI_FAILURE_INVALID_STATE; 1191 return SCI_FAILURE_INVALID_STATE;
1257 } 1192 }
1258} 1193}
1259 1194
1195static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
1196{
1197 u32 index;
1198 enum sci_status status;
1199 enum sci_status phy_status;
1200
1201 status = SCI_SUCCESS;
1202
1203 for (index = 0; index < SCI_MAX_PHYS; index++) {
1204 phy_status = sci_phy_stop(&ihost->phys[index]);
1205
1206 if (phy_status != SCI_SUCCESS &&
1207 phy_status != SCI_FAILURE_INVALID_STATE) {
1208 status = SCI_FAILURE;
1209
1210 dev_warn(&ihost->pdev->dev,
1211 "%s: Controller stop operation failed to stop "
1212 "phy %d because of status %d.\n",
1213 __func__,
1214 ihost->phys[index].phy_index, phy_status);
1215 }
1216 }
1217
1218 return status;
1219}
1220
1221
1222/**
1223 * isci_host_deinit - shutdown frame reception and dma
1224 * @ihost: host to take down
1225 *
1226 * This is called in either the driver shutdown or the suspend path. In
1227 * the shutdown case libsas went through port teardown and normal device
1228 * removal (i.e. physical links stayed up to service scsi_device removal
1229 * commands). In the suspend case we disable the hardware without
1230 * notifying libsas of the link down events since we want libsas to
1231 * remember the domain across the suspend/resume cycle.
1232 */
1260void isci_host_deinit(struct isci_host *ihost) 1233void isci_host_deinit(struct isci_host *ihost)
1261{ 1234{
1262 int i; 1235 int i;
@@ -1265,17 +1238,6 @@ void isci_host_deinit(struct isci_host *ihost)
1265 for (i = 0; i < isci_gpio_count(ihost); i++) 1238 for (i = 0; i < isci_gpio_count(ihost); i++)
1266 writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]); 1239 writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
1267 1240
1268 isci_host_change_state(ihost, isci_stopping);
1269 for (i = 0; i < SCI_MAX_PORTS; i++) {
1270 struct isci_port *iport = &ihost->ports[i];
1271 struct isci_remote_device *idev, *d;
1272
1273 list_for_each_entry_safe(idev, d, &iport->remote_dev_list, node) {
1274 if (test_bit(IDEV_ALLOCATED, &idev->flags))
1275 isci_remote_device_stop(ihost, idev);
1276 }
1277 }
1278
1279 set_bit(IHOST_STOP_PENDING, &ihost->flags); 1241 set_bit(IHOST_STOP_PENDING, &ihost->flags);
1280 1242
1281 spin_lock_irq(&ihost->scic_lock); 1243 spin_lock_irq(&ihost->scic_lock);
@@ -1284,12 +1246,21 @@ void isci_host_deinit(struct isci_host *ihost)
1284 1246
1285 wait_for_stop(ihost); 1247 wait_for_stop(ihost);
1286 1248
1249 /* phy stop is after controller stop to allow port and device to
1250 * go idle before shutting down the phys, but the expectation is
1251 * that i/o has been shut off well before we reach this
1252 * function.
1253 */
1254 sci_controller_stop_phys(ihost);
1255
1287 /* disable sgpio: where the above wait should give time for the 1256 /* disable sgpio: where the above wait should give time for the
1288 * enclosure to sample the gpios going inactive 1257 * enclosure to sample the gpios going inactive
1289 */ 1258 */
1290 writel(0, &ihost->scu_registers->peg0.sgpio.interface_control); 1259 writel(0, &ihost->scu_registers->peg0.sgpio.interface_control);
1291 1260
1261 spin_lock_irq(&ihost->scic_lock);
1292 sci_controller_reset(ihost); 1262 sci_controller_reset(ihost);
1263 spin_unlock_irq(&ihost->scic_lock);
1293 1264
1294 /* Cancel any/all outstanding port timers */ 1265 /* Cancel any/all outstanding port timers */
1295 for (i = 0; i < ihost->logical_port_entries; i++) { 1266 for (i = 0; i < ihost->logical_port_entries; i++) {
@@ -1328,29 +1299,6 @@ static void __iomem *smu_base(struct isci_host *isci_host)
1328 return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id; 1299 return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
1329} 1300}
1330 1301
1331static void isci_user_parameters_get(struct sci_user_parameters *u)
1332{
1333 int i;
1334
1335 for (i = 0; i < SCI_MAX_PHYS; i++) {
1336 struct sci_phy_user_params *u_phy = &u->phys[i];
1337
1338 u_phy->max_speed_generation = phy_gen;
1339
1340 /* we are not exporting these for now */
1341 u_phy->align_insertion_frequency = 0x7f;
1342 u_phy->in_connection_align_insertion_frequency = 0xff;
1343 u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
1344 }
1345
1346 u->stp_inactivity_timeout = stp_inactive_to;
1347 u->ssp_inactivity_timeout = ssp_inactive_to;
1348 u->stp_max_occupancy_timeout = stp_max_occ_to;
1349 u->ssp_max_occupancy_timeout = ssp_max_occ_to;
1350 u->no_outbound_task_timeout = no_outbound_task_to;
1351 u->max_concurr_spinup = max_concurr_spinup;
1352}
1353
1354static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm) 1302static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
1355{ 1303{
1356 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); 1304 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
@@ -1510,32 +1458,6 @@ static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
1510 sci_controller_set_interrupt_coalescence(ihost, 0, 0); 1458 sci_controller_set_interrupt_coalescence(ihost, 0, 0);
1511} 1459}
1512 1460
1513static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
1514{
1515 u32 index;
1516 enum sci_status status;
1517 enum sci_status phy_status;
1518
1519 status = SCI_SUCCESS;
1520
1521 for (index = 0; index < SCI_MAX_PHYS; index++) {
1522 phy_status = sci_phy_stop(&ihost->phys[index]);
1523
1524 if (phy_status != SCI_SUCCESS &&
1525 phy_status != SCI_FAILURE_INVALID_STATE) {
1526 status = SCI_FAILURE;
1527
1528 dev_warn(&ihost->pdev->dev,
1529 "%s: Controller stop operation failed to stop "
1530 "phy %d because of status %d.\n",
1531 __func__,
1532 ihost->phys[index].phy_index, phy_status);
1533 }
1534 }
1535
1536 return status;
1537}
1538
1539static enum sci_status sci_controller_stop_ports(struct isci_host *ihost) 1461static enum sci_status sci_controller_stop_ports(struct isci_host *ihost)
1540{ 1462{
1541 u32 index; 1463 u32 index;
@@ -1595,10 +1517,11 @@ static void sci_controller_stopping_state_enter(struct sci_base_state_machine *s
1595{ 1517{
1596 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); 1518 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1597 1519
1598 /* Stop all of the components for this controller */
1599 sci_controller_stop_phys(ihost);
1600 sci_controller_stop_ports(ihost);
1601 sci_controller_stop_devices(ihost); 1520 sci_controller_stop_devices(ihost);
1521 sci_controller_stop_ports(ihost);
1522
1523 if (!sci_controller_has_remote_devices_stopping(ihost))
1524 isci_host_stop_complete(ihost);
1602} 1525}
1603 1526
1604static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm) 1527static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm)
@@ -1624,6 +1547,9 @@ static void sci_controller_reset_hardware(struct isci_host *ihost)
1624 1547
1625 /* The write to the UFQGP clears the UFQPR */ 1548 /* The write to the UFQGP clears the UFQPR */
1626 writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer); 1549 writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
1550
1551 /* clear all interrupts */
1552 writel(~SMU_INTERRUPT_STATUS_RESERVED_MASK, &ihost->smu_registers->interrupt_status);
1627} 1553}
1628 1554
1629static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm) 1555static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm)
@@ -1655,59 +1581,9 @@ static const struct sci_base_state sci_controller_state_table[] = {
1655 .enter_state = sci_controller_stopping_state_enter, 1581 .enter_state = sci_controller_stopping_state_enter,
1656 .exit_state = sci_controller_stopping_state_exit, 1582 .exit_state = sci_controller_stopping_state_exit,
1657 }, 1583 },
1658 [SCIC_STOPPED] = {},
1659 [SCIC_FAILED] = {} 1584 [SCIC_FAILED] = {}
1660}; 1585};
1661 1586
1662static void sci_controller_set_default_config_parameters(struct isci_host *ihost)
1663{
1664 /* these defaults are overridden by the platform / firmware */
1665 u16 index;
1666
1667 /* Default to APC mode. */
1668 ihost->oem_parameters.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
1669
1670 /* Default to APC mode. */
1671 ihost->oem_parameters.controller.max_concurr_spin_up = 1;
1672
1673 /* Default to no SSC operation. */
1674 ihost->oem_parameters.controller.do_enable_ssc = false;
1675
1676 /* Default to short cables on all phys. */
1677 ihost->oem_parameters.controller.cable_selection_mask = 0;
1678
1679 /* Initialize all of the port parameter information to narrow ports. */
1680 for (index = 0; index < SCI_MAX_PORTS; index++) {
1681 ihost->oem_parameters.ports[index].phy_mask = 0;
1682 }
1683
1684 /* Initialize all of the phy parameter information. */
1685 for (index = 0; index < SCI_MAX_PHYS; index++) {
1686 /* Default to 3G (i.e. Gen 2). */
1687 ihost->user_parameters.phys[index].max_speed_generation =
1688 SCIC_SDS_PARM_GEN2_SPEED;
1689
1690 /* the frequencies cannot be 0 */
1691 ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f;
1692 ihost->user_parameters.phys[index].in_connection_align_insertion_frequency = 0xff;
1693 ihost->user_parameters.phys[index].notify_enable_spin_up_insertion_frequency = 0x33;
1694
1695 /*
1696 * Previous Vitesse-based expanders had an arbitration issue that
1697 * is worked around by having the upper 32-bits of SAS address
1698 * with a value greater than the Vitesse company identifier.
1699 * Hence, usage of 0x5FCFFFFF. */
1700 ihost->oem_parameters.phys[index].sas_address.low = 0x1 + ihost->id;
1701 ihost->oem_parameters.phys[index].sas_address.high = 0x5FCFFFFF;
1702 }
1703
1704 ihost->user_parameters.stp_inactivity_timeout = 5;
1705 ihost->user_parameters.ssp_inactivity_timeout = 5;
1706 ihost->user_parameters.stp_max_occupancy_timeout = 5;
1707 ihost->user_parameters.ssp_max_occupancy_timeout = 20;
1708 ihost->user_parameters.no_outbound_task_timeout = 2;
1709}
1710
1711static void controller_timeout(unsigned long data) 1587static void controller_timeout(unsigned long data)
1712{ 1588{
1713 struct sci_timer *tmr = (struct sci_timer *)data; 1589 struct sci_timer *tmr = (struct sci_timer *)data;
@@ -1724,7 +1600,7 @@ static void controller_timeout(unsigned long data)
1724 sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT); 1600 sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT);
1725 else if (sm->current_state_id == SCIC_STOPPING) { 1601 else if (sm->current_state_id == SCIC_STOPPING) {
1726 sci_change_state(sm, SCIC_FAILED); 1602 sci_change_state(sm, SCIC_FAILED);
1727 isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT); 1603 isci_host_stop_complete(ihost);
1728 } else /* / @todo Now what do we want to do in this case? */ 1604 } else /* / @todo Now what do we want to do in this case? */
1729 dev_err(&ihost->pdev->dev, 1605 dev_err(&ihost->pdev->dev,
1730 "%s: Controller timer fired when controller was not " 1606 "%s: Controller timer fired when controller was not "
@@ -1764,9 +1640,6 @@ static enum sci_status sci_controller_construct(struct isci_host *ihost,
1764 1640
1765 sci_init_timer(&ihost->timer, controller_timeout); 1641 sci_init_timer(&ihost->timer, controller_timeout);
1766 1642
1767 /* Initialize the User and OEM parameters to default values. */
1768 sci_controller_set_default_config_parameters(ihost);
1769
1770 return sci_controller_reset(ihost); 1643 return sci_controller_reset(ihost);
1771} 1644}
1772 1645
@@ -1846,27 +1719,6 @@ int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version)
1846 return 0; 1719 return 0;
1847} 1720}
1848 1721
1849static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
1850{
1851 u32 state = ihost->sm.current_state_id;
1852 struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
1853
1854 if (state == SCIC_RESET ||
1855 state == SCIC_INITIALIZING ||
1856 state == SCIC_INITIALIZED) {
1857 u8 oem_version = pci_info->orom ? pci_info->orom->hdr.version :
1858 ISCI_ROM_VER_1_0;
1859
1860 if (sci_oem_parameters_validate(&ihost->oem_parameters,
1861 oem_version))
1862 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
1863
1864 return SCI_SUCCESS;
1865 }
1866
1867 return SCI_FAILURE_INVALID_STATE;
1868}
1869
1870static u8 max_spin_up(struct isci_host *ihost) 1722static u8 max_spin_up(struct isci_host *ihost)
1871{ 1723{
1872 if (ihost->user_parameters.max_concurr_spinup) 1724 if (ihost->user_parameters.max_concurr_spinup)
@@ -1914,7 +1766,7 @@ static void power_control_timeout(unsigned long data)
1914 ihost->power_control.phys_granted_power++; 1766 ihost->power_control.phys_granted_power++;
1915 sci_phy_consume_power_handler(iphy); 1767 sci_phy_consume_power_handler(iphy);
1916 1768
1917 if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) { 1769 if (iphy->protocol == SAS_PROTOCOL_SSP) {
1918 u8 j; 1770 u8 j;
1919 1771
1920 for (j = 0; j < SCI_MAX_PHYS; j++) { 1772 for (j = 0; j < SCI_MAX_PHYS; j++) {
@@ -1988,7 +1840,7 @@ void sci_controller_power_control_queue_insert(struct isci_host *ihost,
 			       sizeof(current_phy->frame_rcvd.iaf.sas_addr));
 
 			if (current_phy->sm.current_state_id == SCI_PHY_READY &&
-			    current_phy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS &&
+			    current_phy->protocol == SAS_PROTOCOL_SSP &&
 			    other == 0) {
 				sci_phy_consume_power_handler(iphy);
 				break;
@@ -2279,9 +2131,8 @@ static enum sci_status sci_controller_initialize(struct isci_host *ihost)
 	unsigned long i, state, val;
 
 	if (ihost->sm.current_state_id != SCIC_RESET) {
-		dev_warn(&ihost->pdev->dev,
-			 "SCIC Controller initialize operation requested "
-			 "in invalid state\n");
+		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+			 __func__, ihost->sm.current_state_id);
 		return SCI_FAILURE_INVALID_STATE;
 	}
 
@@ -2384,96 +2235,76 @@ static enum sci_status sci_controller_initialize(struct isci_host *ihost)
 	return result;
 }
 
-static enum sci_status sci_user_parameters_set(struct isci_host *ihost,
-					       struct sci_user_parameters *sci_parms)
-{
-	u32 state = ihost->sm.current_state_id;
-
-	if (state == SCIC_RESET ||
-	    state == SCIC_INITIALIZING ||
-	    state == SCIC_INITIALIZED) {
-		u16 index;
-
-		/*
-		 * Validate the user parameters.  If they are not legal, then
-		 * return a failure.
-		 */
-		for (index = 0; index < SCI_MAX_PHYS; index++) {
-			struct sci_phy_user_params *user_phy;
-
-			user_phy = &sci_parms->phys[index];
-
-			if (!((user_phy->max_speed_generation <=
-						SCIC_SDS_PARM_MAX_SPEED) &&
-			      (user_phy->max_speed_generation >
-						SCIC_SDS_PARM_NO_SPEED)))
-				return SCI_FAILURE_INVALID_PARAMETER_VALUE;
-
-			if (user_phy->in_connection_align_insertion_frequency <
-					3)
-				return SCI_FAILURE_INVALID_PARAMETER_VALUE;
-
-			if ((user_phy->in_connection_align_insertion_frequency <
-						3) ||
-			    (user_phy->align_insertion_frequency == 0) ||
-			    (user_phy->notify_enable_spin_up_insertion_frequency ==
-						0))
-				return SCI_FAILURE_INVALID_PARAMETER_VALUE;
-		}
-
-		if ((sci_parms->stp_inactivity_timeout == 0) ||
-		    (sci_parms->ssp_inactivity_timeout == 0) ||
-		    (sci_parms->stp_max_occupancy_timeout == 0) ||
-		    (sci_parms->ssp_max_occupancy_timeout == 0) ||
-		    (sci_parms->no_outbound_task_timeout == 0))
-			return SCI_FAILURE_INVALID_PARAMETER_VALUE;
-
-		memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms));
-
-		return SCI_SUCCESS;
-	}
-
-	return SCI_FAILURE_INVALID_STATE;
-}
-
-static int sci_controller_mem_init(struct isci_host *ihost)
+static int sci_controller_dma_alloc(struct isci_host *ihost)
 {
 	struct device *dev = &ihost->pdev->dev;
-	dma_addr_t dma;
 	size_t size;
-	int err;
+	int i;
+
+	/* detect re-initialization */
+	if (ihost->completion_queue)
+		return 0;
 
 	size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32);
-	ihost->completion_queue = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
+	ihost->completion_queue = dmam_alloc_coherent(dev, size, &ihost->cq_dma,
+						      GFP_KERNEL);
 	if (!ihost->completion_queue)
 		return -ENOMEM;
 
-	writel(lower_32_bits(dma), &ihost->smu_registers->completion_queue_lower);
-	writel(upper_32_bits(dma), &ihost->smu_registers->completion_queue_upper);
-
 	size = ihost->remote_node_entries * sizeof(union scu_remote_node_context);
-	ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma,
+	ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &ihost->rnc_dma,
 							       GFP_KERNEL);
+
 	if (!ihost->remote_node_context_table)
 		return -ENOMEM;
 
-	writel(lower_32_bits(dma), &ihost->smu_registers->remote_node_context_lower);
-	writel(upper_32_bits(dma), &ihost->smu_registers->remote_node_context_upper);
-
 	size = ihost->task_context_entries * sizeof(struct scu_task_context),
-	ihost->task_context_table = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
+	ihost->task_context_table = dmam_alloc_coherent(dev, size, &ihost->tc_dma,
+							GFP_KERNEL);
 	if (!ihost->task_context_table)
 		return -ENOMEM;
 
-	ihost->task_context_dma = dma;
-	writel(lower_32_bits(dma), &ihost->smu_registers->host_task_table_lower);
-	writel(upper_32_bits(dma), &ihost->smu_registers->host_task_table_upper);
+	size = SCI_UFI_TOTAL_SIZE;
+	ihost->ufi_buf = dmam_alloc_coherent(dev, size, &ihost->ufi_dma, GFP_KERNEL);
+	if (!ihost->ufi_buf)
+		return -ENOMEM;
+
+	for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
+		struct isci_request *ireq;
+		dma_addr_t dma;
+
+		ireq = dmam_alloc_coherent(dev, sizeof(*ireq), &dma, GFP_KERNEL);
+		if (!ireq)
+			return -ENOMEM;
+
+		ireq->tc = &ihost->task_context_table[i];
+		ireq->owning_controller = ihost;
+		ireq->request_daddr = dma;
+		ireq->isci_host = ihost;
+		ihost->reqs[i] = ireq;
+	}
+
+	return 0;
+}
+
+static int sci_controller_mem_init(struct isci_host *ihost)
+{
+	int err = sci_controller_dma_alloc(ihost);
 
-	err = sci_unsolicited_frame_control_construct(ihost);
 	if (err)
 		return err;
 
+	writel(lower_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_lower);
+	writel(upper_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_upper);
+
+	writel(lower_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_lower);
+	writel(upper_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_upper);
+
+	writel(lower_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_lower);
+	writel(upper_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_upper);
+
+	sci_unsolicited_frame_control_construct(ihost);
+
 	/*
 	 * Inform the silicon as to the location of the UF headers and
 	 * address table.
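
The hunk above separates one-time DMA allocation (sci_controller_dma_alloc, guarded by the "detect re-initialization" check) from register programming (sci_controller_mem_init), so a controller reset can reprogram the silicon without reallocating memory. A minimal userspace sketch of that split, assuming plain calloc() in place of dmam_alloc_coherent() and an array standing in for the memory-mapped SMU registers:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct ctrl {
	uint32_t *completion_queue;
	uint64_t cq_dma;          /* bus address kept for later reprogramming */
	uint64_t hw_regs[2];      /* models completion_queue_lower/upper */
};

static int ctrl_dma_alloc(struct ctrl *c)
{
	if (c->completion_queue)  /* detect re-initialization: allocate once */
		return 0;

	c->completion_queue = calloc(512, sizeof(uint32_t));
	if (!c->completion_queue)
		return -1;
	c->cq_dma = (uintptr_t)c->completion_queue;  /* fake bus address */
	return 0;
}

static int ctrl_mem_init(struct ctrl *c)
{
	if (ctrl_dma_alloc(c))
		return -1;

	/* safe to run on every init: only rewrites "hardware" registers */
	c->hw_regs[0] = (uint32_t)c->cq_dma;          /* lower_32_bits() */
	c->hw_regs[1] = (uint32_t)(c->cq_dma >> 32);  /* upper_32_bits() */
	return 0;
}

int main(void)
{
	struct ctrl c = { 0 };

	ctrl_mem_init(&c);
	ctrl_mem_init(&c);  /* second init reuses the first allocation */
	printf("cq=%p regs=%llx:%llx\n", (void *)c.completion_queue,
	       (unsigned long long)c.hw_regs[1],
	       (unsigned long long)c.hw_regs[0]);
	free(c.completion_queue);
	return 0;
}
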
@@ -2491,22 +2322,22 @@ static int sci_controller_mem_init(struct isci_host *ihost)
 	return 0;
 }
 
+/**
+ * isci_host_init - (re-)initialize hardware and internal (private) state
+ * @ihost: host to init
+ *
+ * Any public facing objects (like asd_sas_port, and asd_sas_phys), or
+ * one-time initialization objects like locks and waitqueues, are
+ * not touched (they are initialized in isci_host_alloc)
+ */
 int isci_host_init(struct isci_host *ihost)
 {
-	int err = 0, i;
+	int i, err;
 	enum sci_status status;
-	struct sci_user_parameters sci_user_params;
-	struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
-
-	spin_lock_init(&ihost->state_lock);
-	spin_lock_init(&ihost->scic_lock);
-	init_waitqueue_head(&ihost->eventq);
-
-	isci_host_change_state(ihost, isci_starting);
-
-	status = sci_controller_construct(ihost, scu_base(ihost),
-					  smu_base(ihost));
 
+	spin_lock_irq(&ihost->scic_lock);
+	status = sci_controller_construct(ihost, scu_base(ihost), smu_base(ihost));
+	spin_unlock_irq(&ihost->scic_lock);
 	if (status != SCI_SUCCESS) {
 		dev_err(&ihost->pdev->dev,
 			"%s: sci_controller_construct failed - status = %x\n",
@@ -2515,48 +2346,6 @@ int isci_host_init(struct isci_host *ihost)
 		return -ENODEV;
 	}
 
-	ihost->sas_ha.dev = &ihost->pdev->dev;
-	ihost->sas_ha.lldd_ha = ihost;
-
-	/*
-	 * grab initial values stored in the controller object for OEM and USER
-	 * parameters
-	 */
-	isci_user_parameters_get(&sci_user_params);
-	status = sci_user_parameters_set(ihost, &sci_user_params);
-	if (status != SCI_SUCCESS) {
-		dev_warn(&ihost->pdev->dev,
-			 "%s: sci_user_parameters_set failed\n",
-			 __func__);
-		return -ENODEV;
-	}
-
-	/* grab any OEM parameters specified in orom */
-	if (pci_info->orom) {
-		status = isci_parse_oem_parameters(&ihost->oem_parameters,
-						   pci_info->orom,
-						   ihost->id);
-		if (status != SCI_SUCCESS) {
-			dev_warn(&ihost->pdev->dev,
-				 "parsing firmware oem parameters failed\n");
-			return -EINVAL;
-		}
-	}
-
-	status = sci_oem_parameters_set(ihost);
-	if (status != SCI_SUCCESS) {
-		dev_warn(&ihost->pdev->dev,
-			 "%s: sci_oem_parameters_set failed\n",
-			 __func__);
-		return -ENODEV;
-	}
-
-	tasklet_init(&ihost->completion_tasklet,
-		     isci_host_completion_routine, (unsigned long)ihost);
-
-	INIT_LIST_HEAD(&ihost->requests_to_complete);
-	INIT_LIST_HEAD(&ihost->requests_to_errorback);
-
 	spin_lock_irq(&ihost->scic_lock);
 	status = sci_controller_initialize(ihost);
 	spin_unlock_irq(&ihost->scic_lock);
@@ -2572,43 +2361,12 @@ int isci_host_init(struct isci_host *ihost)
 	if (err)
 		return err;
 
-	for (i = 0; i < SCI_MAX_PORTS; i++)
-		isci_port_init(&ihost->ports[i], ihost, i);
-
-	for (i = 0; i < SCI_MAX_PHYS; i++)
-		isci_phy_init(&ihost->phys[i], ihost, i);
-
 	/* enable sgpio */
 	writel(1, &ihost->scu_registers->peg0.sgpio.interface_control);
 	for (i = 0; i < isci_gpio_count(ihost); i++)
 		writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
 	writel(0, &ihost->scu_registers->peg0.sgpio.vendor_specific_code);
 
-	for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
-		struct isci_remote_device *idev = &ihost->devices[i];
-
-		INIT_LIST_HEAD(&idev->reqs_in_process);
-		INIT_LIST_HEAD(&idev->node);
-	}
-
-	for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
-		struct isci_request *ireq;
-		dma_addr_t dma;
-
-		ireq = dmam_alloc_coherent(&ihost->pdev->dev,
-					   sizeof(struct isci_request), &dma,
-					   GFP_KERNEL);
-		if (!ireq)
-			return -ENOMEM;
-
-		ireq->tc = &ihost->task_context_table[i];
-		ireq->owning_controller = ihost;
-		spin_lock_init(&ireq->state_lock);
-		ireq->request_daddr = dma;
-		ireq->isci_host = ihost;
-		ihost->reqs[i] = ireq;
-	}
-
 	return 0;
 }
 
@@ -2654,7 +2412,7 @@ void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
 	}
 }
 
-static bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost)
+bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost)
 {
 	u32 index;
 
@@ -2680,7 +2438,7 @@ void sci_controller_remote_device_stopped(struct isci_host *ihost,
 	}
 
 	if (!sci_controller_has_remote_devices_stopping(ihost))
-		sci_change_state(&ihost->sm, SCIC_STOPPED);
+		isci_host_stop_complete(ihost);
 }
 
 void sci_controller_post_request(struct isci_host *ihost, u32 request)
@@ -2842,7 +2600,8 @@ enum sci_status sci_controller_start_io(struct isci_host *ihost,
 	enum sci_status status;
 
 	if (ihost->sm.current_state_id != SCIC_READY) {
-		dev_warn(&ihost->pdev->dev, "invalid state to start I/O");
+		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+			 __func__, ihost->sm.current_state_id);
 		return SCI_FAILURE_INVALID_STATE;
 	}
 
@@ -2866,22 +2625,26 @@ enum sci_status sci_controller_terminate_request(struct isci_host *ihost,
 	enum sci_status status;
 
 	if (ihost->sm.current_state_id != SCIC_READY) {
-		dev_warn(&ihost->pdev->dev,
-			 "invalid state to terminate request\n");
+		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+			 __func__, ihost->sm.current_state_id);
 		return SCI_FAILURE_INVALID_STATE;
 	}
-
 	status = sci_io_request_terminate(ireq);
-	if (status != SCI_SUCCESS)
-		return status;
 
-	/*
-	 * Utilize the original post context command and or in the POST_TC_ABORT
-	 * request sub-type.
-	 */
-	sci_controller_post_request(ihost,
-		ireq->post_context | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
-	return SCI_SUCCESS;
+	dev_dbg(&ihost->pdev->dev, "%s: status=%d; ireq=%p; flags=%lx\n",
+		__func__, status, ireq, ireq->flags);
+
+	if ((status == SCI_SUCCESS) &&
+	    !test_bit(IREQ_PENDING_ABORT, &ireq->flags) &&
+	    !test_and_set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags)) {
+		/* Utilize the original post context command and or in the
+		 * POST_TC_ABORT request sub-type.
+		 */
+		sci_controller_post_request(
+			ihost, ireq->post_context |
+			SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
+	}
+	return status;
 }
 
 /**
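
The reworked terminate path above posts the task-context abort at most once: a plain test_bit() screens out requests already pending abort, and test_and_set_bit() guarantees only one caller wins the race to post. A minimal sketch of that post-once idiom, using C11 atomics in place of the kernel's bit helpers; the request struct and flag names here are invented for the example:

#include <stdatomic.h>
#include <stdio.h>

#define IREQ_PENDING_ABORT   0
#define IREQ_TC_ABORT_POSTED 1

struct req {
	atomic_uint flags;
};

static int test_bit(int nr, atomic_uint *flags)
{
	return (atomic_load(flags) >> nr) & 1;
}

static int test_and_set_bit(int nr, atomic_uint *flags)
{
	/* returns the bit's previous value; only one caller ever sees 0 */
	return (atomic_fetch_or(flags, 1u << nr) >> nr) & 1;
}

static void terminate(struct req *r)
{
	if (!test_bit(IREQ_PENDING_ABORT, &r->flags) &&
	    !test_and_set_bit(IREQ_TC_ABORT_POSTED, &r->flags))
		printf("posting TC abort (exactly once)\n");
	else
		printf("abort already pending/posted, skipping\n");
}

int main(void)
{
	struct req r = { 0 };

	terminate(&r);  /* posts the abort */
	terminate(&r);  /* second call finds TC_ABORT_POSTED already set */
	return 0;
}
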
@@ -2915,7 +2678,8 @@ enum sci_status sci_controller_complete_io(struct isci_host *ihost,
 		clear_bit(IREQ_ACTIVE, &ireq->flags);
 		return SCI_SUCCESS;
 	default:
-		dev_warn(&ihost->pdev->dev, "invalid state to complete I/O");
+		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+			 __func__, ihost->sm.current_state_id);
 		return SCI_FAILURE_INVALID_STATE;
 	}
 
@@ -2926,7 +2690,8 @@ enum sci_status sci_controller_continue_io(struct isci_request *ireq)
 	struct isci_host *ihost = ireq->owning_controller;
 
 	if (ihost->sm.current_state_id != SCIC_READY) {
-		dev_warn(&ihost->pdev->dev, "invalid state to continue I/O");
+		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+			 __func__, ihost->sm.current_state_id);
 		return SCI_FAILURE_INVALID_STATE;
 	}
 
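
Several hunks in this file converge on one warning format, "%s invalid state: %d\n" with __func__ and the numeric state id, instead of a hand-written message per call site. A small sketch of that guard-clause pattern; the state machine here is a stand-in, not the driver's:

#include <stdio.h>

enum state { ST_RESET, ST_READY, ST_STOPPING };

static int start_io(enum state current_state)
{
	if (current_state != ST_READY) {
		fprintf(stderr, "%s invalid state: %d\n",
			__func__, current_state);
		return -1;      /* models SCI_FAILURE_INVALID_STATE */
	}
	return 0;
}

int main(void)
{
	start_io(ST_RESET);   /* prints "start_io invalid state: 0" */
	return start_io(ST_READY);
}
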
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index adbad69d1069..9ab58e0540e7 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -55,6 +55,7 @@
 #ifndef _SCI_HOST_H_
 #define _SCI_HOST_H_
 
+#include <scsi/sas_ata.h>
 #include "remote_device.h"
 #include "phy.h"
 #include "isci.h"
@@ -108,6 +109,8 @@ struct sci_port_configuration_agent;
 typedef void (*port_config_fn)(struct isci_host *,
 			       struct sci_port_configuration_agent *,
 			       struct isci_port *, struct isci_phy *);
+bool is_port_config_apc(struct isci_host *ihost);
+bool is_controller_start_complete(struct isci_host *ihost);
 
 struct sci_port_configuration_agent {
 	u16 phy_configured_mask;
@@ -157,13 +160,17 @@ struct isci_host {
 	struct sci_power_control power_control;
 	u8 io_request_sequence[SCI_MAX_IO_REQUESTS];
 	struct scu_task_context *task_context_table;
-	dma_addr_t task_context_dma;
+	dma_addr_t tc_dma;
 	union scu_remote_node_context *remote_node_context_table;
+	dma_addr_t rnc_dma;
 	u32 *completion_queue;
+	dma_addr_t cq_dma;
 	u32 completion_queue_get;
 	u32 logical_port_entries;
 	u32 remote_node_entries;
 	u32 task_context_entries;
+	void *ufi_buf;
+	dma_addr_t ufi_dma;
 	struct sci_unsolicited_frame_control uf_control;
 
 	/* phy startup */
@@ -190,17 +197,13 @@ struct isci_host {
 	struct asd_sas_port sas_ports[SCI_MAX_PORTS];
 	struct sas_ha_struct sas_ha;
 
-	spinlock_t state_lock;
 	struct pci_dev *pdev;
-	enum isci_status status;
 	#define IHOST_START_PENDING 0
 	#define IHOST_STOP_PENDING 1
+	#define IHOST_IRQ_ENABLED 2
 	unsigned long flags;
 	wait_queue_head_t eventq;
-	struct Scsi_Host *shost;
 	struct tasklet_struct completion_tasklet;
-	struct list_head requests_to_complete;
-	struct list_head requests_to_errorback;
 	spinlock_t scic_lock;
 	struct isci_request *reqs[SCI_MAX_IO_REQUESTS];
 	struct isci_remote_device devices[SCI_MAX_REMOTE_DEVICES];
@@ -274,13 +277,6 @@ enum sci_controller_states {
 	SCIC_STOPPING,
 
 	/**
-	 * This state indicates that the controller has successfully been stopped.
-	 * In this state no new IO operations are permitted.
-	 * This state is entered from the STOPPING state.
-	 */
-	SCIC_STOPPED,
-
-	/**
 	 * This state indicates that the controller could not successfully be
 	 * initialized. In this state no new IO operations are permitted.
 	 * This state is entered from the INITIALIZING state.
@@ -309,32 +305,16 @@ static inline struct isci_pci_info *to_pci_info(struct pci_dev *pdev)
 	return pci_get_drvdata(pdev);
 }
 
+static inline struct Scsi_Host *to_shost(struct isci_host *ihost)
+{
+	return ihost->sas_ha.core.shost;
+}
+
 #define for_each_isci_host(id, ihost, pdev) \
 	for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \
 	     id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \
 	     ihost = to_pci_info(pdev)->hosts[++id])
 
-static inline enum isci_status isci_host_get_state(struct isci_host *isci_host)
-{
-	return isci_host->status;
-}
-
-static inline void isci_host_change_state(struct isci_host *isci_host,
-					  enum isci_status status)
-{
-	unsigned long flags;
-
-	dev_dbg(&isci_host->pdev->dev,
-		"%s: isci_host = %p, state = 0x%x",
-		__func__,
-		isci_host,
-		status);
-	spin_lock_irqsave(&isci_host->state_lock, flags);
-	isci_host->status = status;
-	spin_unlock_irqrestore(&isci_host->state_lock, flags);
-
-}
-
 static inline void wait_for_start(struct isci_host *ihost)
 {
 	wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags));
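
wait_for_start() above sleeps on the host's event queue until the IHOST_START_PENDING bit clears; the completion side clears the bit and wakes the queue. A userspace analogue of that wait_event()/test_bit() pairing, with a mutex-protected flag word and a condition variable standing in for the wait queue (names invented for the example):

#include <pthread.h>
#include <stdio.h>

#define HOST_START_PENDING (1u << 0)

struct host {
	unsigned flags;
	pthread_mutex_t lock;
	pthread_cond_t eventq;
};

static void wait_for_start(struct host *h)
{
	pthread_mutex_lock(&h->lock);
	while (h->flags & HOST_START_PENDING)   /* re-check after each wakeup */
		pthread_cond_wait(&h->eventq, &h->lock);
	pthread_mutex_unlock(&h->lock);
}

static void *start_complete(void *arg)
{
	struct host *h = arg;

	pthread_mutex_lock(&h->lock);
	h->flags &= ~HOST_START_PENDING;        /* clear the bit, then wake */
	pthread_cond_broadcast(&h->eventq);
	pthread_mutex_unlock(&h->lock);
	return NULL;
}

int main(void)
{
	struct host h = { HOST_START_PENDING,
			  PTHREAD_MUTEX_INITIALIZER,
			  PTHREAD_COND_INITIALIZER };
	pthread_t t;

	pthread_create(&t, NULL, start_complete, &h);
	wait_for_start(&h);
	pthread_join(t, NULL);
	puts("host started");
	return 0;
}
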
@@ -360,6 +340,11 @@ static inline struct isci_host *dev_to_ihost(struct domain_device *dev)
 	return dev->port->ha->lldd_ha;
 }
 
+static inline struct isci_host *idev_to_ihost(struct isci_remote_device *idev)
+{
+	return dev_to_ihost(idev->domain_dev);
+}
+
 /* we always use protocol engine group zero */
 #define ISCI_PEG 0
 
@@ -378,8 +363,7 @@ static inline int sci_remote_device_node_count(struct isci_remote_device *idev)
 {
 	struct domain_device *dev = idev->domain_dev;
 
-	if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) &&
-	    !idev->is_direct_attached)
+	if (dev_is_sata(dev) && dev->parent)
 		return SCU_STP_REMOTE_NODE_COUNT;
 	return SCU_SSP_REMOTE_NODE_COUNT;
 }
@@ -475,36 +459,17 @@ void sci_controller_free_remote_node_context(
 	struct isci_remote_device *idev,
 	u16 node_id);
 
-struct isci_request *sci_request_by_tag(struct isci_host *ihost,
-					u16 io_tag);
-
-void sci_controller_power_control_queue_insert(
-	struct isci_host *ihost,
-	struct isci_phy *iphy);
-
-void sci_controller_power_control_queue_remove(
-	struct isci_host *ihost,
-	struct isci_phy *iphy);
-
-void sci_controller_link_up(
-	struct isci_host *ihost,
-	struct isci_port *iport,
-	struct isci_phy *iphy);
-
-void sci_controller_link_down(
-	struct isci_host *ihost,
-	struct isci_port *iport,
-	struct isci_phy *iphy);
-
-void sci_controller_remote_device_stopped(
-	struct isci_host *ihost,
-	struct isci_remote_device *idev);
-
-void sci_controller_copy_task_context(
-	struct isci_host *ihost,
-	struct isci_request *ireq);
-
-void sci_controller_register_setup(struct isci_host *ihost);
+struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag);
+void sci_controller_power_control_queue_insert(struct isci_host *ihost,
+					       struct isci_phy *iphy);
+void sci_controller_power_control_queue_remove(struct isci_host *ihost,
+					       struct isci_phy *iphy);
+void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport,
+			    struct isci_phy *iphy);
+void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
+			      struct isci_phy *iphy);
+void sci_controller_remote_device_stopped(struct isci_host *ihost,
+					  struct isci_remote_device *idev);
 
 enum sci_status sci_controller_continue_io(struct isci_request *ireq);
 int isci_host_scan_finished(struct Scsi_Host *, unsigned long);
@@ -512,29 +477,14 @@ void isci_host_scan_start(struct Scsi_Host *);
 u16 isci_alloc_tag(struct isci_host *ihost);
 enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag);
 void isci_tci_free(struct isci_host *ihost, u16 tci);
+void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task);
 
 int isci_host_init(struct isci_host *);
-
-void isci_host_init_controller_names(
-	struct isci_host *isci_host,
-	unsigned int controller_idx);
-
-void isci_host_deinit(
-	struct isci_host *);
-
-void isci_host_port_link_up(
-	struct isci_host *,
-	struct isci_port *,
-	struct isci_phy *);
-int isci_host_dev_found(struct domain_device *);
-
-void isci_host_remote_device_start_complete(
-	struct isci_host *,
-	struct isci_remote_device *,
-	enum sci_status);
-
-void sci_controller_disable_interrupts(
-	struct isci_host *ihost);
+void isci_host_completion_routine(unsigned long data);
+void isci_host_deinit(struct isci_host *);
+void sci_controller_disable_interrupts(struct isci_host *ihost);
+bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost);
+void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status);
 
 enum sci_status sci_controller_start_io(
 	struct isci_host *ihost,
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 5137db5a5d85..47e28b555029 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -271,13 +271,12 @@ static void isci_unregister(struct isci_host *isci_host)
 	if (!isci_host)
 		return;
 
-	shost = isci_host->shost;
-
 	sas_unregister_ha(&isci_host->sas_ha);
 
-	sas_remove_host(isci_host->shost);
-	scsi_remove_host(isci_host->shost);
-	scsi_host_put(isci_host->shost);
+	shost = to_shost(isci_host);
+	sas_remove_host(shost);
+	scsi_remove_host(shost);
+	scsi_host_put(shost);
 }
 
 static int __devinit isci_pci_init(struct pci_dev *pdev)
@@ -397,38 +396,199 @@ static int isci_setup_interrupts(struct pci_dev *pdev)
 	return err;
 }
 
+static void isci_user_parameters_get(struct sci_user_parameters *u)
+{
+	int i;
+
+	for (i = 0; i < SCI_MAX_PHYS; i++) {
+		struct sci_phy_user_params *u_phy = &u->phys[i];
+
+		u_phy->max_speed_generation = phy_gen;
+
+		/* we are not exporting these for now */
+		u_phy->align_insertion_frequency = 0x7f;
+		u_phy->in_connection_align_insertion_frequency = 0xff;
+		u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
+	}
+
+	u->stp_inactivity_timeout = stp_inactive_to;
+	u->ssp_inactivity_timeout = ssp_inactive_to;
+	u->stp_max_occupancy_timeout = stp_max_occ_to;
+	u->ssp_max_occupancy_timeout = ssp_max_occ_to;
+	u->no_outbound_task_timeout = no_outbound_task_to;
+	u->max_concurr_spinup = max_concurr_spinup;
+}
+
+static enum sci_status sci_user_parameters_set(struct isci_host *ihost,
+					       struct sci_user_parameters *sci_parms)
+{
+	u16 index;
+
+	/*
+	 * Validate the user parameters.  If they are not legal, then
+	 * return a failure.
+	 */
+	for (index = 0; index < SCI_MAX_PHYS; index++) {
+		struct sci_phy_user_params *u;
+
+		u = &sci_parms->phys[index];
+
+		if (!((u->max_speed_generation <= SCIC_SDS_PARM_MAX_SPEED) &&
+		      (u->max_speed_generation > SCIC_SDS_PARM_NO_SPEED)))
+			return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+		if (u->in_connection_align_insertion_frequency < 3)
+			return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+		if ((u->in_connection_align_insertion_frequency < 3) ||
+		    (u->align_insertion_frequency == 0) ||
+		    (u->notify_enable_spin_up_insertion_frequency == 0))
+			return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+	}
+
+	if ((sci_parms->stp_inactivity_timeout == 0) ||
+	    (sci_parms->ssp_inactivity_timeout == 0) ||
+	    (sci_parms->stp_max_occupancy_timeout == 0) ||
+	    (sci_parms->ssp_max_occupancy_timeout == 0) ||
+	    (sci_parms->no_outbound_task_timeout == 0))
+		return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+	memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms));
+
+	return SCI_SUCCESS;
+}
+
+static void sci_oem_defaults(struct isci_host *ihost)
+{
+	/* these defaults are overridden by the platform / firmware */
+	struct sci_user_parameters *user = &ihost->user_parameters;
+	struct sci_oem_params *oem = &ihost->oem_parameters;
+	int i;
+
+	/* Default to APC mode. */
+	oem->controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
+
+	/* Default to one phy spin up at a time. */
+	oem->controller.max_concurr_spin_up = 1;
+
+	/* Default to no SSC operation. */
+	oem->controller.do_enable_ssc = false;
+
+	/* Default to short cables on all phys. */
+	oem->controller.cable_selection_mask = 0;
+
+	/* Initialize all of the port parameter information to narrow ports. */
+	for (i = 0; i < SCI_MAX_PORTS; i++)
+		oem->ports[i].phy_mask = 0;
+
+	/* Initialize all of the phy parameter information. */
+	for (i = 0; i < SCI_MAX_PHYS; i++) {
+		/* Default to 3G (i.e. Gen 2). */
+		user->phys[i].max_speed_generation = SCIC_SDS_PARM_GEN2_SPEED;
+
+		/* the frequencies cannot be 0 */
+		user->phys[i].align_insertion_frequency = 0x7f;
+		user->phys[i].in_connection_align_insertion_frequency = 0xff;
+		user->phys[i].notify_enable_spin_up_insertion_frequency = 0x33;
+
+		/* Previous Vitesse based expanders had an arbitration issue that
+		 * is worked around by having the upper 32-bits of SAS address
+		 * with a value greater than the Vitesse company identifier.
+		 * Hence, usage of 0x5FCFFFFF.
+		 */
+		oem->phys[i].sas_address.low = 0x1 + ihost->id;
+		oem->phys[i].sas_address.high = 0x5FCFFFFF;
+	}
+
+	user->stp_inactivity_timeout = 5;
+	user->ssp_inactivity_timeout = 5;
+	user->stp_max_occupancy_timeout = 5;
+	user->ssp_max_occupancy_timeout = 20;
+	user->no_outbound_task_timeout = 2;
+}
+
 static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
 {
-	struct isci_host *isci_host;
+	struct isci_orom *orom = to_pci_info(pdev)->orom;
+	struct sci_user_parameters sci_user_params;
+	u8 oem_version = ISCI_ROM_VER_1_0;
+	struct isci_host *ihost;
 	struct Scsi_Host *shost;
-	int err;
+	int err, i;
 
-	isci_host = devm_kzalloc(&pdev->dev, sizeof(*isci_host), GFP_KERNEL);
-	if (!isci_host)
+	ihost = devm_kzalloc(&pdev->dev, sizeof(*ihost), GFP_KERNEL);
+	if (!ihost)
 		return NULL;
 
-	isci_host->pdev = pdev;
-	isci_host->id = id;
+	ihost->pdev = pdev;
+	ihost->id = id;
+	spin_lock_init(&ihost->scic_lock);
+	init_waitqueue_head(&ihost->eventq);
+	ihost->sas_ha.dev = &ihost->pdev->dev;
+	ihost->sas_ha.lldd_ha = ihost;
+	tasklet_init(&ihost->completion_tasklet,
+		     isci_host_completion_routine, (unsigned long)ihost);
+
+	/* validate module parameters */
+	/* TODO: kill struct sci_user_parameters and reference directly */
+	sci_oem_defaults(ihost);
+	isci_user_parameters_get(&sci_user_params);
+	if (sci_user_parameters_set(ihost, &sci_user_params)) {
+		dev_warn(&pdev->dev,
+			 "%s: sci_user_parameters_set failed\n", __func__);
+		return NULL;
+	}
+
+	/* sanity check platform (or 'firmware') oem parameters */
+	if (orom) {
+		if (id < 0 || id >= SCI_MAX_CONTROLLERS || id > orom->hdr.num_elements) {
+			dev_warn(&pdev->dev, "parsing firmware oem parameters failed\n");
+			return NULL;
+		}
+		ihost->oem_parameters = orom->ctrl[id];
+		oem_version = orom->hdr.version;
+	}
+
+	/* validate oem parameters (platform, firmware, or built-in defaults) */
+	if (sci_oem_parameters_validate(&ihost->oem_parameters, oem_version)) {
+		dev_warn(&pdev->dev, "oem parameter validation failed\n");
+		return NULL;
+	}
+
+	for (i = 0; i < SCI_MAX_PORTS; i++) {
+		struct isci_port *iport = &ihost->ports[i];
+
+		INIT_LIST_HEAD(&iport->remote_dev_list);
+		iport->isci_host = ihost;
+	}
+
+	for (i = 0; i < SCI_MAX_PHYS; i++)
+		isci_phy_init(&ihost->phys[i], ihost, i);
+
+	for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
+		struct isci_remote_device *idev = &ihost->devices[i];
+
+		INIT_LIST_HEAD(&idev->node);
+	}
 
 	shost = scsi_host_alloc(&isci_sht, sizeof(void *));
 	if (!shost)
 		return NULL;
-	isci_host->shost = shost;
 
 	dev_info(&pdev->dev, "%sSCU controller %d: phy 3-0 cables: "
 		 "{%s, %s, %s, %s}\n",
-		 (is_cable_select_overridden() ? "* " : ""), isci_host->id,
-		 lookup_cable_names(decode_cable_selection(isci_host, 3)),
-		 lookup_cable_names(decode_cable_selection(isci_host, 2)),
-		 lookup_cable_names(decode_cable_selection(isci_host, 1)),
-		 lookup_cable_names(decode_cable_selection(isci_host, 0)));
+		 (is_cable_select_overridden() ? "* " : ""), ihost->id,
+		 lookup_cable_names(decode_cable_selection(ihost, 3)),
+		 lookup_cable_names(decode_cable_selection(ihost, 2)),
+		 lookup_cable_names(decode_cable_selection(ihost, 1)),
+		 lookup_cable_names(decode_cable_selection(ihost, 0)));
 
-	err = isci_host_init(isci_host);
+	err = isci_host_init(ihost);
 	if (err)
 		goto err_shost;
 
-	SHOST_TO_SAS_HA(shost) = &isci_host->sas_ha;
-	isci_host->sas_ha.core.shost = shost;
+	SHOST_TO_SAS_HA(shost) = &ihost->sas_ha;
+	ihost->sas_ha.core.shost = shost;
 	shost->transportt = isci_transport_template;
 
 	shost->max_id = ~0;
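
The hunk above moves one-time setup (locks, wait queue, tasklet, lists) and parameter validation into isci_host_alloc(), so every veto happens before any hardware work and isci_host_init() stays re-runnable. A condensed model of that alloc-time ordering, with simplified stand-in structures and checks:

#include <stdio.h>
#include <stdlib.h>

struct params { int speed_gen; };

/* built-in defaults, mirroring the role of sci_oem_defaults() */
static void set_defaults(struct params *p)
{
	p->speed_gen = 2;
}

/* stands in for module-parameter / firmware overrides plus validation */
static int validate(const struct params *p)
{
	return (p->speed_gen >= 1 && p->speed_gen <= 3) ? 0 : -1;
}

static struct params *host_alloc(void)
{
	struct params *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;
	set_defaults(p);          /* defaults first ... */
	if (validate(p)) {        /* ... then veto before any hardware work */
		free(p);
		return NULL;
	}
	return p;
}

int main(void)
{
	struct params *p = host_alloc();

	if (!p) {
		puts("alloc failed");
		return 1;
	}
	printf("alloc ok, gen %d\n", p->speed_gen);
	free(p);
	return 0;
}
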
@@ -439,11 +599,11 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
 	if (err)
 		goto err_shost;
 
-	err = isci_register_sas_ha(isci_host);
+	err = isci_register_sas_ha(ihost);
 	if (err)
 		goto err_shost_remove;
 
-	return isci_host;
+	return ihost;
 
  err_shost_remove:
 	scsi_remove_host(shost);
@@ -476,7 +636,7 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
 	if (!orom)
 		orom = isci_request_oprom(pdev);
 
-	for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) {
+	for (i = 0; orom && i < num_controllers(pdev); i++) {
 		if (sci_oem_parameters_validate(&orom->ctrl[i],
 						orom->hdr.version)) {
 			dev_warn(&pdev->dev,
@@ -525,11 +685,11 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
 		pci_info->hosts[i] = h;
 
 		/* turn on DIF support */
-		scsi_host_set_prot(h->shost,
+		scsi_host_set_prot(to_shost(h),
 				   SHOST_DIF_TYPE1_PROTECTION |
 				   SHOST_DIF_TYPE2_PROTECTION |
 				   SHOST_DIF_TYPE3_PROTECTION);
-		scsi_host_set_guard(h->shost, SHOST_DIX_GUARD_CRC);
+		scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC);
 	}
 
 	err = isci_setup_interrupts(pdev);
@@ -537,7 +697,7 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
 		goto err_host_alloc;
 
 	for_each_isci_host(i, isci_host, pdev)
-		scsi_scan_host(isci_host->shost);
+		scsi_scan_host(to_shost(isci_host));
 
 	return 0;
 
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index fab3586840b5..18f43d4c30ba 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -580,7 +580,7 @@ static void sci_phy_start_sas_link_training(struct isci_phy *iphy)
 
 	sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SAS_SPEED_EN);
 
-	iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SAS;
+	iphy->protocol = SAS_PROTOCOL_SSP;
 }
 
 static void sci_phy_start_sata_link_training(struct isci_phy *iphy)
@@ -591,7 +591,7 @@ static void sci_phy_start_sata_link_training(struct isci_phy *iphy)
 	 */
 	sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_POWER);
 
-	iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA;
+	iphy->protocol = SAS_PROTOCOL_SATA;
 }
 
 /**
@@ -668,6 +668,19 @@ static const char *phy_event_name(u32 event_code)
 		    phy_to_host(iphy)->id, iphy->phy_index, \
 		    phy_state_name(state), phy_event_name(code), code)
 
+
+void scu_link_layer_set_txcomsas_timeout(struct isci_phy *iphy, u32 timeout)
+{
+	u32 val;
+
+	/* Extend timeout */
+	val = readl(&iphy->link_layer_registers->transmit_comsas_signal);
+	val &= ~SCU_SAS_LLTXCOMSAS_GEN_VAL(NEGTIME, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_MASK);
+	val |= SCU_SAS_LLTXCOMSAS_GEN_VAL(NEGTIME, timeout);
+
+	writel(val, &iphy->link_layer_registers->transmit_comsas_signal);
+}
+
 enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
 {
 	enum sci_phy_states state = iphy->sm.current_state_id;
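
The new scu_link_layer_set_txcomsas_timeout() above is a classic read-modify-write of a register bit-field: read, mask out the NEGTIME field, OR in the new value, write back. The generic shape of that helper, with a plain variable modeling the memory-mapped register (real code would use readl()/writel() on an ioremapped address, and the field layout here is assumed for illustration only):

#include <stdint.h>
#include <stdio.h>

#define NEGTIME_SHIFT 0
#define NEGTIME_MASK  0xffu   /* assumed field layout, illustration only */

static uint32_t reg;          /* stands in for transmit_comsas_signal */

static void set_field(volatile uint32_t *r, uint32_t val)
{
	uint32_t v = *r;                            /* readl() */

	v &= ~(NEGTIME_MASK << NEGTIME_SHIFT);      /* clear the field */
	v |= (val & NEGTIME_MASK) << NEGTIME_SHIFT; /* insert new value */
	*r = v;                                     /* writel() */
}

int main(void)
{
	reg = 0xdeadbe42;
	set_field(&reg, 0x99);
	printf("reg=0x%08x\n", reg);  /* 0xdeadbe99: other bits untouched */
	return 0;
}
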
@@ -683,6 +696,13 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
 			sci_phy_start_sata_link_training(iphy);
 			iphy->is_in_link_training = true;
 			break;
+		case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
+			/* Extend timeout value */
+			scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED);
+
+			/* Start the oob/sn state machine over again */
+			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+			break;
 		default:
 			phy_event_dbg(iphy, state, event_code);
 			return SCI_FAILURE;
@@ -717,9 +737,19 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
 			sci_phy_start_sata_link_training(iphy);
 			break;
 		case SCU_EVENT_LINK_FAILURE:
+			/* Change the timeout value to default */
+			scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
 			/* Link failure change state back to the starting state */
 			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
 			break;
+		case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
+			/* Extend the timeout value */
+			scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED);
+
+			/* Start the oob/sn state machine over again */
+			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+			break;
 		default:
 			phy_event_warn(iphy, state, event_code);
 			return SCI_FAILURE;
@@ -740,7 +770,14 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
 			sci_phy_start_sata_link_training(iphy);
 			break;
 		case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
+			/* Extend the timeout value */
+			scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED);
+
+			/* Start the oob/sn state machine over again */
+			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+			break;
 		case SCU_EVENT_LINK_FAILURE:
+			scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
 		case SCU_EVENT_HARD_RESET_RECEIVED:
 			/* Start the oob/sn state machine over again */
 			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
@@ -753,6 +790,9 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
 	case SCI_PHY_SUB_AWAIT_SAS_POWER:
 		switch (scu_get_event_code(event_code)) {
 		case SCU_EVENT_LINK_FAILURE:
+			/* Change the timeout value to default */
+			scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
 			/* Link failure change state back to the starting state */
 			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
 			break;
@@ -764,6 +804,9 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
 	case SCI_PHY_SUB_AWAIT_SATA_POWER:
 		switch (scu_get_event_code(event_code)) {
 		case SCU_EVENT_LINK_FAILURE:
+			/* Change the timeout value to default */
+			scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
 			/* Link failure change state back to the starting state */
 			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
 			break;
@@ -788,6 +831,9 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
 	case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
 		switch (scu_get_event_code(event_code)) {
 		case SCU_EVENT_LINK_FAILURE:
+			/* Change the timeout value to default */
+			scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
 			/* Link failure change state back to the starting state */
 			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
 			break;
@@ -797,7 +843,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
 			 */
 			break;
 		case SCU_EVENT_SATA_PHY_DETECTED:
-			iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA;
+			iphy->protocol = SAS_PROTOCOL_SATA;
 
 			/* We have received the SATA PHY notification change state */
 			sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN);
@@ -836,6 +882,9 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
 					 SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
 			break;
 		case SCU_EVENT_LINK_FAILURE:
+			/* Change the timeout value to default */
+			scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
 			/* Link failure change state back to the starting state */
 			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
 			break;
@@ -859,6 +908,9 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
 			break;
 
 		case SCU_EVENT_LINK_FAILURE:
+			/* Change the timeout value to default */
+			scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
 			/* Link failure change state back to the starting state */
 			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
 			break;
@@ -871,16 +923,26 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
 	case SCI_PHY_READY:
 		switch (scu_get_event_code(event_code)) {
 		case SCU_EVENT_LINK_FAILURE:
+			/* Set default timeout */
+			scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
 			/* Link failure change state back to the starting state */
 			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
 			break;
 		case SCU_EVENT_BROADCAST_CHANGE:
+		case SCU_EVENT_BROADCAST_SES:
+		case SCU_EVENT_BROADCAST_RESERVED0:
+		case SCU_EVENT_BROADCAST_RESERVED1:
+		case SCU_EVENT_BROADCAST_EXPANDER:
+		case SCU_EVENT_BROADCAST_AEN:
 			/* Broadcast change received. Notify the port. */
 			if (phy_get_non_dummy_port(iphy) != NULL)
 				sci_port_broadcast_change_received(iphy->owning_port, iphy);
 			else
 				iphy->bcn_received_while_port_unassigned = true;
 			break;
+		case SCU_EVENT_BROADCAST_RESERVED3:
+		case SCU_EVENT_BROADCAST_RESERVED4:
 		default:
 			phy_event_warn(iphy, state, event_code);
 			return SCI_FAILURE_INVALID_STATE;
@@ -1215,7 +1277,7 @@ static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm)
 	scu_link_layer_start_oob(iphy);
 
 	/* We don't know what kind of phy we are going to be just yet */
-	iphy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN;
+	iphy->protocol = SAS_PROTOCOL_NONE;
 	iphy->bcn_received_while_port_unassigned = false;
 
 	if (iphy->sm.previous_state_id == SCI_PHY_READY)
@@ -1250,7 +1312,7 @@ static void sci_phy_resetting_state_enter(struct sci_base_state_machine *sm)
 	 */
 	sci_port_deactivate_phy(iphy->owning_port, iphy, false);
 
-	if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
+	if (iphy->protocol == SAS_PROTOCOL_SSP) {
 		scu_link_layer_tx_hard_reset(iphy);
 	} else {
 		/* The SCU does not need to have a discrete reset state so
@@ -1316,7 +1378,7 @@ void sci_phy_construct(struct isci_phy *iphy,
 	iphy->owning_port = iport;
 	iphy->phy_index = phy_index;
 	iphy->bcn_received_while_port_unassigned = false;
-	iphy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN;
+	iphy->protocol = SAS_PROTOCOL_NONE;
 	iphy->link_layer_registers = NULL;
 	iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
 
@@ -1380,12 +1442,14 @@ int isci_phy_control(struct asd_sas_phy *sas_phy,
 	switch (func) {
 	case PHY_FUNC_DISABLE:
 		spin_lock_irqsave(&ihost->scic_lock, flags);
+		scu_link_layer_start_oob(iphy);
 		sci_phy_stop(iphy);
 		spin_unlock_irqrestore(&ihost->scic_lock, flags);
 		break;
 
 	case PHY_FUNC_LINK_RESET:
 		spin_lock_irqsave(&ihost->scic_lock, flags);
+		scu_link_layer_start_oob(iphy);
 		sci_phy_stop(iphy);
 		sci_phy_start(iphy);
 		spin_unlock_irqrestore(&ihost->scic_lock, flags);
diff --git a/drivers/scsi/isci/phy.h b/drivers/scsi/isci/phy.h
index 0e45833ba06d..45fecfa36a98 100644
--- a/drivers/scsi/isci/phy.h
+++ b/drivers/scsi/isci/phy.h
@@ -76,13 +76,6 @@
  */
 #define SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT  250
 
-enum sci_phy_protocol {
-	SCIC_SDS_PHY_PROTOCOL_UNKNOWN,
-	SCIC_SDS_PHY_PROTOCOL_SAS,
-	SCIC_SDS_PHY_PROTOCOL_SATA,
-	SCIC_SDS_MAX_PHY_PROTOCOLS
-};
-
 /**
  * isci_phy - hba local phy infrastructure
  * @sm:
@@ -95,7 +88,7 @@ struct isci_phy {
 	struct sci_base_state_machine sm;
 	struct isci_port *owning_port;
 	enum sas_linkrate max_negotiated_speed;
-	enum sci_phy_protocol protocol;
+	enum sas_protocol protocol;
 	u8 phy_index;
 	bool bcn_received_while_port_unassigned;
 	bool is_in_link_training;
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
index 5fada73b71ff..2fb85bf75449 100644
--- a/drivers/scsi/isci/port.c
+++ b/drivers/scsi/isci/port.c
@@ -184,7 +184,7 @@ static void isci_port_link_up(struct isci_host *isci_host,
 
 	sci_port_get_properties(iport, &properties);
 
-	if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) {
+	if (iphy->protocol == SAS_PROTOCOL_SATA) {
 		u64 attached_sas_address;
 
 		iphy->sas_phy.oob_mode = SATA_OOB_MODE;
@@ -204,7 +204,7 @@ static void isci_port_link_up(struct isci_host *isci_host,
 
 		memcpy(&iphy->sas_phy.attached_sas_addr,
 		       &attached_sas_address, sizeof(attached_sas_address));
-	} else if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
+	} else if (iphy->protocol == SAS_PROTOCOL_SSP) {
 		iphy->sas_phy.oob_mode = SAS_OOB_MODE;
 		iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame);
 
@@ -251,10 +251,10 @@ static void isci_port_link_down(struct isci_host *isci_host,
 	if (isci_phy->sas_phy.port &&
 	    isci_phy->sas_phy.port->num_phys == 1) {
 		/* change the state for all devices on this port. The
 		 * next task sent to this device will be returned as
 		 * SAS_TASK_UNDELIVERED, and the scsi mid layer will
 		 * remove the target
 		 */
 		list_for_each_entry(isci_device,
 				    &isci_port->remote_dev_list,
 				    node) {
@@ -517,7 +517,7 @@ void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_a
 	 */
 	iphy = sci_port_get_a_connected_phy(iport);
 	if (iphy) {
-		if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) {
+		if (iphy->protocol != SAS_PROTOCOL_SATA) {
 			sci_phy_get_attached_sas_address(iphy, sas);
 		} else {
 			sci_phy_get_sas_address(iphy, sas);
@@ -624,7 +624,7 @@ static void sci_port_activate_phy(struct isci_port *iport,
 {
 	struct isci_host *ihost = iport->owning_controller;
 
-	if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA && (flags & PF_RESUME))
+	if (iphy->protocol != SAS_PROTOCOL_SATA && (flags & PF_RESUME))
 		sci_phy_resume(iphy);
 
 	iport->active_phy_mask |= 1 << iphy->phy_index;
@@ -751,12 +751,10 @@ static bool sci_port_is_wide(struct isci_port *iport)
  * wide ports and direct attached phys. Since there are no wide ported SATA
  * devices this could become an invalid port configuration.
  */
-bool sci_port_link_detected(
-	struct isci_port *iport,
-	struct isci_phy *iphy)
+bool sci_port_link_detected(struct isci_port *iport, struct isci_phy *iphy)
 {
 	if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
-	    (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA)) {
+	    (iphy->protocol == SAS_PROTOCOL_SATA)) {
 		if (sci_port_is_wide(iport)) {
 			sci_port_invalid_link_up(iport, iphy);
 			return false;
@@ -1201,6 +1199,8 @@ enum sci_status sci_port_add_phy(struct isci_port *iport,
 	enum sci_status status;
 	enum sci_port_states state;
 
+	sci_port_bcn_enable(iport);
+
 	state = iport->sm.current_state_id;
 	switch (state) {
 	case SCI_PORT_STOPPED: {
@@ -1548,6 +1548,29 @@ static void sci_port_failed_state_enter(struct sci_base_state_machine *sm)
 	isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT);
 }
 
+void sci_port_set_hang_detection_timeout(struct isci_port *iport, u32 timeout)
+{
+	int phy_index;
+	u32 phy_mask = iport->active_phy_mask;
+
+	if (timeout)
+		++iport->hang_detect_users;
+	else if (iport->hang_detect_users > 1)
+		--iport->hang_detect_users;
+	else
+		iport->hang_detect_users = 0;
+
+	if (timeout || (iport->hang_detect_users == 0)) {
+		for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) {
+			if ((phy_mask >> phy_index) & 1) {
+				writel(timeout,
+				       &iport->phy_table[phy_index]
+					  ->link_layer_registers
+					  ->link_layer_hang_detection_timeout);
+			}
+		}
+	}
+}
 /* --------------------------------------------------------------------------- */
 
 static const struct sci_base_state sci_port_state_table[] = {
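
The new sci_port_set_hang_detection_timeout() above reference-counts enable requests (hang_detect_users) so the per-phy register is rewritten on any enable, but reverts only when the last user disables. A small model of that counting; the reg variable stands in for the link_layer_hang_detection_timeout register:

#include <stdio.h>

static unsigned users;
static unsigned reg;

static void set_hang_detect(unsigned timeout)
{
	if (timeout)
		++users;
	else if (users > 1)
		--users;
	else
		users = 0;

	/* write on any enable, or on the final disable */
	if (timeout || users == 0)
		reg = timeout;
}

int main(void)
{
	set_hang_detect(500);  /* user A enables: reg = 500 */
	set_hang_detect(500);  /* user B enables: reg stays 500, users = 2 */
	set_hang_detect(0);    /* user B done: reg untouched */
	printf("after first disable: users=%u reg=%u\n", users, reg);
	set_hang_detect(0);    /* last user done: reg = 0 */
	printf("after last disable:  users=%u reg=%u\n", users, reg);
	return 0;
}
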
@@ -1596,6 +1619,7 @@ void sci_port_construct(struct isci_port *iport, u8 index,
 
 	iport->started_request_count = 0;
 	iport->assigned_device_count = 0;
+	iport->hang_detect_users = 0;
 
 	iport->reserved_rni = SCU_DUMMY_INDEX;
 	iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
@@ -1608,13 +1632,6 @@ void sci_port_construct(struct isci_port *iport, u8 index,
 		iport->phy_table[index] = NULL;
 }
 
-void isci_port_init(struct isci_port *iport, struct isci_host *ihost, int index)
-{
-	INIT_LIST_HEAD(&iport->remote_dev_list);
-	INIT_LIST_HEAD(&iport->domain_dev_list);
-	iport->isci_host = ihost;
-}
-
 void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy)
 {
 	struct isci_host *ihost = iport->owning_controller;
@@ -1671,17 +1688,6 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor
1671 __func__, iport, status); 1688 __func__, iport, status);
1672 1689
1673 } 1690 }
1674
1675 /* If the hard reset for the port has failed, consider this
1676 * the same as link failures on all phys in the port.
1677 */
1678 if (ret != TMF_RESP_FUNC_COMPLETE) {
1679
1680 dev_err(&ihost->pdev->dev,
1681 "%s: iport = %p; hard reset failed "
1682 "(0x%x) - driving explicit link fail for all phys\n",
1683 __func__, iport, iport->hard_reset_status);
1684 }
1685 return ret; 1691 return ret;
1686} 1692}
1687 1693
@@ -1740,7 +1746,7 @@ void isci_port_formed(struct asd_sas_phy *phy)
1740 struct isci_host *ihost = phy->ha->lldd_ha; 1746 struct isci_host *ihost = phy->ha->lldd_ha;
1741 struct isci_phy *iphy = to_iphy(phy); 1747 struct isci_phy *iphy = to_iphy(phy);
1742 struct asd_sas_port *port = phy->port; 1748 struct asd_sas_port *port = phy->port;
1743 struct isci_port *iport; 1749 struct isci_port *iport = NULL;
1744 unsigned long flags; 1750 unsigned long flags;
1745 int i; 1751 int i;
1746 1752
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h
index 6b56240c2051..861e8f72811b 100644
--- a/drivers/scsi/isci/port.h
+++ b/drivers/scsi/isci/port.h
@@ -97,7 +97,6 @@ enum isci_status {
97struct isci_port { 97struct isci_port {
98 struct isci_host *isci_host; 98 struct isci_host *isci_host;
99 struct list_head remote_dev_list; 99 struct list_head remote_dev_list;
100 struct list_head domain_dev_list;
101 #define IPORT_RESET_PENDING 0 100 #define IPORT_RESET_PENDING 0
102 unsigned long state; 101 unsigned long state;
103 enum sci_status hard_reset_status; 102 enum sci_status hard_reset_status;
@@ -112,6 +111,7 @@ struct isci_port {
112 u16 reserved_tag; 111 u16 reserved_tag;
113 u32 started_request_count; 112 u32 started_request_count;
114 u32 assigned_device_count; 113 u32 assigned_device_count;
114 u32 hang_detect_users;
115 u32 not_ready_reason; 115 u32 not_ready_reason;
116 struct isci_phy *phy_table[SCI_MAX_PHYS]; 116 struct isci_phy *phy_table[SCI_MAX_PHYS];
117 struct isci_host *owning_controller; 117 struct isci_host *owning_controller;
@@ -270,14 +270,13 @@ void sci_port_get_attached_sas_address(
270 struct isci_port *iport, 270 struct isci_port *iport,
271 struct sci_sas_address *sas_address); 271 struct sci_sas_address *sas_address);
272 272
273void sci_port_set_hang_detection_timeout(
274 struct isci_port *isci_port,
275 u32 timeout);
276
273void isci_port_formed(struct asd_sas_phy *); 277void isci_port_formed(struct asd_sas_phy *);
274void isci_port_deformed(struct asd_sas_phy *); 278void isci_port_deformed(struct asd_sas_phy *);
275 279
276void isci_port_init(
277 struct isci_port *port,
278 struct isci_host *host,
279 int index);
280
281int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport, 280int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
282 struct isci_phy *iphy); 281 struct isci_phy *iphy);
283int isci_ata_check_ready(struct domain_device *dev); 282int isci_ata_check_ready(struct domain_device *dev);
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index 6d1e9544cbe5..cd962da4a57a 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -57,7 +57,7 @@
57 57
58#define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT (10) 58#define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT (10)
59#define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT (10) 59#define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT (10)
60#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (250) 60#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (1000)
61 61
62enum SCIC_SDS_APC_ACTIVITY { 62enum SCIC_SDS_APC_ACTIVITY {
63 SCIC_SDS_APC_SKIP_PHY, 63 SCIC_SDS_APC_SKIP_PHY,
@@ -472,13 +472,9 @@ sci_apc_agent_validate_phy_configuration(struct isci_host *ihost,
472 * down event or a link up event where we cannot yet tell to which port a 472 * down event or a link up event where we cannot yet tell to which port a
473 * phy belongs. 473 * phy belongs.
474 */ 474 */
475static void sci_apc_agent_start_timer( 475static void sci_apc_agent_start_timer(struct sci_port_configuration_agent *port_agent,
476 struct sci_port_configuration_agent *port_agent, 476 u32 timeout)
477 u32 timeout)
478{ 477{
479 if (port_agent->timer_pending)
480 sci_del_timer(&port_agent->timer);
481
482 port_agent->timer_pending = true; 478 port_agent->timer_pending = true;
483 sci_mod_timer(&port_agent->timer, timeout); 479 sci_mod_timer(&port_agent->timer, timeout);
484} 480}
@@ -697,6 +693,9 @@ static void apc_agent_timeout(unsigned long data)
697 &ihost->phys[index], false); 693 &ihost->phys[index], false);
698 } 694 }
699 695
696 if (is_controller_start_complete(ihost))
697 sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
698
700done: 699done:
701 spin_unlock_irqrestore(&ihost->scic_lock, flags); 700 spin_unlock_irqrestore(&ihost->scic_lock, flags);
702} 701}
@@ -732,6 +731,11 @@ void sci_port_configuration_agent_construct(
732 } 731 }
733} 732}
734 733
734bool is_port_config_apc(struct isci_host *ihost)
735{
736 return ihost->port_agent.link_up_handler == sci_apc_agent_link_up;
737}
738
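
is_port_config_apc() identifies the active port-configuration mode by comparing the installed link-up handler rather than carrying a separate mode flag that could drift out of sync. A hedged sketch of that identify-the-strategy-by-its-callback idiom, with made-up names:

	/* Sketch: recognize the active strategy by the callback installed for it.
	 * Illustrative only; the driver compares link_up_handler against
	 * sci_apc_agent_link_up in the same way.
	 */
	struct agent {
		void (*link_up)(struct agent *a);
	};

	static void automatic_link_up(struct agent *a) { /* ... */ }
	static void manual_link_up(struct agent *a) { /* ... */ }

	static int agent_is_automatic(const struct agent *a)
	{
		return a->link_up == automatic_link_up;
	}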
735enum sci_status sci_port_configuration_agent_initialize( 739enum sci_status sci_port_configuration_agent_initialize(
736 struct isci_host *ihost, 740 struct isci_host *ihost,
737 struct sci_port_configuration_agent *port_agent) 741 struct sci_port_configuration_agent *port_agent)
diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c
index 9b8117b9d756..4d95654c3fd4 100644
--- a/drivers/scsi/isci/probe_roms.c
+++ b/drivers/scsi/isci/probe_roms.c
@@ -112,18 +112,6 @@ struct isci_orom *isci_request_oprom(struct pci_dev *pdev)
112 return rom; 112 return rom;
113} 113}
114 114
115enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem,
116 struct isci_orom *orom, int scu_index)
117{
118 /* check for valid inputs */
119 if (scu_index < 0 || scu_index >= SCI_MAX_CONTROLLERS ||
120 scu_index > orom->hdr.num_elements || !oem)
121 return -EINVAL;
122
123 *oem = orom->ctrl[scu_index];
124 return 0;
125}
126
127struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw) 115struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw)
128{ 116{
129 struct isci_orom *orom = NULL, *data; 117 struct isci_orom *orom = NULL, *data;
diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h
index bb0e9d4d97c9..e08b578241f8 100644
--- a/drivers/scsi/isci/probe_roms.h
+++ b/drivers/scsi/isci/probe_roms.h
@@ -156,8 +156,6 @@ int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version);
156 156
157struct isci_orom; 157struct isci_orom;
158struct isci_orom *isci_request_oprom(struct pci_dev *pdev); 158struct isci_orom *isci_request_oprom(struct pci_dev *pdev);
159enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem,
160 struct isci_orom *orom, int scu_index);
161struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw); 159struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw);
162struct isci_orom *isci_get_efi_var(struct pci_dev *pdev); 160struct isci_orom *isci_get_efi_var(struct pci_dev *pdev);
163 161
diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h
index 7eb0ccd45fe6..97f3ceb8d724 100644
--- a/drivers/scsi/isci/registers.h
+++ b/drivers/scsi/isci/registers.h
@@ -1239,6 +1239,14 @@ struct scu_transport_layer_registers {
1239#define SCU_SAS_LLCTL_GEN_BIT(name) \ 1239#define SCU_SAS_LLCTL_GEN_BIT(name) \
1240 SCU_GEN_BIT(SCU_SAS_LINK_LAYER_CONTROL_ ## name) 1240 SCU_GEN_BIT(SCU_SAS_LINK_LAYER_CONTROL_ ## name)
1241 1241
1242#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT (0xF0)
1243#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED (0x1FF)
1244#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_SHIFT (0)
1245#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_MASK (0x3FF)
1246
1247#define SCU_SAS_LLTXCOMSAS_GEN_VAL(name, value) \
1248 SCU_GEN_VALUE(SCU_SAS_LINK_LAYER_TXCOMSAS_ ## name, value)
1249
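
These TXCOMSAS definitions follow the driver's shift/mask field conventions; assuming SCU_GEN_VALUE expands along the lines of ((value) << name##_SHIFT) & name##_MASK, as the other SCU_*_GEN_VAL macros suggest, the default COMSAS negation time would encode like this standalone sketch (local names, not the real macros):

	#define NEGTIME_SHIFT 0
	#define NEGTIME_MASK  0x3FF

	/* Sketch of the assumed shift-and-mask field encoding. */
	static inline unsigned int encode_negtime(unsigned int value)
	{
		return (value << NEGTIME_SHIFT) & NEGTIME_MASK;	/* 0xF0 stays 0xF0 */
	}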
1242 1250
1243/* #define SCU_FRXHECR_DCNT_OFFSET 0x00B0 */ 1251/* #define SCU_FRXHECR_DCNT_OFFSET 0x00B0 */
1244#define SCU_PSZGCR_OFFSET 0x00E4 1252#define SCU_PSZGCR_OFFSET 0x00E4
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index 8f501b0a81d6..c3aa6c5457b9 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -72,46 +72,11 @@ const char *dev_state_name(enum sci_remote_device_states state)
72} 72}
73#undef C 73#undef C
74 74
75/** 75enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
76 * isci_remote_device_not_ready() - This function is called by the ihost when 76 enum sci_remote_node_suspension_reasons reason)
77 * the remote device is not ready. We mark the isci device as ready (not
78 * "ready_for_io") and signal the waiting proccess.
79 * @isci_host: This parameter specifies the isci host object.
80 * @isci_device: This parameter specifies the remote device
81 *
82 * sci_lock is held on entrance to this function.
83 */
84static void isci_remote_device_not_ready(struct isci_host *ihost,
85 struct isci_remote_device *idev, u32 reason)
86{ 77{
87 struct isci_request *ireq; 78 return sci_remote_node_context_suspend(&idev->rnc, reason,
88 79 SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT);
89 dev_dbg(&ihost->pdev->dev,
90 "%s: isci_device = %p\n", __func__, idev);
91
92 switch (reason) {
93 case SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED:
94 set_bit(IDEV_GONE, &idev->flags);
95 break;
96 case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
97 set_bit(IDEV_IO_NCQERROR, &idev->flags);
98
99 /* Kill all outstanding requests for the device. */
100 list_for_each_entry(ireq, &idev->reqs_in_process, dev_node) {
101
102 dev_dbg(&ihost->pdev->dev,
103 "%s: isci_device = %p request = %p\n",
104 __func__, idev, ireq);
105
106 sci_controller_terminate_request(ihost,
107 idev,
108 ireq);
109 }
110 /* Fall through into the default case... */
111 default:
112 clear_bit(IDEV_IO_READY, &idev->flags);
113 break;
114 }
115} 80}
116 81
117/** 82/**
@@ -133,18 +98,29 @@ static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote
133 wake_up(&ihost->eventq); 98 wake_up(&ihost->eventq);
134} 99}
135 100
136/* called once the remote node context is ready to be freed. 101static enum sci_status sci_remote_device_terminate_req(
137 * The remote device can now report that its stop operation is complete. none 102 struct isci_host *ihost,
138 */ 103 struct isci_remote_device *idev,
139static void rnc_destruct_done(void *_dev) 104 int check_abort,
105 struct isci_request *ireq)
140{ 106{
141 struct isci_remote_device *idev = _dev; 107 if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
108 (ireq->target_device != idev) ||
109 (check_abort && !test_bit(IREQ_PENDING_ABORT, &ireq->flags)))
110 return SCI_SUCCESS;
142 111
143 BUG_ON(idev->started_request_count != 0); 112 dev_dbg(&ihost->pdev->dev,
144 sci_change_state(&idev->sm, SCI_DEV_STOPPED); 113 "%s: idev=%p; flags=%lx; req=%p; req target=%p\n",
114 __func__, idev, idev->flags, ireq, ireq->target_device);
115
116 set_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);
117
118 return sci_controller_terminate_request(ihost, idev, ireq);
145} 119}
146 120
147static enum sci_status sci_remote_device_terminate_requests(struct isci_remote_device *idev) 121static enum sci_status sci_remote_device_terminate_reqs_checkabort(
122 struct isci_remote_device *idev,
123 int chk)
148{ 124{
149 struct isci_host *ihost = idev->owning_port->owning_controller; 125 struct isci_host *ihost = idev->owning_port->owning_controller;
150 enum sci_status status = SCI_SUCCESS; 126 enum sci_status status = SCI_SUCCESS;
@@ -154,18 +130,210 @@ static enum sci_status sci_remote_device_terminate_requests(struct isci_remote_d
154 struct isci_request *ireq = ihost->reqs[i]; 130 struct isci_request *ireq = ihost->reqs[i];
155 enum sci_status s; 131 enum sci_status s;
156 132
157 if (!test_bit(IREQ_ACTIVE, &ireq->flags) || 133 s = sci_remote_device_terminate_req(ihost, idev, chk, ireq);
158 ireq->target_device != idev)
159 continue;
160
161 s = sci_controller_terminate_request(ihost, idev, ireq);
162 if (s != SCI_SUCCESS) 134 if (s != SCI_SUCCESS)
163 status = s; 135 status = s;
164 } 136 }
137 return status;
138}
139
140static bool isci_compare_suspendcount(
141 struct isci_remote_device *idev,
142 u32 localcount)
143{
144 smp_rmb();
145
146 /* Check for a change in the suspend count, or the RNC
147 * being destroyed.
148 */
149 return (localcount != idev->rnc.suspend_count)
150 || sci_remote_node_context_is_being_destroyed(&idev->rnc);
151}
152
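
The smp_rmb() in isci_compare_suspendcount() pairs with the smp_wmb() issued where rnc.suspend_count is incremented (see the TX/RX-suspended state entry later in this patch), so a waiter never compares against a stale count. A reduced sketch of the publish/observe pairing, with hypothetical names and assuming the kernel's barrier primitives:

	static unsigned int suspend_count;	/* stands in for rnc.suspend_count */

	static void publish_suspend(void)			/* writer side */
	{
		suspend_count++;
		smp_wmb();	/* order the count update before the wakeup */
		/* wake_up(&eventq); */
	}

	static int observed_new_suspend(unsigned int snapshot)	/* reader side */
	{
		smp_rmb();	/* observe at least the store ordered above */
		return suspend_count != snapshot;
	}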
153static bool isci_check_reqterm(
154 struct isci_host *ihost,
155 struct isci_remote_device *idev,
156 struct isci_request *ireq,
157 u32 localcount)
158{
159 unsigned long flags;
160 bool res;
165 161
162 spin_lock_irqsave(&ihost->scic_lock, flags);
163 res = isci_compare_suspendcount(idev, localcount)
164 && !test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);
165 spin_unlock_irqrestore(&ihost->scic_lock, flags);
166
167 return res;
168}
169
170static bool isci_check_devempty(
171 struct isci_host *ihost,
172 struct isci_remote_device *idev,
173 u32 localcount)
174{
175 unsigned long flags;
176 bool res;
177
178 spin_lock_irqsave(&ihost->scic_lock, flags);
179 res = isci_compare_suspendcount(idev, localcount)
180 && idev->started_request_count == 0;
181 spin_unlock_irqrestore(&ihost->scic_lock, flags);
182
183 return res;
184}
185
186enum sci_status isci_remote_device_terminate_requests(
187 struct isci_host *ihost,
188 struct isci_remote_device *idev,
189 struct isci_request *ireq)
190{
191 enum sci_status status = SCI_SUCCESS;
192 unsigned long flags;
193 u32 rnc_suspend_count;
194
195 spin_lock_irqsave(&ihost->scic_lock, flags);
196
197 if (isci_get_device(idev) == NULL) {
198 dev_dbg(&ihost->pdev->dev, "%s: failed isci_get_device(idev=%p)\n",
199 __func__, idev);
200 spin_unlock_irqrestore(&ihost->scic_lock, flags);
201 status = SCI_FAILURE;
202 } else {
203 /* If already suspended, don't wait for another suspension. */
204 smp_rmb();
205 rnc_suspend_count
206 = sci_remote_node_context_is_suspended(&idev->rnc)
207 ? 0 : idev->rnc.suspend_count;
208
209 dev_dbg(&ihost->pdev->dev,
210 "%s: idev=%p, ireq=%p; started_request_count=%d, "
211 "rnc_suspend_count=%d, rnc.suspend_count=%d"
212 "about to wait\n",
213 __func__, idev, ireq, idev->started_request_count,
214 rnc_suspend_count, idev->rnc.suspend_count);
215
216 #define MAX_SUSPEND_MSECS 10000
217 if (ireq) {
218 /* Terminate a specific TC. */
219 set_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags);
220 sci_remote_device_terminate_req(ihost, idev, 0, ireq);
221 spin_unlock_irqrestore(&ihost->scic_lock, flags);
222 if (!wait_event_timeout(ihost->eventq,
223 isci_check_reqterm(ihost, idev, ireq,
224 rnc_suspend_count),
225 msecs_to_jiffies(MAX_SUSPEND_MSECS))) {
226
227 dev_warn(&ihost->pdev->dev, "%s host%d timeout single\n",
228 __func__, ihost->id);
229 dev_dbg(&ihost->pdev->dev,
230 "%s: ******* Timeout waiting for "
231 "suspend; idev=%p, current state %s; "
232 "started_request_count=%d, flags=%lx\n\t"
233 "rnc_suspend_count=%d, rnc.suspend_count=%d "
234 "RNC: current state %s, current "
235 "suspend_type %x dest state %d;\n"
236 "ireq=%p, ireq->flags = %lx\n",
237 __func__, idev,
238 dev_state_name(idev->sm.current_state_id),
239 idev->started_request_count, idev->flags,
240 rnc_suspend_count, idev->rnc.suspend_count,
241 rnc_state_name(idev->rnc.sm.current_state_id),
242 idev->rnc.suspend_type,
243 idev->rnc.destination_state,
244 ireq, ireq->flags);
245 }
246 spin_lock_irqsave(&ihost->scic_lock, flags);
247 clear_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags);
248 if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
249 isci_free_tag(ihost, ireq->io_tag);
250 spin_unlock_irqrestore(&ihost->scic_lock, flags);
251 } else {
252 /* Terminate all TCs. */
253 sci_remote_device_terminate_requests(idev);
254 spin_unlock_irqrestore(&ihost->scic_lock, flags);
255 if (!wait_event_timeout(ihost->eventq,
256 isci_check_devempty(ihost, idev,
257 rnc_suspend_count),
258 msecs_to_jiffies(MAX_SUSPEND_MSECS))) {
259
260 dev_warn(&ihost->pdev->dev, "%s host%d timeout all\n",
261 __func__, ihost->id);
262 dev_dbg(&ihost->pdev->dev,
263 "%s: ******* Timeout waiting for "
264 "suspend; idev=%p, current state %s; "
265 "started_request_count=%d, flags=%lx\n\t"
266 "rnc_suspend_count=%d, "
267 "RNC: current state %s, "
268 "rnc.suspend_count=%d, current "
269 "suspend_type %x dest state %d\n",
270 __func__, idev,
271 dev_state_name(idev->sm.current_state_id),
272 idev->started_request_count, idev->flags,
273 rnc_suspend_count,
274 rnc_state_name(idev->rnc.sm.current_state_id),
275 idev->rnc.suspend_count,
276 idev->rnc.suspend_type,
277 idev->rnc.destination_state);
278 }
279 }
280 dev_dbg(&ihost->pdev->dev, "%s: idev=%p, wait done\n",
281 __func__, idev);
282 isci_put_device(idev);
283 }
166 return status; 284 return status;
167} 285}
168 286
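
The terminate path above has a snapshot-then-wait shape: take a device reference, snapshot the suspend count under scic_lock, issue the terminations, then wait_event_timeout() on a predicate that re-acquires the lock and compares against the snapshot. A condensed sketch of that shape, with hypothetical helpers (not the driver's actual flow):

	#define MAX_WAIT_MSECS 10000	/* mirrors MAX_SUSPEND_MSECS above */

	static int terminate_and_wait(struct device_ctx *dc)
	{
		unsigned long flags;
		u32 snapshot;

		spin_lock_irqsave(&dc->lock, flags);
		snapshot = dc->suspend_count;	/* progress marker */
		issue_terminations(dc);		/* hypothetical: kick the hardware */
		spin_unlock_irqrestore(&dc->lock, flags);

		/* The predicate re-takes the lock internally, as the driver's
		 * isci_check_devempty() does. */
		if (!wait_event_timeout(dc->eventq,
					progressed_or_empty(dc, snapshot),
					msecs_to_jiffies(MAX_WAIT_MSECS)))
			return -ETIMEDOUT;
		return 0;
	}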
287/**
288 * isci_remote_device_not_ready() - This function is called by the ihost when
289 * the remote device is not ready. We mark the isci device as not ready
290 * (clearing "ready_for_io") and signal the waiting process.
291 * @isci_host: This parameter specifies the isci host object.
292 * @isci_device: This parameter specifies the remote device
293 *
294 * sci_lock is held on entrance to this function.
295 */
296static void isci_remote_device_not_ready(struct isci_host *ihost,
297 struct isci_remote_device *idev,
298 u32 reason)
299{
300 dev_dbg(&ihost->pdev->dev,
301 "%s: isci_device = %p; reason = %d\n", __func__, idev, reason);
302
303 switch (reason) {
304 case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
305 set_bit(IDEV_IO_NCQERROR, &idev->flags);
306
307 /* Suspend the remote device so the I/O can be terminated. */
308 sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL);
309
310 /* Kill all outstanding requests for the device. */
311 sci_remote_device_terminate_requests(idev);
312
313 /* Fall through into the default case... */
314 default:
315 clear_bit(IDEV_IO_READY, &idev->flags);
316 break;
317 }
318}
319
320/* called once the remote node context is ready to be freed.
321 * The remote device can now report that its stop operation is complete. none
322 */
323static void rnc_destruct_done(void *_dev)
324{
325 struct isci_remote_device *idev = _dev;
326
327 BUG_ON(idev->started_request_count != 0);
328 sci_change_state(&idev->sm, SCI_DEV_STOPPED);
329}
330
331enum sci_status sci_remote_device_terminate_requests(
332 struct isci_remote_device *idev)
333{
334 return sci_remote_device_terminate_reqs_checkabort(idev, 0);
335}
336
169enum sci_status sci_remote_device_stop(struct isci_remote_device *idev, 337enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
170 u32 timeout) 338 u32 timeout)
171{ 339{
@@ -201,13 +369,16 @@ enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
201 case SCI_SMP_DEV_IDLE: 369 case SCI_SMP_DEV_IDLE:
202 case SCI_SMP_DEV_CMD: 370 case SCI_SMP_DEV_CMD:
203 sci_change_state(sm, SCI_DEV_STOPPING); 371 sci_change_state(sm, SCI_DEV_STOPPING);
204 if (idev->started_request_count == 0) { 372 if (idev->started_request_count == 0)
205 sci_remote_node_context_destruct(&idev->rnc, 373 sci_remote_node_context_destruct(&idev->rnc,
206 rnc_destruct_done, idev); 374 rnc_destruct_done,
207 return SCI_SUCCESS; 375 idev);
208 } else 376 else {
209 return sci_remote_device_terminate_requests(idev); 377 sci_remote_device_suspend(
210 break; 378 idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
379 sci_remote_device_terminate_requests(idev);
380 }
381 return SCI_SUCCESS;
211 case SCI_DEV_STOPPING: 382 case SCI_DEV_STOPPING:
212 /* All requests should have been terminated, but if there is an 383 /* All requests should have been terminated, but if there is an
213 * attempt to stop a device already in the stopping state, then 384 * attempt to stop a device already in the stopping state, then
@@ -265,22 +436,6 @@ enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev
265 return SCI_SUCCESS; 436 return SCI_SUCCESS;
266} 437}
267 438
268enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
269 u32 suspend_type)
270{
271 struct sci_base_state_machine *sm = &idev->sm;
272 enum sci_remote_device_states state = sm->current_state_id;
273
274 if (state != SCI_STP_DEV_CMD) {
275 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
276 __func__, dev_state_name(state));
277 return SCI_FAILURE_INVALID_STATE;
278 }
279
280 return sci_remote_node_context_suspend(&idev->rnc,
281 suspend_type, NULL, NULL);
282}
283
284enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev, 439enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
285 u32 frame_index) 440 u32 frame_index)
286{ 441{
@@ -412,9 +567,9 @@ static void atapi_remote_device_resume_done(void *_dev)
412enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev, 567enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
413 u32 event_code) 568 u32 event_code)
414{ 569{
570 enum sci_status status;
415 struct sci_base_state_machine *sm = &idev->sm; 571 struct sci_base_state_machine *sm = &idev->sm;
416 enum sci_remote_device_states state = sm->current_state_id; 572 enum sci_remote_device_states state = sm->current_state_id;
417 enum sci_status status;
418 573
419 switch (scu_get_event_type(event_code)) { 574 switch (scu_get_event_type(event_code)) {
420 case SCU_EVENT_TYPE_RNC_OPS_MISC: 575 case SCU_EVENT_TYPE_RNC_OPS_MISC:
@@ -427,9 +582,7 @@ enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
427 status = SCI_SUCCESS; 582 status = SCI_SUCCESS;
428 583
429 /* Suspend the associated RNC */ 584 /* Suspend the associated RNC */
430 sci_remote_node_context_suspend(&idev->rnc, 585 sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL);
431 SCI_SOFTWARE_SUSPENSION,
432 NULL, NULL);
433 586
434 dev_dbg(scirdev_to_dev(idev), 587 dev_dbg(scirdev_to_dev(idev),
435 "%s: device: %p event code: %x: %s\n", 588 "%s: device: %p event code: %x: %s\n",
@@ -455,6 +608,10 @@ enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
455 if (status != SCI_SUCCESS) 608 if (status != SCI_SUCCESS)
456 return status; 609 return status;
457 610
611 /* Decode device-specific states that may require an RNC resume during
612 * normal operation. When the abort path is active, these resumes are
613 * managed when the abort path exits.
614 */
458 if (state == SCI_STP_DEV_ATAPI_ERROR) { 615 if (state == SCI_STP_DEV_ATAPI_ERROR) {
459 /* For ATAPI error state resume the RNC right away. */ 616 /* For ATAPI error state resume the RNC right away. */
460 if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX || 617 if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
@@ -743,10 +900,6 @@ enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
743 if (status != SCI_SUCCESS) 900 if (status != SCI_SUCCESS)
744 return status; 901 return status;
745 902
746 status = sci_remote_node_context_start_task(&idev->rnc, ireq);
747 if (status != SCI_SUCCESS)
748 goto out;
749
750 status = sci_request_start(ireq); 903 status = sci_request_start(ireq);
751 if (status != SCI_SUCCESS) 904 if (status != SCI_SUCCESS)
752 goto out; 905 goto out;
@@ -765,11 +918,11 @@ enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
765 * the correct action when the remote node context is suspended 918 * the correct action when the remote node context is suspended
766 * and later resumed. 919 * and later resumed.
767 */ 920 */
768 sci_remote_node_context_suspend(&idev->rnc, 921 sci_remote_device_suspend(idev,
769 SCI_SOFTWARE_SUSPENSION, NULL, NULL); 922 SCI_SW_SUSPEND_LINKHANG_DETECT);
770 sci_remote_node_context_resume(&idev->rnc, 923
771 sci_remote_device_continue_request, 924 status = sci_remote_node_context_start_task(&idev->rnc, ireq,
772 idev); 925 sci_remote_device_continue_request, idev);
773 926
774 out: 927 out:
775 sci_remote_device_start_request(idev, ireq, status); 928 sci_remote_device_start_request(idev, ireq, status);
@@ -783,7 +936,9 @@ enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
783 if (status != SCI_SUCCESS) 936 if (status != SCI_SUCCESS)
784 return status; 937 return status;
785 938
786 status = sci_remote_node_context_start_task(&idev->rnc, ireq); 939 /* Resume the RNC as needed: */
940 status = sci_remote_node_context_start_task(&idev->rnc, ireq,
941 NULL, NULL);
787 if (status != SCI_SUCCESS) 942 if (status != SCI_SUCCESS)
788 break; 943 break;
789 944
@@ -892,7 +1047,7 @@ static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_
892 * here should go through isci_remote_device_nuke_requests. 1047 * here should go through isci_remote_device_nuke_requests.
893 * If we hit this condition, we will need a way to complete 1048 * If we hit this condition, we will need a way to complete
894 * io requests in process */ 1049 * io requests in process */
895 BUG_ON(!list_empty(&idev->reqs_in_process)); 1050 BUG_ON(idev->started_request_count > 0);
896 1051
897 sci_remote_device_destruct(idev); 1052 sci_remote_device_destruct(idev);
898 list_del_init(&idev->node); 1053 list_del_init(&idev->node);
@@ -954,14 +1109,21 @@ static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm
954static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm) 1109static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
955{ 1110{
956 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); 1111 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1112 struct isci_host *ihost = idev->owning_port->owning_controller;
957 1113
958 sci_remote_node_context_suspend( 1114 dev_dbg(&ihost->pdev->dev,
959 &idev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL); 1115 "%s: isci_device = %p\n", __func__, idev);
1116
1117 sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
960} 1118}
961 1119
962static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm) 1120static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
963{ 1121{
964 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); 1122 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1123 struct isci_host *ihost = idev->owning_port->owning_controller;
1124
1125 dev_dbg(&ihost->pdev->dev,
1126 "%s: isci_device = %p\n", __func__, idev);
965 1127
966 sci_remote_node_context_resume(&idev->rnc, NULL, NULL); 1128 sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
967} 1129}
@@ -1113,33 +1275,20 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
1113{ 1275{
1114 enum sci_status status; 1276 enum sci_status status;
1115 struct sci_port_properties properties; 1277 struct sci_port_properties properties;
1116 struct domain_device *dev = idev->domain_dev;
1117 1278
1118 sci_remote_device_construct(iport, idev); 1279 sci_remote_device_construct(iport, idev);
1119 1280
1120 /*
1121 * This information is request to determine how many remote node context
1122 * entries will be needed to store the remote node.
1123 */
1124 idev->is_direct_attached = true;
1125
1126 sci_port_get_properties(iport, &properties); 1281 sci_port_get_properties(iport, &properties);
1127 /* Get accurate port width from port's phy mask for a DA device. */ 1282 /* Get accurate port width from port's phy mask for a DA device. */
1128 idev->device_port_width = hweight32(properties.phy_mask); 1283 idev->device_port_width = hweight32(properties.phy_mask);
1129 1284
1130 status = sci_controller_allocate_remote_node_context(iport->owning_controller, 1285 status = sci_controller_allocate_remote_node_context(iport->owning_controller,
1131 idev, 1286 idev,
1132 &idev->rnc.remote_node_index); 1287 &idev->rnc.remote_node_index);
1133 1288
1134 if (status != SCI_SUCCESS) 1289 if (status != SCI_SUCCESS)
1135 return status; 1290 return status;
1136 1291
1137 if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV ||
1138 (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev))
1139 /* pass */;
1140 else
1141 return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
1142
1143 idev->connection_rate = sci_port_get_max_allowed_speed(iport); 1292 idev->connection_rate = sci_port_get_max_allowed_speed(iport);
1144 1293
1145 return SCI_SUCCESS; 1294 return SCI_SUCCESS;
@@ -1171,19 +1320,13 @@ static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
1171 if (status != SCI_SUCCESS) 1320 if (status != SCI_SUCCESS)
1172 return status; 1321 return status;
1173 1322
1174 if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV || 1323 /* For SAS-2 the physical link rate is actually a logical link
1175 (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev))
1176 /* pass */;
1177 else
1178 return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
1179
1180 /*
1181 * For SAS-2 the physical link rate is actually a logical link
1182 * rate that incorporates multiplexing. The SCU doesn't 1324 * rate that incorporates multiplexing. The SCU doesn't
1183 * incorporate multiplexing and for the purposes of the 1325 * incorporate multiplexing and for the purposes of the
1184 * connection the logical link rate is the same as the 1326 * connection the logical link rate is the same as the
1185 * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay 1327 * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay
1186 * one another, so this code works for both situations. */ 1328 * one another, so this code works for both situations.
1329 */
1187 idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport), 1330 idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport),
1188 dev->linkrate); 1331 dev->linkrate);
1189 1332
@@ -1193,6 +1336,105 @@ static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
1193 return SCI_SUCCESS; 1336 return SCI_SUCCESS;
1194} 1337}
1195 1338
1339enum sci_status sci_remote_device_resume(
1340 struct isci_remote_device *idev,
1341 scics_sds_remote_node_context_callback cb_fn,
1342 void *cb_p)
1343{
1344 enum sci_status status;
1345
1346 status = sci_remote_node_context_resume(&idev->rnc, cb_fn, cb_p);
1347 if (status != SCI_SUCCESS)
1348 dev_dbg(scirdev_to_dev(idev), "%s: failed to resume: %d\n",
1349 __func__, status);
1350 return status;
1351}
1352
1353static void isci_remote_device_resume_from_abort_complete(void *cbparam)
1354{
1355 struct isci_remote_device *idev = cbparam;
1356 struct isci_host *ihost = idev->owning_port->owning_controller;
1357 scics_sds_remote_node_context_callback abort_resume_cb =
1358 idev->abort_resume_cb;
1359
1360 dev_dbg(scirdev_to_dev(idev), "%s: passing-along resume: %p\n",
1361 __func__, abort_resume_cb);
1362
1363 if (abort_resume_cb != NULL) {
1364 idev->abort_resume_cb = NULL;
1365 abort_resume_cb(idev->abort_resume_cbparam);
1366 }
1367 clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
1368 wake_up(&ihost->eventq);
1369}
1370
1371static bool isci_remote_device_test_resume_done(
1372 struct isci_host *ihost,
1373 struct isci_remote_device *idev)
1374{
1375 unsigned long flags;
1376 bool done;
1377
1378 spin_lock_irqsave(&ihost->scic_lock, flags);
1379 done = !test_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags)
1380 || test_bit(IDEV_STOP_PENDING, &idev->flags)
1381 || sci_remote_node_context_is_being_destroyed(&idev->rnc);
1382 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1383
1384 return done;
1385}
1386
1387void isci_remote_device_wait_for_resume_from_abort(
1388 struct isci_host *ihost,
1389 struct isci_remote_device *idev)
1390{
1391 dev_dbg(&ihost->pdev->dev, "%s: starting resume wait: %p\n",
1392 __func__, idev);
1393
1394 #define MAX_RESUME_MSECS 10000
1395 if (!wait_event_timeout(ihost->eventq,
1396 isci_remote_device_test_resume_done(ihost, idev),
1397 msecs_to_jiffies(MAX_RESUME_MSECS))) {
1398
1399 dev_warn(&ihost->pdev->dev, "%s: #### Timeout waiting for "
1400 "resume: %p\n", __func__, idev);
1401 }
1402 clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
1403
1404 dev_dbg(&ihost->pdev->dev, "%s: resume wait done: %p\n",
1405 __func__, idev);
1406}
1407
1408enum sci_status isci_remote_device_resume_from_abort(
1409 struct isci_host *ihost,
1410 struct isci_remote_device *idev)
1411{
1412 unsigned long flags;
1413 enum sci_status status = SCI_SUCCESS;
1414 int destroyed;
1415
1416 spin_lock_irqsave(&ihost->scic_lock, flags);
1417 /* Preserve any current resume callbacks, for instance from other
1418 * resumptions.
1419 */
1420 idev->abort_resume_cb = idev->rnc.user_callback;
1421 idev->abort_resume_cbparam = idev->rnc.user_cookie;
1422 set_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
1423 clear_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags);
1424 destroyed = sci_remote_node_context_is_being_destroyed(&idev->rnc);
1425 if (!destroyed)
1426 status = sci_remote_device_resume(
1427 idev, isci_remote_device_resume_from_abort_complete,
1428 idev);
1429 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1430 if (!destroyed && (status == SCI_SUCCESS))
1431 isci_remote_device_wait_for_resume_from_abort(ihost, idev);
1432 else
1433 clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
1434
1435 return status;
1436}
1437
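
isci_remote_device_resume_from_abort() saves whatever resume callback was already registered on the RNC and replays it from its own completion handler, so an abort-path resume does not swallow a notification someone else was waiting for. A hedged sketch of that chain-the-previous-callback idiom (hypothetical types mirroring abort_resume_cb/abort_resume_cbparam):

	typedef void (*resume_cb_t)(void *cookie);

	struct resume_ctx {
		resume_cb_t prev_cb;	/* callback displaced by the abort path */
		void *prev_cookie;
	};

	static void resume_wrapper_done(struct resume_ctx *ctx)
	{
		resume_cb_t prev = ctx->prev_cb;

		ctx->prev_cb = NULL;	/* fire the saved callback exactly once */
		if (prev)
			prev(ctx->prev_cookie);
	}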
1196/** 1438/**
1197 * sci_remote_device_start() - This method will start the supplied remote 1439 * sci_remote_device_start() - This method will start the supplied remote
1198 * device. This method enables normal IO requests to flow through to the 1440 * device. This method enables normal IO requests to flow through to the
@@ -1207,7 +1449,7 @@ static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
1207 * the device when there have been no phys added to it. 1449 * the device when there have been no phys added to it.
1208 */ 1450 */
1209static enum sci_status sci_remote_device_start(struct isci_remote_device *idev, 1451static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
1210 u32 timeout) 1452 u32 timeout)
1211{ 1453{
1212 struct sci_base_state_machine *sm = &idev->sm; 1454 struct sci_base_state_machine *sm = &idev->sm;
1213 enum sci_remote_device_states state = sm->current_state_id; 1455 enum sci_remote_device_states state = sm->current_state_id;
@@ -1219,9 +1461,8 @@ static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
1219 return SCI_FAILURE_INVALID_STATE; 1461 return SCI_FAILURE_INVALID_STATE;
1220 } 1462 }
1221 1463
1222 status = sci_remote_node_context_resume(&idev->rnc, 1464 status = sci_remote_device_resume(idev, remote_device_resume_done,
1223 remote_device_resume_done, 1465 idev);
1224 idev);
1225 if (status != SCI_SUCCESS) 1466 if (status != SCI_SUCCESS)
1226 return status; 1467 return status;
1227 1468
@@ -1259,20 +1500,6 @@ static enum sci_status isci_remote_device_construct(struct isci_port *iport,
1259 return status; 1500 return status;
1260} 1501}
1261 1502
1262void isci_remote_device_nuke_requests(struct isci_host *ihost, struct isci_remote_device *idev)
1263{
1264 DECLARE_COMPLETION_ONSTACK(aborted_task_completion);
1265
1266 dev_dbg(&ihost->pdev->dev,
1267 "%s: idev = %p\n", __func__, idev);
1268
1269 /* Cleanup all requests pending for this device. */
1270 isci_terminate_pending_requests(ihost, idev);
1271
1272 dev_dbg(&ihost->pdev->dev,
1273 "%s: idev = %p, done\n", __func__, idev);
1274}
1275
1276/** 1503/**
1277 * This function builds the isci_remote_device when a libsas dev_found message 1504 * This function builds the isci_remote_device when a libsas dev_found message
1278 * is received. 1505 * is received.
@@ -1297,10 +1524,6 @@ isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport)
1297 dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__); 1524 dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__);
1298 return NULL; 1525 return NULL;
1299 } 1526 }
1300
1301 if (WARN_ONCE(!list_empty(&idev->reqs_in_process), "found requests in process\n"))
1302 return NULL;
1303
1304 if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n")) 1527 if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n"))
1305 return NULL; 1528 return NULL;
1306 1529
@@ -1342,14 +1565,8 @@ enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_rem
1342 spin_lock_irqsave(&ihost->scic_lock, flags); 1565 spin_lock_irqsave(&ihost->scic_lock, flags);
1343 idev->domain_dev->lldd_dev = NULL; /* disable new lookups */ 1566 idev->domain_dev->lldd_dev = NULL; /* disable new lookups */
1344 set_bit(IDEV_GONE, &idev->flags); 1567 set_bit(IDEV_GONE, &idev->flags);
1345 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1346
1347 /* Kill all outstanding requests. */
1348 isci_remote_device_nuke_requests(ihost, idev);
1349 1568
1350 set_bit(IDEV_STOP_PENDING, &idev->flags); 1569 set_bit(IDEV_STOP_PENDING, &idev->flags);
1351
1352 spin_lock_irqsave(&ihost->scic_lock, flags);
1353 status = sci_remote_device_stop(idev, 50); 1570 status = sci_remote_device_stop(idev, 50);
1354 spin_unlock_irqrestore(&ihost->scic_lock, flags); 1571 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1355 1572
@@ -1359,6 +1576,9 @@ enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_rem
1359 else 1576 else
1360 wait_for_device_stop(ihost, idev); 1577 wait_for_device_stop(ihost, idev);
1361 1578
1579 dev_dbg(&ihost->pdev->dev,
1580 "%s: isci_device = %p, waiting done.\n", __func__, idev);
1581
1362 return status; 1582 return status;
1363} 1583}
1364 1584
@@ -1434,3 +1654,73 @@ int isci_remote_device_found(struct domain_device *dev)
1434 1654
1435 return status == SCI_SUCCESS ? 0 : -ENODEV; 1655 return status == SCI_SUCCESS ? 0 : -ENODEV;
1436} 1656}
1657
1658enum sci_status isci_remote_device_suspend_terminate(
1659 struct isci_host *ihost,
1660 struct isci_remote_device *idev,
1661 struct isci_request *ireq)
1662{
1663 unsigned long flags;
1664 enum sci_status status;
1665
1666 /* Put the device into suspension. */
1667 spin_lock_irqsave(&ihost->scic_lock, flags);
1668 set_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags);
1669 sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
1670 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1671
1672 /* Terminate and wait for the completions. */
1673 status = isci_remote_device_terminate_requests(ihost, idev, ireq);
1674 if (status != SCI_SUCCESS)
1675 dev_dbg(&ihost->pdev->dev,
1676 "%s: isci_remote_device_terminate_requests(%p) "
1677 "returned %d!\n",
1678 __func__, idev, status);
1679
1680 /* NOTE: RNC resumption is left to the caller! */
1681 return status;
1682}
1683
1684int isci_remote_device_is_safe_to_abort(
1685 struct isci_remote_device *idev)
1686{
1687 return sci_remote_node_context_is_safe_to_abort(&idev->rnc);
1688}
1689
1690enum sci_status sci_remote_device_abort_requests_pending_abort(
1691 struct isci_remote_device *idev)
1692{
1693 return sci_remote_device_terminate_reqs_checkabort(idev, 1);
1694}
1695
1696enum sci_status isci_remote_device_reset_complete(
1697 struct isci_host *ihost,
1698 struct isci_remote_device *idev)
1699{
1700 unsigned long flags;
1701 enum sci_status status;
1702
1703 spin_lock_irqsave(&ihost->scic_lock, flags);
1704 status = sci_remote_device_reset_complete(idev);
1705 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1706
1707 return status;
1708}
1709
1710void isci_dev_set_hang_detection_timeout(
1711 struct isci_remote_device *idev,
1712 u32 timeout)
1713{
1714 if (dev_is_sata(idev->domain_dev)) {
1715 if (timeout) {
1716 if (test_and_set_bit(IDEV_RNC_LLHANG_ENABLED,
1717 &idev->flags))
1718 return; /* Already enabled. */
1719 } else if (!test_and_clear_bit(IDEV_RNC_LLHANG_ENABLED,
1720 &idev->flags))
1721 return; /* Not enabled. */
1722
1723 sci_port_set_hang_detection_timeout(idev->owning_port,
1724 timeout);
1725 }
1726}
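
isci_dev_set_hang_detection_timeout() uses test_and_set_bit()/test_and_clear_bit() so that repeated enables or disables for one device become no-ops before the shared, reference-counted port state is touched. A minimal sketch of that atomic guard (illustrative bit name; IDEV_RNC_LLHANG_ENABLED plays this role above):

	#define FEATURE_ENABLED_BIT 0

	static void set_feature(unsigned long *flags, int enable)
	{
		if (enable) {
			if (test_and_set_bit(FEATURE_ENABLED_BIT, flags))
				return;	/* already enabled: nothing to do */
		} else {
			if (!test_and_clear_bit(FEATURE_ENABLED_BIT, flags))
				return;	/* already disabled: nothing to do */
		}
		/* ...propagate the change to shared state exactly once... */
	}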
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
index 58637ee08f55..7674caae1d88 100644
--- a/drivers/scsi/isci/remote_device.h
+++ b/drivers/scsi/isci/remote_device.h
@@ -85,27 +85,38 @@ struct isci_remote_device {
85 #define IDEV_GONE 3 85 #define IDEV_GONE 3
86 #define IDEV_IO_READY 4 86 #define IDEV_IO_READY 4
87 #define IDEV_IO_NCQERROR 5 87 #define IDEV_IO_NCQERROR 5
88 #define IDEV_RNC_LLHANG_ENABLED 6
89 #define IDEV_ABORT_PATH_ACTIVE 7
90 #define IDEV_ABORT_PATH_RESUME_PENDING 8
88 unsigned long flags; 91 unsigned long flags;
89 struct kref kref; 92 struct kref kref;
90 struct isci_port *isci_port; 93 struct isci_port *isci_port;
91 struct domain_device *domain_dev; 94 struct domain_device *domain_dev;
92 struct list_head node; 95 struct list_head node;
93 struct list_head reqs_in_process;
94 struct sci_base_state_machine sm; 96 struct sci_base_state_machine sm;
95 u32 device_port_width; 97 u32 device_port_width;
96 enum sas_linkrate connection_rate; 98 enum sas_linkrate connection_rate;
97 bool is_direct_attached;
98 struct isci_port *owning_port; 99 struct isci_port *owning_port;
99 struct sci_remote_node_context rnc; 100 struct sci_remote_node_context rnc;
100 /* XXX unify with device reference counting and delete */ 101 /* XXX unify with device reference counting and delete */
101 u32 started_request_count; 102 u32 started_request_count;
102 struct isci_request *working_request; 103 struct isci_request *working_request;
103 u32 not_ready_reason; 104 u32 not_ready_reason;
105 scics_sds_remote_node_context_callback abort_resume_cb;
106 void *abort_resume_cbparam;
104}; 107};
105 108
106#define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000 109#define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000
107 110
108/* device reference routines must be called under sci_lock */ 111/* device reference routines must be called under sci_lock */
112static inline struct isci_remote_device *isci_get_device(
113 struct isci_remote_device *idev)
114{
115 if (idev)
116 kref_get(&idev->kref);
117 return idev;
118}
119
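
The new isci_get_device() is the NULL-tolerant kref_get() counterpart to the existing isci_put_device(); per the comment above, both must run under sci_lock. A small hypothetical caller showing the pairing:

	/* Sketch: take a reference under the host lock, drop it when done.
	 * Illustrative caller, not driver code.
	 */
	static int do_with_device(struct isci_host *ihost,
				  struct isci_remote_device *idev)
	{
		unsigned long flags;

		spin_lock_irqsave(&ihost->scic_lock, flags);
		if (!isci_get_device(idev)) {	/* device already gone */
			spin_unlock_irqrestore(&ihost->scic_lock, flags);
			return -ENODEV;
		}
		spin_unlock_irqrestore(&ihost->scic_lock, flags);

		/* ...operate on idev without it being freed underneath us... */

		spin_lock_irqsave(&ihost->scic_lock, flags);
		isci_put_device(idev);
		spin_unlock_irqrestore(&ihost->scic_lock, flags);
		return 0;
	}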
109static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev) 120static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev)
110{ 121{
111 struct isci_remote_device *idev = dev->lldd_dev; 122 struct isci_remote_device *idev = dev->lldd_dev;
@@ -302,6 +313,8 @@ static inline void sci_remote_device_decrement_request_count(struct isci_remote_
302 idev->started_request_count--; 313 idev->started_request_count--;
303} 314}
304 315
316void isci_dev_set_hang_detection_timeout(struct isci_remote_device *idev, u32 timeout);
317
305enum sci_status sci_remote_device_frame_handler( 318enum sci_status sci_remote_device_frame_handler(
306 struct isci_remote_device *idev, 319 struct isci_remote_device *idev,
307 u32 frame_index); 320 u32 frame_index);
@@ -325,12 +338,50 @@ enum sci_status sci_remote_device_complete_io(
325 struct isci_remote_device *idev, 338 struct isci_remote_device *idev,
326 struct isci_request *ireq); 339 struct isci_request *ireq);
327 340
328enum sci_status sci_remote_device_suspend(
329 struct isci_remote_device *idev,
330 u32 suspend_type);
331
332void sci_remote_device_post_request( 341void sci_remote_device_post_request(
333 struct isci_remote_device *idev, 342 struct isci_remote_device *idev,
334 u32 request); 343 u32 request);
335 344
345enum sci_status sci_remote_device_terminate_requests(
346 struct isci_remote_device *idev);
347
348int isci_remote_device_is_safe_to_abort(
349 struct isci_remote_device *idev);
350
351enum sci_status
352sci_remote_device_abort_requests_pending_abort(
353 struct isci_remote_device *idev);
354
355enum sci_status isci_remote_device_suspend(
356 struct isci_host *ihost,
357 struct isci_remote_device *idev);
358
359enum sci_status sci_remote_device_resume(
360 struct isci_remote_device *idev,
361 scics_sds_remote_node_context_callback cb_fn,
362 void *cb_p);
363
364enum sci_status isci_remote_device_resume_from_abort(
365 struct isci_host *ihost,
366 struct isci_remote_device *idev);
367
368enum sci_status isci_remote_device_reset(
369 struct isci_host *ihost,
370 struct isci_remote_device *idev);
371
372enum sci_status isci_remote_device_reset_complete(
373 struct isci_host *ihost,
374 struct isci_remote_device *idev);
375
376enum sci_status isci_remote_device_suspend_terminate(
377 struct isci_host *ihost,
378 struct isci_remote_device *idev,
379 struct isci_request *ireq);
380
381enum sci_status isci_remote_device_terminate_requests(
382 struct isci_host *ihost,
383 struct isci_remote_device *idev,
384 struct isci_request *ireq);
385enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
386 enum sci_remote_node_suspension_reasons reason);
336#endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */ 387#endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
index 3a9463481f38..1910100638a2 100644
--- a/drivers/scsi/isci/remote_node_context.c
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -52,7 +52,7 @@
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */ 54 */
55 55#include <scsi/sas_ata.h>
56#include "host.h" 56#include "host.h"
57#include "isci.h" 57#include "isci.h"
58#include "remote_device.h" 58#include "remote_device.h"
@@ -90,6 +90,15 @@ bool sci_remote_node_context_is_ready(
90 return false; 90 return false;
91} 91}
92 92
93bool sci_remote_node_context_is_suspended(struct sci_remote_node_context *sci_rnc)
94{
95 u32 current_state = sci_rnc->sm.current_state_id;
96
97 if (current_state == SCI_RNC_TX_RX_SUSPENDED)
98 return true;
99 return false;
100}
101
93static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id) 102static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id)
94{ 103{
95 if (id < ihost->remote_node_entries && 104 if (id < ihost->remote_node_entries &&
@@ -131,7 +140,7 @@ static void sci_remote_node_context_construct_buffer(struct sci_remote_node_cont
131 140
132 rnc->ssp.arbitration_wait_time = 0; 141 rnc->ssp.arbitration_wait_time = 0;
133 142
134 if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { 143 if (dev_is_sata(dev)) {
135 rnc->ssp.connection_occupancy_timeout = 144 rnc->ssp.connection_occupancy_timeout =
136 ihost->user_parameters.stp_max_occupancy_timeout; 145 ihost->user_parameters.stp_max_occupancy_timeout;
137 rnc->ssp.connection_inactivity_timeout = 146 rnc->ssp.connection_inactivity_timeout =
@@ -151,7 +160,6 @@ static void sci_remote_node_context_construct_buffer(struct sci_remote_node_cont
151 rnc->ssp.oaf_source_zone_group = 0; 160 rnc->ssp.oaf_source_zone_group = 0;
152 rnc->ssp.oaf_more_compatibility_features = 0; 161 rnc->ssp.oaf_more_compatibility_features = 0;
153} 162}
154
155/** 163/**
156 * 164 *
157 * @sci_rnc: 165 * @sci_rnc:
@@ -165,23 +173,30 @@ static void sci_remote_node_context_construct_buffer(struct sci_remote_node_cont
165static void sci_remote_node_context_setup_to_resume( 173static void sci_remote_node_context_setup_to_resume(
166 struct sci_remote_node_context *sci_rnc, 174 struct sci_remote_node_context *sci_rnc,
167 scics_sds_remote_node_context_callback callback, 175 scics_sds_remote_node_context_callback callback,
168 void *callback_parameter) 176 void *callback_parameter,
177 enum sci_remote_node_context_destination_state dest_param)
169{ 178{
170 if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL) { 179 if (sci_rnc->destination_state != RNC_DEST_FINAL) {
171 sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY; 180 sci_rnc->destination_state = dest_param;
172 sci_rnc->user_callback = callback; 181 if (callback != NULL) {
173 sci_rnc->user_cookie = callback_parameter; 182 sci_rnc->user_callback = callback;
183 sci_rnc->user_cookie = callback_parameter;
184 }
174 } 185 }
175} 186}
176 187
177static void sci_remote_node_context_setup_to_destory( 188static void sci_remote_node_context_setup_to_destroy(
178 struct sci_remote_node_context *sci_rnc, 189 struct sci_remote_node_context *sci_rnc,
179 scics_sds_remote_node_context_callback callback, 190 scics_sds_remote_node_context_callback callback,
180 void *callback_parameter) 191 void *callback_parameter)
181{ 192{
182 sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL; 193 struct isci_host *ihost = idev_to_ihost(rnc_to_dev(sci_rnc));
194
195 sci_rnc->destination_state = RNC_DEST_FINAL;
183 sci_rnc->user_callback = callback; 196 sci_rnc->user_callback = callback;
184 sci_rnc->user_cookie = callback_parameter; 197 sci_rnc->user_cookie = callback_parameter;
198
199 wake_up(&ihost->eventq);
185} 200}
186 201
187/** 202/**
@@ -203,9 +218,19 @@ static void sci_remote_node_context_notify_user(
203 218
204static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc) 219static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc)
205{ 220{
206 if (rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY) 221 switch (rnc->destination_state) {
222 case RNC_DEST_READY:
223 case RNC_DEST_SUSPENDED_RESUME:
224 rnc->destination_state = RNC_DEST_READY;
225 /* Fall through... */
226 case RNC_DEST_FINAL:
207 sci_remote_node_context_resume(rnc, rnc->user_callback, 227 sci_remote_node_context_resume(rnc, rnc->user_callback,
208 rnc->user_cookie); 228 rnc->user_cookie);
229 break;
230 default:
231 rnc->destination_state = RNC_DEST_UNSPECIFIED;
232 break;
233 }
209} 234}
210 235
211static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc) 236static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc)
@@ -219,13 +244,12 @@ static void sci_remote_node_context_validate_context_buffer(struct sci_remote_no
219 244
220 rnc_buffer->ssp.is_valid = true; 245 rnc_buffer->ssp.is_valid = true;
221 246
222 if (!idev->is_direct_attached && 247 if (dev_is_sata(dev) && dev->parent) {
223 (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))) {
224 sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96); 248 sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96);
225 } else { 249 } else {
226 sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32); 250 sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32);
227 251
228 if (idev->is_direct_attached) 252 if (!dev->parent)
229 sci_port_setup_transports(idev->owning_port, 253 sci_port_setup_transports(idev->owning_port,
230 sci_rnc->remote_node_index); 254 sci_rnc->remote_node_index);
231 } 255 }
@@ -248,13 +272,18 @@ static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_
248static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm) 272static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm)
249{ 273{
250 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); 274 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
275 struct isci_remote_device *idev = rnc_to_dev(rnc);
276 struct isci_host *ihost = idev->owning_port->owning_controller;
251 277
252 /* Check to see if we have gotten back to the initial state because 278 /* Check to see if we have gotten back to the initial state because
253 * someone requested to destroy the remote node context object. 279 * someone requested to destroy the remote node context object.
254 */ 280 */
255 if (sm->previous_state_id == SCI_RNC_INVALIDATING) { 281 if (sm->previous_state_id == SCI_RNC_INVALIDATING) {
256 rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; 282 rnc->destination_state = RNC_DEST_UNSPECIFIED;
257 sci_remote_node_context_notify_user(rnc); 283 sci_remote_node_context_notify_user(rnc);
284
285 smp_wmb();
286 wake_up(&ihost->eventq);
258 } 287 }
259} 288}
260 289
@@ -269,6 +298,8 @@ static void sci_remote_node_context_invalidating_state_enter(struct sci_base_sta
269{ 298{
270 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); 299 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
271 300
301 /* Terminate all outstanding requests. */
302 sci_remote_device_terminate_requests(rnc_to_dev(rnc));
272 sci_remote_node_context_invalidate_context_buffer(rnc); 303 sci_remote_node_context_invalidate_context_buffer(rnc);
273} 304}
274 305
@@ -287,10 +318,8 @@ static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_m
287 * resume because of a target reset we also need to update 318 * resume because of a target reset we also need to update
288 * the STPTLDARNI register with the RNi of the device 319 * the STPTLDARNI register with the RNi of the device
289 */ 320 */
290 if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) && 321 if (dev_is_sata(dev) && !dev->parent)
291 idev->is_direct_attached) 322 sci_port_setup_transports(idev->owning_port, rnc->remote_node_index);
292 sci_port_setup_transports(idev->owning_port,
293 rnc->remote_node_index);
294 323
295 sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME); 324 sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME);
296} 325}
@@ -298,10 +327,22 @@ static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_m
298static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm) 327static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm)
299{ 328{
300 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); 329 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
330 enum sci_remote_node_context_destination_state dest_select;
331 int tell_user = 1;
332
333 dest_select = rnc->destination_state;
334 rnc->destination_state = RNC_DEST_UNSPECIFIED;
301 335
302 rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; 336 if ((dest_select == RNC_DEST_SUSPENDED) ||
337 (dest_select == RNC_DEST_SUSPENDED_RESUME)) {
338 sci_remote_node_context_suspend(
339 rnc, rnc->suspend_reason,
340 SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT);
303 341
304 if (rnc->user_callback) 342 if (dest_select == RNC_DEST_SUSPENDED_RESUME)
343 tell_user = 0; /* Wait until ready again. */
344 }
345 if (tell_user)
305 sci_remote_node_context_notify_user(rnc); 346 sci_remote_node_context_notify_user(rnc);
306} 347}
307 348
@@ -315,10 +356,34 @@ static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_sta
315static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm) 356static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm)
316{ 357{
317 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); 358 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
359 struct isci_remote_device *idev = rnc_to_dev(rnc);
360 struct isci_host *ihost = idev->owning_port->owning_controller;
361 u32 new_count = rnc->suspend_count + 1;
362
363 if (new_count == 0)
364 rnc->suspend_count = 1;
365 else
366 rnc->suspend_count = new_count;
367 smp_wmb();
318 368
369 /* Terminate outstanding requests pending abort. */
370 sci_remote_device_abort_requests_pending_abort(idev);
371
372 wake_up(&ihost->eventq);
319 sci_remote_node_context_continue_state_transitions(rnc); 373 sci_remote_node_context_continue_state_transitions(rnc);
320} 374}
321 375
376static void sci_remote_node_context_await_suspend_state_exit(
377 struct sci_base_state_machine *sm)
378{
379 struct sci_remote_node_context *rnc
380 = container_of(sm, typeof(*rnc), sm);
381 struct isci_remote_device *idev = rnc_to_dev(rnc);
382
383 if (dev_is_sata(idev->domain_dev))
384 isci_dev_set_hang_detection_timeout(idev, 0);
385}
386
322static const struct sci_base_state sci_remote_node_context_state_table[] = { 387static const struct sci_base_state sci_remote_node_context_state_table[] = {
323 [SCI_RNC_INITIAL] = { 388 [SCI_RNC_INITIAL] = {
324 .enter_state = sci_remote_node_context_initial_state_enter, 389 .enter_state = sci_remote_node_context_initial_state_enter,
@@ -341,7 +406,9 @@ static const struct sci_base_state sci_remote_node_context_state_table[] = {
341 [SCI_RNC_TX_RX_SUSPENDED] = { 406 [SCI_RNC_TX_RX_SUSPENDED] = {
342 .enter_state = sci_remote_node_context_tx_rx_suspended_state_enter, 407 .enter_state = sci_remote_node_context_tx_rx_suspended_state_enter,
343 }, 408 },
344 [SCI_RNC_AWAIT_SUSPENSION] = { }, 409 [SCI_RNC_AWAIT_SUSPENSION] = {
410 .exit_state = sci_remote_node_context_await_suspend_state_exit,
411 },
345}; 412};
346 413
347void sci_remote_node_context_construct(struct sci_remote_node_context *rnc, 414void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
@@ -350,7 +417,7 @@ void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
350 memset(rnc, 0, sizeof(struct sci_remote_node_context)); 417 memset(rnc, 0, sizeof(struct sci_remote_node_context));
351 418
352 rnc->remote_node_index = remote_node_index; 419 rnc->remote_node_index = remote_node_index;
353 rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; 420 rnc->destination_state = RNC_DEST_UNSPECIFIED;
354 421
355 sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL); 422 sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL);
356} 423}
@@ -359,6 +426,7 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con
359 u32 event_code) 426 u32 event_code)
360{ 427{
361 enum scis_sds_remote_node_context_states state; 428 enum scis_sds_remote_node_context_states state;
429 u32 next_state;
362 430
363 state = sci_rnc->sm.current_state_id; 431 state = sci_rnc->sm.current_state_id;
364 switch (state) { 432 switch (state) {
@@ -373,18 +441,18 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con
373 break; 441 break;
374 case SCI_RNC_INVALIDATING: 442 case SCI_RNC_INVALIDATING:
375 if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) { 443 if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) {
376 if (sci_rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL) 444 if (sci_rnc->destination_state == RNC_DEST_FINAL)
377 state = SCI_RNC_INITIAL; 445 next_state = SCI_RNC_INITIAL;
378 else 446 else
379 state = SCI_RNC_POSTING; 447 next_state = SCI_RNC_POSTING;
380 sci_change_state(&sci_rnc->sm, state); 448 sci_change_state(&sci_rnc->sm, next_state);
381 } else { 449 } else {
382 switch (scu_get_event_type(event_code)) { 450 switch (scu_get_event_type(event_code)) {
383 case SCU_EVENT_TYPE_RNC_SUSPEND_TX: 451 case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
384 case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: 452 case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
385 /* We really don't care if the hardware is going to suspend 453 /* We really don't care if the hardware is going to suspend
386 * the device since it's being invalidated anyway */ 454 * the device since it's being invalidated anyway */
387 dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)), 455 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
388 "%s: SCIC Remote Node Context 0x%p was " 456 "%s: SCIC Remote Node Context 0x%p was "
389 "suspended by hardware while being " 457 "suspended by hardware while being "
390 "invalidated.\n", __func__, sci_rnc); 458 "invalidated.\n", __func__, sci_rnc);
@@ -403,7 +471,7 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con
403 case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: 471 case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
404 /* We really don't care if the hardware is going to suspend 472 /* We really don't care if the hardware is going to suspend
405 * the device since it's being resumed anyway */ 473 * the device since it's being resumed anyway */
406 dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)), 474 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
407 "%s: SCIC Remote Node Context 0x%p was " 475 "%s: SCIC Remote Node Context 0x%p was "
408 "suspended by hardware while being resumed.\n", 476 "suspended by hardware while being resumed.\n",
409 __func__, sci_rnc); 477 __func__, sci_rnc);
@@ -417,11 +485,11 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con
417 switch (scu_get_event_type(event_code)) { 485 switch (scu_get_event_type(event_code)) {
418 case SCU_EVENT_TL_RNC_SUSPEND_TX: 486 case SCU_EVENT_TL_RNC_SUSPEND_TX:
419 sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED); 487 sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
420 sci_rnc->suspension_code = scu_get_event_specifier(event_code); 488 sci_rnc->suspend_type = scu_get_event_type(event_code);
421 break; 489 break;
422 case SCU_EVENT_TL_RNC_SUSPEND_TX_RX: 490 case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
423 sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED); 491 sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
424 sci_rnc->suspension_code = scu_get_event_specifier(event_code); 492 sci_rnc->suspend_type = scu_get_event_type(event_code);
425 break; 493 break;
426 default: 494 default:
427 goto out; 495 goto out;
@@ -430,27 +498,29 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con
430 case SCI_RNC_AWAIT_SUSPENSION: 498 case SCI_RNC_AWAIT_SUSPENSION:
431 switch (scu_get_event_type(event_code)) { 499 switch (scu_get_event_type(event_code)) {
432 case SCU_EVENT_TL_RNC_SUSPEND_TX: 500 case SCU_EVENT_TL_RNC_SUSPEND_TX:
433 sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED); 501 next_state = SCI_RNC_TX_SUSPENDED;
434 sci_rnc->suspension_code = scu_get_event_specifier(event_code);
435 break; 502 break;
436 case SCU_EVENT_TL_RNC_SUSPEND_TX_RX: 503 case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
437 sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED); 504 next_state = SCI_RNC_TX_RX_SUSPENDED;
438 sci_rnc->suspension_code = scu_get_event_specifier(event_code);
439 break; 505 break;
440 default: 506 default:
441 goto out; 507 goto out;
442 } 508 }
509 if (sci_rnc->suspend_type == scu_get_event_type(event_code))
510 sci_change_state(&sci_rnc->sm, next_state);
443 break; 511 break;
444 default: 512 default:
445 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), 513 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
446 "%s: invalid state %d\n", __func__, state); 514 "%s: invalid state: %s\n", __func__,
515 rnc_state_name(state));
447 return SCI_FAILURE_INVALID_STATE; 516 return SCI_FAILURE_INVALID_STATE;
448 } 517 }
449 return SCI_SUCCESS; 518 return SCI_SUCCESS;
450 519
451 out: 520 out:
452 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), 521 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
453 "%s: code: %#x state: %d\n", __func__, event_code, state); 522 "%s: code: %#x state: %s\n", __func__, event_code,
523 rnc_state_name(state));
454 return SCI_FAILURE; 524 return SCI_FAILURE;
455 525
456} 526}
@@ -464,20 +534,23 @@ enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context
464 state = sci_rnc->sm.current_state_id; 534 state = sci_rnc->sm.current_state_id;
465 switch (state) { 535 switch (state) {
466 case SCI_RNC_INVALIDATING: 536 case SCI_RNC_INVALIDATING:
467 sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p); 537 sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
468 return SCI_SUCCESS; 538 return SCI_SUCCESS;
469 case SCI_RNC_POSTING: 539 case SCI_RNC_POSTING:
470 case SCI_RNC_RESUMING: 540 case SCI_RNC_RESUMING:
471 case SCI_RNC_READY: 541 case SCI_RNC_READY:
472 case SCI_RNC_TX_SUSPENDED: 542 case SCI_RNC_TX_SUSPENDED:
473 case SCI_RNC_TX_RX_SUSPENDED: 543 case SCI_RNC_TX_RX_SUSPENDED:
474 case SCI_RNC_AWAIT_SUSPENSION: 544 sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
475 sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p);
476 sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING); 545 sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
477 return SCI_SUCCESS; 546 return SCI_SUCCESS;
547 case SCI_RNC_AWAIT_SUSPENSION:
548 sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
549 return SCI_SUCCESS;
478 case SCI_RNC_INITIAL: 550 case SCI_RNC_INITIAL:
479 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), 551 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
480 "%s: invalid state %d\n", __func__, state); 552 "%s: invalid state: %s\n", __func__,
553 rnc_state_name(state));
481 /* We have decided that the destruct request on the remote node context 554 /* We have decided that the destruct request on the remote node context
482 * cannot fail since the context is either in the initial/destroyed state 555 * cannot fail since the context is either in the initial/destroyed state
483 * or it can be destroyed. 556 * or it can be destroyed.
@@ -485,35 +558,101 @@ enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context
485 return SCI_SUCCESS; 558 return SCI_SUCCESS;
486 default: 559 default:
487 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), 560 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
488 "%s: invalid state %d\n", __func__, state); 561 "%s: invalid state %s\n", __func__,
562 rnc_state_name(state));
489 return SCI_FAILURE_INVALID_STATE; 563 return SCI_FAILURE_INVALID_STATE;
490 } 564 }
491} 565}
492 566
493enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc, 567enum sci_status sci_remote_node_context_suspend(
494 u32 suspend_type, 568 struct sci_remote_node_context *sci_rnc,
495 scics_sds_remote_node_context_callback cb_fn, 569 enum sci_remote_node_suspension_reasons suspend_reason,
496 void *cb_p) 570 u32 suspend_type)
497{ 571{
498 enum scis_sds_remote_node_context_states state; 572 enum scis_sds_remote_node_context_states state
573 = sci_rnc->sm.current_state_id;
574 struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
575 enum sci_status status = SCI_FAILURE_INVALID_STATE;
576 enum sci_remote_node_context_destination_state dest_param =
577 RNC_DEST_UNSPECIFIED;
578
579 dev_dbg(scirdev_to_dev(idev),
580 "%s: current state %s, current suspend_type %x, dest state %d,"
581 " arg suspend_reason %d, arg suspend_type %x\n",
582 __func__, rnc_state_name(state), sci_rnc->suspend_type,
583 sci_rnc->destination_state, suspend_reason,
584 suspend_type);
585
586 /* Disable automatic state continuations if explicitly suspending. */
587 if ((suspend_reason == SCI_HW_SUSPEND) ||
588 (sci_rnc->destination_state == RNC_DEST_FINAL))
589 dest_param = sci_rnc->destination_state;
499 590
500 state = sci_rnc->sm.current_state_id; 591 switch (state) {
501 if (state != SCI_RNC_READY) { 592 case SCI_RNC_READY:
593 break;
594 case SCI_RNC_INVALIDATING:
595 if (sci_rnc->destination_state == RNC_DEST_FINAL) {
596 dev_warn(scirdev_to_dev(idev),
597 "%s: already destroying %p\n",
598 __func__, sci_rnc);
599 return SCI_FAILURE_INVALID_STATE;
600 }
601 /* Fall through and handle like SCI_RNC_POSTING */
602 case SCI_RNC_RESUMING:
603 /* Fall through and handle like SCI_RNC_POSTING */
604 case SCI_RNC_POSTING:
605 /* Set the destination state to AWAIT - this signals the
606 * entry into the SCI_RNC_READY state that a suspension
607 * needs to be done immediately.
608 */
609 if (sci_rnc->destination_state != RNC_DEST_FINAL)
610 sci_rnc->destination_state = RNC_DEST_SUSPENDED;
611 sci_rnc->suspend_type = suspend_type;
612 sci_rnc->suspend_reason = suspend_reason;
613 return SCI_SUCCESS;
614
615 case SCI_RNC_TX_SUSPENDED:
616 if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX)
617 status = SCI_SUCCESS;
618 break;
619 case SCI_RNC_TX_RX_SUSPENDED:
620 if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
621 status = SCI_SUCCESS;
622 break;
623 case SCI_RNC_AWAIT_SUSPENSION:
624 if ((sci_rnc->suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
625 || (suspend_type == sci_rnc->suspend_type))
626 return SCI_SUCCESS;
627 break;
628 default:
502 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), 629 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
503 "%s: invalid state %d\n", __func__, state); 630 "%s: invalid state %s\n", __func__,
631 rnc_state_name(state));
504 return SCI_FAILURE_INVALID_STATE; 632 return SCI_FAILURE_INVALID_STATE;
505 } 633 }
634 sci_rnc->destination_state = dest_param;
635 sci_rnc->suspend_type = suspend_type;
636 sci_rnc->suspend_reason = suspend_reason;
637
638 if (status == SCI_SUCCESS) { /* Already in the destination state? */
639 struct isci_host *ihost = idev->owning_port->owning_controller;
640
641 wake_up_all(&ihost->eventq); /* Let observers look. */
642 return SCI_SUCCESS;
643 }
644 if ((suspend_reason == SCI_SW_SUSPEND_NORMAL) ||
645 (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)) {
506 646
507 sci_rnc->user_callback = cb_fn; 647 if (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)
508 sci_rnc->user_cookie = cb_p; 648 isci_dev_set_hang_detection_timeout(idev, 0x00000001);
509 sci_rnc->suspension_code = suspend_type;
510 649
511 if (suspend_type == SCI_SOFTWARE_SUSPENSION) { 650 sci_remote_device_post_request(
512 sci_remote_device_post_request(rnc_to_dev(sci_rnc), 651 idev, SCI_SOFTWARE_SUSPEND_CMD);
513 SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX);
514 } 652 }
653 if (state != SCI_RNC_AWAIT_SUSPENSION)
654 sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION);
515 655
516 sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION);
517 return SCI_SUCCESS; 656 return SCI_SUCCESS;
518} 657}
519 658
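The reworked suspend entry point above has two distinct behaviors: from READY it actually performs the suspension (software suspends post SCI_SOFTWARE_SUSPEND_CMD and then wait for the matching SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT), while from POSTING, RESUMING or INVALIDATING it only records RNC_DEST_SUSPENDED for replay on the next READY entry. A stand-alone sketch of that dispatch, with stand-in types (not driver code):

#include <stdio.h>

enum st { ST_READY, ST_POSTING, ST_RESUMING, ST_INVALIDATING };
enum dest { DEST_UNSPECIFIED, DEST_FINAL, DEST_SUSPENDED };

static const char *rnc_suspend(enum st state, enum dest *dest)
{
        switch (state) {
        case ST_READY:
                return "suspend now, enter AWAIT_SUSPENSION";
        case ST_INVALIDATING:
                if (*dest == DEST_FINAL)
                        return "reject: RNC is already being destroyed";
                /* fall through: handled like POSTING */
        case ST_RESUMING:
        case ST_POSTING:
                if (*dest != DEST_FINAL)
                        *dest = DEST_SUSPENDED; /* defer to READY entry */
                return "deferred until the RNC reaches READY";
        }
        return "invalid state";
}

int main(void)
{
        enum dest d = DEST_UNSPECIFIED;

        printf("%s\n", rnc_suspend(ST_POSTING, &d));
        printf("%s\n", rnc_suspend(ST_READY, &d));
        return 0;
}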
@@ -522,56 +661,86 @@ enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *s
522 void *cb_p) 661 void *cb_p)
523{ 662{
524 enum scis_sds_remote_node_context_states state; 663 enum scis_sds_remote_node_context_states state;
664 struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
525 665
526 state = sci_rnc->sm.current_state_id; 666 state = sci_rnc->sm.current_state_id;
667 dev_dbg(scirdev_to_dev(idev),
668 "%s: state %s, cb_fn = %p, cb_p = %p; dest_state = %d; "
669 "dev resume path %s\n",
670 __func__, rnc_state_name(state), cb_fn, cb_p,
671 sci_rnc->destination_state,
672 test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)
673 ? "<abort active>" : "<normal>");
674
527 switch (state) { 675 switch (state) {
528 case SCI_RNC_INITIAL: 676 case SCI_RNC_INITIAL:
529 if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) 677 if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
530 return SCI_FAILURE_INVALID_STATE; 678 return SCI_FAILURE_INVALID_STATE;
531 679
532 sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); 680 sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p,
533 sci_remote_node_context_construct_buffer(sci_rnc); 681 RNC_DEST_READY);
534 sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING); 682 if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) {
683 sci_remote_node_context_construct_buffer(sci_rnc);
684 sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING);
685 }
535 return SCI_SUCCESS; 686 return SCI_SUCCESS;
687
536 case SCI_RNC_POSTING: 688 case SCI_RNC_POSTING:
537 case SCI_RNC_INVALIDATING: 689 case SCI_RNC_INVALIDATING:
538 case SCI_RNC_RESUMING: 690 case SCI_RNC_RESUMING:
539 if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY) 691 /* We are still waiting to post when a resume was
540 return SCI_FAILURE_INVALID_STATE; 692 * requested.
541 693 */
542 sci_rnc->user_callback = cb_fn; 694 switch (sci_rnc->destination_state) {
543 sci_rnc->user_cookie = cb_p; 695 case RNC_DEST_SUSPENDED:
696 case RNC_DEST_SUSPENDED_RESUME:
697 /* Previously waiting to suspend after posting.
698 * Now continue onto resumption.
699 */
700 sci_remote_node_context_setup_to_resume(
701 sci_rnc, cb_fn, cb_p,
702 RNC_DEST_SUSPENDED_RESUME);
703 break;
704 default:
705 sci_remote_node_context_setup_to_resume(
706 sci_rnc, cb_fn, cb_p,
707 RNC_DEST_READY);
708 break;
709 }
544 return SCI_SUCCESS; 710 return SCI_SUCCESS;
545 case SCI_RNC_TX_SUSPENDED: { 711
546 struct isci_remote_device *idev = rnc_to_dev(sci_rnc); 712 case SCI_RNC_TX_SUSPENDED:
547 struct domain_device *dev = idev->domain_dev; 713 case SCI_RNC_TX_RX_SUSPENDED:
548 714 {
549 sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); 715 struct domain_device *dev = idev->domain_dev;
550 716 /* If this is an expander attached SATA device we must
551 /* TODO: consider adding a resume action of NONE, INVALIDATE, WRITE_TLCR */ 717 * invalidate and repost the RNC since this is the only
552 if (dev->dev_type == SAS_END_DEV || dev_is_expander(dev)) 718 * way to clear the TCi to NCQ tag mapping table for
553 sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING); 719 * the RNi. All other device types we can just resume.
554 else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { 720 */
555 if (idev->is_direct_attached) { 721 sci_remote_node_context_setup_to_resume(
556 /* @todo Fix this since I am being silly in writing to the STPTLDARNI register. */ 722 sci_rnc, cb_fn, cb_p, RNC_DEST_READY);
557 sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING); 723
558 } else { 724 if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) {
559 sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING); 725 if ((dev_is_sata(dev) && dev->parent) ||
726 (sci_rnc->destination_state == RNC_DEST_FINAL))
727 sci_change_state(&sci_rnc->sm,
728 SCI_RNC_INVALIDATING);
729 else
730 sci_change_state(&sci_rnc->sm,
731 SCI_RNC_RESUMING);
560 } 732 }
561 } else 733 }
562 return SCI_FAILURE;
563 return SCI_SUCCESS; 734 return SCI_SUCCESS;
564 } 735
565 case SCI_RNC_TX_RX_SUSPENDED:
566 sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
567 sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
568 return SCI_FAILURE_INVALID_STATE;
569 case SCI_RNC_AWAIT_SUSPENSION: 736 case SCI_RNC_AWAIT_SUSPENSION:
570 sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); 737 sci_remote_node_context_setup_to_resume(
738 sci_rnc, cb_fn, cb_p, RNC_DEST_SUSPENDED_RESUME);
571 return SCI_SUCCESS; 739 return SCI_SUCCESS;
572 default: 740 default:
573 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), 741 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
574 "%s: invalid state %d\n", __func__, state); 742 "%s: invalid state %s\n", __func__,
743 rnc_state_name(state));
575 return SCI_FAILURE_INVALID_STATE; 744 return SCI_FAILURE_INVALID_STATE;
576 } 745 }
577} 746}
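The suspended-resume arm above encodes one hardware constraint worth restating: for a SATA device reached through an expander (dev_is_sata(dev) && dev->parent), a plain resume is not enough, because only invalidating and reposting the RNC clears the TCi-to-NCQ tag mapping table for that RNi, so those devices are routed through SCI_RNC_INVALIDATING. The branch condition reduced to a predicate (illustrative only):

/* True when the RNC must be invalidated and reposted on resume rather
 * than resumed directly, per the comment in the hunk above. */
static int rnc_must_invalidate(int is_sata, int behind_expander,
                               int destined_final)
{
        return (is_sata && behind_expander) || destined_final;
}

int main(void)
{
        return rnc_must_invalidate(1, 1, 0) ? 0 : 1;
}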
@@ -590,35 +759,51 @@ enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context
590 case SCI_RNC_TX_RX_SUSPENDED: 759 case SCI_RNC_TX_RX_SUSPENDED:
591 case SCI_RNC_AWAIT_SUSPENSION: 760 case SCI_RNC_AWAIT_SUSPENSION:
592 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), 761 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
593 "%s: invalid state %d\n", __func__, state); 762 "%s: invalid state %s\n", __func__,
763 rnc_state_name(state));
594 return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; 764 return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
595 default: 765 default:
596 break; 766 dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
767 "%s: invalid state %s\n", __func__,
768 rnc_state_name(state));
769 return SCI_FAILURE_INVALID_STATE;
597 } 770 }
598 dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
599 "%s: requested to start IO while still resuming, %d\n",
600 __func__, state);
601 return SCI_FAILURE_INVALID_STATE;
602} 771}
603 772
604enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc, 773enum sci_status sci_remote_node_context_start_task(
605 struct isci_request *ireq) 774 struct sci_remote_node_context *sci_rnc,
775 struct isci_request *ireq,
776 scics_sds_remote_node_context_callback cb_fn,
777 void *cb_p)
778{
779 enum sci_status status = sci_remote_node_context_resume(sci_rnc,
780 cb_fn, cb_p);
781 if (status != SCI_SUCCESS)
782 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
783 "%s: resume failed: %d\n", __func__, status);
784 return status;
785}
786
787int sci_remote_node_context_is_safe_to_abort(
788 struct sci_remote_node_context *sci_rnc)
606{ 789{
607 enum scis_sds_remote_node_context_states state; 790 enum scis_sds_remote_node_context_states state;
608 791
609 state = sci_rnc->sm.current_state_id; 792 state = sci_rnc->sm.current_state_id;
610 switch (state) { 793 switch (state) {
794 case SCI_RNC_INVALIDATING:
795 case SCI_RNC_TX_RX_SUSPENDED:
796 return 1;
797 case SCI_RNC_POSTING:
611 case SCI_RNC_RESUMING: 798 case SCI_RNC_RESUMING:
612 case SCI_RNC_READY: 799 case SCI_RNC_READY:
613 case SCI_RNC_AWAIT_SUSPENSION:
614 return SCI_SUCCESS;
615 case SCI_RNC_TX_SUSPENDED: 800 case SCI_RNC_TX_SUSPENDED:
616 case SCI_RNC_TX_RX_SUSPENDED: 801 case SCI_RNC_AWAIT_SUSPENSION:
617 sci_remote_node_context_resume(sci_rnc, NULL, NULL); 802 case SCI_RNC_INITIAL:
618 return SCI_SUCCESS; 803 return 0;
619 default: 804 default:
620 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), 805 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
621 "%s: invalid state %d\n", __func__, state); 806 "%s: invalid state %d\n", __func__, state);
622 return SCI_FAILURE_INVALID_STATE; 807 return 0;
623 } 808 }
624} 809}
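For context, the consumer of this new predicate shows up in the request-termination hunk later in this patch: a request in SCI_REQ_ABORTING is flagged as pending rather than aborted outright whenever the device-level wrapper (isci_remote_device_is_safe_to_abort, presumably a thin wrapper over this function defined elsewhere in the series) says the RNC is not yet quiescent:

        if (!isci_remote_device_is_safe_to_abort(ireq->target_device))
                set_bit(IREQ_PENDING_ABORT, &ireq->flags);
        else
                clear_bit(IREQ_PENDING_ABORT, &ireq->flags);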
diff --git a/drivers/scsi/isci/remote_node_context.h b/drivers/scsi/isci/remote_node_context.h
index a241e0f4c865..a703b9ce0c2c 100644
--- a/drivers/scsi/isci/remote_node_context.h
+++ b/drivers/scsi/isci/remote_node_context.h
@@ -75,8 +75,13 @@
75 */ 75 */
76#define SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX 0x0FFF 76#define SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX 0x0FFF
77 77
78#define SCU_HARDWARE_SUSPENSION (0) 78enum sci_remote_node_suspension_reasons {
79#define SCI_SOFTWARE_SUSPENSION (1) 79 SCI_HW_SUSPEND,
80 SCI_SW_SUSPEND_NORMAL,
81 SCI_SW_SUSPEND_LINKHANG_DETECT
82};
83#define SCI_SOFTWARE_SUSPEND_CMD SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX
84#define SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT SCU_EVENT_TL_RNC_SUSPEND_TX_RX
80 85
81struct isci_request; 86struct isci_request;
82struct isci_remote_device; 87struct isci_remote_device;
@@ -137,9 +142,13 @@ const char *rnc_state_name(enum scis_sds_remote_node_context_states state);
137 * node context. 142 * node context.
138 */ 143 */
139enum sci_remote_node_context_destination_state { 144enum sci_remote_node_context_destination_state {
140 SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED, 145 RNC_DEST_UNSPECIFIED,
141 SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY, 146 RNC_DEST_READY,
142 SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL 147 RNC_DEST_FINAL,
148 RNC_DEST_SUSPENDED, /* Set when suspended during post/invalidate */
149 RNC_DEST_SUSPENDED_RESUME /* Set when a resume was done during posting
150 * or invalidating and already suspending.
151 */
143}; 152};
144 153
145/** 154/**
@@ -156,10 +165,12 @@ struct sci_remote_node_context {
156 u16 remote_node_index; 165 u16 remote_node_index;
157 166
158 /** 167 /**
159 * This field is the recorded suspension code or the reason for the remote node 168 * This field is the recorded suspension type of the remote node
160 * context suspension. 169 * context suspension.
161 */ 170 */
162 u32 suspension_code; 171 u32 suspend_type;
172 enum sci_remote_node_suspension_reasons suspend_reason;
173 u32 suspend_count;
163 174
164 /** 175 /**
165 * This field is true if the remote node context is resuming from its current 176 * This field is true if the remote node context is resuming from its current
@@ -193,6 +204,8 @@ void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
193bool sci_remote_node_context_is_ready( 204bool sci_remote_node_context_is_ready(
194 struct sci_remote_node_context *sci_rnc); 205 struct sci_remote_node_context *sci_rnc);
195 206
207bool sci_remote_node_context_is_suspended(struct sci_remote_node_context *sci_rnc);
208
196enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc, 209enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
197 u32 event_code); 210 u32 event_code);
198enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc, 211enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
@@ -200,14 +213,24 @@ enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context
200 void *callback_parameter); 213 void *callback_parameter);
201enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc, 214enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
202 u32 suspend_type, 215 u32 suspend_type,
203 scics_sds_remote_node_context_callback cb_fn, 216 u32 suspension_code);
204 void *cb_p);
205enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc, 217enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
206 scics_sds_remote_node_context_callback cb_fn, 218 scics_sds_remote_node_context_callback cb_fn,
207 void *cb_p); 219 void *cb_p);
208enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc, 220enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
209 struct isci_request *ireq); 221 struct isci_request *ireq,
222 scics_sds_remote_node_context_callback cb_fn,
223 void *cb_p);
210enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc, 224enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
211 struct isci_request *ireq); 225 struct isci_request *ireq);
226int sci_remote_node_context_is_safe_to_abort(
227 struct sci_remote_node_context *sci_rnc);
212 228
229static inline bool sci_remote_node_context_is_being_destroyed(
230 struct sci_remote_node_context *sci_rnc)
231{
232 return (sci_rnc->destination_state == RNC_DEST_FINAL)
233 || ((sci_rnc->sm.current_state_id == SCI_RNC_INITIAL)
234 && (sci_rnc->destination_state == RNC_DEST_UNSPECIFIED));
235}
213#endif /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */ 236#endif /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */
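A hypothetical caller-side use of the new inline, to make the intent concrete: both "destruction requested" (destination FINAL) and "destruction finished" (back in INITIAL with nothing pending) read as being destroyed, so new work can be refused with one test. This snippet is not part of the patch:

        /* Hypothetical guard: refuse new work once destruction of the
         * remote node context has been requested or has completed. */
        if (sci_remote_node_context_is_being_destroyed(&idev->rnc))
                return SCI_FAILURE_INVALID_STATE;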
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 2def1e3960f6..7a0431c73493 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -92,11 +92,11 @@ static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost,
92 if (idx == 0) { 92 if (idx == 0) {
93 offset = (void *) &ireq->tc->sgl_pair_ab - 93 offset = (void *) &ireq->tc->sgl_pair_ab -
94 (void *) &ihost->task_context_table[0]; 94 (void *) &ihost->task_context_table[0];
95 return ihost->task_context_dma + offset; 95 return ihost->tc_dma + offset;
96 } else if (idx == 1) { 96 } else if (idx == 1) {
97 offset = (void *) &ireq->tc->sgl_pair_cd - 97 offset = (void *) &ireq->tc->sgl_pair_cd -
98 (void *) &ihost->task_context_table[0]; 98 (void *) &ihost->task_context_table[0];
99 return ihost->task_context_dma + offset; 99 return ihost->tc_dma + offset;
100 } 100 }
101 101
102 return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]); 102 return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
@@ -730,7 +730,7 @@ static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *i
730{ 730{
731 struct sas_task *task = isci_request_access_task(ireq); 731 struct sas_task *task = isci_request_access_task(ireq);
732 732
733 ireq->protocol = SCIC_SSP_PROTOCOL; 733 ireq->protocol = SAS_PROTOCOL_SSP;
734 734
735 scu_ssp_io_request_construct_task_context(ireq, 735 scu_ssp_io_request_construct_task_context(ireq,
736 task->data_dir, 736 task->data_dir,
@@ -763,7 +763,7 @@ static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *
763 bool copy = false; 763 bool copy = false;
764 struct sas_task *task = isci_request_access_task(ireq); 764 struct sas_task *task = isci_request_access_task(ireq);
765 765
766 ireq->protocol = SCIC_STP_PROTOCOL; 766 ireq->protocol = SAS_PROTOCOL_STP;
767 767
768 copy = (task->data_dir == DMA_NONE) ? false : true; 768 copy = (task->data_dir == DMA_NONE) ? false : true;
769 769
@@ -863,6 +863,8 @@ sci_io_request_terminate(struct isci_request *ireq)
863 863
864 switch (state) { 864 switch (state) {
865 case SCI_REQ_CONSTRUCTED: 865 case SCI_REQ_CONSTRUCTED:
866 /* Set to make sure no HW terminate posting is done: */
867 set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags);
866 ireq->scu_status = SCU_TASK_DONE_TASK_ABORT; 868 ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
867 ireq->sci_status = SCI_FAILURE_IO_TERMINATED; 869 ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
868 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 870 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
@@ -883,8 +885,7 @@ sci_io_request_terminate(struct isci_request *ireq)
883 case SCI_REQ_ATAPI_WAIT_PIO_SETUP: 885 case SCI_REQ_ATAPI_WAIT_PIO_SETUP:
884 case SCI_REQ_ATAPI_WAIT_D2H: 886 case SCI_REQ_ATAPI_WAIT_D2H:
885 case SCI_REQ_ATAPI_WAIT_TC_COMP: 887 case SCI_REQ_ATAPI_WAIT_TC_COMP:
886 sci_change_state(&ireq->sm, SCI_REQ_ABORTING); 888 /* Fall through and change state to ABORTING... */
887 return SCI_SUCCESS;
888 case SCI_REQ_TASK_WAIT_TC_RESP: 889 case SCI_REQ_TASK_WAIT_TC_RESP:
889 /* The task frame was already confirmed to have been 890 /* The task frame was already confirmed to have been
890 * sent by the SCU HW. Since the state machine is 891 * sent by the SCU HW. Since the state machine is
@@ -893,20 +894,21 @@ sci_io_request_terminate(struct isci_request *ireq)
893 * and don't wait for the task response. 894 * and don't wait for the task response.
894 */ 895 */
895 sci_change_state(&ireq->sm, SCI_REQ_ABORTING); 896 sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
896 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 897 /* Fall through and handle like ABORTING... */
897 return SCI_SUCCESS;
898 case SCI_REQ_ABORTING: 898 case SCI_REQ_ABORTING:
899 /* If a request has a termination requested twice, return 899 if (!isci_remote_device_is_safe_to_abort(ireq->target_device))
900 * a failure indication, since HW confirmation of the first 900 set_bit(IREQ_PENDING_ABORT, &ireq->flags);
901 * abort is still outstanding. 901 else
902 clear_bit(IREQ_PENDING_ABORT, &ireq->flags);
903 /* If the request is only waiting on the remote device
904 * suspension, return SUCCESS so the caller will wait too.
902 */ 905 */
906 return SCI_SUCCESS;
903 case SCI_REQ_COMPLETED: 907 case SCI_REQ_COMPLETED:
904 default: 908 default:
905 dev_warn(&ireq->owning_controller->pdev->dev, 909 dev_warn(&ireq->owning_controller->pdev->dev,
906 "%s: SCIC IO Request requested to abort while in wrong " 910 "%s: SCIC IO Request requested to abort while in wrong "
907 "state %d\n", 911 "state %d\n", __func__, ireq->sm.current_state_id);
908 __func__,
909 ireq->sm.current_state_id);
910 break; 912 break;
911 } 913 }
912 914
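The terminate ladder above changes shape in three ways: a CONSTRUCTED request completes on the spot with IREQ_TC_ABORT_POSTED set so no hardware terminate is ever posted, the in-flight wait states now fall through into ABORTING, and a repeated terminate in ABORTING succeeds instead of failing, merely re-evaluating whether the caller must keep waiting on the device suspension. A stand-alone model of that ladder, with stand-in types (not driver code):

#include <stdio.h>

enum req_state { REQ_CONSTRUCTED, REQ_STARTED, REQ_ABORTING, REQ_COMPLETED };

static const char *terminate_req(enum req_state *s, int safe_to_abort)
{
        switch (*s) {
        case REQ_CONSTRUCTED:
                *s = REQ_COMPLETED;     /* never reached the hardware */
                return "completed, no HW terminate posted";
        case REQ_STARTED:
                *s = REQ_ABORTING;
                /* fall through: handled like ABORTING */
        case REQ_ABORTING:
                return safe_to_abort ? "abort proceeds"
                                     : "pending abort, caller keeps waiting";
        default:
                return "wrong state";
        }
}

int main(void)
{
        enum req_state s = REQ_STARTED;

        printf("%s\n", terminate_req(&s, 0));   /* flagged pending */
        printf("%s\n", terminate_req(&s, 1));   /* repeat succeeds */
        return 0;
}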
@@ -1070,7 +1072,7 @@ request_started_state_tc_event(struct isci_request *ireq,
1070 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS): 1072 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
1071 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): 1073 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
1072 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR): 1074 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
1073 if (ireq->protocol == SCIC_STP_PROTOCOL) { 1075 if (ireq->protocol == SAS_PROTOCOL_STP) {
1074 ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> 1076 ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
1075 SCU_COMPLETION_TL_STATUS_SHIFT; 1077 SCU_COMPLETION_TL_STATUS_SHIFT;
1076 ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; 1078 ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
@@ -2117,7 +2119,7 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq
2117 */ 2119 */
2118 if (ireq->stp.rsp.fis_type == FIS_REGD2H) { 2120 if (ireq->stp.rsp.fis_type == FIS_REGD2H) {
2119 sci_remote_device_suspend(ireq->target_device, 2121 sci_remote_device_suspend(ireq->target_device,
2120 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); 2122 SCI_SW_SUSPEND_NORMAL);
2121 2123
2122 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 2124 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
2123 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 2125 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
@@ -2138,13 +2140,6 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq
2138 /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR 2140 /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
2139 * - this comes only for B0 2141 * - this comes only for B0
2140 */ 2142 */
2141 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
2142 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
2143 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
2144 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
2145 sci_remote_device_suspend(ireq->target_device,
2146 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
2147 /* Fall through to the default case */
2148 default: 2143 default:
2149 /* All other completion statuses cause the IO to be complete. */ 2144 /* All other completion statuses cause the IO to be complete. */
2150 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 2145 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
@@ -2262,15 +2257,151 @@ static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ire
2262 return status; 2257 return status;
2263} 2258}
2264 2259
2260static int sci_request_smp_completion_status_is_tx_suspend(
2261 unsigned int completion_status)
2262{
2263 switch (completion_status) {
2264 case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
2265 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2266 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2267 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2268 case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2269 case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
2270 return 1;
2271 }
2272 return 0;
2273}
2274
2275static int sci_request_smp_completion_status_is_tx_rx_suspend(
2276 unsigned int completion_status)
2277{
2278 return 0; /* There are no Tx/Rx SMP suspend conditions. */
2279}
2280
2281static int sci_request_ssp_completion_status_is_tx_suspend(
2282 unsigned int completion_status)
2283{
2284 switch (completion_status) {
2285 case SCU_TASK_DONE_TX_RAW_CMD_ERR:
2286 case SCU_TASK_DONE_LF_ERR:
2287 case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
2288 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2289 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2290 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2291 case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2292 case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
2293 case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
2294 case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
2295 case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
2296 return 1;
2297 }
2298 return 0;
2299}
2300
2301static int sci_request_ssp_completion_status_is_tx_rx_suspend(
2302 unsigned int completion_status)
2303{
2304 return 0; /* There are no Tx/Rx SSP suspend conditions. */
2305}
2306
2307static int sci_request_stpsata_completion_status_is_tx_suspend(
2308 unsigned int completion_status)
2309{
2310 switch (completion_status) {
2311 case SCU_TASK_DONE_TX_RAW_CMD_ERR:
2312 case SCU_TASK_DONE_LL_R_ERR:
2313 case SCU_TASK_DONE_LL_PERR:
2314 case SCU_TASK_DONE_REG_ERR:
2315 case SCU_TASK_DONE_SDB_ERR:
2316 case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
2317 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2318 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2319 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2320 case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2321 case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
2322 case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
2323 case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
2324 case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
2325 return 1;
2326 }
2327 return 0;
2328}
2329
2330
2331static int sci_request_stpsata_completion_status_is_tx_rx_suspend(
2332 unsigned int completion_status)
2333{
2334 switch (completion_status) {
2335 case SCU_TASK_DONE_LF_ERR:
2336 case SCU_TASK_DONE_LL_SY_TERM:
2337 case SCU_TASK_DONE_LL_LF_TERM:
2338 case SCU_TASK_DONE_BREAK_RCVD:
2339 case SCU_TASK_DONE_INV_FIS_LEN:
2340 case SCU_TASK_DONE_UNEXP_FIS:
2341 case SCU_TASK_DONE_UNEXP_SDBFIS:
2342 case SCU_TASK_DONE_MAX_PLD_ERR:
2343 return 1;
2344 }
2345 return 0;
2346}
2347
2348static void sci_request_handle_suspending_completions(
2349 struct isci_request *ireq,
2350 u32 completion_code)
2351{
2352 int is_tx = 0;
2353 int is_tx_rx = 0;
2354
2355 switch (ireq->protocol) {
2356 case SAS_PROTOCOL_SMP:
2357 is_tx = sci_request_smp_completion_status_is_tx_suspend(
2358 completion_code);
2359 is_tx_rx = sci_request_smp_completion_status_is_tx_rx_suspend(
2360 completion_code);
2361 break;
2362 case SAS_PROTOCOL_SSP:
2363 is_tx = sci_request_ssp_completion_status_is_tx_suspend(
2364 completion_code);
2365 is_tx_rx = sci_request_ssp_completion_status_is_tx_rx_suspend(
2366 completion_code);
2367 break;
2368 case SAS_PROTOCOL_STP:
2369 is_tx = sci_request_stpsata_completion_status_is_tx_suspend(
2370 completion_code);
2371 is_tx_rx =
2372 sci_request_stpsata_completion_status_is_tx_rx_suspend(
2373 completion_code);
2374 break;
2375 default:
2376 dev_warn(&ireq->isci_host->pdev->dev,
2377 "%s: request %p has no valid protocol\n",
2378 __func__, ireq);
2379 break;
2380 }
2381 if (is_tx || is_tx_rx) {
2382 BUG_ON(is_tx && is_tx_rx);
2383
2384 sci_remote_node_context_suspend(
2385 &ireq->target_device->rnc,
2386 SCI_HW_SUSPEND,
2387 (is_tx_rx) ? SCU_EVENT_TL_RNC_SUSPEND_TX_RX
2388 : SCU_EVENT_TL_RNC_SUSPEND_TX);
2389 }
2390}
2391
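With the hook added below in sci_io_request_tc_completion(), every task-context completion is run through the per-protocol classifiers above before normal state handling, and the BUG_ON() in the dispatcher asserts that the TX and TX/RX classes stay disjoint. A stand-alone model of the decode-then-suspend flow, with made-up status values standing in for the SCU_TASK_* codes:

#include <stdio.h>

enum susp_class { SUSP_NONE, SUSP_TX, SUSP_TX_RX };

/* Stand-in classifier: 1 plays the role of a TX-suspending status
 * (e.g. SCU_TASK_DONE_REG_ERR for STP), 2 a TX/RX-suspending one
 * (e.g. SCU_TASK_DONE_UNEXP_FIS); anything else suspends nothing. */
static enum susp_class classify(unsigned int status)
{
        switch (status) {
        case 1:  return SUSP_TX;
        case 2:  return SUSP_TX_RX;
        default: return SUSP_NONE;
        }
}

int main(void)
{
        enum susp_class c = classify(2);

        if (c != SUSP_NONE)
                printf("post a %s suspension before completing the I/O\n",
                       c == SUSP_TX_RX ? "TX/RX" : "TX");
        return 0;
}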
2265enum sci_status 2392enum sci_status
2266sci_io_request_tc_completion(struct isci_request *ireq, 2393sci_io_request_tc_completion(struct isci_request *ireq,
2267 u32 completion_code) 2394 u32 completion_code)
2268{ 2395{
2269 enum sci_base_request_states state; 2396 enum sci_base_request_states state;
2270 struct isci_host *ihost = ireq->owning_controller; 2397 struct isci_host *ihost = ireq->owning_controller;
2271 2398
2272 state = ireq->sm.current_state_id; 2399 state = ireq->sm.current_state_id;
2273 2400
2401 /* Decode those completions that signal upcoming suspension events. */
2402 sci_request_handle_suspending_completions(
2403 ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code));
2404
2274 switch (state) { 2405 switch (state) {
2275 case SCI_REQ_STARTED: 2406 case SCI_REQ_STARTED:
2276 return request_started_state_tc_event(ireq, completion_code); 2407 return request_started_state_tc_event(ireq, completion_code);
@@ -2362,9 +2493,6 @@ static void isci_request_process_response_iu(
2362 * @request: This parameter is the completed isci_request object. 2493 * @request: This parameter is the completed isci_request object.
2363 * @response_ptr: This parameter specifies the service response for the I/O. 2494 * @response_ptr: This parameter specifies the service response for the I/O.
2364 * @status_ptr: This parameter specifies the exec status for the I/O. 2495 * @status_ptr: This parameter specifies the exec status for the I/O.
2365 * @complete_to_host_ptr: This parameter specifies the action to be taken by
2366 * the LLDD with respect to completing this request or forcing an abort
2367 * condition on the I/O.
2368 * @open_rej_reason: This parameter specifies the encoded reason for the 2496 * @open_rej_reason: This parameter specifies the encoded reason for the
2369 * abandon-class reject. 2497 * abandon-class reject.
2370 * 2498 *
@@ -2375,14 +2503,12 @@ static void isci_request_set_open_reject_status(
2375 struct sas_task *task, 2503 struct sas_task *task,
2376 enum service_response *response_ptr, 2504 enum service_response *response_ptr,
2377 enum exec_status *status_ptr, 2505 enum exec_status *status_ptr,
2378 enum isci_completion_selection *complete_to_host_ptr,
2379 enum sas_open_rej_reason open_rej_reason) 2506 enum sas_open_rej_reason open_rej_reason)
2380{ 2507{
2381 /* Task in the target is done. */ 2508 /* Task in the target is done. */
2382 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2509 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2383 *response_ptr = SAS_TASK_UNDELIVERED; 2510 *response_ptr = SAS_TASK_UNDELIVERED;
2384 *status_ptr = SAS_OPEN_REJECT; 2511 *status_ptr = SAS_OPEN_REJECT;
2385 *complete_to_host_ptr = isci_perform_normal_io_completion;
2386 task->task_status.open_rej_reason = open_rej_reason; 2512 task->task_status.open_rej_reason = open_rej_reason;
2387} 2513}
2388 2514
@@ -2392,9 +2518,6 @@ static void isci_request_set_open_reject_status(
2392 * @request: This parameter is the completed isci_request object. 2518 * @request: This parameter is the completed isci_request object.
2393 * @response_ptr: This parameter specifies the service response for the I/O. 2519 * @response_ptr: This parameter specifies the service response for the I/O.
2394 * @status_ptr: This parameter specifies the exec status for the I/O. 2520 * @status_ptr: This parameter specifies the exec status for the I/O.
2395 * @complete_to_host_ptr: This parameter specifies the action to be taken by
2396 * the LLDD with respect to completing this request or forcing an abort
2397 * condition on the I/O.
2398 * 2521 *
2399 * none. 2522 * none.
2400 */ 2523 */
@@ -2403,8 +2526,7 @@ static void isci_request_handle_controller_specific_errors(
2403 struct isci_request *request, 2526 struct isci_request *request,
2404 struct sas_task *task, 2527 struct sas_task *task,
2405 enum service_response *response_ptr, 2528 enum service_response *response_ptr,
2406 enum exec_status *status_ptr, 2529 enum exec_status *status_ptr)
2407 enum isci_completion_selection *complete_to_host_ptr)
2408{ 2530{
2409 unsigned int cstatus; 2531 unsigned int cstatus;
2410 2532
@@ -2445,9 +2567,6 @@ static void isci_request_handle_controller_specific_errors(
2445 *status_ptr = SAS_ABORTED_TASK; 2567 *status_ptr = SAS_ABORTED_TASK;
2446 2568
2447 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2569 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2448
2449 *complete_to_host_ptr =
2450 isci_perform_normal_io_completion;
2451 } else { 2570 } else {
2452 /* Task in the target is not done. */ 2571 /* Task in the target is not done. */
2453 *response_ptr = SAS_TASK_UNDELIVERED; 2572 *response_ptr = SAS_TASK_UNDELIVERED;
@@ -2458,9 +2577,6 @@ static void isci_request_handle_controller_specific_errors(
2458 *status_ptr = SAM_STAT_TASK_ABORTED; 2577 *status_ptr = SAM_STAT_TASK_ABORTED;
2459 2578
2460 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2579 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2461
2462 *complete_to_host_ptr =
2463 isci_perform_error_io_completion;
2464 } 2580 }
2465 2581
2466 break; 2582 break;
@@ -2489,8 +2605,6 @@ static void isci_request_handle_controller_specific_errors(
2489 *status_ptr = SAS_ABORTED_TASK; 2605 *status_ptr = SAS_ABORTED_TASK;
2490 2606
2491 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2607 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2492
2493 *complete_to_host_ptr = isci_perform_normal_io_completion;
2494 break; 2608 break;
2495 2609
2496 2610
@@ -2501,7 +2615,7 @@ static void isci_request_handle_controller_specific_errors(
2501 2615
2502 isci_request_set_open_reject_status( 2616 isci_request_set_open_reject_status(
2503 request, task, response_ptr, status_ptr, 2617 request, task, response_ptr, status_ptr,
2504 complete_to_host_ptr, SAS_OREJ_WRONG_DEST); 2618 SAS_OREJ_WRONG_DEST);
2505 break; 2619 break;
2506 2620
2507 case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: 2621 case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
@@ -2511,56 +2625,56 @@ static void isci_request_handle_controller_specific_errors(
2511 */ 2625 */
2512 isci_request_set_open_reject_status( 2626 isci_request_set_open_reject_status(
2513 request, task, response_ptr, status_ptr, 2627 request, task, response_ptr, status_ptr,
2514 complete_to_host_ptr, SAS_OREJ_RESV_AB0); 2628 SAS_OREJ_RESV_AB0);
2515 break; 2629 break;
2516 2630
2517 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: 2631 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2518 2632
2519 isci_request_set_open_reject_status( 2633 isci_request_set_open_reject_status(
2520 request, task, response_ptr, status_ptr, 2634 request, task, response_ptr, status_ptr,
2521 complete_to_host_ptr, SAS_OREJ_RESV_AB1); 2635 SAS_OREJ_RESV_AB1);
2522 break; 2636 break;
2523 2637
2524 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: 2638 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2525 2639
2526 isci_request_set_open_reject_status( 2640 isci_request_set_open_reject_status(
2527 request, task, response_ptr, status_ptr, 2641 request, task, response_ptr, status_ptr,
2528 complete_to_host_ptr, SAS_OREJ_RESV_AB2); 2642 SAS_OREJ_RESV_AB2);
2529 break; 2643 break;
2530 2644
2531 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: 2645 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2532 2646
2533 isci_request_set_open_reject_status( 2647 isci_request_set_open_reject_status(
2534 request, task, response_ptr, status_ptr, 2648 request, task, response_ptr, status_ptr,
2535 complete_to_host_ptr, SAS_OREJ_RESV_AB3); 2649 SAS_OREJ_RESV_AB3);
2536 break; 2650 break;
2537 2651
2538 case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: 2652 case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2539 2653
2540 isci_request_set_open_reject_status( 2654 isci_request_set_open_reject_status(
2541 request, task, response_ptr, status_ptr, 2655 request, task, response_ptr, status_ptr,
2542 complete_to_host_ptr, SAS_OREJ_BAD_DEST); 2656 SAS_OREJ_BAD_DEST);
2543 break; 2657 break;
2544 2658
2545 case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: 2659 case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
2546 2660
2547 isci_request_set_open_reject_status( 2661 isci_request_set_open_reject_status(
2548 request, task, response_ptr, status_ptr, 2662 request, task, response_ptr, status_ptr,
2549 complete_to_host_ptr, SAS_OREJ_STP_NORES); 2663 SAS_OREJ_STP_NORES);
2550 break; 2664 break;
2551 2665
2552 case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: 2666 case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
2553 2667
2554 isci_request_set_open_reject_status( 2668 isci_request_set_open_reject_status(
2555 request, task, response_ptr, status_ptr, 2669 request, task, response_ptr, status_ptr,
2556 complete_to_host_ptr, SAS_OREJ_EPROTO); 2670 SAS_OREJ_EPROTO);
2557 break; 2671 break;
2558 2672
2559 case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: 2673 case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
2560 2674
2561 isci_request_set_open_reject_status( 2675 isci_request_set_open_reject_status(
2562 request, task, response_ptr, status_ptr, 2676 request, task, response_ptr, status_ptr,
2563 complete_to_host_ptr, SAS_OREJ_CONN_RATE); 2677 SAS_OREJ_CONN_RATE);
2564 break; 2678 break;
2565 2679
2566 case SCU_TASK_DONE_LL_R_ERR: 2680 case SCU_TASK_DONE_LL_R_ERR:
@@ -2592,95 +2706,12 @@ static void isci_request_handle_controller_specific_errors(
2592 *response_ptr = SAS_TASK_UNDELIVERED; 2706 *response_ptr = SAS_TASK_UNDELIVERED;
2593 *status_ptr = SAM_STAT_TASK_ABORTED; 2707 *status_ptr = SAM_STAT_TASK_ABORTED;
2594 2708
2595 if (task->task_proto == SAS_PROTOCOL_SMP) { 2709 if (task->task_proto == SAS_PROTOCOL_SMP)
2596 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2710 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2597 2711 else
2598 *complete_to_host_ptr = isci_perform_normal_io_completion;
2599 } else {
2600 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2712 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2601
2602 *complete_to_host_ptr = isci_perform_error_io_completion;
2603 }
2604 break;
2605 }
2606}
2607
2608/**
2609 * isci_task_save_for_upper_layer_completion() - This function saves the
2610 * request for later completion to the upper layer driver.
2611 * @host: This parameter is a pointer to the host on which the request
2612 * should be queued (either as an error or success).
2613 * @request: This parameter is the completed request.
2614 * @response: This parameter is the response code for the completed task.
2615 * @status: This parameter is the status code for the completed task.
2616 *
2617 * none.
2618 */
2619static void isci_task_save_for_upper_layer_completion(
2620 struct isci_host *host,
2621 struct isci_request *request,
2622 enum service_response response,
2623 enum exec_status status,
2624 enum isci_completion_selection task_notification_selection)
2625{
2626 struct sas_task *task = isci_request_access_task(request);
2627
2628 task_notification_selection
2629 = isci_task_set_completion_status(task, response, status,
2630 task_notification_selection);
2631
2632 /* Tasks aborted specifically by a call to the lldd_abort_task
2633 * function should not be completed to the host in the regular path.
2634 */
2635 switch (task_notification_selection) {
2636
2637 case isci_perform_normal_io_completion:
2638 /* Normal notification (task_done) */
2639
2640 /* Add to the completed list. */
2641 list_add(&request->completed_node,
2642 &host->requests_to_complete);
2643
2644 /* Take the request off the device's pending request list. */
2645 list_del_init(&request->dev_node);
2646 break;
2647
2648 case isci_perform_aborted_io_completion:
2649 /* No notification to libsas because this request is
2650 * already in the abort path.
2651 */
2652 /* Wake up whatever process was waiting for this
2653 * request to complete.
2654 */
2655 WARN_ON(request->io_request_completion == NULL);
2656
2657 if (request->io_request_completion != NULL) {
2658
2659 /* Signal whoever is waiting that this
2660 * request is complete.
2661 */
2662 complete(request->io_request_completion);
2663 }
2664 break;
2665
2666 case isci_perform_error_io_completion:
2667 /* Use sas_task_abort */
2668 /* Add to the aborted list. */
2669 list_add(&request->completed_node,
2670 &host->requests_to_errorback);
2671 break;
2672
2673 default:
2674 /* Add to the error to libsas list. */
2675 list_add(&request->completed_node,
2676 &host->requests_to_errorback);
2677 break; 2713 break;
2678 } 2714 }
2679 dev_dbg(&host->pdev->dev,
2680 "%s: %d - task = %p, response=%d (%d), status=%d (%d)\n",
2681 __func__, task_notification_selection, task,
2682 (task) ? task->task_status.resp : 0, response,
2683 (task) ? task->task_status.stat : 0, status);
2684} 2715}
2685 2716
2686static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis) 2717static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis)
@@ -2715,295 +2746,164 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
2715 struct isci_remote_device *idev = request->target_device; 2746 struct isci_remote_device *idev = request->target_device;
2716 enum service_response response = SAS_TASK_UNDELIVERED; 2747 enum service_response response = SAS_TASK_UNDELIVERED;
2717 enum exec_status status = SAS_ABORTED_TASK; 2748 enum exec_status status = SAS_ABORTED_TASK;
2718 enum isci_request_status request_status;
2719 enum isci_completion_selection complete_to_host
2720 = isci_perform_normal_io_completion;
2721 2749
2722 dev_dbg(&ihost->pdev->dev, 2750 dev_dbg(&ihost->pdev->dev,
2723 "%s: request = %p, task = %p,\n" 2751 "%s: request = %p, task = %p, "
2724 "task->data_dir = %d completion_status = 0x%x\n", 2752 "task->data_dir = %d completion_status = 0x%x\n",
2725 __func__, 2753 __func__, request, task, task->data_dir, completion_status);
2726 request,
2727 task,
2728 task->data_dir,
2729 completion_status);
2730 2754
2731 spin_lock(&request->state_lock); 2755 /* The request is done from an SCU HW perspective. */
2732 request_status = request->status;
2733 2756
2734 /* Decode the request status. Note that if the request has been 2757 /* This is an active request being completed from the core. */
2735 * aborted by a task management function, we don't care 2758 switch (completion_status) {
2736 * what the status is.
2737 */
2738 switch (request_status) {
2739
2740 case aborted:
2741 /* "aborted" indicates that the request was aborted by a task
2742 * management function, since once a task management request is
2743 * performed by the device, the request only completes because
2744 * of the subsequent driver terminate.
2745 *
2746 * Aborted also means an external thread is explicitly managing
2747 * this request, so that we do not complete it up the stack.
2748 *
2749 * The target is still there (since the TMF was successful).
2750 */
2751 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2752 response = SAS_TASK_COMPLETE;
2753 2759
2754 /* See if the device has been/is being stopped. Note 2760 case SCI_IO_FAILURE_RESPONSE_VALID:
2755 * that we ignore the quiesce state, since we are 2761 dev_dbg(&ihost->pdev->dev,
2756 * concerned about the actual device state. 2762 "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
2757 */ 2763 __func__, request, task);
2758 if (!idev) 2764
2759 status = SAS_DEVICE_UNKNOWN; 2765 if (sas_protocol_ata(task->task_proto)) {
2760 else 2766 isci_process_stp_response(task, &request->stp.rsp);
2761 status = SAS_ABORTED_TASK; 2767 } else if (SAS_PROTOCOL_SSP == task->task_proto) {
2768
2769 /* crack the iu response buffer. */
2770 resp_iu = &request->ssp.rsp;
2771 isci_request_process_response_iu(task, resp_iu,
2772 &ihost->pdev->dev);
2773
2774 } else if (SAS_PROTOCOL_SMP == task->task_proto) {
2775
2776 dev_err(&ihost->pdev->dev,
2777 "%s: SCI_IO_FAILURE_RESPONSE_VALID: "
2778 "SAS_PROTOCOL_SMP protocol\n",
2779 __func__);
2762 2780
2763 complete_to_host = isci_perform_aborted_io_completion; 2781 } else
2764 /* This was an aborted request. */ 2782 dev_err(&ihost->pdev->dev,
2783 "%s: unknown protocol\n", __func__);
2765 2784
2766 spin_unlock(&request->state_lock); 2785 /* use the task status set in the task struct by the
2786 * isci_request_process_response_iu call.
2787 */
2788 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2789 response = task->task_status.resp;
2790 status = task->task_status.stat;
2767 break; 2791 break;
2768 2792
2769 case aborting: 2793 case SCI_IO_SUCCESS:
2770 /* aborting means that the task management function tried and 2794 case SCI_IO_SUCCESS_IO_DONE_EARLY:
2771 * failed to abort the request. We need to note the request 2795
2772 * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the 2796 response = SAS_TASK_COMPLETE;
2773 * target as down. 2797 status = SAM_STAT_GOOD;
2774 *
2775 * Aborting also means an external thread is explicitly managing
2776 * this request, so that we do not complete it up the stack.
2777 */
2778 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2798 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2779 response = SAS_TASK_UNDELIVERED;
2780 2799
2781 if (!idev) 2800 if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) {
2782 /* The device has been/is being stopped. Note that
2783 * we ignore the quiesce state, since we are
2784 * concerned about the actual device state.
2785 */
2786 status = SAS_DEVICE_UNKNOWN;
2787 else
2788 status = SAS_PHY_DOWN;
2789 2801
2790 complete_to_host = isci_perform_aborted_io_completion; 2802 /* This was an SSP / STP / SATA transfer.
2803 * There is a possibility that less data than
2804 * the maximum was transferred.
2805 */
2806 u32 transferred_length = sci_req_tx_bytes(request);
2791 2807
2792 /* This was an aborted request. */ 2808 task->task_status.residual
2809 = task->total_xfer_len - transferred_length;
2810
2811 /* If there were residual bytes, call this an
2812 * underrun.
2813 */
2814 if (task->task_status.residual != 0)
2815 status = SAS_DATA_UNDERRUN;
2793 2816
2794 spin_unlock(&request->state_lock); 2817 dev_dbg(&ihost->pdev->dev,
2818 "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
2819 __func__, status);
2820
2821 } else
2822 dev_dbg(&ihost->pdev->dev, "%s: SCI_IO_SUCCESS\n",
2823 __func__);
2795 break; 2824 break;
2796 2825
2797 case terminating: 2826 case SCI_IO_FAILURE_TERMINATED:
2798 2827
2799 /* This was a terminated request. This happens when 2828 dev_dbg(&ihost->pdev->dev,
2800 * the I/O is being terminated because of an action on 2829 "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
2801 * the device (reset, tear down, etc.), and the I/O needs 2830 __func__, request, task);
2802 * to be completed up the stack. 2831
2803 */ 2832 /* The request was terminated explicitly. */
2804 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2833 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2805 response = SAS_TASK_UNDELIVERED; 2834 response = SAS_TASK_UNDELIVERED;
2806 2835
2807 /* See if the device has been/is being stopped. Note 2836 /* See if the device has been/is being stopped. Note
2808 * that we ignore the quiesce state, since we are 2837 * that we ignore the quiesce state, since we are
2809 * concerned about the actual device state. 2838 * concerned about the actual device state.
2810 */ 2839 */
2811 if (!idev) 2840 if (!idev)
2812 status = SAS_DEVICE_UNKNOWN; 2841 status = SAS_DEVICE_UNKNOWN;
2813 else 2842 else
2814 status = SAS_ABORTED_TASK; 2843 status = SAS_ABORTED_TASK;
2815
2816 complete_to_host = isci_perform_aborted_io_completion;
2817
2818 /* This was a terminated request. */
2819
2820 spin_unlock(&request->state_lock);
2821 break; 2844 break;
2822 2845
2823 case dead: 2846 case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
2824 /* This was a terminated request that timed-out during the
2825 * termination process. There is no task to complete to
2826 * libsas.
2827 */
2828 complete_to_host = isci_perform_normal_io_completion;
2829 spin_unlock(&request->state_lock);
2830 break;
2831 2847
2832 default: 2848 isci_request_handle_controller_specific_errors(idev, request,
2833 2849 task, &response,
2834 /* The request is done from an SCU HW perspective. */ 2850 &status);
2835 request->status = completed; 2851 break;
2836
2837 spin_unlock(&request->state_lock);
2838
2839 /* This is an active request being completed from the core. */
2840 switch (completion_status) {
2841
2842 case SCI_IO_FAILURE_RESPONSE_VALID:
2843 dev_dbg(&ihost->pdev->dev,
2844 "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
2845 __func__,
2846 request,
2847 task);
2848
2849 if (sas_protocol_ata(task->task_proto)) {
2850 isci_process_stp_response(task, &request->stp.rsp);
2851 } else if (SAS_PROTOCOL_SSP == task->task_proto) {
2852
2853 /* crack the iu response buffer. */
2854 resp_iu = &request->ssp.rsp;
2855 isci_request_process_response_iu(task, resp_iu,
2856 &ihost->pdev->dev);
2857
2858 } else if (SAS_PROTOCOL_SMP == task->task_proto) {
2859
2860 dev_err(&ihost->pdev->dev,
2861 "%s: SCI_IO_FAILURE_RESPONSE_VALID: "
2862 "SAS_PROTOCOL_SMP protocol\n",
2863 __func__);
2864
2865 } else
2866 dev_err(&ihost->pdev->dev,
2867 "%s: unknown protocol\n", __func__);
2868
2869 /* use the task status set in the task struct by the
2870 * isci_request_process_response_iu call.
2871 */
2872 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2873 response = task->task_status.resp;
2874 status = task->task_status.stat;
2875 break;
2876 2852
2877 case SCI_IO_SUCCESS: 2853 case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
2878 case SCI_IO_SUCCESS_IO_DONE_EARLY: 2854 /* This is a special case, in that the I/O completion
2855 * is telling us that the device needs a reset.
2856 * In order for the device reset condition to be
2857 * noticed, the I/O has to be handled in the error
2858 * handler. Set the reset flag and cause the
2859 * SCSI error thread to be scheduled.
2860 */
2861 spin_lock_irqsave(&task->task_state_lock, task_flags);
2862 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
2863 spin_unlock_irqrestore(&task->task_state_lock, task_flags);
2879 2864
2880 response = SAS_TASK_COMPLETE; 2865 /* Fail the I/O. */
2881 status = SAM_STAT_GOOD; 2866 response = SAS_TASK_UNDELIVERED;
2882 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2867 status = SAM_STAT_TASK_ABORTED;
2883 2868
2884 if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) { 2869 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2870 break;
2885 2871
2886 /* This was an SSP / STP / SATA transfer. 2872 case SCI_FAILURE_RETRY_REQUIRED:
2887 * There is a possibility that less data than
2888 * the maximum was transferred.
2889 */
2890 u32 transferred_length = sci_req_tx_bytes(request);
2891 2873
2892 task->task_status.residual 2874 /* Fail the I/O so it can be retried. */
2893 = task->total_xfer_len - transferred_length; 2875 response = SAS_TASK_UNDELIVERED;
2876 if (!idev)
2877 status = SAS_DEVICE_UNKNOWN;
2878 else
2879 status = SAS_ABORTED_TASK;
2894 2880
2895 /* If there were residual bytes, call this an 2881 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2896 * underrun. 2882 break;
2897 */
2898 if (task->task_status.residual != 0)
2899 status = SAS_DATA_UNDERRUN;
2900 2883
2901 dev_dbg(&ihost->pdev->dev,
2902 "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
2903 __func__,
2904 status);
2905 2884
2906 } else 2885 default:
2907 dev_dbg(&ihost->pdev->dev, 2886 /* Catch any otherwise unhandled error codes here. */
2908 "%s: SCI_IO_SUCCESS\n", 2887 dev_dbg(&ihost->pdev->dev,
2909 __func__); 2888 "%s: invalid completion code: 0x%x - "
2889 "isci_request = %p\n",
2890 __func__, completion_status, request);
2910 2891
2911 break; 2892 response = SAS_TASK_UNDELIVERED;
2912 2893
2913 case SCI_IO_FAILURE_TERMINATED: 2894 /* See if the device has been/is being stopped. Note
2914 dev_dbg(&ihost->pdev->dev, 2895 * that we ignore the quiesce state, since we are
2915 "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n", 2896 * concerned about the actual device state.
2916 __func__, 2897 */
2917 request, 2898 if (!idev)
2918 task); 2899 status = SAS_DEVICE_UNKNOWN;
2900 else
2901 status = SAS_ABORTED_TASK;
2919 2902
2920 /* The request was terminated explicitly. No handling 2903 if (SAS_PROTOCOL_SMP == task->task_proto)
2921 * is needed in the SCSI error handler path.
2922 */
2923 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2904 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2924 response = SAS_TASK_UNDELIVERED; 2905 else
2925
2926 /* See if the device has been/is being stopped. Note
2927 * that we ignore the quiesce state, since we are
2928 * concerned about the actual device state.
2929 */
2930 if (!idev)
2931 status = SAS_DEVICE_UNKNOWN;
2932 else
2933 status = SAS_ABORTED_TASK;
2934
2935 complete_to_host = isci_perform_normal_io_completion;
2936 break;
2937
2938 case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
2939
2940 isci_request_handle_controller_specific_errors(
2941 idev, request, task, &response, &status,
2942 &complete_to_host);
2943
2944 break;
2945
2946 case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
2947 /* This is a special case, in that the I/O completion
2948 * is telling us that the device needs a reset.
2949 * In order for the device reset condition to be
2950 * noticed, the I/O has to be handled in the error
2951 * handler. Set the reset flag and cause the
2952 * SCSI error thread to be scheduled.
2953 */
2954 spin_lock_irqsave(&task->task_state_lock, task_flags);
2955 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
2956 spin_unlock_irqrestore(&task->task_state_lock, task_flags);
2957
2958 /* Fail the I/O. */
2959 response = SAS_TASK_UNDELIVERED;
2960 status = SAM_STAT_TASK_ABORTED;
2961
2962 complete_to_host = isci_perform_error_io_completion;
2963 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2906 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2964 break;
2965
2966 case SCI_FAILURE_RETRY_REQUIRED:
2967
2968 /* Fail the I/O so it can be retried. */
2969 response = SAS_TASK_UNDELIVERED;
2970 if (!idev)
2971 status = SAS_DEVICE_UNKNOWN;
2972 else
2973 status = SAS_ABORTED_TASK;
2974
2975 complete_to_host = isci_perform_normal_io_completion;
2976 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2977 break;
2978
2979
2980 default:
2981 /* Catch any otherwise unhandled error codes here. */
2982 dev_dbg(&ihost->pdev->dev,
2983 "%s: invalid completion code: 0x%x - "
2984 "isci_request = %p\n",
2985 __func__, completion_status, request);
2986
2987 response = SAS_TASK_UNDELIVERED;
2988
2989 /* See if the device has been/is being stopped. Note
2990 * that we ignore the quiesce state, since we are
2991 * concerned about the actual device state.
2992 */
2993 if (!idev)
2994 status = SAS_DEVICE_UNKNOWN;
2995 else
2996 status = SAS_ABORTED_TASK;
2997
2998 if (SAS_PROTOCOL_SMP == task->task_proto) {
2999 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
3000 complete_to_host = isci_perform_normal_io_completion;
3001 } else {
3002 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
3003 complete_to_host = isci_perform_error_io_completion;
3004 }
3005 break;
3006 }
3007 break; 2907 break;
3008 } 2908 }
3009 2909
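[Editorial note] The hunk above removes the driver-private request state machine (started/aborted/aborting/terminating/dead) together with the complete_to_host disposition; after the rework everything keys off the core's completion_status, with the single IREQ_COMPLETE_IN_TARGET bit recording whether libsas may treat the task as done. A minimal sketch of that flag pattern (illustrative only, not part of the patch; the toy_* names are invented):

	/* Sketch: one atomic flag bit replaces the old per-request status
	 * enum and its state_lock.
	 */
	#define IREQ_COMPLETE_IN_TARGET	0

	struct toy_request {
		unsigned long flags;	/* manipulated with atomic bitops */
	};

	static bool toy_should_notify(struct toy_request *req)
	{
		/* Requests that finished in the target go through the normal
		 * task_done() path; everything else stays with the error
		 * handler or the abort path.
		 */
		return test_bit(IREQ_COMPLETE_IN_TARGET, &req->flags);
	}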
@@ -3038,10 +2938,18 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
 		break;
 	}
 
-	/* Put the completed request on the correct list */
-	isci_task_save_for_upper_layer_completion(ihost, request, response,
-						  status, complete_to_host
-						  );
+	spin_lock_irqsave(&task->task_state_lock, task_flags);
+
+	task->task_status.resp = response;
+	task->task_status.stat = status;
+
+	if (test_bit(IREQ_COMPLETE_IN_TARGET, &request->flags)) {
+		/* Normal notification (task_done) */
+		task->task_state_flags |= SAS_TASK_STATE_DONE;
+		task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
+					    SAS_TASK_STATE_PENDING);
+	}
+	spin_unlock_irqrestore(&task->task_state_lock, task_flags);
 
 	/* complete the io request to the core. */
 	sci_controller_complete_io(ihost, request->target_device, request);
@@ -3051,6 +2959,8 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
 	 * task to recognize the already completed case.
 	 */
 	set_bit(IREQ_TERMINATED, &request->flags);
+
+	ireq_done(ihost, request, task);
 }
 
 static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
@@ -3169,7 +3079,7 @@ sci_general_request_construct(struct isci_host *ihost,
 	sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT);
 
 	ireq->target_device = idev;
-	ireq->protocol = SCIC_NO_PROTOCOL;
+	ireq->protocol = SAS_PROTOCOL_NONE;
 	ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
 
 	ireq->sci_status = SCI_SUCCESS;
@@ -3193,7 +3103,7 @@ sci_io_request_construct(struct isci_host *ihost,
 
 	if (dev->dev_type == SAS_END_DEV)
 		/* pass */;
-	else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
+	else if (dev_is_sata(dev))
 		memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
 	else if (dev_is_expander(dev))
 		/* pass */;
@@ -3215,10 +3125,15 @@ enum sci_status sci_task_request_construct(struct isci_host *ihost,
 	/* Build the common part of the request */
 	sci_general_request_construct(ihost, idev, ireq);
 
-	if (dev->dev_type == SAS_END_DEV ||
-	    dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+	if (dev->dev_type == SAS_END_DEV || dev_is_sata(dev)) {
 		set_bit(IREQ_TMF, &ireq->flags);
 		memset(ireq->tc, 0, sizeof(struct scu_task_context));
+
+		/* Set the protocol indicator. */
+		if (dev_is_sata(dev))
+			ireq->protocol = SAS_PROTOCOL_STP;
+		else
+			ireq->protocol = SAS_PROTOCOL_SSP;
 	} else
 		status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
 
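[Editorial note] sci_task_request_construct() now derives the TMF transport from the libsas domain_device instead of open-coding dev_type/tproto checks. The decision reduces to this helper-shaped sketch (illustrative only; tmf_protocol is an invented name, dev_is_sata() is the libsas helper used above):

	static enum sas_protocol tmf_protocol(struct domain_device *dev)
	{
		/* SATA/STP targets get an STP task context, SSP end devices
		 * an SSP one; other device types were already rejected with
		 * SCI_FAILURE_UNSUPPORTED_PROTOCOL.
		 */
		return dev_is_sata(dev) ? SAS_PROTOCOL_STP : SAS_PROTOCOL_SSP;
	}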
@@ -3311,7 +3226,7 @@ sci_io_request_construct_smp(struct device *dev,
 	if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
 		return SCI_FAILURE;
 
-	ireq->protocol = SCIC_SMP_PROTOCOL;
+	ireq->protocol = SAS_PROTOCOL_SMP;
 
 	/* byte swap the smp request. */
 
@@ -3496,9 +3411,6 @@ static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 t
 	ireq->io_request_completion = NULL;
 	ireq->flags = 0;
 	ireq->num_sg_entries = 0;
-	INIT_LIST_HEAD(&ireq->completed_node);
-	INIT_LIST_HEAD(&ireq->dev_node);
-	isci_request_change_state(ireq, allocated);
 
 	return ireq;
 }
@@ -3582,26 +3494,15 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide
 		spin_unlock_irqrestore(&ihost->scic_lock, flags);
 		return status;
 	}
-
 	/* Either I/O started OK, or the core has signaled that
 	 * the device needs a target reset.
-	 *
-	 * In either case, hold onto the I/O for later.
-	 *
-	 * Update it's status and add it to the list in the
-	 * remote device object.
 	 */
-	list_add(&ireq->dev_node, &idev->reqs_in_process);
-
-	if (status == SCI_SUCCESS) {
-		isci_request_change_state(ireq, started);
-	} else {
+	if (status != SCI_SUCCESS) {
 		/* The request did not really start in the
 		 * hardware, so clear the request handle
 		 * here so no terminations will be done.
 		 */
 		set_bit(IREQ_TERMINATED, &ireq->flags);
-		isci_request_change_state(ireq, completed);
 	}
 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
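[Editorial note] With the per-device reqs_in_process list gone, isci_request_execute() no longer records started/completed transitions; the only bookkeeping left on a failed start is the IREQ_TERMINATED bit, which keeps later termination passes from touching a request that never reached the hardware. Sketch of the resulting contract (names taken from the hunk above):

	/* After sci_controller_start_io()/start_task():
	 *   status == SCI_SUCCESS  -> the request is live in the SCU
	 *   otherwise              -> mark it terminated so nothing
	 *                             tries to terminate it again
	 */
	if (status != SCI_SUCCESS)
		set_bit(IREQ_TERMINATED, &ireq->flags);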
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h
index 057f2378452d..aff95317fcf4 100644
--- a/drivers/scsi/isci/request.h
+++ b/drivers/scsi/isci/request.h
@@ -61,30 +61,6 @@
 #include "scu_task_context.h"
 
 /**
- * struct isci_request_status - This enum defines the possible states of an I/O
- *    request.
- *
- *
- */
-enum isci_request_status {
-	unallocated = 0x00,
-	allocated = 0x01,
-	started = 0x02,
-	completed = 0x03,
-	aborting = 0x04,
-	aborted = 0x05,
-	terminating = 0x06,
-	dead = 0x07
-};
-
-enum sci_request_protocol {
-	SCIC_NO_PROTOCOL,
-	SCIC_SMP_PROTOCOL,
-	SCIC_SSP_PROTOCOL,
-	SCIC_STP_PROTOCOL
-}; /* XXX remove me, use sas_task.{dev|task_proto} instead */;
-
-/**
  * isci_stp_request - extra request infrastructure to handle pio/atapi protocol
  * @pio_len - number of bytes requested at PIO setup
  * @status - pio setup ending status value to tell us if we need
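[Editorial note] The deleted sci_request_protocol enum duplicated libsas' generic enum sas_protocol, and the request.c hunks above substitute the generic values one for one. The mapping, summarized from those substitutions (SAS_PROTOCOL_* come from the SAS/libsas headers):

	/* SCIC_NO_PROTOCOL  -> SAS_PROTOCOL_NONE
	 * SCIC_SMP_PROTOCOL -> SAS_PROTOCOL_SMP
	 * SCIC_SSP_PROTOCOL -> SAS_PROTOCOL_SSP
	 * SCIC_STP_PROTOCOL -> SAS_PROTOCOL_STP
	 */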
@@ -104,11 +80,14 @@ struct isci_stp_request {
 };
 
 struct isci_request {
-	enum isci_request_status status;
 	#define IREQ_COMPLETE_IN_TARGET 0
 	#define IREQ_TERMINATED 1
 	#define IREQ_TMF 2
 	#define IREQ_ACTIVE 3
+	#define IREQ_PENDING_ABORT 4 /* Set == device was not suspended yet */
+	#define IREQ_TC_ABORT_POSTED 5
+	#define IREQ_ABORT_PATH_ACTIVE 6
+	#define IREQ_NO_AUTO_FREE_TAG 7 /* Set when being explicitly managed */
 	unsigned long flags;
 	/* XXX kill ttype and ttype_ptr, allocate full sas_task */
 	union ttype_ptr_union {
@@ -116,11 +95,6 @@ struct isci_request {
 		struct isci_tmf *tmf_task_ptr;	/* When ttype==tmf_task */
 	} ttype_ptr;
 	struct isci_host *isci_host;
-	/* For use in the requests_to_{complete|abort} lists: */
-	struct list_head completed_node;
-	/* For use in the reqs_in_process list: */
-	struct list_head dev_node;
-	spinlock_t state_lock;
 	dma_addr_t request_daddr;
 	dma_addr_t zero_scatter_daddr;
 	unsigned int num_sg_entries;
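[Editorial note] The four new flag bits take over the coordination work the deleted list_heads and state_lock used to do. Their intent, pieced together from the in-line comments here and the uses visible in the task.c hunks below (editorial summary; hedged where this commit page is silent):

	/* IREQ_PENDING_ABORT     - abort was requested before the device
	 *                          had been suspended (per its comment)
	 * IREQ_TC_ABORT_POSTED   - apparently: a terminate has been posted
	 *                          to the SCU for this task context
	 * IREQ_ABORT_PATH_ACTIVE - an abort/terminate path owns the request;
	 *                          its completion wakes ihost->eventq
	 * IREQ_NO_AUTO_FREE_TAG  - the managing path, not the completion
	 *                          handler, frees the io_tag
	 */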
@@ -140,7 +114,7 @@ struct isci_request {
 	struct isci_host *owning_controller;
 	struct isci_remote_device *target_device;
 	u16 io_tag;
-	enum sci_request_protocol protocol;
+	enum sas_protocol protocol;
 	u32 scu_status;		/* hardware result */
 	u32 sci_status;		/* upper layer disposition */
 	u32 post_context;
@@ -309,92 +283,6 @@ sci_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr)
 	return ireq->request_daddr + (requested_addr - base_addr);
 }
 
-/**
- * isci_request_change_state() - This function sets the status of the request
- *    object.
- * @request: This parameter points to the isci_request object
- * @status: This Parameter is the new status of the object
- *
- */
-static inline enum isci_request_status
-isci_request_change_state(struct isci_request *isci_request,
-			  enum isci_request_status status)
-{
-	enum isci_request_status old_state;
-	unsigned long flags;
-
-	dev_dbg(&isci_request->isci_host->pdev->dev,
-		"%s: isci_request = %p, state = 0x%x\n",
-		__func__,
-		isci_request,
-		status);
-
-	BUG_ON(isci_request == NULL);
-
-	spin_lock_irqsave(&isci_request->state_lock, flags);
-	old_state = isci_request->status;
-	isci_request->status = status;
-	spin_unlock_irqrestore(&isci_request->state_lock, flags);
-
-	return old_state;
-}
-
-/**
- * isci_request_change_started_to_newstate() - This function sets the status of
- *    the request object.
- * @request: This parameter points to the isci_request object
- * @status: This Parameter is the new status of the object
- *
- * state previous to any change.
- */
-static inline enum isci_request_status
-isci_request_change_started_to_newstate(struct isci_request *isci_request,
-					struct completion *completion_ptr,
-					enum isci_request_status newstate)
-{
-	enum isci_request_status old_state;
-	unsigned long flags;
-
-	spin_lock_irqsave(&isci_request->state_lock, flags);
-
-	old_state = isci_request->status;
-
-	if (old_state == started || old_state == aborting) {
-		BUG_ON(isci_request->io_request_completion != NULL);
-
-		isci_request->io_request_completion = completion_ptr;
-		isci_request->status = newstate;
-	}
-
-	spin_unlock_irqrestore(&isci_request->state_lock, flags);
-
-	dev_dbg(&isci_request->isci_host->pdev->dev,
-		"%s: isci_request = %p, old_state = 0x%x\n",
-		__func__,
-		isci_request,
-		old_state);
-
-	return old_state;
-}
-
-/**
- * isci_request_change_started_to_aborted() - This function sets the status of
- *    the request object.
- * @request: This parameter points to the isci_request object
- * @completion_ptr: This parameter is saved as the kernel completion structure
- *    signalled when the old request completes.
- *
- * state previous to any change.
- */
-static inline enum isci_request_status
-isci_request_change_started_to_aborted(struct isci_request *isci_request,
-				       struct completion *completion_ptr)
-{
-	return isci_request_change_started_to_newstate(isci_request,
-						       completion_ptr,
-						       aborted);
-}
-
 #define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr)
 
 #define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr)
@@ -404,8 +292,6 @@ struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
 				       u16 tag);
 int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
 			 struct sas_task *task, u16 tag);
-void isci_terminate_pending_requests(struct isci_host *ihost,
-				     struct isci_remote_device *idev);
 enum sci_status
 sci_task_request_construct(struct isci_host *ihost,
 			   struct isci_remote_device *idev,
@@ -421,5 +307,4 @@ static inline int isci_task_is_ncq_recovery(struct sas_task *task)
 		task->ata_task.fis.lbal == ATA_LOG_SATA_NCQ);
 
 }
-
 #endif /* !defined(_ISCI_REQUEST_H_) */
diff --git a/drivers/scsi/isci/scu_completion_codes.h b/drivers/scsi/isci/scu_completion_codes.h
index c8b329c695f9..071cb74a211c 100644
--- a/drivers/scsi/isci/scu_completion_codes.h
+++ b/drivers/scsi/isci/scu_completion_codes.h
@@ -224,6 +224,7 @@
  * 32-bit value like we want, each immediate value must be cast to a u32.
  */
 #define SCU_TASK_DONE_GOOD                  ((u32)0x00)
+#define SCU_TASK_DONE_TX_RAW_CMD_ERR        ((u32)0x08)
 #define SCU_TASK_DONE_CRC_ERR               ((u32)0x14)
 #define SCU_TASK_DONE_CHECK_RESPONSE        ((u32)0x14)
 #define SCU_TASK_DONE_GEN_RESPONSE          ((u32)0x15)
@@ -237,6 +238,7 @@
 #define SCU_TASK_DONE_LL_LF_TERM            ((u32)0x1A)
 #define SCU_TASK_DONE_DATA_LEN_ERR          ((u32)0x1A)
 #define SCU_TASK_DONE_LL_CL_TERM            ((u32)0x1B)
+#define SCU_TASK_DONE_BREAK_RCVD            ((u32)0x1B)
 #define SCU_TASK_DONE_LL_ABORT_ERR          ((u32)0x1B)
 #define SCU_TASK_DONE_SEQ_INV_TYPE          ((u32)0x1C)
 #define SCU_TASK_DONE_UNEXP_XR              ((u32)0x1C)
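[Editorial note] Several SCU_TASK_DONE_* names deliberately alias one raw completion value (0x14, 0x1A, and now 0x1B each carry multiple names), so a decoder must pick the name by protocol context rather than by value alone. A compile-time illustration of the aliasing, derived directly from the defines above (not part of the patch):

	BUILD_BUG_ON(SCU_TASK_DONE_BREAK_RCVD != SCU_TASK_DONE_LL_ABORT_ERR);
	BUILD_BUG_ON(SCU_TASK_DONE_CRC_ERR != SCU_TASK_DONE_CHECK_RESPONSE);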
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 374254ede9d4..6bc74eb012c9 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -78,54 +78,25 @@ static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
 			     enum exec_status status)
 
 {
-	enum isci_completion_selection disposition;
+	unsigned long flags;
 
-	disposition = isci_perform_normal_io_completion;
-	disposition = isci_task_set_completion_status(task, response, status,
-						      disposition);
+	/* Normal notification (task_done) */
+	dev_dbg(&ihost->pdev->dev, "%s: task = %p, response=%d, status=%d\n",
+		__func__, task, response, status);
 
-	/* Tasks aborted specifically by a call to the lldd_abort_task
-	 * function should not be completed to the host in the regular path.
-	 */
-	switch (disposition) {
-	case isci_perform_normal_io_completion:
-		/* Normal notification (task_done) */
-		dev_dbg(&ihost->pdev->dev,
-			"%s: Normal - task = %p, response=%d, "
-			"status=%d\n",
-			__func__, task, response, status);
-
-		task->lldd_task = NULL;
-		task->task_done(task);
-		break;
-
-	case isci_perform_aborted_io_completion:
-		/*
-		 * No notification because this request is already in the
-		 * abort path.
-		 */
-		dev_dbg(&ihost->pdev->dev,
-			"%s: Aborted - task = %p, response=%d, "
-			"status=%d\n",
-			__func__, task, response, status);
-		break;
+	spin_lock_irqsave(&task->task_state_lock, flags);
 
-	case isci_perform_error_io_completion:
-		/* Use sas_task_abort */
-		dev_dbg(&ihost->pdev->dev,
-			"%s: Error - task = %p, response=%d, "
-			"status=%d\n",
-			__func__, task, response, status);
-		sas_task_abort(task);
-		break;
+	task->task_status.resp = response;
+	task->task_status.stat = status;
 
-	default:
-		dev_dbg(&ihost->pdev->dev,
-			"%s: isci task notification default case!",
-			__func__);
-		sas_task_abort(task);
-		break;
-	}
+	/* Normal notification (task_done) */
+	task->task_state_flags |= SAS_TASK_STATE_DONE;
+	task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
+				    SAS_TASK_STATE_PENDING);
+	task->lldd_task = NULL;
+	spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+	task->task_done(task);
 }
 
 #define for_each_sas_task(num, task) \
@@ -289,60 +260,6 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost,
 	return ireq;
 }
 
-/**
-* isci_request_mark_zombie() - This function must be called with scic_lock held.
-*/
-static void isci_request_mark_zombie(struct isci_host *ihost, struct isci_request *ireq)
-{
-	struct completion *tmf_completion = NULL;
-	struct completion *req_completion;
-
-	/* Set the request state to "dead". */
-	ireq->status = dead;
-
-	req_completion = ireq->io_request_completion;
-	ireq->io_request_completion = NULL;
-
-	if (test_bit(IREQ_TMF, &ireq->flags)) {
-		/* Break links with the TMF request. */
-		struct isci_tmf *tmf = isci_request_access_tmf(ireq);
-
-		/* In the case where a task request is dying,
-		 * the thread waiting on the complete will sit and
-		 * timeout unless we wake it now. Since the TMF
-		 * has a default error status, complete it here
-		 * to wake the waiting thread.
-		 */
-		if (tmf) {
-			tmf_completion = tmf->complete;
-			tmf->complete = NULL;
-		}
-		ireq->ttype_ptr.tmf_task_ptr = NULL;
-		dev_dbg(&ihost->pdev->dev, "%s: tmf_code %d, managed tag %#x\n",
-			__func__, tmf->tmf_code, tmf->io_tag);
-	} else {
-		/* Break links with the sas_task - the callback is done
-		 * elsewhere.
-		 */
-		struct sas_task *task = isci_request_access_task(ireq);
-
-		if (task)
-			task->lldd_task = NULL;
-
-		ireq->ttype_ptr.io_task_ptr = NULL;
-	}
-
-	dev_warn(&ihost->pdev->dev, "task context unrecoverable (tag: %#x)\n",
-		 ireq->io_tag);
-
-	/* Don't force waiting threads to timeout. */
-	if (req_completion)
-		complete(req_completion);
-
-	if (tmf_completion != NULL)
-		complete(tmf_completion);
-}
-
 static int isci_task_execute_tmf(struct isci_host *ihost,
 				 struct isci_remote_device *idev,
 				 struct isci_tmf *tmf, unsigned long timeout_ms)
@@ -400,17 +317,11 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
 		spin_unlock_irqrestore(&ihost->scic_lock, flags);
 		goto err_tci;
 	}
-
-	if (tmf->cb_state_func != NULL)
-		tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data);
-
-	isci_request_change_state(ireq, started);
-
-	/* add the request to the remote device request list. */
-	list_add(&ireq->dev_node, &idev->reqs_in_process);
-
 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
+	/* The RNC must be unsuspended before the TMF can get a response. */
+	isci_remote_device_resume_from_abort(ihost, idev);
+
 	/* Wait for the TMF to complete, or a timeout. */
 	timeleft = wait_for_completion_timeout(&completion,
 					       msecs_to_jiffies(timeout_ms));
@@ -419,32 +330,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
 		/* The TMF did not complete - this could be because
 		 * of an unplug. Terminate the TMF request now.
 		 */
-		spin_lock_irqsave(&ihost->scic_lock, flags);
-
-		if (tmf->cb_state_func != NULL)
-			tmf->cb_state_func(isci_tmf_timed_out, tmf,
-					   tmf->cb_data);
-
-		sci_controller_terminate_request(ihost, idev, ireq);
-
-		spin_unlock_irqrestore(&ihost->scic_lock, flags);
-
-		timeleft = wait_for_completion_timeout(
-			&completion,
-			msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));
-
-		if (!timeleft) {
-			/* Strange condition - the termination of the TMF
-			 * request timed-out.
-			 */
-			spin_lock_irqsave(&ihost->scic_lock, flags);
-
-			/* If the TMF status has not changed, kill it. */
-			if (tmf->status == SCI_FAILURE_TIMEOUT)
-				isci_request_mark_zombie(ihost, ireq);
-
-			spin_unlock_irqrestore(&ihost->scic_lock, flags);
-		}
+		isci_remote_device_suspend_terminate(ihost, idev, ireq);
 	}
 
 	isci_print_tmf(ihost, tmf);
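[Editorial note] The TMF path now resumes the remote node context before waiting (a suspended RNC would never deliver the TMF response) and, on timeout, reuses the same suspend/terminate primitive as the reset paths instead of the removed zombie bookkeeping. The shape of the new flow, condensed from the two hunks above (sketch only):

	isci_remote_device_resume_from_abort(ihost, idev);	/* unsuspend RNC */
	timeleft = wait_for_completion_timeout(&completion,
					       msecs_to_jiffies(timeout_ms));
	if (timeleft == 0)	/* no response: quiesce and kill the TMF */
		isci_remote_device_suspend_terminate(ihost, idev, ireq);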
@@ -476,315 +362,21 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
 }
 
 static void isci_task_build_tmf(struct isci_tmf *tmf,
-				enum isci_tmf_function_codes code,
-				void (*tmf_sent_cb)(enum isci_tmf_cb_state,
-						    struct isci_tmf *,
-						    void *),
-				void *cb_data)
+				enum isci_tmf_function_codes code)
 {
 	memset(tmf, 0, sizeof(*tmf));
-
-	tmf->tmf_code = code;
-	tmf->cb_state_func = tmf_sent_cb;
-	tmf->cb_data = cb_data;
+	tmf->tmf_code = code;
 }
 
 static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf,
 					   enum isci_tmf_function_codes code,
-					   void (*tmf_sent_cb)(enum isci_tmf_cb_state,
-							       struct isci_tmf *,
-							       void *),
 					   struct isci_request *old_request)
 {
-	isci_task_build_tmf(tmf, code, tmf_sent_cb, old_request);
+	isci_task_build_tmf(tmf, code);
 	tmf->io_tag = old_request->io_tag;
 }
 
 /**
- * isci_task_validate_request_to_abort() - This function checks the given I/O
- *    against the "started" state. If the request is still "started", it's
- *    state is changed to aborted. NOTE: isci_host->scic_lock MUST BE HELD
- *    BEFORE CALLING THIS FUNCTION.
- * @isci_request: This parameter specifies the request object to control.
- * @isci_host: This parameter specifies the ISCI host object
- * @isci_device: This is the device to which the request is pending.
- * @aborted_io_completion: This is a completion structure that will be added to
- *    the request in case it is changed to aborting; this completion is
- *    triggered when the request is fully completed.
- *
- * Either "started" on successful change of the task status to "aborted", or
- * "unallocated" if the task cannot be controlled.
- */
-static enum isci_request_status isci_task_validate_request_to_abort(
-	struct isci_request *isci_request,
-	struct isci_host *isci_host,
-	struct isci_remote_device *isci_device,
-	struct completion *aborted_io_completion)
-{
-	enum isci_request_status old_state = unallocated;
-
-	/* Only abort the task if it's in the
-	 * device's request_in_process list
-	 */
-	if (isci_request && !list_empty(&isci_request->dev_node)) {
-		old_state = isci_request_change_started_to_aborted(
-			isci_request, aborted_io_completion);
-
-	}
-
-	return old_state;
-}
-
-static int isci_request_is_dealloc_managed(enum isci_request_status stat)
-{
-	switch (stat) {
-	case aborted:
-	case aborting:
-	case terminating:
-	case completed:
-	case dead:
-		return true;
-	default:
-		return false;
-	}
-}
-
-/**
- * isci_terminate_request_core() - This function will terminate the given
- *    request, and wait for it to complete. This function must only be called
- *    from a thread that can wait. Note that the request is terminated and
- *    completed (back to the host, if started there).
- * @ihost: This SCU.
- * @idev: The target.
- * @isci_request: The I/O request to be terminated.
- *
- */
-static void isci_terminate_request_core(struct isci_host *ihost,
-					struct isci_remote_device *idev,
-					struct isci_request *isci_request)
-{
-	enum sci_status status = SCI_SUCCESS;
-	bool was_terminated = false;
-	bool needs_cleanup_handling = false;
-	unsigned long flags;
-	unsigned long termination_completed = 1;
-	struct completion *io_request_completion;
-
-	dev_dbg(&ihost->pdev->dev,
-		"%s: device = %p; request = %p\n",
-		__func__, idev, isci_request);
-
-	spin_lock_irqsave(&ihost->scic_lock, flags);
-
-	io_request_completion = isci_request->io_request_completion;
-
-	/* Note that we are not going to control
-	 * the target to abort the request.
-	 */
-	set_bit(IREQ_COMPLETE_IN_TARGET, &isci_request->flags);
-
-	/* Make sure the request wasn't just sitting around signalling
-	 * device condition (if the request handle is NULL, then the
-	 * request completed but needed additional handling here).
-	 */
-	if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
-		was_terminated = true;
-		needs_cleanup_handling = true;
-		status = sci_controller_terminate_request(ihost,
-							  idev,
-							  isci_request);
-	}
-	spin_unlock_irqrestore(&ihost->scic_lock, flags);
-
-	/*
-	 * The only time the request to terminate will
-	 * fail is when the io request is completed and
-	 * being aborted.
-	 */
-	if (status != SCI_SUCCESS) {
-		dev_dbg(&ihost->pdev->dev,
-			"%s: sci_controller_terminate_request"
-			" returned = 0x%x\n",
-			__func__, status);
-
-		isci_request->io_request_completion = NULL;
-
-	} else {
-		if (was_terminated) {
-			dev_dbg(&ihost->pdev->dev,
-				"%s: before completion wait (%p/%p)\n",
-				__func__, isci_request, io_request_completion);
-
-			/* Wait here for the request to complete. */
-			termination_completed
-				= wait_for_completion_timeout(
-				   io_request_completion,
-				   msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));
-
-			if (!termination_completed) {
-
-				/* The request to terminate has timed out. */
-				spin_lock_irqsave(&ihost->scic_lock, flags);
-
-				/* Check for state changes. */
-				if (!test_bit(IREQ_TERMINATED,
-					      &isci_request->flags)) {
-
-					/* The best we can do is to have the
-					 * request die a silent death if it
-					 * ever really completes.
-					 */
-					isci_request_mark_zombie(ihost,
-								 isci_request);
-					needs_cleanup_handling = true;
-				} else
-					termination_completed = 1;
-
-				spin_unlock_irqrestore(&ihost->scic_lock,
-						       flags);
-
-				if (!termination_completed) {
-
-					dev_dbg(&ihost->pdev->dev,
-						"%s: *** Timeout waiting for "
-						"termination(%p/%p)\n",
-						__func__, io_request_completion,
-						isci_request);
-
-					/* The request can no longer be referenced
-					 * safely since it may go away if the
-					 * termination every really does complete.
-					 */
-					isci_request = NULL;
-				}
-			}
-			if (termination_completed)
-				dev_dbg(&ihost->pdev->dev,
-					"%s: after completion wait (%p/%p)\n",
-					__func__, isci_request, io_request_completion);
-		}
-
-		if (termination_completed) {
-
-			isci_request->io_request_completion = NULL;
-
-			/* Peek at the status of the request. This will tell
-			 * us if there was special handling on the request such that it
-			 * needs to be detached and freed here.
-			 */
-			spin_lock_irqsave(&isci_request->state_lock, flags);
-
-			needs_cleanup_handling
-				= isci_request_is_dealloc_managed(
-					isci_request->status);
-
-			spin_unlock_irqrestore(&isci_request->state_lock, flags);
-
-		}
-		if (needs_cleanup_handling) {
-
-			dev_dbg(&ihost->pdev->dev,
-				"%s: cleanup isci_device=%p, request=%p\n",
-				__func__, idev, isci_request);
-
-			if (isci_request != NULL) {
-				spin_lock_irqsave(&ihost->scic_lock, flags);
-				isci_free_tag(ihost, isci_request->io_tag);
-				isci_request_change_state(isci_request, unallocated);
-				list_del_init(&isci_request->dev_node);
-				spin_unlock_irqrestore(&ihost->scic_lock, flags);
-			}
-		}
-	}
-}
-
-/**
- * isci_terminate_pending_requests() - This function will change the all of the
- *    requests on the given device's state to "aborting", will terminate the
- *    requests, and wait for them to complete. This function must only be
- *    called from a thread that can wait. Note that the requests are all
- *    terminated and completed (back to the host, if started there).
- * @isci_host: This parameter specifies SCU.
- * @idev: This parameter specifies the target.
- *
- */
-void isci_terminate_pending_requests(struct isci_host *ihost,
-				     struct isci_remote_device *idev)
-{
-	struct completion request_completion;
-	enum isci_request_status old_state;
-	unsigned long flags;
-	LIST_HEAD(list);
-
-	spin_lock_irqsave(&ihost->scic_lock, flags);
-	list_splice_init(&idev->reqs_in_process, &list);
-
-	/* assumes that isci_terminate_request_core deletes from the list */
-	while (!list_empty(&list)) {
-		struct isci_request *ireq = list_entry(list.next, typeof(*ireq), dev_node);
-
-		/* Change state to "terminating" if it is currently
-		 * "started".
-		 */
-		old_state = isci_request_change_started_to_newstate(ireq,
-								    &request_completion,
-								    terminating);
-		switch (old_state) {
-		case started:
-		case completed:
-		case aborting:
-			break;
-		default:
-			/* termination in progress, or otherwise dispositioned.
-			 * We know the request was on 'list' so should be safe
-			 * to move it back to reqs_in_process
-			 */
-			list_move(&ireq->dev_node, &idev->reqs_in_process);
-			ireq = NULL;
-			break;
-		}
-
-		if (!ireq)
-			continue;
-		spin_unlock_irqrestore(&ihost->scic_lock, flags);
-
-		init_completion(&request_completion);
-
-		dev_dbg(&ihost->pdev->dev,
-			"%s: idev=%p request=%p; task=%p old_state=%d\n",
-			__func__, idev, ireq,
-			(!test_bit(IREQ_TMF, &ireq->flags)
-				? isci_request_access_task(ireq)
-				: NULL),
-			old_state);
-
-		/* If the old_state is started:
-		 * This request was not already being aborted. If it had been,
-		 * then the aborting I/O (ie. the TMF request) would not be in
-		 * the aborting state, and thus would be terminated here. Note
-		 * that since the TMF completion's call to the kernel function
-		 * "complete()" does not happen until the pending I/O request
-		 * terminate fully completes, we do not have to implement a
-		 * special wait here for already aborting requests - the
-		 * termination of the TMF request will force the request
-		 * to finish it's already started terminate.
-		 *
-		 * If old_state == completed:
-		 * This request completed from the SCU hardware perspective
-		 * and now just needs cleaning up in terms of freeing the
-		 * request and potentially calling up to libsas.
-		 *
-		 * If old_state == aborting:
-		 * This request has already gone through a TMF timeout, but may
-		 * not have been terminated; needs cleaning up at least.
-		 */
-		isci_terminate_request_core(ihost, idev, ireq);
-		spin_lock_irqsave(&ihost->scic_lock, flags);
-	}
-	spin_unlock_irqrestore(&ihost->scic_lock, flags);
-}
-
-/**
  * isci_task_send_lu_reset_sas() - This function is called by of the SAS Domain
  * Template functions.
  * @lun: This parameter specifies the lun to be reset.
@@ -807,7 +399,7 @@ static int isci_task_send_lu_reset_sas(
  * value is "TMF_RESP_FUNC_COMPLETE", or the request timed-out (or
  * was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED").
  */
-	isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset, NULL, NULL);
+	isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset);
 
 	#define ISCI_LU_RESET_TIMEOUT_MS	2000 /* 2 second timeout. */
 	ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS);
@@ -826,42 +418,44 @@ static int isci_task_send_lu_reset_sas(
 
 int isci_task_lu_reset(struct domain_device *dev, u8 *lun)
 {
-	struct isci_host *isci_host = dev_to_ihost(dev);
-	struct isci_remote_device *isci_device;
+	struct isci_host *ihost = dev_to_ihost(dev);
+	struct isci_remote_device *idev;
 	unsigned long flags;
-	int ret;
+	int ret = TMF_RESP_FUNC_COMPLETE;
 
-	spin_lock_irqsave(&isci_host->scic_lock, flags);
-	isci_device = isci_lookup_device(dev);
-	spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	idev = isci_get_device(dev->lldd_dev);
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
-	dev_dbg(&isci_host->pdev->dev,
+	dev_dbg(&ihost->pdev->dev,
 		"%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
-		__func__, dev, isci_host, isci_device);
+		__func__, dev, ihost, idev);
 
-	if (!isci_device) {
-		/* If the device is gone, stop the escalations. */
-		dev_dbg(&isci_host->pdev->dev, "%s: No dev\n", __func__);
+	if (!idev) {
+		/* If the device is gone, escalate to I_T_Nexus_Reset. */
+		dev_dbg(&ihost->pdev->dev, "%s: No dev\n", __func__);
 
-		ret = TMF_RESP_FUNC_COMPLETE;
+		ret = TMF_RESP_FUNC_FAILED;
 		goto out;
 	}
 
-	/* Send the task management part of the reset. */
-	if (dev_is_sata(dev)) {
-		sas_ata_schedule_reset(dev);
-		ret = TMF_RESP_FUNC_COMPLETE;
-	} else
-		ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun);
-
-	/* If the LUN reset worked, all the I/O can now be terminated. */
-	if (ret == TMF_RESP_FUNC_COMPLETE)
-		/* Terminate all I/O now. */
-		isci_terminate_pending_requests(isci_host,
-						isci_device);
-
+	/* Suspend the RNC, kill all TCs */
+	if (isci_remote_device_suspend_terminate(ihost, idev, NULL)
+	    != SCI_SUCCESS) {
+		/* The suspend/terminate only fails if isci_get_device fails */
+		ret = TMF_RESP_FUNC_FAILED;
+		goto out;
+	}
+	/* All pending I/Os have been terminated and cleaned up. */
+	if (!test_bit(IDEV_GONE, &idev->flags)) {
+		if (dev_is_sata(dev))
+			sas_ata_schedule_reset(dev);
+		else
+			/* Send the task management part of the reset. */
+			ret = isci_task_send_lu_reset_sas(ihost, idev, lun);
+	}
  out:
-	isci_put_device(isci_device);
+	isci_put_device(idev);
 	return ret;
 }
 
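[Editorial note] The LUN-reset rework inverts the old order: outstanding I/O is quiesced first (suspend the RNC, terminate the task contexts) and only then, if the target is still present, is the reset itself issued. Sketch of the ordering, condensed from the hunk above:

	if (isci_remote_device_suspend_terminate(ihost, idev, NULL) != SCI_SUCCESS)
		return TMF_RESP_FUNC_FAILED;	/* only fails on a dead device */
	if (!test_bit(IDEV_GONE, &idev->flags)) {
		if (dev_is_sata(dev))
			sas_ata_schedule_reset(dev);	/* libata EH resets STP/SATA */
		else
			ret = isci_task_send_lu_reset_sas(ihost, idev, lun);
	}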
@@ -882,63 +476,6 @@ int isci_task_clear_nexus_ha(struct sas_ha_struct *ha)
 /* Task Management Functions. Must be called from process context. */
 
 /**
- * isci_abort_task_process_cb() - This is a helper function for the abort task
- *    TMF command. It manages the request state with respect to the successful
- *    transmission / completion of the abort task request.
- * @cb_state: This parameter specifies when this function was called - after
- *    the TMF request has been started and after it has timed-out.
- * @tmf: This parameter specifies the TMF in progress.
- *
- *
- */
-static void isci_abort_task_process_cb(
-	enum isci_tmf_cb_state cb_state,
-	struct isci_tmf *tmf,
-	void *cb_data)
-{
-	struct isci_request *old_request;
-
-	old_request = (struct isci_request *)cb_data;
-
-	dev_dbg(&old_request->isci_host->pdev->dev,
-		"%s: tmf=%p, old_request=%p\n",
-		__func__, tmf, old_request);
-
-	switch (cb_state) {
-
-	case isci_tmf_started:
-		/* The TMF has been started. Nothing to do here, since the
-		 * request state was already set to "aborted" by the abort
-		 * task function.
-		 */
-		if ((old_request->status != aborted)
-			&& (old_request->status != completed))
-			dev_dbg(&old_request->isci_host->pdev->dev,
-				"%s: Bad request status (%d): tmf=%p, old_request=%p\n",
-				__func__, old_request->status, tmf, old_request);
-		break;
-
-	case isci_tmf_timed_out:
-
-		/* Set the task's state to "aborting", since the abort task
-		 * function thread set it to "aborted" (above) in anticipation
-		 * of the task management request working correctly. Since the
-		 * timeout has now fired, the TMF request failed. We set the
-		 * state such that the request completion will indicate the
-		 * device is no longer present.
-		 */
-		isci_request_change_state(old_request, aborting);
-		break;
-
-	default:
-		dev_dbg(&old_request->isci_host->pdev->dev,
-			"%s: Bad cb_state (%d): tmf=%p, old_request=%p\n",
-			__func__, cb_state, tmf, old_request);
-		break;
-	}
-}
-
-/**
  * isci_task_abort_task() - This function is one of the SAS Domain Template
  * functions. This function is called by libsas to abort a specified task.
  * @task: This parameter specifies the SAS task to abort.
@@ -947,22 +484,20 @@ static void isci_abort_task_process_cb(
  */
 int isci_task_abort_task(struct sas_task *task)
 {
-	struct isci_host *isci_host = dev_to_ihost(task->dev);
+	struct isci_host *ihost = dev_to_ihost(task->dev);
 	DECLARE_COMPLETION_ONSTACK(aborted_io_completion);
 	struct isci_request *old_request = NULL;
-	enum isci_request_status old_state;
-	struct isci_remote_device *isci_device = NULL;
+	struct isci_remote_device *idev = NULL;
 	struct isci_tmf tmf;
 	int ret = TMF_RESP_FUNC_FAILED;
 	unsigned long flags;
-	int perform_termination = 0;
 
 	/* Get the isci_request reference from the task. Note that
 	 * this check does not depend on the pending request list
 	 * in the device, because tasks driving resets may land here
 	 * after completion in the core.
 	 */
-	spin_lock_irqsave(&isci_host->scic_lock, flags);
+	spin_lock_irqsave(&ihost->scic_lock, flags);
 	spin_lock(&task->task_state_lock);
 
 	old_request = task->lldd_task;
@@ -971,20 +506,29 @@ int isci_task_abort_task(struct sas_task *task)
 	if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
 	    (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
 	    old_request)
-		isci_device = isci_lookup_device(task->dev);
+		idev = isci_get_device(task->dev->lldd_dev);
 
 	spin_unlock(&task->task_state_lock);
-	spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
-	dev_dbg(&isci_host->pdev->dev,
-		"%s: dev = %p, task = %p, old_request == %p\n",
-		__func__, isci_device, task, old_request);
+	dev_warn(&ihost->pdev->dev,
+		 "%s: dev = %p (%s%s), task = %p, old_request == %p\n",
+		 __func__, idev,
+		 (dev_is_sata(task->dev) ? "STP/SATA"
+					 : ((dev_is_expander(task->dev))
+						? "SMP"
+						: "SSP")),
+		 ((idev) ? ((test_bit(IDEV_GONE, &idev->flags))
+			   ? " IDEV_GONE"
+			   : "")
+			 : " <NULL>"),
+		 task, old_request);
 
 	/* Device reset conditions signalled in task_state_flags are the
 	 * responsbility of libsas to observe at the start of the error
 	 * handler thread.
 	 */
-	if (!isci_device || !old_request) {
+	if (!idev || !old_request) {
 		/* The request has already completed and there
 		 * is nothing to do here other than to set the task
 		 * done bit, and indicate that the task abort function
@@ -998,108 +542,72 @@ int isci_task_abort_task(struct sas_task *task)
 
 		ret = TMF_RESP_FUNC_COMPLETE;
 
-		dev_dbg(&isci_host->pdev->dev,
-			"%s: abort task not needed for %p\n",
-			__func__, task);
+		dev_warn(&ihost->pdev->dev,
+			 "%s: abort task not needed for %p\n",
+			 __func__, task);
 		goto out;
 	}
-
-	spin_lock_irqsave(&isci_host->scic_lock, flags);
-
-	/* Check the request status and change to "aborted" if currently
-	 * "starting"; if true then set the I/O kernel completion
-	 * struct that will be triggered when the request completes.
-	 */
-	old_state = isci_task_validate_request_to_abort(
-				old_request, isci_host, isci_device,
-				&aborted_io_completion);
-	if ((old_state != started) &&
-	    (old_state != completed) &&
-	    (old_state != aborting)) {
-
-		spin_unlock_irqrestore(&isci_host->scic_lock, flags);
-
-		/* The request was already being handled by someone else (because
-		 * they got to set the state away from started).
-		 */
-		dev_dbg(&isci_host->pdev->dev,
-			"%s: device = %p; old_request %p already being aborted\n",
-			__func__,
-			isci_device, old_request);
-		ret = TMF_RESP_FUNC_COMPLETE;
+	/* Suspend the RNC, kill the TC */
+	if (isci_remote_device_suspend_terminate(ihost, idev, old_request)
+	    != SCI_SUCCESS) {
+		dev_warn(&ihost->pdev->dev,
+			 "%s: isci_remote_device_reset_terminate(dev=%p, "
+			 "req=%p, task=%p) failed\n",
+			 __func__, idev, old_request, task);
+		ret = TMF_RESP_FUNC_FAILED;
 		goto out;
 	}
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+
 	if (task->task_proto == SAS_PROTOCOL_SMP ||
 	    sas_protocol_ata(task->task_proto) ||
-	    test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
+	    test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags) ||
+	    test_bit(IDEV_GONE, &idev->flags)) {
 
-		spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+		spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
-		dev_dbg(&isci_host->pdev->dev,
-			"%s: %s request"
-			" or complete_in_target (%d), thus no TMF\n",
-			__func__,
-			((task->task_proto == SAS_PROTOCOL_SMP)
-			 ? "SMP"
-			 : (sas_protocol_ata(task->task_proto)
-				? "SATA/STP"
-				: "<other>")
-			 ),
-			test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags));
-
-		if (test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
-			spin_lock_irqsave(&task->task_state_lock, flags);
-			task->task_state_flags |= SAS_TASK_STATE_DONE;
-			task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
-						    SAS_TASK_STATE_PENDING);
-			spin_unlock_irqrestore(&task->task_state_lock, flags);
-			ret = TMF_RESP_FUNC_COMPLETE;
-		} else {
-			spin_lock_irqsave(&task->task_state_lock, flags);
-			task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
-						    SAS_TASK_STATE_PENDING);
-			spin_unlock_irqrestore(&task->task_state_lock, flags);
-		}
+		/* No task to send, so explicitly resume the device here */
+		isci_remote_device_resume_from_abort(ihost, idev);
 
-		/* STP and SMP devices are not sent a TMF, but the
-		 * outstanding I/O request is terminated below. This is
-		 * because SATA/STP and SMP discovery path timeouts directly
-		 * call the abort task interface for cleanup.
-		 */
-		perform_termination = 1;
+		dev_warn(&ihost->pdev->dev,
+			 "%s: %s request"
+			 " or complete_in_target (%d), "
+			 "or IDEV_GONE (%d), thus no TMF\n",
+			 __func__,
+			 ((task->task_proto == SAS_PROTOCOL_SMP)
+			  ? "SMP"
+			  : (sas_protocol_ata(task->task_proto)
+				? "SATA/STP"
+				: "<other>")
+			  ),
+			 test_bit(IREQ_COMPLETE_IN_TARGET,
+				  &old_request->flags),
+			 test_bit(IDEV_GONE, &idev->flags));
+
+		spin_lock_irqsave(&task->task_state_lock, flags);
+		task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
+					    SAS_TASK_STATE_PENDING);
+		task->task_state_flags |= SAS_TASK_STATE_DONE;
+		spin_unlock_irqrestore(&task->task_state_lock, flags);
 
+		ret = TMF_RESP_FUNC_COMPLETE;
 	} else {
 		/* Fill in the tmf stucture */
 		isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
-					       isci_abort_task_process_cb,
 					       old_request);
 
-		spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+		spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
+		/* Send the task management request. */
 		#define ISCI_ABORT_TASK_TIMEOUT_MS	500 /* 1/2 second timeout */
-		ret = isci_task_execute_tmf(isci_host, isci_device, &tmf,
+		ret = isci_task_execute_tmf(ihost, idev, &tmf,
 					    ISCI_ABORT_TASK_TIMEOUT_MS);
-
-		if (ret == TMF_RESP_FUNC_COMPLETE)
-			perform_termination = 1;
-		else
-			dev_dbg(&isci_host->pdev->dev,
-				"%s: isci_task_send_tmf failed\n", __func__);
 	}
-	if (perform_termination) {
-		set_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags);
-
-		/* Clean up the request on our side, and wait for the aborted
-		 * I/O to complete.
-		 */
-		isci_terminate_request_core(isci_host, isci_device,
-					    old_request);
-	}
-
-	/* Make sure we do not leave a reference to aborted_io_completion */
-	old_request->io_request_completion = NULL;
- out:
-	isci_put_device(isci_device);
+out:
+	dev_warn(&ihost->pdev->dev,
+		 "%s: Done; dev = %p, task = %p , old_request == %p\n",
+		 __func__, idev, task, old_request);
+	isci_put_device(idev);
 	return ret;
 }
 
@@ -1195,14 +703,11 @@ isci_task_request_complete(struct isci_host *ihost,
 {
 	struct isci_tmf *tmf = isci_request_access_tmf(ireq);
 	struct completion *tmf_complete = NULL;
-	struct completion *request_complete = ireq->io_request_completion;
 
 	dev_dbg(&ihost->pdev->dev,
 		"%s: request = %p, status=%d\n",
 		__func__, ireq, completion_status);
 
-	isci_request_change_state(ireq, completed);
-
 	set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags);
 
 	if (tmf) {
@@ -1226,20 +731,11 @@ isci_task_request_complete(struct isci_host *ihost,
 	 */
 	set_bit(IREQ_TERMINATED, &ireq->flags);
 
-	/* As soon as something is in the terminate path, deallocation is
-	 * managed there. Note that the final non-managed state of a task
-	 * request is "completed".
-	 */
-	if ((ireq->status == completed) ||
-	    !isci_request_is_dealloc_managed(ireq->status)) {
-		isci_request_change_state(ireq, unallocated);
-		isci_free_tag(ihost, ireq->io_tag);
-		list_del_init(&ireq->dev_node);
-	}
+	if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
+		wake_up_all(&ihost->eventq);
 
-	/* "request_complete" is set if the task was being terminated. */
-	if (request_complete)
-		complete(request_complete);
+	if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags))
+		isci_free_tag(ihost, ireq->io_tag);
 
 	/* The task management part completes last. */
 	if (tmf_complete)
@@ -1250,48 +746,38 @@ static int isci_reset_device(struct isci_host *ihost,
1250 struct domain_device *dev, 746 struct domain_device *dev,
1251 struct isci_remote_device *idev) 747 struct isci_remote_device *idev)
1252{ 748{
1253 int rc; 749 int rc = TMF_RESP_FUNC_COMPLETE, reset_stat = -1;
1254 unsigned long flags;
1255 enum sci_status status;
1256 struct sas_phy *phy = sas_get_local_phy(dev); 750 struct sas_phy *phy = sas_get_local_phy(dev);
1257 struct isci_port *iport = dev->port->lldd_port; 751 struct isci_port *iport = dev->port->lldd_port;
1258 752
1259 dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev); 753 dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);
1260 754
1261 spin_lock_irqsave(&ihost->scic_lock, flags); 755 /* Suspend the RNC, terminate all outstanding TCs. */
1262 status = sci_remote_device_reset(idev); 756 if (isci_remote_device_suspend_terminate(ihost, idev, NULL)
1263 spin_unlock_irqrestore(&ihost->scic_lock, flags); 757 != SCI_SUCCESS) {
1264
1265 if (status != SCI_SUCCESS) {
1266 dev_dbg(&ihost->pdev->dev,
1267 "%s: sci_remote_device_reset(%p) returned %d!\n",
1268 __func__, idev, status);
1269 rc = TMF_RESP_FUNC_FAILED; 758 rc = TMF_RESP_FUNC_FAILED;
1270 goto out; 759 goto out;
1271 } 760 }
1272 761 /* Note that since the termination for outstanding requests succeeded,
1273 if (scsi_is_sas_phy_local(phy)) { 762 * this function will return success. This is because the resets will
1274	 		struct isci_phy *iphy = &ihost->phys[phy->number];	 763	 	 * only fail if the device has been removed (i.e., hotplug), and the
1275	 	 764	 	 * primary duty of this function is to clean up tasks, so that is the
1276 rc = isci_port_perform_hard_reset(ihost, iport, iphy); 765 * relevant status.
1277 } else 766 */
1278 rc = sas_phy_reset(phy, !dev_is_sata(dev)); 767 if (!test_bit(IDEV_GONE, &idev->flags)) {
1279 768 if (scsi_is_sas_phy_local(phy)) {
1280 /* Terminate in-progress I/O now. */ 769 struct isci_phy *iphy = &ihost->phys[phy->number];
1281 isci_remote_device_nuke_requests(ihost, idev); 770
1282 771 reset_stat = isci_port_perform_hard_reset(ihost, iport,
1283 /* Since all pending TCs have been cleaned, resume the RNC. */ 772 iphy);
1284 spin_lock_irqsave(&ihost->scic_lock, flags); 773 } else
1285 status = sci_remote_device_reset_complete(idev); 774 reset_stat = sas_phy_reset(phy, !dev_is_sata(dev));
1286 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1287
1288 if (status != SCI_SUCCESS) {
1289 dev_dbg(&ihost->pdev->dev,
1290 "%s: sci_remote_device_reset_complete(%p) "
1291 "returned %d!\n", __func__, idev, status);
1292 } 775 }
776 /* Explicitly resume the RNC here, since there was no task sent. */
777 isci_remote_device_resume_from_abort(ihost, idev);
1293 778
1294 dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev); 779 dev_dbg(&ihost->pdev->dev, "%s: idev %p complete, reset_stat=%d.\n",
780 __func__, idev, reset_stat);
1295 out: 781 out:
1296 sas_put_local_phy(phy); 782 sas_put_local_phy(phy);
1297 return rc; 783 return rc;
@@ -1305,7 +791,7 @@ int isci_task_I_T_nexus_reset(struct domain_device *dev)
1305 int ret; 791 int ret;
1306 792
1307 spin_lock_irqsave(&ihost->scic_lock, flags); 793 spin_lock_irqsave(&ihost->scic_lock, flags);
1308 idev = isci_lookup_device(dev); 794 idev = isci_get_device(dev->lldd_dev);
1309 spin_unlock_irqrestore(&ihost->scic_lock, flags); 795 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1310 796
1311 if (!idev) { 797 if (!idev) {
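
The abort path above replaces the per-request completion pointers and TMF state callbacks with flag bits tested by the completion handler plus a single host-wide wait queue (IREQ_ABORT_PATH_ACTIVE cleared, then wake_up_all(&ihost->eventq)). Below is a minimal userspace sketch of that flag-plus-waitqueue pattern, using pthreads as a stand-in for the kernel's wait_event machinery; all names here are illustrative, not driver API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct fake_host {
	pthread_mutex_t lock;
	pthread_cond_t eventq;          /* stands in for ihost->eventq */
};

struct fake_request {
	struct fake_host *host;
	bool abort_path_active;         /* stands in for IREQ_ABORT_PATH_ACTIVE */
	bool terminated;                /* stands in for IREQ_TERMINATED */
};

/* Completion side: mark the request terminated and wake every waiter. */
static void *fake_complete(void *arg)
{
	struct fake_request *r = arg;
	struct fake_host *h = r->host;

	sleep(1);                       /* pretend hardware latency */
	pthread_mutex_lock(&h->lock);
	r->terminated = true;
	if (r->abort_path_active) {
		r->abort_path_active = false;
		pthread_cond_broadcast(&h->eventq);   /* like wake_up_all() */
	}
	pthread_mutex_unlock(&h->lock);
	return NULL;
}

int main(void)
{
	struct fake_host h = { PTHREAD_MUTEX_INITIALIZER,
			       PTHREAD_COND_INITIALIZER };
	struct fake_request r = { &h, true, false };
	pthread_t t;

	pthread_create(&t, NULL, fake_complete, &r);

	/* Abort side: sleep on the shared event queue until the
	 * completion path clears the per-request flag. */
	pthread_mutex_lock(&h.lock);
	while (r.abort_path_active)
		pthread_cond_wait(&h.eventq, &h.lock);
	pthread_mutex_unlock(&h.lock);

	printf("request terminated=%d\n", r.terminated);
	return pthread_join(t, NULL);
}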
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
index 7b6d0e32fd9b..9c06cbad1d26 100644
--- a/drivers/scsi/isci/task.h
+++ b/drivers/scsi/isci/task.h
@@ -63,19 +63,6 @@
63struct isci_request; 63struct isci_request;
64 64
65/** 65/**
66 * enum isci_tmf_cb_state - This enum defines the possible states in which the
67 * TMF callback function is invoked during the TMF execution process.
68 *
69 *
70 */
71enum isci_tmf_cb_state {
72
73 isci_tmf_init_state = 0,
74 isci_tmf_started,
75 isci_tmf_timed_out
76};
77
78/**
79 * enum isci_tmf_function_codes - This enum defines the possible preparations 66 * enum isci_tmf_function_codes - This enum defines the possible preparations
80 * of task management requests. 67 * of task management requests.
81 * 68 *
@@ -87,6 +74,7 @@ enum isci_tmf_function_codes {
87 isci_tmf_ssp_task_abort = TMF_ABORT_TASK, 74 isci_tmf_ssp_task_abort = TMF_ABORT_TASK,
88 isci_tmf_ssp_lun_reset = TMF_LU_RESET, 75 isci_tmf_ssp_lun_reset = TMF_LU_RESET,
89}; 76};
77
90/** 78/**
91 * struct isci_tmf - This class represents the task management object which 79 * struct isci_tmf - This class represents the task management object which
92 * acts as an interface to libsas for processing task management requests 80 * acts as an interface to libsas for processing task management requests
@@ -106,15 +94,6 @@ struct isci_tmf {
106 u16 io_tag; 94 u16 io_tag;
107 enum isci_tmf_function_codes tmf_code; 95 enum isci_tmf_function_codes tmf_code;
108 int status; 96 int status;
109
110 /* The optional callback function allows the user process to
111 * track the TMF transmit / timeout conditions.
112 */
113 void (*cb_state_func)(
114 enum isci_tmf_cb_state,
115 struct isci_tmf *, void *);
116 void *cb_data;
117
118}; 97};
119 98
120static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf) 99static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf)
@@ -208,113 +187,4 @@ int isci_queuecommand(
208 struct scsi_cmnd *scsi_cmd, 187 struct scsi_cmnd *scsi_cmd,
209 void (*donefunc)(struct scsi_cmnd *)); 188 void (*donefunc)(struct scsi_cmnd *));
210 189
211/**
212 * enum isci_completion_selection - This enum defines the possible actions to
213 * take with respect to a given request's notification back to libsas.
214 *
215 *
216 */
217enum isci_completion_selection {
218
219 isci_perform_normal_io_completion, /* Normal notify (task_done) */
220 isci_perform_aborted_io_completion, /* No notification. */
221 isci_perform_error_io_completion /* Use sas_task_abort */
222};
223
224/**
225 * isci_task_set_completion_status() - This function sets the completion status
226 * for the request.
227 * @task: This parameter is the completed request.
228 * @response: This parameter is the response code for the completed task.
229 * @status: This parameter is the status code for the completed task.
230 *
231* @return The new notification mode for the request.
232*/
233static inline enum isci_completion_selection
234isci_task_set_completion_status(
235 struct sas_task *task,
236 enum service_response response,
237 enum exec_status status,
238 enum isci_completion_selection task_notification_selection)
239{
240 unsigned long flags;
241
242 spin_lock_irqsave(&task->task_state_lock, flags);
243
244 /* If a device reset is being indicated, make sure the I/O
245 * is in the error path.
246 */
247 if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) {
248 /* Fail the I/O to make sure it goes into the error path. */
249 response = SAS_TASK_UNDELIVERED;
250 status = SAM_STAT_TASK_ABORTED;
251
252 task_notification_selection = isci_perform_error_io_completion;
253 }
254 task->task_status.resp = response;
255 task->task_status.stat = status;
256
257 switch (task->task_proto) {
258
259 case SAS_PROTOCOL_SATA:
260 case SAS_PROTOCOL_STP:
261 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
262
263 if (task_notification_selection
264 == isci_perform_error_io_completion) {
265	 			/* SATA/STP I/O has its own means of scheduling device
266 * error handling on the normal path.
267 */
268 task_notification_selection
269 = isci_perform_normal_io_completion;
270 }
271 break;
272 default:
273 break;
274 }
275
276 switch (task_notification_selection) {
277
278 case isci_perform_error_io_completion:
279
280 if (task->task_proto == SAS_PROTOCOL_SMP) {
281 /* There is no error escalation in the SMP case.
282 * Convert to a normal completion to avoid the
283 * timeout in the discovery path and to let the
284 * next action take place quickly.
285 */
286 task_notification_selection
287 = isci_perform_normal_io_completion;
288
289 /* Fall through to the normal case... */
290 } else {
291 /* Use sas_task_abort */
292 /* Leave SAS_TASK_STATE_DONE clear
293 * Leave SAS_TASK_AT_INITIATOR set.
294 */
295 break;
296 }
297
298 case isci_perform_aborted_io_completion:
299 /* This path can occur with task-managed requests as well as
300 * requests terminated because of LUN or device resets.
301 */
302 /* Fall through to the normal case... */
303 case isci_perform_normal_io_completion:
304 /* Normal notification (task_done) */
305 task->task_state_flags |= SAS_TASK_STATE_DONE;
306 task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
307 SAS_TASK_STATE_PENDING);
308 break;
309 default:
310 WARN_ONCE(1, "unknown task_notification_selection: %d\n",
311 task_notification_selection);
312 break;
313 }
314
315 spin_unlock_irqrestore(&task->task_state_lock, flags);
316
317 return task_notification_selection;
318
319}
320#endif /* !defined(_SCI_TASK_H_) */ 190#endif /* !defined(_SCI_TASK_H_) */
diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c
index 16f88ab939c8..04a6d0d59a22 100644
--- a/drivers/scsi/isci/unsolicited_frame_control.c
+++ b/drivers/scsi/isci/unsolicited_frame_control.c
@@ -57,31 +57,19 @@
57#include "unsolicited_frame_control.h" 57#include "unsolicited_frame_control.h"
58#include "registers.h" 58#include "registers.h"
59 59
60int sci_unsolicited_frame_control_construct(struct isci_host *ihost) 60void sci_unsolicited_frame_control_construct(struct isci_host *ihost)
61{ 61{
62 struct sci_unsolicited_frame_control *uf_control = &ihost->uf_control; 62 struct sci_unsolicited_frame_control *uf_control = &ihost->uf_control;
63 struct sci_unsolicited_frame *uf; 63 struct sci_unsolicited_frame *uf;
64 u32 buf_len, header_len, i; 64 dma_addr_t dma = ihost->ufi_dma;
65 dma_addr_t dma; 65 void *virt = ihost->ufi_buf;
66 size_t size; 66 int i;
67 void *virt;
68
69 /*
70 * Prepare all of the memory sizes for the UF headers, UF address
71 * table, and UF buffers themselves.
72 */
73 buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
74 header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header);
75 size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(uf_control->address_table.array[0]);
76 67
77 /* 68 /*
78 * The Unsolicited Frame buffers are set at the start of the UF 69 * The Unsolicited Frame buffers are set at the start of the UF
79 * memory descriptor entry. The headers and address table will be 70 * memory descriptor entry. The headers and address table will be
80 * placed after the buffers. 71 * placed after the buffers.
81 */ 72 */
82 virt = dmam_alloc_coherent(&ihost->pdev->dev, size, &dma, GFP_KERNEL);
83 if (!virt)
84 return -ENOMEM;
85 73
86 /* 74 /*
87 * Program the location of the UF header table into the SCU. 75 * Program the location of the UF header table into the SCU.
@@ -93,8 +81,8 @@ int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
93 * headers, since we program the UF address table pointers to 81 * headers, since we program the UF address table pointers to
94 * NULL. 82 * NULL.
95 */ 83 */
96 uf_control->headers.physical_address = dma + buf_len; 84 uf_control->headers.physical_address = dma + SCI_UFI_BUF_SIZE;
97 uf_control->headers.array = virt + buf_len; 85 uf_control->headers.array = virt + SCI_UFI_BUF_SIZE;
98 86
99 /* 87 /*
100 * Program the location of the UF address table into the SCU. 88 * Program the location of the UF address table into the SCU.
@@ -103,8 +91,8 @@ int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
103 * byte boundary already due to above programming headers being on a 91 * byte boundary already due to above programming headers being on a
104 * 64-bit boundary and headers are on a 64-bytes in size. 92 * 64-bit boundary and headers are on a 64-bytes in size.
105 */ 93 */
106 uf_control->address_table.physical_address = dma + buf_len + header_len; 94 uf_control->address_table.physical_address = dma + SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE;
107 uf_control->address_table.array = virt + buf_len + header_len; 95 uf_control->address_table.array = virt + SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE;
108 uf_control->get = 0; 96 uf_control->get = 0;
109 97
110 /* 98 /*
@@ -135,8 +123,6 @@ int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
135 virt += SCU_UNSOLICITED_FRAME_BUFFER_SIZE; 123 virt += SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
136 dma += SCU_UNSOLICITED_FRAME_BUFFER_SIZE; 124 dma += SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
137 } 125 }
138
139 return 0;
140} 126}
141 127
142enum sci_status sci_unsolicited_frame_control_get_header(struct sci_unsolicited_frame_control *uf_control, 128enum sci_status sci_unsolicited_frame_control_get_header(struct sci_unsolicited_frame_control *uf_control,
diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h
index 75d896686f5a..1bc551ec611f 100644
--- a/drivers/scsi/isci/unsolicited_frame_control.h
+++ b/drivers/scsi/isci/unsolicited_frame_control.h
@@ -257,9 +257,13 @@ struct sci_unsolicited_frame_control {
257 257
258}; 258};
259 259
260#define SCI_UFI_BUF_SIZE (SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE)
261#define SCI_UFI_HDR_SIZE (SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header))
262#define SCI_UFI_TOTAL_SIZE (SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE + SCU_MAX_UNSOLICITED_FRAMES * sizeof(u64))
263
260struct isci_host; 264struct isci_host;
261 265
262int sci_unsolicited_frame_control_construct(struct isci_host *ihost); 266void sci_unsolicited_frame_control_construct(struct isci_host *ihost);
263 267
264enum sci_status sci_unsolicited_frame_control_get_header( 268enum sci_status sci_unsolicited_frame_control_get_header(
265 struct sci_unsolicited_frame_control *uf_control, 269 struct sci_unsolicited_frame_control *uf_control,
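
The SCI_UFI_* macros added above describe how one contiguous coherent allocation is carved up: frame buffers first, then frame headers, then one u64 address-table slot per frame. The standalone sketch below walks through that layout arithmetic; the constants are assumed stand-ins, not the real SCU values.

#include <stdint.h>
#include <stdio.h>

/* Assumed stand-ins for the SCU constants; not the driver's real values. */
#define MAX_FRAMES    16      /* SCU_MAX_UNSOLICITED_FRAMES */
#define FRAME_BUF_SZ  1024    /* SCU_UNSOLICITED_FRAME_BUFFER_SIZE */
#define HDR_SZ        64      /* sizeof(struct scu_unsolicited_frame_header) */

#define UFI_BUF_SIZE   (MAX_FRAMES * FRAME_BUF_SZ)
#define UFI_HDR_SIZE   (MAX_FRAMES * HDR_SZ)
#define UFI_TOTAL_SIZE (UFI_BUF_SIZE + UFI_HDR_SIZE + \
			MAX_FRAMES * sizeof(uint64_t))

int main(void)
{
	unsigned long base = 0x100000UL;  /* pretend coherent DMA base */

	/* Buffers first, headers next, address table last. */
	unsigned long buffers    = base;
	unsigned long headers    = base + UFI_BUF_SIZE;
	unsigned long addr_table = base + UFI_BUF_SIZE + UFI_HDR_SIZE;

	printf("total=%zu buffers=%#lx headers=%#lx table=%#lx\n",
	       (size_t)UFI_TOTAL_SIZE, buffers, headers, addr_table);
	return 0;
}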
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index cc83b66d45b7..c1402fb499ab 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -648,6 +648,7 @@ int fc_lport_destroy(struct fc_lport *lport)
648 lport->tt.fcp_abort_io(lport); 648 lport->tt.fcp_abort_io(lport);
649 lport->tt.disc_stop_final(lport); 649 lport->tt.disc_stop_final(lport);
650 lport->tt.exch_mgr_reset(lport, 0, 0); 650 lport->tt.exch_mgr_reset(lport, 0, 0);
651 cancel_delayed_work_sync(&lport->retry_work);
651 fc_fc4_del_lport(lport); 652 fc_fc4_del_lport(lport);
652 return 0; 653 return 0;
653} 654}
@@ -1564,7 +1565,6 @@ static void fc_lport_timeout(struct work_struct *work)
1564 1565
1565 switch (lport->state) { 1566 switch (lport->state) {
1566 case LPORT_ST_DISABLED: 1567 case LPORT_ST_DISABLED:
1567 WARN_ON(1);
1568 break; 1568 break;
1569 case LPORT_ST_READY: 1569 case LPORT_ST_READY:
1570 break; 1570 break;
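
The one-line fc_lport_destroy() fix above cancels the lport's delayed retry work synchronously before teardown, so the work item can never run against a freed port. A userspace analogue of that cancel-then-free ordering is sketched below, with pthreads standing in for the workqueue; the names are illustrative only.

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>
#include <unistd.h>

struct fake_lport {
	pthread_t retry_thread;
	atomic_bool stopping;
};

/* Periodic retry worker; touches *lp on every pass. */
static void *retry_work(void *arg)
{
	struct fake_lport *lp = arg;

	while (!atomic_load(&lp->stopping))
		usleep(100 * 1000);     /* pretend retry interval, then rearm */
	return NULL;
}

/* Teardown: stop the worker and wait for it before freeing -- the
 * userspace equivalent of cancel_delayed_work_sync() before release. */
static void fake_lport_destroy(struct fake_lport *lp)
{
	atomic_store(&lp->stopping, true);
	pthread_join(lp->retry_thread, NULL);
	free(lp);                       /* safe: no worker can touch lp now */
}

int main(void)
{
	struct fake_lport *lp = calloc(1, sizeof(*lp));

	if (!lp)
		return 1;
	pthread_create(&lp->retry_thread, NULL, retry_work, lp);
	usleep(250 * 1000);
	fake_lport_destroy(lp);
	return 0;
}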
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 3a1ffdd6d831..e5da6da20f8a 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -93,6 +93,9 @@ struct lpfc_sli2_slim;
93/* lpfc wait event data ready flag */ 93/* lpfc wait event data ready flag */
94#define LPFC_DATA_READY (1<<0) 94#define LPFC_DATA_READY (1<<0)
95 95
96/* queue dump line buffer size */
97#define LPFC_LBUF_SZ 128
98
96enum lpfc_polling_flags { 99enum lpfc_polling_flags {
97 ENABLE_FCP_RING_POLLING = 0x1, 100 ENABLE_FCP_RING_POLLING = 0x1,
98 DISABLE_FCP_RING_INT = 0x2 101 DISABLE_FCP_RING_INT = 0x2
@@ -620,6 +623,7 @@ struct lpfc_hba {
620#define HBA_AER_ENABLED 0x1000 /* AER enabled with HBA */ 623#define HBA_AER_ENABLED 0x1000 /* AER enabled with HBA */
621#define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */ 624#define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */
622#define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */ 625#define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */
626#define HBA_FCP_IOQ_FLUSH 0x8000 /* FCP I/O queues being flushed */
623 uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ 627 uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
624 struct lpfc_dmabuf slim2p; 628 struct lpfc_dmabuf slim2p;
625 629
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 141e4b40bb55..253d9a857346 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009-2011 Emulex. All rights reserved. * 4 * Copyright (C) 2009-2012 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -599,6 +599,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
599 599
600 cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp; 600 cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
601 cmdiocbq->context1 = dd_data; 601 cmdiocbq->context1 = dd_data;
602 cmdiocbq->context_un.ndlp = ndlp;
602 cmdiocbq->context2 = rspiocbq; 603 cmdiocbq->context2 = rspiocbq;
603 dd_data->type = TYPE_IOCB; 604 dd_data->type = TYPE_IOCB;
604 dd_data->context_un.iocb.cmdiocbq = cmdiocbq; 605 dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
@@ -3978,6 +3979,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
3978 } else if (subsys == SLI_CONFIG_SUBSYS_COMN) { 3979 } else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
3979 switch (opcode) { 3980 switch (opcode) {
3980 case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES: 3981 case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
3982 case COMN_OPCODE_GET_CNTL_ATTRIBUTES:
3981 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3983 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3982 "3106 Handled SLI_CONFIG " 3984 "3106 Handled SLI_CONFIG "
3983 "subsys_comn, opcode:x%x\n", 3985 "subsys_comn, opcode:x%x\n",
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index edfe61fc52b1..67f7d0a160d1 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2010 Emulex. All rights reserved. * 4 * Copyright (C) 2010-2012 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -249,6 +249,7 @@ struct lpfc_sli_config_emb1_subsys {
249#define COMN_OPCODE_READ_OBJECT_LIST 0xAD 249#define COMN_OPCODE_READ_OBJECT_LIST 0xAD
250#define COMN_OPCODE_DELETE_OBJECT 0xAE 250#define COMN_OPCODE_DELETE_OBJECT 0xAE
251#define COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES 0x79 251#define COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES 0x79
252#define COMN_OPCODE_GET_CNTL_ATTRIBUTES 0x20
252 uint32_t timeout; 253 uint32_t timeout;
253 uint32_t request_length; 254 uint32_t request_length;
254 uint32_t word9; 255 uint32_t word9;
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 330dd7192a7f..9b2a16f3bc79 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -254,6 +254,7 @@ int
254lpfc_sli_handle_fast_ring_event(struct lpfc_hba *, 254lpfc_sli_handle_fast_ring_event(struct lpfc_hba *,
255 struct lpfc_sli_ring *, uint32_t); 255 struct lpfc_sli_ring *, uint32_t);
256 256
257struct lpfc_iocbq *__lpfc_sli_get_iocbq(struct lpfc_hba *);
257struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *); 258struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *);
258void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *); 259void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *);
259uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *); 260uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *);
@@ -460,6 +461,7 @@ int lpfc_hba_init_link_fc_topology(struct lpfc_hba *, uint32_t, uint32_t);
460int lpfc_issue_reg_vfi(struct lpfc_vport *); 461int lpfc_issue_reg_vfi(struct lpfc_vport *);
461int lpfc_issue_unreg_vfi(struct lpfc_vport *); 462int lpfc_issue_unreg_vfi(struct lpfc_vport *);
462int lpfc_selective_reset(struct lpfc_hba *); 463int lpfc_selective_reset(struct lpfc_hba *);
463int lpfc_sli4_read_config(struct lpfc_hba *phba); 464int lpfc_sli4_read_config(struct lpfc_hba *);
464int lpfc_scsi_buf_update(struct lpfc_hba *phba); 465void lpfc_sli4_node_prep(struct lpfc_hba *);
465void lpfc_sli4_node_prep(struct lpfc_hba *phba); 466int lpfc_sli4_xri_sgl_update(struct lpfc_hba *);
467void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index af04b0d6688d..3217d63ed282 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -4466,3 +4466,49 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
4466#endif 4466#endif
4467 return; 4467 return;
4468} 4468}
4469
4470/*
4471 * Driver debug utility routines outside of debugfs. The debug utility
4472 * routines implemented here is intended to be used in the instrumented
4473 * debug driver for debugging host or port issues.
4474 */
4475
4476/**
4477 * lpfc_debug_dump_all_queues - dump all the queues associated with an HBA
4478 * @phba: Pointer to HBA context object.
4479 *
4480 * This function dumps entries of all the queues associated with the @phba.
4481 **/
4482void
4483lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
4484{
4485 int fcp_wqidx;
4486
4487 /*
4488 * Dump Work Queues (WQs)
4489 */
4490 lpfc_debug_dump_mbx_wq(phba);
4491 lpfc_debug_dump_els_wq(phba);
4492
4493 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++)
4494 lpfc_debug_dump_fcp_wq(phba, fcp_wqidx);
4495
4496 lpfc_debug_dump_hdr_rq(phba);
4497 lpfc_debug_dump_dat_rq(phba);
4498 /*
4499 * Dump Complete Queues (CQs)
4500 */
4501 lpfc_debug_dump_mbx_cq(phba);
4502 lpfc_debug_dump_els_cq(phba);
4503
4504 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++)
4505 lpfc_debug_dump_fcp_cq(phba, fcp_wqidx);
4506
4507 /*
4508 * Dump Event Queues (EQs)
4509 */
4510 lpfc_debug_dump_sp_eq(phba);
4511
4512 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++)
4513 lpfc_debug_dump_fcp_eq(phba, fcp_wqidx);
4514}
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index f83bd944edd8..616c400dae14 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -267,3 +267,421 @@ struct lpfc_idiag {
267#define LPFC_DISC_TRC_DISCOVERY 0xef /* common mask for general 267#define LPFC_DISC_TRC_DISCOVERY 0xef /* common mask for general
268 * discovery */ 268 * discovery */
269#endif /* H_LPFC_DEBUG_FS */ 269#endif /* H_LPFC_DEBUG_FS */
270
271
272/*
273 * Driver debug utility routines outside of debugfs. The debug utility
274 * routines implemented here are intended to be used in the instrumented
275 * debug driver for debugging host or port issues.
276 */
277
278/**
279 * lpfc_debug_dump_qe - dump a specific entry from a queue
280 * @q: Pointer to the queue descriptor.
281 * @idx: Index to the entry on the queue.
282 *
283 * This function dumps an entry indexed by @idx from a queue specified by the
284 * queue descriptor @q.
285 **/
286static inline void
287lpfc_debug_dump_qe(struct lpfc_queue *q, uint32_t idx)
288{
289 char line_buf[LPFC_LBUF_SZ];
290 int i, esize, qe_word_cnt, len;
291 uint32_t *pword;
292
293 /* sanity checks */
294 if (!q)
295 return;
296 if (idx >= q->entry_count)
297 return;
298
299 esize = q->entry_size;
300 qe_word_cnt = esize / sizeof(uint32_t);
301 pword = q->qe[idx].address;
302
303 len = 0;
304 len += snprintf(line_buf+len, LPFC_LBUF_SZ-len, "QE[%04d]: ", idx);
305 if (qe_word_cnt > 8)
306 printk(KERN_ERR "%s\n", line_buf);
307
308 for (i = 0; i < qe_word_cnt; i++) {
309 if (!(i % 8)) {
310 if (i != 0)
311 printk(KERN_ERR "%s\n", line_buf);
312 if (qe_word_cnt > 8) {
313 len = 0;
314 memset(line_buf, 0, LPFC_LBUF_SZ);
315 len += snprintf(line_buf+len, LPFC_LBUF_SZ-len,
316 "%03d: ", i);
317 }
318 }
319 len += snprintf(line_buf+len, LPFC_LBUF_SZ-len, "%08x ",
320 ((uint32_t)*pword) & 0xffffffff);
321 pword++;
322 }
323 if (qe_word_cnt <= 8 || (i - 1) % 8)
324 printk(KERN_ERR "%s\n", line_buf);
325}
326
327/**
328 * lpfc_debug_dump_q - dump all entries from a specific queue
329 * @q: Pointer to the queue descriptor.
330 *
331 * This function dumps all entries from a queue specified by the queue
332 * descriptor @q.
333 **/
334static inline void
335lpfc_debug_dump_q(struct lpfc_queue *q)
336{
337 int idx, entry_count;
338
339 /* sanity check */
340 if (!q)
341 return;
342
343 dev_printk(KERN_ERR, &(((q->phba))->pcidev)->dev,
344 "%d: [qid:%d, type:%d, subtype:%d, "
345 "qe_size:%d, qe_count:%d, "
346 "host_index:%d, port_index:%d]\n",
347 (q->phba)->brd_no,
348 q->queue_id, q->type, q->subtype,
349 q->entry_size, q->entry_count,
350 q->host_index, q->hba_index);
351 entry_count = q->entry_count;
352 for (idx = 0; idx < entry_count; idx++)
353 lpfc_debug_dump_qe(q, idx);
354 printk(KERN_ERR "\n");
355}
356
357/**
358 * lpfc_debug_dump_fcp_wq - dump all entries from a fcp work queue
359 * @phba: Pointer to HBA context object.
360 * @fcp_wqidx: Index to a FCP work queue.
361 *
362 * This function dumps all entries from a FCP work queue specified by the
363 * @fcp_wqidx.
364 **/
365static inline void
366lpfc_debug_dump_fcp_wq(struct lpfc_hba *phba, int fcp_wqidx)
367{
368 /* sanity check */
369 if (fcp_wqidx >= phba->cfg_fcp_wq_count)
370 return;
371
372 printk(KERN_ERR "FCP WQ: WQ[Idx:%d|Qid:%d]\n",
373 fcp_wqidx, phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id);
374 lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[fcp_wqidx]);
375}
376
377/**
378 * lpfc_debug_dump_fcp_cq - dump all entries from a fcp work queue's cmpl queue
379 * @phba: Pointer to HBA context object.
380 * @fcp_wqidx: Index to a FCP work queue.
381 *
382 * This function dumps all entries from a FCP complete queue which is
383 * associated with the FCP work queue specified by the @fcp_wqidx.
384 **/
385static inline void
386lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx)
387{
388 int fcp_cqidx, fcp_cqid;
389
390 /* sanity check */
391 if (fcp_wqidx >= phba->cfg_fcp_wq_count)
392 return;
393
394 fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid;
395 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++)
396 if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid)
397 break;
398 if (fcp_cqidx >= phba->cfg_fcp_eq_count)
399 return;
400
401 printk(KERN_ERR "FCP CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]:\n",
402 fcp_wqidx, phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
403 fcp_cqidx, fcp_cqid);
404 lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[fcp_cqidx]);
405}
406
407/**
408 * lpfc_debug_dump_fcp_eq - dump all entries from a fcp work queue's evt queue
409 * @phba: Pointer to HBA context object.
410 * @fcp_wqidx: Index to a FCP work queue.
411 *
412 * This function dumps all entries from a FCP event queue which is
413 * associated with the FCP work queue specified by the @fcp_wqidx.
414 **/
415static inline void
416lpfc_debug_dump_fcp_eq(struct lpfc_hba *phba, int fcp_wqidx)
417{
418 struct lpfc_queue *qdesc;
419 int fcp_eqidx, fcp_eqid;
420 int fcp_cqidx, fcp_cqid;
421
422 /* sanity check */
423 if (fcp_wqidx >= phba->cfg_fcp_wq_count)
424 return;
425 fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid;
426 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++)
427 if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid)
428 break;
429 if (fcp_cqidx >= phba->cfg_fcp_eq_count)
430 return;
431
432 if (phba->cfg_fcp_eq_count == 0) {
433 fcp_eqidx = -1;
434 fcp_eqid = phba->sli4_hba.sp_eq->queue_id;
435 qdesc = phba->sli4_hba.sp_eq;
436 } else {
437 fcp_eqidx = fcp_cqidx;
438 fcp_eqid = phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id;
439 qdesc = phba->sli4_hba.fp_eq[fcp_eqidx];
440 }
441
442 printk(KERN_ERR "FCP EQ: WQ[Idx:%d|Qid:%d]->CQ[Idx:%d|Qid:%d]->"
443 "EQ[Idx:%d|Qid:%d]\n",
444 fcp_wqidx, phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
445 fcp_cqidx, fcp_cqid, fcp_eqidx, fcp_eqid);
446 lpfc_debug_dump_q(qdesc);
447}
448
449/**
450 * lpfc_debug_dump_els_wq - dump all entries from the els work queue
451 * @phba: Pointer to HBA context object.
452 *
453 * This function dumps all entries from the ELS work queue.
454 **/
455static inline void
456lpfc_debug_dump_els_wq(struct lpfc_hba *phba)
457{
458 printk(KERN_ERR "ELS WQ: WQ[Qid:%d]:\n",
459 phba->sli4_hba.els_wq->queue_id);
460 lpfc_debug_dump_q(phba->sli4_hba.els_wq);
461}
462
463/**
464 * lpfc_debug_dump_mbx_wq - dump all entries from the mbox work queue
465 * @phba: Pointer to HBA context object.
466 *
467 * This function dumps all entries from the MBOX work queue.
468 **/
469static inline void
470lpfc_debug_dump_mbx_wq(struct lpfc_hba *phba)
471{
472 printk(KERN_ERR "MBX WQ: WQ[Qid:%d]\n",
473 phba->sli4_hba.mbx_wq->queue_id);
474 lpfc_debug_dump_q(phba->sli4_hba.mbx_wq);
475}
476
477/**
478 * lpfc_debug_dump_dat_rq - dump all entries from the receive data queue
479 * @phba: Pointer to HBA context object.
480 *
481 * This function dumps all entries from the receive data queue.
482 **/
483static inline void
484lpfc_debug_dump_dat_rq(struct lpfc_hba *phba)
485{
486 printk(KERN_ERR "DAT RQ: RQ[Qid:%d]\n",
487 phba->sli4_hba.dat_rq->queue_id);
488 lpfc_debug_dump_q(phba->sli4_hba.dat_rq);
489}
490
491/**
492 * lpfc_debug_dump_hdr_rq - dump all entries from the receive header queue
493 * @phba: Pointer to HBA context object.
494 *
495 * This function dumps all entries from the receive header queue.
496 **/
497static inline void
498lpfc_debug_dump_hdr_rq(struct lpfc_hba *phba)
499{
500 printk(KERN_ERR "HDR RQ: RQ[Qid:%d]\n",
501 phba->sli4_hba.hdr_rq->queue_id);
502 lpfc_debug_dump_q(phba->sli4_hba.hdr_rq);
503}
504
505/**
506 * lpfc_debug_dump_els_cq - dump all entries from the els complete queue
507 * @phba: Pointer to HBA context object.
508 *
509 * This function dumps all entries from the els complete queue.
510 **/
511static inline void
512lpfc_debug_dump_els_cq(struct lpfc_hba *phba)
513{
514 printk(KERN_ERR "ELS CQ: WQ[Qid:%d]->CQ[Qid:%d]\n",
515 phba->sli4_hba.els_wq->queue_id,
516 phba->sli4_hba.els_cq->queue_id);
517 lpfc_debug_dump_q(phba->sli4_hba.els_cq);
518}
519
520/**
521 * lpfc_debug_dump_mbx_cq - dump all entries from the mbox complete queue
522 * @phba: Pointer to HBA context object.
523 *
524 * This function dumps all entries from the mbox complete queue.
525 **/
526static inline void
527lpfc_debug_dump_mbx_cq(struct lpfc_hba *phba)
528{
529 printk(KERN_ERR "MBX CQ: WQ[Qid:%d]->CQ[Qid:%d]\n",
530 phba->sli4_hba.mbx_wq->queue_id,
531 phba->sli4_hba.mbx_cq->queue_id);
532 lpfc_debug_dump_q(phba->sli4_hba.mbx_cq);
533}
534
535/**
536 * lpfc_debug_dump_sp_eq - dump all entries from slow-path event queue
537 * @phba: Pointer to HBA context object.
538 *
539 * This function dumps all entries from the slow-path event queue.
540 **/
541static inline void
542lpfc_debug_dump_sp_eq(struct lpfc_hba *phba)
543{
544 printk(KERN_ERR "SP EQ: WQ[Qid:%d/Qid:%d]->CQ[Qid:%d/Qid:%d]->"
545 "EQ[Qid:%d]:\n",
546 phba->sli4_hba.mbx_wq->queue_id,
547 phba->sli4_hba.els_wq->queue_id,
548 phba->sli4_hba.mbx_cq->queue_id,
549 phba->sli4_hba.els_cq->queue_id,
550 phba->sli4_hba.sp_eq->queue_id);
551 lpfc_debug_dump_q(phba->sli4_hba.sp_eq);
552}
553
554/**
555 * lpfc_debug_dump_wq_by_id - dump all entries from a work queue by queue id
556 * @phba: Pointer to HBA context object.
557 * @qid: Work queue identifier.
558 *
559 * This function dumps all entries from a work queue identified by the queue
560 * identifier.
561 **/
562static inline void
563lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid)
564{
565 int wq_idx;
566
567 for (wq_idx = 0; wq_idx < phba->cfg_fcp_wq_count; wq_idx++)
568 if (phba->sli4_hba.fcp_wq[wq_idx]->queue_id == qid)
569 break;
570 if (wq_idx < phba->cfg_fcp_wq_count) {
571 printk(KERN_ERR "FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
572 lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[wq_idx]);
573 return;
574 }
575
576 if (phba->sli4_hba.els_wq->queue_id == qid) {
577 printk(KERN_ERR "ELS WQ[Qid:%d]\n", qid);
578 lpfc_debug_dump_q(phba->sli4_hba.els_wq);
579 }
580}
581
582/**
583 * lpfc_debug_dump_mq_by_id - dump all entries from a mbox queue by queue id
584 * @phba: Pointer to HBA context object.
585 * @qid: Mbox work queue identifier.
586 *
587 * This function dumps all entries from a mbox work queue identified by the
588 * queue identifier.
589 **/
590static inline void
591lpfc_debug_dump_mq_by_id(struct lpfc_hba *phba, int qid)
592{
593 if (phba->sli4_hba.mbx_wq->queue_id == qid) {
594 printk(KERN_ERR "MBX WQ[Qid:%d]\n", qid);
595 lpfc_debug_dump_q(phba->sli4_hba.mbx_wq);
596 }
597}
598
599/**
600 * lpfc_debug_dump_rq_by_id - dump all entries from a receive queue by queue id
601 * @phba: Pointer to HBA context object.
602 * @qid: Receive queue identifier.
603 *
604 * This function dumps all entries from a receive queue identified by the
605 * queue identifier.
606 **/
607static inline void
608lpfc_debug_dump_rq_by_id(struct lpfc_hba *phba, int qid)
609{
610 if (phba->sli4_hba.hdr_rq->queue_id == qid) {
611 printk(KERN_ERR "HDR RQ[Qid:%d]\n", qid);
612 lpfc_debug_dump_q(phba->sli4_hba.hdr_rq);
613 return;
614 }
615 if (phba->sli4_hba.dat_rq->queue_id == qid) {
616 printk(KERN_ERR "DAT RQ[Qid:%d]\n", qid);
617 lpfc_debug_dump_q(phba->sli4_hba.dat_rq);
618 }
619}
620
621/**
622 * lpfc_debug_dump_cq_by_id - dump all entries from a cmpl queue by queue id
623 * @phba: Pointer to HBA context object.
624 * @qid: Complete queue identifier.
625 *
626 * This function dumps all entries from a complete queue identified by the
627 * queue identifier.
628 **/
629static inline void
630lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid)
631{
632 int cq_idx = 0;
633
634 do {
635 if (phba->sli4_hba.fcp_cq[cq_idx]->queue_id == qid)
636 break;
637 } while (++cq_idx < phba->cfg_fcp_eq_count);
638
639 if (cq_idx < phba->cfg_fcp_eq_count) {
640 printk(KERN_ERR "FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
641 lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[cq_idx]);
642 return;
643 }
644
645 if (phba->sli4_hba.els_cq->queue_id == qid) {
646 printk(KERN_ERR "ELS CQ[Qid:%d]\n", qid);
647 lpfc_debug_dump_q(phba->sli4_hba.els_cq);
648 return;
649 }
650
651 if (phba->sli4_hba.mbx_cq->queue_id == qid) {
652 printk(KERN_ERR "MBX CQ[Qid:%d]\n", qid);
653 lpfc_debug_dump_q(phba->sli4_hba.mbx_cq);
654 }
655}
656
657/**
658 * lpfc_debug_dump_eq_by_id - dump all entries from an event queue by queue id
659 * @phba: Pointer to HBA context object.
660 * @qid: Event queue identifier.
661 *
662 * This function dumps all entries from an event queue identified by the
663 * queue identifier.
664 **/
665static inline void
666lpfc_debug_dump_eq_by_id(struct lpfc_hba *phba, int qid)
667{
668 int eq_idx;
669
670 for (eq_idx = 0; eq_idx < phba->cfg_fcp_eq_count; eq_idx++) {
671 if (phba->sli4_hba.fp_eq[eq_idx]->queue_id == qid)
672 break;
673 }
674
675 if (eq_idx < phba->cfg_fcp_eq_count) {
676 printk(KERN_ERR "FCP EQ[Idx:%d|Qid:%d]\n", eq_idx, qid);
677 lpfc_debug_dump_q(phba->sli4_hba.fp_eq[eq_idx]);
678 return;
679 }
680
681 if (phba->sli4_hba.sp_eq->queue_id == qid) {
682		printk(KERN_ERR "SP EQ[Qid:%d]\n", qid);
683 lpfc_debug_dump_q(phba->sli4_hba.sp_eq);
684 }
685}
686
687void lpfc_debug_dump_all_queues(struct lpfc_hba *);
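
lpfc_debug_dump_qe() above batches eight 32-bit words per output line into a bounded buffer before printing, so each queue entry prints as a compact hex table. Below is a self-contained userspace version of the same line-buffered hex-dump technique; it assumes a plain array in place of the queue entry and slightly simplifies the header-line handling.

#include <stdint.h>
#include <stdio.h>

#define LBUF_SZ 128   /* mirrors LPFC_LBUF_SZ */

static void dump_entry(const uint32_t *words, int word_cnt, int idx)
{
	char line[LBUF_SZ];
	int i, len = 0;

	len += snprintf(line + len, LBUF_SZ - len, "QE[%04d]: ", idx);
	for (i = 0; i < word_cnt; i++) {
		/* flush a full line of eight words, then start a fresh one */
		if (i && !(i % 8)) {
			puts(line);
			len = 0;
			len += snprintf(line + len, LBUF_SZ - len,
					"%03d: ", i);
		}
		len += snprintf(line + len, LBUF_SZ - len, "%08x ", words[i]);
	}
	puts(line);   /* flush the final, possibly partial, line */
}

int main(void)
{
	uint32_t qe[16];
	int i;

	for (i = 0; i < 16; i++)
		qe[i] = 0xdead0000u | i;
	dump_entry(qe, 16, 0);
	return 0;
}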
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 3407b39e0a3f..d54ae1999797 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -230,27 +230,43 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
230 230
231 INIT_LIST_HEAD(&pbuflist->list); 231 INIT_LIST_HEAD(&pbuflist->list);
232 232
233 icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
234 icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
235 icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
236 icmd->un.elsreq64.remoteID = did; /* DID */
237 if (expectRsp) { 233 if (expectRsp) {
234 icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
235 icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
236 icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
238 icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64)); 237 icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
238
239 icmd->un.elsreq64.remoteID = did; /* DID */
239 icmd->ulpCommand = CMD_ELS_REQUEST64_CR; 240 icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
240 icmd->ulpTimeout = phba->fc_ratov * 2; 241 icmd->ulpTimeout = phba->fc_ratov * 2;
241 } else { 242 } else {
242 icmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64); 243 icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
244 icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
245 icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
246 icmd->un.xseq64.bdl.bdeSize = sizeof(struct ulp_bde64);
247 icmd->un.xseq64.xmit_els_remoteID = did; /* DID */
243 icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX; 248 icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
244 } 249 }
245 icmd->ulpBdeCount = 1; 250 icmd->ulpBdeCount = 1;
246 icmd->ulpLe = 1; 251 icmd->ulpLe = 1;
247 icmd->ulpClass = CLASS3; 252 icmd->ulpClass = CLASS3;
248 253
249 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 254 /*
250 icmd->un.elsreq64.myID = vport->fc_myDID; 255 * If we have NPIV enabled, we want to send ELS traffic by VPI.
256 * For SLI4, since the driver controls VPIs we also want to include
257 * all ELS pt2pt protocol traffic as well.
258 */
259 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
260 ((phba->sli_rev == LPFC_SLI_REV4) &&
261 (vport->fc_flag & FC_PT2PT))) {
262
263 if (expectRsp) {
264 icmd->un.elsreq64.myID = vport->fc_myDID;
265
266 /* For ELS_REQUEST64_CR, use the VPI by default */
267 icmd->ulpContext = phba->vpi_ids[vport->vpi];
268 }
251 269
252 /* For ELS_REQUEST64_CR, use the VPI by default */
253 icmd->ulpContext = phba->vpi_ids[vport->vpi];
254 icmd->ulpCt_h = 0; 270 icmd->ulpCt_h = 0;
255 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */ 271 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
256 if (elscmd == ELS_CMD_ECHO) 272 if (elscmd == ELS_CMD_ECHO)
@@ -438,9 +454,10 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
438 int rc = 0; 454 int rc = 0;
439 455
440 sp = &phba->fc_fabparam; 456 sp = &phba->fc_fabparam;
441 /* move forward in case of SLI4 FC port loopback test */ 457 /* move forward in case of SLI4 FC port loopback test and pt2pt mode */
442 if ((phba->sli_rev == LPFC_SLI_REV4) && 458 if ((phba->sli_rev == LPFC_SLI_REV4) &&
443 !(phba->link_flag & LS_LOOPBACK_MODE)) { 459 !(phba->link_flag & LS_LOOPBACK_MODE) &&
460 !(vport->fc_flag & FC_PT2PT)) {
444 ndlp = lpfc_findnode_did(vport, Fabric_DID); 461 ndlp = lpfc_findnode_did(vport, Fabric_DID);
445 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 462 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
446 rc = -ENODEV; 463 rc = -ENODEV;
@@ -707,14 +724,17 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
707 lpfc_sli4_unreg_all_rpis(vport); 724 lpfc_sli4_unreg_all_rpis(vport);
708 lpfc_mbx_unreg_vpi(vport); 725 lpfc_mbx_unreg_vpi(vport);
709 spin_lock_irq(shost->host_lock); 726 spin_lock_irq(shost->host_lock);
710 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
711 /*
712 * If VPI is unreged, driver need to do INIT_VPI
713 * before re-registering
714 */
715 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 727 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
716 spin_unlock_irq(shost->host_lock); 728 spin_unlock_irq(shost->host_lock);
717 } 729 }
730
731 /*
732 * For SLI3 and SLI4, the VPI needs to be reregistered in
733 * response to this fabric parameter change event.
734 */
735 spin_lock_irq(shost->host_lock);
736 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
737 spin_unlock_irq(shost->host_lock);
718 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 738 } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
719 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 739 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
720 /* 740 /*
@@ -817,6 +837,17 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
817 mempool_free(mbox, phba->mbox_mem_pool); 837 mempool_free(mbox, phba->mbox_mem_pool);
818 goto fail; 838 goto fail;
819 } 839 }
840
841 /*
842 * For SLI4, the VFI/VPI are registered AFTER the
843 * Nport with the higher WWPN sends the PLOGI with
844 * an assigned NPortId.
845 */
846
847 /* not equal */
848 if ((phba->sli_rev == LPFC_SLI_REV4) && rc)
849 lpfc_issue_reg_vfi(vport);
850
820 /* Decrement ndlp reference count indicating that ndlp can be 851 /* Decrement ndlp reference count indicating that ndlp can be
821 * safely released when other references to it are done. 852 * safely released when other references to it are done.
822 */ 853 */
@@ -2972,7 +3003,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2972	 			 * ABTS we cannot generate an RRQ.	 3003	 			 * ABTS we cannot generate an RRQ.
2973 */ 3004 */
2974 lpfc_set_rrq_active(phba, ndlp, 3005 lpfc_set_rrq_active(phba, ndlp,
2975 cmdiocb->sli4_xritag, 0, 0); 3006 cmdiocb->sli4_lxritag, 0, 0);
2976 } 3007 }
2977 break; 3008 break;
2978 case IOSTAT_LOCAL_REJECT: 3009 case IOSTAT_LOCAL_REJECT:
@@ -3803,10 +3834,11 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3803 /* Xmit ELS ACC response tag <ulpIoTag> */ 3834 /* Xmit ELS ACC response tag <ulpIoTag> */
3804 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3835 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3805 "0128 Xmit ELS ACC response tag x%x, XRI: x%x, " 3836 "0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
3806 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x\n", 3837 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x "
3838 "fc_flag x%x\n",
3807 elsiocb->iotag, elsiocb->iocb.ulpContext, 3839 elsiocb->iotag, elsiocb->iocb.ulpContext,
3808 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 3840 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3809 ndlp->nlp_rpi); 3841 ndlp->nlp_rpi, vport->fc_flag);
3810 if (ndlp->nlp_flag & NLP_LOGO_ACC) { 3842 if (ndlp->nlp_flag & NLP_LOGO_ACC) {
3811 spin_lock_irq(shost->host_lock); 3843 spin_lock_irq(shost->host_lock);
3812 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 3844 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
@@ -4936,8 +4968,6 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4936 return 1; 4968 return 1;
4937 } 4969 }
4938 4970
4939 did = Fabric_DID;
4940
4941 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) { 4971 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) {
4942 /* For a FLOGI we accept, then if our portname is greater 4972 /* For a FLOGI we accept, then if our portname is greater
4943 * then the remote portname we initiate Nport login. 4973 * then the remote portname we initiate Nport login.
@@ -4976,26 +5006,82 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4976 spin_lock_irq(shost->host_lock); 5006 spin_lock_irq(shost->host_lock);
4977 vport->fc_flag |= FC_PT2PT_PLOGI; 5007 vport->fc_flag |= FC_PT2PT_PLOGI;
4978 spin_unlock_irq(shost->host_lock); 5008 spin_unlock_irq(shost->host_lock);
5009
5010 /* If we have the high WWPN we can assign our own
5011 * myDID; otherwise, we have to WAIT for a PLOGI
5012 * from the remote NPort to find out what it
5013 * will be.
5014 */
5015 vport->fc_myDID = PT2PT_LocalID;
4979 } 5016 }
5017
5018 /*
5019 * The vport state should go to LPFC_FLOGI only
5020 * AFTER we issue a FLOGI, not receive one.
5021 */
4980 spin_lock_irq(shost->host_lock); 5022 spin_lock_irq(shost->host_lock);
4981 vport->fc_flag |= FC_PT2PT; 5023 vport->fc_flag |= FC_PT2PT;
4982 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 5024 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
4983 spin_unlock_irq(shost->host_lock); 5025 spin_unlock_irq(shost->host_lock);
5026
5027 /*
5028 * We temporarily set fc_myDID to make it look like we are
5029 * a Fabric. This is done just so we end up with the right
5030 * did / sid on the FLOGI ACC rsp.
5031 */
5032 did = vport->fc_myDID;
5033 vport->fc_myDID = Fabric_DID;
5034
4984 } else { 5035 } else {
4985 /* Reject this request because invalid parameters */ 5036 /* Reject this request because invalid parameters */
4986 stat.un.b.lsRjtRsvd0 = 0; 5037 stat.un.b.lsRjtRsvd0 = 0;
4987 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 5038 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
4988 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; 5039 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
4989 stat.un.b.vendorUnique = 0; 5040 stat.un.b.vendorUnique = 0;
5041
5042 /*
5043 * We temporarily set fc_myDID to make it look like we are
5044 * a Fabric. This is done just so we end up with the right
5045 * did / sid on the FLOGI LS_RJT rsp.
5046 */
5047 did = vport->fc_myDID;
5048 vport->fc_myDID = Fabric_DID;
5049
4990 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 5050 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
4991 NULL); 5051 NULL);
5052
5053		/* Now let's put fc_myDID back to what it's supposed to be */
5054 vport->fc_myDID = did;
5055
4992 return 1; 5056 return 1;
4993 } 5057 }
4994 5058
4995 /* Send back ACC */ 5059 /* Send back ACC */
4996 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL); 5060 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
4997 5061
5062		/* Now let's put fc_myDID back to what it's supposed to be */
5063 vport->fc_myDID = did;
5064
5065 if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
5066
5067 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5068 if (!mbox)
5069 goto fail;
5070
5071 lpfc_config_link(phba, mbox);
5072
5073 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5074 mbox->vport = vport;
5075 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
5076 if (rc == MBX_NOT_FINISHED) {
5077 mempool_free(mbox, phba->mbox_mem_pool);
5078 goto fail;
5079 }
5080 }
5081
4998 return 0; 5082 return 0;
5083fail:
5084 return 1;
4999} 5085}
5000 5086
5001/** 5087/**
@@ -5176,7 +5262,6 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5176 } 5262 }
5177 5263
5178 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t); 5264 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t);
5179 mempool_free(pmb, phba->mbox_mem_pool);
5180 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 5265 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
5181 lpfc_max_els_tries, ndlp, 5266 lpfc_max_els_tries, ndlp,
5182 ndlp->nlp_DID, ELS_CMD_ACC); 5267 ndlp->nlp_DID, ELS_CMD_ACC);
@@ -5184,8 +5269,10 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5184 /* Decrement the ndlp reference count from previous mbox command */ 5269 /* Decrement the ndlp reference count from previous mbox command */
5185 lpfc_nlp_put(ndlp); 5270 lpfc_nlp_put(ndlp);
5186 5271
5187 if (!elsiocb) 5272 if (!elsiocb) {
5273 mempool_free(pmb, phba->mbox_mem_pool);
5188 return; 5274 return;
5275 }
5189 5276
5190 icmd = &elsiocb->iocb; 5277 icmd = &elsiocb->iocb;
5191 icmd->ulpContext = rxid; 5278 icmd->ulpContext = rxid;
@@ -5202,7 +5289,7 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5202 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); 5289 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
5203 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); 5290 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
5204 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); 5291 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
5205 5292 mempool_free(pmb, phba->mbox_mem_pool);
5206 /* Xmit ELS RLS ACC response tag <ulpIoTag> */ 5293 /* Xmit ELS RLS ACC response tag <ulpIoTag> */
5207 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 5294 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
5208 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, " 5295 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
@@ -5586,7 +5673,7 @@ lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
5586 pcmd += sizeof(uint32_t); 5673 pcmd += sizeof(uint32_t);
5587 els_rrq = (struct RRQ *) pcmd; 5674 els_rrq = (struct RRQ *) pcmd;
5588 5675
5589 bf_set(rrq_oxid, els_rrq, rrq->xritag); 5676 bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]);
5590 bf_set(rrq_rxid, els_rrq, rrq->rxid); 5677 bf_set(rrq_rxid, els_rrq, rrq->rxid);
5591 bf_set(rrq_did, els_rrq, vport->fc_myDID); 5678 bf_set(rrq_did, els_rrq, vport->fc_myDID);
5592 els_rrq->rrq = cpu_to_be32(els_rrq->rrq); 5679 els_rrq->rrq = cpu_to_be32(els_rrq->rrq);
@@ -7873,7 +7960,9 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
7873 sglq_entry->state = SGL_FREED; 7960 sglq_entry->state = SGL_FREED;
7874 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 7961 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
7875 spin_unlock_irqrestore(&phba->hbalock, iflag); 7962 spin_unlock_irqrestore(&phba->hbalock, iflag);
7876 lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1); 7963 lpfc_set_rrq_active(phba, ndlp,
7964 sglq_entry->sli4_lxritag,
7965 rxid, 1);
7877 7966
7878 /* Check if TXQ queue needs to be serviced */ 7967 /* Check if TXQ queue needs to be serviced */
7879 if (pring->txq_cnt) 7968 if (pring->txq_cnt)
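
The lpfc_els_rsp_rls_acc() hunk above moves mempool_free() to after the last read of the mailbox counters and adds a free on the allocation-failure path, closing both a use-after-free and a leak. A minimal sketch of that resource-lifetime rule follows; the names are illustrative, not the driver's.

#include <stdio.h>
#include <stdlib.h>

struct mbox { unsigned int crc_cnt; };

/* Builds a response from mailbox data. The mailbox is consumed: it is
 * freed exactly once, after its last use, on every path. */
static int build_rls_response(char **out, struct mbox *mb)
{
	char *rsp = malloc(32);

	if (!rsp) {
		free(mb);       /* early exit still owns mb: release it */
		return -1;
	}
	snprintf(rsp, 32, "crc=%u", mb->crc_cnt);   /* last use of mb */
	free(mb);               /* only now is the free safe */
	*out = rsp;
	return 0;
}

int main(void)
{
	struct mbox *mb = malloc(sizeof(*mb));
	char *rsp = NULL;

	if (!mb)
		return 1;
	mb->crc_cnt = 7;
	if (build_rls_response(&rsp, mb) == 0) {
		puts(rsp);
		free(rsp);
	}
	return 0;
}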
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index b507536dc5b5..5bb269e224f6 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -713,6 +713,7 @@ lpfc_do_work(void *p)
713 int rc; 713 int rc;
714 714
715 set_user_nice(current, -20); 715 set_user_nice(current, -20);
716 current->flags |= PF_NOFREEZE;
716 phba->data_flags = 0; 717 phba->data_flags = 0;
717 718
718 while (!kthread_should_stop()) { 719 while (!kthread_should_stop()) {
@@ -1094,7 +1095,7 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1094 /* Start discovery by sending a FLOGI. port_state is identically 1095 /* Start discovery by sending a FLOGI. port_state is identically
1095 * LPFC_FLOGI while waiting for FLOGI cmpl 1096 * LPFC_FLOGI while waiting for FLOGI cmpl
1096 */ 1097 */
1097 if (vport->port_state != LPFC_FLOGI) 1098 if (vport->port_state != LPFC_FLOGI || vport->fc_flag & FC_PT2PT_PLOGI)
1098 lpfc_initial_flogi(vport); 1099 lpfc_initial_flogi(vport);
1099 return; 1100 return;
1100 1101
@@ -2881,9 +2882,14 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2881 } 2882 }
2882 2883
2883 if (vport->port_state == LPFC_FABRIC_CFG_LINK) { 2884 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
2884 /* For private loop just start discovery and we are done. */ 2885 /*
2885 if ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) && 2886 * For private loop or for NPort pt2pt,
2886 !(vport->fc_flag & FC_PUBLIC_LOOP)) { 2887 * just start discovery and we are done.
2888 */
2889 if ((vport->fc_flag & FC_PT2PT) ||
2890 ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
2891 !(vport->fc_flag & FC_PUBLIC_LOOP))) {
2892
2887 /* Use loop map to make discovery list */ 2893 /* Use loop map to make discovery list */
2888 lpfc_disc_list_loopmap(vport); 2894 lpfc_disc_list_loopmap(vport);
2889 /* Start discovery */ 2895 /* Start discovery */
@@ -5490,9 +5496,9 @@ lpfc_nlp_release(struct kref *kref)
5490 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); 5496 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
5491 5497
5492 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 5498 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
5493 "0279 lpfc_nlp_release: ndlp:x%p " 5499 "0279 lpfc_nlp_release: ndlp:x%p did %x "
5494 "usgmap:x%x refcnt:%d\n", 5500 "usgmap:x%x refcnt:%d\n",
5495 (void *)ndlp, ndlp->nlp_usg_map, 5501 (void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
5496 atomic_read(&ndlp->kref.refcount)); 5502 atomic_read(&ndlp->kref.refcount));
5497 5503
5498 /* remove ndlp from action. */ 5504 /* remove ndlp from action. */
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 5f280b5ae3db..41bb1d2fb625 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -3374,6 +3374,9 @@ typedef struct {
3374 WORD5 w5; /* Header control/status word */ 3374 WORD5 w5; /* Header control/status word */
3375} XMT_SEQ_FIELDS64; 3375} XMT_SEQ_FIELDS64;
3376 3376
3377/* This word is the remote port's D_ID for XMIT_ELS_RSP64 */
3378#define xmit_els_remoteID xrsqRo
3379
3377/* IOCB Command template for 64 bit RCV_SEQUENCE64 */ 3380/* IOCB Command template for 64 bit RCV_SEQUENCE64 */
3378typedef struct { 3381typedef struct {
3379 struct ulp_bde64 rcvBde; 3382 struct ulp_bde64 rcvBde;
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 91f09761bd32..f1946dfda5b4 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -228,19 +228,15 @@ struct lpfc_sli4_flags {
228#define lpfc_idx_rsrc_rdy_MASK 0x00000001 228#define lpfc_idx_rsrc_rdy_MASK 0x00000001
229#define lpfc_idx_rsrc_rdy_WORD word0 229#define lpfc_idx_rsrc_rdy_WORD word0
230#define LPFC_IDX_RSRC_RDY 1 230#define LPFC_IDX_RSRC_RDY 1
231#define lpfc_xri_rsrc_rdy_SHIFT 1 231#define lpfc_rpi_rsrc_rdy_SHIFT 1
232#define lpfc_xri_rsrc_rdy_MASK 0x00000001
233#define lpfc_xri_rsrc_rdy_WORD word0
234#define LPFC_XRI_RSRC_RDY 1
235#define lpfc_rpi_rsrc_rdy_SHIFT 2
236#define lpfc_rpi_rsrc_rdy_MASK 0x00000001 232#define lpfc_rpi_rsrc_rdy_MASK 0x00000001
237#define lpfc_rpi_rsrc_rdy_WORD word0 233#define lpfc_rpi_rsrc_rdy_WORD word0
238#define LPFC_RPI_RSRC_RDY 1 234#define LPFC_RPI_RSRC_RDY 1
239#define lpfc_vpi_rsrc_rdy_SHIFT 3 235#define lpfc_vpi_rsrc_rdy_SHIFT 2
240#define lpfc_vpi_rsrc_rdy_MASK 0x00000001 236#define lpfc_vpi_rsrc_rdy_MASK 0x00000001
241#define lpfc_vpi_rsrc_rdy_WORD word0 237#define lpfc_vpi_rsrc_rdy_WORD word0
242#define LPFC_VPI_RSRC_RDY 1 238#define LPFC_VPI_RSRC_RDY 1
243#define lpfc_vfi_rsrc_rdy_SHIFT 4 239#define lpfc_vfi_rsrc_rdy_SHIFT 3
244#define lpfc_vfi_rsrc_rdy_MASK 0x00000001 240#define lpfc_vfi_rsrc_rdy_MASK 0x00000001
245#define lpfc_vfi_rsrc_rdy_WORD word0 241#define lpfc_vfi_rsrc_rdy_WORD word0
246#define LPFC_VFI_RSRC_RDY 1 242#define LPFC_VFI_RSRC_RDY 1
@@ -3299,7 +3295,13 @@ struct els_request64_wqe {
3299struct xmit_els_rsp64_wqe { 3295struct xmit_els_rsp64_wqe {
3300 struct ulp_bde64 bde; 3296 struct ulp_bde64 bde;
3301 uint32_t response_payload_len; 3297 uint32_t response_payload_len;
3302 uint32_t rsvd4; 3298 uint32_t word4;
3299#define els_rsp64_sid_SHIFT 0
3300#define els_rsp64_sid_MASK 0x00FFFFFF
3301#define els_rsp64_sid_WORD word4
3302#define els_rsp64_sp_SHIFT 24
3303#define els_rsp64_sp_MASK 0x00000001
3304#define els_rsp64_sp_WORD word4
3303 struct wqe_did wqe_dest; 3305 struct wqe_did wqe_dest;
3304 struct wqe_common wqe_com; /* words 6-11 */ 3306 struct wqe_common wqe_com; /* words 6-11 */
3305 uint32_t word12; 3307 uint32_t word12;
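
The els_rsp64_sid/els_rsp64_sp definitions added above follow lpfc's SHIFT/MASK/WORD naming convention, which the driver consumes through its bf_set()/bf_get() accessors. The sketch below is a simplified, self-contained reimplementation of that convention; the macros here are stand-ins, not the driver's exact definitions.

#include <stdint.h>
#include <stdio.h>

struct xmit_els_rsp64 { uint32_t word4; };

#define sid_SHIFT 0
#define sid_MASK  0x00FFFFFF
#define sid_WORD  word4
#define sp_SHIFT  24
#define sp_MASK   0x00000001
#define sp_WORD   word4

/* Clear the field, then OR in the masked, shifted value. */
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = (((ptr)->name##_WORD & \
		~(name##_MASK << name##_SHIFT)) | \
		(((value) & name##_MASK) << name##_SHIFT)))

/* Shift the word down and mask off the other fields. */
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

int main(void)
{
	struct xmit_els_rsp64 wqe = { 0 };

	bf_set(sid, &wqe, 0x010203);   /* 24-bit source ID */
	bf_set(sp, &wqe, 1);           /* 1-bit flag in the same word */
	printf("word4=%08x sid=%06x sp=%u\n",
	       wqe.word4, bf_get(sid, &wqe), bf_get(sp, &wqe));
	return 0;
}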
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 9598fdcb08ab..411ed48d79da 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -64,8 +64,8 @@ static int lpfc_sli4_queue_verify(struct lpfc_hba *);
64static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); 64static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
65static int lpfc_setup_endian_order(struct lpfc_hba *); 65static int lpfc_setup_endian_order(struct lpfc_hba *);
66static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); 66static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
67static void lpfc_free_sgl_list(struct lpfc_hba *); 67static void lpfc_free_els_sgl_list(struct lpfc_hba *);
68static int lpfc_init_sgl_list(struct lpfc_hba *); 68static void lpfc_init_sgl_list(struct lpfc_hba *);
69static int lpfc_init_active_sgl_array(struct lpfc_hba *); 69static int lpfc_init_active_sgl_array(struct lpfc_hba *);
70static void lpfc_free_active_sgl(struct lpfc_hba *); 70static void lpfc_free_active_sgl(struct lpfc_hba *);
71static int lpfc_hba_down_post_s3(struct lpfc_hba *phba); 71static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
@@ -2767,47 +2767,14 @@ lpfc_offline(struct lpfc_hba *phba)
2767} 2767}
2768 2768
2769/** 2769/**
2770 * lpfc_scsi_buf_update - Update the scsi_buffers that are already allocated.
2771 * @phba: pointer to lpfc hba data structure.
2772 *
2773 * This routine goes through all the scsi buffers in the system and updates the
2774 * Physical XRIs assigned to the SCSI buffer because these may change after any
2775 * firmware reset
2776 *
2777 * Return codes
2778 * 0 - successful (for now, it always returns 0)
2779 **/
2780int
2781lpfc_scsi_buf_update(struct lpfc_hba *phba)
2782{
2783 struct lpfc_scsi_buf *sb, *sb_next;
2784
2785 spin_lock_irq(&phba->hbalock);
2786 spin_lock(&phba->scsi_buf_list_lock);
2787 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
2788 sb->cur_iocbq.sli4_xritag =
2789 phba->sli4_hba.xri_ids[sb->cur_iocbq.sli4_lxritag];
2790 set_bit(sb->cur_iocbq.sli4_lxritag, phba->sli4_hba.xri_bmask);
2791 phba->sli4_hba.max_cfg_param.xri_used++;
2792 phba->sli4_hba.xri_count++;
2793 }
2794 spin_unlock(&phba->scsi_buf_list_lock);
2795 spin_unlock_irq(&phba->hbalock);
2796 return 0;
2797}
2798
2799/**
2800 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 2770 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
2801 * @phba: pointer to lpfc hba data structure. 2771 * @phba: pointer to lpfc hba data structure.
2802 * 2772 *
2803 * This routine is to free all the SCSI buffers and IOCBs from the driver 2773 * This routine is to free all the SCSI buffers and IOCBs from the driver
2804 * list back to kernel. It is called from lpfc_pci_remove_one to free 2774 * list back to kernel. It is called from lpfc_pci_remove_one to free
2805 * the internal resources before the device is removed from the system. 2775 * the internal resources before the device is removed from the system.
2806 *
2807 * Return codes
2808 * 0 - successful (for now, it always returns 0)
2809 **/ 2776 **/
2810static int 2777static void
2811lpfc_scsi_free(struct lpfc_hba *phba) 2778lpfc_scsi_free(struct lpfc_hba *phba)
2812{ 2779{
2813 struct lpfc_scsi_buf *sb, *sb_next; 2780 struct lpfc_scsi_buf *sb, *sb_next;
@@ -2833,7 +2800,178 @@ lpfc_scsi_free(struct lpfc_hba *phba)
2833 } 2800 }
2834 2801
2835 spin_unlock_irq(&phba->hbalock); 2802 spin_unlock_irq(&phba->hbalock);
2803}
2804
2805/**
2806 * lpfc_sli4_xri_sgl_update - update xri-sgl sizing and mapping
2807 * @phba: pointer to lpfc hba data structure.
2808 *
2809 * This routine first calculates the sizes of the current els and allocated
2810 * scsi sgl lists, and then goes through all sgls to update the physical
2811 * XRIs assigned due to port function reset. During port initialization, the
2812 * current els and allocated scsi sgl counts are 0.
2813 *
2814 * Return codes
2815 * 0 - successful, -ENOMEM - failed to allocate memory
2816 **/
2817int
2818lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
2819{
2820 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
2821 struct lpfc_scsi_buf *psb = NULL, *psb_next = NULL;
2822 uint16_t i, lxri, xri_cnt, els_xri_cnt, scsi_xri_cnt;
2823 LIST_HEAD(els_sgl_list);
2824 LIST_HEAD(scsi_sgl_list);
2825 int rc;
2826
2827 /*
2828 * update on pci function's els xri-sgl list
2829 */
2830 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
2831 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
2832 /* els xri-sgl expanded */
2833 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
2834 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2835 "3157 ELS xri-sgl count increased from "
2836 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
2837 els_xri_cnt);
2838 /* allocate the additional els sgls */
2839 for (i = 0; i < xri_cnt; i++) {
2840 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
2841 GFP_KERNEL);
2842 if (sglq_entry == NULL) {
2843 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2844 "2562 Failure to allocate an "
2845 "ELS sgl entry:%d\n", i);
2846 rc = -ENOMEM;
2847 goto out_free_mem;
2848 }
2849 sglq_entry->buff_type = GEN_BUFF_TYPE;
2850 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
2851 &sglq_entry->phys);
2852 if (sglq_entry->virt == NULL) {
2853 kfree(sglq_entry);
2854 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2855 "2563 Failure to allocate an "
2856 "ELS mbuf:%d\n", i);
2857 rc = -ENOMEM;
2858 goto out_free_mem;
2859 }
2860 sglq_entry->sgl = sglq_entry->virt;
2861 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
2862 sglq_entry->state = SGL_FREED;
2863 list_add_tail(&sglq_entry->list, &els_sgl_list);
2864 }
2865 spin_lock(&phba->hbalock);
2866 list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
2867 spin_unlock(&phba->hbalock);
2868 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
2869 /* els xri-sgl shrank */
2870 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
2871 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2872 "3158 ELS xri-sgl count decreased from "
2873 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
2874 els_xri_cnt);
2875 spin_lock_irq(&phba->hbalock);
2876 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &els_sgl_list);
2877 spin_unlock_irq(&phba->hbalock);
2878 /* release extra els sgls from list */
2879 for (i = 0; i < xri_cnt; i++) {
2880 list_remove_head(&els_sgl_list,
2881 sglq_entry, struct lpfc_sglq, list);
2882 if (sglq_entry) {
2883 lpfc_mbuf_free(phba, sglq_entry->virt,
2884 sglq_entry->phys);
2885 kfree(sglq_entry);
2886 }
2887 }
2888 spin_lock_irq(&phba->hbalock);
2889 list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
2890 spin_unlock_irq(&phba->hbalock);
2891 } else
2892 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2893 "3163 ELS xri-sgl count unchanged: %d\n",
2894 els_xri_cnt);
2895 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
2896
2897 /* update xris to els sgls on the list */
2898 sglq_entry = NULL;
2899 sglq_entry_next = NULL;
2900 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
2901 &phba->sli4_hba.lpfc_sgl_list, list) {
2902 lxri = lpfc_sli4_next_xritag(phba);
2903 if (lxri == NO_XRI) {
2904 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2905 "2400 Failed to allocate xri for "
2906 "ELS sgl\n");
2907 rc = -ENOMEM;
2908 goto out_free_mem;
2909 }
2910 sglq_entry->sli4_lxritag = lxri;
2911 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
2912 }
2913
2914 /*
2915 * update on pci function's allocated scsi xri-sgl list
2916 */
2917 phba->total_scsi_bufs = 0;
2918
2919 /* maximum number of xris available for scsi buffers */
2920 phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri -
2921 els_xri_cnt;
2922
2923 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2924 "2401 Current allocated SCSI xri-sgl count:%d, "
2925 "maximum SCSI xri count:%d\n",
2926 phba->sli4_hba.scsi_xri_cnt,
2927 phba->sli4_hba.scsi_xri_max);
2928
2929 spin_lock_irq(&phba->scsi_buf_list_lock);
2930 list_splice_init(&phba->lpfc_scsi_buf_list, &scsi_sgl_list);
2931 spin_unlock_irq(&phba->scsi_buf_list_lock);
2932
2933 if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
2934 /* max scsi xri shrank below the allocated scsi buffers */
2935 scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt -
2936 phba->sli4_hba.scsi_xri_max;
2937 /* release the extra allocated scsi buffers */
2938 for (i = 0; i < scsi_xri_cnt; i++) {
2939 list_remove_head(&scsi_sgl_list, psb,
2940 struct lpfc_scsi_buf, list);
2941 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, psb->data,
2942 psb->dma_handle);
2943 kfree(psb);
2944 }
2945 spin_lock_irq(&phba->scsi_buf_list_lock);
2946 phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
2947 spin_unlock_irq(&phba->scsi_buf_list_lock);
2948 }
2949
2950 /* update xris associated with the remaining allocated scsi buffers */
2951 psb = NULL;
2952 psb_next = NULL;
2953 list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) {
2954 lxri = lpfc_sli4_next_xritag(phba);
2955 if (lxri == NO_XRI) {
2956 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2957 "2560 Failed to allocate xri for "
2958 "scsi buffer\n");
2959 rc = -ENOMEM;
2960 goto out_free_mem;
2961 }
2962 psb->cur_iocbq.sli4_lxritag = lxri;
2963 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
2964 }
2965 spin_lock(&phba->scsi_buf_list_lock);
2966 list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list);
2967 spin_unlock(&phba->scsi_buf_list_lock);
2968
2836 return 0; 2969 return 0;
2970
2971out_free_mem:
2972 lpfc_free_els_sgl_list(phba);
2973 lpfc_scsi_free(phba);
2974 return rc;
2837} 2975}
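[Editor's note] A sketch of the call order this routine implies after a port function reset; the exact call site is an assumption, but both routine names come from this patch:

	/* recompute els/scsi sgl sizing and refresh physical xris */
	rc = lpfc_sli4_xri_sgl_update(phba);
	if (unlikely(rc))
		return rc;	/* out_free_mem already released the sgls */
	/* xris are valid again; repost the scsi sgls to the port */
	rc = lpfc_sli4_repost_scsi_sgl_list(phba);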
2838 2976
2839/** 2977/**
@@ -4636,18 +4774,15 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4636 if (rc) 4774 if (rc)
4637 goto out_free_bsmbx; 4775 goto out_free_bsmbx;
4638 4776
4639 /* Initialize and populate the iocb list per host */ 4777 /* Initialize sgl lists per host */
4640 rc = lpfc_init_sgl_list(phba); 4778 lpfc_init_sgl_list(phba);
4641 if (rc) { 4779
4642 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4780 /* Allocate and initialize active sgl array */
4643 "1400 Failed to initialize sgl list.\n");
4644 goto out_destroy_cq_event_pool;
4645 }
4646 rc = lpfc_init_active_sgl_array(phba); 4781 rc = lpfc_init_active_sgl_array(phba);
4647 if (rc) { 4782 if (rc) {
4648 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4783 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4649 "1430 Failed to initialize sgl list.\n"); 4784 "1430 Failed to initialize sgl list.\n");
4650 goto out_free_sgl_list; 4785 goto out_destroy_cq_event_pool;
4651 } 4786 }
4652 rc = lpfc_sli4_init_rpi_hdrs(phba); 4787 rc = lpfc_sli4_init_rpi_hdrs(phba);
4653 if (rc) { 4788 if (rc) {
@@ -4722,8 +4857,6 @@ out_remove_rpi_hdrs:
4722 lpfc_sli4_remove_rpi_hdrs(phba); 4857 lpfc_sli4_remove_rpi_hdrs(phba);
4723out_free_active_sgl: 4858out_free_active_sgl:
4724 lpfc_free_active_sgl(phba); 4859 lpfc_free_active_sgl(phba);
4725out_free_sgl_list:
4726 lpfc_free_sgl_list(phba);
4727out_destroy_cq_event_pool: 4860out_destroy_cq_event_pool:
4728 lpfc_sli4_cq_event_pool_destroy(phba); 4861 lpfc_sli4_cq_event_pool_destroy(phba);
4729out_free_bsmbx: 4862out_free_bsmbx:
@@ -4760,10 +4893,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4760 4893
4761 /* Free the ELS sgl list */ 4894 /* Free the ELS sgl list */
4762 lpfc_free_active_sgl(phba); 4895 lpfc_free_active_sgl(phba);
4763 lpfc_free_sgl_list(phba); 4896 lpfc_free_els_sgl_list(phba);
4764
4765 /* Free the SCSI sgl management array */
4766 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4767 4897
4768 /* Free the completion queue EQ event pool */ 4898 /* Free the completion queue EQ event pool */
4769 lpfc_sli4_cq_event_release_all(phba); 4899 lpfc_sli4_cq_event_release_all(phba);
@@ -4990,29 +5120,42 @@ out_free_iocbq:
4990} 5120}
4991 5121
4992/** 5122/**
4993 * lpfc_free_sgl_list - Free sgl list. 5123 * lpfc_free_sgl_list - Free a given sgl list.
4994 * @phba: pointer to lpfc hba data structure. 5124 * @phba: pointer to lpfc hba data structure.
5125 * @sglq_list: pointer to the head of sgl list.
4995 * 5126 *
4996 * This routine is invoked to free the driver's sgl list and memory. 5127 * This routine is invoked to free a given sgl list and its memory.
4997 **/ 5128 **/
4998static void 5129void
4999lpfc_free_sgl_list(struct lpfc_hba *phba) 5130lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
5000{ 5131{
5001 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 5132 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
5133
5134 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
5135 list_del(&sglq_entry->list);
5136 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
5137 kfree(sglq_entry);
5138 }
5139}
5140
5141/**
5142 * lpfc_free_els_sgl_list - Free els sgl list.
5143 * @phba: pointer to lpfc hba data structure.
5144 *
5145 * This routine is invoked to free the driver's els sgl list and memory.
5146 **/
5147static void
5148lpfc_free_els_sgl_list(struct lpfc_hba *phba)
5149{
5002 LIST_HEAD(sglq_list); 5150 LIST_HEAD(sglq_list);
5003 5151
5152 /* Retrieve all els sgls from driver list */
5004 spin_lock_irq(&phba->hbalock); 5153 spin_lock_irq(&phba->hbalock);
5005 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list); 5154 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
5006 spin_unlock_irq(&phba->hbalock); 5155 spin_unlock_irq(&phba->hbalock);
5007 5156
5008 list_for_each_entry_safe(sglq_entry, sglq_next, 5157 /* Now free the sgl list */
5009 &sglq_list, list) { 5158 lpfc_free_sgl_list(phba, &sglq_list);
5010 list_del(&sglq_entry->list);
5011 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
5012 kfree(sglq_entry);
5013 phba->sli4_hba.total_sglq_bufs--;
5014 }
5015 kfree(phba->sli4_hba.lpfc_els_sgl_array);
5016} 5159}
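[Editor's note] Making lpfc_free_sgl_list() take a list head (and exporting it) means any spliced-off sgl list can be freed the same way the els path above does it; a hypothetical caller, with some_sgl_list standing in for any driver-owned list:

	LIST_HEAD(tmp_sgl_list);

	spin_lock_irq(&phba->hbalock);
	list_splice_init(&some_sgl_list, &tmp_sgl_list);
	spin_unlock_irq(&phba->hbalock);
	lpfc_free_sgl_list(phba, &tmp_sgl_list);	/* no lock needed now */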
5017 5160
5018/** 5161/**
@@ -5057,99 +5200,19 @@ lpfc_free_active_sgl(struct lpfc_hba *phba)
5057 * This routine is invoked to allocate and initialize the driver's sgl 5200 * This routine is invoked to allocate and initialize the driver's sgl
5058 * list and set up the sgl xritag array accordingly. 5201 * list and set up the sgl xritag array accordingly.
5059 * 5202 *
5060 * Return codes
5061 * 0 - successful
5062 * other values - error
5063 **/ 5203 **/
5064static int 5204static void
5065lpfc_init_sgl_list(struct lpfc_hba *phba) 5205lpfc_init_sgl_list(struct lpfc_hba *phba)
5066{ 5206{
5067 struct lpfc_sglq *sglq_entry = NULL;
5068 int i;
5069 int els_xri_cnt;
5070
5071 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
5072 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5073 "2400 ELS XRI count %d.\n",
5074 els_xri_cnt);
5075 /* Initialize and populate the sglq list per host/VF. */ 5207 /* Initialize and populate the sglq list per host/VF. */
5076 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list); 5208 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
5077 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); 5209 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
5078 5210
5079 /* Sanity check on XRI management */ 5211 /* els xri-sgl book keeping */
5080 if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) { 5212 phba->sli4_hba.els_xri_cnt = 0;
5081 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5082 "2562 No room left for SCSI XRI allocation: "
5083 "max_xri=%d, els_xri=%d\n",
5084 phba->sli4_hba.max_cfg_param.max_xri,
5085 els_xri_cnt);
5086 return -ENOMEM;
5087 }
5088
5089 /* Allocate memory for the ELS XRI management array */
5090 phba->sli4_hba.lpfc_els_sgl_array =
5091 kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
5092 GFP_KERNEL);
5093
5094 if (!phba->sli4_hba.lpfc_els_sgl_array) {
5095 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5096 "2401 Failed to allocate memory for ELS "
5097 "XRI management array of size %d.\n",
5098 els_xri_cnt);
5099 return -ENOMEM;
5100 }
5101 5213
5102 /* Keep the SCSI XRI into the XRI management array */ 5214 /* scsi xri-buffer book keeping */
5103 phba->sli4_hba.scsi_xri_max =
5104 phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
5105 phba->sli4_hba.scsi_xri_cnt = 0; 5215 phba->sli4_hba.scsi_xri_cnt = 0;
5106 phba->sli4_hba.lpfc_scsi_psb_array =
5107 kzalloc((sizeof(struct lpfc_scsi_buf *) *
5108 phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
5109
5110 if (!phba->sli4_hba.lpfc_scsi_psb_array) {
5111 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5112 "2563 Failed to allocate memory for SCSI "
5113 "XRI management array of size %d.\n",
5114 phba->sli4_hba.scsi_xri_max);
5115 kfree(phba->sli4_hba.lpfc_els_sgl_array);
5116 return -ENOMEM;
5117 }
5118
5119 for (i = 0; i < els_xri_cnt; i++) {
5120 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
5121 if (sglq_entry == NULL) {
5122 printk(KERN_ERR "%s: only allocated %d sgls of "
5123 "expected %d count. Unloading driver.\n",
5124 __func__, i, els_xri_cnt);
5125 goto out_free_mem;
5126 }
5127
5128 sglq_entry->buff_type = GEN_BUFF_TYPE;
5129 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
5130 if (sglq_entry->virt == NULL) {
5131 kfree(sglq_entry);
5132 printk(KERN_ERR "%s: failed to allocate mbuf.\n"
5133 "Unloading driver.\n", __func__);
5134 goto out_free_mem;
5135 }
5136 sglq_entry->sgl = sglq_entry->virt;
5137 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
5138
5139 /* The list order is used by later block SGL registration */
5140 spin_lock_irq(&phba->hbalock);
5141 sglq_entry->state = SGL_FREED;
5142 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
5143 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
5144 phba->sli4_hba.total_sglq_bufs++;
5145 spin_unlock_irq(&phba->hbalock);
5146 }
5147 return 0;
5148
5149out_free_mem:
5150 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
5151 lpfc_free_sgl_list(phba);
5152 return -ENOMEM;
5153} 5216}
5154 5217
5155/** 5218/**
@@ -7320,9 +7383,11 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
7320 phba->sli4_hba.u.if_type2.ERR2regaddr); 7383 phba->sli4_hba.u.if_type2.ERR2regaddr);
7321 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7384 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7322 "2890 Port error detected during port " 7385 "2890 Port error detected during port "
7323 "reset(%d): port status reg 0x%x, " 7386 "reset(%d): wait_tmo:%d ms, "
7387 "port status reg 0x%x, "
7324 "error 1=0x%x, error 2=0x%x\n", 7388 "error 1=0x%x, error 2=0x%x\n",
7325 num_resets, reg_data.word0, 7389 num_resets, rdy_chk*10,
7390 reg_data.word0,
7326 phba->work_status[0], 7391 phba->work_status[0],
7327 phba->work_status[1]); 7392 phba->work_status[1]);
7328 rc = -ENODEV; 7393 rc = -ENODEV;
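[Editor's note] The wait_tmo printed here is rdy_chk*10 on the assumption that the readiness loop sleeps 10 ms per iteration; e.g. a rdy_chk of 150 would report a 1500 ms wait before the port error was declared.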
@@ -8694,8 +8759,11 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
8694 /* Release all the vports against this physical port */ 8759 /* Release all the vports against this physical port */
8695 vports = lpfc_create_vport_work_array(phba); 8760 vports = lpfc_create_vport_work_array(phba);
8696 if (vports != NULL) 8761 if (vports != NULL)
8697 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++) 8762 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
8763 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
8764 continue;
8698 fc_vport_terminate(vports[i]->fc_vport); 8765 fc_vport_terminate(vports[i]->fc_vport);
8766 }
8699 lpfc_destroy_vport_work_array(phba, vports); 8767 lpfc_destroy_vport_work_array(phba, vports);
8700 8768
8701 /* Remove FC host and then SCSI host with the physical port */ 8769 /* Remove FC host and then SCSI host with the physical port */
@@ -9115,8 +9183,12 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
9115 return 50; 9183 return 50;
9116 else if (max_xri <= 1024) 9184 else if (max_xri <= 1024)
9117 return 100; 9185 return 100;
9118 else 9186 else if (max_xri <= 1536)
9119 return 150; 9187 return 150;
9188 else if (max_xri <= 2048)
9189 return 200;
9190 else
9191 return 250;
9120 } else 9192 } else
9121 return 0; 9193 return 0;
9122} 9194}
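[Editor's note] Worked example of the new tiers together with the scsi_xri_max computation earlier in this patch: a port reporting max_xri = 2048 now reserves 200 XRIs for ELS, leaving 2048 - 200 = 1848 XRIs for SCSI buffers; previously anything above 1024 XRIs was capped at 150 ELS XRIs.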
@@ -9455,8 +9527,11 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
9455 /* Release all the vports against this physical port */ 9527 /* Release all the vports against this physical port */
9456 vports = lpfc_create_vport_work_array(phba); 9528 vports = lpfc_create_vport_work_array(phba);
9457 if (vports != NULL) 9529 if (vports != NULL)
9458 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++) 9530 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
9531 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
9532 continue;
9459 fc_vport_terminate(vports[i]->fc_vport); 9533 fc_vport_terminate(vports[i]->fc_vport);
9534 }
9460 lpfc_destroy_vport_work_array(phba, vports); 9535 lpfc_destroy_vport_work_array(phba, vports);
9461 9536
9462 /* Remove FC host and then SCSI host with the physical port */ 9537 /* Remove FC host and then SCSI host with the physical port */
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 15ca2a9a0cdd..9133a97f045f 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -367,8 +367,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
367 return 1; 367 return 1;
368 } 368 }
369 369
370 /* Check for Nport to NPort pt2pt protocol */
370 if ((vport->fc_flag & FC_PT2PT) && 371 if ((vport->fc_flag & FC_PT2PT) &&
371 !(vport->fc_flag & FC_PT2PT_PLOGI)) { 372 !(vport->fc_flag & FC_PT2PT_PLOGI)) {
373
372 /* rcv'ed PLOGI decides what our NPortId will be */ 374 /* rcv'ed PLOGI decides what our NPortId will be */
373 vport->fc_myDID = icmd->un.rcvels.parmRo; 375 vport->fc_myDID = icmd->un.rcvels.parmRo;
374 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 376 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -382,6 +384,13 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
382 mempool_free(mbox, phba->mbox_mem_pool); 384 mempool_free(mbox, phba->mbox_mem_pool);
383 goto out; 385 goto out;
384 } 386 }
387 /*
388 * For SLI4, the VFI/VPI are registered AFTER the
389 * Nport with the higher WWPN sends us a PLOGI with
390 * our assigned NPortId.
391 */
392 if (phba->sli_rev == LPFC_SLI_REV4)
393 lpfc_issue_reg_vfi(vport);
385 394
386 lpfc_can_disctmo(vport); 395 lpfc_can_disctmo(vport);
387 } 396 }
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 88f3a83dbd2e..66e09069f281 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -399,6 +399,14 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
399 num_rsrc_err = atomic_read(&phba->num_rsrc_err); 399 num_rsrc_err = atomic_read(&phba->num_rsrc_err);
400 num_cmd_success = atomic_read(&phba->num_cmd_success); 400 num_cmd_success = atomic_read(&phba->num_cmd_success);
401 401
402 /*
403 * The error and success command counters are global per
404 * driver instance. If another handler has already
405 * operated on this error event, just exit.
406 */
407 if (num_rsrc_err == 0)
408 return;
409
402 vports = lpfc_create_vport_work_array(phba); 410 vports = lpfc_create_vport_work_array(phba);
403 if (vports != NULL) 411 if (vports != NULL)
404 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 412 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
@@ -688,7 +696,8 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
688 rrq_empty = list_empty(&phba->active_rrq_list); 696 rrq_empty = list_empty(&phba->active_rrq_list);
689 spin_unlock_irqrestore(&phba->hbalock, iflag); 697 spin_unlock_irqrestore(&phba->hbalock, iflag);
690 if (ndlp) { 698 if (ndlp) {
691 lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1); 699 lpfc_set_rrq_active(phba, ndlp,
700 psb->cur_iocbq.sli4_lxritag, rxid, 1);
692 lpfc_sli4_abts_err_handler(phba, ndlp, axri); 701 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
693 } 702 }
694 lpfc_release_scsi_buf_s4(phba, psb); 703 lpfc_release_scsi_buf_s4(phba, psb);
@@ -718,72 +727,162 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
718} 727}
719 728
720/** 729/**
721 * lpfc_sli4_repost_scsi_sgl_list - Repost the SCSI buffer sgl pages as a block 730 * lpfc_sli4_post_scsi_sgl_list - Post blocks of scsi buffer sgls from a list
722 * @phba: pointer to lpfc hba data structure. 731 * @phba: pointer to lpfc hba data structure.
732 * @post_sblist: pointer to the scsi buffer list.
723 * 733 *
724 * This routine walks the list of scsi buffers that have been allocated and 734 * This routine walks a list of scsi buffers that was passed in. It attempts
725 * repost them to the HBA by using SGL block post. This is needed after a 735 * to construct blocks of scsi buffer sgls which contain contiguous xris and
726 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine 736 * uses the non-embedded SGL block post mailbox commands to post to the port.
727 * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list 737 * For a single SCSI buffer sgl with a non-contiguous xri, if any, it uses the
728 * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers. 738 * embedded SGL post mailbox command for posting. The @post_sblist passed in
739 * must be a local list, so no lock is needed when manipulating the list.
729 * 740 *
730 * Returns: 0 = success, non-zero failure. 741 * Returns: 0 = failure, non-zero number of successfully posted buffers.
731 **/ 742 **/
732int 743int
733lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba) 744lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
745 struct list_head *post_sblist, int sb_count)
734{ 746{
735 struct lpfc_scsi_buf *psb; 747 struct lpfc_scsi_buf *psb, *psb_next;
736 int index, status, bcnt = 0, rcnt = 0, rc = 0; 748 int status;
737 LIST_HEAD(sblist); 749 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
738 750 dma_addr_t pdma_phys_bpl1;
739 for (index = 0; index < phba->sli4_hba.scsi_xri_cnt; index++) { 751 int last_xritag = NO_XRI;
740 psb = phba->sli4_hba.lpfc_scsi_psb_array[index]; 752 LIST_HEAD(prep_sblist);
741 if (psb) { 753 LIST_HEAD(blck_sblist);
742 /* Remove from SCSI buffer list */ 754 LIST_HEAD(scsi_sblist);
743 list_del(&psb->list); 755
744 /* Add it to a local SCSI buffer list */ 756 /* sanity check */
745 list_add_tail(&psb->list, &sblist); 757 if (sb_count <= 0)
746 if (++rcnt == LPFC_NEMBED_MBOX_SGL_CNT) { 758 return -EINVAL;
747 bcnt = rcnt; 759
748 rcnt = 0; 760 list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
761 list_del_init(&psb->list);
762 block_cnt++;
763 if ((last_xritag != NO_XRI) &&
764 (psb->cur_iocbq.sli4_xritag != last_xritag + 1)) {
765 /* a hole in xri block, form a sgl posting block */
766 list_splice_init(&prep_sblist, &blck_sblist);
767 post_cnt = block_cnt - 1;
768 /* prepare list for next posting block */
769 list_add_tail(&psb->list, &prep_sblist);
770 block_cnt = 1;
771 } else {
772 /* prepare list for next posting block */
773 list_add_tail(&psb->list, &prep_sblist);
774 /* enough sgls for non-embed sgl mbox command */
775 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
776 list_splice_init(&prep_sblist, &blck_sblist);
777 post_cnt = block_cnt;
778 block_cnt = 0;
749 } 779 }
750 } else 780 }
751 /* A hole present in the XRI array, need to skip */ 781 num_posting++;
752 bcnt = rcnt; 782 last_xritag = psb->cur_iocbq.sli4_xritag;
753 783
754 if (index == phba->sli4_hba.scsi_xri_cnt - 1) 784 /* end of repost sgl list condition for SCSI buffers */
755 /* End of XRI array for SCSI buffer, complete */ 785 if (num_posting == sb_count) {
756 bcnt = rcnt; 786 if (post_cnt == 0) {
787 /* last sgl posting block */
788 list_splice_init(&prep_sblist, &blck_sblist);
789 post_cnt = block_cnt;
790 } else if (block_cnt == 1) {
791 /* last single sgl with non-contiguous xri */
792 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
793 pdma_phys_bpl1 = psb->dma_phys_bpl +
794 SGL_PAGE_SIZE;
795 else
796 pdma_phys_bpl1 = 0;
797 status = lpfc_sli4_post_sgl(phba,
798 psb->dma_phys_bpl,
799 pdma_phys_bpl1,
800 psb->cur_iocbq.sli4_xritag);
801 if (status) {
802 /* failure, put on abort scsi list */
803 psb->exch_busy = 1;
804 } else {
805 /* success, put on SCSI buffer list */
806 psb->exch_busy = 0;
807 psb->status = IOSTAT_SUCCESS;
808 num_posted++;
809 }
810 /* success, put on SCSI buffer sgl list */
811 list_add_tail(&psb->list, &scsi_sblist);
812 }
813 }
757 814
758 /* Continue until collect up to a nembed page worth of sgls */ 815 /* continue until a nembed page worth of sgls */
759 if (bcnt == 0) 816 if (post_cnt == 0)
760 continue; 817 continue;
761 /* Now, post the SCSI buffer list sgls as a block */ 818
762 if (!phba->sli4_hba.extents_in_use) 819 /* post block of SCSI buffer list sgls */
763 status = lpfc_sli4_post_scsi_sgl_block(phba, 820 status = lpfc_sli4_post_scsi_sgl_block(phba, &blck_sblist,
764 &sblist, 821 post_cnt);
765 bcnt); 822
766 else 823 /* don't reset xritag due to hole in xri block */
767 status = lpfc_sli4_post_scsi_sgl_blk_ext(phba, 824 if (block_cnt == 0)
768 &sblist, 825 last_xritag = NO_XRI;
769 bcnt); 826
770 /* Reset SCSI buffer count for next round of posting */ 827 /* reset SCSI buffer post count for next round of posting */
771 bcnt = 0; 828 post_cnt = 0;
772 while (!list_empty(&sblist)) { 829
773 list_remove_head(&sblist, psb, struct lpfc_scsi_buf, 830 /* put posted SCSI buffer-sgl posted on SCSI buffer sgl list */
774 list); 831 while (!list_empty(&blck_sblist)) {
832 list_remove_head(&blck_sblist, psb,
833 struct lpfc_scsi_buf, list);
775 if (status) { 834 if (status) {
776 /* Put this back on the abort scsi list */ 835 /* failure, put on abort scsi list */
777 psb->exch_busy = 1; 836 psb->exch_busy = 1;
778 rc++;
779 } else { 837 } else {
838 /* success, put on SCSI buffer list */
780 psb->exch_busy = 0; 839 psb->exch_busy = 0;
781 psb->status = IOSTAT_SUCCESS; 840 psb->status = IOSTAT_SUCCESS;
841 num_posted++;
782 } 842 }
783 /* Put it back into the SCSI buffer list */ 843 list_add_tail(&psb->list, &scsi_sblist);
784 lpfc_release_scsi_buf_s4(phba, psb);
785 } 844 }
786 } 845 }
846 /* Push SCSI buffers with sgl posted to the available list */
847 while (!list_empty(&scsi_sblist)) {
848 list_remove_head(&scsi_sblist, psb,
849 struct lpfc_scsi_buf, list);
850 lpfc_release_scsi_buf_s4(phba, psb);
851 }
852 return num_posted;
853}
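[Editor's note] To illustrate the blocking logic with hypothetical xri values: buffers carrying xris 100, 101, 102, 200 produce one three-sgl block posted via lpfc_sli4_post_scsi_sgl_block(), while the trailing 200, being a single sgl with a non-contiguous xri, is posted individually through lpfc_sli4_post_sgl().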
854
855/**
856 * lpfc_sli4_repost_scsi_sgl_list - Repost all the allocated scsi buffer sgls
857 * @phba: pointer to lpfc hba data structure.
858 *
859 * This routine walks the list of scsi buffers that have been allocated and
860 * repost them to the port by using SGL block post. This is needed after a
861 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
862 * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
863 * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
864 *
865 * Returns: 0 = success, non-zero failure.
866 **/
867int
868lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
869{
870 LIST_HEAD(post_sblist);
871 int num_posted, rc = 0;
872
873 /* get all SCSI buffers that need reposting onto a local list */
874 spin_lock(&phba->scsi_buf_list_lock);
875 list_splice_init(&phba->lpfc_scsi_buf_list, &post_sblist);
876 spin_unlock(&phba->scsi_buf_list_lock);
877
878 /* post the list of scsi buffer sgls to port if available */
879 if (!list_empty(&post_sblist)) {
880 num_posted = lpfc_sli4_post_scsi_sgl_list(phba, &post_sblist,
881 phba->sli4_hba.scsi_xri_cnt);
882 /* failed to post any scsi buffer, return error */
883 if (num_posted == 0)
884 rc = -EIO;
885 }
787 return rc; 886 return rc;
788} 887}
789 888
@@ -792,12 +891,13 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
792 * @vport: The virtual port for which this call being executed. 891 * @vport: The virtual port for which this call being executed.
793 * @num_to_allocate: The requested number of buffers to allocate. 892 * @num_to_allocate: The requested number of buffers to allocate.
794 * 893 *
795 * This routine allocates a scsi buffer for a device with SLI-4 interface spec, 894 * This routine allocates scsi buffers for a device with SLI-4 interface spec;
796 * the scsi buffer contains all the necessary information needed to initiate 895 * each scsi buffer contains all the necessary information needed to initiate
797 * a SCSI I/O. 896 * a SCSI I/O. After allocating up to @num_to_allocate SCSI buffers and putting
897 * them on a list, it posts them to the port by using SGL block post.
798 * 898 *
799 * Return codes: 899 * Return codes:
800 * int - number of scsi buffers that were allocated. 900 * int - number of scsi buffers that were allocated and posted.
801 * 0 = failure, less than num_to_alloc is a partial failure. 901 * 0 = failure, less than num_to_alloc is a partial failure.
802 **/ 902 **/
803static int 903static int
@@ -810,22 +910,21 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
810 dma_addr_t pdma_phys_fcp_cmd; 910 dma_addr_t pdma_phys_fcp_cmd;
811 dma_addr_t pdma_phys_fcp_rsp; 911 dma_addr_t pdma_phys_fcp_rsp;
812 dma_addr_t pdma_phys_bpl, pdma_phys_bpl1; 912 dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
813 uint16_t iotag, last_xritag = NO_XRI, lxri = 0; 913 uint16_t iotag, lxri = 0;
814 int status = 0, index; 914 int bcnt, num_posted;
815 int bcnt; 915 LIST_HEAD(prep_sblist);
816 int non_sequential_xri = 0; 916 LIST_HEAD(post_sblist);
817 LIST_HEAD(sblist); 917 LIST_HEAD(scsi_sblist);
818 918
819 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { 919 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
820 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); 920 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
821 if (!psb) 921 if (!psb)
822 break; 922 break;
823
824 /* 923 /*
825 * Get memory from the pci pool to map the virt space to pci bus 924 * Get memory from the pci pool to map the virt space to
826 * space for an I/O. The DMA buffer includes space for the 925 * pci bus space for an I/O. The DMA buffer includes space
827 * struct fcp_cmnd, struct fcp_rsp and the number of bde's 926 * for the struct fcp_cmnd, struct fcp_rsp and the number
828 * necessary to support the sg_tablesize. 927 * of bde's necessary to support the sg_tablesize.
829 */ 928 */
830 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, 929 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
831 GFP_KERNEL, &psb->dma_handle); 930 GFP_KERNEL, &psb->dma_handle);
@@ -833,8 +932,6 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
833 kfree(psb); 932 kfree(psb);
834 break; 933 break;
835 } 934 }
836
837 /* Initialize virtual ptrs to dma_buf region. */
838 memset(psb->data, 0, phba->cfg_sg_dma_buf_size); 935 memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
839 936
840 /* Allocate iotag for psb->cur_iocbq. */ 937 /* Allocate iotag for psb->cur_iocbq. */
@@ -855,16 +952,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
855 } 952 }
856 psb->cur_iocbq.sli4_lxritag = lxri; 953 psb->cur_iocbq.sli4_lxritag = lxri;
857 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 954 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
858 if (last_xritag != NO_XRI
859 && psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
860 non_sequential_xri = 1;
861 } else
862 list_add_tail(&psb->list, &sblist);
863 last_xritag = psb->cur_iocbq.sli4_xritag;
864
865 index = phba->sli4_hba.scsi_xri_cnt++;
866 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; 955 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
867
868 psb->fcp_bpl = psb->data; 956 psb->fcp_bpl = psb->data;
869 psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size) 957 psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
870 - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); 958 - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
@@ -880,9 +968,9 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
880 pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd); 968 pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
881 969
882 /* 970 /*
883 * The first two bdes are the FCP_CMD and FCP_RSP. The balance 971 * The first two bdes are the FCP_CMD and FCP_RSP.
884 * are sg list bdes. Initialize the first two and leave the 972 * The balance are sg list bdes. Initialize the
885 * rest for queuecommand. 973 * first two and leave the rest for queuecommand.
886 */ 974 */
887 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd)); 975 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
888 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd)); 976 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
@@ -917,62 +1005,31 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
917 iocb->ulpBdeCount = 1; 1005 iocb->ulpBdeCount = 1;
918 iocb->ulpLe = 1; 1006 iocb->ulpLe = 1;
919 iocb->ulpClass = CLASS3; 1007 iocb->ulpClass = CLASS3;
920 psb->cur_iocbq.context1 = psb; 1008 psb->cur_iocbq.context1 = psb;
921 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) 1009 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
922 pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE; 1010 pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
923 else 1011 else
924 pdma_phys_bpl1 = 0; 1012 pdma_phys_bpl1 = 0;
925 psb->dma_phys_bpl = pdma_phys_bpl; 1013 psb->dma_phys_bpl = pdma_phys_bpl;
926 phba->sli4_hba.lpfc_scsi_psb_array[index] = psb; 1014
927 if (non_sequential_xri) { 1015 /* add the scsi buffer to a post list */
928 status = lpfc_sli4_post_sgl(phba, pdma_phys_bpl, 1016 list_add_tail(&psb->list, &post_sblist);
929 pdma_phys_bpl1, 1017 spin_lock_irq(&phba->scsi_buf_list_lock);
930 psb->cur_iocbq.sli4_xritag); 1018 phba->sli4_hba.scsi_xri_cnt++;
931 if (status) { 1019 spin_unlock_irq(&phba->scsi_buf_list_lock);
932 /* Put this back on the abort scsi list */
933 psb->exch_busy = 1;
934 } else {
935 psb->exch_busy = 0;
936 psb->status = IOSTAT_SUCCESS;
937 }
938 /* Put it back into the SCSI buffer list */
939 lpfc_release_scsi_buf_s4(phba, psb);
940 break;
941 }
942 }
943 if (bcnt) {
944 if (!phba->sli4_hba.extents_in_use)
945 status = lpfc_sli4_post_scsi_sgl_block(phba,
946 &sblist,
947 bcnt);
948 else
949 status = lpfc_sli4_post_scsi_sgl_blk_ext(phba,
950 &sblist,
951 bcnt);
952
953 if (status) {
954 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
955 "3021 SCSI SGL post error %d\n",
956 status);
957 bcnt = 0;
958 }
959 /* Reset SCSI buffer count for next round of posting */
960 while (!list_empty(&sblist)) {
961 list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
962 list);
963 if (status) {
964 /* Put this back on the abort scsi list */
965 psb->exch_busy = 1;
966 } else {
967 psb->exch_busy = 0;
968 psb->status = IOSTAT_SUCCESS;
969 }
970 /* Put it back into the SCSI buffer list */
971 lpfc_release_scsi_buf_s4(phba, psb);
972 }
973 } 1020 }
1021 lpfc_printf_log(phba, KERN_INFO, LOG_BG,
1022 "3021 Allocate %d out of %d requested new SCSI "
1023 "buffers\n", bcnt, num_to_alloc);
1024
1025 /* post the list of scsi buffer sgls to port if available */
1026 if (!list_empty(&post_sblist))
1027 num_posted = lpfc_sli4_post_scsi_sgl_list(phba,
1028 &post_sblist, bcnt);
1029 else
1030 num_posted = 0;
974 1031
975 return bcnt + non_sequential_xri; 1032 return num_posted;
976} 1033}
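[Editor's note] Note the changed return semantics: the routine now reports num_posted, the count of buffers actually posted to the port, rather than the old bcnt + non_sequential_xri allocation count, so callers only ever see usable buffers.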
977 1034
978/** 1035/**
@@ -1043,7 +1100,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1043 list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list, 1100 list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list,
1044 list) { 1101 list) {
1045 if (lpfc_test_rrq_active(phba, ndlp, 1102 if (lpfc_test_rrq_active(phba, ndlp,
1046 lpfc_cmd->cur_iocbq.sli4_xritag)) 1103 lpfc_cmd->cur_iocbq.sli4_lxritag))
1047 continue; 1104 continue;
1048 list_del(&lpfc_cmd->list); 1105 list_del(&lpfc_cmd->list);
1049 found = 1; 1106 found = 1;
@@ -1897,7 +1954,9 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1897 dma_addr_t physaddr; 1954 dma_addr_t physaddr;
1898 int i = 0, num_bde = 0, status; 1955 int i = 0, num_bde = 0, status;
1899 int datadir = sc->sc_data_direction; 1956 int datadir = sc->sc_data_direction;
1957#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1900 uint32_t rc; 1958 uint32_t rc;
1959#endif
1901 uint32_t checking = 1; 1960 uint32_t checking = 1;
1902 uint32_t reftag; 1961 uint32_t reftag;
1903 unsigned blksize; 1962 unsigned blksize;
@@ -2034,7 +2093,9 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2034 int datadir = sc->sc_data_direction; 2093 int datadir = sc->sc_data_direction;
2035 unsigned char pgdone = 0, alldone = 0; 2094 unsigned char pgdone = 0, alldone = 0;
2036 unsigned blksize; 2095 unsigned blksize;
2096#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2037 uint32_t rc; 2097 uint32_t rc;
2098#endif
2038 uint32_t checking = 1; 2099 uint32_t checking = 1;
2039 uint32_t reftag; 2100 uint32_t reftag;
2040 uint8_t txop, rxop; 2101 uint8_t txop, rxop;
@@ -2253,7 +2314,9 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2253 uint32_t reftag; 2314 uint32_t reftag;
2254 unsigned blksize; 2315 unsigned blksize;
2255 uint8_t txop, rxop; 2316 uint8_t txop, rxop;
2317#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2256 uint32_t rc; 2318 uint32_t rc;
2319#endif
2257 uint32_t checking = 1; 2320 uint32_t checking = 1;
2258 uint32_t dma_len; 2321 uint32_t dma_len;
2259 uint32_t dma_offset = 0; 2322 uint32_t dma_offset = 0;
@@ -2383,7 +2446,9 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2383 uint32_t reftag; 2446 uint32_t reftag;
2384 uint8_t txop, rxop; 2447 uint8_t txop, rxop;
2385 uint32_t dma_len; 2448 uint32_t dma_len;
2449#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2386 uint32_t rc; 2450 uint32_t rc;
2451#endif
2387 uint32_t checking = 1; 2452 uint32_t checking = 1;
2388 uint32_t dma_offset = 0; 2453 uint32_t dma_offset = 0;
2389 int num_sge = 0; 2454 int num_sge = 0;
@@ -3604,11 +3669,16 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
3604 logit = LOG_FCP | LOG_FCP_UNDER; 3669 logit = LOG_FCP | LOG_FCP_UNDER;
3605 lpfc_printf_vlog(vport, KERN_WARNING, logit, 3670 lpfc_printf_vlog(vport, KERN_WARNING, logit,
3606 "9030 FCP cmd x%x failed <%d/%d> " 3671 "9030 FCP cmd x%x failed <%d/%d> "
3607 "status: x%x result: x%x Data: x%x x%x\n", 3672 "status: x%x result: x%x "
3673 "sid: x%x did: x%x oxid: x%x "
3674 "Data: x%x x%x\n",
3608 cmd->cmnd[0], 3675 cmd->cmnd[0],
3609 cmd->device ? cmd->device->id : 0xffff, 3676 cmd->device ? cmd->device->id : 0xffff,
3610 cmd->device ? cmd->device->lun : 0xffff, 3677 cmd->device ? cmd->device->lun : 0xffff,
3611 lpfc_cmd->status, lpfc_cmd->result, 3678 lpfc_cmd->status, lpfc_cmd->result,
3679 vport->fc_myDID, pnode->nlp_DID,
3680 phba->sli_rev == LPFC_SLI_REV4 ?
3681 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
3612 pIocbOut->iocb.ulpContext, 3682 pIocbOut->iocb.ulpContext,
3613 lpfc_cmd->cur_iocbq.iocb.ulpIoTag); 3683 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
3614 3684
@@ -3689,8 +3759,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
3689 * ABTS we cannot generate and RRQ. 3759 * ABTS we cannot generate and RRQ.
3690 */ 3760 */
3691 lpfc_set_rrq_active(phba, pnode, 3761 lpfc_set_rrq_active(phba, pnode,
3692 lpfc_cmd->cur_iocbq.sli4_xritag, 3762 lpfc_cmd->cur_iocbq.sli4_lxritag,
3693 0, 0); 3763 0, 0);
3694 } 3764 }
3695 /* else: fall through */ 3765 /* else: fall through */
3696 default: 3766 default:
@@ -4348,8 +4418,20 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4348 ret = fc_block_scsi_eh(cmnd); 4418 ret = fc_block_scsi_eh(cmnd);
4349 if (ret) 4419 if (ret)
4350 return ret; 4420 return ret;
4421
4422 spin_lock_irq(&phba->hbalock);
4423 /* driver queued commands are in process of being flushed */
4424 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
4425 spin_unlock_irq(&phba->hbalock);
4426 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4427 "3168 SCSI Layer abort requested I/O has been "
4428 "flushed by LLD.\n");
4429 return FAILED;
4430 }
4431
4351 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; 4432 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
4352 if (!lpfc_cmd) { 4433 if (!lpfc_cmd) {
4434 spin_unlock_irq(&phba->hbalock);
4353 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 4435 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4354 "2873 SCSI Layer I/O Abort Request IO CMPL Status " 4436 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
4355 "x%x ID %d LUN %d\n", 4437 "x%x ID %d LUN %d\n",
@@ -4357,23 +4439,34 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4357 return SUCCESS; 4439 return SUCCESS;
4358 } 4440 }
4359 4441
4442 iocb = &lpfc_cmd->cur_iocbq;
4443 /* the command is in process of being cancelled */
4444 if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
4445 spin_unlock_irq(&phba->hbalock);
4446 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4447 "3169 SCSI Layer abort requested I/O has been "
4448 "cancelled by LLD.\n");
4449 return FAILED;
4450 }
4360 /* 4451 /*
4361 * If pCmd field of the corresponding lpfc_scsi_buf structure 4452 * If pCmd field of the corresponding lpfc_scsi_buf structure
4362 * points to a different SCSI command, then the driver has 4453 * points to a different SCSI command, then the driver has
4363 * already completed this command, but the midlayer did not 4454 * already completed this command, but the midlayer did not
4364 * see the completion before the eh fired. Just return 4455 * see the completion before the eh fired. Just return SUCCESS.
4365 * SUCCESS.
4366 */ 4456 */
4367 iocb = &lpfc_cmd->cur_iocbq; 4457 if (lpfc_cmd->pCmd != cmnd) {
4368 if (lpfc_cmd->pCmd != cmnd) 4458 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4369 goto out; 4459 "3170 SCSI Layer abort requested I/O has been "
4460 "completed by LLD.\n");
4461 goto out_unlock;
4462 }
4370 4463
4371 BUG_ON(iocb->context1 != lpfc_cmd); 4464 BUG_ON(iocb->context1 != lpfc_cmd);
4372 4465
4373 abtsiocb = lpfc_sli_get_iocbq(phba); 4466 abtsiocb = __lpfc_sli_get_iocbq(phba);
4374 if (abtsiocb == NULL) { 4467 if (abtsiocb == NULL) {
4375 ret = FAILED; 4468 ret = FAILED;
4376 goto out; 4469 goto out_unlock;
4377 } 4470 }
4378 4471
4379 /* 4472 /*
@@ -4405,6 +4498,9 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4405 4498
4406 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 4499 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
4407 abtsiocb->vport = vport; 4500 abtsiocb->vport = vport;
4501 /* no longer need the lock after this point */
4502 spin_unlock_irq(&phba->hbalock);
4503
4408 if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) == 4504 if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
4409 IOCB_ERROR) { 4505 IOCB_ERROR) {
4410 lpfc_sli_release_iocbq(phba, abtsiocb); 4506 lpfc_sli_release_iocbq(phba, abtsiocb);
@@ -4421,10 +4517,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4421 wait_event_timeout(waitq, 4517 wait_event_timeout(waitq,
4422 (lpfc_cmd->pCmd != cmnd), 4518 (lpfc_cmd->pCmd != cmnd),
4423 (2*vport->cfg_devloss_tmo*HZ)); 4519 (2*vport->cfg_devloss_tmo*HZ));
4424
4425 spin_lock_irq(shost->host_lock);
4426 lpfc_cmd->waitq = NULL; 4520 lpfc_cmd->waitq = NULL;
4427 spin_unlock_irq(shost->host_lock);
4428 4521
4429 if (lpfc_cmd->pCmd == cmnd) { 4522 if (lpfc_cmd->pCmd == cmnd) {
4430 ret = FAILED; 4523 ret = FAILED;
@@ -4434,8 +4527,11 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4434 "LUN %d\n", 4527 "LUN %d\n",
4435 ret, cmnd->device->id, cmnd->device->lun); 4528 ret, cmnd->device->id, cmnd->device->lun);
4436 } 4529 }
4530 goto out;
4437 4531
4438 out: 4532out_unlock:
4533 spin_unlock_irq(&phba->hbalock);
4534out:
4439 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 4535 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4440 "0749 SCSI Layer I/O Abort Request Status x%x ID %d " 4536 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
4441 "LUN %d\n", ret, cmnd->device->id, 4537 "LUN %d\n", ret, cmnd->device->id,
@@ -4863,6 +4959,43 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
4863} 4959}
4864 4960
4865/** 4961/**
4962 * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
4963 * @cmnd: Pointer to scsi_cmnd data structure.
4964 *
4965 * This routine performs a host reset of the adapter port. It brings the HBA
4966 * offline, performs a board restart, and then brings the board back online.
4967 * The lpfc_offline call invokes lpfc_sli_hba_down, which aborts and locally
4968 * rejects all outstanding SCSI commands to the host, with the errors returned
4969 * to the SCSI mid-level. As this is the SCSI mid-level's last resort for
4970 * error handling, it returns an error only if resetting the adapter
4971 * fails; in all other cases it returns success.
4972 *
4973 * Return code :
4974 * 0x2003 - Error
4975 * 0x2002 - Success
4976 **/
4977static int
4978lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
4979{
4980 struct Scsi_Host *shost = cmnd->device->host;
4981 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4982 struct lpfc_hba *phba = vport->phba;
4983 int rc, ret = SUCCESS;
4984
4985 lpfc_offline_prep(phba);
4986 lpfc_offline(phba);
4987 rc = lpfc_sli_brdrestart(phba);
4988 if (rc)
4989 ret = FAILED;
4990 lpfc_online(phba);
4991 lpfc_unblock_mgmt_io(phba);
4992
4993 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
4994 "3172 SCSI layer issued Host Reset Data: x%x\n", ret);
4995 return ret;
4996}
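[Editor's note] The 0x2002/0x2003 codes documented above match the SCSI EH SUCCESS and FAILED constants (per the standard scsi headers; stated here as an assumption), which is why the handler can simply return ret to the mid-layer.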
4997
4998/**
4866 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point 4999 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
4867 * @sdev: Pointer to scsi_device. 5000 * @sdev: Pointer to scsi_device.
4868 * 5001 *
@@ -4994,6 +5127,7 @@ struct scsi_host_template lpfc_template = {
4994 .eh_device_reset_handler = lpfc_device_reset_handler, 5127 .eh_device_reset_handler = lpfc_device_reset_handler,
4995 .eh_target_reset_handler = lpfc_target_reset_handler, 5128 .eh_target_reset_handler = lpfc_target_reset_handler,
4996 .eh_bus_reset_handler = lpfc_bus_reset_handler, 5129 .eh_bus_reset_handler = lpfc_bus_reset_handler,
5130 .eh_host_reset_handler = lpfc_host_reset_handler,
4997 .slave_alloc = lpfc_slave_alloc, 5131 .slave_alloc = lpfc_slave_alloc,
4998 .slave_configure = lpfc_slave_configure, 5132 .slave_configure = lpfc_slave_configure,
4999 .slave_destroy = lpfc_slave_destroy, 5133 .slave_destroy = lpfc_slave_destroy,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index dbaf5b963bff..b4720a109817 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -67,6 +67,8 @@ static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
67 struct hbq_dmabuf *); 67 struct hbq_dmabuf *);
68static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *, 68static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
69 struct lpfc_cqe *); 69 struct lpfc_cqe *);
70static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *,
71 int);
70 72
71static IOCB_t * 73static IOCB_t *
72lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq) 74lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
@@ -500,7 +502,7 @@ lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
500 * allocation is successful, it returns pointer to the newly 502 * allocation is successful, it returns pointer to the newly
501 * allocated iocb object else it returns NULL. 503 * allocated iocb object else it returns NULL.
502 **/ 504 **/
503static struct lpfc_iocbq * 505struct lpfc_iocbq *
504__lpfc_sli_get_iocbq(struct lpfc_hba *phba) 506__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
505{ 507{
506 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list; 508 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
@@ -875,6 +877,9 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
875 } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) && 877 } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
876 !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) 878 !(piocbq->iocb_flag & LPFC_IO_LIBDFC))
877 ndlp = piocbq->context_un.ndlp; 879 ndlp = piocbq->context_un.ndlp;
880 else if ((piocbq->iocb.ulpCommand == CMD_ELS_REQUEST64_CR) &&
881 (piocbq->iocb_flag & LPFC_IO_LIBDFC))
882 ndlp = piocbq->context_un.ndlp;
878 else 883 else
879 ndlp = piocbq->context1; 884 ndlp = piocbq->context1;
880 885
@@ -883,7 +888,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
883 while (!found) { 888 while (!found) {
884 if (!sglq) 889 if (!sglq)
885 return NULL; 890 return NULL;
886 if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) { 891 if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_lxritag)) {
887 /* This xri has an rrq outstanding for this DID. 892 /* This xri has an rrq outstanding for this DID.
888 * put it back in the list and get another xri. 893 * put it back in the list and get another xri.
889 */ 894 */
@@ -1257,7 +1262,7 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1257 struct lpfc_iocbq *piocb) 1262 struct lpfc_iocbq *piocb)
1258{ 1263{
1259 list_add_tail(&piocb->list, &pring->txcmplq); 1264 list_add_tail(&piocb->list, &pring->txcmplq);
1260 piocb->iocb_flag |= LPFC_IO_ON_Q; 1265 piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
1261 pring->txcmplq_cnt++; 1266 pring->txcmplq_cnt++;
1262 if (pring->txcmplq_cnt > pring->txcmplq_max) 1267 if (pring->txcmplq_cnt > pring->txcmplq_max)
1263 pring->txcmplq_max = pring->txcmplq_cnt; 1268 pring->txcmplq_max = pring->txcmplq_cnt;
@@ -2556,9 +2561,9 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2556 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2561 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2557 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2562 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2558 list_del_init(&cmd_iocb->list); 2563 list_del_init(&cmd_iocb->list);
2559 if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) { 2564 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2560 pring->txcmplq_cnt--; 2565 pring->txcmplq_cnt--;
2561 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q; 2566 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2562 } 2567 }
2563 return cmd_iocb; 2568 return cmd_iocb;
2564 } 2569 }
@@ -2591,14 +2596,14 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2591 2596
2592 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2597 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2593 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2598 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2594 list_del_init(&cmd_iocb->list); 2599 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2595 if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) { 2600 /* remove from txcmpl queue list */
2596 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q; 2601 list_del_init(&cmd_iocb->list);
2602 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2597 pring->txcmplq_cnt--; 2603 pring->txcmplq_cnt--;
2604 return cmd_iocb;
2598 } 2605 }
2599 return cmd_iocb;
2600 } 2606 }
2601
2602 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2607 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2603 "0372 iotag x%x is out off range: max iotag (x%x)\n", 2608 "0372 iotag x%x is out off range: max iotag (x%x)\n",
2604 iotag, phba->sli.last_iotag); 2609 iotag, phba->sli.last_iotag);
@@ -3466,6 +3471,9 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
3466 /* Retrieve everything on the txcmplq */ 3471 /* Retrieve everything on the txcmplq */
3467 list_splice_init(&pring->txcmplq, &txcmplq); 3472 list_splice_init(&pring->txcmplq, &txcmplq);
3468 pring->txcmplq_cnt = 0; 3473 pring->txcmplq_cnt = 0;
3474
3475 /* Indicate the I/O queues are flushed */
3476 phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
3469 spin_unlock_irq(&phba->hbalock); 3477 spin_unlock_irq(&phba->hbalock);
3470 3478
3471 /* Flush the txq */ 3479 /* Flush the txq */
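[Editor's note] Setting HBA_FCP_IOQ_FLUSH here pairs with the new check in lpfc_abort_handler() earlier in this series: an abort that races with a ring flush now fails fast under hbalock instead of touching a command that is being freed.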
@@ -3877,6 +3885,7 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
3877{ 3885{
3878 struct lpfc_sli *psli = &phba->sli; 3886 struct lpfc_sli *psli = &phba->sli;
3879 uint16_t cfg_value; 3887 uint16_t cfg_value;
3888 int rc;
3880 3889
3881 /* Reset HBA */ 3890 /* Reset HBA */
3882 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3891 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -3905,12 +3914,12 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
3905 3914
3906 /* Perform FCoE PCI function reset */ 3915 /* Perform FCoE PCI function reset */
3907 lpfc_sli4_queue_destroy(phba); 3916 lpfc_sli4_queue_destroy(phba);
3908 lpfc_pci_function_reset(phba); 3917 rc = lpfc_pci_function_reset(phba);
3909 3918
3910 /* Restore PCI cmd register */ 3919 /* Restore PCI cmd register */
3911 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 3920 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
3912 3921
3913 return 0; 3922 return rc;
3914} 3923}
3915 3924
3916/** 3925/**
@@ -4002,6 +4011,7 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4002{ 4011{
4003 struct lpfc_sli *psli = &phba->sli; 4012 struct lpfc_sli *psli = &phba->sli;
4004 uint32_t hba_aer_enabled; 4013 uint32_t hba_aer_enabled;
4014 int rc;
4005 4015
4006 /* Restart HBA */ 4016 /* Restart HBA */
4007 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4017 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -4011,7 +4021,7 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4011 /* Take PCIe device Advanced Error Reporting (AER) state */ 4021 /* Take PCIe device Advanced Error Reporting (AER) state */
4012 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 4022 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4013 4023
4014 lpfc_sli4_brdreset(phba); 4024 rc = lpfc_sli4_brdreset(phba);
4015 4025
4016 spin_lock_irq(&phba->hbalock); 4026 spin_lock_irq(&phba->hbalock);
4017 phba->pport->stopped = 0; 4027 phba->pport->stopped = 0;
@@ -4028,7 +4038,7 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4028 4038
4029 lpfc_hba_down_post(phba); 4039 lpfc_hba_down_post(phba);
4030 4040
4031 return 0; 4041 return rc;
4032} 4042}
4033 4043
4034/** 4044/**
@@ -4967,7 +4977,12 @@ lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
4967 &rsrc_info->u.rsp); 4977 &rsrc_info->u.rsp);
4968 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size, 4978 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
4969 &rsrc_info->u.rsp); 4979 &rsrc_info->u.rsp);
4970 err_exit: 4980
4981 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4982 "3162 Retrieved extents type-%d from port: count:%d, "
4983 "size:%d\n", type, *extnt_count, *extnt_size);
4984
4985err_exit:
4971 mempool_free(mbox, phba->mbox_mem_pool); 4986 mempool_free(mbox, phba->mbox_mem_pool);
4972 return rc; 4987 return rc;
4973} 4988}
@@ -5051,7 +5066,7 @@ lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5051 * 0: if successful 5066 * 0: if successful
5052 **/ 5067 **/
5053static int 5068static int
5054lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt, 5069lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
5055 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox) 5070 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5056{ 5071{
5057 int rc = 0; 5072 int rc = 0;
@@ -5060,7 +5075,7 @@ lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
5060 uint32_t alloc_len, mbox_tmo; 5075 uint32_t alloc_len, mbox_tmo;
5061 5076
5062 /* Calculate the total requested length of the dma memory */ 5077 /* Calculate the total requested length of the dma memory */
5063 req_len = *extnt_cnt * sizeof(uint16_t); 5078 req_len = extnt_cnt * sizeof(uint16_t);
5064 5079
5065 /* 5080 /*
5066 * Calculate the size of an embedded mailbox. The uint32_t 5081 * Calculate the size of an embedded mailbox. The uint32_t
@@ -5075,7 +5090,7 @@ lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
5075 */ 5090 */
5076 *emb = LPFC_SLI4_MBX_EMBED; 5091 *emb = LPFC_SLI4_MBX_EMBED;
5077 if (req_len > emb_len) { 5092 if (req_len > emb_len) {
5078 req_len = *extnt_cnt * sizeof(uint16_t) + 5093 req_len = extnt_cnt * sizeof(uint16_t) +
5079 sizeof(union lpfc_sli4_cfg_shdr) + 5094 sizeof(union lpfc_sli4_cfg_shdr) +
5080 sizeof(uint32_t); 5095 sizeof(uint32_t);
5081 *emb = LPFC_SLI4_MBX_NEMBED; 5096 *emb = LPFC_SLI4_MBX_NEMBED;
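
The embed/non-embed choice above is pure arithmetic: the extent-id payload either fits in the mailbox's embedded area or it spills into a separate DMA buffer whose length also carries the config shdr plus one trailing word. A minimal user-space sketch of that sizing rule, assuming a made-up EMB_WORDS in place of the emb_len computation done elsewhere in this function:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define EMB_WORDS 56   /* assumed embedded payload capacity, in uint32_t words */

    /* Returns true for an embedded post (LPFC_SLI4_MBX_EMBED); on a spill,
     * grows *req_len by the shdr-plus-word overhead as the driver does. */
    bool extnt_req_embeds(uint16_t extnt_cnt, size_t shdr_len, size_t *req_len)
    {
        size_t emb_len = sizeof(uint32_t) * EMB_WORDS;

        *req_len = extnt_cnt * sizeof(uint16_t);
        if (*req_len <= emb_len)
            return true;
        *req_len = extnt_cnt * sizeof(uint16_t) + shdr_len + sizeof(uint32_t);
        return false;   /* LPFC_SLI4_MBX_NEMBED */
    }
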
@@ -5091,7 +5106,7 @@ lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
5091 "size (x%x)\n", alloc_len, req_len); 5106 "size (x%x)\n", alloc_len, req_len);
5092 return -ENOMEM; 5107 return -ENOMEM;
5093 } 5108 }
5094 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, *extnt_cnt, type, *emb); 5109 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
5095 if (unlikely(rc)) 5110 if (unlikely(rc))
5096 return -EIO; 5111 return -EIO;
5097 5112
@@ -5149,17 +5164,15 @@ lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5149 return -ENOMEM; 5164 return -ENOMEM;
5150 } 5165 }
5151 5166
5152 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT, 5167 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5153 "2903 Available Resource Extents " 5168 "2903 Post resource extents type-0x%x: "
5154 "for resource type 0x%x: Count: 0x%x, " 5169 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
5155 "Size 0x%x\n", type, rsrc_cnt,
5156 rsrc_size);
5157 5170
5158 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5171 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5159 if (!mbox) 5172 if (!mbox)
5160 return -ENOMEM; 5173 return -ENOMEM;
5161 5174
5162 rc = lpfc_sli4_cfg_post_extnts(phba, &rsrc_cnt, type, &emb, mbox); 5175 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
5163 if (unlikely(rc)) { 5176 if (unlikely(rc)) {
5164 rc = -EIO; 5177 rc = -EIO;
5165 goto err_exit; 5178 goto err_exit;
@@ -5250,6 +5263,7 @@ lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5250 rc = -ENOMEM; 5263 rc = -ENOMEM;
5251 goto err_exit; 5264 goto err_exit;
5252 } 5265 }
5266 phba->sli4_hba.max_cfg_param.xri_used = 0;
5253 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt * 5267 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
5254 sizeof(uint16_t), 5268 sizeof(uint16_t),
5255 GFP_KERNEL); 5269 GFP_KERNEL);
@@ -5420,7 +5434,6 @@ lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
5420 case LPFC_RSC_TYPE_FCOE_XRI: 5434 case LPFC_RSC_TYPE_FCOE_XRI:
5421 kfree(phba->sli4_hba.xri_bmask); 5435 kfree(phba->sli4_hba.xri_bmask);
5422 kfree(phba->sli4_hba.xri_ids); 5436 kfree(phba->sli4_hba.xri_ids);
5423 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5424 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5437 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5425 &phba->sli4_hba.lpfc_xri_blk_list, list) { 5438 &phba->sli4_hba.lpfc_xri_blk_list, list) {
5426 list_del_init(&rsrc_blk->list); 5439 list_del_init(&rsrc_blk->list);
@@ -5612,7 +5625,6 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5612 goto free_vpi_ids; 5625 goto free_vpi_ids;
5613 } 5626 }
5614 phba->sli4_hba.max_cfg_param.xri_used = 0; 5627 phba->sli4_hba.max_cfg_param.xri_used = 0;
5615 phba->sli4_hba.xri_count = 0;
5616 phba->sli4_hba.xri_ids = kzalloc(count * 5628 phba->sli4_hba.xri_ids = kzalloc(count *
5617 sizeof(uint16_t), 5629 sizeof(uint16_t),
5618 GFP_KERNEL); 5630 GFP_KERNEL);
@@ -5694,7 +5706,6 @@ lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
5694 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5706 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5695 kfree(phba->sli4_hba.xri_bmask); 5707 kfree(phba->sli4_hba.xri_bmask);
5696 kfree(phba->sli4_hba.xri_ids); 5708 kfree(phba->sli4_hba.xri_ids);
5697 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5698 kfree(phba->sli4_hba.vfi_bmask); 5709 kfree(phba->sli4_hba.vfi_bmask);
5699 kfree(phba->sli4_hba.vfi_ids); 5710 kfree(phba->sli4_hba.vfi_ids);
5700 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5711 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
@@ -5853,6 +5864,149 @@ lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
5853} 5864}
5854 5865
5855/** 5866/**
 5867 * lpfc_sli4_repost_els_sgl_list - Repost the els buffer sgl pages as a block
5868 * @phba: pointer to lpfc hba data structure.
5869 *
5870 * This routine walks the list of els buffers that have been allocated and
 5871 * reposts them to the port by using SGL block post. This is needed after a
5872 * pci_function_reset/warm_start or start. It attempts to construct blocks
 5873 * of els buffer sgls which contain contiguous xris and uses the non-embedded
5874 * SGL block post mailbox commands to post them to the port. For single els
5875 * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post
5876 * mailbox command for posting.
5877 *
5878 * Returns: 0 = success, non-zero failure.
5879 **/
5880static int
5881lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
5882{
5883 struct lpfc_sglq *sglq_entry = NULL;
5884 struct lpfc_sglq *sglq_entry_next = NULL;
5885 struct lpfc_sglq *sglq_entry_first = NULL;
5886 int status, post_cnt = 0, num_posted = 0, block_cnt = 0;
5887 int last_xritag = NO_XRI;
5888 LIST_HEAD(prep_sgl_list);
5889 LIST_HEAD(blck_sgl_list);
5890 LIST_HEAD(allc_sgl_list);
5891 LIST_HEAD(post_sgl_list);
5892 LIST_HEAD(free_sgl_list);
5893
5894 spin_lock(&phba->hbalock);
5895 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
5896 spin_unlock(&phba->hbalock);
5897
5898 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
5899 &allc_sgl_list, list) {
5900 list_del_init(&sglq_entry->list);
5901 block_cnt++;
5902 if ((last_xritag != NO_XRI) &&
5903 (sglq_entry->sli4_xritag != last_xritag + 1)) {
5904 /* a hole in xri block, form a sgl posting block */
5905 list_splice_init(&prep_sgl_list, &blck_sgl_list);
5906 post_cnt = block_cnt - 1;
5907 /* prepare list for next posting block */
5908 list_add_tail(&sglq_entry->list, &prep_sgl_list);
5909 block_cnt = 1;
5910 } else {
5911 /* prepare list for next posting block */
5912 list_add_tail(&sglq_entry->list, &prep_sgl_list);
5913 /* enough sgls for non-embed sgl mbox command */
5914 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
5915 list_splice_init(&prep_sgl_list,
5916 &blck_sgl_list);
5917 post_cnt = block_cnt;
5918 block_cnt = 0;
5919 }
5920 }
5921 num_posted++;
5922
5923 /* keep track of last sgl's xritag */
5924 last_xritag = sglq_entry->sli4_xritag;
5925
5926 /* end of repost sgl list condition for els buffers */
5927 if (num_posted == phba->sli4_hba.els_xri_cnt) {
5928 if (post_cnt == 0) {
5929 list_splice_init(&prep_sgl_list,
5930 &blck_sgl_list);
5931 post_cnt = block_cnt;
5932 } else if (block_cnt == 1) {
5933 status = lpfc_sli4_post_sgl(phba,
5934 sglq_entry->phys, 0,
5935 sglq_entry->sli4_xritag);
5936 if (!status) {
5937 /* successful, put sgl to posted list */
5938 list_add_tail(&sglq_entry->list,
5939 &post_sgl_list);
5940 } else {
5941 /* Failure, put sgl to free list */
5942 lpfc_printf_log(phba, KERN_WARNING,
5943 LOG_SLI,
5944 "3159 Failed to post els "
5945 "sgl, xritag:x%x\n",
5946 sglq_entry->sli4_xritag);
5947 list_add_tail(&sglq_entry->list,
5948 &free_sgl_list);
5949 spin_lock_irq(&phba->hbalock);
5950 phba->sli4_hba.els_xri_cnt--;
5951 spin_unlock_irq(&phba->hbalock);
5952 }
5953 }
5954 }
5955
 5956 /* continue until a nembed page's worth of sgls */
5957 if (post_cnt == 0)
5958 continue;
5959
5960 /* post the els buffer list sgls as a block */
5961 status = lpfc_sli4_post_els_sgl_list(phba, &blck_sgl_list,
5962 post_cnt);
5963
5964 if (!status) {
5965 /* success, put sgl list to posted sgl list */
5966 list_splice_init(&blck_sgl_list, &post_sgl_list);
5967 } else {
5968 /* Failure, put sgl list to free sgl list */
5969 sglq_entry_first = list_first_entry(&blck_sgl_list,
5970 struct lpfc_sglq,
5971 list);
5972 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5973 "3160 Failed to post els sgl-list, "
5974 "xritag:x%x-x%x\n",
5975 sglq_entry_first->sli4_xritag,
5976 (sglq_entry_first->sli4_xritag +
5977 post_cnt - 1));
5978 list_splice_init(&blck_sgl_list, &free_sgl_list);
5979 spin_lock_irq(&phba->hbalock);
5980 phba->sli4_hba.els_xri_cnt -= post_cnt;
5981 spin_unlock_irq(&phba->hbalock);
5982 }
5983
 5984 /* don't reset xritag due to hole in xri block */
5985 if (block_cnt == 0)
5986 last_xritag = NO_XRI;
5987
5988 /* reset els sgl post count for next round of posting */
5989 post_cnt = 0;
5990 }
5991
5992 /* free the els sgls failed to post */
5993 lpfc_free_sgl_list(phba, &free_sgl_list);
5994
 5995 /* push els sgls posted to the available list */
5996 if (!list_empty(&post_sgl_list)) {
5997 spin_lock(&phba->hbalock);
5998 list_splice_init(&post_sgl_list,
5999 &phba->sli4_hba.lpfc_sgl_list);
6000 spin_unlock(&phba->hbalock);
6001 } else {
6002 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6003 "3161 Failure to post els sgl to port.\n");
6004 return -EIO;
6005 }
6006 return 0;
6007}
6008
6009/**
 5856 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function 6010 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
5857 * @phba: Pointer to HBA context object. 6011 * @phba: Pointer to HBA context object.
5858 * 6012 *
@@ -5923,6 +6077,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
5923 else 6077 else
5924 phba->hba_flag &= ~HBA_FIP_SUPPORT; 6078 phba->hba_flag &= ~HBA_FIP_SUPPORT;
5925 6079
6080 phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
6081
5926 if (phba->sli_rev != LPFC_SLI_REV4) { 6082 if (phba->sli_rev != LPFC_SLI_REV4) {
5927 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6083 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5928 "0376 READ_REV Error. SLI Level %d " 6084 "0376 READ_REV Error. SLI Level %d "
@@ -6063,8 +6219,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6063 "rc = x%x\n", rc); 6219 "rc = x%x\n", rc);
6064 goto out_free_mbox; 6220 goto out_free_mbox;
6065 } 6221 }
6066 /* update physical xri mappings in the scsi buffers */
6067 lpfc_scsi_buf_update(phba);
6068 6222
6069 /* Read the port's service parameters. */ 6223 /* Read the port's service parameters. */
6070 rc = lpfc_read_sparam(phba, mboxq, vport->vpi); 6224 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
@@ -6105,28 +6259,26 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6105 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 6259 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
6106 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 6260 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
6107 6261
6108 /* Register SGL pool to the device using non-embedded mailbox command */ 6262 /* update host els and scsi xri-sgl sizes and mappings */
6109 if (!phba->sli4_hba.extents_in_use) { 6263 rc = lpfc_sli4_xri_sgl_update(phba);
6110 rc = lpfc_sli4_post_els_sgl_list(phba); 6264 if (unlikely(rc)) {
6111 if (unlikely(rc)) { 6265 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6112 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6266 "1400 Failed to update xri-sgl size and "
6113 "0582 Error %d during els sgl post " 6267 "mapping: %d\n", rc);
6114 "operation\n", rc); 6268 goto out_free_mbox;
6115 rc = -ENODEV;
6116 goto out_free_mbox;
6117 }
6118 } else {
6119 rc = lpfc_sli4_post_els_sgl_list_ext(phba);
6120 if (unlikely(rc)) {
6121 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6122 "2560 Error %d during els sgl post "
6123 "operation\n", rc);
6124 rc = -ENODEV;
6125 goto out_free_mbox;
6126 }
6127 } 6269 }
6128 6270
6129 /* Register SCSI SGL pool to the device */ 6271 /* register the els sgl pool to the port */
6272 rc = lpfc_sli4_repost_els_sgl_list(phba);
6273 if (unlikely(rc)) {
6274 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6275 "0582 Error %d during els sgl post "
6276 "operation\n", rc);
6277 rc = -ENODEV;
6278 goto out_free_mbox;
6279 }
6280
6281 /* register the allocated scsi sgl pool to the port */
6130 rc = lpfc_sli4_repost_scsi_sgl_list(phba); 6282 rc = lpfc_sli4_repost_scsi_sgl_list(phba);
6131 if (unlikely(rc)) { 6283 if (unlikely(rc)) {
6132 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6284 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
@@ -7060,14 +7212,19 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
7060 if (rc != MBX_SUCCESS) 7212 if (rc != MBX_SUCCESS)
7061 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7213 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7062 "(%d):2541 Mailbox command x%x " 7214 "(%d):2541 Mailbox command x%x "
7063 "(x%x/x%x) cannot issue Data: " 7215 "(x%x/x%x) failure: "
7064 "x%x x%x\n", 7216 "mqe_sta: x%x mcqe_sta: x%x/x%x "
 7217 "Data: x%x x%x\n",
7065 mboxq->vport ? mboxq->vport->vpi : 0, 7218 mboxq->vport ? mboxq->vport->vpi : 0,
7066 mboxq->u.mb.mbxCommand, 7219 mboxq->u.mb.mbxCommand,
7067 lpfc_sli_config_mbox_subsys_get(phba, 7220 lpfc_sli_config_mbox_subsys_get(phba,
7068 mboxq), 7221 mboxq),
7069 lpfc_sli_config_mbox_opcode_get(phba, 7222 lpfc_sli_config_mbox_opcode_get(phba,
7070 mboxq), 7223 mboxq),
7224 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
7225 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
7226 bf_get(lpfc_mcqe_ext_status,
7227 &mboxq->mcqe),
7071 psli->sli_flag, flag); 7228 psli->sli_flag, flag);
7072 return rc; 7229 return rc;
7073 } else if (flag == MBX_POLL) { 7230 } else if (flag == MBX_POLL) {
@@ -7086,18 +7243,22 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
7086 /* Successfully blocked, now issue sync mbox cmd */ 7243 /* Successfully blocked, now issue sync mbox cmd */
7087 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 7244 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
7088 if (rc != MBX_SUCCESS) 7245 if (rc != MBX_SUCCESS)
7089 lpfc_printf_log(phba, KERN_ERR, 7246 lpfc_printf_log(phba, KERN_WARNING,
7090 LOG_MBOX | LOG_SLI, 7247 LOG_MBOX | LOG_SLI,
7091 "(%d):2597 Mailbox command " 7248 "(%d):2597 Sync Mailbox command "
7092 "x%x (x%x/x%x) cannot issue " 7249 "x%x (x%x/x%x) failure: "
7093 "Data: x%x x%x\n", 7250 "mqe_sta: x%x mcqe_sta: x%x/x%x "
 7094 mboxq->vport ? 7251 "Data: x%x x%x\n",
7095 mboxq->vport->vpi : 0, 7252 mboxq->vport ? mboxq->vport->vpi : 0,
7096 mboxq->u.mb.mbxCommand, 7253 mboxq->u.mb.mbxCommand,
7097 lpfc_sli_config_mbox_subsys_get(phba, 7254 lpfc_sli_config_mbox_subsys_get(phba,
7098 mboxq), 7255 mboxq),
7099 lpfc_sli_config_mbox_opcode_get(phba, 7256 lpfc_sli_config_mbox_opcode_get(phba,
7100 mboxq), 7257 mboxq),
7258 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
7259 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
7260 bf_get(lpfc_mcqe_ext_status,
7261 &mboxq->mcqe),
7101 psli->sli_flag, flag); 7262 psli->sli_flag, flag);
7102 /* Unblock the async mailbox posting afterward */ 7263 /* Unblock the async mailbox posting afterward */
7103 lpfc_sli4_async_mbox_unblock(phba); 7264 lpfc_sli4_async_mbox_unblock(phba);
@@ -7712,7 +7873,10 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7712 7873
7713 switch (iocbq->iocb.ulpCommand) { 7874 switch (iocbq->iocb.ulpCommand) {
7714 case CMD_ELS_REQUEST64_CR: 7875 case CMD_ELS_REQUEST64_CR:
7715 ndlp = (struct lpfc_nodelist *)iocbq->context1; 7876 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
7877 ndlp = iocbq->context_un.ndlp;
7878 else
7879 ndlp = (struct lpfc_nodelist *)iocbq->context1;
7716 if (!iocbq->iocb.ulpLe) { 7880 if (!iocbq->iocb.ulpLe) {
7717 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7881 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7718 "2007 Only Limited Edition cmd Format" 7882 "2007 Only Limited Edition cmd Format"
@@ -7751,9 +7915,13 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7751 bf_set(els_req64_sp, &wqe->els_req, 1); 7915 bf_set(els_req64_sp, &wqe->els_req, 1);
7752 bf_set(els_req64_sid, &wqe->els_req, 7916 bf_set(els_req64_sid, &wqe->els_req,
7753 iocbq->vport->fc_myDID); 7917 iocbq->vport->fc_myDID);
7918 if ((*pcmd == ELS_CMD_FLOGI) &&
7919 !(phba->fc_topology ==
7920 LPFC_TOPOLOGY_LOOP))
7921 bf_set(els_req64_sid, &wqe->els_req, 0);
7754 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1); 7922 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
7755 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 7923 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
7756 phba->vpi_ids[phba->pport->vpi]); 7924 phba->vpi_ids[iocbq->vport->vpi]);
7757 } else if (pcmd && iocbq->context1) { 7925 } else if (pcmd && iocbq->context1) {
7758 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0); 7926 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
7759 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 7927 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
@@ -7908,11 +8076,25 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7908 /* words0-2 BDE memcpy */ 8076 /* words0-2 BDE memcpy */
7909 /* word3 iocb=iotag32 wqe=response_payload_len */ 8077 /* word3 iocb=iotag32 wqe=response_payload_len */
7910 wqe->xmit_els_rsp.response_payload_len = xmit_len; 8078 wqe->xmit_els_rsp.response_payload_len = xmit_len;
7911 /* word4 iocb=did wge=rsvd. */ 8079 /* word4 */
7912 wqe->xmit_els_rsp.rsvd4 = 0; 8080 wqe->xmit_els_rsp.word4 = 0;
 7913 /* word5 iocb=rsvd wqe=did */ 8081 /* word5 iocb=rsvd wqe=did */
7914 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, 8082 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
7915 iocbq->iocb.un.elsreq64.remoteID); 8083 iocbq->iocb.un.xseq64.xmit_els_remoteID);
8084
8085 if_type = bf_get(lpfc_sli_intf_if_type,
8086 &phba->sli4_hba.sli_intf);
8087 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
8088 if (iocbq->vport->fc_flag & FC_PT2PT) {
8089 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
8090 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
8091 iocbq->vport->fc_myDID);
8092 if (iocbq->vport->fc_myDID == Fabric_DID) {
8093 bf_set(wqe_els_did,
8094 &wqe->xmit_els_rsp.wqe_dest, 0);
8095 }
8096 }
8097 }
7916 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 8098 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
7917 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 8099 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
7918 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU); 8100 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
@@ -7932,11 +8114,11 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7932 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 8114 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
7933 iocbq->context2)->virt); 8115 iocbq->context2)->virt);
7934 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 8116 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
7935 bf_set(els_req64_sp, &wqe->els_req, 1); 8117 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
7936 bf_set(els_req64_sid, &wqe->els_req, 8118 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
7937 iocbq->vport->fc_myDID); 8119 iocbq->vport->fc_myDID);
7938 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1); 8120 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
7939 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 8121 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
7940 phba->vpi_ids[phba->pport->vpi]); 8122 phba->vpi_ids[phba->pport->vpi]);
7941 } 8123 }
7942 command_type = OTHER_COMMAND; 8124 command_type = OTHER_COMMAND;
@@ -13080,9 +13262,7 @@ lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
13080 } else { 13262 } else {
13081 set_bit(xri, phba->sli4_hba.xri_bmask); 13263 set_bit(xri, phba->sli4_hba.xri_bmask);
13082 phba->sli4_hba.max_cfg_param.xri_used++; 13264 phba->sli4_hba.max_cfg_param.xri_used++;
13083 phba->sli4_hba.xri_count++;
13084 } 13265 }
13085
13086 spin_unlock_irq(&phba->hbalock); 13266 spin_unlock_irq(&phba->hbalock);
13087 return xri; 13267 return xri;
13088} 13268}
@@ -13098,7 +13278,6 @@ void
13098__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri) 13278__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
13099{ 13279{
13100 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) { 13280 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
13101 phba->sli4_hba.xri_count--;
13102 phba->sli4_hba.max_cfg_param.xri_used--; 13281 phba->sli4_hba.max_cfg_param.xri_used--;
13103 } 13282 }
13104} 13283}
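
With the redundant xri_count gone, max_cfg_param.xri_used is the lone counter beside the bitmap, and alloc/free reduce to find-first-zero and test-and-clear under hbalock. A user-space sketch of the same discipline, assuming a fixed 64-entry table for brevity (the driver uses the kernel bitmap API over xri_bmask):

    #include <stdint.h>

    #define MAX_XRI 64
    #define NO_XRI  0xffff

    static uint64_t xri_bmask;   /* bit i set => xri i in use */
    static unsigned xri_used;    /* mirrors max_cfg_param.xri_used */

    uint16_t alloc_xri(void)
    {
        for (uint16_t i = 0; i < MAX_XRI; i++)
            if (!(xri_bmask & (1ULL << i))) {
                xri_bmask |= 1ULL << i;
                xri_used++;
                return i;
            }
        return NO_XRI;
    }

    void free_xri(uint16_t i)
    {
        if (i < MAX_XRI && (xri_bmask & (1ULL << i))) {
            xri_bmask &= ~(1ULL << i);
            xri_used--;
        }
    }
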
@@ -13134,46 +13313,45 @@ lpfc_sli4_next_xritag(struct lpfc_hba *phba)
13134 uint16_t xri_index; 13313 uint16_t xri_index;
13135 13314
13136 xri_index = lpfc_sli4_alloc_xri(phba); 13315 xri_index = lpfc_sli4_alloc_xri(phba);
13137 if (xri_index != NO_XRI) 13316 if (xri_index == NO_XRI)
13138 return xri_index; 13317 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13139 13318 "2004 Failed to allocate XRI. Last XRITAG is %d"
13140 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13319 " Max XRI is %d, Used XRI is %d\n",
13141 "2004 Failed to allocate XRI.last XRITAG is %d" 13320 xri_index,
13142 " Max XRI is %d, Used XRI is %d\n", 13321 phba->sli4_hba.max_cfg_param.max_xri,
13143 xri_index, 13322 phba->sli4_hba.max_cfg_param.xri_used);
13144 phba->sli4_hba.max_cfg_param.max_xri, 13323 return xri_index;
13145 phba->sli4_hba.max_cfg_param.xri_used);
13146 return NO_XRI;
13147} 13324}
13148 13325
13149/** 13326/**
13150 * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port. 13327 * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port.
13151 * @phba: pointer to lpfc hba data structure. 13328 * @phba: pointer to lpfc hba data structure.
13329 * @post_sgl_list: pointer to els sgl entry list.
13330 * @count: number of els sgl entries on the list.
13152 * 13331 *
13153 * This routine is invoked to post a block of driver's sgl pages to the 13332 * This routine is invoked to post a block of driver's sgl pages to the
13154 * HBA using non-embedded mailbox command. No Lock is held. This routine 13333 * HBA using non-embedded mailbox command. No Lock is held. This routine
13155 * is only called when the driver is loading and after all IO has been 13334 * is only called when the driver is loading and after all IO has been
13156 * stopped. 13335 * stopped.
13157 **/ 13336 **/
13158int 13337static int
13159lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba) 13338lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba,
13339 struct list_head *post_sgl_list,
13340 int post_cnt)
13160{ 13341{
13161 struct lpfc_sglq *sglq_entry; 13342 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
13162 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 13343 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
13163 struct sgl_page_pairs *sgl_pg_pairs; 13344 struct sgl_page_pairs *sgl_pg_pairs;
13164 void *viraddr; 13345 void *viraddr;
13165 LPFC_MBOXQ_t *mbox; 13346 LPFC_MBOXQ_t *mbox;
13166 uint32_t reqlen, alloclen, pg_pairs; 13347 uint32_t reqlen, alloclen, pg_pairs;
13167 uint32_t mbox_tmo; 13348 uint32_t mbox_tmo;
13168 uint16_t xritag_start = 0, lxri = 0; 13349 uint16_t xritag_start = 0;
13169 int els_xri_cnt, rc = 0; 13350 int rc = 0;
13170 uint32_t shdr_status, shdr_add_status; 13351 uint32_t shdr_status, shdr_add_status;
13171 union lpfc_sli4_cfg_shdr *shdr; 13352 union lpfc_sli4_cfg_shdr *shdr;
13172 13353
13173 /* The number of sgls to be posted */ 13354 reqlen = phba->sli4_hba.els_xri_cnt * sizeof(struct sgl_page_pairs) +
13174 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
13175
13176 reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
13177 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 13355 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
13178 if (reqlen > SLI4_PAGE_SIZE) { 13356 if (reqlen > SLI4_PAGE_SIZE) {
13179 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 13357 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -13203,25 +13381,8 @@ lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba)
13203 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 13381 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
13204 sgl_pg_pairs = &sgl->sgl_pg_pairs; 13382 sgl_pg_pairs = &sgl->sgl_pg_pairs;
13205 13383
13206 for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) { 13384 pg_pairs = 0;
13207 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs]; 13385 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
13208
13209 /*
13210 * Assign the sglq a physical xri only if the driver has not
13211 * initialized those resources. A port reset only needs
13212 * the sglq's posted.
13213 */
13214 if (bf_get(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
13215 LPFC_XRI_RSRC_RDY) {
13216 lxri = lpfc_sli4_next_xritag(phba);
13217 if (lxri == NO_XRI) {
13218 lpfc_sli4_mbox_cmd_free(phba, mbox);
13219 return -ENOMEM;
13220 }
13221 sglq_entry->sli4_lxritag = lxri;
13222 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
13223 }
13224
13225 /* Set up the sge entry */ 13386 /* Set up the sge entry */
13226 sgl_pg_pairs->sgl_pg0_addr_lo = 13387 sgl_pg_pairs->sgl_pg0_addr_lo =
13227 cpu_to_le32(putPaddrLow(sglq_entry->phys)); 13388 cpu_to_le32(putPaddrLow(sglq_entry->phys));
@@ -13236,11 +13397,12 @@ lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba)
13236 if (pg_pairs == 0) 13397 if (pg_pairs == 0)
13237 xritag_start = sglq_entry->sli4_xritag; 13398 xritag_start = sglq_entry->sli4_xritag;
13238 sgl_pg_pairs++; 13399 sgl_pg_pairs++;
13400 pg_pairs++;
13239 } 13401 }
13240 13402
13241 /* Complete initialization and perform endian conversion. */ 13403 /* Complete initialization and perform endian conversion. */
13242 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 13404 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
13243 bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt); 13405 bf_set(lpfc_post_sgl_pages_xricnt, sgl, phba->sli4_hba.els_xri_cnt);
13244 sgl->word0 = cpu_to_le32(sgl->word0); 13406 sgl->word0 = cpu_to_le32(sgl->word0);
13245 if (!phba->sli4_hba.intr_enable) 13407 if (!phba->sli4_hba.intr_enable)
13246 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13408 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
@@ -13260,183 +13422,6 @@ lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba)
13260 shdr_status, shdr_add_status, rc); 13422 shdr_status, shdr_add_status, rc);
13261 rc = -ENXIO; 13423 rc = -ENXIO;
13262 } 13424 }
13263
13264 if (rc == 0)
13265 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
13266 LPFC_XRI_RSRC_RDY);
13267 return rc;
13268}
13269
13270/**
13271 * lpfc_sli4_post_els_sgl_list_ext - post a block of ELS sgls to the port.
13272 * @phba: pointer to lpfc hba data structure.
13273 *
13274 * This routine is invoked to post a block of driver's sgl pages to the
13275 * HBA using non-embedded mailbox command. No Lock is held. This routine
13276 * is only called when the driver is loading and after all IO has been
13277 * stopped.
13278 **/
13279int
13280lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
13281{
13282 struct lpfc_sglq *sglq_entry;
13283 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
13284 struct sgl_page_pairs *sgl_pg_pairs;
13285 void *viraddr;
13286 LPFC_MBOXQ_t *mbox;
13287 uint32_t reqlen, alloclen, index;
13288 uint32_t mbox_tmo;
13289 uint16_t rsrc_start, rsrc_size, els_xri_cnt, post_els_xri_cnt;
13290 uint16_t xritag_start = 0, lxri = 0;
13291 struct lpfc_rsrc_blks *rsrc_blk;
13292 int cnt, ttl_cnt, rc = 0;
13293 int loop_cnt;
13294 uint32_t shdr_status, shdr_add_status;
13295 union lpfc_sli4_cfg_shdr *shdr;
13296
13297 /* The number of sgls to be posted */
13298 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
13299
13300 reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
13301 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
13302 if (reqlen > SLI4_PAGE_SIZE) {
13303 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13304 "2989 Block sgl registration required DMA "
13305 "size (%d) great than a page\n", reqlen);
13306 return -ENOMEM;
13307 }
13308
13309 cnt = 0;
13310 ttl_cnt = 0;
13311 post_els_xri_cnt = els_xri_cnt;
13312 list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
13313 list) {
13314 rsrc_start = rsrc_blk->rsrc_start;
13315 rsrc_size = rsrc_blk->rsrc_size;
13316
13317 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13318 "3014 Working ELS Extent start %d, cnt %d\n",
13319 rsrc_start, rsrc_size);
13320
13321 loop_cnt = min(post_els_xri_cnt, rsrc_size);
13322 if (loop_cnt < post_els_xri_cnt) {
13323 post_els_xri_cnt -= loop_cnt;
13324 ttl_cnt += loop_cnt;
13325 } else
13326 ttl_cnt += post_els_xri_cnt;
13327
13328 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13329 if (!mbox)
13330 return -ENOMEM;
13331 /*
13332 * Allocate DMA memory and set up the non-embedded mailbox
13333 * command.
13334 */
13335 alloclen = lpfc_sli4_config(phba, mbox,
13336 LPFC_MBOX_SUBSYSTEM_FCOE,
13337 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
13338 reqlen, LPFC_SLI4_MBX_NEMBED);
13339 if (alloclen < reqlen) {
13340 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13341 "2987 Allocated DMA memory size (%d) "
13342 "is less than the requested DMA memory "
13343 "size (%d)\n", alloclen, reqlen);
13344 lpfc_sli4_mbox_cmd_free(phba, mbox);
13345 return -ENOMEM;
13346 }
13347
13348 /* Set up the SGL pages in the non-embedded DMA pages */
13349 viraddr = mbox->sge_array->addr[0];
13350 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
13351 sgl_pg_pairs = &sgl->sgl_pg_pairs;
13352
13353 /*
13354 * The starting resource may not begin at zero. Control
13355 * the loop variants via the block resource parameters,
13356 * but handle the sge pointers with a zero-based index
13357 * that doesn't get reset per loop pass.
13358 */
13359 for (index = rsrc_start;
13360 index < rsrc_start + loop_cnt;
13361 index++) {
13362 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[cnt];
13363
13364 /*
13365 * Assign the sglq a physical xri only if the driver
13366 * has not initialized those resources. A port reset
13367 * only needs the sglq's posted.
13368 */
13369 if (bf_get(lpfc_xri_rsrc_rdy,
13370 &phba->sli4_hba.sli4_flags) !=
13371 LPFC_XRI_RSRC_RDY) {
13372 lxri = lpfc_sli4_next_xritag(phba);
13373 if (lxri == NO_XRI) {
13374 lpfc_sli4_mbox_cmd_free(phba, mbox);
13375 rc = -ENOMEM;
13376 goto err_exit;
13377 }
13378 sglq_entry->sli4_lxritag = lxri;
13379 sglq_entry->sli4_xritag =
13380 phba->sli4_hba.xri_ids[lxri];
13381 }
13382
13383 /* Set up the sge entry */
13384 sgl_pg_pairs->sgl_pg0_addr_lo =
13385 cpu_to_le32(putPaddrLow(sglq_entry->phys));
13386 sgl_pg_pairs->sgl_pg0_addr_hi =
13387 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
13388 sgl_pg_pairs->sgl_pg1_addr_lo =
13389 cpu_to_le32(putPaddrLow(0));
13390 sgl_pg_pairs->sgl_pg1_addr_hi =
13391 cpu_to_le32(putPaddrHigh(0));
13392
13393 /* Track the starting physical XRI for the mailbox. */
13394 if (index == rsrc_start)
13395 xritag_start = sglq_entry->sli4_xritag;
13396 sgl_pg_pairs++;
13397 cnt++;
13398 }
13399
13400 /* Complete initialization and perform endian conversion. */
13401 rsrc_blk->rsrc_used += loop_cnt;
13402 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
13403 bf_set(lpfc_post_sgl_pages_xricnt, sgl, loop_cnt);
13404 sgl->word0 = cpu_to_le32(sgl->word0);
13405
13406 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13407 "3015 Post ELS Extent SGL, start %d, "
13408 "cnt %d, used %d\n",
13409 xritag_start, loop_cnt, rsrc_blk->rsrc_used);
13410 if (!phba->sli4_hba.intr_enable)
13411 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13412 else {
13413 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
13414 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
13415 }
13416 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
13417 shdr_status = bf_get(lpfc_mbox_hdr_status,
13418 &shdr->response);
13419 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
13420 &shdr->response);
13421 if (rc != MBX_TIMEOUT)
13422 lpfc_sli4_mbox_cmd_free(phba, mbox);
13423 if (shdr_status || shdr_add_status || rc) {
13424 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13425 "2988 POST_SGL_BLOCK mailbox "
13426 "command failed status x%x "
13427 "add_status x%x mbx status x%x\n",
13428 shdr_status, shdr_add_status, rc);
13429 rc = -ENXIO;
13430 goto err_exit;
13431 }
13432 if (ttl_cnt >= els_xri_cnt)
13433 break;
13434 }
13435
13436 err_exit:
13437 if (rc == 0)
13438 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
13439 LPFC_XRI_RSRC_RDY);
13440 return rc; 13425 return rc;
13441} 13426}
13442 13427
@@ -13452,8 +13437,9 @@ lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
13452 * 13437 *
13453 **/ 13438 **/
13454int 13439int
13455lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist, 13440lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
13456 int cnt) 13441 struct list_head *sblist,
13442 int count)
13457{ 13443{
13458 struct lpfc_scsi_buf *psb; 13444 struct lpfc_scsi_buf *psb;
13459 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 13445 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
@@ -13469,7 +13455,7 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
13469 union lpfc_sli4_cfg_shdr *shdr; 13455 union lpfc_sli4_cfg_shdr *shdr;
13470 13456
13471 /* Calculate the requested length of the dma memory */ 13457 /* Calculate the requested length of the dma memory */
13472 reqlen = cnt * sizeof(struct sgl_page_pairs) + 13458 reqlen = count * sizeof(struct sgl_page_pairs) +
13473 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 13459 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
13474 if (reqlen > SLI4_PAGE_SIZE) { 13460 if (reqlen > SLI4_PAGE_SIZE) {
13475 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 13461 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -13553,169 +13539,6 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
13553} 13539}
13554 13540
13555/** 13541/**
13556 * lpfc_sli4_post_scsi_sgl_blk_ext - post a block of scsi sgls to the port.
13557 * @phba: pointer to lpfc hba data structure.
13558 * @sblist: pointer to scsi buffer list.
13559 * @count: number of scsi buffers on the list.
13560 *
13561 * This routine is invoked to post a block of @count scsi sgl pages from a
13562 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
13563 * No Lock is held.
13564 *
13565 **/
13566int
13567lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *phba, struct list_head *sblist,
13568 int cnt)
13569{
13570 struct lpfc_scsi_buf *psb = NULL;
13571 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
13572 struct sgl_page_pairs *sgl_pg_pairs;
13573 void *viraddr;
13574 LPFC_MBOXQ_t *mbox;
13575 uint32_t reqlen, alloclen, pg_pairs;
13576 uint32_t mbox_tmo;
13577 uint16_t xri_start = 0, scsi_xri_start;
13578 uint16_t rsrc_range;
13579 int rc = 0, avail_cnt;
13580 uint32_t shdr_status, shdr_add_status;
13581 dma_addr_t pdma_phys_bpl1;
13582 union lpfc_sli4_cfg_shdr *shdr;
13583 struct lpfc_rsrc_blks *rsrc_blk;
13584 uint32_t xri_cnt = 0;
13585
13586 /* Calculate the total requested length of the dma memory */
13587 reqlen = cnt * sizeof(struct sgl_page_pairs) +
13588 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
13589 if (reqlen > SLI4_PAGE_SIZE) {
13590 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13591 "2932 Block sgl registration required DMA "
13592 "size (%d) great than a page\n", reqlen);
13593 return -ENOMEM;
13594 }
13595
13596 /*
13597 * The use of extents requires the driver to post the sgl headers
13598 * in multiple postings to meet the contiguous resource assignment.
13599 */
13600 psb = list_prepare_entry(psb, sblist, list);
13601 scsi_xri_start = phba->sli4_hba.scsi_xri_start;
13602 list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
13603 list) {
13604 rsrc_range = rsrc_blk->rsrc_start + rsrc_blk->rsrc_size;
13605 if (rsrc_range < scsi_xri_start)
13606 continue;
13607 else if (rsrc_blk->rsrc_used >= rsrc_blk->rsrc_size)
13608 continue;
13609 else
13610 avail_cnt = rsrc_blk->rsrc_size - rsrc_blk->rsrc_used;
13611
13612 reqlen = (avail_cnt * sizeof(struct sgl_page_pairs)) +
13613 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
13614 /*
13615 * Allocate DMA memory and set up the non-embedded mailbox
13616 * command. The mbox is used to post an SGL page per loop
13617 * but the DMA memory has a use-once semantic so the mailbox
13618 * is used and freed per loop pass.
13619 */
13620 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13621 if (!mbox) {
13622 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13623 "2933 Failed to allocate mbox cmd "
13624 "memory\n");
13625 return -ENOMEM;
13626 }
13627 alloclen = lpfc_sli4_config(phba, mbox,
13628 LPFC_MBOX_SUBSYSTEM_FCOE,
13629 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
13630 reqlen,
13631 LPFC_SLI4_MBX_NEMBED);
13632 if (alloclen < reqlen) {
13633 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13634 "2934 Allocated DMA memory size (%d) "
13635 "is less than the requested DMA memory "
13636 "size (%d)\n", alloclen, reqlen);
13637 lpfc_sli4_mbox_cmd_free(phba, mbox);
13638 return -ENOMEM;
13639 }
13640
13641 /* Get the first SGE entry from the non-embedded DMA memory */
13642 viraddr = mbox->sge_array->addr[0];
13643
13644 /* Set up the SGL pages in the non-embedded DMA pages */
13645 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
13646 sgl_pg_pairs = &sgl->sgl_pg_pairs;
13647
13648 /* pg_pairs tracks posted SGEs per loop iteration. */
13649 pg_pairs = 0;
13650 list_for_each_entry_continue(psb, sblist, list) {
13651 /* Set up the sge entry */
13652 sgl_pg_pairs->sgl_pg0_addr_lo =
13653 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
13654 sgl_pg_pairs->sgl_pg0_addr_hi =
13655 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
13656 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
13657 pdma_phys_bpl1 = psb->dma_phys_bpl +
13658 SGL_PAGE_SIZE;
13659 else
13660 pdma_phys_bpl1 = 0;
13661 sgl_pg_pairs->sgl_pg1_addr_lo =
13662 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
13663 sgl_pg_pairs->sgl_pg1_addr_hi =
13664 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
13665 /* Keep the first xri for this extent. */
13666 if (pg_pairs == 0)
13667 xri_start = psb->cur_iocbq.sli4_xritag;
13668 sgl_pg_pairs++;
13669 pg_pairs++;
13670 xri_cnt++;
13671
13672 /*
13673 * Track two exit conditions - the loop has constructed
13674 * all of the caller's SGE pairs or all available
13675 * resource IDs in this extent are consumed.
13676 */
13677 if ((xri_cnt == cnt) || (pg_pairs >= avail_cnt))
13678 break;
13679 }
13680 rsrc_blk->rsrc_used += pg_pairs;
13681 bf_set(lpfc_post_sgl_pages_xri, sgl, xri_start);
13682 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
13683
13684 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13685 "3016 Post SCSI Extent SGL, start %d, cnt %d "
13686 "blk use %d\n",
13687 xri_start, pg_pairs, rsrc_blk->rsrc_used);
13688 /* Perform endian conversion if necessary */
13689 sgl->word0 = cpu_to_le32(sgl->word0);
13690 if (!phba->sli4_hba.intr_enable)
13691 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13692 else {
13693 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
13694 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
13695 }
13696 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
13697 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13698 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
13699 &shdr->response);
13700 if (rc != MBX_TIMEOUT)
13701 lpfc_sli4_mbox_cmd_free(phba, mbox);
13702 if (shdr_status || shdr_add_status || rc) {
13703 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13704 "2935 POST_SGL_BLOCK mailbox command "
13705 "failed status x%x add_status x%x "
13706 "mbx status x%x\n",
13707 shdr_status, shdr_add_status, rc);
13708 return -ENXIO;
13709 }
13710
13711 /* Post only what is requested. */
13712 if (xri_cnt >= cnt)
13713 break;
13714 }
13715 return rc;
13716}
13717
13718/**
13719 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle 13542 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
13720 * @phba: pointer to lpfc_hba struct that the frame was received on 13543 * @phba: pointer to lpfc_hba struct that the frame was received on
13721 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 13544 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
@@ -13839,8 +13662,13 @@ lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
13839 uint32_t did = (fc_hdr->fh_d_id[0] << 16 | 13662 uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
13840 fc_hdr->fh_d_id[1] << 8 | 13663 fc_hdr->fh_d_id[1] << 8 |
13841 fc_hdr->fh_d_id[2]); 13664 fc_hdr->fh_d_id[2]);
13665
13842 if (did == Fabric_DID) 13666 if (did == Fabric_DID)
13843 return phba->pport; 13667 return phba->pport;
13668 if ((phba->pport->fc_flag & FC_PT2PT) &&
13669 !(phba->link_state == LPFC_HBA_READY))
13670 return phba->pport;
13671
13844 vports = lpfc_create_vport_work_array(phba); 13672 vports = lpfc_create_vport_work_array(phba);
13845 if (vports != NULL) 13673 if (vports != NULL)
13846 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 13674 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
@@ -14133,7 +13961,6 @@ lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
14133 return NO_XRI; 13961 return NO_XRI;
14134} 13962}
14135 13963
14136
14137/** 13964/**
14138 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort 13965 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
14139 * @phba: Pointer to HBA context object. 13966 * @phba: Pointer to HBA context object.
@@ -14148,7 +13975,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
14148{ 13975{
14149 struct lpfc_iocbq *ctiocb = NULL; 13976 struct lpfc_iocbq *ctiocb = NULL;
14150 struct lpfc_nodelist *ndlp; 13977 struct lpfc_nodelist *ndlp;
14151 uint16_t oxid, rxid; 13978 uint16_t oxid, rxid, xri, lxri;
14152 uint32_t sid, fctl; 13979 uint32_t sid, fctl;
14153 IOCB_t *icmd; 13980 IOCB_t *icmd;
14154 int rc; 13981 int rc;
@@ -14167,8 +13994,6 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
14167 "SID:x%x\n", oxid, sid); 13994 "SID:x%x\n", oxid, sid);
14168 return; 13995 return;
14169 } 13996 }
14170 if (lpfc_sli4_xri_inrange(phba, rxid))
14171 lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0);
14172 13997
14173 /* Allocate buffer for rsp iocb */ 13998 /* Allocate buffer for rsp iocb */
14174 ctiocb = lpfc_sli_get_iocbq(phba); 13999 ctiocb = lpfc_sli_get_iocbq(phba);
@@ -14199,13 +14024,24 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
14199 ctiocb->sli4_lxritag = NO_XRI; 14024 ctiocb->sli4_lxritag = NO_XRI;
14200 ctiocb->sli4_xritag = NO_XRI; 14025 ctiocb->sli4_xritag = NO_XRI;
14201 14026
14027 if (fctl & FC_FC_EX_CTX)
14028 /* Exchange responder sent the abort so we
14029 * own the oxid.
14030 */
14031 xri = oxid;
14032 else
14033 xri = rxid;
14034 lxri = lpfc_sli4_xri_inrange(phba, xri);
14035 if (lxri != NO_XRI)
14036 lpfc_set_rrq_active(phba, ndlp, lxri,
14037 (xri == oxid) ? rxid : oxid, 0);
14202 /* If the oxid maps to the FCP XRI range or if it is out of range, 14038 /* If the oxid maps to the FCP XRI range or if it is out of range,
14203 * send a BLS_RJT. The driver no longer has that exchange. 14039 * send a BLS_RJT. The driver no longer has that exchange.
14204 * Override the IOCB for a BA_RJT. 14040 * Override the IOCB for a BA_RJT.
14205 */ 14041 */
14206 if (oxid > (phba->sli4_hba.max_cfg_param.max_xri + 14042 if (xri > (phba->sli4_hba.max_cfg_param.max_xri +
14207 phba->sli4_hba.max_cfg_param.xri_base) || 14043 phba->sli4_hba.max_cfg_param.xri_base) ||
14208 oxid > (lpfc_sli4_get_els_iocb_cnt(phba) + 14044 xri > (lpfc_sli4_get_els_iocb_cnt(phba) +
14209 phba->sli4_hba.max_cfg_param.xri_base)) { 14045 phba->sli4_hba.max_cfg_param.xri_base)) {
14210 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; 14046 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
14211 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); 14047 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
@@ -14377,7 +14213,15 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
14377 /* Initialize the first IOCB. */ 14213 /* Initialize the first IOCB. */
14378 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0; 14214 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
14379 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; 14215 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
14380 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; 14216
14217 /* Check FC Header to see what TYPE of frame we are rcv'ing */
14218 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
14219 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
14220 first_iocbq->iocb.un.rcvels.parmRo =
14221 sli4_did_from_fc_hdr(fc_hdr);
14222 first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
14223 } else
14224 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
14381 first_iocbq->iocb.ulpContext = NO_XRI; 14225 first_iocbq->iocb.ulpContext = NO_XRI;
14382 first_iocbq->iocb.unsli3.rcvsli3.ox_id = 14226 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
14383 be16_to_cpu(fc_hdr->fh_ox_id); 14227 be16_to_cpu(fc_hdr->fh_ox_id);
@@ -14507,6 +14351,7 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
14507 struct fc_frame_header *fc_hdr; 14351 struct fc_frame_header *fc_hdr;
14508 struct lpfc_vport *vport; 14352 struct lpfc_vport *vport;
14509 uint32_t fcfi; 14353 uint32_t fcfi;
14354 uint32_t did;
14510 14355
14511 /* Process each received buffer */ 14356 /* Process each received buffer */
14512 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 14357 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
@@ -14522,12 +14367,32 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
14522 else 14367 else
14523 fcfi = bf_get(lpfc_rcqe_fcf_id, 14368 fcfi = bf_get(lpfc_rcqe_fcf_id,
14524 &dmabuf->cq_event.cqe.rcqe_cmpl); 14369 &dmabuf->cq_event.cqe.rcqe_cmpl);
14370
14525 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi); 14371 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
14526 if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) { 14372 if (!vport) {
14527 /* throw out the frame */ 14373 /* throw out the frame */
14528 lpfc_in_buf_free(phba, &dmabuf->dbuf); 14374 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14529 return; 14375 return;
14530 } 14376 }
14377
14378 /* d_id this frame is directed to */
14379 did = sli4_did_from_fc_hdr(fc_hdr);
14380
14381 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
14382 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
14383 (did != Fabric_DID)) {
14384 /*
14385 * Throw out the frame if we are not pt2pt.
14386 * The pt2pt protocol allows for discovery frames
14387 * to be received without a registered VPI.
14388 */
14389 if (!(vport->fc_flag & FC_PT2PT) ||
14390 (phba->link_state == LPFC_HBA_READY)) {
14391 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14392 return;
14393 }
14394 }
14395
14531 /* Handle the basic abort sequence (BA_ABTS) event */ 14396 /* Handle the basic abort sequence (BA_ABTS) event */
14532 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) { 14397 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
14533 lpfc_sli4_handle_unsol_abort(vport, dmabuf); 14398 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
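
The reworked drop test reads as one predicate: keep a frame if the VPI is registered, or it is addressed to Fabric_DID, or the port is still doing pt2pt discovery (FC_PT2PT set and the link not yet LPFC_HBA_READY). A condensed sketch of that check with assumed stand-in flag values (the driver inlines it as above):

    #include <stdbool.h>

    /* Assumed stand-ins for the driver's flag/state encodings. */
    #define FC_PT2PT            0x1000
    #define LPFC_VPI_REGISTERED 0x1
    #define LPFC_HBA_READY      32
    #define Fabric_DID          0xFFFFFE

    bool keep_unsol_frame(unsigned vpi_state, unsigned fc_flag,
                          int link_state, unsigned did)
    {
        if (vpi_state & LPFC_VPI_REGISTERED)
            return true;
        if (did == Fabric_DID)
            return true;
        /* pt2pt discovery frames may arrive before any VPI registers */
        return (fc_flag & FC_PT2PT) && link_state != LPFC_HBA_READY;
    }
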
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 3290b8e7ab65..2626f58c0747 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -68,7 +68,7 @@ struct lpfc_iocbq {
68#define LPFC_EXCHANGE_BUSY 0x40 /* SLI4 hba reported XB in response */ 68#define LPFC_EXCHANGE_BUSY 0x40 /* SLI4 hba reported XB in response */
69#define LPFC_USE_FCPWQIDX 0x80 /* Submit to specified FCPWQ index */ 69#define LPFC_USE_FCPWQIDX 0x80 /* Submit to specified FCPWQ index */
70#define DSS_SECURITY_OP 0x100 /* security IO */ 70#define DSS_SECURITY_OP 0x100 /* security IO */
71#define LPFC_IO_ON_Q 0x200 /* The IO is still on the TXCMPLQ */ 71#define LPFC_IO_ON_TXCMPLQ 0x200 /* The IO is still on the TXCMPLQ */
72#define LPFC_IO_DIF 0x400 /* T10 DIF IO */ 72#define LPFC_IO_DIF 0x400 /* T10 DIF IO */
73 73
74#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */ 74#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index c19d139618b7..a4a77080091b 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -75,11 +75,19 @@
75 (fc_hdr)->fh_s_id[1] << 8 | \ 75 (fc_hdr)->fh_s_id[1] << 8 | \
76 (fc_hdr)->fh_s_id[2]) 76 (fc_hdr)->fh_s_id[2])
77 77
78#define sli4_did_from_fc_hdr(fc_hdr) \
79 ((fc_hdr)->fh_d_id[0] << 16 | \
80 (fc_hdr)->fh_d_id[1] << 8 | \
81 (fc_hdr)->fh_d_id[2])
82
78#define sli4_fctl_from_fc_hdr(fc_hdr) \ 83#define sli4_fctl_from_fc_hdr(fc_hdr) \
79 ((fc_hdr)->fh_f_ctl[0] << 16 | \ 84 ((fc_hdr)->fh_f_ctl[0] << 16 | \
80 (fc_hdr)->fh_f_ctl[1] << 8 | \ 85 (fc_hdr)->fh_f_ctl[1] << 8 | \
81 (fc_hdr)->fh_f_ctl[2]) 86 (fc_hdr)->fh_f_ctl[2])
82 87
88#define sli4_type_from_fc_hdr(fc_hdr) \
89 ((fc_hdr)->fh_type)
90
83#define LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT 12000 91#define LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT 12000
84 92
85enum lpfc_sli4_queue_type { 93enum lpfc_sli4_queue_type {
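
The new sli4_did_from_fc_hdr mirrors its s_id/f_ctl siblings: each 24-bit FC header field is three big-endian bytes shifted back together. A tiny self-check of that assembly, with the header struct abbreviated to the fields used:

    #include <stdint.h>
    #include <stdio.h>

    struct fc_hdr { uint8_t fh_d_id[3]; uint8_t fh_type; };

    #define did_from_hdr(h) \
        ((h)->fh_d_id[0] << 16 | (h)->fh_d_id[1] << 8 | (h)->fh_d_id[2])

    int main(void)
    {
        struct fc_hdr h = { .fh_d_id = { 0xFF, 0xFF, 0xFE } };
        /* prints 0xFFFFFE, the well-known Fabric_DID */
        printf("d_id = 0x%06X\n", (unsigned)did_from_hdr(&h));
        return 0;
    }
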
@@ -493,14 +501,12 @@ struct lpfc_sli4_hba {
493 uint16_t next_rpi; 501 uint16_t next_rpi;
494 uint16_t scsi_xri_max; 502 uint16_t scsi_xri_max;
495 uint16_t scsi_xri_cnt; 503 uint16_t scsi_xri_cnt;
504 uint16_t els_xri_cnt;
496 uint16_t scsi_xri_start; 505 uint16_t scsi_xri_start;
497 struct list_head lpfc_free_sgl_list; 506 struct list_head lpfc_free_sgl_list;
498 struct list_head lpfc_sgl_list; 507 struct list_head lpfc_sgl_list;
499 struct lpfc_sglq **lpfc_els_sgl_array;
500 struct list_head lpfc_abts_els_sgl_list; 508 struct list_head lpfc_abts_els_sgl_list;
501 struct lpfc_scsi_buf **lpfc_scsi_psb_array;
502 struct list_head lpfc_abts_scsi_buf_list; 509 struct list_head lpfc_abts_scsi_buf_list;
503 uint32_t total_sglq_bufs;
504 struct lpfc_sglq **lpfc_sglq_active_list; 510 struct lpfc_sglq **lpfc_sglq_active_list;
505 struct list_head lpfc_rpi_hdr_list; 511 struct list_head lpfc_rpi_hdr_list;
506 unsigned long *rpi_bmask; 512 unsigned long *rpi_bmask;
@@ -509,7 +515,6 @@ struct lpfc_sli4_hba {
509 struct list_head lpfc_rpi_blk_list; 515 struct list_head lpfc_rpi_blk_list;
510 unsigned long *xri_bmask; 516 unsigned long *xri_bmask;
511 uint16_t *xri_ids; 517 uint16_t *xri_ids;
512 uint16_t xri_count;
513 struct list_head lpfc_xri_blk_list; 518 struct list_head lpfc_xri_blk_list;
514 unsigned long *vfi_bmask; 519 unsigned long *vfi_bmask;
515 uint16_t *vfi_ids; 520 uint16_t *vfi_ids;
@@ -614,11 +619,7 @@ int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
614int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *); 619int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
615uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *); 620uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
616int lpfc_sli4_post_async_mbox(struct lpfc_hba *); 621int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
617int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba);
618int lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba);
619int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int); 622int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
620int lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *, struct list_head *,
621 int);
622struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *); 623struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
623struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *); 624struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
624void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *); 625void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 25cefc254b76..59c57a409981 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.30" 21#define LPFC_DRIVER_VERSION "8.3.31"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" 24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index e5f416f8042d..e8f892647681 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
33/* 33/*
34 * MegaRAID SAS Driver meta data 34 * MegaRAID SAS Driver meta data
35 */ 35 */
36#define MEGASAS_VERSION "00.00.06.14-rc1" 36#define MEGASAS_VERSION "00.00.06.15-rc1"
37#define MEGASAS_RELDATE "Jan. 6, 2012" 37#define MEGASAS_RELDATE "Mar. 19, 2012"
38#define MEGASAS_EXT_VERSION "Fri. Jan. 6 17:00:00 PDT 2012" 38#define MEGASAS_EXT_VERSION "Mon. Mar. 19 17:00:00 PDT 2012"
39 39
40/* 40/*
41 * Device IDs 41 * Device IDs
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 8b300be44284..dc27598785e5 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * 19 *
20 * FILE: megaraid_sas_base.c 20 * FILE: megaraid_sas_base.c
21 * Version : v00.00.06.14-rc1 21 * Version : v00.00.06.15-rc1
22 * 22 *
23 * Authors: LSI Corporation 23 * Authors: LSI Corporation
24 * Sreenivas Bagalkote 24 * Sreenivas Bagalkote
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 294abb0defa6..e3d251a2e26a 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -362,15 +362,20 @@ MR_BuildRaidContext(struct megasas_instance *instance,
362 /* assume this IO needs the full row - we'll adjust if not true */ 362 /* assume this IO needs the full row - we'll adjust if not true */
363 regSize = stripSize; 363 regSize = stripSize;
364 364
365 /* If IO spans more than 1 strip, fp is not possible 365 /* Check if we can send this I/O via FastPath */
366 FP is not possible for writes on non-0 raid levels 366 if (raid->capability.fpCapable) {
367 FP is not possible if LD is not capable */ 367 if (isRead)
368 if (num_strips > 1 || (!isRead && raid->level != 0) || 368 io_info->fpOkForIo = (raid->capability.fpReadCapable &&
369 !raid->capability.fpCapable) { 369 ((num_strips == 1) ||
370 raid->capability.
371 fpReadAcrossStripe));
372 else
373 io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
374 ((num_strips == 1) ||
375 raid->capability.
376 fpWriteAcrossStripe));
377 } else
370 io_info->fpOkForIo = FALSE; 378 io_info->fpOkForIo = FALSE;
371 } else {
372 io_info->fpOkForIo = TRUE;
373 }
374 379
375 if (numRows == 1) { 380 if (numRows == 1) {
376 /* single-strip IOs can always lock only the data needed */ 381 /* single-strip IOs can always lock only the data needed */
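
The rewritten test splits the old blanket rule (FastPath only for single-strip I/O, and never for writes on non-0 RAID levels) into per-direction capability bits from the RAID map: single-strip I/O needs only fp{Read,Write}Capable, while multi-strip I/O additionally needs the matching AcrossStripe bit. The decision as a standalone function, with the struct abbreviated to the capability bits used:

    #include <stdbool.h>

    struct fp_caps {
        bool fpCapable;
        bool fpReadCapable,  fpReadAcrossStripe;
        bool fpWriteCapable, fpWriteAcrossStripe;
    };

    bool fp_ok_for_io(const struct fp_caps *c, bool is_read,
                      unsigned num_strips)
    {
        if (!c->fpCapable)
            return false;
        if (is_read)
            return c->fpReadCapable &&
                   (num_strips == 1 || c->fpReadAcrossStripe);
        return c->fpWriteCapable &&
               (num_strips == 1 || c->fpWriteAcrossStripe);
    }
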
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index bfd87fab39aa..a610cf1d4847 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -634,9 +634,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
634 fusion->reply_frames_desc_phys; 634 fusion->reply_frames_desc_phys;
635 IOCInitMessage->SystemRequestFrameBaseAddress = 635 IOCInitMessage->SystemRequestFrameBaseAddress =
636 fusion->io_request_frames_phys; 636 fusion->io_request_frames_phys;
637 /* Set to 0 for none or 1 MSI-X vectors */ 637 IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
638 IOCInitMessage->HostMSIxVectors = (instance->msix_vectors > 0 ?
639 instance->msix_vectors : 0);
640 init_frame = (struct megasas_init_frame *)cmd->frame; 638 init_frame = (struct megasas_init_frame *)cmd->frame;
641 memset(init_frame, 0, MEGAMFI_FRAME_SIZE); 639 memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
642 640
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index a01f0aa66f20..a80f3220c641 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -8,7 +8,7 @@
8 * scatter/gather formats. 8 * scatter/gather formats.
9 * Creation Date: June 21, 2006 9 * Creation Date: June 21, 2006
10 * 10 *
11 * mpi2.h Version: 02.00.22 11 * mpi2.h Version: 02.00.23
12 * 12 *
13 * Version History 13 * Version History
14 * --------------- 14 * ---------------
@@ -71,6 +71,7 @@
71 * 03-09-11 02.00.20 Bumped MPI2_HEADER_VERSION_UNIT. 71 * 03-09-11 02.00.20 Bumped MPI2_HEADER_VERSION_UNIT.
72 * 05-25-11 02.00.21 Bumped MPI2_HEADER_VERSION_UNIT. 72 * 05-25-11 02.00.21 Bumped MPI2_HEADER_VERSION_UNIT.
73 * 08-24-11 02.00.22 Bumped MPI2_HEADER_VERSION_UNIT. 73 * 08-24-11 02.00.22 Bumped MPI2_HEADER_VERSION_UNIT.
74 * 11-18-11 02.00.23 Bumped MPI2_HEADER_VERSION_UNIT.
74 * -------------------------------------------------------------------------- 75 * --------------------------------------------------------------------------
75 */ 76 */
76 77
@@ -96,7 +97,7 @@
96#define MPI2_VERSION_02_00 (0x0200) 97#define MPI2_VERSION_02_00 (0x0200)
97 98
98/* versioning for this MPI header set */ 99/* versioning for this MPI header set */
99#define MPI2_HEADER_VERSION_UNIT (0x16) 100#define MPI2_HEADER_VERSION_UNIT (0x17)
100#define MPI2_HEADER_VERSION_DEV (0x00) 101#define MPI2_HEADER_VERSION_DEV (0x00)
101#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) 102#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
102#define MPI2_HEADER_VERSION_UNIT_SHIFT (8) 103#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
@@ -480,7 +481,7 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION
480 MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR RAIDAcceleratorSuccess; 481 MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR RAIDAcceleratorSuccess;
481 U64 Words; 482 U64 Words;
482} MPI2_REPLY_DESCRIPTORS_UNION, MPI2_POINTER PTR_MPI2_REPLY_DESCRIPTORS_UNION, 483} MPI2_REPLY_DESCRIPTORS_UNION, MPI2_POINTER PTR_MPI2_REPLY_DESCRIPTORS_UNION,
483 Mpi2ReplyDescriptorsUnion_t, MPI2_POINTER pMpi2ReplyDescriptorsUnion_t; 484Mpi2ReplyDescriptorsUnion_t, MPI2_POINTER pMpi2ReplyDescriptorsUnion_t;
484 485
485 486
486 487
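
For reference, the bumped MPI2_HEADER_VERSION_UNIT occupies the high byte of the 16-bit header version, per the UNIT mask/shift defines visible in the hunk above. A small sketch of the packing and unpacking follows; the DEV mask and the packed-version expression are assumed by symmetry, since this diff shows only the UNIT side.

#include <stdio.h>

#define MPI2_HEADER_VERSION_UNIT       (0x17)
#define MPI2_HEADER_VERSION_DEV        (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK  (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
#define MPI2_HEADER_VERSION_DEV_MASK   (0x00FF)	/* assumed by symmetry */

int main(void)
{
	unsigned int ver = (MPI2_HEADER_VERSION_UNIT <<
			    MPI2_HEADER_VERSION_UNIT_SHIFT) |
			   MPI2_HEADER_VERSION_DEV;

	printf("unit=0x%02x dev=0x%02x\n",
	       (ver & MPI2_HEADER_VERSION_UNIT_MASK) >>
		   MPI2_HEADER_VERSION_UNIT_SHIFT,
	       ver & MPI2_HEADER_VERSION_DEV_MASK);
	return 0;
}
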
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index 3a023dad77a1..737fa8cfb54a 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -6,7 +6,7 @@
6 * Title: MPI Configuration messages and pages 6 * Title: MPI Configuration messages and pages
7 * Creation Date: November 10, 2006 7 * Creation Date: November 10, 2006
8 * 8 *
9 * mpi2_cnfg.h Version: 02.00.21 9 * mpi2_cnfg.h Version: 02.00.22
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -146,7 +146,9 @@
146 * Added SpinupFlags field containing a Disable Spin-up 146 * Added SpinupFlags field containing a Disable Spin-up
147 * bit to the MPI2_SAS_IOUNIT4_SPINUP_GROUP fields of 147 * bit to the MPI2_SAS_IOUNIT4_SPINUP_GROUP fields of
148 * SAS IO Unit Page 4. 148 * SAS IO Unit Page 4.
149 149 * 11-18-11 02.00.22 Added define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT.
150 * Added UEFIVersion field to BIOS Page 1 and defined new
151 * BiosOptions bits.
150 * -------------------------------------------------------------------------- 152 * --------------------------------------------------------------------------
151 */ 153 */
152 154
@@ -1131,9 +1133,10 @@ typedef struct _MPI2_CONFIG_PAGE_IOC_6
1131} MPI2_CONFIG_PAGE_IOC_6, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_6, 1133} MPI2_CONFIG_PAGE_IOC_6, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_6,
1132 Mpi2IOCPage6_t, MPI2_POINTER pMpi2IOCPage6_t; 1134 Mpi2IOCPage6_t, MPI2_POINTER pMpi2IOCPage6_t;
1133 1135
1134#define MPI2_IOCPAGE6_PAGEVERSION (0x04) 1136#define MPI2_IOCPAGE6_PAGEVERSION (0x05)
1135 1137
1136/* defines for IOC Page 6 CapabilitiesFlags */ 1138/* defines for IOC Page 6 CapabilitiesFlags */
1139#define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT (0x00000020)
1137#define MPI2_IOCPAGE6_CAP_FLAGS_RAID10_SUPPORT (0x00000010) 1140#define MPI2_IOCPAGE6_CAP_FLAGS_RAID10_SUPPORT (0x00000010)
1138#define MPI2_IOCPAGE6_CAP_FLAGS_RAID1_SUPPORT (0x00000008) 1141#define MPI2_IOCPAGE6_CAP_FLAGS_RAID1_SUPPORT (0x00000008)
1139#define MPI2_IOCPAGE6_CAP_FLAGS_RAID1E_SUPPORT (0x00000004) 1142#define MPI2_IOCPAGE6_CAP_FLAGS_RAID1E_SUPPORT (0x00000004)
@@ -1204,24 +1207,29 @@ typedef struct _MPI2_CONFIG_PAGE_IOC_8
1204 1207
1205typedef struct _MPI2_CONFIG_PAGE_BIOS_1 1208typedef struct _MPI2_CONFIG_PAGE_BIOS_1
1206{ 1209{
1207 MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ 1210 MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
1208 U32 BiosOptions; /* 0x04 */ 1211 U32 BiosOptions; /* 0x04 */
1209 U32 IOCSettings; /* 0x08 */ 1212 U32 IOCSettings; /* 0x08 */
1210 U32 Reserved1; /* 0x0C */ 1213 U32 Reserved1; /* 0x0C */
1211 U32 DeviceSettings; /* 0x10 */ 1214 U32 DeviceSettings; /* 0x10 */
1212 U16 NumberOfDevices; /* 0x14 */ 1215 U16 NumberOfDevices; /* 0x14 */
1213 U16 Reserved2; /* 0x16 */ 1216 U16 UEFIVersion; /* 0x16 */
1214 U16 IOTimeoutBlockDevicesNonRM; /* 0x18 */ 1217 U16 IOTimeoutBlockDevicesNonRM; /* 0x18 */
1215 U16 IOTimeoutSequential; /* 0x1A */ 1218 U16 IOTimeoutSequential; /* 0x1A */
1216 U16 IOTimeoutOther; /* 0x1C */ 1219 U16 IOTimeoutOther; /* 0x1C */
1217 U16 IOTimeoutBlockDevicesRM; /* 0x1E */ 1220 U16 IOTimeoutBlockDevicesRM; /* 0x1E */
1218} MPI2_CONFIG_PAGE_BIOS_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_BIOS_1, 1221} MPI2_CONFIG_PAGE_BIOS_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_BIOS_1,
1219 Mpi2BiosPage1_t, MPI2_POINTER pMpi2BiosPage1_t; 1222 Mpi2BiosPage1_t, MPI2_POINTER pMpi2BiosPage1_t;
1220 1223
1221#define MPI2_BIOSPAGE1_PAGEVERSION (0x04) 1224#define MPI2_BIOSPAGE1_PAGEVERSION (0x05)
1222 1225
1223/* values for BIOS Page 1 BiosOptions field */ 1226/* values for BIOS Page 1 BiosOptions field */
1224#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_BIOS (0x00000001) 1227#define MPI2_BIOSPAGE1_OPTIONS_MASK_UEFI_HII_REGISTRATION (0x00000006)
1228#define MPI2_BIOSPAGE1_OPTIONS_ENABLE_UEFI_HII (0x00000000)
1229#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_UEFI_HII (0x00000002)
1230#define MPI2_BIOSPAGE1_OPTIONS_VERSION_CHECK_UEFI_HII (0x00000004)
1231
1232#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_BIOS (0x00000001)
1225 1233
1226/* values for BIOS Page 1 IOCSettings field */ 1234/* values for BIOS Page 1 IOCSettings field */
1227#define MPI2_BIOSPAGE1_IOCSET_MASK_BOOT_PREFERENCE (0x00030000) 1235#define MPI2_BIOSPAGE1_IOCSET_MASK_BOOT_PREFERENCE (0x00030000)
@@ -1248,6 +1256,13 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_1
1248#define MPI2_BIOSPAGE1_DEVSET_DISABLE_NON_RM_LUN (0x00000002) 1256#define MPI2_BIOSPAGE1_DEVSET_DISABLE_NON_RM_LUN (0x00000002)
1249#define MPI2_BIOSPAGE1_DEVSET_DISABLE_OTHER_LUN (0x00000001) 1257#define MPI2_BIOSPAGE1_DEVSET_DISABLE_OTHER_LUN (0x00000001)
1250 1258
1259/* defines for BIOS Page 1 UEFIVersion field */
1260#define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_MASK (0xFF00)
1261#define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_SHIFT (8)
1262#define MPI2_BIOSPAGE1_UEFI_VER_MINOR_MASK (0x00FF)
1263#define MPI2_BIOSPAGE1_UEFI_VER_MINOR_SHIFT (0)
1264
1265
1251 1266
1252/* BIOS Page 2 */ 1267/* BIOS Page 2 */
1253 1268
@@ -2216,6 +2231,27 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_8 {
2216 2231
2217 2232
2218 2233
2234/* SAS IO Unit Page 16 */
2235
2236typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT16 {
2237 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
2238 U64 TimeStamp; /* 0x08 */
2239 U32 Reserved1; /* 0x10 */
2240 U32 Reserved2; /* 0x14 */
2241 U32 FastPathPendedRequests; /* 0x18 */
2242 U32 FastPathUnPendedRequests; /* 0x1C */
2243 U32 FastPathHostRequestStarts; /* 0x20 */
2244 U32 FastPathFirmwareRequestStarts; /* 0x24 */
2245 U32 FastPathHostCompletions; /* 0x28 */
2246 U32 FastPathFirmwareCompletions; /* 0x2C */
2247 U32 NonFastPathRequestStarts; /* 0x30 */
 2248 U32 NonFastPathHostCompletions; /* 0x34 */
2249} MPI2_CONFIG_PAGE_SASIOUNIT16,
2250MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT16,
2251Mpi2SasIOUnitPage16_t, MPI2_POINTER pMpi2SasIOUnitPage16_t;
2252
2253#define MPI2_SASIOUNITPAGE16_PAGEVERSION (0x00)
2254
2219 2255
2220/**************************************************************************** 2256/****************************************************************************
2221* SAS Expander Config Pages 2257* SAS Expander Config Pages
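
The new BIOS Page 1 UEFIVersion field packs a major and minor number into one U16, decoded with the MAJOR/MINOR mask+shift defines added above. A minimal sketch of the decode; the sample value is made up for illustration.

#include <stdio.h>

#define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_MASK  (0xFF00)
#define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_SHIFT (8)
#define MPI2_BIOSPAGE1_UEFI_VER_MINOR_MASK  (0x00FF)
#define MPI2_BIOSPAGE1_UEFI_VER_MINOR_SHIFT (0)

int main(void)
{
	unsigned int uefi_version = 0x020A;	/* made-up sample: UEFI 2.10 */

	printf("UEFI %u.%u\n",
	       (uefi_version & MPI2_BIOSPAGE1_UEFI_VER_MAJOR_MASK) >>
		   MPI2_BIOSPAGE1_UEFI_VER_MAJOR_SHIFT,
	       (uefi_version & MPI2_BIOSPAGE1_UEFI_VER_MINOR_MASK) >>
		   MPI2_BIOSPAGE1_UEFI_VER_MINOR_SHIFT);
	return 0;
}
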
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 8a59a772fdf2..6102ef2cb2d8 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -699,6 +699,11 @@ _base_display_reply_info(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
699 u16 ioc_status; 699 u16 ioc_status;
700 700
701 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); 701 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
702 if (unlikely(!mpi_reply)) {
703 printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
704 ioc->name, __FILE__, __LINE__, __func__);
705 return;
706 }
702 ioc_status = le16_to_cpu(mpi_reply->IOCStatus); 707 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
703#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 708#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
704 if ((ioc_status & MPI2_IOCSTATUS_MASK) && 709 if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
@@ -930,16 +935,18 @@ _base_interrupt(int irq, void *bus_id)
930 else if (request_desript_type == 935 else if (request_desript_type ==
931 MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS) 936 MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS)
932 goto next; 937 goto next;
933 if (smid) 938 if (smid) {
934 cb_idx = _base_get_cb_idx(ioc, smid); 939 cb_idx = _base_get_cb_idx(ioc, smid);
935 if (smid && cb_idx != 0xFF) { 940 if ((likely(cb_idx < MPT_MAX_CALLBACKS))
936 rc = mpt_callbacks[cb_idx](ioc, smid, msix_index, 941 && (likely(mpt_callbacks[cb_idx] != NULL))) {
937 reply); 942 rc = mpt_callbacks[cb_idx](ioc, smid,
943 msix_index, reply);
938 if (reply) 944 if (reply)
939 _base_display_reply_info(ioc, smid, msix_index, 945 _base_display_reply_info(ioc, smid,
940 reply); 946 msix_index, reply);
941 if (rc) 947 if (rc)
942 mpt2sas_base_free_smid(ioc, smid); 948 mpt2sas_base_free_smid(ioc, smid);
949 }
943 } 950 }
944 if (!smid) 951 if (!smid)
945 _base_async_event(ioc, msix_index, reply); 952 _base_async_event(ioc, msix_index, reply);
@@ -3343,7 +3350,7 @@ _base_get_port_facts(struct MPT2SAS_ADAPTER *ioc, int port, int sleep_flag)
3343 } 3350 }
3344 3351
3345 pfacts = &ioc->pfacts[port]; 3352 pfacts = &ioc->pfacts[port];
3346 memset(pfacts, 0, sizeof(Mpi2PortFactsReply_t)); 3353 memset(pfacts, 0, sizeof(struct mpt2sas_port_facts));
3347 pfacts->PortNumber = mpi_reply.PortNumber; 3354 pfacts->PortNumber = mpi_reply.PortNumber;
3348 pfacts->VP_ID = mpi_reply.VP_ID; 3355 pfacts->VP_ID = mpi_reply.VP_ID;
3349 pfacts->VF_ID = mpi_reply.VF_ID; 3356 pfacts->VF_ID = mpi_reply.VF_ID;
@@ -3385,7 +3392,7 @@ _base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3385 } 3392 }
3386 3393
3387 facts = &ioc->facts; 3394 facts = &ioc->facts;
3388 memset(facts, 0, sizeof(Mpi2IOCFactsReply_t)); 3395 memset(facts, 0, sizeof(struct mpt2sas_facts));
3389 facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion); 3396 facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
3390 facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion); 3397 facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
3391 facts->VP_ID = mpi_reply.VP_ID; 3398 facts->VP_ID = mpi_reply.VP_ID;
@@ -4153,7 +4160,8 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
4153 if (ioc->is_driver_loading) { 4160 if (ioc->is_driver_loading) {
4154 if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier 4161 if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
4155 == 0x80) { 4162 == 0x80) {
4156 hide_flag = (u8) (ioc->manu_pg10.OEMSpecificFlags0 & 4163 hide_flag = (u8) (
4164 le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
4157 MFG_PAGE10_HIDE_SSDS_MASK); 4165 MFG_PAGE10_HIDE_SSDS_MASK);
4158 if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK) 4166 if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
4159 ioc->mfg_pg10_hide_flag = hide_flag; 4167 ioc->mfg_pg10_hide_flag = hide_flag;
@@ -4262,7 +4270,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
4262 goto out_free_resources; 4270 goto out_free_resources;
4263 4271
4264 ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts, 4272 ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
4265 sizeof(Mpi2PortFactsReply_t), GFP_KERNEL); 4273 sizeof(struct mpt2sas_port_facts), GFP_KERNEL);
4266 if (!ioc->pfacts) { 4274 if (!ioc->pfacts) {
4267 r = -ENOMEM; 4275 r = -ENOMEM;
4268 goto out_free_resources; 4276 goto out_free_resources;
@@ -4279,7 +4287,6 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
4279 goto out_free_resources; 4287 goto out_free_resources;
4280 4288
4281 init_waitqueue_head(&ioc->reset_wq); 4289 init_waitqueue_head(&ioc->reset_wq);
4282
4283 /* allocate memory pd handle bitmask list */ 4290 /* allocate memory pd handle bitmask list */
4284 ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8); 4291 ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
4285 if (ioc->facts.MaxDevHandle % 8) 4292 if (ioc->facts.MaxDevHandle % 8)
@@ -4290,7 +4297,12 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
4290 r = -ENOMEM; 4297 r = -ENOMEM;
4291 goto out_free_resources; 4298 goto out_free_resources;
4292 } 4299 }
4293 4300 ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
4301 GFP_KERNEL);
4302 if (!ioc->blocking_handles) {
4303 r = -ENOMEM;
4304 goto out_free_resources;
4305 }
4294 ioc->fwfault_debug = mpt2sas_fwfault_debug; 4306 ioc->fwfault_debug = mpt2sas_fwfault_debug;
4295 4307
4296 /* base internal command bits */ 4308 /* base internal command bits */
@@ -4377,6 +4389,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
4377 if (ioc->is_warpdrive) 4389 if (ioc->is_warpdrive)
4378 kfree(ioc->reply_post_host_index); 4390 kfree(ioc->reply_post_host_index);
4379 kfree(ioc->pd_handles); 4391 kfree(ioc->pd_handles);
4392 kfree(ioc->blocking_handles);
4380 kfree(ioc->tm_cmds.reply); 4393 kfree(ioc->tm_cmds.reply);
4381 kfree(ioc->transport_cmds.reply); 4394 kfree(ioc->transport_cmds.reply);
4382 kfree(ioc->scsih_cmds.reply); 4395 kfree(ioc->scsih_cmds.reply);
@@ -4418,6 +4431,7 @@ mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc)
4418 if (ioc->is_warpdrive) 4431 if (ioc->is_warpdrive)
4419 kfree(ioc->reply_post_host_index); 4432 kfree(ioc->reply_post_host_index);
4420 kfree(ioc->pd_handles); 4433 kfree(ioc->pd_handles);
4434 kfree(ioc->blocking_handles);
4421 kfree(ioc->pfacts); 4435 kfree(ioc->pfacts);
4422 kfree(ioc->ctl_cmds.reply); 4436 kfree(ioc->ctl_cmds.reply);
4423 kfree(ioc->ctl_cmds.sense); 4437 kfree(ioc->ctl_cmds.sense);
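
The _base_interrupt() hunk above now validates cb_idx against MPT_MAX_CALLBACKS and checks that the table slot is non-NULL before dispatching, rather than trusting any index other than 0xFF. A userspace sketch of that guarded-dispatch pattern, with simplified stand-in types and table size:

#include <stddef.h>

#define MAX_CALLBACKS 32	/* stands in for MPT_MAX_CALLBACKS */

typedef int (*reply_cb_t)(unsigned short smid, unsigned int reply);

static reply_cb_t callbacks[MAX_CALLBACKS];

static int dispatch_reply(unsigned char cb_idx, unsigned short smid,
			  unsigned int reply)
{
	/* reject out-of-range indexes (0xFF means "no callback") and
	 * empty table slots instead of jumping through a bad pointer */
	if (cb_idx >= MAX_CALLBACKS || callbacks[cb_idx] == NULL)
		return -1;
	return callbacks[cb_idx](smid, reply);
}

static int demo_cb(unsigned short smid, unsigned int reply)
{
	(void)smid;
	(void)reply;
	return 0;
}

int main(void)
{
	callbacks[3] = demo_cb;
	return (dispatch_reply(3, 1, 0) == 0 &&
		dispatch_reply(0xFF, 1, 0) == -1) ? 0 : 1;
}
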
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index c7459fdc06cc..b6dd3a5de7f9 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,8 +69,8 @@
69#define MPT2SAS_DRIVER_NAME "mpt2sas" 69#define MPT2SAS_DRIVER_NAME "mpt2sas"
70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" 70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" 71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
72#define MPT2SAS_DRIVER_VERSION "12.100.00.00" 72#define MPT2SAS_DRIVER_VERSION "13.100.00.00"
73#define MPT2SAS_MAJOR_VERSION 12 73#define MPT2SAS_MAJOR_VERSION 13
74#define MPT2SAS_MINOR_VERSION 100 74#define MPT2SAS_MINOR_VERSION 100
75#define MPT2SAS_BUILD_VERSION 00 75#define MPT2SAS_BUILD_VERSION 00
76#define MPT2SAS_RELEASE_VERSION 00 76#define MPT2SAS_RELEASE_VERSION 00
@@ -720,6 +720,7 @@ typedef void (*MPT2SAS_FLUSH_RUNNING_CMDS)(struct MPT2SAS_ADAPTER *ioc);
720 * @io_missing_delay: time for IO completed by fw when PDR enabled 720 * @io_missing_delay: time for IO completed by fw when PDR enabled
721 * @device_missing_delay: time for device missing by fw when PDR enabled 721 * @device_missing_delay: time for device missing by fw when PDR enabled
722 * @sas_id : used for setting volume target IDs 722 * @sas_id : used for setting volume target IDs
723 * @blocking_handles: bitmask used to identify which devices need blocking
723 * @pd_handles : bitmask for PD handles 724 * @pd_handles : bitmask for PD handles
724 * @pd_handles_sz : size of pd_handle bitmask 725 * @pd_handles_sz : size of pd_handle bitmask
725 * @config_page_sz: config page size 726 * @config_page_sz: config page size
@@ -889,7 +890,7 @@ struct MPT2SAS_ADAPTER {
889 u8 io_missing_delay; 890 u8 io_missing_delay;
890 u16 device_missing_delay; 891 u16 device_missing_delay;
891 int sas_id; 892 int sas_id;
892 893 void *blocking_handles;
893 void *pd_handles; 894 void *pd_handles;
894 u16 pd_handles_sz; 895 u16 pd_handles_sz;
895 896
@@ -1058,7 +1059,8 @@ int mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle,
1058void mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle); 1059void mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
1059void mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle); 1060void mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
1060void mpt2sas_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address); 1061void mpt2sas_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address);
1061void mpt2sas_device_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address); 1062void mpt2sas_device_remove_by_sas_address(struct MPT2SAS_ADAPTER *ioc,
1063 u64 sas_address);
1062struct _sas_node *mpt2sas_scsih_expander_find_by_handle(struct MPT2SAS_ADAPTER *ioc, 1064struct _sas_node *mpt2sas_scsih_expander_find_by_handle(struct MPT2SAS_ADAPTER *ioc,
1063 u16 handle); 1065 u16 handle);
1064struct _sas_node *mpt2sas_scsih_expander_find_by_sas_address(struct MPT2SAS_ADAPTER 1066struct _sas_node *mpt2sas_scsih_expander_find_by_sas_address(struct MPT2SAS_ADAPTER
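
The new blocking_handles field is another one-bit-per-device-handle bitmask sized like pd_handles. A userspace sketch of how such a bitmask is sized and used, assuming the same rounding as mpt2sas_base_attach() (the helper names are illustrative, not driver API):

#include <limits.h>
#include <stdlib.h>

/* one bit per possible device handle, rounded up to whole bytes the
 * same way pd_handles_sz is computed in mpt2sas_base_attach() */
static unsigned char *alloc_handle_bitmap(unsigned int max_handle,
					  size_t *sz_out)
{
	size_t sz = max_handle / CHAR_BIT;

	if (max_handle % CHAR_BIT)
		sz++;
	*sz_out = sz;
	return calloc(1, sz);		/* zero-filled, like kzalloc() */
}

static void mark_handle(unsigned char *bm, unsigned int handle)
{
	bm[handle / CHAR_BIT] |= 1u << (handle % CHAR_BIT);
}

static int handle_is_marked(const unsigned char *bm, unsigned int handle)
{
	return bm[handle / CHAR_BIT] & (1u << (handle % CHAR_BIT));
}

int main(void)
{
	size_t sz;
	unsigned char *bm = alloc_handle_bitmap(300, &sz);	/* 38 bytes */
	int ok;

	if (!bm)
		return 1;
	mark_handle(bm, 42);
	ok = handle_is_marked(bm, 42) && !handle_is_marked(bm, 43);
	free(bm);
	return ok ? 0 : 1;
}
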
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index 3b9a28efea82..49bdd2dc8452 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -620,11 +620,10 @@ _ctl_set_task_mid(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
620 * @ioc: per adapter object 620 * @ioc: per adapter object
621 * @karg - (struct mpt2_ioctl_command) 621 * @karg - (struct mpt2_ioctl_command)
622 * @mf - pointer to mf in user space 622 * @mf - pointer to mf in user space
623 * @state - NON_BLOCKING or BLOCKING
624 */ 623 */
625static long 624static long
626_ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc, 625_ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command karg,
627 struct mpt2_ioctl_command karg, void __user *mf, enum block_state state) 626 void __user *mf)
628{ 627{
629 MPI2RequestHeader_t *mpi_request = NULL, *request; 628 MPI2RequestHeader_t *mpi_request = NULL, *request;
630 MPI2DefaultReply_t *mpi_reply; 629 MPI2DefaultReply_t *mpi_reply;
@@ -647,11 +646,6 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
647 646
648 issue_reset = 0; 647 issue_reset = 0;
649 648
650 if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex))
651 return -EAGAIN;
652 else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
653 return -ERESTARTSYS;
654
655 if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) { 649 if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) {
656 printk(MPT2SAS_ERR_FMT "%s: ctl_cmd in use\n", 650 printk(MPT2SAS_ERR_FMT "%s: ctl_cmd in use\n",
657 ioc->name, __func__); 651 ioc->name, __func__);
@@ -871,8 +865,16 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
871 if (smp_request->PassthroughFlags & 865 if (smp_request->PassthroughFlags &
872 MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE) 866 MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE)
873 data = (u8 *)&smp_request->SGL; 867 data = (u8 *)&smp_request->SGL;
874 else 868 else {
869 if (unlikely(data_out == NULL)) {
870 printk(KERN_ERR "failure at %s:%d/%s()!\n",
871 __FILE__, __LINE__, __func__);
872 mpt2sas_base_free_smid(ioc, smid);
873 ret = -EINVAL;
874 goto out;
875 }
875 data = data_out; 876 data = data_out;
877 }
876 878
877 if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) { 879 if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) {
878 ioc->ioc_link_reset_in_progress = 1; 880 ioc->ioc_link_reset_in_progress = 1;
@@ -985,7 +987,8 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
985 ret = -ENODATA; 987 ret = -ENODATA;
986 if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || 988 if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
987 mpi_request->Function == 989 mpi_request->Function ==
988 MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) { 990 MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
991 mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH)) {
989 printk(MPT2SAS_INFO_FMT "issue target reset: handle " 992 printk(MPT2SAS_INFO_FMT "issue target reset: handle "
990 "= (0x%04x)\n", ioc->name, 993 "= (0x%04x)\n", ioc->name,
991 le16_to_cpu(mpi_request->FunctionDependent1)); 994 le16_to_cpu(mpi_request->FunctionDependent1));
@@ -1013,27 +1016,24 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
1013 1016
1014 kfree(mpi_request); 1017 kfree(mpi_request);
1015 ioc->ctl_cmds.status = MPT2_CMD_NOT_USED; 1018 ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
1016 mutex_unlock(&ioc->ctl_cmds.mutex);
1017 return ret; 1019 return ret;
1018} 1020}
1019 1021
1020/** 1022/**
1021 * _ctl_getiocinfo - main handler for MPT2IOCINFO opcode 1023 * _ctl_getiocinfo - main handler for MPT2IOCINFO opcode
1024 * @ioc: per adapter object
1022 * @arg - user space buffer containing ioctl content 1025 * @arg - user space buffer containing ioctl content
1023 */ 1026 */
1024static long 1027static long
1025_ctl_getiocinfo(void __user *arg) 1028_ctl_getiocinfo(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
1026{ 1029{
1027 struct mpt2_ioctl_iocinfo karg; 1030 struct mpt2_ioctl_iocinfo karg;
1028 struct MPT2SAS_ADAPTER *ioc;
1029 1031
1030 if (copy_from_user(&karg, arg, sizeof(karg))) { 1032 if (copy_from_user(&karg, arg, sizeof(karg))) {
1031 printk(KERN_ERR "failure at %s:%d/%s()!\n", 1033 printk(KERN_ERR "failure at %s:%d/%s()!\n",
1032 __FILE__, __LINE__, __func__); 1034 __FILE__, __LINE__, __func__);
1033 return -EFAULT; 1035 return -EFAULT;
1034 } 1036 }
1035 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1036 return -ENODEV;
1037 1037
1038 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name, 1038 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
1039 __func__)); 1039 __func__));
@@ -1069,21 +1069,19 @@ _ctl_getiocinfo(void __user *arg)
1069 1069
1070/** 1070/**
1071 * _ctl_eventquery - main handler for MPT2EVENTQUERY opcode 1071 * _ctl_eventquery - main handler for MPT2EVENTQUERY opcode
1072 * @ioc: per adapter object
1072 * @arg - user space buffer containing ioctl content 1073 * @arg - user space buffer containing ioctl content
1073 */ 1074 */
1074static long 1075static long
1075_ctl_eventquery(void __user *arg) 1076_ctl_eventquery(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
1076{ 1077{
1077 struct mpt2_ioctl_eventquery karg; 1078 struct mpt2_ioctl_eventquery karg;
1078 struct MPT2SAS_ADAPTER *ioc;
1079 1079
1080 if (copy_from_user(&karg, arg, sizeof(karg))) { 1080 if (copy_from_user(&karg, arg, sizeof(karg))) {
1081 printk(KERN_ERR "failure at %s:%d/%s()!\n", 1081 printk(KERN_ERR "failure at %s:%d/%s()!\n",
1082 __FILE__, __LINE__, __func__); 1082 __FILE__, __LINE__, __func__);
1083 return -EFAULT; 1083 return -EFAULT;
1084 } 1084 }
1085 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1086 return -ENODEV;
1087 1085
1088 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name, 1086 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
1089 __func__)); 1087 __func__));
@@ -1102,21 +1100,19 @@ _ctl_eventquery(void __user *arg)
1102 1100
1103/** 1101/**
1104 * _ctl_eventenable - main handler for MPT2EVENTENABLE opcode 1102 * _ctl_eventenable - main handler for MPT2EVENTENABLE opcode
1103 * @ioc: per adapter object
1105 * @arg - user space buffer containing ioctl content 1104 * @arg - user space buffer containing ioctl content
1106 */ 1105 */
1107static long 1106static long
1108_ctl_eventenable(void __user *arg) 1107_ctl_eventenable(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
1109{ 1108{
1110 struct mpt2_ioctl_eventenable karg; 1109 struct mpt2_ioctl_eventenable karg;
1111 struct MPT2SAS_ADAPTER *ioc;
1112 1110
1113 if (copy_from_user(&karg, arg, sizeof(karg))) { 1111 if (copy_from_user(&karg, arg, sizeof(karg))) {
1114 printk(KERN_ERR "failure at %s:%d/%s()!\n", 1112 printk(KERN_ERR "failure at %s:%d/%s()!\n",
1115 __FILE__, __LINE__, __func__); 1113 __FILE__, __LINE__, __func__);
1116 return -EFAULT; 1114 return -EFAULT;
1117 } 1115 }
1118 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1119 return -ENODEV;
1120 1116
1121 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name, 1117 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
1122 __func__)); 1118 __func__));
@@ -1142,13 +1138,13 @@ _ctl_eventenable(void __user *arg)
1142 1138
1143/** 1139/**
1144 * _ctl_eventreport - main handler for MPT2EVENTREPORT opcode 1140 * _ctl_eventreport - main handler for MPT2EVENTREPORT opcode
1141 * @ioc: per adapter object
1145 * @arg - user space buffer containing ioctl content 1142 * @arg - user space buffer containing ioctl content
1146 */ 1143 */
1147static long 1144static long
1148_ctl_eventreport(void __user *arg) 1145_ctl_eventreport(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
1149{ 1146{
1150 struct mpt2_ioctl_eventreport karg; 1147 struct mpt2_ioctl_eventreport karg;
1151 struct MPT2SAS_ADAPTER *ioc;
1152 u32 number_bytes, max_events, max; 1148 u32 number_bytes, max_events, max;
1153 struct mpt2_ioctl_eventreport __user *uarg = arg; 1149 struct mpt2_ioctl_eventreport __user *uarg = arg;
1154 1150
@@ -1157,8 +1153,6 @@ _ctl_eventreport(void __user *arg)
1157 __FILE__, __LINE__, __func__); 1153 __FILE__, __LINE__, __func__);
1158 return -EFAULT; 1154 return -EFAULT;
1159 } 1155 }
1160 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1161 return -ENODEV;
1162 1156
1163 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name, 1157 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
1164 __func__)); 1158 __func__));
@@ -1188,13 +1182,13 @@ _ctl_eventreport(void __user *arg)
1188 1182
1189/** 1183/**
1190 * _ctl_do_reset - main handler for MPT2HARDRESET opcode 1184 * _ctl_do_reset - main handler for MPT2HARDRESET opcode
1185 * @ioc: per adapter object
1191 * @arg - user space buffer containing ioctl content 1186 * @arg - user space buffer containing ioctl content
1192 */ 1187 */
1193static long 1188static long
1194_ctl_do_reset(void __user *arg) 1189_ctl_do_reset(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
1195{ 1190{
1196 struct mpt2_ioctl_diag_reset karg; 1191 struct mpt2_ioctl_diag_reset karg;
1197 struct MPT2SAS_ADAPTER *ioc;
1198 int retval; 1192 int retval;
1199 1193
1200 if (copy_from_user(&karg, arg, sizeof(karg))) { 1194 if (copy_from_user(&karg, arg, sizeof(karg))) {
@@ -1202,8 +1196,6 @@ _ctl_do_reset(void __user *arg)
1202 __FILE__, __LINE__, __func__); 1196 __FILE__, __LINE__, __func__);
1203 return -EFAULT; 1197 return -EFAULT;
1204 } 1198 }
1205 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1206 return -ENODEV;
1207 1199
1208 if (ioc->shost_recovery || ioc->pci_error_recovery || 1200 if (ioc->shost_recovery || ioc->pci_error_recovery ||
1209 ioc->is_driver_loading) 1201 ioc->is_driver_loading)
@@ -1292,13 +1284,13 @@ _ctl_btdh_search_raid_device(struct MPT2SAS_ADAPTER *ioc,
1292 1284
1293/** 1285/**
1294 * _ctl_btdh_mapping - main handler for MPT2BTDHMAPPING opcode 1286 * _ctl_btdh_mapping - main handler for MPT2BTDHMAPPING opcode
1287 * @ioc: per adapter object
1295 * @arg - user space buffer containing ioctl content 1288 * @arg - user space buffer containing ioctl content
1296 */ 1289 */
1297static long 1290static long
1298_ctl_btdh_mapping(void __user *arg) 1291_ctl_btdh_mapping(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
1299{ 1292{
1300 struct mpt2_ioctl_btdh_mapping karg; 1293 struct mpt2_ioctl_btdh_mapping karg;
1301 struct MPT2SAS_ADAPTER *ioc;
1302 int rc; 1294 int rc;
1303 1295
1304 if (copy_from_user(&karg, arg, sizeof(karg))) { 1296 if (copy_from_user(&karg, arg, sizeof(karg))) {
@@ -1306,8 +1298,6 @@ _ctl_btdh_mapping(void __user *arg)
1306 __FILE__, __LINE__, __func__); 1298 __FILE__, __LINE__, __func__);
1307 return -EFAULT; 1299 return -EFAULT;
1308 } 1300 }
1309 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1310 return -ENODEV;
1311 1301
1312 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 1302 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1313 __func__)); 1303 __func__));
@@ -1576,17 +1566,16 @@ mpt2sas_enable_diag_buffer(struct MPT2SAS_ADAPTER *ioc, u8 bits_to_register)
1576 1566
1577/** 1567/**
1578 * _ctl_diag_register - application register with driver 1568 * _ctl_diag_register - application register with driver
1569 * @ioc: per adapter object
1579 * @arg - user space buffer containing ioctl content 1570 * @arg - user space buffer containing ioctl content
1580 * @state - NON_BLOCKING or BLOCKING
1581 * 1571 *
 1582 * This will allow the driver to set up any required buffers that will be 1572 * This will allow the driver to set up any required buffers that will be
1583 * needed by firmware to communicate with the driver. 1573 * needed by firmware to communicate with the driver.
1584 */ 1574 */
1585static long 1575static long
1586_ctl_diag_register(void __user *arg, enum block_state state) 1576_ctl_diag_register(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
1587{ 1577{
1588 struct mpt2_diag_register karg; 1578 struct mpt2_diag_register karg;
1589 struct MPT2SAS_ADAPTER *ioc;
1590 long rc; 1579 long rc;
1591 1580
1592 if (copy_from_user(&karg, arg, sizeof(karg))) { 1581 if (copy_from_user(&karg, arg, sizeof(karg))) {
@@ -1594,30 +1583,23 @@ _ctl_diag_register(void __user *arg, enum block_state state)
1594 __FILE__, __LINE__, __func__); 1583 __FILE__, __LINE__, __func__);
1595 return -EFAULT; 1584 return -EFAULT;
1596 } 1585 }
1597 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1598 return -ENODEV;
1599 1586
1600 if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex))
1601 return -EAGAIN;
1602 else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
1603 return -ERESTARTSYS;
1604 rc = _ctl_diag_register_2(ioc, &karg); 1587 rc = _ctl_diag_register_2(ioc, &karg);
1605 mutex_unlock(&ioc->ctl_cmds.mutex);
1606 return rc; 1588 return rc;
1607} 1589}
1608 1590
1609/** 1591/**
1610 * _ctl_diag_unregister - application unregister with driver 1592 * _ctl_diag_unregister - application unregister with driver
1593 * @ioc: per adapter object
1611 * @arg - user space buffer containing ioctl content 1594 * @arg - user space buffer containing ioctl content
1612 * 1595 *
 1613 * This will allow the driver to clean up any memory allocated for diag 1596 * This will allow the driver to clean up any memory allocated for diag
1614 * messages and to free up any resources. 1597 * messages and to free up any resources.
1615 */ 1598 */
1616static long 1599static long
1617_ctl_diag_unregister(void __user *arg) 1600_ctl_diag_unregister(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
1618{ 1601{
1619 struct mpt2_diag_unregister karg; 1602 struct mpt2_diag_unregister karg;
1620 struct MPT2SAS_ADAPTER *ioc;
1621 void *request_data; 1603 void *request_data;
1622 dma_addr_t request_data_dma; 1604 dma_addr_t request_data_dma;
1623 u32 request_data_sz; 1605 u32 request_data_sz;
@@ -1628,8 +1610,6 @@ _ctl_diag_unregister(void __user *arg)
1628 __FILE__, __LINE__, __func__); 1610 __FILE__, __LINE__, __func__);
1629 return -EFAULT; 1611 return -EFAULT;
1630 } 1612 }
1631 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1632 return -ENODEV;
1633 1613
1634 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 1614 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1635 __func__)); 1615 __func__));
@@ -1678,6 +1658,7 @@ _ctl_diag_unregister(void __user *arg)
1678 1658
1679/** 1659/**
1680 * _ctl_diag_query - query relevant info associated with diag buffers 1660 * _ctl_diag_query - query relevant info associated with diag buffers
1661 * @ioc: per adapter object
1681 * @arg - user space buffer containing ioctl content 1662 * @arg - user space buffer containing ioctl content
1682 * 1663 *
1683 * The application will send only buffer_type and unique_id. Driver will 1664 * The application will send only buffer_type and unique_id. Driver will
@@ -1685,10 +1666,9 @@ _ctl_diag_unregister(void __user *arg)
1685 * 0x00, the driver will return info specified by Buffer Type. 1666 * 0x00, the driver will return info specified by Buffer Type.
1686 */ 1667 */
1687static long 1668static long
1688_ctl_diag_query(void __user *arg) 1669_ctl_diag_query(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
1689{ 1670{
1690 struct mpt2_diag_query karg; 1671 struct mpt2_diag_query karg;
1691 struct MPT2SAS_ADAPTER *ioc;
1692 void *request_data; 1672 void *request_data;
1693 int i; 1673 int i;
1694 u8 buffer_type; 1674 u8 buffer_type;
@@ -1698,8 +1678,6 @@ _ctl_diag_query(void __user *arg)
1698 __FILE__, __LINE__, __func__); 1678 __FILE__, __LINE__, __func__);
1699 return -EFAULT; 1679 return -EFAULT;
1700 } 1680 }
1701 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1702 return -ENODEV;
1703 1681
1704 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 1682 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1705 __func__)); 1683 __func__));
@@ -1866,17 +1844,15 @@ _ctl_send_release(struct MPT2SAS_ADAPTER *ioc, u8 buffer_type, u8 *issue_reset)
1866/** 1844/**
1867 * _ctl_diag_release - request to send Diag Release Message to firmware 1845 * _ctl_diag_release - request to send Diag Release Message to firmware
1868 * @arg - user space buffer containing ioctl content 1846 * @arg - user space buffer containing ioctl content
1869 * @state - NON_BLOCKING or BLOCKING
1870 * 1847 *
 1871 * This allows ownership of the specified buffer to be returned to the driver, 1848 * This allows ownership of the specified buffer to be returned to the driver,
1872 * allowing an application to read the buffer without fear that firmware is 1849 * allowing an application to read the buffer without fear that firmware is
 1873 * overwriting information in the buffer. 1850 * overwriting information in the buffer.
1874 */ 1851 */
1875static long 1852static long
1876_ctl_diag_release(void __user *arg, enum block_state state) 1853_ctl_diag_release(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
1877{ 1854{
1878 struct mpt2_diag_release karg; 1855 struct mpt2_diag_release karg;
1879 struct MPT2SAS_ADAPTER *ioc;
1880 void *request_data; 1856 void *request_data;
1881 int rc; 1857 int rc;
1882 u8 buffer_type; 1858 u8 buffer_type;
@@ -1887,8 +1863,6 @@ _ctl_diag_release(void __user *arg, enum block_state state)
1887 __FILE__, __LINE__, __func__); 1863 __FILE__, __LINE__, __func__);
1888 return -EFAULT; 1864 return -EFAULT;
1889 } 1865 }
1890 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1891 return -ENODEV;
1892 1866
1893 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 1867 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1894 __func__)); 1868 __func__));
@@ -1942,32 +1916,25 @@ _ctl_diag_release(void __user *arg, enum block_state state)
1942 return 0; 1916 return 0;
1943 } 1917 }
1944 1918
1945 if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex))
1946 return -EAGAIN;
1947 else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
1948 return -ERESTARTSYS;
1949
1950 rc = _ctl_send_release(ioc, buffer_type, &issue_reset); 1919 rc = _ctl_send_release(ioc, buffer_type, &issue_reset);
1951 1920
1952 if (issue_reset) 1921 if (issue_reset)
1953 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, 1922 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
1954 FORCE_BIG_HAMMER); 1923 FORCE_BIG_HAMMER);
1955 1924
1956 mutex_unlock(&ioc->ctl_cmds.mutex);
1957 return rc; 1925 return rc;
1958} 1926}
1959 1927
1960/** 1928/**
1961 * _ctl_diag_read_buffer - request for copy of the diag buffer 1929 * _ctl_diag_read_buffer - request for copy of the diag buffer
1930 * @ioc: per adapter object
1962 * @arg - user space buffer containing ioctl content 1931 * @arg - user space buffer containing ioctl content
1963 * @state - NON_BLOCKING or BLOCKING
1964 */ 1932 */
1965static long 1933static long
1966_ctl_diag_read_buffer(void __user *arg, enum block_state state) 1934_ctl_diag_read_buffer(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
1967{ 1935{
1968 struct mpt2_diag_read_buffer karg; 1936 struct mpt2_diag_read_buffer karg;
1969 struct mpt2_diag_read_buffer __user *uarg = arg; 1937 struct mpt2_diag_read_buffer __user *uarg = arg;
1970 struct MPT2SAS_ADAPTER *ioc;
1971 void *request_data, *diag_data; 1938 void *request_data, *diag_data;
1972 Mpi2DiagBufferPostRequest_t *mpi_request; 1939 Mpi2DiagBufferPostRequest_t *mpi_request;
1973 Mpi2DiagBufferPostReply_t *mpi_reply; 1940 Mpi2DiagBufferPostReply_t *mpi_reply;
@@ -1983,8 +1950,6 @@ _ctl_diag_read_buffer(void __user *arg, enum block_state state)
1983 __FILE__, __LINE__, __func__); 1950 __FILE__, __LINE__, __func__);
1984 return -EFAULT; 1951 return -EFAULT;
1985 } 1952 }
1986 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1987 return -ENODEV;
1988 1953
1989 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 1954 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1990 __func__)); 1955 __func__));
@@ -2055,10 +2020,6 @@ _ctl_diag_read_buffer(void __user *arg, enum block_state state)
2055 } 2020 }
2056 /* Get a free request frame and save the message context. 2021 /* Get a free request frame and save the message context.
2057 */ 2022 */
2058 if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex))
2059 return -EAGAIN;
2060 else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
2061 return -ERESTARTSYS;
2062 2023
2063 if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) { 2024 if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) {
2064 printk(MPT2SAS_ERR_FMT "%s: ctl_cmd in use\n", 2025 printk(MPT2SAS_ERR_FMT "%s: ctl_cmd in use\n",
@@ -2139,115 +2100,170 @@ _ctl_diag_read_buffer(void __user *arg, enum block_state state)
2139 out: 2100 out:
2140 2101
2141 ioc->ctl_cmds.status = MPT2_CMD_NOT_USED; 2102 ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
2142 mutex_unlock(&ioc->ctl_cmds.mutex);
2143 return rc; 2103 return rc;
2144} 2104}
2145 2105
2106
2107#ifdef CONFIG_COMPAT
2108/**
2109 * _ctl_compat_mpt_command - convert 32bit pointers to 64bit.
2110 * @ioc: per adapter object
2111 * @cmd - ioctl opcode
2112 * @arg - (struct mpt2_ioctl_command32)
2113 *
2114 * MPT2COMMAND32 - Handle 32bit applications running on 64bit os.
2115 */
2116static long
2117_ctl_compat_mpt_command(struct MPT2SAS_ADAPTER *ioc, unsigned cmd,
2118 void __user *arg)
2119{
2120 struct mpt2_ioctl_command32 karg32;
2121 struct mpt2_ioctl_command32 __user *uarg;
2122 struct mpt2_ioctl_command karg;
2123
2124 if (_IOC_SIZE(cmd) != sizeof(struct mpt2_ioctl_command32))
2125 return -EINVAL;
2126
2127 uarg = (struct mpt2_ioctl_command32 __user *) arg;
2128
2129 if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) {
2130 printk(KERN_ERR "failure at %s:%d/%s()!\n",
2131 __FILE__, __LINE__, __func__);
2132 return -EFAULT;
2133 }
2134
2135 memset(&karg, 0, sizeof(struct mpt2_ioctl_command));
2136 karg.hdr.ioc_number = karg32.hdr.ioc_number;
2137 karg.hdr.port_number = karg32.hdr.port_number;
2138 karg.hdr.max_data_size = karg32.hdr.max_data_size;
2139 karg.timeout = karg32.timeout;
2140 karg.max_reply_bytes = karg32.max_reply_bytes;
2141 karg.data_in_size = karg32.data_in_size;
2142 karg.data_out_size = karg32.data_out_size;
2143 karg.max_sense_bytes = karg32.max_sense_bytes;
2144 karg.data_sge_offset = karg32.data_sge_offset;
2145 karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr);
2146 karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr);
2147 karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr);
2148 karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr);
2149 return _ctl_do_mpt_command(ioc, karg, &uarg->mf);
2150}
2151#endif
2152
2146/** 2153/**
2147 * _ctl_ioctl_main - main ioctl entry point 2154 * _ctl_ioctl_main - main ioctl entry point
2148 * @file - (struct file) 2155 * @file - (struct file)
2149 * @cmd - ioctl opcode 2156 * @cmd - ioctl opcode
2150 * @arg - 2157 * @arg -
2158 * compat - handles 32 bit applications in 64bit os
2151 */ 2159 */
2152static long 2160static long
2153_ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg) 2161_ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
2162 u8 compat)
2154{ 2163{
2164 struct MPT2SAS_ADAPTER *ioc;
2165 struct mpt2_ioctl_header ioctl_header;
2155 enum block_state state; 2166 enum block_state state;
2156 long ret = -EINVAL; 2167 long ret = -EINVAL;
2157 2168
2158 state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : 2169 /* get IOCTL header */
2159 BLOCKING; 2170 if (copy_from_user(&ioctl_header, (char __user *)arg,
2171 sizeof(struct mpt2_ioctl_header))) {
2172 printk(KERN_ERR "failure at %s:%d/%s()!\n",
2173 __FILE__, __LINE__, __func__);
2174 return -EFAULT;
2175 }
2176
2177 if (_ctl_verify_adapter(ioctl_header.ioc_number, &ioc) == -1 || !ioc)
2178 return -ENODEV;
2179 if (ioc->shost_recovery || ioc->pci_error_recovery ||
2180 ioc->is_driver_loading)
2181 return -EAGAIN;
2182
2183 state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
2184 if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex))
2185 return -EAGAIN;
2186 else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
2187 return -ERESTARTSYS;
2160 2188
2161 switch (cmd) { 2189 switch (cmd) {
2162 case MPT2IOCINFO: 2190 case MPT2IOCINFO:
2163 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_iocinfo)) 2191 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_iocinfo))
2164 ret = _ctl_getiocinfo(arg); 2192 ret = _ctl_getiocinfo(ioc, arg);
2165 break; 2193 break;
2194#ifdef CONFIG_COMPAT
2195 case MPT2COMMAND32:
2196#endif
2166 case MPT2COMMAND: 2197 case MPT2COMMAND:
2167 { 2198 {
2168 struct mpt2_ioctl_command karg;
2169 struct mpt2_ioctl_command __user *uarg; 2199 struct mpt2_ioctl_command __user *uarg;
2170 struct MPT2SAS_ADAPTER *ioc; 2200 struct mpt2_ioctl_command karg;
2171 2201#ifdef CONFIG_COMPAT
2202 if (compat) {
2203 ret = _ctl_compat_mpt_command(ioc, cmd, arg);
2204 break;
2205 }
2206#endif
2172 if (copy_from_user(&karg, arg, sizeof(karg))) { 2207 if (copy_from_user(&karg, arg, sizeof(karg))) {
2173 printk(KERN_ERR "failure at %s:%d/%s()!\n", 2208 printk(KERN_ERR "failure at %s:%d/%s()!\n",
2174 __FILE__, __LINE__, __func__); 2209 __FILE__, __LINE__, __func__);
2175 return -EFAULT; 2210 ret = -EFAULT;
2211 break;
2176 } 2212 }
2177 2213
2178 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 ||
2179 !ioc)
2180 return -ENODEV;
2181
2182 if (ioc->shost_recovery || ioc->pci_error_recovery ||
2183 ioc->is_driver_loading)
2184 return -EAGAIN;
2185
2186 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_command)) { 2214 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_command)) {
2187 uarg = arg; 2215 uarg = arg;
2188 ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf, state); 2216 ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf);
2189 } 2217 }
2190 break; 2218 break;
2191 } 2219 }
2192 case MPT2EVENTQUERY: 2220 case MPT2EVENTQUERY:
2193 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_eventquery)) 2221 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_eventquery))
2194 ret = _ctl_eventquery(arg); 2222 ret = _ctl_eventquery(ioc, arg);
2195 break; 2223 break;
2196 case MPT2EVENTENABLE: 2224 case MPT2EVENTENABLE:
2197 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_eventenable)) 2225 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_eventenable))
2198 ret = _ctl_eventenable(arg); 2226 ret = _ctl_eventenable(ioc, arg);
2199 break; 2227 break;
2200 case MPT2EVENTREPORT: 2228 case MPT2EVENTREPORT:
2201 ret = _ctl_eventreport(arg); 2229 ret = _ctl_eventreport(ioc, arg);
2202 break; 2230 break;
2203 case MPT2HARDRESET: 2231 case MPT2HARDRESET:
2204 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_diag_reset)) 2232 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_diag_reset))
2205 ret = _ctl_do_reset(arg); 2233 ret = _ctl_do_reset(ioc, arg);
2206 break; 2234 break;
2207 case MPT2BTDHMAPPING: 2235 case MPT2BTDHMAPPING:
2208 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_btdh_mapping)) 2236 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_btdh_mapping))
2209 ret = _ctl_btdh_mapping(arg); 2237 ret = _ctl_btdh_mapping(ioc, arg);
2210 break; 2238 break;
2211 case MPT2DIAGREGISTER: 2239 case MPT2DIAGREGISTER:
2212 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_register)) 2240 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_register))
2213 ret = _ctl_diag_register(arg, state); 2241 ret = _ctl_diag_register(ioc, arg);
2214 break; 2242 break;
2215 case MPT2DIAGUNREGISTER: 2243 case MPT2DIAGUNREGISTER:
2216 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_unregister)) 2244 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_unregister))
2217 ret = _ctl_diag_unregister(arg); 2245 ret = _ctl_diag_unregister(ioc, arg);
2218 break; 2246 break;
2219 case MPT2DIAGQUERY: 2247 case MPT2DIAGQUERY:
2220 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_query)) 2248 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_query))
2221 ret = _ctl_diag_query(arg); 2249 ret = _ctl_diag_query(ioc, arg);
2222 break; 2250 break;
2223 case MPT2DIAGRELEASE: 2251 case MPT2DIAGRELEASE:
2224 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_release)) 2252 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_release))
2225 ret = _ctl_diag_release(arg, state); 2253 ret = _ctl_diag_release(ioc, arg);
2226 break; 2254 break;
2227 case MPT2DIAGREADBUFFER: 2255 case MPT2DIAGREADBUFFER:
2228 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_read_buffer)) 2256 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_read_buffer))
2229 ret = _ctl_diag_read_buffer(arg, state); 2257 ret = _ctl_diag_read_buffer(ioc, arg);
2230 break; 2258 break;
2231 default: 2259 default:
2232 {
2233 struct mpt2_ioctl_command karg;
2234 struct MPT2SAS_ADAPTER *ioc;
2235
2236 if (copy_from_user(&karg, arg, sizeof(karg))) {
2237 printk(KERN_ERR "failure at %s:%d/%s()!\n",
2238 __FILE__, __LINE__, __func__);
2239 return -EFAULT;
2240 }
2241
2242 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 ||
2243 !ioc)
2244 return -ENODEV;
2245 2260
2246 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT 2261 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT
2247 "unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd)); 2262 "unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd));
2248 break; 2263 break;
2249 } 2264 }
2250 } 2265
2266 mutex_unlock(&ioc->ctl_cmds.mutex);
2251 return ret; 2267 return ret;
2252} 2268}
2253 2269
@@ -2262,66 +2278,11 @@ _ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2262{ 2278{
2263 long ret; 2279 long ret;
2264 2280
2265 mutex_lock(&_ctl_mutex); 2281 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0);
2266 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg);
2267 mutex_unlock(&_ctl_mutex);
2268 return ret; 2282 return ret;
2269} 2283}
2270
2271#ifdef CONFIG_COMPAT 2284#ifdef CONFIG_COMPAT
2272/** 2285/**
2273 * _ctl_compat_mpt_command - convert 32bit pointers to 64bit.
2274 * @file - (struct file)
2275 * @cmd - ioctl opcode
2276 * @arg - (struct mpt2_ioctl_command32)
2277 *
2278 * MPT2COMMAND32 - Handle 32bit applications running on 64bit os.
2279 */
2280static long
2281_ctl_compat_mpt_command(struct file *file, unsigned cmd, unsigned long arg)
2282{
2283 struct mpt2_ioctl_command32 karg32;
2284 struct mpt2_ioctl_command32 __user *uarg;
2285 struct mpt2_ioctl_command karg;
2286 struct MPT2SAS_ADAPTER *ioc;
2287 enum block_state state;
2288
2289 if (_IOC_SIZE(cmd) != sizeof(struct mpt2_ioctl_command32))
2290 return -EINVAL;
2291
2292 uarg = (struct mpt2_ioctl_command32 __user *) arg;
2293
2294 if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) {
2295 printk(KERN_ERR "failure at %s:%d/%s()!\n",
2296 __FILE__, __LINE__, __func__);
2297 return -EFAULT;
2298 }
2299 if (_ctl_verify_adapter(karg32.hdr.ioc_number, &ioc) == -1 || !ioc)
2300 return -ENODEV;
2301
2302 if (ioc->shost_recovery || ioc->pci_error_recovery ||
2303 ioc->is_driver_loading)
2304 return -EAGAIN;
2305
2306 memset(&karg, 0, sizeof(struct mpt2_ioctl_command));
2307 karg.hdr.ioc_number = karg32.hdr.ioc_number;
2308 karg.hdr.port_number = karg32.hdr.port_number;
2309 karg.hdr.max_data_size = karg32.hdr.max_data_size;
2310 karg.timeout = karg32.timeout;
2311 karg.max_reply_bytes = karg32.max_reply_bytes;
2312 karg.data_in_size = karg32.data_in_size;
2313 karg.data_out_size = karg32.data_out_size;
2314 karg.max_sense_bytes = karg32.max_sense_bytes;
2315 karg.data_sge_offset = karg32.data_sge_offset;
2316 karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr);
2317 karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr);
2318 karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr);
2319 karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr);
2320 state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
2321 return _ctl_do_mpt_command(ioc, karg, &uarg->mf, state);
2322}
2323
2324/**
2325 * _ctl_ioctl_compat - main ioctl entry point (compat) 2286 * _ctl_ioctl_compat - main ioctl entry point (compat)
2326 * @file - 2287 * @file -
2327 * @cmd - 2288 * @cmd -
@@ -2334,12 +2295,7 @@ _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
2334{ 2295{
2335 long ret; 2296 long ret;
2336 2297
2337 mutex_lock(&_ctl_mutex); 2298 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1);
2338 if (cmd == MPT2COMMAND32)
2339 ret = _ctl_compat_mpt_command(file, cmd, arg);
2340 else
2341 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg);
2342 mutex_unlock(&_ctl_mutex);
2343 return ret; 2299 return ret;
2344} 2300}
2345#endif 2301#endif
@@ -2884,7 +2840,7 @@ _ctl_host_trace_buffer_enable_store(struct device *cdev,
2884 struct mpt2_diag_register diag_register; 2840 struct mpt2_diag_register diag_register;
2885 u8 issue_reset = 0; 2841 u8 issue_reset = 0;
2886 2842
2887 if (sscanf(buf, "%s", str) != 1) 2843 if (sscanf(buf, "%9s", str) != 1)
2888 return -EINVAL; 2844 return -EINVAL;
2889 2845
2890 if (!strcmp(str, "post")) { 2846 if (!strcmp(str, "post")) {
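
The ctl refactor above hoists the adapter lookup, the recovery checks, and the ctl_cmds mutex out of the individual opcode handlers into the single _ctl_ioctl_main() entry point, so every handler now receives a validated ioc and runs with the per-adapter mutex already held. A userspace sketch of the resulting locking shape, using a pthread mutex as a stand-in for ioc->ctl_cmds.mutex; the dispatch body is elided:

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t ctl_cmds_mutex = PTHREAD_MUTEX_INITIALIZER;

static long ctl_ioctl_main(unsigned int cmd, int nonblocking)
{
	long ret;

	if (nonblocking) {
		if (pthread_mutex_trylock(&ctl_cmds_mutex))
			return -EAGAIN;	/* caller asked not to wait */
	} else {
		pthread_mutex_lock(&ctl_cmds_mutex);
	}

	switch (cmd) {
	/* ...dispatch to per-opcode handlers here; none of them takes
	 * or drops ctl_cmds_mutex on its own any more... */
	default:
		ret = -EINVAL;
		break;
	}

	pthread_mutex_unlock(&ctl_cmds_mutex);
	return ret;
}

int main(void)
{
	return ctl_ioctl_main(0, 0) == -EINVAL ? 0 : 1;
}
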
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index d953a57e779d..76973e8ca4ba 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -579,14 +579,12 @@ _scsih_sas_device_remove(struct MPT2SAS_ADAPTER *ioc,
579 return; 579 return;
580 580
581 spin_lock_irqsave(&ioc->sas_device_lock, flags); 581 spin_lock_irqsave(&ioc->sas_device_lock, flags);
582 if (mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 582 list_del(&sas_device->list);
583 sas_device->sas_address)) { 583 kfree(sas_device);
584 list_del(&sas_device->list);
585 kfree(sas_device);
586 }
587 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 584 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
588} 585}
589 586
587
590/** 588/**
591 * _scsih_sas_device_add - insert sas_device to the list. 589 * _scsih_sas_device_add - insert sas_device to the list.
592 * @ioc: per adapter object 590 * @ioc: per adapter object
@@ -645,8 +643,8 @@ _scsih_sas_device_init_add(struct MPT2SAS_ADAPTER *ioc,
645 643
646 spin_lock_irqsave(&ioc->sas_device_lock, flags); 644 spin_lock_irqsave(&ioc->sas_device_lock, flags);
647 list_add_tail(&sas_device->list, &ioc->sas_device_init_list); 645 list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
648 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
649 _scsih_determine_boot_device(ioc, sas_device, 0); 646 _scsih_determine_boot_device(ioc, sas_device, 0);
647 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
650} 648}
651 649
652/** 650/**
@@ -755,7 +753,6 @@ _scsih_raid_device_add(struct MPT2SAS_ADAPTER *ioc,
755 * @ioc: per adapter object 753 * @ioc: per adapter object
756 * @raid_device: raid_device object 754 * @raid_device: raid_device object
757 * 755 *
758 * This is removed from the raid_device_list link list.
759 */ 756 */
760static void 757static void
761_scsih_raid_device_remove(struct MPT2SAS_ADAPTER *ioc, 758_scsih_raid_device_remove(struct MPT2SAS_ADAPTER *ioc,
@@ -765,7 +762,6 @@ _scsih_raid_device_remove(struct MPT2SAS_ADAPTER *ioc,
765 762
766 spin_lock_irqsave(&ioc->raid_device_lock, flags); 763 spin_lock_irqsave(&ioc->raid_device_lock, flags);
767 list_del(&raid_device->list); 764 list_del(&raid_device->list);
768 memset(raid_device, 0, sizeof(struct _raid_device));
769 kfree(raid_device); 765 kfree(raid_device);
770 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 766 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
771} 767}
@@ -1199,10 +1195,10 @@ _scsih_adjust_queue_depth(struct scsi_device *sdev, int qdepth)
1199 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1195 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1200 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 1196 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
1201 sas_device_priv_data->sas_target->sas_address); 1197 sas_device_priv_data->sas_target->sas_address);
1202 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1203 if (sas_device && sas_device->device_info & 1198 if (sas_device && sas_device->device_info &
1204 MPI2_SAS_DEVICE_INFO_SATA_DEVICE) 1199 MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
1205 max_depth = MPT2SAS_SATA_QUEUE_DEPTH; 1200 max_depth = MPT2SAS_SATA_QUEUE_DEPTH;
1201 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1206 1202
1207 not_sata: 1203 not_sata:
1208 1204
@@ -1299,7 +1295,8 @@ _scsih_target_alloc(struct scsi_target *starget)
1299 sas_target_priv_data->handle = raid_device->handle; 1295 sas_target_priv_data->handle = raid_device->handle;
1300 sas_target_priv_data->sas_address = raid_device->wwid; 1296 sas_target_priv_data->sas_address = raid_device->wwid;
1301 sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME; 1297 sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
1302 sas_target_priv_data->raid_device = raid_device; 1298 if (ioc->is_warpdrive)
1299 sas_target_priv_data->raid_device = raid_device;
1303 raid_device->starget = starget; 1300 raid_device->starget = starget;
1304 } 1301 }
1305 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 1302 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
@@ -1465,12 +1462,12 @@ _scsih_slave_destroy(struct scsi_device *sdev)
1465/** 1462/**
1466 * _scsih_display_sata_capabilities - sata capabilities 1463 * _scsih_display_sata_capabilities - sata capabilities
1467 * @ioc: per adapter object 1464 * @ioc: per adapter object
1468 * @sas_device: the sas_device object 1465 * @handle: device handle
1469 * @sdev: scsi device struct 1466 * @sdev: scsi device struct
1470 */ 1467 */
1471static void 1468static void
1472_scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc, 1469_scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc,
1473 struct _sas_device *sas_device, struct scsi_device *sdev) 1470 u16 handle, struct scsi_device *sdev)
1474{ 1471{
1475 Mpi2ConfigReply_t mpi_reply; 1472 Mpi2ConfigReply_t mpi_reply;
1476 Mpi2SasDevicePage0_t sas_device_pg0; 1473 Mpi2SasDevicePage0_t sas_device_pg0;
@@ -1479,7 +1476,7 @@ _scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc,
1479 u32 device_info; 1476 u32 device_info;
1480 1477
1481 if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 1478 if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
1482 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, sas_device->handle))) { 1479 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
1483 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", 1480 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1484 ioc->name, __FILE__, __LINE__, __func__); 1481 ioc->name, __FILE__, __LINE__, __func__);
1485 return; 1482 return;
@@ -1537,27 +1534,40 @@ _scsih_get_resync(struct device *dev)
1537 Mpi2RaidVolPage0_t vol_pg0; 1534 Mpi2RaidVolPage0_t vol_pg0;
1538 Mpi2ConfigReply_t mpi_reply; 1535 Mpi2ConfigReply_t mpi_reply;
1539 u32 volume_status_flags; 1536 u32 volume_status_flags;
1540 u8 percent_complete = 0; 1537 u8 percent_complete;
1538 u16 handle;
1539
1540 percent_complete = 0;
1541 handle = 0;
1542 if (ioc->is_warpdrive)
1543 goto out;
1541 1544
1542 spin_lock_irqsave(&ioc->raid_device_lock, flags); 1545 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1543 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id, 1546 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
1544 sdev->channel); 1547 sdev->channel);
1548 if (raid_device) {
1549 handle = raid_device->handle;
1550 percent_complete = raid_device->percent_complete;
1551 }
1545 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 1552 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1546 1553
1547 if (!raid_device || ioc->is_warpdrive) 1554 if (!handle)
1548 goto out; 1555 goto out;
1549 1556
1550 if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, 1557 if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
1551 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, 1558 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
1552 sizeof(Mpi2RaidVolPage0_t))) { 1559 sizeof(Mpi2RaidVolPage0_t))) {
1553 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", 1560 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1554 ioc->name, __FILE__, __LINE__, __func__); 1561 ioc->name, __FILE__, __LINE__, __func__);
1562 percent_complete = 0;
1555 goto out; 1563 goto out;
1556 } 1564 }
1557 1565
1558 volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags); 1566 volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
1559 if (volume_status_flags & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) 1567 if (!(volume_status_flags &
1560 percent_complete = raid_device->percent_complete; 1568 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
1569 percent_complete = 0;
1570
1561 out: 1571 out:
1562 raid_set_resync(mpt2sas_raid_template, dev, percent_complete); 1572 raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
1563} 1573}
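
The hunk above changes _scsih_get_resync() to snapshot handle and percent_complete while raid_device_lock is held, so the raid_device pointer is never dereferenced after the unlock. A minimal userspace sketch of the same copy-under-lock pattern, with a pthread mutex standing in for the spinlock; every name here is illustrative, not driver code:

#include <pthread.h>
#include <stdio.h>

struct raid_dev {
	unsigned short handle;
	unsigned char  percent_complete;
};

static pthread_mutex_t raid_lock = PTHREAD_MUTEX_INITIALIZER;
static struct raid_dev *raid_dev;	/* may be freed by a rescan thread */

static void report_resync(void)
{
	unsigned short handle = 0;
	unsigned char percent = 0;

	pthread_mutex_lock(&raid_lock);
	if (raid_dev) {			/* copy fields while the object is pinned */
		handle = raid_dev->handle;
		percent = raid_dev->percent_complete;
	}
	pthread_mutex_unlock(&raid_lock);

	if (!handle)			/* device went away: report nothing */
		return;
	printf("handle 0x%04x: resync %u%% complete\n",
	       (unsigned)handle, (unsigned)percent);
}

int main(void)
{
	static struct raid_dev rd = { 0x0001, 42 };
	raid_dev = &rd;
	report_resync();
	return 0;
}

After the unlock only the local copies are used, so a concurrent delete can free the object without leaving this path holding a stale pointer.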
@@ -1577,17 +1587,20 @@ _scsih_get_state(struct device *dev)
1577 Mpi2ConfigReply_t mpi_reply; 1587 Mpi2ConfigReply_t mpi_reply;
1578 u32 volstate; 1588 u32 volstate;
1579 enum raid_state state = RAID_STATE_UNKNOWN; 1589 enum raid_state state = RAID_STATE_UNKNOWN;
1590 u16 handle = 0;
1580 1591
1581 spin_lock_irqsave(&ioc->raid_device_lock, flags); 1592 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1582 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id, 1593 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
1583 sdev->channel); 1594 sdev->channel);
1595 if (raid_device)
1596 handle = raid_device->handle;
1584 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 1597 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1585 1598
1586 if (!raid_device) 1599 if (!raid_device)
1587 goto out; 1600 goto out;
1588 1601
1589 if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, 1602 if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
1590 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, 1603 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
1591 sizeof(Mpi2RaidVolPage0_t))) { 1604 sizeof(Mpi2RaidVolPage0_t))) {
1592 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", 1605 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1593 ioc->name, __FILE__, __LINE__, __func__); 1606 ioc->name, __FILE__, __LINE__, __func__);
@@ -1620,14 +1633,14 @@ _scsih_get_state(struct device *dev)
1620/** 1633/**
1621 * _scsih_set_level - set raid level 1634 * _scsih_set_level - set raid level
1622 * @sdev: scsi device struct 1635 * @sdev: scsi device struct
1623 * @raid_device: raid_device object 1636 * @volume_type: volume type
1624 */ 1637 */
1625static void 1638static void
1626_scsih_set_level(struct scsi_device *sdev, struct _raid_device *raid_device) 1639_scsih_set_level(struct scsi_device *sdev, u8 volume_type)
1627{ 1640{
1628 enum raid_level level = RAID_LEVEL_UNKNOWN; 1641 enum raid_level level = RAID_LEVEL_UNKNOWN;
1629 1642
1630 switch (raid_device->volume_type) { 1643 switch (volume_type) {
1631 case MPI2_RAID_VOL_TYPE_RAID0: 1644 case MPI2_RAID_VOL_TYPE_RAID0:
1632 level = RAID_LEVEL_0; 1645 level = RAID_LEVEL_0;
1633 break; 1646 break;
@@ -1722,6 +1735,7 @@ _scsih_disable_ddio(struct MPT2SAS_ADAPTER *ioc)
1722 struct _raid_device *raid_device; 1735 struct _raid_device *raid_device;
1723 u16 handle; 1736 u16 handle;
1724 u16 ioc_status; 1737 u16 ioc_status;
1738 unsigned long flags;
1725 1739
1726 handle = 0xFFFF; 1740 handle = 0xFFFF;
1727 while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply, 1741 while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
@@ -1731,9 +1745,11 @@ _scsih_disable_ddio(struct MPT2SAS_ADAPTER *ioc)
1731 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) 1745 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
1732 break; 1746 break;
1733 handle = le16_to_cpu(vol_pg1.DevHandle); 1747 handle = le16_to_cpu(vol_pg1.DevHandle);
1748 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1734 raid_device = _scsih_raid_device_find_by_handle(ioc, handle); 1749 raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
1735 if (raid_device) 1750 if (raid_device)
1736 raid_device->direct_io_enabled = 0; 1751 raid_device->direct_io_enabled = 0;
1752 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1737 } 1753 }
1738 return; 1754 return;
1739} 1755}
@@ -1838,7 +1854,8 @@ _scsih_init_warpdrive_properties(struct MPT2SAS_ADAPTER *ioc,
1838 if (mpt2sas_config_get_phys_disk_pg0(ioc, &mpi_reply, 1854 if (mpt2sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
1839 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM, 1855 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
1840 vol_pg0->PhysDisk[count].PhysDiskNum) || 1856 vol_pg0->PhysDisk[count].PhysDiskNum) ||
1841 pd_pg0.DevHandle == MPT2SAS_INVALID_DEVICE_HANDLE) { 1857 le16_to_cpu(pd_pg0.DevHandle) ==
1858 MPT2SAS_INVALID_DEVICE_HANDLE) {
1842 printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is " 1859 printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is "
1843 "disabled for the drive with handle(0x%04x) member" 1860 "disabled for the drive with handle(0x%04x) member"
1844 "handle retrieval failed for member number=%d\n", 1861 "handle retrieval failed for member number=%d\n",
@@ -1968,19 +1985,21 @@ _scsih_slave_configure(struct scsi_device *sdev)
1968 u8 ssp_target = 0; 1985 u8 ssp_target = 0;
1969 char *ds = ""; 1986 char *ds = "";
1970 char *r_level = ""; 1987 char *r_level = "";
1988 u16 handle, volume_handle = 0;
1989 u64 volume_wwid = 0;
1971 1990
1972 qdepth = 1; 1991 qdepth = 1;
1973 sas_device_priv_data = sdev->hostdata; 1992 sas_device_priv_data = sdev->hostdata;
1974 sas_device_priv_data->configured_lun = 1; 1993 sas_device_priv_data->configured_lun = 1;
1975 sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT; 1994 sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
1976 sas_target_priv_data = sas_device_priv_data->sas_target; 1995 sas_target_priv_data = sas_device_priv_data->sas_target;
1996 handle = sas_target_priv_data->handle;
1977 1997
1978 /* raid volume handling */ 1998 /* raid volume handling */
1979 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) { 1999 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
1980 2000
1981 spin_lock_irqsave(&ioc->raid_device_lock, flags); 2001 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1982 raid_device = _scsih_raid_device_find_by_handle(ioc, 2002 raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
1983 sas_target_priv_data->handle);
1984 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 2003 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1985 if (!raid_device) { 2004 if (!raid_device) {
1986 dfailprintk(ioc, printk(MPT2SAS_WARN_FMT 2005 dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
@@ -1989,8 +2008,6 @@ _scsih_slave_configure(struct scsi_device *sdev)
1989 return 1; 2008 return 1;
1990 } 2009 }
1991 2010
1992 _scsih_get_volume_capabilities(ioc, raid_device);
1993
1994 if (_scsih_get_volume_capabilities(ioc, raid_device)) { 2011 if (_scsih_get_volume_capabilities(ioc, raid_device)) {
1995 dfailprintk(ioc, printk(MPT2SAS_WARN_FMT 2012 dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
1996 "failure at %s:%d/%s()!\n", ioc->name, __FILE__, 2013 "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
@@ -2058,68 +2075,67 @@ _scsih_slave_configure(struct scsi_device *sdev)
2058 _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); 2075 _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT);
2059 /* raid transport support */ 2076 /* raid transport support */
2060 if (!ioc->is_warpdrive) 2077 if (!ioc->is_warpdrive)
2061 _scsih_set_level(sdev, raid_device); 2078 _scsih_set_level(sdev, raid_device->volume_type);
2062 return 0; 2079 return 0;
2063 } 2080 }
2064 2081
2065 /* non-raid handling */ 2082 /* non-raid handling */
2066 spin_lock_irqsave(&ioc->sas_device_lock, flags); 2083 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
2067 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 2084 if (mpt2sas_config_get_volume_handle(ioc, handle,
2068 sas_device_priv_data->sas_target->sas_address); 2085 &volume_handle)) {
2069 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 2086 dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
2070 if (sas_device) { 2087 "failure at %s:%d/%s()!\n", ioc->name,
2071 if (sas_target_priv_data->flags & 2088 __FILE__, __LINE__, __func__));
2072 MPT_TARGET_FLAGS_RAID_COMPONENT) { 2089 return 1;
2073 if (mpt2sas_config_get_volume_handle(ioc,
2074 sas_device->handle, &sas_device->volume_handle)) {
2075 dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
2076 "failure at %s:%d/%s()!\n", ioc->name,
2077 __FILE__, __LINE__, __func__));
2078 return 1;
2079 }
2080 if (sas_device->volume_handle &&
2081 mpt2sas_config_get_volume_wwid(ioc,
2082 sas_device->volume_handle,
2083 &sas_device->volume_wwid)) {
2084 dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
2085 "failure at %s:%d/%s()!\n", ioc->name,
2086 __FILE__, __LINE__, __func__));
2087 return 1;
2088 }
2089 } 2090 }
2090 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) { 2091 if (volume_handle && mpt2sas_config_get_volume_wwid(ioc,
2091 qdepth = MPT2SAS_SAS_QUEUE_DEPTH; 2092 volume_handle, &volume_wwid)) {
2092 ssp_target = 1; 2093 dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
2093 ds = "SSP"; 2094 "failure at %s:%d/%s()!\n", ioc->name,
2094 } else { 2095 __FILE__, __LINE__, __func__));
2095 qdepth = MPT2SAS_SATA_QUEUE_DEPTH; 2096 return 1;
2096 if (sas_device->device_info &
2097 MPI2_SAS_DEVICE_INFO_STP_TARGET)
2098 ds = "STP";
2099 else if (sas_device->device_info &
2100 MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2101 ds = "SATA";
2102 } 2097 }
2098 }
2103 2099
2104 sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " 2100 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2105 "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n", 2101 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
2106 ds, sas_device->handle, 2102 sas_device_priv_data->sas_target->sas_address);
2107 (unsigned long long)sas_device->sas_address, 2103 if (!sas_device) {
2108 sas_device->phy, 2104 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2109 (unsigned long long)sas_device->device_name);
2110 sdev_printk(KERN_INFO, sdev, "%s: "
2111 "enclosure_logical_id(0x%016llx), slot(%d)\n", ds,
2112 (unsigned long long) sas_device->enclosure_logical_id,
2113 sas_device->slot);
2114
2115 if (!ssp_target)
2116 _scsih_display_sata_capabilities(ioc, sas_device, sdev);
2117 } else {
2118 dfailprintk(ioc, printk(MPT2SAS_WARN_FMT 2105 dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
2119 "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, 2106 "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
2120 __func__)); 2107 __LINE__, __func__));
2121 return 1; 2108 return 1;
2122 } 2109 }
2110 sas_device->volume_handle = volume_handle;
2111 sas_device->volume_wwid = volume_wwid;
2112 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2113 qdepth = MPT2SAS_SAS_QUEUE_DEPTH;
2114 ssp_target = 1;
2115 ds = "SSP";
2116 } else {
2117 qdepth = MPT2SAS_SATA_QUEUE_DEPTH;
2118 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
2119 ds = "STP";
2120 else if (sas_device->device_info &
2121 MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2122 ds = "SATA";
2123 }
2124 sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), "
2125 "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
2126 ds, sas_device->handle,
2127 (unsigned long long)sas_device->sas_address,
2128 sas_device->phy,
2129 (unsigned long long)sas_device->device_name);
2130 sdev_printk(KERN_INFO, sdev, "%s: "
2131 "enclosure_logical_id(0x%016llx), slot(%d)\n", ds,
2132 (unsigned long long) sas_device->enclosure_logical_id,
2133 sas_device->slot);
2134
2135 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2136 if (!ssp_target)
2137 _scsih_display_sata_capabilities(ioc, handle, sdev);
2138
2123 2139
2124 _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); 2140 _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT);
2125 2141
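
A theme of the _scsih_slave_configure() rework above: the firmware config-page reads, which can sleep, now run before the sas_device_lock spinlock is taken, and their results are only published to the object inside the short critical section. A hedged userspace analogy, with a mutex standing in for the spinlock and a sleep standing in for the firmware round trip:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct dev {
	unsigned short volume_handle;
	unsigned long long volume_wwid;
};

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static struct dev the_dev;

/* Stand-in for a firmware request: slow, may block. */
static int fetch_volume_info(unsigned short *handle, unsigned long long *wwid)
{
	usleep(1000);		/* must never happen while a spinlock is held */
	*handle = 0x0042;
	*wwid = 0x5000c50012345678ULL;
	return 0;
}

int main(void)
{
	unsigned short handle;
	unsigned long long wwid;

	if (fetch_volume_info(&handle, &wwid))	/* slow path, unlocked */
		return 1;

	pthread_mutex_lock(&dev_lock);		/* short path: just publish */
	the_dev.volume_handle = handle;
	the_dev.volume_wwid = wwid;
	pthread_mutex_unlock(&dev_lock);

	printf("volume 0x%04x wwid 0x%016llx\n",
	       (unsigned)the_dev.volume_handle, the_dev.volume_wwid);
	return 0;
}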
@@ -2899,7 +2915,7 @@ _scsih_ublock_io_all_device(struct MPT2SAS_ADAPTER *ioc)
2899 * During device pull we need to appropriately set the sdev state. 2915 * During device pull we need to appropriately set the sdev state.
2900 */ 2916 */
2901static void 2917static void
2902_scsih_ublock_io_device(struct MPT2SAS_ADAPTER *ioc, u16 handle) 2918_scsih_ublock_io_device(struct MPT2SAS_ADAPTER *ioc, u64 sas_address)
2903{ 2919{
2904 struct MPT2SAS_DEVICE *sas_device_priv_data; 2920 struct MPT2SAS_DEVICE *sas_device_priv_data;
2905 struct scsi_device *sdev; 2921 struct scsi_device *sdev;
@@ -2910,10 +2926,12 @@ _scsih_ublock_io_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
2910 continue; 2926 continue;
2911 if (!sas_device_priv_data->block) 2927 if (!sas_device_priv_data->block)
2912 continue; 2928 continue;
2913 if (sas_device_priv_data->sas_target->handle == handle) { 2929 if (sas_device_priv_data->sas_target->sas_address ==
2930 sas_address) {
2914 dewtprintk(ioc, sdev_printk(KERN_INFO, sdev, 2931 dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
2915 MPT2SAS_INFO_FMT "SDEV_RUNNING: " 2932 MPT2SAS_INFO_FMT "SDEV_RUNNING: "
2916 "handle(0x%04x)\n", ioc->name, handle)); 2933 "sas address(0x%016llx)\n", ioc->name,
2934 (unsigned long long)sas_address));
2917 sas_device_priv_data->block = 0; 2935 sas_device_priv_data->block = 0;
2918 scsi_internal_device_unblock(sdev); 2936 scsi_internal_device_unblock(sdev);
2919 } 2937 }
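
_scsih_ublock_io_device() above now matches devices by SAS address instead of by handle: handles are reassigned by the firmware across resets, while the SAS address identifies the same end device for its lifetime. A sketch of the lookup-by-stable-key idea, using hypothetical structures:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct target {
	uint16_t handle;	/* volatile: firmware may renumber it */
	uint64_t sas_address;	/* stable for the life of the device */
	int blocked;
};

static struct target targets[] = {
	{ 0x0009, 0x500605b000000001ULL, 1 },
	{ 0x000b, 0x500605b000000002ULL, 1 },
};

static void unblock_by_address(uint64_t sas_address)
{
	for (size_t i = 0; i < sizeof(targets) / sizeof(targets[0]); i++) {
		if (targets[i].sas_address != sas_address)
			continue;
		targets[i].blocked = 0;	/* found even if the handle changed */
		printf("unblocked sas address 0x%016llx\n",
		       (unsigned long long)sas_address);
	}
}

int main(void)
{
	targets[0].handle = 0x0011;	/* a reset renumbered the handle ... */
	unblock_by_address(0x500605b000000001ULL);	/* ... lookup still works */
	return 0;
}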
@@ -3006,10 +3024,10 @@ _scsih_block_io_to_children_attached_to_ex(struct MPT2SAS_ADAPTER *ioc,
3006 sas_device = 3024 sas_device =
3007 mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 3025 mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
3008 mpt2sas_port->remote_identify.sas_address); 3026 mpt2sas_port->remote_identify.sas_address);
3027 if (sas_device)
3028 set_bit(sas_device->handle,
3029 ioc->blocking_handles);
3009 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 3030 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3010 if (!sas_device)
3011 continue;
3012 _scsih_block_io_device(ioc, sas_device->handle);
3013 } 3031 }
3014 } 3032 }
3015 3033
@@ -3020,12 +3038,9 @@ _scsih_block_io_to_children_attached_to_ex(struct MPT2SAS_ADAPTER *ioc,
3020 SAS_EDGE_EXPANDER_DEVICE || 3038 SAS_EDGE_EXPANDER_DEVICE ||
3021 mpt2sas_port->remote_identify.device_type == 3039 mpt2sas_port->remote_identify.device_type ==
3022 SAS_FANOUT_EXPANDER_DEVICE) { 3040 SAS_FANOUT_EXPANDER_DEVICE) {
3023
3024 spin_lock_irqsave(&ioc->sas_node_lock, flags);
3025 expander_sibling = 3041 expander_sibling =
3026 mpt2sas_scsih_expander_find_by_sas_address( 3042 mpt2sas_scsih_expander_find_by_sas_address(
3027 ioc, mpt2sas_port->remote_identify.sas_address); 3043 ioc, mpt2sas_port->remote_identify.sas_address);
3028 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
3029 _scsih_block_io_to_children_attached_to_ex(ioc, 3044 _scsih_block_io_to_children_attached_to_ex(ioc,
3030 expander_sibling); 3045 expander_sibling);
3031 } 3046 }
@@ -3124,7 +3139,7 @@ _scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
3124 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "setting delete flag: " 3139 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "setting delete flag: "
3125 "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, handle, 3140 "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, handle,
3126 (unsigned long long)sas_address)); 3141 (unsigned long long)sas_address));
3127 _scsih_ublock_io_device(ioc, handle); 3142 _scsih_ublock_io_device(ioc, sas_address);
3128 sas_target_priv_data->handle = MPT2SAS_INVALID_DEVICE_HANDLE; 3143 sas_target_priv_data->handle = MPT2SAS_INVALID_DEVICE_HANDLE;
3129 } 3144 }
3130 3145
@@ -3174,16 +3189,19 @@ static u8
3174_scsih_sas_control_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, 3189_scsih_sas_control_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid,
3175 u8 msix_index, u32 reply) 3190 u8 msix_index, u32 reply)
3176{ 3191{
3177#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
3178 Mpi2SasIoUnitControlReply_t *mpi_reply = 3192 Mpi2SasIoUnitControlReply_t *mpi_reply =
3179 mpt2sas_base_get_reply_virt_addr(ioc, reply); 3193 mpt2sas_base_get_reply_virt_addr(ioc, reply);
3180#endif 3194 if (likely(mpi_reply)) {
3181 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT 3195 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
3182 "sc_complete:handle(0x%04x), (open) " 3196 "sc_complete:handle(0x%04x), (open) "
3183 "smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n", 3197 "smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
3184 ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid, 3198 ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid,
3185 le16_to_cpu(mpi_reply->IOCStatus), 3199 le16_to_cpu(mpi_reply->IOCStatus),
3186 le32_to_cpu(mpi_reply->IOCLogInfo))); 3200 le32_to_cpu(mpi_reply->IOCLogInfo)));
3201 } else {
3202 printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
3203 ioc->name, __FILE__, __LINE__, __func__);
3204 }
3187 return 1; 3205 return 1;
3188} 3206}
3189 3207
@@ -3262,7 +3280,11 @@ _scsih_tm_volume_tr_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid,
3262 "progress!\n", __func__, ioc->name)); 3280 "progress!\n", __func__, ioc->name));
3263 return 1; 3281 return 1;
3264 } 3282 }
3265 3283 if (unlikely(!mpi_reply)) {
3284 printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
3285 ioc->name, __FILE__, __LINE__, __func__);
3286 return 1;
3287 }
3266 mpi_request_tm = mpt2sas_base_get_msg_frame(ioc, smid); 3288 mpi_request_tm = mpt2sas_base_get_msg_frame(ioc, smid);
3267 handle = le16_to_cpu(mpi_request_tm->DevHandle); 3289 handle = le16_to_cpu(mpi_request_tm->DevHandle);
3268 if (handle != le16_to_cpu(mpi_reply->DevHandle)) { 3290 if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
@@ -3325,7 +3347,11 @@ _scsih_tm_tr_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
3325 "operational\n", __func__, ioc->name)); 3347 "operational\n", __func__, ioc->name));
3326 return 1; 3348 return 1;
3327 } 3349 }
3328 3350 if (unlikely(!mpi_reply)) {
3351 printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
3352 ioc->name, __FILE__, __LINE__, __func__);
3353 return 1;
3354 }
3329 mpi_request_tm = mpt2sas_base_get_msg_frame(ioc, smid); 3355 mpi_request_tm = mpt2sas_base_get_msg_frame(ioc, smid);
3330 handle = le16_to_cpu(mpi_request_tm->DevHandle); 3356 handle = le16_to_cpu(mpi_request_tm->DevHandle);
3331 if (handle != le16_to_cpu(mpi_reply->DevHandle)) { 3357 if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
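
Both TM completion paths above gain the same guard: the reply virtual address can legitimately be absent, so it is checked before any field is read, annotated unlikely() since a missing frame is the rare error case. A minimal sketch of the guard with a hypothetical reply layout:

#include <stdio.h>

#define unlikely(x) __builtin_expect(!!(x), 0)

struct reply {
	unsigned short dev_handle;
	unsigned short ioc_status;
};

/* Returns 1 to tell the caller the frame is consumed, valid or not. */
static int tm_complete(struct reply *r)
{
	if (unlikely(!r)) {
		fprintf(stderr, "mpi_reply not valid\n");
		return 1;		/* bail out before dereferencing */
	}
	printf("TM done: handle 0x%04x status 0x%04x\n",
	       (unsigned)r->dev_handle, (unsigned)r->ioc_status);
	return 1;
}

int main(void)
{
	struct reply ok = { 0x0009, 0 };
	tm_complete(&ok);
	tm_complete(NULL);		/* error path is handled, not crashed */
	return 0;
}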
@@ -3441,14 +3467,20 @@ _scsih_check_topo_delete_events(struct MPT2SAS_ADAPTER *ioc,
3441 _scsih_block_io_to_children_attached_directly(ioc, event_data); 3467 _scsih_block_io_to_children_attached_directly(ioc, event_data);
3442 return; 3468 return;
3443 } 3469 }
3444 3470 if (event_data->ExpStatus ==
3445 if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING 3471 MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
3446 || event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING) { 3472 /* put expander attached devices into blocking state */
3447 spin_lock_irqsave(&ioc->sas_node_lock, flags); 3473 spin_lock_irqsave(&ioc->sas_node_lock, flags);
3448 sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc, 3474 sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc,
3449 expander_handle); 3475 expander_handle);
3450 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
3451 _scsih_block_io_to_children_attached_to_ex(ioc, sas_expander); 3476 _scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
3477 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
3478 do {
3479 handle = find_first_bit(ioc->blocking_handles,
3480 ioc->facts.MaxDevHandle);
3481 if (handle < ioc->facts.MaxDevHandle)
3482 _scsih_block_io_device(ioc, handle);
3483 } while (test_and_clear_bit(handle, ioc->blocking_handles));
3452 } else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING) 3484 } else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
3453 _scsih_block_io_to_children_attached_directly(ioc, event_data); 3485 _scsih_block_io_to_children_attached_directly(ioc, event_data);
3454 3486
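
The expander-delay handling above records each affected device in the ioc->blocking_handles bitmap while the node lock is held, then drains the bitmap afterwards with find_first_bit()/test_and_clear_bit(). A userspace sketch of the same record-then-drain loop, using GCC's __builtin_ctzll in place of the kernel bit helpers:

#include <stdint.h>
#include <stdio.h>

#define MAX_HANDLES 128
static uint64_t blocking[MAX_HANDLES / 64];	/* one bit per device handle */

static void mark_handle(unsigned h)
{
	blocking[h / 64] |= 1ULL << (h % 64);	/* done under the lock */
}

static int pop_first_handle(void)		/* drain: find, clear, return */
{
	for (unsigned w = 0; w < MAX_HANDLES / 64; w++) {
		if (!blocking[w])
			continue;
		unsigned b = (unsigned)__builtin_ctzll(blocking[w]);
		blocking[w] &= ~(1ULL << b);
		return (int)(w * 64 + b);
	}
	return -1;				/* bitmap empty */
}

int main(void)
{
	mark_handle(3);
	mark_handle(70);
	for (int h; (h = pop_first_handle()) >= 0; )
		printf("blocking I/O to handle 0x%04x\n", h);	/* unlocked work */
	return 0;
}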
@@ -4446,8 +4478,8 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
4446 != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) { 4478 != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
4447 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 4479 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4448 ioc->scsi_lookup[smid - 1].scmd = scmd; 4480 ioc->scsi_lookup[smid - 1].scmd = scmd;
4449 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4450 _scsih_scsi_direct_io_set(ioc, smid, 0); 4481 _scsih_scsi_direct_io_set(ioc, smid, 0);
4482 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4451 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); 4483 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
4452 mpi_request->DevHandle = 4484 mpi_request->DevHandle =
4453 cpu_to_le16(sas_device_priv_data->sas_target->handle); 4485 cpu_to_le16(sas_device_priv_data->sas_target->handle);
@@ -5020,13 +5052,11 @@ mpt2sas_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address)
5020 spin_lock_irqsave(&ioc->sas_node_lock, flags); 5052 spin_lock_irqsave(&ioc->sas_node_lock, flags);
5021 sas_expander = mpt2sas_scsih_expander_find_by_sas_address(ioc, 5053 sas_expander = mpt2sas_scsih_expander_find_by_sas_address(ioc,
5022 sas_address); 5054 sas_address);
5023 if (!sas_expander) { 5055 if (sas_expander)
5024 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 5056 list_del(&sas_expander->list);
5025 return;
5026 }
5027 list_del(&sas_expander->list);
5028 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 5057 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5029 _scsih_expander_node_remove(ioc, sas_expander); 5058 if (sas_expander)
5059 _scsih_expander_node_remove(ioc, sas_expander);
5030} 5060}
5031 5061
5032/** 5062/**
@@ -5106,6 +5136,7 @@ _scsih_check_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
5106 struct MPT2SAS_TARGET *sas_target_priv_data; 5136 struct MPT2SAS_TARGET *sas_target_priv_data;
5107 u32 device_info; 5137 u32 device_info;
5108 5138
5139
5109 if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 5140 if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
5110 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) 5141 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
5111 return; 5142 return;
@@ -5139,21 +5170,24 @@ _scsih_check_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
5139 sas_target_priv_data->handle = handle; 5170 sas_target_priv_data->handle = handle;
5140 sas_device->handle = handle; 5171 sas_device->handle = handle;
5141 } 5172 }
5142 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5143 5173
5144 /* check if device is present */ 5174 /* check if device is present */
5145 if (!(le16_to_cpu(sas_device_pg0.Flags) & 5175 if (!(le16_to_cpu(sas_device_pg0.Flags) &
5146 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) { 5176 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
5147 printk(MPT2SAS_ERR_FMT "device is not present " 5177 printk(MPT2SAS_ERR_FMT "device is not present "
5148 "handle(0x%04x), flags!!!\n", ioc->name, handle); 5178 "handle(0x%04x), flags!!!\n", ioc->name, handle);
5179 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5149 return; 5180 return;
5150 } 5181 }
5151 5182
5152 /* check if there were any issues with discovery */ 5183 /* check if there were any issues with discovery */
5153 if (_scsih_check_access_status(ioc, sas_address, handle, 5184 if (_scsih_check_access_status(ioc, sas_address, handle,
5154 sas_device_pg0.AccessStatus)) 5185 sas_device_pg0.AccessStatus)) {
5186 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5155 return; 5187 return;
5156 _scsih_ublock_io_device(ioc, handle); 5188 }
5189 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5190 _scsih_ublock_io_device(ioc, sas_address);
5157 5191
5158} 5192}
5159 5193
@@ -5280,54 +5314,71 @@ static void
5280_scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, 5314_scsih_remove_device(struct MPT2SAS_ADAPTER *ioc,
5281 struct _sas_device *sas_device) 5315 struct _sas_device *sas_device)
5282{ 5316{
5283 struct _sas_device sas_device_backup;
5284 struct MPT2SAS_TARGET *sas_target_priv_data; 5317 struct MPT2SAS_TARGET *sas_target_priv_data;
5285 5318
5286 if (!sas_device)
5287 return;
5288
5289 memcpy(&sas_device_backup, sas_device, sizeof(struct _sas_device));
5290 _scsih_sas_device_remove(ioc, sas_device);
5291
5292 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: " 5319 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: "
5293 "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__, 5320 "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__,
5294 sas_device_backup.handle, (unsigned long long) 5321 sas_device->handle, (unsigned long long)
5295 sas_device_backup.sas_address)); 5322 sas_device->sas_address));
5296 5323
5297 if (sas_device_backup.starget && sas_device_backup.starget->hostdata) { 5324 if (sas_device->starget && sas_device->starget->hostdata) {
5298 sas_target_priv_data = sas_device_backup.starget->hostdata; 5325 sas_target_priv_data = sas_device->starget->hostdata;
5299 sas_target_priv_data->deleted = 1; 5326 sas_target_priv_data->deleted = 1;
5300 _scsih_ublock_io_device(ioc, sas_device_backup.handle); 5327 _scsih_ublock_io_device(ioc, sas_device->sas_address);
5301 sas_target_priv_data->handle = 5328 sas_target_priv_data->handle =
5302 MPT2SAS_INVALID_DEVICE_HANDLE; 5329 MPT2SAS_INVALID_DEVICE_HANDLE;
5303 } 5330 }
5304 5331
5305 _scsih_ublock_io_device(ioc, sas_device_backup.handle);
5306
5307 if (!ioc->hide_drives) 5332 if (!ioc->hide_drives)
5308 mpt2sas_transport_port_remove(ioc, 5333 mpt2sas_transport_port_remove(ioc,
5309 sas_device_backup.sas_address, 5334 sas_device->sas_address,
5310 sas_device_backup.sas_address_parent); 5335 sas_device->sas_address_parent);
5311 5336
5312 printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), sas_addr" 5337 printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), sas_addr"
5313 "(0x%016llx)\n", ioc->name, sas_device_backup.handle, 5338 "(0x%016llx)\n", ioc->name, sas_device->handle,
5314 (unsigned long long) sas_device_backup.sas_address); 5339 (unsigned long long) sas_device->sas_address);
5315 5340
5316 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit: " 5341 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit: "
5317 "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__, 5342 "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__,
5318 sas_device_backup.handle, (unsigned long long) 5343 sas_device->handle, (unsigned long long)
5319 sas_device_backup.sas_address)); 5344 sas_device->sas_address));
5345 kfree(sas_device);
5346}
5347/**
5348 * _scsih_device_remove_by_handle - removing device object by handle
5349 * @ioc: per adapter object
5350 * @handle: device handle
5351 *
5352 * Return nothing.
5353 */
5354static void
5355_scsih_device_remove_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
5356{
5357 struct _sas_device *sas_device;
5358 unsigned long flags;
5359
5360 if (ioc->shost_recovery)
5361 return;
5362
5363 spin_lock_irqsave(&ioc->sas_device_lock, flags);
5364 sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
5365 if (sas_device)
5366 list_del(&sas_device->list);
5367 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5368 if (sas_device)
5369 _scsih_remove_device(ioc, sas_device);
5320} 5370}
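
The new _scsih_device_remove_by_handle() helper above is the canonical unlink-then-destroy shape: list_del() happens inside the spinlock so no other CPU can find the entry, while the heavy teardown (transport port removal, kfree) runs after the unlock. A compact sketch with a singly linked list and a pthread mutex; everything here is illustrative:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct dev {
	unsigned short handle;
	struct dev *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct dev *dev_list;

static void remove_by_handle(unsigned short handle)
{
	struct dev *found = NULL;

	pthread_mutex_lock(&list_lock);
	for (struct dev **pp = &dev_list; *pp; pp = &(*pp)->next) {
		if ((*pp)->handle == handle) {
			found = *pp;
			*pp = found->next;	/* unlink while locked */
			break;
		}
	}
	pthread_mutex_unlock(&list_lock);

	if (found) {				/* heavy teardown, unlocked */
		printf("removing handle 0x%04x\n", (unsigned)found->handle);
		free(found);
	}
}

int main(void)
{
	struct dev *d = malloc(sizeof(*d));
	if (!d)
		return 1;
	d->handle = 0x0009;
	d->next = NULL;
	dev_list = d;
	remove_by_handle(0x0009);
	return 0;
}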
5321 5371
5322/** 5372/**
5323 * mpt2sas_device_remove - removing device object 5373 * mpt2sas_device_remove_by_sas_address - removing device object by sas address
5324 * @ioc: per adapter object 5374 * @ioc: per adapter object
5325 * @sas_address: expander sas_address 5375 * @sas_address: device sas_address
5326 * 5376 *
5327 * Return nothing. 5377 * Return nothing.
5328 */ 5378 */
5329void 5379void
5330mpt2sas_device_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address) 5380mpt2sas_device_remove_by_sas_address(struct MPT2SAS_ADAPTER *ioc,
5381 u64 sas_address)
5331{ 5382{
5332 struct _sas_device *sas_device; 5383 struct _sas_device *sas_device;
5333 unsigned long flags; 5384 unsigned long flags;
@@ -5338,14 +5389,12 @@ mpt2sas_device_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address)
5338 spin_lock_irqsave(&ioc->sas_device_lock, flags); 5389 spin_lock_irqsave(&ioc->sas_device_lock, flags);
5339 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 5390 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
5340 sas_address); 5391 sas_address);
5341 if (!sas_device) { 5392 if (sas_device)
5342 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 5393 list_del(&sas_device->list);
5343 return;
5344 }
5345 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 5394 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5346 _scsih_remove_device(ioc, sas_device); 5395 if (sas_device)
5396 _scsih_remove_device(ioc, sas_device);
5347} 5397}
5348
5349#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 5398#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
5350/** 5399/**
5351 * _scsih_sas_topology_change_event_debug - debug for topology event 5400 * _scsih_sas_topology_change_event_debug - debug for topology event
@@ -5442,7 +5491,6 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
5442 u16 reason_code; 5491 u16 reason_code;
5443 u8 phy_number, max_phys; 5492 u8 phy_number, max_phys;
5444 struct _sas_node *sas_expander; 5493 struct _sas_node *sas_expander;
5445 struct _sas_device *sas_device;
5446 u64 sas_address; 5494 u64 sas_address;
5447 unsigned long flags; 5495 unsigned long flags;
5448 u8 link_rate, prev_link_rate; 5496 u8 link_rate, prev_link_rate;
@@ -5477,15 +5525,17 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
5477 spin_lock_irqsave(&ioc->sas_node_lock, flags); 5525 spin_lock_irqsave(&ioc->sas_node_lock, flags);
5478 sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc, 5526 sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc,
5479 parent_handle); 5527 parent_handle);
5480 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5481 if (sas_expander) { 5528 if (sas_expander) {
5482 sas_address = sas_expander->sas_address; 5529 sas_address = sas_expander->sas_address;
5483 max_phys = sas_expander->num_phys; 5530 max_phys = sas_expander->num_phys;
5484 } else if (parent_handle < ioc->sas_hba.num_phys) { 5531 } else if (parent_handle < ioc->sas_hba.num_phys) {
5485 sas_address = ioc->sas_hba.sas_address; 5532 sas_address = ioc->sas_hba.sas_address;
5486 max_phys = ioc->sas_hba.num_phys; 5533 max_phys = ioc->sas_hba.num_phys;
5487 } else 5534 } else {
5535 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5488 return; 5536 return;
5537 }
5538 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5489 5539
5490 /* handle siblings events */ 5540 /* handle siblings events */
5491 for (i = 0; i < event_data->NumEntries; i++) { 5541 for (i = 0; i < event_data->NumEntries; i++) {
@@ -5540,16 +5590,7 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
5540 break; 5590 break;
5541 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING: 5591 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
5542 5592
5543 spin_lock_irqsave(&ioc->sas_device_lock, flags); 5593 _scsih_device_remove_by_handle(ioc, handle);
5544 sas_device = _scsih_sas_device_find_by_handle(ioc,
5545 handle);
5546 if (!sas_device) {
5547 spin_unlock_irqrestore(&ioc->sas_device_lock,
5548 flags);
5549 break;
5550 }
5551 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5552 _scsih_remove_device(ioc, sas_device);
5553 break; 5594 break;
5554 } 5595 }
5555 } 5596 }
@@ -5672,20 +5713,24 @@ _scsih_sas_device_status_change_event(struct MPT2SAS_ADAPTER *ioc,
5672 sas_address = le64_to_cpu(event_data->SASAddress); 5713 sas_address = le64_to_cpu(event_data->SASAddress);
5673 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 5714 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
5674 sas_address); 5715 sas_address);
5675 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5676 5716
5677 if (!sas_device || !sas_device->starget) 5717 if (!sas_device || !sas_device->starget) {
5718 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5678 return; 5719 return;
5720 }
5679 5721
5680 target_priv_data = sas_device->starget->hostdata; 5722 target_priv_data = sas_device->starget->hostdata;
5681 if (!target_priv_data) 5723 if (!target_priv_data) {
5724 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5682 return; 5725 return;
5726 }
5683 5727
5684 if (event_data->ReasonCode == 5728 if (event_data->ReasonCode ==
5685 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET) 5729 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
5686 target_priv_data->tm_busy = 1; 5730 target_priv_data->tm_busy = 1;
5687 else 5731 else
5688 target_priv_data->tm_busy = 0; 5732 target_priv_data->tm_busy = 0;
5733 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5689} 5734}
5690 5735
5691#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 5736#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
@@ -5950,30 +5995,6 @@ _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
5950} 5995}
5951 5996
5952/** 5997/**
5953 * _scsih_reprobe_target - reprobing target
5954 * @starget: scsi target struct
5955 * @no_uld_attach: sdev->no_uld_attach flag setting
5956 *
5957 * Note: no_uld_attach flag determines whether the disk device is attached
5958 * to block layer. A value of `1` means to not attach.
5959 **/
5960static void
5961_scsih_reprobe_target(struct scsi_target *starget, int no_uld_attach)
5962{
5963 struct MPT2SAS_TARGET *sas_target_priv_data;
5964
5965 if (starget == NULL)
5966 return;
5967 sas_target_priv_data = starget->hostdata;
5968 if (no_uld_attach)
5969 sas_target_priv_data->flags |= MPT_TARGET_FLAGS_RAID_COMPONENT;
5970 else
5971 sas_target_priv_data->flags &= ~MPT_TARGET_FLAGS_RAID_COMPONENT;
5972
5973 starget_for_each_device(starget, no_uld_attach ? (void *)1 : NULL,
5974 _scsih_reprobe_lun);
5975}
5976/**
5977 * _scsih_sas_volume_add - add new volume 5998 * _scsih_sas_volume_add - add new volume
5978 * @ioc: per adapter object 5999 * @ioc: per adapter object
5979 * @element: IR config element data 6000 * @element: IR config element data
@@ -6024,8 +6045,11 @@ _scsih_sas_volume_add(struct MPT2SAS_ADAPTER *ioc,
6024 raid_device->id, 0); 6045 raid_device->id, 0);
6025 if (rc) 6046 if (rc)
6026 _scsih_raid_device_remove(ioc, raid_device); 6047 _scsih_raid_device_remove(ioc, raid_device);
6027 } else 6048 } else {
6049 spin_lock_irqsave(&ioc->raid_device_lock, flags);
6028 _scsih_determine_boot_device(ioc, raid_device, 1); 6050 _scsih_determine_boot_device(ioc, raid_device, 1);
6051 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
6052 }
6029} 6053}
6030 6054
6031/** 6055/**
@@ -6042,21 +6066,25 @@ _scsih_sas_volume_delete(struct MPT2SAS_ADAPTER *ioc, u16 handle)
6042 struct _raid_device *raid_device; 6066 struct _raid_device *raid_device;
6043 unsigned long flags; 6067 unsigned long flags;
6044 struct MPT2SAS_TARGET *sas_target_priv_data; 6068 struct MPT2SAS_TARGET *sas_target_priv_data;
6069 struct scsi_target *starget = NULL;
6045 6070
6046 spin_lock_irqsave(&ioc->raid_device_lock, flags); 6071 spin_lock_irqsave(&ioc->raid_device_lock, flags);
6047 raid_device = _scsih_raid_device_find_by_handle(ioc, handle); 6072 raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
6048 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 6073 if (raid_device) {
6049 if (!raid_device) 6074 if (raid_device->starget) {
6050 return; 6075 starget = raid_device->starget;
6051 if (raid_device->starget) { 6076 sas_target_priv_data = starget->hostdata;
6052 sas_target_priv_data = raid_device->starget->hostdata; 6077 sas_target_priv_data->deleted = 1;
6053 sas_target_priv_data->deleted = 1; 6078 }
6054 scsi_remove_target(&raid_device->starget->dev); 6079 printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), wwid"
6080 "(0x%016llx)\n", ioc->name, raid_device->handle,
6081 (unsigned long long) raid_device->wwid);
6082 list_del(&raid_device->list);
6083 kfree(raid_device);
6055 } 6084 }
6056 printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), wwid" 6085 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
6057 "(0x%016llx)\n", ioc->name, raid_device->handle, 6086 if (starget)
6058 (unsigned long long) raid_device->wwid); 6087 scsi_remove_target(&starget->dev);
6059 _scsih_raid_device_remove(ioc, raid_device);
6060} 6088}
6061 6089
6062/** 6090/**
@@ -6072,20 +6100,31 @@ _scsih_sas_pd_expose(struct MPT2SAS_ADAPTER *ioc,
6072 Mpi2EventIrConfigElement_t *element) 6100 Mpi2EventIrConfigElement_t *element)
6073{ 6101{
6074 struct _sas_device *sas_device; 6102 struct _sas_device *sas_device;
6103 struct scsi_target *starget = NULL;
6104 struct MPT2SAS_TARGET *sas_target_priv_data;
6075 unsigned long flags; 6105 unsigned long flags;
6076 u16 handle = le16_to_cpu(element->PhysDiskDevHandle); 6106 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
6077 6107
6078 spin_lock_irqsave(&ioc->sas_device_lock, flags); 6108 spin_lock_irqsave(&ioc->sas_device_lock, flags);
6079 sas_device = _scsih_sas_device_find_by_handle(ioc, handle); 6109 sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
6110 if (sas_device) {
6111 sas_device->volume_handle = 0;
6112 sas_device->volume_wwid = 0;
6113 clear_bit(handle, ioc->pd_handles);
6114 if (sas_device->starget && sas_device->starget->hostdata) {
6115 starget = sas_device->starget;
6116 sas_target_priv_data = starget->hostdata;
6117 sas_target_priv_data->flags &=
6118 ~MPT_TARGET_FLAGS_RAID_COMPONENT;
6119 }
6120 }
6080 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 6121 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
6081 if (!sas_device) 6122 if (!sas_device)
6082 return; 6123 return;
6083 6124
6084 /* exposing raid component */ 6125 /* exposing raid component */
6085 sas_device->volume_handle = 0; 6126 if (starget)
6086 sas_device->volume_wwid = 0; 6127 starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
6087 clear_bit(handle, ioc->pd_handles);
6088 _scsih_reprobe_target(sas_device->starget, 0);
6089} 6128}
6090 6129
6091/** 6130/**
@@ -6101,23 +6140,38 @@ _scsih_sas_pd_hide(struct MPT2SAS_ADAPTER *ioc,
6101 Mpi2EventIrConfigElement_t *element) 6140 Mpi2EventIrConfigElement_t *element)
6102{ 6141{
6103 struct _sas_device *sas_device; 6142 struct _sas_device *sas_device;
6143 struct scsi_target *starget = NULL;
6144 struct MPT2SAS_TARGET *sas_target_priv_data;
6104 unsigned long flags; 6145 unsigned long flags;
6105 u16 handle = le16_to_cpu(element->PhysDiskDevHandle); 6146 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
6147 u16 volume_handle = 0;
6148 u64 volume_wwid = 0;
6149
6150 mpt2sas_config_get_volume_handle(ioc, handle, &volume_handle);
6151 if (volume_handle)
6152 mpt2sas_config_get_volume_wwid(ioc, volume_handle,
6153 &volume_wwid);
6106 6154
6107 spin_lock_irqsave(&ioc->sas_device_lock, flags); 6155 spin_lock_irqsave(&ioc->sas_device_lock, flags);
6108 sas_device = _scsih_sas_device_find_by_handle(ioc, handle); 6156 sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
6157 if (sas_device) {
6158 set_bit(handle, ioc->pd_handles);
6159 if (sas_device->starget && sas_device->starget->hostdata) {
6160 starget = sas_device->starget;
6161 sas_target_priv_data = starget->hostdata;
6162 sas_target_priv_data->flags |=
6163 MPT_TARGET_FLAGS_RAID_COMPONENT;
6164 sas_device->volume_handle = volume_handle;
6165 sas_device->volume_wwid = volume_wwid;
6166 }
6167 }
6109 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 6168 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
6110 if (!sas_device) 6169 if (!sas_device)
6111 return; 6170 return;
6112 6171
6113 /* hiding raid component */ 6172 /* hiding raid component */
6114 mpt2sas_config_get_volume_handle(ioc, handle, 6173 if (starget)
6115 &sas_device->volume_handle); 6174 starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
6116 mpt2sas_config_get_volume_wwid(ioc, sas_device->volume_handle,
6117 &sas_device->volume_wwid);
6118 set_bit(handle, ioc->pd_handles);
6119 _scsih_reprobe_target(sas_device->starget, 1);
6120
6121} 6175}
6122 6176
6123/** 6177/**
@@ -6132,16 +6186,9 @@ static void
6132_scsih_sas_pd_delete(struct MPT2SAS_ADAPTER *ioc, 6186_scsih_sas_pd_delete(struct MPT2SAS_ADAPTER *ioc,
6133 Mpi2EventIrConfigElement_t *element) 6187 Mpi2EventIrConfigElement_t *element)
6134{ 6188{
6135 struct _sas_device *sas_device;
6136 unsigned long flags;
6137 u16 handle = le16_to_cpu(element->PhysDiskDevHandle); 6189 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
6138 6190
6139 spin_lock_irqsave(&ioc->sas_device_lock, flags); 6191 _scsih_device_remove_by_handle(ioc, handle);
6140 sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
6141 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
6142 if (!sas_device)
6143 return;
6144 _scsih_remove_device(ioc, sas_device);
6145} 6192}
6146 6193
6147/** 6194/**
@@ -6583,18 +6630,13 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
6583 /* code added for raid transport support */ 6630 /* code added for raid transport support */
6584 if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) { 6631 if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
6585 6632
6586 handle = le16_to_cpu(event_data->VolDevHandle);
6587
6588 spin_lock_irqsave(&ioc->raid_device_lock, flags); 6633 spin_lock_irqsave(&ioc->raid_device_lock, flags);
6634 handle = le16_to_cpu(event_data->VolDevHandle);
6589 raid_device = _scsih_raid_device_find_by_handle(ioc, handle); 6635 raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
6590 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 6636 if (raid_device)
6591
6592 if (!raid_device)
6593 return;
6594
6595 if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC)
6596 raid_device->percent_complete = 6637 raid_device->percent_complete =
6597 event_data->PercentComplete; 6638 event_data->PercentComplete;
6639 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
6598 } 6640 }
6599} 6641}
6600 6642
@@ -6761,13 +6803,18 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
6761 * required data for Direct IO 6803 * required data for Direct IO
6762 */ 6804 */
6763 _scsih_init_warpdrive_properties(ioc, raid_device); 6805 _scsih_init_warpdrive_properties(ioc, raid_device);
6764 if (raid_device->handle == handle) 6806 spin_lock_irqsave(&ioc->raid_device_lock, flags);
6807 if (raid_device->handle == handle) {
6808 spin_unlock_irqrestore(&ioc->raid_device_lock,
6809 flags);
6765 return; 6810 return;
6811 }
6766 printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n", 6812 printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
6767 raid_device->handle); 6813 raid_device->handle);
6768 raid_device->handle = handle; 6814 raid_device->handle = handle;
6769 if (sas_target_priv_data) 6815 if (sas_target_priv_data)
6770 sas_target_priv_data->handle = handle; 6816 sas_target_priv_data->handle = handle;
6817 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
6771 return; 6818 return;
6772 } 6819 }
6773 } 6820 }
@@ -6939,58 +6986,56 @@ static void
6939_scsih_remove_unresponding_sas_devices(struct MPT2SAS_ADAPTER *ioc) 6986_scsih_remove_unresponding_sas_devices(struct MPT2SAS_ADAPTER *ioc)
6940{ 6987{
6941 struct _sas_device *sas_device, *sas_device_next; 6988 struct _sas_device *sas_device, *sas_device_next;
6942 struct _sas_node *sas_expander; 6989 struct _sas_node *sas_expander, *sas_expander_next;
6943 struct _raid_device *raid_device, *raid_device_next; 6990 struct _raid_device *raid_device, *raid_device_next;
6991 struct list_head tmp_list;
6992 unsigned long flags;
6944 6993
6945 printk(MPT2SAS_INFO_FMT "removing unresponding devices: start\n", 6994 printk(MPT2SAS_INFO_FMT "removing unresponding devices: start\n",
6946 ioc->name); 6995 ioc->name);
6947 6996
6997 /* removing unresponding end devices */
6998 printk(MPT2SAS_INFO_FMT "removing unresponding devices: end-devices\n",
6999 ioc->name);
6948 list_for_each_entry_safe(sas_device, sas_device_next, 7000 list_for_each_entry_safe(sas_device, sas_device_next,
6949 &ioc->sas_device_list, list) { 7001 &ioc->sas_device_list, list) {
6950 if (sas_device->responding) { 7002 if (!sas_device->responding)
7003 mpt2sas_device_remove_by_sas_address(ioc,
7004 sas_device->sas_address);
7005 else
6951 sas_device->responding = 0; 7006 sas_device->responding = 0;
6952 continue;
6953 }
6954 if (sas_device->starget)
6955 starget_printk(KERN_INFO, sas_device->starget,
6956 "removing: handle(0x%04x), sas_addr(0x%016llx), "
6957 "enclosure logical id(0x%016llx), slot(%d)\n",
6958 sas_device->handle,
6959 (unsigned long long)sas_device->sas_address,
6960 (unsigned long long)
6961 sas_device->enclosure_logical_id,
6962 sas_device->slot);
6963 _scsih_remove_device(ioc, sas_device);
6964 } 7007 }
6965 7008
6966 if (!ioc->ir_firmware) 7009 /* removing unresponding volumes */
6967 goto retry_expander_search; 7010 if (ioc->ir_firmware) {
6968 7011 printk(MPT2SAS_INFO_FMT "removing unresponding devices: "
6969 list_for_each_entry_safe(raid_device, raid_device_next, 7012 "volumes\n", ioc->name);
6970 &ioc->raid_device_list, list) { 7013 list_for_each_entry_safe(raid_device, raid_device_next,
6971 if (raid_device->responding) { 7014 &ioc->raid_device_list, list) {
6972 raid_device->responding = 0; 7015 if (!raid_device->responding)
6973 continue; 7016 _scsih_sas_volume_delete(ioc,
6974 } 7017 raid_device->handle);
6975 if (raid_device->starget) { 7018 else
6976 starget_printk(KERN_INFO, raid_device->starget, 7019 raid_device->responding = 0;
6977 "removing: handle(0x%04x), wwid(0x%016llx)\n",
6978 raid_device->handle,
6979 (unsigned long long)raid_device->wwid);
6980 scsi_remove_target(&raid_device->starget->dev);
6981 } 7020 }
6982 _scsih_raid_device_remove(ioc, raid_device);
6983 } 7021 }
6984 7022 /* removing unresponding expanders */
6985 retry_expander_search: 7023 printk(MPT2SAS_INFO_FMT "removing unresponding devices: expanders\n",
6986 sas_expander = NULL; 7024 ioc->name);
6987 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { 7025 spin_lock_irqsave(&ioc->sas_node_lock, flags);
6988 if (sas_expander->responding) { 7026 INIT_LIST_HEAD(&tmp_list);
7027 list_for_each_entry_safe(sas_expander, sas_expander_next,
7028 &ioc->sas_expander_list, list) {
7029 if (!sas_expander->responding)
7030 list_move_tail(&sas_expander->list, &tmp_list);
7031 else
6989 sas_expander->responding = 0; 7032 sas_expander->responding = 0;
6990 continue; 7033 }
6991 } 7034 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6992 mpt2sas_expander_remove(ioc, sas_expander->sas_address); 7035 list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
6993 goto retry_expander_search; 7036 list) {
7037 list_del(&sas_expander->list);
7038 _scsih_expander_node_remove(ioc, sas_expander);
6994 } 7039 }
6995 printk(MPT2SAS_INFO_FMT "removing unresponding devices: complete\n", 7040 printk(MPT2SAS_INFO_FMT "removing unresponding devices: complete\n",
6996 ioc->name); 7041 ioc->name);
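
The rework above also ends the old "remove one expander, goto retry" rescan: non-responding expanders are moved to a private tmp_list in a single locking pass and torn down afterwards. A sketch of the splice-then-process pattern (illustrative types, pthread mutex for the spinlock):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct expander {
	int responding;
	struct expander *next;
};

static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;
static struct expander *expander_list;

static void remove_unresponding(void)
{
	struct expander *tmp = NULL;	/* private list, no lock needed */

	pthread_mutex_lock(&node_lock);	/* one pass: move the dead ones */
	for (struct expander **pp = &expander_list; *pp; ) {
		struct expander *e = *pp;
		if (!e->responding) {
			*pp = e->next;
			e->next = tmp;
			tmp = e;
		} else {
			e->responding = 0;	/* re-arm for the next scan */
			pp = &e->next;
		}
	}
	pthread_mutex_unlock(&node_lock);

	while (tmp) {			/* slow teardown runs unlocked */
		struct expander *e = tmp;
		tmp = e->next;
		puts("removing unresponding expander");
		free(e);
	}
}

int main(void)
{
	struct expander *e = calloc(1, sizeof(*e));	/* responding == 0 */
	if (!e)
		return 1;
	expander_list = e;
	remove_unresponding();
	return 0;
}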
@@ -7043,6 +7088,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
7043 struct _sas_device *sas_device; 7088 struct _sas_device *sas_device;
7044 struct _sas_node *expander_device; 7089 struct _sas_node *expander_device;
7045 static struct _raid_device *raid_device; 7090 static struct _raid_device *raid_device;
7091 unsigned long flags;
7046 7092
7047 printk(MPT2SAS_INFO_FMT "scan devices: start\n", ioc->name); 7093 printk(MPT2SAS_INFO_FMT "scan devices: start\n", ioc->name);
7048 7094
@@ -7057,8 +7103,10 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
7057 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) 7103 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
7058 break; 7104 break;
7059 handle = le16_to_cpu(expander_pg0.DevHandle); 7105 handle = le16_to_cpu(expander_pg0.DevHandle);
7106 spin_lock_irqsave(&ioc->sas_node_lock, flags);
7060 expander_device = mpt2sas_scsih_expander_find_by_sas_address( 7107 expander_device = mpt2sas_scsih_expander_find_by_sas_address(
7061 ioc, le64_to_cpu(expander_pg0.SASAddress)); 7108 ioc, le64_to_cpu(expander_pg0.SASAddress));
7109 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7062 if (expander_device) 7110 if (expander_device)
7063 _scsih_refresh_expander_links(ioc, expander_device, 7111 _scsih_refresh_expander_links(ioc, expander_device,
7064 handle); 7112 handle);
@@ -7080,7 +7128,9 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
7080 break; 7128 break;
7081 phys_disk_num = pd_pg0.PhysDiskNum; 7129 phys_disk_num = pd_pg0.PhysDiskNum;
7082 handle = le16_to_cpu(pd_pg0.DevHandle); 7130 handle = le16_to_cpu(pd_pg0.DevHandle);
7131 spin_lock_irqsave(&ioc->sas_device_lock, flags);
7083 sas_device = _scsih_sas_device_find_by_handle(ioc, handle); 7132 sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
7133 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7084 if (sas_device) 7134 if (sas_device)
7085 continue; 7135 continue;
7086 if (mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, 7136 if (mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
@@ -7107,8 +7157,10 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
7107 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) 7157 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
7108 break; 7158 break;
7109 handle = le16_to_cpu(volume_pg1.DevHandle); 7159 handle = le16_to_cpu(volume_pg1.DevHandle);
7160 spin_lock_irqsave(&ioc->raid_device_lock, flags);
7110 raid_device = _scsih_raid_device_find_by_wwid(ioc, 7161 raid_device = _scsih_raid_device_find_by_wwid(ioc,
7111 le64_to_cpu(volume_pg1.WWID)); 7162 le64_to_cpu(volume_pg1.WWID));
7163 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
7112 if (raid_device) 7164 if (raid_device)
7113 continue; 7165 continue;
7114 if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, 7166 if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
@@ -7140,8 +7192,10 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
7140 if (!(_scsih_is_end_device( 7192 if (!(_scsih_is_end_device(
7141 le32_to_cpu(sas_device_pg0.DeviceInfo)))) 7193 le32_to_cpu(sas_device_pg0.DeviceInfo))))
7142 continue; 7194 continue;
7195 spin_lock_irqsave(&ioc->sas_device_lock, flags);
7143 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 7196 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
7144 le64_to_cpu(sas_device_pg0.SASAddress)); 7197 le64_to_cpu(sas_device_pg0.SASAddress));
7198 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7145 if (sas_device) 7199 if (sas_device)
7146 continue; 7200 continue;
7147 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); 7201 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
@@ -7235,7 +7289,7 @@ _firmware_event_work(struct work_struct *work)
7235 7289
7236 switch (fw_event->event) { 7290 switch (fw_event->event) {
7237 case MPT2SAS_REMOVE_UNRESPONDING_DEVICES: 7291 case MPT2SAS_REMOVE_UNRESPONDING_DEVICES:
7238 while (scsi_host_in_recovery(ioc->shost)) 7292 while (scsi_host_in_recovery(ioc->shost) || ioc->shost_recovery)
7239 ssleep(1); 7293 ssleep(1);
7240 _scsih_remove_unresponding_sas_devices(ioc); 7294 _scsih_remove_unresponding_sas_devices(ioc);
7241 _scsih_scan_for_devices_after_reset(ioc); 7295 _scsih_scan_for_devices_after_reset(ioc);
@@ -7313,6 +7367,13 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
7313 return 1; 7367 return 1;
7314 7368
7315 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); 7369 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
7370
7371 if (unlikely(!mpi_reply)) {
7372 printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
7373 ioc->name, __FILE__, __LINE__, __func__);
7374 return 1;
7375 }
7376
7316 event = le16_to_cpu(mpi_reply->Event); 7377 event = le16_to_cpu(mpi_reply->Event);
7317 7378
7318 switch (event) { 7379 switch (event) {
@@ -7353,14 +7414,14 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
7353 case MPI2_EVENT_LOG_ENTRY_ADDED: 7414 case MPI2_EVENT_LOG_ENTRY_ADDED:
7354 { 7415 {
7355 Mpi2EventDataLogEntryAdded_t *log_entry; 7416 Mpi2EventDataLogEntryAdded_t *log_entry;
7356 u32 *log_code; 7417 __le32 *log_code;
7357 7418
7358 if (!ioc->is_warpdrive) 7419 if (!ioc->is_warpdrive)
7359 break; 7420 break;
7360 7421
7361 log_entry = (Mpi2EventDataLogEntryAdded_t *) 7422 log_entry = (Mpi2EventDataLogEntryAdded_t *)
7362 mpi_reply->EventData; 7423 mpi_reply->EventData;
7363 log_code = (u32 *)log_entry->LogData; 7424 log_code = (__le32 *)log_entry->LogData;
7364 7425
7365 if (le16_to_cpu(log_entry->LogEntryQualifier) 7426 if (le16_to_cpu(log_entry->LogEntryQualifier)
7366 != MPT2_WARPDRIVE_LOGENTRY) 7427 != MPT2_WARPDRIVE_LOGENTRY)
@@ -7487,7 +7548,7 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc,
7487 return; 7548 return;
7488 if (mpt2sas_port->remote_identify.device_type == 7549 if (mpt2sas_port->remote_identify.device_type ==
7489 SAS_END_DEVICE) 7550 SAS_END_DEVICE)
7490 mpt2sas_device_remove(ioc, 7551 mpt2sas_device_remove_by_sas_address(ioc,
7491 mpt2sas_port->remote_identify.sas_address); 7552 mpt2sas_port->remote_identify.sas_address);
7492 else if (mpt2sas_port->remote_identify.device_type == 7553 else if (mpt2sas_port->remote_identify.device_type ==
7493 SAS_EDGE_EXPANDER_DEVICE || 7554 SAS_EDGE_EXPANDER_DEVICE ||
@@ -7661,7 +7722,7 @@ _scsih_remove(struct pci_dev *pdev)
7661 &ioc->sas_hba.sas_port_list, port_list) { 7722 &ioc->sas_hba.sas_port_list, port_list) {
7662 if (mpt2sas_port->remote_identify.device_type == 7723 if (mpt2sas_port->remote_identify.device_type ==
7663 SAS_END_DEVICE) 7724 SAS_END_DEVICE)
7664 mpt2sas_device_remove(ioc, 7725 mpt2sas_device_remove_by_sas_address(ioc,
7665 mpt2sas_port->remote_identify.sas_address); 7726 mpt2sas_port->remote_identify.sas_address);
7666 else if (mpt2sas_port->remote_identify.device_type == 7727 else if (mpt2sas_port->remote_identify.device_type ==
7667 SAS_EDGE_EXPANDER_DEVICE || 7728 SAS_EDGE_EXPANDER_DEVICE ||
@@ -7733,11 +7794,11 @@ _scsih_probe_boot_devices(struct MPT2SAS_ADAPTER *ioc)
7733 if (rc) 7794 if (rc)
7734 _scsih_raid_device_remove(ioc, raid_device); 7795 _scsih_raid_device_remove(ioc, raid_device);
7735 } else { 7796 } else {
7797 spin_lock_irqsave(&ioc->sas_device_lock, flags);
7736 sas_device = device; 7798 sas_device = device;
7737 handle = sas_device->handle; 7799 handle = sas_device->handle;
7738 sas_address_parent = sas_device->sas_address_parent; 7800 sas_address_parent = sas_device->sas_address_parent;
7739 sas_address = sas_device->sas_address; 7801 sas_address = sas_device->sas_address;
7740 spin_lock_irqsave(&ioc->sas_device_lock, flags);
7741 list_move_tail(&sas_device->list, &ioc->sas_device_list); 7802 list_move_tail(&sas_device->list, &ioc->sas_device_list);
7742 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 7803 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7743 7804
@@ -8061,8 +8122,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
8061 out_thread_fail: 8122 out_thread_fail:
8062 list_del(&ioc->list); 8123 list_del(&ioc->list);
8063 scsi_remove_host(shost); 8124 scsi_remove_host(shost);
8064 scsi_host_put(shost);
8065 out_add_shost_fail: 8125 out_add_shost_fail:
8126 scsi_host_put(shost);
8066 return -ENODEV; 8127 return -ENODEV;
8067} 8128}
8068 8129
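
The probe-path fix above moves scsi_host_put() below the out_add_shost_fail label, so both failure paths drop the host reference; previously the add-failure path leaked it. The usual goto-ladder shape, sketched with hypothetical helpers:

#include <stdio.h>
#include <stdlib.h>

struct host { int refs; };

static struct host *host_alloc(void)
{
	struct host *h = calloc(1, sizeof(*h));
	if (h)
		h->refs = 1;
	return h;
}

static void host_put(struct host *h)
{
	if (--h->refs == 0)
		free(h);
}

static int add_host(struct host *h)	{ (void)h; return -1; /* simulate failure */ }
static void remove_host(struct host *h)	{ (void)h; }
static int start_thread(void)		{ return 0; }

static int probe(void)
{
	struct host *h = host_alloc();
	if (!h)
		return -1;
	if (add_host(h))
		goto out_add_fail;
	if (start_thread())
		goto out_thread_fail;
	return 0;

out_thread_fail:
	remove_host(h);
out_add_fail:
	host_put(h);	/* every failure path ends here and drops the ref */
	return -1;
}

int main(void)
{
	printf("probe: %d\n", probe());
	return 0;
}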
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 831047466a5a..c6cf20f60720 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -163,12 +163,15 @@ _transport_set_identify(struct MPT2SAS_ADAPTER *ioc, u16 handle,
163 return -EIO; 163 return -EIO;
164 } 164 }
165 165
166 memset(identify, 0, sizeof(*identify)); 166 memset(identify, 0, sizeof(struct sas_identify));
167 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); 167 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
168 168
169 /* sas_address */ 169 /* sas_address */
170 identify->sas_address = le64_to_cpu(sas_device_pg0.SASAddress); 170 identify->sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
171 171
172 /* phy number of the parent device this device is linked to */
173 identify->phy_identifier = sas_device_pg0.PhyNum;
174
172 /* device_type */ 175 /* device_type */
173 switch (device_info & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) { 176 switch (device_info & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) {
174 case MPI2_SAS_DEVICE_INFO_NO_DEVICE: 177 case MPI2_SAS_DEVICE_INFO_NO_DEVICE:
@@ -484,7 +487,7 @@ _transport_delete_port(struct MPT2SAS_ADAPTER *ioc,
484 487
485 ioc->logging_level |= MPT_DEBUG_TRANSPORT; 488 ioc->logging_level |= MPT_DEBUG_TRANSPORT;
486 if (device_type == SAS_END_DEVICE) 489 if (device_type == SAS_END_DEVICE)
487 mpt2sas_device_remove(ioc, sas_address); 490 mpt2sas_device_remove_by_sas_address(ioc, sas_address);
488 else if (device_type == SAS_EDGE_EXPANDER_DEVICE || 491 else if (device_type == SAS_EDGE_EXPANDER_DEVICE ||
489 device_type == SAS_FANOUT_EXPANDER_DEVICE) 492 device_type == SAS_FANOUT_EXPANDER_DEVICE)
490 mpt2sas_expander_remove(ioc, sas_address); 493 mpt2sas_expander_remove(ioc, sas_address);
@@ -792,9 +795,10 @@ mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
792 spin_lock_irqsave(&ioc->sas_node_lock, flags); 795 spin_lock_irqsave(&ioc->sas_node_lock, flags);
793 sas_node = _transport_sas_node_find_by_sas_address(ioc, 796 sas_node = _transport_sas_node_find_by_sas_address(ioc,
794 sas_address_parent); 797 sas_address_parent);
795 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 798 if (!sas_node) {
796 if (!sas_node) 799 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
797 return; 800 return;
801 }
798 list_for_each_entry_safe(mpt2sas_port, next, &sas_node->sas_port_list, 802 list_for_each_entry_safe(mpt2sas_port, next, &sas_node->sas_port_list,
799 port_list) { 803 port_list) {
800 if (mpt2sas_port->remote_identify.sas_address != sas_address) 804 if (mpt2sas_port->remote_identify.sas_address != sas_address)
@@ -804,8 +808,10 @@ mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
804 goto out; 808 goto out;
805 } 809 }
806 out: 810 out:
807 if (!found) 811 if (!found) {
812 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
808 return; 813 return;
814 }
809 815
810 for (i = 0; i < sas_node->num_phys; i++) { 816 for (i = 0; i < sas_node->num_phys; i++) {
811 if (sas_node->phy[i].remote_identify.sas_address == sas_address) 817 if (sas_node->phy[i].remote_identify.sas_address == sas_address)
@@ -813,6 +819,7 @@ mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
813 sizeof(struct sas_identify)); 819 sizeof(struct sas_identify));
814 } 820 }
815 821
822 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
816 list_for_each_entry_safe(mpt2sas_phy, next_phy, 823 list_for_each_entry_safe(mpt2sas_phy, next_phy,
817 &mpt2sas_port->phy_list, port_siblings) { 824 &mpt2sas_port->phy_list, port_siblings) {
818 if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) 825 if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
@@ -986,12 +993,14 @@ mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc,
986 993
987 spin_lock_irqsave(&ioc->sas_node_lock, flags); 994 spin_lock_irqsave(&ioc->sas_node_lock, flags);
988 sas_node = _transport_sas_node_find_by_sas_address(ioc, sas_address); 995 sas_node = _transport_sas_node_find_by_sas_address(ioc, sas_address);
989 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 996 if (!sas_node) {
990 if (!sas_node) 997 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
991 return; 998 return;
999 }
992 1000
993 mpt2sas_phy = &sas_node->phy[phy_number]; 1001 mpt2sas_phy = &sas_node->phy[phy_number];
994 mpt2sas_phy->attached_handle = handle; 1002 mpt2sas_phy->attached_handle = handle;
1003 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
995 if (handle && (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) { 1004 if (handle && (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) {
996 _transport_set_identify(ioc, handle, 1005 _transport_set_identify(ioc, handle,
997 &mpt2sas_phy->remote_identify); 1006 &mpt2sas_phy->remote_identify);
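Both mpt2sas_transport_port_remove() and mpt2sas_transport_update_links() now keep sas_node_lock held from the lookup until the last access of the node, instead of dropping it right after _transport_sas_node_find_by_sas_address(); otherwise the node could be freed between the unlock and the dereference. The shape of the pattern, with struct node and find_node() as illustrative stand-ins:

static void update_link(struct ioc *ioc, u64 sas_address, u16 handle)
{
	struct node *node;
	unsigned long flags;

	spin_lock_irqsave(&ioc->lock, flags);
	node = find_node(ioc, sas_address);	/* list walk needs the lock */
	if (!node) {
		spin_unlock_irqrestore(&ioc->lock, flags);
		return;
	}
	node->attached_handle = handle;		/* last touch, still locked */
	spin_unlock_irqrestore(&ioc->lock, flags);
}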
@@ -1310,17 +1319,20 @@ _transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
1310 struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy); 1319 struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
1311 struct _sas_device *sas_device; 1320 struct _sas_device *sas_device;
1312 unsigned long flags; 1321 unsigned long flags;
1322 int rc;
1313 1323
1314 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1324 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1315 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 1325 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
1316 rphy->identify.sas_address); 1326 rphy->identify.sas_address);
1327 if (sas_device) {
1328 *identifier = sas_device->enclosure_logical_id;
1329 rc = 0;
1330 } else {
1331 *identifier = 0;
1332 rc = -ENXIO;
1333 }
1317 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 1334 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1318 1335 return rc;
1319 if (!sas_device)
1320 return -ENXIO;
1321
1322 *identifier = sas_device->enclosure_logical_id;
1323 return 0;
1324} 1336}
1325 1337
1326/** 1338/**
@@ -1335,16 +1347,17 @@ _transport_get_bay_identifier(struct sas_rphy *rphy)
1335 struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy); 1347 struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
1336 struct _sas_device *sas_device; 1348 struct _sas_device *sas_device;
1337 unsigned long flags; 1349 unsigned long flags;
1350 int rc;
1338 1351
1339 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1352 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1340 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 1353 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
1341 rphy->identify.sas_address); 1354 rphy->identify.sas_address);
1355 if (sas_device)
1356 rc = sas_device->slot;
1357 else
1358 rc = -ENXIO;
1342 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 1359 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1343 1360 return rc;
1344 if (!sas_device)
1345 return -ENXIO;
1346
1347 return sas_device->slot;
1348} 1361}
1349 1362
1350/* phy control request structure */ 1363/* phy control request structure */
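_transport_get_enclosure_identifier() and _transport_get_bay_identifier() get the same treatment: the result is computed while sas_device_lock is still held, and only the local copy is returned after the unlock. In compressed form (names as in the driver, rc a local):

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, addr);
	rc = sas_device ? sas_device->slot : -ENXIO;	/* deref only under lock */
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	return rc;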
@@ -1629,11 +1642,13 @@ _transport_phy_enable(struct sas_phy *phy, int enable)
1629{ 1642{
1630 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy); 1643 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
1631 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; 1644 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
1645 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
1632 Mpi2ConfigReply_t mpi_reply; 1646 Mpi2ConfigReply_t mpi_reply;
1633 u16 ioc_status; 1647 u16 ioc_status;
1634 u16 sz; 1648 u16 sz;
1635 int rc = 0; 1649 int rc = 0;
1636 unsigned long flags; 1650 unsigned long flags;
1651 int i, discovery_active;
1637 1652
1638 spin_lock_irqsave(&ioc->sas_node_lock, flags); 1653 spin_lock_irqsave(&ioc->sas_node_lock, flags);
1639 if (_transport_sas_node_find_by_sas_address(ioc, 1654 if (_transport_sas_node_find_by_sas_address(ioc,
@@ -1651,7 +1666,50 @@ _transport_phy_enable(struct sas_phy *phy, int enable)
1651 1666
1652 /* handle hba phys */ 1667 /* handle hba phys */
1653 1668
1654 /* sas_iounit page 1 */ 1669 /* read sas_iounit page 0 */
1670 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
1671 sizeof(Mpi2SasIOUnit0PhyData_t));
1672 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
1673 if (!sas_iounit_pg0) {
1674 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1675 ioc->name, __FILE__, __LINE__, __func__);
1676 rc = -ENOMEM;
1677 goto out;
1678 }
1679 if ((mpt2sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
1680 sas_iounit_pg0, sz))) {
1681 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1682 ioc->name, __FILE__, __LINE__, __func__);
1683 rc = -ENXIO;
1684 goto out;
1685 }
1686 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1687 MPI2_IOCSTATUS_MASK;
1688 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
1689 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1690 ioc->name, __FILE__, __LINE__, __func__);
1691 rc = -EIO;
1692 goto out;
1693 }
1694
 1695 /* unable to enable/disable phys when discovery is active */
1696 for (i = 0, discovery_active = 0; i < ioc->sas_hba.num_phys ; i++) {
1697 if (sas_iounit_pg0->PhyData[i].PortFlags &
1698 MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS) {
1699 printk(MPT2SAS_ERR_FMT "discovery is active on "
1700 "port = %d, phy = %d: unable to enable/disable "
1701 "phys, try again later!\n", ioc->name,
1702 sas_iounit_pg0->PhyData[i].Port, i);
1703 discovery_active = 1;
1704 }
1705 }
1706
1707 if (discovery_active) {
1708 rc = -EAGAIN;
1709 goto out;
1710 }
1711
1712 /* read sas_iounit page 1 */
1655 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys * 1713 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
1656 sizeof(Mpi2SasIOUnit1PhyData_t)); 1714 sizeof(Mpi2SasIOUnit1PhyData_t));
1657 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); 1715 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
@@ -1676,7 +1734,18 @@ _transport_phy_enable(struct sas_phy *phy, int enable)
1676 rc = -EIO; 1734 rc = -EIO;
1677 goto out; 1735 goto out;
1678 } 1736 }
1679 1737 /* copy Port/PortFlags/PhyFlags from page 0 */
1738 for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
1739 sas_iounit_pg1->PhyData[i].Port =
1740 sas_iounit_pg0->PhyData[i].Port;
1741 sas_iounit_pg1->PhyData[i].PortFlags =
1742 (sas_iounit_pg0->PhyData[i].PortFlags &
1743 MPI2_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG);
1744 sas_iounit_pg1->PhyData[i].PhyFlags =
1745 (sas_iounit_pg0->PhyData[i].PhyFlags &
1746 (MPI2_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED +
1747 MPI2_SASIOUNIT0_PHYFLAGS_PHY_DISABLED));
1748 }
1680 if (enable) 1749 if (enable)
1681 sas_iounit_pg1->PhyData[phy->number].PhyFlags 1750 sas_iounit_pg1->PhyData[phy->number].PhyFlags
1682 &= ~MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE; 1751 &= ~MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
@@ -1692,6 +1761,7 @@ _transport_phy_enable(struct sas_phy *phy, int enable)
1692 1761
1693 out: 1762 out:
1694 kfree(sas_iounit_pg1); 1763 kfree(sas_iounit_pg1);
1764 kfree(sas_iounit_pg0);
1695 return rc; 1765 return rc;
1696} 1766}
1697 1767
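_transport_phy_enable() now performs a read-modify-write: it first reads SAS IO unit page 0, returns -EAGAIN while any port still has discovery in progress, and then seeds the page-1 writeback with the live Port/PortFlags/PhyFlags so toggling one phy does not clobber the firmware's current settings for the others. (The patch combines the two PHYFLAGS masks with +; for disjoint single-bit masks that is value-equal to the conventional |.) The copy step, abbreviated with pg0/pg1 standing for the two allocated pages:

	for (i = 0; i < ioc->sas_hba.num_phys; i++) {
		pg1->PhyData[i].Port      = pg0->PhyData[i].Port;
		pg1->PhyData[i].PortFlags = pg0->PhyData[i].PortFlags &
				MPI2_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG;
		pg1->PhyData[i].PhyFlags  = pg0->PhyData[i].PhyFlags &
				(MPI2_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED |
				 MPI2_SASIOUNIT0_PHYFLAGS_PHY_DISABLED);
	}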
@@ -1828,7 +1898,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1828 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 1898 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
1829 Mpi2SmpPassthroughRequest_t *mpi_request; 1899 Mpi2SmpPassthroughRequest_t *mpi_request;
1830 Mpi2SmpPassthroughReply_t *mpi_reply; 1900 Mpi2SmpPassthroughReply_t *mpi_reply;
1831 int rc; 1901 int rc, i;
1832 u16 smid; 1902 u16 smid;
1833 u32 ioc_state; 1903 u32 ioc_state;
1834 unsigned long timeleft; 1904 unsigned long timeleft;
@@ -1837,24 +1907,20 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1837 u8 issue_reset = 0; 1907 u8 issue_reset = 0;
1838 dma_addr_t dma_addr_in = 0; 1908 dma_addr_t dma_addr_in = 0;
1839 dma_addr_t dma_addr_out = 0; 1909 dma_addr_t dma_addr_out = 0;
1910 dma_addr_t pci_dma_in = 0;
1911 dma_addr_t pci_dma_out = 0;
1912 void *pci_addr_in = NULL;
1913 void *pci_addr_out = NULL;
1840 u16 wait_state_count; 1914 u16 wait_state_count;
1841 struct request *rsp = req->next_rq; 1915 struct request *rsp = req->next_rq;
1916 struct bio_vec *bvec = NULL;
1842 1917
1843 if (!rsp) { 1918 if (!rsp) {
1844 printk(MPT2SAS_ERR_FMT "%s: the smp response space is " 1919 printk(MPT2SAS_ERR_FMT "%s: the smp response space is "
1845 "missing\n", ioc->name, __func__); 1920 "missing\n", ioc->name, __func__);
1846 return -EINVAL; 1921 return -EINVAL;
1847 } 1922 }
1848 1923 if (ioc->shost_recovery || ioc->pci_error_recovery) {
1849 /* do we need to support multiple segments? */
1850 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
1851 printk(MPT2SAS_ERR_FMT "%s: multiple segments req %u %u, "
1852 "rsp %u %u\n", ioc->name, __func__, req->bio->bi_vcnt,
1853 blk_rq_bytes(req), rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
1854 return -EINVAL;
1855 }
1856
1857 if (ioc->shost_recovery) {
1858 printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n", 1924 printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
1859 __func__, ioc->name); 1925 __func__, ioc->name);
1860 return -EFAULT; 1926 return -EFAULT;
@@ -1872,6 +1938,59 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1872 } 1938 }
1873 ioc->transport_cmds.status = MPT2_CMD_PENDING; 1939 ioc->transport_cmds.status = MPT2_CMD_PENDING;
1874 1940
1941 /* Check if the request is split across multiple segments */
1942 if (req->bio->bi_vcnt > 1) {
1943 u32 offset = 0;
1944
1945 /* Allocate memory and copy the request */
1946 pci_addr_out = pci_alloc_consistent(ioc->pdev,
1947 blk_rq_bytes(req), &pci_dma_out);
1948 if (!pci_addr_out) {
1949 printk(MPT2SAS_INFO_FMT "%s(): PCI Addr out = NULL\n",
1950 ioc->name, __func__);
1951 rc = -ENOMEM;
1952 goto out;
1953 }
1954
1955 bio_for_each_segment(bvec, req->bio, i) {
1956 memcpy(pci_addr_out + offset,
1957 page_address(bvec->bv_page) + bvec->bv_offset,
1958 bvec->bv_len);
1959 offset += bvec->bv_len;
1960 }
1961 } else {
1962 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
1963 blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
1964 if (!dma_addr_out) {
1965 printk(MPT2SAS_INFO_FMT "%s(): DMA Addr out = NULL\n",
1966 ioc->name, __func__);
1967 rc = -ENOMEM;
1968 goto free_pci;
1969 }
1970 }
1971
1972 /* Check if the response needs to be populated across
1973 * multiple segments */
1974 if (rsp->bio->bi_vcnt > 1) {
1975 pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
1976 &pci_dma_in);
1977 if (!pci_addr_in) {
1978 printk(MPT2SAS_INFO_FMT "%s(): PCI Addr in = NULL\n",
1979 ioc->name, __func__);
1980 rc = -ENOMEM;
1981 goto unmap;
1982 }
1983 } else {
1984 dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio),
1985 blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
1986 if (!dma_addr_in) {
1987 printk(MPT2SAS_INFO_FMT "%s(): DMA Addr in = NULL\n",
1988 ioc->name, __func__);
1989 rc = -ENOMEM;
1990 goto unmap;
1991 }
1992 }
1993
1875 wait_state_count = 0; 1994 wait_state_count = 0;
1876 ioc_state = mpt2sas_base_get_iocstate(ioc, 1); 1995 ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
1877 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 1996 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
@@ -1880,7 +1999,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1880 "%s: failed due to ioc not operational\n", 1999 "%s: failed due to ioc not operational\n",
1881 ioc->name, __func__); 2000 ioc->name, __func__);
1882 rc = -EFAULT; 2001 rc = -EFAULT;
1883 goto out; 2002 goto unmap;
1884 } 2003 }
1885 ssleep(1); 2004 ssleep(1);
1886 ioc_state = mpt2sas_base_get_iocstate(ioc, 1); 2005 ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
@@ -1897,7 +2016,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1897 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n", 2016 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
1898 ioc->name, __func__); 2017 ioc->name, __func__);
1899 rc = -EAGAIN; 2018 rc = -EAGAIN;
1900 goto out; 2019 goto unmap;
1901 } 2020 }
1902 2021
1903 rc = 0; 2022 rc = 0;
@@ -1919,16 +2038,14 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1919 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | 2038 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1920 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC); 2039 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
1921 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; 2040 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1922 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), 2041 if (req->bio->bi_vcnt > 1) {
1923 blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL); 2042 ioc->base_add_sg_single(psge, sgl_flags |
1924 if (!dma_addr_out) { 2043 (blk_rq_bytes(req) - 4), pci_dma_out);
1925 mpt2sas_base_free_smid(ioc, smid); 2044 } else {
1926 goto unmap; 2045 ioc->base_add_sg_single(psge, sgl_flags |
2046 (blk_rq_bytes(req) - 4), dma_addr_out);
1927 } 2047 }
1928 2048
1929 ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(req) - 4),
1930 dma_addr_out);
1931
1932 /* incr sgel */ 2049 /* incr sgel */
1933 psge += ioc->sge_size; 2050 psge += ioc->sge_size;
1934 2051
@@ -1937,16 +2054,14 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1937 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | 2054 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1938 MPI2_SGE_FLAGS_END_OF_LIST); 2055 MPI2_SGE_FLAGS_END_OF_LIST);
1939 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; 2056 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1940 dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio), 2057 if (rsp->bio->bi_vcnt > 1) {
1941 blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL); 2058 ioc->base_add_sg_single(psge, sgl_flags |
1942 if (!dma_addr_in) { 2059 (blk_rq_bytes(rsp) + 4), pci_dma_in);
1943 mpt2sas_base_free_smid(ioc, smid); 2060 } else {
1944 goto unmap; 2061 ioc->base_add_sg_single(psge, sgl_flags |
2062 (blk_rq_bytes(rsp) + 4), dma_addr_in);
1945 } 2063 }
1946 2064
1947 ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(rsp) + 4),
1948 dma_addr_in);
1949
1950 dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "%s - " 2065 dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "%s - "
1951 "sending smp request\n", ioc->name, __func__)); 2066 "sending smp request\n", ioc->name, __func__));
1952 2067
@@ -1982,6 +2097,27 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1982 req->resid_len = 0; 2097 req->resid_len = 0;
1983 rsp->resid_len -= 2098 rsp->resid_len -=
1984 le16_to_cpu(mpi_reply->ResponseDataLength); 2099 le16_to_cpu(mpi_reply->ResponseDataLength);
2100 /* check if the resp needs to be copied from the allocated
2101 * pci mem */
2102 if (rsp->bio->bi_vcnt > 1) {
2103 u32 offset = 0;
2104 u32 bytes_to_copy =
2105 le16_to_cpu(mpi_reply->ResponseDataLength);
2106 bio_for_each_segment(bvec, rsp->bio, i) {
2107 if (bytes_to_copy <= bvec->bv_len) {
2108 memcpy(page_address(bvec->bv_page) +
2109 bvec->bv_offset, pci_addr_in +
2110 offset, bytes_to_copy);
2111 break;
2112 } else {
2113 memcpy(page_address(bvec->bv_page) +
2114 bvec->bv_offset, pci_addr_in +
2115 offset, bvec->bv_len);
2116 bytes_to_copy -= bvec->bv_len;
2117 }
2118 offset += bvec->bv_len;
2119 }
2120 }
1985 } else { 2121 } else {
1986 dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT 2122 dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT
1987 "%s - no reply\n", ioc->name, __func__)); 2123 "%s - no reply\n", ioc->name, __func__));
@@ -2003,6 +2139,15 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
2003 pci_unmap_single(ioc->pdev, dma_addr_in, blk_rq_bytes(rsp), 2139 pci_unmap_single(ioc->pdev, dma_addr_in, blk_rq_bytes(rsp),
2004 PCI_DMA_BIDIRECTIONAL); 2140 PCI_DMA_BIDIRECTIONAL);
2005 2141
2142 free_pci:
2143 if (pci_addr_out)
2144 pci_free_consistent(ioc->pdev, blk_rq_bytes(req), pci_addr_out,
2145 pci_dma_out);
2146
2147 if (pci_addr_in)
2148 pci_free_consistent(ioc->pdev, blk_rq_bytes(rsp), pci_addr_in,
2149 pci_dma_in);
2150
2006 out: 2151 out:
2007 ioc->transport_cmds.status = MPT2_CMD_NOT_USED; 2152 ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
2008 mutex_unlock(&ioc->transport_cmds.mutex); 2153 mutex_unlock(&ioc->transport_cmds.mutex);
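The _transport_smp_handler() rework lifts the old single-segment restriction: when a request or response bio has more than one segment, the data is bounced through one physically contiguous pci_alloc_consistent() buffer (the SMP passthrough SGE addresses a single region), while single-segment bios are still pci_map_single()'d in place; the new free_pci label releases the coherent buffers on every exit path. The gather step uses this kernel generation's three-argument bio_for_each_segment(), which yields struct bio_vec pointers:

	/* sketch: copy a multi-segment request into the bounce buffer */
	u32 off = 0;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment(bvec, req->bio, i) {
		memcpy(bounce + off,		/* bounce == pci_addr_out */
		       page_address(bvec->bv_page) + bvec->bv_offset,
		       bvec->bv_len);
		off += bvec->bv_len;
	}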
diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h
index 944afada61ee..c3d20c8d4abe 100644
--- a/drivers/scsi/pm8001/pm8001_defs.h
+++ b/drivers/scsi/pm8001/pm8001_defs.h
@@ -66,9 +66,10 @@ enum port_type {
66 66
67/* driver compile-time configuration */ 67/* driver compile-time configuration */
68#define PM8001_MAX_CCB 512 /* max ccbs supported */ 68#define PM8001_MAX_CCB 512 /* max ccbs supported */
69#define PM8001_MPI_QUEUE 1024 /* maximum mpi queue entries */
69#define PM8001_MAX_INB_NUM 1 70#define PM8001_MAX_INB_NUM 1
70#define PM8001_MAX_OUTB_NUM 1 71#define PM8001_MAX_OUTB_NUM 1
71#define PM8001_CAN_QUEUE 128 /* SCSI Queue depth */ 72#define PM8001_CAN_QUEUE 508 /* SCSI Queue depth */
72 73
73/* unchangeable hardware details */ 74/* unchangeable hardware details */
74#define PM8001_MAX_PHYS 8 /* max. possible phys */ 75#define PM8001_MAX_PHYS 8 /* max. possible phys */
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 9d82ee5c10de..bf54aafc2d71 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -192,7 +192,7 @@ init_default_table_values(struct pm8001_hba_info *pm8001_ha)
192 pm8001_ha->main_cfg_tbl.fatal_err_interrupt = 0x01; 192 pm8001_ha->main_cfg_tbl.fatal_err_interrupt = 0x01;
193 for (i = 0; i < qn; i++) { 193 for (i = 0; i < qn; i++) {
194 pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = 194 pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt =
195 0x00000100 | (0x00000040 << 16) | (0x00<<30); 195 PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30);
196 pm8001_ha->inbnd_q_tbl[i].upper_base_addr = 196 pm8001_ha->inbnd_q_tbl[i].upper_base_addr =
197 pm8001_ha->memoryMap.region[IB].phys_addr_hi; 197 pm8001_ha->memoryMap.region[IB].phys_addr_hi;
198 pm8001_ha->inbnd_q_tbl[i].lower_base_addr = 198 pm8001_ha->inbnd_q_tbl[i].lower_base_addr =
@@ -218,7 +218,7 @@ init_default_table_values(struct pm8001_hba_info *pm8001_ha)
218 } 218 }
219 for (i = 0; i < qn; i++) { 219 for (i = 0; i < qn; i++) {
220 pm8001_ha->outbnd_q_tbl[i].element_size_cnt = 220 pm8001_ha->outbnd_q_tbl[i].element_size_cnt =
221 256 | (64 << 16) | (1<<30); 221 PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30);
222 pm8001_ha->outbnd_q_tbl[i].upper_base_addr = 222 pm8001_ha->outbnd_q_tbl[i].upper_base_addr =
223 pm8001_ha->memoryMap.region[OB].phys_addr_hi; 223 pm8001_ha->memoryMap.region[OB].phys_addr_hi;
224 pm8001_ha->outbnd_q_tbl[i].lower_base_addr = 224 pm8001_ha->outbnd_q_tbl[i].lower_base_addr =
@@ -1245,7 +1245,7 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
1245 /* Stores the new consumer index */ 1245 /* Stores the new consumer index */
1246 consumer_index = pm8001_read_32(circularQ->ci_virt); 1246 consumer_index = pm8001_read_32(circularQ->ci_virt);
1247 circularQ->consumer_index = cpu_to_le32(consumer_index); 1247 circularQ->consumer_index = cpu_to_le32(consumer_index);
1248 if (((circularQ->producer_idx + bcCount) % 256) == 1248 if (((circularQ->producer_idx + bcCount) % PM8001_MPI_QUEUE) ==
1249 le32_to_cpu(circularQ->consumer_index)) { 1249 le32_to_cpu(circularQ->consumer_index)) {
1250 *messagePtr = NULL; 1250 *messagePtr = NULL;
1251 return -1; 1251 return -1;
@@ -1253,7 +1253,8 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
1253 /* get memory IOMB buffer address */ 1253 /* get memory IOMB buffer address */
1254 offset = circularQ->producer_idx * 64; 1254 offset = circularQ->producer_idx * 64;
1255 /* increment to next bcCount element */ 1255 /* increment to next bcCount element */
1256 circularQ->producer_idx = (circularQ->producer_idx + bcCount) % 256; 1256 circularQ->producer_idx = (circularQ->producer_idx + bcCount)
1257 % PM8001_MPI_QUEUE;
1257 /* Adds that distance to the base of the region virtual address plus 1258 /* Adds that distance to the base of the region virtual address plus
1258 the message header size*/ 1259 the message header size*/
1259 msgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt + offset); 1260 msgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt + offset);
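The pm8001 hunks replace the hard-coded 256 with PM8001_MPI_QUEUE everywhere the producer/consumer indices wrap, so the modulo stays in step with the number of elements actually allocated for the inbound and outbound rings. A standalone sketch of the full test used above (one slot is sacrificed so that full and empty are distinguishable):

#include <stdbool.h>

#define QUEUE_DEPTH 1024	/* mirrors PM8001_MPI_QUEUE */

/* true if advancing the producer by n slots would land on the consumer */
static bool ring_would_fill(unsigned int pi, unsigned int ci, unsigned int n)
{
	return ((pi + n) % QUEUE_DEPTH) == ci;
}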
@@ -1326,7 +1327,8 @@ static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
1326 return 0; 1327 return 0;
1327 } 1328 }
1328 /* free the circular queue buffer elements associated with the message*/ 1329 /* free the circular queue buffer elements associated with the message*/
1329 circularQ->consumer_idx = (circularQ->consumer_idx + bc) % 256; 1330 circularQ->consumer_idx = (circularQ->consumer_idx + bc)
1331 % PM8001_MPI_QUEUE;
1330 /* update the CI of outbound queue */ 1332 /* update the CI of outbound queue */
1331 pm8001_cw32(pm8001_ha, circularQ->ci_pci_bar, circularQ->ci_offset, 1333 pm8001_cw32(pm8001_ha, circularQ->ci_pci_bar, circularQ->ci_offset,
1332 circularQ->consumer_idx); 1334 circularQ->consumer_idx);
@@ -1383,7 +1385,8 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
1383 circularQ->consumer_idx = 1385 circularQ->consumer_idx =
1384 (circularQ->consumer_idx + 1386 (circularQ->consumer_idx +
1385 ((le32_to_cpu(msgHeader_tmp) 1387 ((le32_to_cpu(msgHeader_tmp)
1386 >> 24) & 0x1f)) % 256; 1388 >> 24) & 0x1f))
1389 % PM8001_MPI_QUEUE;
1387 msgHeader_tmp = 0; 1390 msgHeader_tmp = 0;
1388 pm8001_write_32(msgHeader, 0, 0); 1391 pm8001_write_32(msgHeader, 0, 0);
1389 /* update the CI of outbound queue */ 1392 /* update the CI of outbound queue */
@@ -1396,7 +1399,7 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
1396 circularQ->consumer_idx = 1399 circularQ->consumer_idx =
1397 (circularQ->consumer_idx + 1400 (circularQ->consumer_idx +
1398 ((le32_to_cpu(msgHeader_tmp) >> 24) & 1401 ((le32_to_cpu(msgHeader_tmp) >> 24) &
1399 0x1f)) % 256; 1402 0x1f)) % PM8001_MPI_QUEUE;
1400 msgHeader_tmp = 0; 1403 msgHeader_tmp = 0;
1401 pm8001_write_32(msgHeader, 0, 0); 1404 pm8001_write_32(msgHeader, 0, 0);
1402 /* update the CI of outbound queue */ 1405 /* update the CI of outbound queue */
@@ -3357,7 +3360,7 @@ mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3357 struct fw_control_ex fw_control_context; 3360 struct fw_control_ex fw_control_context;
3358 struct fw_flash_Update_resp *ppayload = 3361 struct fw_flash_Update_resp *ppayload =
3359 (struct fw_flash_Update_resp *)(piomb + 4); 3362 (struct fw_flash_Update_resp *)(piomb + 4);
3360 u32 tag = ppayload->tag; 3363 u32 tag = le32_to_cpu(ppayload->tag);
3361 struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag]; 3364 struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
3362 status = le32_to_cpu(ppayload->status); 3365 status = le32_to_cpu(ppayload->status);
3363 memcpy(&fw_control_context, 3366 memcpy(&fw_control_context,
@@ -3703,8 +3706,8 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
3703 */ 3706 */
3704static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) 3707static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
3705{ 3708{
3706 u32 pHeader = (u32)*(u32 *)piomb; 3709 __le32 pHeader = *(__le32 *)piomb;
3707 u8 opc = (u8)(pHeader & 0xFFF); 3710 u8 opc = (u8)((le32_to_cpu(pHeader)) & 0xFFF);
3708 3711
3709 PM8001_MSG_DBG(pm8001_ha, pm8001_printk("process_one_iomb:")); 3712 PM8001_MSG_DBG(pm8001_ha, pm8001_printk("process_one_iomb:"));
3710 3713
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
index 1a4611eb0321..d437309cb1e1 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.h
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -599,7 +599,7 @@ struct fw_flash_Update_req {
599 * 599 *
600 */ 600 */
601struct fw_flash_Update_resp { 601struct fw_flash_Update_resp {
602 dma_addr_t tag; 602 __le32 tag;
603 __le32 status; 603 __le32 status;
604 u32 reserved[13]; 604 u32 reserved[13];
605} __attribute__((packed, aligned(4))); 605} __attribute__((packed, aligned(4)));
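Retyping fw_flash_Update_resp.tag from dma_addr_t to __le32 fixes two things at once: dma_addr_t changes width with the kernel configuration, which skewed the on-wire layout of the reply, and the __le32 annotation lets sparse insist on the le32_to_cpu() conversion now added at the use site. The general shape:

struct resp {
	__le32 tag;	/* wire order: convert at the point of use */
	__le32 status;
	u32 reserved[13];
} __attribute__((packed, aligned(4)));

	u32 tag = le32_to_cpu(resp->tag);	/* host-order value */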
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 36efaa7c3a54..0267c22f8741 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -235,15 +235,15 @@ static int __devinit pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
235 pm8001_ha->memoryMap.region[PI].alignment = 4; 235 pm8001_ha->memoryMap.region[PI].alignment = 4;
236 236
237 /* MPI Memory region 5 inbound queues */ 237 /* MPI Memory region 5 inbound queues */
238 pm8001_ha->memoryMap.region[IB].num_elements = 256; 238 pm8001_ha->memoryMap.region[IB].num_elements = PM8001_MPI_QUEUE;
239 pm8001_ha->memoryMap.region[IB].element_size = 64; 239 pm8001_ha->memoryMap.region[IB].element_size = 64;
240 pm8001_ha->memoryMap.region[IB].total_len = 256 * 64; 240 pm8001_ha->memoryMap.region[IB].total_len = PM8001_MPI_QUEUE * 64;
241 pm8001_ha->memoryMap.region[IB].alignment = 64; 241 pm8001_ha->memoryMap.region[IB].alignment = 64;
242 242
243 /* MPI Memory region 6 inbound queues */ 243 /* MPI Memory region 6 outbound queues */
244 pm8001_ha->memoryMap.region[OB].num_elements = 256; 244 pm8001_ha->memoryMap.region[OB].num_elements = PM8001_MPI_QUEUE;
245 pm8001_ha->memoryMap.region[OB].element_size = 64; 245 pm8001_ha->memoryMap.region[OB].element_size = 64;
246 pm8001_ha->memoryMap.region[OB].total_len = 256 * 64; 246 pm8001_ha->memoryMap.region[OB].total_len = PM8001_MPI_QUEUE * 64;
247 pm8001_ha->memoryMap.region[OB].alignment = 64; 247 pm8001_ha->memoryMap.region[OB].alignment = 64;
248 248
249 /* Memory region write DMA*/ 249 /* Memory region write DMA*/
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 07322ecff90d..61c82a345f82 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -90,6 +90,12 @@ unsigned int scsi_logging_level;
90EXPORT_SYMBOL(scsi_logging_level); 90EXPORT_SYMBOL(scsi_logging_level);
91#endif 91#endif
92 92
93#if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_BLK_DEV_SD)
94/* sd and scsi_pm need to coordinate flushing async actions */
95LIST_HEAD(scsi_sd_probe_domain);
96EXPORT_SYMBOL(scsi_sd_probe_domain);
97#endif
98
93/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI. 99/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
94 * You may not alter any existing entry (although adding new ones is 100 * You may not alter any existing entry (although adding new ones is
95 * encouraged once assigned by ANSI/INCITS T10 101 * encouraged once assigned by ANSI/INCITS T10
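scsi_sd_probe_domain gives sd and scsi_pm a private async domain, so waiting for disk probing no longer means flushing every async task in the kernel the way async_synchronize_full() does. This kernel generation represents a domain as a plain list_head; usage, with my_probe_work/data as placeholders:

LIST_HEAD(my_probe_domain);

	/* producer side */
	async_schedule_domain(my_probe_work, data, &my_probe_domain);

	/* consumer side: waits only for work queued in this domain */
	async_synchronize_full_domain(&my_probe_domain);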
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 5dfd7495d1a1..62ddfd31d4ce 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2348,10 +2348,14 @@ EXPORT_SYMBOL(scsi_device_quiesce);
2348 * 2348 *
2349 * Must be called with user context, may sleep. 2349 * Must be called with user context, may sleep.
2350 */ 2350 */
2351void 2351void scsi_device_resume(struct scsi_device *sdev)
2352scsi_device_resume(struct scsi_device *sdev)
2353{ 2352{
2354 if(scsi_device_set_state(sdev, SDEV_RUNNING)) 2353 /* check if the device state was mutated prior to resume, and if
2354 * so assume the state is being managed elsewhere (for example
2355 * device deleted during suspend)
2356 */
2357 if (sdev->sdev_state != SDEV_QUIESCE ||
2358 scsi_device_set_state(sdev, SDEV_RUNNING))
2355 return; 2359 return;
2356 scsi_run_queue(sdev->request_queue); 2360 scsi_run_queue(sdev->request_queue);
2357} 2361}
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index c4670642d023..f661a41fa4c6 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -97,7 +97,7 @@ static int scsi_bus_prepare(struct device *dev)
97{ 97{
98 if (scsi_is_sdev_device(dev)) { 98 if (scsi_is_sdev_device(dev)) {
99 /* sd probing uses async_schedule. Wait until it finishes. */ 99 /* sd probing uses async_schedule. Wait until it finishes. */
100 async_synchronize_full(); 100 async_synchronize_full_domain(&scsi_sd_probe_domain);
101 101
102 } else if (scsi_is_host_device(dev)) { 102 } else if (scsi_is_host_device(dev)) {
103 /* Wait until async scanning is finished */ 103 /* Wait until async scanning is finished */
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index be4fa6d179b1..07ce3f51701d 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -163,6 +163,8 @@ static inline int scsi_autopm_get_host(struct Scsi_Host *h) { return 0; }
163static inline void scsi_autopm_put_host(struct Scsi_Host *h) {} 163static inline void scsi_autopm_put_host(struct Scsi_Host *h) {}
164#endif /* CONFIG_PM_RUNTIME */ 164#endif /* CONFIG_PM_RUNTIME */
165 165
166extern struct list_head scsi_sd_probe_domain;
167
166/* 168/*
167 * internal scsi timeout functions: for use by mid-layer and transport 169 * internal scsi timeout functions: for use by mid-layer and transport
168 * classes. 170 * classes.
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 80fbe2ac0b47..579760420d53 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -2808,17 +2808,20 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
2808 FC_RPORT_DEVLOSS_PENDING | 2808 FC_RPORT_DEVLOSS_PENDING |
2809 FC_RPORT_DEVLOSS_CALLBK_DONE); 2809 FC_RPORT_DEVLOSS_CALLBK_DONE);
2810 2810
2811 spin_unlock_irqrestore(shost->host_lock, flags);
2812
2811 /* if target, initiate a scan */ 2813 /* if target, initiate a scan */
2812 if (rport->scsi_target_id != -1) { 2814 if (rport->scsi_target_id != -1) {
2815 scsi_target_unblock(&rport->dev);
2816
2817 spin_lock_irqsave(shost->host_lock,
2818 flags);
2813 rport->flags |= FC_RPORT_SCAN_PENDING; 2819 rport->flags |= FC_RPORT_SCAN_PENDING;
2814 scsi_queue_work(shost, 2820 scsi_queue_work(shost,
2815 &rport->scan_work); 2821 &rport->scan_work);
2816 spin_unlock_irqrestore(shost->host_lock, 2822 spin_unlock_irqrestore(shost->host_lock,
2817 flags); 2823 flags);
2818 scsi_target_unblock(&rport->dev); 2824 }
2819 } else
2820 spin_unlock_irqrestore(shost->host_lock,
2821 flags);
2822 2825
2823 fc_bsg_goose_queue(rport); 2826 fc_bsg_goose_queue(rport);
2824 2827
@@ -2876,16 +2879,17 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
2876 if (fci->f->dd_fcrport_size) 2879 if (fci->f->dd_fcrport_size)
2877 memset(rport->dd_data, 0, 2880 memset(rport->dd_data, 0,
2878 fci->f->dd_fcrport_size); 2881 fci->f->dd_fcrport_size);
2882 spin_unlock_irqrestore(shost->host_lock, flags);
2883
2884 if (ids->roles & FC_PORT_ROLE_FCP_TARGET) {
2885 scsi_target_unblock(&rport->dev);
2879 2886
2880 if (rport->roles & FC_PORT_ROLE_FCP_TARGET) {
2881 /* initiate a scan of the target */ 2887 /* initiate a scan of the target */
2888 spin_lock_irqsave(shost->host_lock, flags);
2882 rport->flags |= FC_RPORT_SCAN_PENDING; 2889 rport->flags |= FC_RPORT_SCAN_PENDING;
2883 scsi_queue_work(shost, &rport->scan_work); 2890 scsi_queue_work(shost, &rport->scan_work);
2884 spin_unlock_irqrestore(shost->host_lock, flags); 2891 spin_unlock_irqrestore(shost->host_lock, flags);
2885 scsi_target_unblock(&rport->dev); 2892 }
2886 } else
2887 spin_unlock_irqrestore(shost->host_lock, flags);
2888
2889 return rport; 2893 return rport;
2890 } 2894 }
2891 } 2895 }
@@ -3083,12 +3087,12 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
3083 /* ensure any stgt delete functions are done */ 3087 /* ensure any stgt delete functions are done */
3084 fc_flush_work(shost); 3088 fc_flush_work(shost);
3085 3089
3090 scsi_target_unblock(&rport->dev);
3086 /* initiate a scan of the target */ 3091 /* initiate a scan of the target */
3087 spin_lock_irqsave(shost->host_lock, flags); 3092 spin_lock_irqsave(shost->host_lock, flags);
3088 rport->flags |= FC_RPORT_SCAN_PENDING; 3093 rport->flags |= FC_RPORT_SCAN_PENDING;
3089 scsi_queue_work(shost, &rport->scan_work); 3094 scsi_queue_work(shost, &rport->scan_work);
3090 spin_unlock_irqrestore(shost->host_lock, flags); 3095 spin_unlock_irqrestore(shost->host_lock, flags);
3091 scsi_target_unblock(&rport->dev);
3092 } 3096 }
3093} 3097}
3094EXPORT_SYMBOL(fc_remote_port_rolechg); 3098EXPORT_SYMBOL(fc_remote_port_rolechg);
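All three fc_remote_port paths are restructured the same way: scsi_target_unblock() is called with host_lock dropped, and the lock is re-taken only around the FC_RPORT_SCAN_PENDING flag update and scsi_queue_work(); unblocking before the scan is queued also guarantees the queued scan never runs against a still-blocked target. The resulting ordering:

	scsi_target_unblock(&rport->dev);	/* no spinlock held here */

	spin_lock_irqsave(shost->host_lock, flags);
	rport->flags |= FC_RPORT_SCAN_PENDING;
	scsi_queue_work(shost, &rport->scan_work);
	spin_unlock_irqrestore(shost->host_lock, flags);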
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index a2715c31e754..cf08071a9b6e 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -1010,10 +1010,10 @@ spi_dv_device(struct scsi_device *sdev)
1010 u8 *buffer; 1010 u8 *buffer;
1011 const int len = SPI_MAX_ECHO_BUFFER_SIZE*2; 1011 const int len = SPI_MAX_ECHO_BUFFER_SIZE*2;
1012 1012
1013 if (unlikely(scsi_device_get(sdev))) 1013 if (unlikely(spi_dv_in_progress(starget)))
1014 return; 1014 return;
1015 1015
1016 if (unlikely(spi_dv_in_progress(starget))) 1016 if (unlikely(scsi_device_get(sdev)))
1017 return; 1017 return;
1018 spi_dv_in_progress(starget) = 1; 1018 spi_dv_in_progress(starget) = 1;
1019 1019
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 5ba5c2a9e8e9..6f0a4c612b3b 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -65,6 +65,7 @@
65#include <scsi/scsicam.h> 65#include <scsi/scsicam.h>
66 66
67#include "sd.h" 67#include "sd.h"
68#include "scsi_priv.h"
68#include "scsi_logging.h" 69#include "scsi_logging.h"
69 70
70MODULE_AUTHOR("Eric Youngdale"); 71MODULE_AUTHOR("Eric Youngdale");
@@ -2722,7 +2723,7 @@ static int sd_probe(struct device *dev)
2722 dev_set_drvdata(dev, sdkp); 2723 dev_set_drvdata(dev, sdkp);
2723 2724
2724 get_device(&sdkp->dev); /* prevent release before async_schedule */ 2725 get_device(&sdkp->dev); /* prevent release before async_schedule */
2725 async_schedule(sd_probe_async, sdkp); 2726 async_schedule_domain(sd_probe_async, sdkp, &scsi_sd_probe_domain);
2726 2727
2727 return 0; 2728 return 0;
2728 2729
@@ -2756,7 +2757,7 @@ static int sd_remove(struct device *dev)
2756 sdkp = dev_get_drvdata(dev); 2757 sdkp = dev_get_drvdata(dev);
2757 scsi_autopm_get_device(sdkp->device); 2758 scsi_autopm_get_device(sdkp->device);
2758 2759
2759 async_synchronize_full(); 2760 async_synchronize_full_domain(&scsi_sd_probe_domain);
2760 blk_queue_prep_rq(sdkp->device->request_queue, scsi_prep_fn); 2761 blk_queue_prep_rq(sdkp->device->request_queue, scsi_prep_fn);
2761 blk_queue_unprep_rq(sdkp->device->request_queue, NULL); 2762 blk_queue_unprep_rq(sdkp->device->request_queue, NULL);
2762 device_del(&sdkp->dev); 2763 device_del(&sdkp->dev);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index eacd46bb36b9..9c5c5f2b3962 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -104,7 +104,7 @@ static int scatter_elem_sz_prev = SG_SCATTER_SZ;
104static int sg_add(struct device *, struct class_interface *); 104static int sg_add(struct device *, struct class_interface *);
105static void sg_remove(struct device *, struct class_interface *); 105static void sg_remove(struct device *, struct class_interface *);
106 106
107static DEFINE_MUTEX(sg_mutex); 107static DEFINE_SPINLOCK(sg_open_exclusive_lock);
108 108
109static DEFINE_IDR(sg_index_idr); 109static DEFINE_IDR(sg_index_idr);
110static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock 110static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock
@@ -137,13 +137,15 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
137 char res_used; /* 1 -> using reserve buffer, 0 -> not ... */ 137 char res_used; /* 1 -> using reserve buffer, 0 -> not ... */
138 char orphan; /* 1 -> drop on sight, 0 -> normal */ 138 char orphan; /* 1 -> drop on sight, 0 -> normal */
139 char sg_io_owned; /* 1 -> packet belongs to SG_IO */ 139 char sg_io_owned; /* 1 -> packet belongs to SG_IO */
140 volatile char done; /* 0->before bh, 1->before read, 2->read */ 140 /* done protected by rq_list_lock */
141 char done; /* 0->before bh, 1->before read, 2->read */
141 struct request *rq; 142 struct request *rq;
142 struct bio *bio; 143 struct bio *bio;
143 struct execute_work ew; 144 struct execute_work ew;
144} Sg_request; 145} Sg_request;
145 146
146typedef struct sg_fd { /* holds the state of a file descriptor */ 147typedef struct sg_fd { /* holds the state of a file descriptor */
148 /* sfd_siblings is protected by sg_index_lock */
147 struct list_head sfd_siblings; 149 struct list_head sfd_siblings;
148 struct sg_device *parentdp; /* owning device */ 150 struct sg_device *parentdp; /* owning device */
149 wait_queue_head_t read_wait; /* queue read until command done */ 151 wait_queue_head_t read_wait; /* queue read until command done */
@@ -157,7 +159,6 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
157 Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */ 159 Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */
158 char low_dma; /* as in parent but possibly overridden to 1 */ 160 char low_dma; /* as in parent but possibly overridden to 1 */
159 char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */ 161 char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */
160 volatile char closed; /* 1 -> fd closed but request(s) outstanding */
161 char cmd_q; /* 1 -> allow command queuing, 0 -> don't */ 162 char cmd_q; /* 1 -> allow command queuing, 0 -> don't */
162 char next_cmd_len; /* 0 -> automatic (def), >0 -> use on next write() */ 163 char next_cmd_len; /* 0 -> automatic (def), >0 -> use on next write() */
163 char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */ 164 char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */
@@ -171,9 +172,11 @@ typedef struct sg_device { /* holds the state of each scsi generic device */
171 wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */ 172 wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */
172 int sg_tablesize; /* adapter's max scatter-gather table size */ 173 int sg_tablesize; /* adapter's max scatter-gather table size */
173 u32 index; /* device index number */ 174 u32 index; /* device index number */
175 /* sfds is protected by sg_index_lock */
174 struct list_head sfds; 176 struct list_head sfds;
175 volatile char detached; /* 0->attached, 1->detached pending removal */ 177 volatile char detached; /* 0->attached, 1->detached pending removal */
176 volatile char exclude; /* opened for exclusive access */ 178 /* exclude protected by sg_open_exclusive_lock */
179 char exclude; /* opened for exclusive access */
177 char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */ 180 char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
178 struct gendisk *disk; 181 struct gendisk *disk;
179 struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */ 182 struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */
@@ -221,6 +224,38 @@ static int sg_allow_access(struct file *filp, unsigned char *cmd)
221 return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE); 224 return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
222} 225}
223 226
227static int get_exclude(Sg_device *sdp)
228{
229 unsigned long flags;
230 int ret;
231
232 spin_lock_irqsave(&sg_open_exclusive_lock, flags);
233 ret = sdp->exclude;
234 spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
235 return ret;
236}
237
238static int set_exclude(Sg_device *sdp, char val)
239{
240 unsigned long flags;
241
242 spin_lock_irqsave(&sg_open_exclusive_lock, flags);
243 sdp->exclude = val;
244 spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
245 return val;
246}
247
248static int sfds_list_empty(Sg_device *sdp)
249{
250 unsigned long flags;
251 int ret;
252
253 read_lock_irqsave(&sg_index_lock, flags);
254 ret = list_empty(&sdp->sfds);
255 read_unlock_irqrestore(&sg_index_lock, flags);
256 return ret;
257}
258
224static int 259static int
225sg_open(struct inode *inode, struct file *filp) 260sg_open(struct inode *inode, struct file *filp)
226{ 261{
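With sg_mutex gone, the exclude flag and the sfds list get dedicated accessors that take sg_open_exclusive_lock and sg_index_lock respectively, so no path reads either without the lock. Note that set_exclude() returns the value it stored, which is what lets it sit inside the wait predicate used in sg_open() below — the condition atomically claims exclusivity the moment the device is free:

	/* sketch: claim exclusive access inside the wait predicate */
	res = wait_event_interruptible(sdp->o_excl_wait,
		(!sfds_list_empty(sdp) || get_exclude(sdp)) ?
			0 :			/* busy: keep sleeping */
			set_exclude(sdp, 1));	/* free: claim it, returns 1 */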
@@ -232,7 +267,6 @@ sg_open(struct inode *inode, struct file *filp)
232 int res; 267 int res;
233 int retval; 268 int retval;
234 269
235 mutex_lock(&sg_mutex);
236 nonseekable_open(inode, filp); 270 nonseekable_open(inode, filp);
237 SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags)); 271 SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
238 sdp = sg_get_dev(dev); 272 sdp = sg_get_dev(dev);
@@ -264,25 +298,22 @@ sg_open(struct inode *inode, struct file *filp)
264 retval = -EPERM; /* Can't lock it with read only access */ 298 retval = -EPERM; /* Can't lock it with read only access */
265 goto error_out; 299 goto error_out;
266 } 300 }
267 if (!list_empty(&sdp->sfds) && (flags & O_NONBLOCK)) { 301 if (!sfds_list_empty(sdp) && (flags & O_NONBLOCK)) {
268 retval = -EBUSY; 302 retval = -EBUSY;
269 goto error_out; 303 goto error_out;
270 } 304 }
271 res = 0; 305 res = wait_event_interruptible(sdp->o_excl_wait,
272 __wait_event_interruptible(sdp->o_excl_wait, 306 ((!sfds_list_empty(sdp) || get_exclude(sdp)) ? 0 : set_exclude(sdp, 1)));
273 ((!list_empty(&sdp->sfds) || sdp->exclude) ? 0 : (sdp->exclude = 1)), res);
274 if (res) { 307 if (res) {
275 retval = res; /* -ERESTARTSYS because signal hit process */ 308 retval = res; /* -ERESTARTSYS because signal hit process */
276 goto error_out; 309 goto error_out;
277 } 310 }
278 } else if (sdp->exclude) { /* some other fd has an exclusive lock on dev */ 311 } else if (get_exclude(sdp)) { /* some other fd has an exclusive lock on dev */
279 if (flags & O_NONBLOCK) { 312 if (flags & O_NONBLOCK) {
280 retval = -EBUSY; 313 retval = -EBUSY;
281 goto error_out; 314 goto error_out;
282 } 315 }
283 res = 0; 316 res = wait_event_interruptible(sdp->o_excl_wait, !get_exclude(sdp));
284 __wait_event_interruptible(sdp->o_excl_wait, (!sdp->exclude),
285 res);
286 if (res) { 317 if (res) {
287 retval = res; /* -ERESTARTSYS because signal hit process */ 318 retval = res; /* -ERESTARTSYS because signal hit process */
288 goto error_out; 319 goto error_out;
@@ -292,7 +323,7 @@ sg_open(struct inode *inode, struct file *filp)
292 retval = -ENODEV; 323 retval = -ENODEV;
293 goto error_out; 324 goto error_out;
294 } 325 }
295 if (list_empty(&sdp->sfds)) { /* no existing opens on this device */ 326 if (sfds_list_empty(sdp)) { /* no existing opens on this device */
296 sdp->sgdebug = 0; 327 sdp->sgdebug = 0;
297 q = sdp->device->request_queue; 328 q = sdp->device->request_queue;
298 sdp->sg_tablesize = queue_max_segments(q); 329 sdp->sg_tablesize = queue_max_segments(q);
@@ -301,7 +332,7 @@ sg_open(struct inode *inode, struct file *filp)
301 filp->private_data = sfp; 332 filp->private_data = sfp;
302 else { 333 else {
303 if (flags & O_EXCL) { 334 if (flags & O_EXCL) {
304 sdp->exclude = 0; /* undo if error */ 335 set_exclude(sdp, 0); /* undo if error */
305 wake_up_interruptible(&sdp->o_excl_wait); 336 wake_up_interruptible(&sdp->o_excl_wait);
306 } 337 }
307 retval = -ENOMEM; 338 retval = -ENOMEM;
@@ -317,7 +348,6 @@ sdp_put:
317sg_put: 348sg_put:
318 if (sdp) 349 if (sdp)
319 sg_put_dev(sdp); 350 sg_put_dev(sdp);
320 mutex_unlock(&sg_mutex);
321 return retval; 351 return retval;
322} 352}
323 353
@@ -332,9 +362,7 @@ sg_release(struct inode *inode, struct file *filp)
332 return -ENXIO; 362 return -ENXIO;
333 SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name)); 363 SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
334 364
335 sfp->closed = 1; 365 set_exclude(sdp, 0);
336
337 sdp->exclude = 0;
338 wake_up_interruptible(&sdp->o_excl_wait); 366 wake_up_interruptible(&sdp->o_excl_wait);
339 367
340 scsi_autopm_put_device(sdp->device); 368 scsi_autopm_put_device(sdp->device);
@@ -398,19 +426,14 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
398 retval = -EAGAIN; 426 retval = -EAGAIN;
399 goto free_old_hdr; 427 goto free_old_hdr;
400 } 428 }
401 while (1) { 429 retval = wait_event_interruptible(sfp->read_wait,
402 retval = 0; /* following macro beats race condition */ 430 (sdp->detached ||
403 __wait_event_interruptible(sfp->read_wait, 431 (srp = sg_get_rq_mark(sfp, req_pack_id))));
404 (sdp->detached || 432 if (sdp->detached) {
405 (srp = sg_get_rq_mark(sfp, req_pack_id))), 433 retval = -ENODEV;
406 retval); 434 goto free_old_hdr;
407 if (sdp->detached) { 435 }
408 retval = -ENODEV; 436 if (retval) {
409 goto free_old_hdr;
410 }
411 if (0 == retval)
412 break;
413
414 /* -ERESTARTSYS as signal hit process */ 437 /* -ERESTARTSYS as signal hit process */
415 goto free_old_hdr; 438 goto free_old_hdr;
416 } 439 }
@@ -771,7 +794,18 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
771 return 0; 794 return 0;
772} 795}
773 796
774static int 797static int srp_done(Sg_fd *sfp, Sg_request *srp)
798{
799 unsigned long flags;
800 int ret;
801
802 read_lock_irqsave(&sfp->rq_list_lock, flags);
803 ret = srp->done;
804 read_unlock_irqrestore(&sfp->rq_list_lock, flags);
805 return ret;
806}
807
808static long
775sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) 809sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
776{ 810{
777 void __user *p = (void __user *)arg; 811 void __user *p = (void __user *)arg;
@@ -791,40 +825,30 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
791 825
792 switch (cmd_in) { 826 switch (cmd_in) {
793 case SG_IO: 827 case SG_IO:
794 { 828 if (sdp->detached)
795 int blocking = 1; /* ignore O_NONBLOCK flag */ 829 return -ENODEV;
796 830 if (!scsi_block_when_processing_errors(sdp->device))
797 if (sdp->detached) 831 return -ENXIO;
798 return -ENODEV; 832 if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
799 if (!scsi_block_when_processing_errors(sdp->device)) 833 return -EFAULT;
800 return -ENXIO; 834 result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
801 if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR)) 835 1, read_only, 1, &srp);
802 return -EFAULT; 836 if (result < 0)
803 result = 837 return result;
804 sg_new_write(sfp, filp, p, SZ_SG_IO_HDR, 838 result = wait_event_interruptible(sfp->read_wait,
805 blocking, read_only, 1, &srp); 839 (srp_done(sfp, srp) || sdp->detached));
806 if (result < 0) 840 if (sdp->detached)
807 return result; 841 return -ENODEV;
808 while (1) { 842 write_lock_irq(&sfp->rq_list_lock);
809 result = 0; /* following macro to beat race condition */ 843 if (srp->done) {
810 __wait_event_interruptible(sfp->read_wait, 844 srp->done = 2;
811 (srp->done || sdp->detached), 845 write_unlock_irq(&sfp->rq_list_lock);
812 result);
813 if (sdp->detached)
814 return -ENODEV;
815 write_lock_irq(&sfp->rq_list_lock);
816 if (srp->done) {
817 srp->done = 2;
818 write_unlock_irq(&sfp->rq_list_lock);
819 break;
820 }
821 srp->orphan = 1;
822 write_unlock_irq(&sfp->rq_list_lock);
823 return result; /* -ERESTARTSYS because signal hit process */
824 }
825 result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp); 846 result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
826 return (result < 0) ? result : 0; 847 return (result < 0) ? result : 0;
827 } 848 }
849 srp->orphan = 1;
850 write_unlock_irq(&sfp->rq_list_lock);
851 return result; /* -ERESTARTSYS because signal hit process */
828 case SG_SET_TIMEOUT: 852 case SG_SET_TIMEOUT:
829 result = get_user(val, ip); 853 result = get_user(val, ip);
830 if (result) 854 if (result)
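The SG_IO and sg_read() wait loops collapse into single wait_event_interruptible() calls. The condition is re-evaluated by the waiter on each wakeup, so reading srp->done through the new srp_done() helper, which takes rq_list_lock, keeps the test coherent with the completion side that sets the flag under the same lock:

	/* sketch: lock-safe predicate for wait_event_interruptible() */
	result = wait_event_interruptible(sfp->read_wait,
			srp_done(sfp, srp) || sdp->detached);
	if (sdp->detached)
		return -ENODEV;		/* device went away while sleeping */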
@@ -1091,18 +1115,6 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
1091 } 1115 }
1092} 1116}
1093 1117
1094static long
1095sg_unlocked_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
1096{
1097 int ret;
1098
1099 mutex_lock(&sg_mutex);
1100 ret = sg_ioctl(filp, cmd_in, arg);
1101 mutex_unlock(&sg_mutex);
1102
1103 return ret;
1104}
1105
1106#ifdef CONFIG_COMPAT 1118#ifdef CONFIG_COMPAT
1107static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) 1119static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
1108{ 1120{
@@ -1136,8 +1148,11 @@ sg_poll(struct file *filp, poll_table * wait)
1136 int count = 0; 1148 int count = 0;
1137 unsigned long iflags; 1149 unsigned long iflags;
1138 1150
1139 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)) 1151 sfp = filp->private_data;
1140 || sfp->closed) 1152 if (!sfp)
1153 return POLLERR;
1154 sdp = sfp->parentdp;
1155 if (!sdp)
1141 return POLLERR; 1156 return POLLERR;
1142 poll_wait(filp, &sfp->read_wait, wait); 1157 poll_wait(filp, &sfp->read_wait, wait);
1143 read_lock_irqsave(&sfp->rq_list_lock, iflags); 1158 read_lock_irqsave(&sfp->rq_list_lock, iflags);
@@ -1347,7 +1362,7 @@ static const struct file_operations sg_fops = {
1347 .read = sg_read, 1362 .read = sg_read,
1348 .write = sg_write, 1363 .write = sg_write,
1349 .poll = sg_poll, 1364 .poll = sg_poll,
1350 .unlocked_ioctl = sg_unlocked_ioctl, 1365 .unlocked_ioctl = sg_ioctl,
1351#ifdef CONFIG_COMPAT 1366#ifdef CONFIG_COMPAT
1352 .compat_ioctl = sg_compat_ioctl, 1367 .compat_ioctl = sg_compat_ioctl,
1353#endif 1368#endif
@@ -2312,7 +2327,7 @@ struct sg_proc_leaf {
2312 const struct file_operations * fops; 2327 const struct file_operations * fops;
2313}; 2328};
2314 2329
2315static struct sg_proc_leaf sg_proc_leaf_arr[] = { 2330static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
2316 {"allow_dio", &adio_fops}, 2331 {"allow_dio", &adio_fops},
2317 {"debug", &debug_fops}, 2332 {"debug", &debug_fops},
2318 {"def_reserved_size", &dressz_fops}, 2333 {"def_reserved_size", &dressz_fops},
@@ -2332,7 +2347,7 @@ sg_proc_init(void)
2332 if (!sg_proc_sgp) 2347 if (!sg_proc_sgp)
2333 return 1; 2348 return 1;
2334 for (k = 0; k < num_leaves; ++k) { 2349 for (k = 0; k < num_leaves; ++k) {
2335 struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k]; 2350 const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
2336 umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO; 2351 umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
2337 proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops); 2352 proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
2338 } 2353 }
@@ -2533,9 +2548,9 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
2533 fp->reserve.bufflen, 2548 fp->reserve.bufflen,
2534 (int) fp->reserve.k_use_sg, 2549 (int) fp->reserve.k_use_sg,
2535 (int) fp->low_dma); 2550 (int) fp->low_dma);
2536 seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=%d\n", 2551 seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
2537 (int) fp->cmd_q, (int) fp->force_packid, 2552 (int) fp->cmd_q, (int) fp->force_packid,
2538 (int) fp->keep_orphan, (int) fp->closed); 2553 (int) fp->keep_orphan);
2539 for (m = 0, srp = fp->headrp; 2554 for (m = 0, srp = fp->headrp;
2540 srp != NULL; 2555 srp != NULL;
2541 ++m, srp = srp->nextrp) { 2556 ++m, srp = srp->nextrp) {
@@ -2612,7 +2627,7 @@ static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
2612 scsidp->lun, 2627 scsidp->lun,
2613 scsidp->host->hostt->emulated); 2628 scsidp->host->hostt->emulated);
2614 seq_printf(s, " sg_tablesize=%d excl=%d\n", 2629 seq_printf(s, " sg_tablesize=%d excl=%d\n",
2615 sdp->sg_tablesize, sdp->exclude); 2630 sdp->sg_tablesize, get_exclude(sdp));
2616 sg_proc_debug_helper(s, sdp); 2631 sg_proc_debug_helper(s, sdp);
2617 } 2632 }
2618 read_unlock_irqrestore(&sg_index_lock, iflags); 2633 read_unlock_irqrestore(&sg_index_lock, iflags);
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index ea35632b986c..b548923785ed 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -35,8 +35,8 @@ struct st_request {
35/* The tape buffer descriptor. */ 35/* The tape buffer descriptor. */
36struct st_buffer { 36struct st_buffer {
37 unsigned char dma; /* DMA-able buffer */ 37 unsigned char dma; /* DMA-able buffer */
38 unsigned char do_dio; /* direct i/o set up? */
39 unsigned char cleared; /* internal buffer cleared after open? */ 38 unsigned char cleared; /* internal buffer cleared after open? */
39 unsigned short do_dio; /* direct i/o set up? */
40 int buffer_size; 40 int buffer_size;
41 int buffer_blocks; 41 int buffer_blocks;
42 int buffer_bytes; 42 int buffer_bytes;
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 83a1972a1999..528d52beaa1c 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -785,12 +785,22 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
785 /* 785 /*
786 * If there is an error; offline the device since all 786 * If there is an error; offline the device since all
787 * error recovery strategies would have already been 787 * error recovery strategies would have already been
788 * deployed on the host side. 788 * deployed on the host side. However, if the command
789 * were a pass-through command deal with it appropriately.
789 */ 790 */
790 if (vm_srb->srb_status == SRB_STATUS_ERROR) 791 scmnd->result = vm_srb->scsi_status;
791 scmnd->result = DID_TARGET_FAILURE << 16; 792
792 else 793 if (vm_srb->srb_status == SRB_STATUS_ERROR) {
793 scmnd->result = vm_srb->scsi_status; 794 switch (scmnd->cmnd[0]) {
795 case ATA_16:
796 case ATA_12:
797 set_host_byte(scmnd, DID_PASSTHROUGH);
798 break;
799 default:
800 set_host_byte(scmnd, DID_TARGET_FAILURE);
801 }
802 }
803
794 804
795 /* 805 /*
796 * If the LUN is invalid; remove the device. 806 * If the LUN is invalid; remove the device.
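storvsc now preserves the SCSI status byte and only overrides the host byte: when Hyper-V reports SRB_STATUS_ERROR for an ATA_12/ATA_16 pass-through CDB, the command is flagged DID_PASSTHROUGH so the issuer sees the raw outcome instead of the device being offlined; anything else still becomes DID_TARGET_FAILURE. Condensed, using set_host_byte() (it writes bits 16..23 of scmnd->result):

	scmnd->result = vm_srb->scsi_status;
	if (vm_srb->srb_status == SRB_STATUS_ERROR)
		set_host_byte(scmnd,
			(scmnd->cmnd[0] == ATA_16 || scmnd->cmnd[0] == ATA_12) ?
			DID_PASSTHROUGH : DID_TARGET_FAILURE);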
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 52b96e8bf92e..4e010b727818 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1032,11 +1032,11 @@ static int ufshcd_initialize_hba(struct ufs_hba *hba)
1032 return -EIO; 1032 return -EIO;
1033 1033
1034 /* Configure UTRL and UTMRL base address registers */ 1034 /* Configure UTRL and UTMRL base address registers */
1035 writel(hba->utrdl_dma_addr,
1036 (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_BASE_L));
1037 writel(lower_32_bits(hba->utrdl_dma_addr), 1035 writel(lower_32_bits(hba->utrdl_dma_addr),
1036 (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_BASE_L));
1037 writel(upper_32_bits(hba->utrdl_dma_addr),
1038 (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_BASE_H)); 1038 (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_BASE_H));
1039 writel(hba->utmrdl_dma_addr, 1039 writel(lower_32_bits(hba->utmrdl_dma_addr),
1040 (hba->mmio_base + REG_UTP_TASK_REQ_LIST_BASE_L)); 1040 (hba->mmio_base + REG_UTP_TASK_REQ_LIST_BASE_L));
1041 writel(upper_32_bits(hba->utmrdl_dma_addr), 1041 writel(upper_32_bits(hba->utmrdl_dma_addr),
1042 (hba->mmio_base + REG_UTP_TASK_REQ_LIST_BASE_H)); 1042 (hba->mmio_base + REG_UTP_TASK_REQ_LIST_BASE_H));
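The old code wrote hba->utrdl_dma_addr, a dma_addr_t that may be 64 bits wide, straight into the 32-bit low register. Both list base addresses are now split explicitly (REG_BASE_L/H stand in for the UTRL/UTMRL base registers):

	/* sketch: program a 64-bit DMA base into a low/high register pair */
	writel(lower_32_bits(base), mmio + REG_BASE_L);
	writel(upper_32_bits(base), mmio + REG_BASE_H);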
@@ -1160,7 +1160,7 @@ static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index)
1160 task_result = be32_to_cpu(task_rsp_upiup->header.dword_1); 1160 task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
1161 task_result = ((task_result & MASK_TASK_RESPONSE) >> 8); 1161 task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);
1162 1162
1163 if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL || 1163 if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL &&
1164 task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) 1164 task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED)
1165 task_result = FAILED; 1165 task_result = FAILED;
1166 } else { 1166 } else {
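The task-management completion test was a tautology: since the two UPIU codes differ, result != COMPL || result != SUCCEEDED holds for every value, so every response was forced to FAILED. De Morgan gives the intended "neither" test:

	/* !(x == A || x == B)  ==  (x != A && x != B) */
	if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL &&
	    task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED)
		task_result = FAILED;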
diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h
index 988ba06b3ad6..c1260d80ef30 100644
--- a/include/scsi/iscsi_proto.h
+++ b/include/scsi/iscsi_proto.h
@@ -661,6 +661,8 @@ struct iscsi_reject {
661 661
662#define ISCSI_DEF_TIME2WAIT 2 662#define ISCSI_DEF_TIME2WAIT 2
663 663
664#define ISCSI_NAME_LEN 224
665
664/************************* RFC 3720 End *****************************/ 666/************************* RFC 3720 End *****************************/
665 667
666#endif /* ISCSI_PROTO_H */ 668#endif /* ISCSI_PROTO_H */
diff --git a/include/scsi/sas.h b/include/scsi/sas.h
index a577a833603d..be3eb0bf1ac0 100644
--- a/include/scsi/sas.h
+++ b/include/scsi/sas.h
@@ -103,6 +103,7 @@ enum sas_dev_type {
103}; 103};
104 104
105enum sas_protocol { 105enum sas_protocol {
106 SAS_PROTOCOL_NONE = 0,
106 SAS_PROTOCOL_SATA = 0x01, 107 SAS_PROTOCOL_SATA = 0x01,
107 SAS_PROTOCOL_SMP = 0x02, 108 SAS_PROTOCOL_SMP = 0x02,
108 SAS_PROTOCOL_STP = 0x04, 109 SAS_PROTOCOL_STP = 0x04,