aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2013-04-30 16:16:38 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-04-30 16:16:38 -0400
commit5aa1c98862d3f365d9cf6d0833d5dc127d2a76e7 (patch)
tree89cbf0b67634ecc43a863a6ca058ff749df3cce7
parent6da6dc2380c3cfe8d6b59d7c3c55fdd7a521fe6c (diff)
parent9e45dd73234af9a59613dc2989dcc2df2dab847f (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull first round of SCSI updates from James "Jej B" Bottomley: "The patch set is mostly driver updates (qla4, qla2 [ISF support updates], lpfc, aacraid [dual firmware image support]) and a few bug fixes" * tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (47 commits) [SCSI] iscsi_tcp: support PF_MEMALLOC/__GFP_MEMALLOC [SCSI] libiscsi: avoid unnecessary multiple NULL assignments [SCSI] qla4xxx: Update driver version to 5.03.00-k8 [SCSI] qla4xxx: Added print statements to display AENs [SCSI] qla4xxx: Use correct value for max flash node entries [SCSI] qla4xxx: Restrict logout from boot target session using session id [SCSI] qla4xxx: Use correct flash ddb offset for ISP40XX [SCSI] isci: add CONFIG_PM_SLEEP to suspend/resume functions [SCSI] scsi_dh_alua: Add module parameter to allow failover to non preferred path without STPG [SCSI] qla2xxx: Update the driver version to 8.05.00.03-k. [SCSI] qla2xxx: Obtain loopback iteration count from bsg request. [SCSI] qla2xxx: Add clarifying printk to thermal access fail cases. [SCSI] qla2xxx: Remove duplicated include form qla_isr.c [SCSI] qla2xxx: Enhancements to support ISPFx00. [SCSI] qla4xxx: Update driver version to 5.03.00-k7 [SCSI] qla4xxx: Replace dev type macros with generic portal type macros [SCSI] scsi_transport_iscsi: Declare portal type string macros for generic use [SCSI] qla4xxx: Add flash node mgmt support [SCSI] libiscsi: export function iscsi_switch_str_param [SCSI] scsi_transport_iscsi: Add flash node mgmt support ...
-rw-r--r--MAINTAINERS2
-rw-r--r--drivers/scsi/aacraid/aacraid.h6
-rw-r--r--drivers/scsi/aacraid/comminit.c2
-rw-r--r--drivers/scsi/aacraid/src.c26
-rw-r--r--drivers/scsi/csiostor/csio_hw.c5
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c17
-rw-r--r--drivers/scsi/isci/init.c6
-rw-r--r--drivers/scsi/iscsi_tcp.c18
-rw-r--r--drivers/scsi/libiscsi.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c12
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c1107
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c21
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c68
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c106
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c24
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c1
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c115
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/qla2xxx/Makefile2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c35
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c154
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c27
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h236
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h47
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c16
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c52
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h41
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c67
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c20
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c3476
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.h510
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c212
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/qla4xxx/ql4_83xx.c28
-rw-r--r--drivers/scsi/qla4xxx/ql4_83xx.h10
-rw-r--r--drivers/scsi/qla4xxx/ql4_dbg.h7
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h19
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h38
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h9
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c47
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c120
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c19
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c1731
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h2
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c1002
-rw-r--r--include/scsi/iscsi_if.h117
-rw-r--r--include/scsi/libiscsi.h1
-rw-r--r--include/scsi/scsi_transport_iscsi.h151
53 files changed, 8964 insertions, 801 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 9cc4bd2a5a10..ea8f65fc6fa3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6236,7 +6236,7 @@ S: Supported
6236F: drivers/scsi/pmcraid.* 6236F: drivers/scsi/pmcraid.*
6237 6237
6238PMC SIERRA PM8001 DRIVER 6238PMC SIERRA PM8001 DRIVER
6239M: jack_wang@usish.com 6239M: xjtuwjp@gmail.com
6240M: lindar_liu@usish.com 6240M: lindar_liu@usish.com
6241L: linux-scsi@vger.kernel.org 6241L: linux-scsi@vger.kernel.org
6242S: Supported 6242S: Supported
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index a6f7190c09a4..9323d058706b 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,7 +12,7 @@
12 *----------------------------------------------------------------------------*/ 12 *----------------------------------------------------------------------------*/
13 13
14#ifndef AAC_DRIVER_BUILD 14#ifndef AAC_DRIVER_BUILD
15# define AAC_DRIVER_BUILD 30000 15# define AAC_DRIVER_BUILD 30200
16# define AAC_DRIVER_BRANCH "-ms" 16# define AAC_DRIVER_BRANCH "-ms"
17#endif 17#endif
18#define MAXIMUM_NUM_CONTAINERS 32 18#define MAXIMUM_NUM_CONTAINERS 32
@@ -1918,6 +1918,10 @@ extern struct aac_common aac_config;
1918#define MONITOR_PANIC 0x00000020 1918#define MONITOR_PANIC 0x00000020
1919#define KERNEL_UP_AND_RUNNING 0x00000080 1919#define KERNEL_UP_AND_RUNNING 0x00000080
1920#define KERNEL_PANIC 0x00000100 1920#define KERNEL_PANIC 0x00000100
1921#define FLASH_UPD_PENDING 0x00002000
1922#define FLASH_UPD_SUCCESS 0x00004000
1923#define FLASH_UPD_FAILED 0x00008000
1924#define FWUPD_TIMEOUT (5 * 60)
1921 1925
1922/* 1926/*
1923 * Doorbell bit defines 1927 * Doorbell bit defines
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 3f759957f4b4..177b094c7792 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -214,7 +214,7 @@ int aac_send_shutdown(struct aac_dev * dev)
214 cmd = (struct aac_close *) fib_data(fibctx); 214 cmd = (struct aac_close *) fib_data(fibctx);
215 215
216 cmd->command = cpu_to_le32(VM_CloseAll); 216 cmd->command = cpu_to_le32(VM_CloseAll);
217 cmd->cid = cpu_to_le32(0xffffffff); 217 cmd->cid = cpu_to_le32(0xfffffffe);
218 218
219 status = aac_fib_send(ContainerCommand, 219 status = aac_fib_send(ContainerCommand,
220 fibctx, 220 fibctx,
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index e2e349204e7d..0f56d8d7524f 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -703,6 +703,28 @@ int aac_srcv_init(struct aac_dev *dev)
703 !aac_src_restart_adapter(dev, 0)) 703 !aac_src_restart_adapter(dev, 0))
704 ++restart; 704 ++restart;
705 /* 705 /*
706 * Check to see if flash update is running.
707 * Wait for the adapter to be up and running. Wait up to 5 minutes
708 */
709 status = src_readl(dev, MUnit.OMR);
710 if (status & FLASH_UPD_PENDING) {
711 start = jiffies;
712 do {
713 status = src_readl(dev, MUnit.OMR);
714 if (time_after(jiffies, start+HZ*FWUPD_TIMEOUT)) {
715 printk(KERN_ERR "%s%d: adapter flash update failed.\n",
716 dev->name, instance);
717 goto error_iounmap;
718 }
719 } while (!(status & FLASH_UPD_SUCCESS) &&
720 !(status & FLASH_UPD_FAILED));
721 /* Delay 10 seconds.
722 * Because right now FW is doing a soft reset,
723 * do not read scratch pad register at this time
724 */
725 ssleep(10);
726 }
727 /*
706 * Check to see if the board panic'd while booting. 728 * Check to see if the board panic'd while booting.
707 */ 729 */
708 status = src_readl(dev, MUnit.OMR); 730 status = src_readl(dev, MUnit.OMR);
@@ -730,7 +752,9 @@ int aac_srcv_init(struct aac_dev *dev)
730 /* 752 /*
731 * Wait for the adapter to be up and running. Wait up to 3 minutes 753 * Wait for the adapter to be up and running. Wait up to 3 minutes
732 */ 754 */
733 while (!((status = src_readl(dev, MUnit.OMR)) & KERNEL_UP_AND_RUNNING)) { 755 while (!((status = src_readl(dev, MUnit.OMR)) &
756 KERNEL_UP_AND_RUNNING) ||
757 status == 0xffffffff) {
734 if ((restart && 758 if ((restart &&
735 (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) || 759 (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
736 time_after(jiffies, start+HZ*startup_timeout)) { 760 time_after(jiffies, start+HZ*startup_timeout)) {
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index bdd78fb4fc70..7dbaf58fab9f 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -3892,7 +3892,6 @@ csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len,
3892 struct csio_fl_dma_buf *flb, void *priv) 3892 struct csio_fl_dma_buf *flb, void *priv)
3893{ 3893{
3894 __u8 op; 3894 __u8 op;
3895 __be64 *data;
3896 void *msg = NULL; 3895 void *msg = NULL;
3897 uint32_t msg_len = 0; 3896 uint32_t msg_len = 0;
3898 bool msg_sg = 0; 3897 bool msg_sg = 0;
@@ -3908,8 +3907,6 @@ csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len,
3908 msg = (void *) flb; 3907 msg = (void *) flb;
3909 msg_len = flb->totlen; 3908 msg_len = flb->totlen;
3910 msg_sg = 1; 3909 msg_sg = 1;
3911
3912 data = (__be64 *) msg;
3913 } else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) { 3910 } else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) {
3914 3911
3915 CSIO_INC_STATS(hw, n_cpl_fw6_msg); 3912 CSIO_INC_STATS(hw, n_cpl_fw6_msg);
@@ -3917,8 +3914,6 @@ csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len,
3917 msg = (void *)((uintptr_t)wr + sizeof(__be64)); 3914 msg = (void *)((uintptr_t)wr + sizeof(__be64));
3918 msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) : 3915 msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) :
3919 sizeof(struct cpl_fw4_msg); 3916 sizeof(struct cpl_fw4_msg);
3920
3921 data = (__be64 *) msg;
3922 } else { 3917 } else {
3923 csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op); 3918 csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op);
3924 CSIO_INC_STATS(hw, n_cpl_unexp); 3919 CSIO_INC_STATS(hw, n_cpl_unexp);
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 6f4d8e6f32f1..68adb8955d2d 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -232,13 +232,13 @@ static void stpg_endio(struct request *req, int error)
232 struct scsi_sense_hdr sense_hdr; 232 struct scsi_sense_hdr sense_hdr;
233 unsigned err = SCSI_DH_OK; 233 unsigned err = SCSI_DH_OK;
234 234
235 if (error || host_byte(req->errors) != DID_OK || 235 if (host_byte(req->errors) != DID_OK ||
236 msg_byte(req->errors) != COMMAND_COMPLETE) { 236 msg_byte(req->errors) != COMMAND_COMPLETE) {
237 err = SCSI_DH_IO; 237 err = SCSI_DH_IO;
238 goto done; 238 goto done;
239 } 239 }
240 240
241 if (h->senselen > 0) { 241 if (req->sense_len > 0) {
242 err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, 242 err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
243 &sense_hdr); 243 &sense_hdr);
244 if (!err) { 244 if (!err) {
@@ -255,7 +255,9 @@ static void stpg_endio(struct request *req, int error)
255 ALUA_DH_NAME, sense_hdr.sense_key, 255 ALUA_DH_NAME, sense_hdr.sense_key,
256 sense_hdr.asc, sense_hdr.ascq); 256 sense_hdr.asc, sense_hdr.ascq);
257 err = SCSI_DH_IO; 257 err = SCSI_DH_IO;
258 } 258 } else if (error)
259 err = SCSI_DH_IO;
260
259 if (err == SCSI_DH_OK) { 261 if (err == SCSI_DH_OK) {
260 h->state = TPGS_STATE_OPTIMIZED; 262 h->state = TPGS_STATE_OPTIMIZED;
261 sdev_printk(KERN_INFO, h->sdev, 263 sdev_printk(KERN_INFO, h->sdev,
@@ -710,6 +712,10 @@ static int alua_set_params(struct scsi_device *sdev, const char *params)
710 return result; 712 return result;
711} 713}
712 714
715static uint optimize_stpg;
716module_param(optimize_stpg, uint, S_IRUGO|S_IWUSR);
717MODULE_PARM_DESC(optimize_stpg, "Allow use of a non-optimized path, rather than sending a STPG, when implicit TPGS is supported (0=No,1=Yes). Default is 0.");
718
713/* 719/*
714 * alua_activate - activate a path 720 * alua_activate - activate a path
715 * @sdev: device on the path to be activated 721 * @sdev: device on the path to be activated
@@ -731,6 +737,9 @@ static int alua_activate(struct scsi_device *sdev,
731 if (err != SCSI_DH_OK) 737 if (err != SCSI_DH_OK)
732 goto out; 738 goto out;
733 739
740 if (optimize_stpg)
741 h->flags |= ALUA_OPTIMIZE_STPG;
742
734 if (h->tpgs & TPGS_MODE_EXPLICIT) { 743 if (h->tpgs & TPGS_MODE_EXPLICIT) {
735 switch (h->state) { 744 switch (h->state) {
736 case TPGS_STATE_NONOPTIMIZED: 745 case TPGS_STATE_NONOPTIMIZED:
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 2839baa82a5a..d25d0d859f05 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -721,7 +721,7 @@ static void isci_pci_remove(struct pci_dev *pdev)
721 } 721 }
722} 722}
723 723
724#ifdef CONFIG_PM 724#ifdef CONFIG_PM_SLEEP
725static int isci_suspend(struct device *dev) 725static int isci_suspend(struct device *dev)
726{ 726{
727 struct pci_dev *pdev = to_pci_dev(dev); 727 struct pci_dev *pdev = to_pci_dev(dev);
@@ -770,18 +770,16 @@ static int isci_resume(struct device *dev)
770 770
771 return 0; 771 return 0;
772} 772}
773#endif
773 774
774static SIMPLE_DEV_PM_OPS(isci_pm_ops, isci_suspend, isci_resume); 775static SIMPLE_DEV_PM_OPS(isci_pm_ops, isci_suspend, isci_resume);
775#endif
776 776
777static struct pci_driver isci_pci_driver = { 777static struct pci_driver isci_pci_driver = {
778 .name = DRV_NAME, 778 .name = DRV_NAME,
779 .id_table = isci_id_table, 779 .id_table = isci_id_table,
780 .probe = isci_pci_probe, 780 .probe = isci_pci_probe,
781 .remove = isci_pci_remove, 781 .remove = isci_pci_remove,
782#ifdef CONFIG_PM
783 .driver.pm = &isci_pm_ops, 782 .driver.pm = &isci_pm_ops,
784#endif
785}; 783};
786 784
787static __init int isci_init(void) 785static __init int isci_init(void)
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 1b91ca0dc1e3..9e2588a6881c 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -370,17 +370,24 @@ static inline int iscsi_sw_tcp_xmit_qlen(struct iscsi_conn *conn)
370static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task) 370static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
371{ 371{
372 struct iscsi_conn *conn = task->conn; 372 struct iscsi_conn *conn = task->conn;
373 int rc; 373 unsigned long pflags = current->flags;
374 int rc = 0;
375
376 current->flags |= PF_MEMALLOC;
374 377
375 while (iscsi_sw_tcp_xmit_qlen(conn)) { 378 while (iscsi_sw_tcp_xmit_qlen(conn)) {
376 rc = iscsi_sw_tcp_xmit(conn); 379 rc = iscsi_sw_tcp_xmit(conn);
377 if (rc == 0) 380 if (rc == 0) {
378 return -EAGAIN; 381 rc = -EAGAIN;
382 break;
383 }
379 if (rc < 0) 384 if (rc < 0)
380 return rc; 385 break;
386 rc = 0;
381 } 387 }
382 388
383 return 0; 389 tsk_restore_flags(current, pflags, PF_MEMALLOC);
390 return rc;
384} 391}
385 392
386/* 393/*
@@ -665,6 +672,7 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
665 sk->sk_reuse = SK_CAN_REUSE; 672 sk->sk_reuse = SK_CAN_REUSE;
666 sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */ 673 sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
667 sk->sk_allocation = GFP_ATOMIC; 674 sk->sk_allocation = GFP_ATOMIC;
675 sk_set_memalloc(sk);
668 676
669 iscsi_sw_tcp_conn_set_callbacks(conn); 677 iscsi_sw_tcp_conn_set_callbacks(conn);
670 tcp_sw_conn->sendpage = tcp_sw_conn->sock->ops->sendpage; 678 tcp_sw_conn->sendpage = tcp_sw_conn->sock->ops->sendpage;
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 82c3fd4bc938..5de946984500 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -507,7 +507,6 @@ static void iscsi_free_task(struct iscsi_task *task)
507 kfifo_in(&session->cmdpool.queue, (void*)&task, sizeof(void*)); 507 kfifo_in(&session->cmdpool.queue, (void*)&task, sizeof(void*));
508 508
509 if (sc) { 509 if (sc) {
510 task->sc = NULL;
511 /* SCSI eh reuses commands to verify us */ 510 /* SCSI eh reuses commands to verify us */
512 sc->SCp.ptr = NULL; 511 sc->SCp.ptr = NULL;
513 /* 512 /*
@@ -3142,7 +3141,7 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
3142} 3141}
3143EXPORT_SYMBOL_GPL(iscsi_conn_bind); 3142EXPORT_SYMBOL_GPL(iscsi_conn_bind);
3144 3143
3145static int iscsi_switch_str_param(char **param, char *new_val_buf) 3144int iscsi_switch_str_param(char **param, char *new_val_buf)
3146{ 3145{
3147 char *new_val; 3146 char *new_val;
3148 3147
@@ -3159,6 +3158,7 @@ static int iscsi_switch_str_param(char **param, char *new_val_buf)
3159 *param = new_val; 3158 *param = new_val;
3160 return 0; 3159 return 0;
3161} 3160}
3161EXPORT_SYMBOL_GPL(iscsi_switch_str_param);
3162 3162
3163int iscsi_set_param(struct iscsi_cls_conn *cls_conn, 3163int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
3164 enum iscsi_param param, char *buf, int buflen) 3164 enum iscsi_param param, char *buf, int buflen)
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index a364cae9e984..9290713af253 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -692,7 +692,7 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
692 */ 692 */
693 for (i = 0; i < psli->num_rings; i++) { 693 for (i = 0; i < psli->num_rings; i++) {
694 pring = &psli->ring[i]; 694 pring = &psli->ring[i];
695 while (pring->txcmplq_cnt) { 695 while (!list_empty(&pring->txcmplq)) {
696 msleep(10); 696 msleep(10);
697 if (cnt++ > 500) { /* 5 secs */ 697 if (cnt++ > 500) { /* 5 secs */
698 lpfc_printf_log(phba, 698 lpfc_printf_log(phba,
@@ -2302,11 +2302,17 @@ static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL);
2302LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2, 2302LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
2303 "FCF Fast failover=1 Priority failover=2"); 2303 "FCF Fast failover=1 Priority failover=2");
2304 2304
2305int lpfc_enable_rrq; 2305int lpfc_enable_rrq = 2;
2306module_param(lpfc_enable_rrq, int, S_IRUGO); 2306module_param(lpfc_enable_rrq, int, S_IRUGO);
2307MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality"); 2307MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality");
2308lpfc_param_show(enable_rrq); 2308lpfc_param_show(enable_rrq);
2309lpfc_param_init(enable_rrq, 0, 0, 1); 2309/*
2310# lpfc_enable_rrq: Track XRI/OXID reuse after IO failures
2311# 0x0 = disabled, XRI/OXID use not tracked.
2312# 0x1 = XRI/OXID reuse is timed with ratov, RRQ sent.
2313# 0x2 = XRI/OXID reuse is timed with ratov, No RRQ sent.
2314*/
2315lpfc_param_init(enable_rrq, 2, 0, 2);
2310static DEVICE_ATTR(lpfc_enable_rrq, S_IRUGO, lpfc_enable_rrq_show, NULL); 2316static DEVICE_ATTR(lpfc_enable_rrq, S_IRUGO, lpfc_enable_rrq_show, NULL);
2311 2317
2312/* 2318/*
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index f5d106456f1d..888666892004 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -64,18 +64,14 @@ struct lpfc_bsg_event {
64 struct list_head events_to_get; 64 struct list_head events_to_get;
65 struct list_head events_to_see; 65 struct list_head events_to_see;
66 66
67 /* job waiting for this event to finish */ 67 /* driver data associated with the job */
68 struct fc_bsg_job *set_job; 68 void *dd_data;
69}; 69};
70 70
71struct lpfc_bsg_iocb { 71struct lpfc_bsg_iocb {
72 struct lpfc_iocbq *cmdiocbq; 72 struct lpfc_iocbq *cmdiocbq;
73 struct lpfc_iocbq *rspiocbq; 73 struct lpfc_dmabuf *rmp;
74 struct lpfc_dmabuf *bmp;
75 struct lpfc_nodelist *ndlp; 74 struct lpfc_nodelist *ndlp;
76
77 /* job waiting for this iocb to finish */
78 struct fc_bsg_job *set_job;
79}; 75};
80 76
81struct lpfc_bsg_mbox { 77struct lpfc_bsg_mbox {
@@ -86,20 +82,13 @@ struct lpfc_bsg_mbox {
86 uint32_t mbOffset; /* from app */ 82 uint32_t mbOffset; /* from app */
87 uint32_t inExtWLen; /* from app */ 83 uint32_t inExtWLen; /* from app */
88 uint32_t outExtWLen; /* from app */ 84 uint32_t outExtWLen; /* from app */
89
90 /* job waiting for this mbox command to finish */
91 struct fc_bsg_job *set_job;
92}; 85};
93 86
94#define MENLO_DID 0x0000FC0E 87#define MENLO_DID 0x0000FC0E
95 88
96struct lpfc_bsg_menlo { 89struct lpfc_bsg_menlo {
97 struct lpfc_iocbq *cmdiocbq; 90 struct lpfc_iocbq *cmdiocbq;
98 struct lpfc_iocbq *rspiocbq; 91 struct lpfc_dmabuf *rmp;
99 struct lpfc_dmabuf *bmp;
100
101 /* job waiting for this iocb to finish */
102 struct fc_bsg_job *set_job;
103}; 92};
104 93
105#define TYPE_EVT 1 94#define TYPE_EVT 1
@@ -108,6 +97,7 @@ struct lpfc_bsg_menlo {
108#define TYPE_MENLO 4 97#define TYPE_MENLO 4
109struct bsg_job_data { 98struct bsg_job_data {
110 uint32_t type; 99 uint32_t type;
100 struct fc_bsg_job *set_job; /* job waiting for this iocb to finish */
111 union { 101 union {
112 struct lpfc_bsg_event *evt; 102 struct lpfc_bsg_event *evt;
113 struct lpfc_bsg_iocb iocb; 103 struct lpfc_bsg_iocb iocb;
@@ -141,6 +131,138 @@ struct lpfc_dmabufext {
141 uint32_t flag; 131 uint32_t flag;
142}; 132};
143 133
134static void
135lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
136{
137 struct lpfc_dmabuf *mlast, *next_mlast;
138
139 if (mlist) {
140 list_for_each_entry_safe(mlast, next_mlast, &mlist->list,
141 list) {
142 lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
143 list_del(&mlast->list);
144 kfree(mlast);
145 }
146 lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
147 kfree(mlist);
148 }
149 return;
150}
151
152static struct lpfc_dmabuf *
153lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size,
154 int outbound_buffers, struct ulp_bde64 *bpl,
155 int *bpl_entries)
156{
157 struct lpfc_dmabuf *mlist = NULL;
158 struct lpfc_dmabuf *mp;
159 unsigned int bytes_left = size;
160
161 /* Verify we can support the size specified */
162 if (!size || (size > (*bpl_entries * LPFC_BPL_SIZE)))
163 return NULL;
164
165 /* Determine the number of dma buffers to allocate */
166 *bpl_entries = (size % LPFC_BPL_SIZE ? size/LPFC_BPL_SIZE + 1 :
167 size/LPFC_BPL_SIZE);
168
169 /* Allocate dma buffer and place in BPL passed */
170 while (bytes_left) {
171 /* Allocate dma buffer */
172 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
173 if (!mp) {
174 if (mlist)
175 lpfc_free_bsg_buffers(phba, mlist);
176 return NULL;
177 }
178
179 INIT_LIST_HEAD(&mp->list);
180 mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
181
182 if (!mp->virt) {
183 kfree(mp);
184 if (mlist)
185 lpfc_free_bsg_buffers(phba, mlist);
186 return NULL;
187 }
188
189 /* Queue it to a linked list */
190 if (!mlist)
191 mlist = mp;
192 else
193 list_add_tail(&mp->list, &mlist->list);
194
195 /* Add buffer to buffer pointer list */
196 if (outbound_buffers)
197 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
198 else
199 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
200 bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
201 bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
202 bpl->tus.f.bdeSize = (uint16_t)
203 (bytes_left >= LPFC_BPL_SIZE ? LPFC_BPL_SIZE :
204 bytes_left);
205 bytes_left -= bpl->tus.f.bdeSize;
206 bpl->tus.w = le32_to_cpu(bpl->tus.w);
207 bpl++;
208 }
209 return mlist;
210}
211
212static unsigned int
213lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
214 struct fc_bsg_buffer *bsg_buffers,
215 unsigned int bytes_to_transfer, int to_buffers)
216{
217
218 struct lpfc_dmabuf *mp;
219 unsigned int transfer_bytes, bytes_copied = 0;
220 unsigned int sg_offset, dma_offset;
221 unsigned char *dma_address, *sg_address;
222 struct scatterlist *sgel;
223 LIST_HEAD(temp_list);
224
225
226 list_splice_init(&dma_buffers->list, &temp_list);
227 list_add(&dma_buffers->list, &temp_list);
228 sg_offset = 0;
229 sgel = bsg_buffers->sg_list;
230 list_for_each_entry(mp, &temp_list, list) {
231 dma_offset = 0;
232 while (bytes_to_transfer && sgel &&
233 (dma_offset < LPFC_BPL_SIZE)) {
234 dma_address = mp->virt + dma_offset;
235 if (sg_offset) {
236 /* Continue previous partial transfer of sg */
237 sg_address = sg_virt(sgel) + sg_offset;
238 transfer_bytes = sgel->length - sg_offset;
239 } else {
240 sg_address = sg_virt(sgel);
241 transfer_bytes = sgel->length;
242 }
243 if (bytes_to_transfer < transfer_bytes)
244 transfer_bytes = bytes_to_transfer;
245 if (transfer_bytes > (LPFC_BPL_SIZE - dma_offset))
246 transfer_bytes = LPFC_BPL_SIZE - dma_offset;
247 if (to_buffers)
248 memcpy(dma_address, sg_address, transfer_bytes);
249 else
250 memcpy(sg_address, dma_address, transfer_bytes);
251 dma_offset += transfer_bytes;
252 sg_offset += transfer_bytes;
253 bytes_to_transfer -= transfer_bytes;
254 bytes_copied += transfer_bytes;
255 if (sg_offset >= sgel->length) {
256 sg_offset = 0;
257 sgel = sg_next(sgel);
258 }
259 }
260 }
261 list_del_init(&dma_buffers->list);
262 list_splice(&temp_list, &dma_buffers->list);
263 return bytes_copied;
264}
265
144/** 266/**
145 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler 267 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
146 * @phba: Pointer to HBA context object. 268 * @phba: Pointer to HBA context object.
@@ -166,62 +288,72 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
166 struct bsg_job_data *dd_data; 288 struct bsg_job_data *dd_data;
167 struct fc_bsg_job *job; 289 struct fc_bsg_job *job;
168 IOCB_t *rsp; 290 IOCB_t *rsp;
169 struct lpfc_dmabuf *bmp; 291 struct lpfc_dmabuf *bmp, *cmp, *rmp;
170 struct lpfc_nodelist *ndlp; 292 struct lpfc_nodelist *ndlp;
171 struct lpfc_bsg_iocb *iocb; 293 struct lpfc_bsg_iocb *iocb;
172 unsigned long flags; 294 unsigned long flags;
295 unsigned int rsp_size;
173 int rc = 0; 296 int rc = 0;
174 297
298 dd_data = cmdiocbq->context1;
299
300 /* Determine if job has been aborted */
175 spin_lock_irqsave(&phba->ct_ev_lock, flags); 301 spin_lock_irqsave(&phba->ct_ev_lock, flags);
176 dd_data = cmdiocbq->context2; 302 job = dd_data->set_job;
177 if (!dd_data) { 303 if (job) {
178 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 304 /* Prevent timeout handling from trying to abort job */
179 lpfc_sli_release_iocbq(phba, cmdiocbq); 305 job->dd_data = NULL;
180 return;
181 } 306 }
307 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
182 308
183 iocb = &dd_data->context_un.iocb; 309 iocb = &dd_data->context_un.iocb;
184 job = iocb->set_job; 310 ndlp = iocb->ndlp;
185 job->dd_data = NULL; /* so timeout handler does not reply */ 311 rmp = iocb->rmp;
186 312 cmp = cmdiocbq->context2;
187 bmp = iocb->bmp; 313 bmp = cmdiocbq->context3;
188 rsp = &rspiocbq->iocb; 314 rsp = &rspiocbq->iocb;
189 ndlp = cmdiocbq->context1;
190 315
191 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, 316 /* Copy the completed data or set the error status */
192 job->request_payload.sg_cnt, DMA_TO_DEVICE);
193 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
194 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
195 317
196 if (rsp->ulpStatus) { 318 if (job) {
197 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { 319 if (rsp->ulpStatus) {
198 switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) { 320 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
199 case IOERR_SEQUENCE_TIMEOUT: 321 switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
200 rc = -ETIMEDOUT; 322 case IOERR_SEQUENCE_TIMEOUT:
201 break; 323 rc = -ETIMEDOUT;
202 case IOERR_INVALID_RPI: 324 break;
203 rc = -EFAULT; 325 case IOERR_INVALID_RPI:
204 break; 326 rc = -EFAULT;
205 default: 327 break;
328 default:
329 rc = -EACCES;
330 break;
331 }
332 } else {
206 rc = -EACCES; 333 rc = -EACCES;
207 break;
208 } 334 }
209 } else 335 } else {
210 rc = -EACCES; 336 rsp_size = rsp->un.genreq64.bdl.bdeSize;
211 } else 337 job->reply->reply_payload_rcv_len =
212 job->reply->reply_payload_rcv_len = 338 lpfc_bsg_copy_data(rmp, &job->reply_payload,
213 rsp->un.genreq64.bdl.bdeSize; 339 rsp_size, 0);
340 }
341 }
214 342
343 lpfc_free_bsg_buffers(phba, cmp);
344 lpfc_free_bsg_buffers(phba, rmp);
215 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 345 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
346 kfree(bmp);
216 lpfc_sli_release_iocbq(phba, cmdiocbq); 347 lpfc_sli_release_iocbq(phba, cmdiocbq);
217 lpfc_nlp_put(ndlp); 348 lpfc_nlp_put(ndlp);
218 kfree(bmp);
219 kfree(dd_data); 349 kfree(dd_data);
220 /* make error code available to userspace */ 350
221 job->reply->result = rc; 351 /* Complete the job if the job is still active */
222 /* complete the job back to userspace */ 352
223 job->job_done(job); 353 if (job) {
224 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 354 job->reply->result = rc;
355 job->job_done(job);
356 }
225 return; 357 return;
226} 358}
227 359
@@ -240,12 +372,9 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
240 uint32_t timeout; 372 uint32_t timeout;
241 struct lpfc_iocbq *cmdiocbq = NULL; 373 struct lpfc_iocbq *cmdiocbq = NULL;
242 IOCB_t *cmd; 374 IOCB_t *cmd;
243 struct lpfc_dmabuf *bmp = NULL; 375 struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
244 int request_nseg; 376 int request_nseg;
245 int reply_nseg; 377 int reply_nseg;
246 struct scatterlist *sgel = NULL;
247 int numbde;
248 dma_addr_t busaddr;
249 struct bsg_job_data *dd_data; 378 struct bsg_job_data *dd_data;
250 uint32_t creg_val; 379 uint32_t creg_val;
251 int rc = 0; 380 int rc = 0;
@@ -268,54 +397,50 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
268 goto no_ndlp; 397 goto no_ndlp;
269 } 398 }
270 399
271 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
272 if (!bmp) {
273 rc = -ENOMEM;
274 goto free_ndlp;
275 }
276
277 if (ndlp->nlp_flag & NLP_ELS_SND_MASK) { 400 if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
278 rc = -ENODEV; 401 rc = -ENODEV;
279 goto free_bmp; 402 goto free_ndlp;
280 } 403 }
281 404
282 cmdiocbq = lpfc_sli_get_iocbq(phba); 405 cmdiocbq = lpfc_sli_get_iocbq(phba);
283 if (!cmdiocbq) { 406 if (!cmdiocbq) {
284 rc = -ENOMEM; 407 rc = -ENOMEM;
285 goto free_bmp; 408 goto free_ndlp;
286 } 409 }
287 410
288 cmd = &cmdiocbq->iocb; 411 cmd = &cmdiocbq->iocb;
412
413 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
414 if (!bmp) {
415 rc = -ENOMEM;
416 goto free_cmdiocbq;
417 }
289 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); 418 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
290 if (!bmp->virt) { 419 if (!bmp->virt) {
291 rc = -ENOMEM; 420 rc = -ENOMEM;
292 goto free_cmdiocbq; 421 goto free_bmp;
293 } 422 }
294 423
295 INIT_LIST_HEAD(&bmp->list); 424 INIT_LIST_HEAD(&bmp->list);
425
296 bpl = (struct ulp_bde64 *) bmp->virt; 426 bpl = (struct ulp_bde64 *) bmp->virt;
297 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, 427 request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
298 job->request_payload.sg_cnt, DMA_TO_DEVICE); 428 cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
299 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { 429 1, bpl, &request_nseg);
300 busaddr = sg_dma_address(sgel); 430 if (!cmp) {
301 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 431 rc = -ENOMEM;
302 bpl->tus.f.bdeSize = sg_dma_len(sgel); 432 goto free_bmp;
303 bpl->tus.w = cpu_to_le32(bpl->tus.w);
304 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
305 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
306 bpl++;
307 } 433 }
434 lpfc_bsg_copy_data(cmp, &job->request_payload,
435 job->request_payload.payload_len, 1);
308 436
309 reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list, 437 bpl += request_nseg;
310 job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 438 reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
311 for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) { 439 rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
312 busaddr = sg_dma_address(sgel); 440 bpl, &reply_nseg);
313 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; 441 if (!rmp) {
314 bpl->tus.f.bdeSize = sg_dma_len(sgel); 442 rc = -ENOMEM;
315 bpl->tus.w = cpu_to_le32(bpl->tus.w); 443 goto free_cmp;
316 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
317 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
318 bpl++;
319 } 444 }
320 445
321 cmd->un.genreq64.bdl.ulpIoTag32 = 0; 446 cmd->un.genreq64.bdl.ulpIoTag32 = 0;
@@ -343,17 +468,20 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
343 cmd->ulpTimeout = timeout; 468 cmd->ulpTimeout = timeout;
344 469
345 cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp; 470 cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
346 cmdiocbq->context1 = ndlp; 471 cmdiocbq->context1 = dd_data;
347 cmdiocbq->context2 = dd_data; 472 cmdiocbq->context2 = cmp;
473 cmdiocbq->context3 = bmp;
348 dd_data->type = TYPE_IOCB; 474 dd_data->type = TYPE_IOCB;
475 dd_data->set_job = job;
349 dd_data->context_un.iocb.cmdiocbq = cmdiocbq; 476 dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
350 dd_data->context_un.iocb.set_job = job; 477 dd_data->context_un.iocb.ndlp = ndlp;
351 dd_data->context_un.iocb.bmp = bmp; 478 dd_data->context_un.iocb.rmp = rmp;
479 job->dd_data = dd_data;
352 480
353 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 481 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
354 if (lpfc_readl(phba->HCregaddr, &creg_val)) { 482 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
355 rc = -EIO ; 483 rc = -EIO ;
356 goto free_cmdiocbq; 484 goto free_rmp;
357 } 485 }
358 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 486 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
359 writel(creg_val, phba->HCregaddr); 487 writel(creg_val, phba->HCregaddr);
@@ -368,19 +496,18 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
368 else 496 else
369 rc = -EIO; 497 rc = -EIO;
370 498
371
372 /* iocb failed so cleanup */ 499 /* iocb failed so cleanup */
373 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
374 job->request_payload.sg_cnt, DMA_TO_DEVICE);
375 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
376 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
377 500
378 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 501free_rmp:
379 502 lpfc_free_bsg_buffers(phba, rmp);
380free_cmdiocbq: 503free_cmp:
381 lpfc_sli_release_iocbq(phba, cmdiocbq); 504 lpfc_free_bsg_buffers(phba, cmp);
382free_bmp: 505free_bmp:
506 if (bmp->virt)
507 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
383 kfree(bmp); 508 kfree(bmp);
509free_cmdiocbq:
510 lpfc_sli_release_iocbq(phba, cmdiocbq);
384free_ndlp: 511free_ndlp:
385 lpfc_nlp_put(ndlp); 512 lpfc_nlp_put(ndlp);
386no_ndlp: 513no_ndlp:
@@ -418,67 +545,68 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
418 struct fc_bsg_job *job; 545 struct fc_bsg_job *job;
419 IOCB_t *rsp; 546 IOCB_t *rsp;
420 struct lpfc_nodelist *ndlp; 547 struct lpfc_nodelist *ndlp;
421 struct lpfc_dmabuf *pbuflist = NULL; 548 struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
422 struct fc_bsg_ctels_reply *els_reply; 549 struct fc_bsg_ctels_reply *els_reply;
423 uint8_t *rjt_data; 550 uint8_t *rjt_data;
424 unsigned long flags; 551 unsigned long flags;
552 unsigned int rsp_size;
425 int rc = 0; 553 int rc = 0;
426 554
427 spin_lock_irqsave(&phba->ct_ev_lock, flags);
428 dd_data = cmdiocbq->context1; 555 dd_data = cmdiocbq->context1;
429 /* normal completion and timeout crossed paths, already done */ 556 ndlp = dd_data->context_un.iocb.ndlp;
430 if (!dd_data) { 557 cmdiocbq->context1 = ndlp;
431 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
432 return;
433 }
434 558
435 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 559 /* Determine if job has been aborted */
436 if (cmdiocbq->context2 && rspiocbq) 560 spin_lock_irqsave(&phba->ct_ev_lock, flags);
437 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 561 job = dd_data->set_job;
438 &rspiocbq->iocb, sizeof(IOCB_t)); 562 if (job) {
563 /* Prevent timeout handling from trying to abort job */
564 job->dd_data = NULL;
565 }
566 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
439 567
440 job = dd_data->context_un.iocb.set_job;
441 cmdiocbq = dd_data->context_un.iocb.cmdiocbq;
442 rspiocbq = dd_data->context_un.iocb.rspiocbq;
443 rsp = &rspiocbq->iocb; 568 rsp = &rspiocbq->iocb;
444 ndlp = dd_data->context_un.iocb.ndlp; 569 pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
570 prsp = (struct lpfc_dmabuf *)pcmd->list.next;
445 571
446 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, 572 /* Copy the completed job data or determine the job status if job is
447 job->request_payload.sg_cnt, DMA_TO_DEVICE); 573 * still active
448 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, 574 */
449 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
450 575
451 if (job->reply->result == -EAGAIN) 576 if (job) {
452 rc = -EAGAIN; 577 if (rsp->ulpStatus == IOSTAT_SUCCESS) {
453 else if (rsp->ulpStatus == IOSTAT_SUCCESS) 578 rsp_size = rsp->un.elsreq64.bdl.bdeSize;
454 job->reply->reply_payload_rcv_len = 579 job->reply->reply_payload_rcv_len =
455 rsp->un.elsreq64.bdl.bdeSize; 580 sg_copy_from_buffer(job->reply_payload.sg_list,
456 else if (rsp->ulpStatus == IOSTAT_LS_RJT) { 581 job->reply_payload.sg_cnt,
457 job->reply->reply_payload_rcv_len = 582 prsp->virt,
458 sizeof(struct fc_bsg_ctels_reply); 583 rsp_size);
459 /* LS_RJT data returned in word 4 */ 584 } else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
460 rjt_data = (uint8_t *)&rsp->un.ulpWord[4]; 585 job->reply->reply_payload_rcv_len =
461 els_reply = &job->reply->reply_data.ctels_reply; 586 sizeof(struct fc_bsg_ctels_reply);
462 els_reply->status = FC_CTELS_STATUS_REJECT; 587 /* LS_RJT data returned in word 4 */
463 els_reply->rjt_data.action = rjt_data[3]; 588 rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
464 els_reply->rjt_data.reason_code = rjt_data[2]; 589 els_reply = &job->reply->reply_data.ctels_reply;
465 els_reply->rjt_data.reason_explanation = rjt_data[1]; 590 els_reply->status = FC_CTELS_STATUS_REJECT;
466 els_reply->rjt_data.vendor_unique = rjt_data[0]; 591 els_reply->rjt_data.action = rjt_data[3];
467 } else 592 els_reply->rjt_data.reason_code = rjt_data[2];
468 rc = -EIO; 593 els_reply->rjt_data.reason_explanation = rjt_data[1];
594 els_reply->rjt_data.vendor_unique = rjt_data[0];
595 } else {
596 rc = -EIO;
597 }
598 }
469 599
470 pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
471 lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
472 lpfc_sli_release_iocbq(phba, rspiocbq);
473 lpfc_sli_release_iocbq(phba, cmdiocbq);
474 lpfc_nlp_put(ndlp); 600 lpfc_nlp_put(ndlp);
601 lpfc_els_free_iocb(phba, cmdiocbq);
475 kfree(dd_data); 602 kfree(dd_data);
476 /* make error code available to userspace */ 603
477 job->reply->result = rc; 604 /* Complete the job if the job is still active */
478 job->dd_data = NULL; 605
479 /* complete the job back to userspace */ 606 if (job) {
480 job->job_done(job); 607 job->reply->result = rc;
481 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 608 job->job_done(job);
609 }
482 return; 610 return;
483} 611}
484 612
@@ -496,19 +624,8 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
496 uint32_t elscmd; 624 uint32_t elscmd;
497 uint32_t cmdsize; 625 uint32_t cmdsize;
498 uint32_t rspsize; 626 uint32_t rspsize;
499 struct lpfc_iocbq *rspiocbq;
500 struct lpfc_iocbq *cmdiocbq; 627 struct lpfc_iocbq *cmdiocbq;
501 IOCB_t *rsp;
502 uint16_t rpi = 0; 628 uint16_t rpi = 0;
503 struct lpfc_dmabuf *pcmd;
504 struct lpfc_dmabuf *prsp;
505 struct lpfc_dmabuf *pbuflist = NULL;
506 struct ulp_bde64 *bpl;
507 int request_nseg;
508 int reply_nseg;
509 struct scatterlist *sgel = NULL;
510 int numbde;
511 dma_addr_t busaddr;
512 struct bsg_job_data *dd_data; 629 struct bsg_job_data *dd_data;
513 uint32_t creg_val; 630 uint32_t creg_val;
514 int rc = 0; 631 int rc = 0;
@@ -516,6 +633,15 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
516 /* in case no data is transferred */ 633 /* in case no data is transferred */
517 job->reply->reply_payload_rcv_len = 0; 634 job->reply->reply_payload_rcv_len = 0;
518 635
636 /* verify the els command is not greater than the
637 * maximum ELS transfer size.
638 */
639
640 if (job->request_payload.payload_len > FCELSSIZE) {
641 rc = -EINVAL;
642 goto no_dd_data;
643 }
644
519 /* allocate our bsg tracking structure */ 645 /* allocate our bsg tracking structure */
520 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); 646 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
521 if (!dd_data) { 647 if (!dd_data) {
@@ -525,88 +651,51 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
525 goto no_dd_data; 651 goto no_dd_data;
526 } 652 }
527 653
528 if (!lpfc_nlp_get(ndlp)) {
529 rc = -ENODEV;
530 goto free_dd_data;
531 }
532
533 elscmd = job->request->rqst_data.r_els.els_code; 654 elscmd = job->request->rqst_data.r_els.els_code;
534 cmdsize = job->request_payload.payload_len; 655 cmdsize = job->request_payload.payload_len;
535 rspsize = job->reply_payload.payload_len; 656 rspsize = job->reply_payload.payload_len;
536 rspiocbq = lpfc_sli_get_iocbq(phba); 657
537 if (!rspiocbq) { 658 if (!lpfc_nlp_get(ndlp)) {
538 lpfc_nlp_put(ndlp); 659 rc = -ENODEV;
539 rc = -ENOMEM;
540 goto free_dd_data; 660 goto free_dd_data;
541 } 661 }
542 662
543 rsp = &rspiocbq->iocb; 663 /* We will use the allocated dma buffers by prep els iocb for command
544 rpi = ndlp->nlp_rpi; 664 * and response to ensure if the job times out and the request is freed,
665 * we won't be dma into memory that is no longer allocated to for the
666 * request.
667 */
545 668
546 cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, 669 cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
547 ndlp->nlp_DID, elscmd); 670 ndlp->nlp_DID, elscmd);
548 if (!cmdiocbq) { 671 if (!cmdiocbq) {
549 rc = -EIO; 672 rc = -EIO;
550 goto free_rspiocbq; 673 goto release_ndlp;
551 } 674 }
552 675
553 /* prep els iocb set context1 to the ndlp, context2 to the command 676 rpi = ndlp->nlp_rpi;
554 * dmabuf, context3 holds the data dmabuf 677
555 */ 678 /* Transfer the request payload to allocated command dma buffer */
556 pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2; 679
557 prsp = (struct lpfc_dmabuf *) pcmd->list.next; 680 sg_copy_to_buffer(job->request_payload.sg_list,
558 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); 681 job->request_payload.sg_cnt,
559 kfree(pcmd); 682 ((struct lpfc_dmabuf *)cmdiocbq->context2)->virt,
560 lpfc_mbuf_free(phba, prsp->virt, prsp->phys); 683 cmdsize);
561 kfree(prsp);
562 cmdiocbq->context2 = NULL;
563
564 pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
565 bpl = (struct ulp_bde64 *) pbuflist->virt;
566
567 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
568 job->request_payload.sg_cnt, DMA_TO_DEVICE);
569 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
570 busaddr = sg_dma_address(sgel);
571 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
572 bpl->tus.f.bdeSize = sg_dma_len(sgel);
573 bpl->tus.w = cpu_to_le32(bpl->tus.w);
574 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
575 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
576 bpl++;
577 }
578 684
579 reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
580 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
581 for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
582 busaddr = sg_dma_address(sgel);
583 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
584 bpl->tus.f.bdeSize = sg_dma_len(sgel);
585 bpl->tus.w = cpu_to_le32(bpl->tus.w);
586 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
587 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
588 bpl++;
589 }
590 cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
591 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
592 if (phba->sli_rev == LPFC_SLI_REV4) 685 if (phba->sli_rev == LPFC_SLI_REV4)
593 cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi]; 686 cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
594 else 687 else
595 cmdiocbq->iocb.ulpContext = rpi; 688 cmdiocbq->iocb.ulpContext = rpi;
596 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; 689 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
597 cmdiocbq->context1 = NULL;
598 cmdiocbq->context2 = NULL;
599
600 cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
601 cmdiocbq->context1 = dd_data; 690 cmdiocbq->context1 = dd_data;
602 cmdiocbq->context_un.ndlp = ndlp; 691 cmdiocbq->context_un.ndlp = ndlp;
603 cmdiocbq->context2 = rspiocbq; 692 cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
604 dd_data->type = TYPE_IOCB; 693 dd_data->type = TYPE_IOCB;
694 dd_data->set_job = job;
605 dd_data->context_un.iocb.cmdiocbq = cmdiocbq; 695 dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
606 dd_data->context_un.iocb.rspiocbq = rspiocbq;
607 dd_data->context_un.iocb.set_job = job;
608 dd_data->context_un.iocb.bmp = NULL;
609 dd_data->context_un.iocb.ndlp = ndlp; 696 dd_data->context_un.iocb.ndlp = ndlp;
697 dd_data->context_un.iocb.rmp = NULL;
698 job->dd_data = dd_data;
610 699
611 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 700 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
612 if (lpfc_readl(phba->HCregaddr, &creg_val)) { 701 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
@@ -617,8 +706,9 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
617 writel(creg_val, phba->HCregaddr); 706 writel(creg_val, phba->HCregaddr);
618 readl(phba->HCregaddr); /* flush */ 707 readl(phba->HCregaddr); /* flush */
619 } 708 }
709
620 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); 710 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
621 lpfc_nlp_put(ndlp); 711
622 if (rc == IOCB_SUCCESS) 712 if (rc == IOCB_SUCCESS)
623 return 0; /* done for now */ 713 return 0; /* done for now */
624 else if (rc == IOCB_BUSY) 714 else if (rc == IOCB_BUSY)
@@ -627,17 +717,12 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
627 rc = -EIO; 717 rc = -EIO;
628 718
629linkdown_err: 719linkdown_err:
630 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
631 job->request_payload.sg_cnt, DMA_TO_DEVICE);
632 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
633 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
634 720
635 lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys); 721 cmdiocbq->context1 = ndlp;
636 722 lpfc_els_free_iocb(phba, cmdiocbq);
637 lpfc_sli_release_iocbq(phba, cmdiocbq);
638 723
639free_rspiocbq: 724release_ndlp:
640 lpfc_sli_release_iocbq(phba, rspiocbq); 725 lpfc_nlp_put(ndlp);
641 726
642free_dd_data: 727free_dd_data:
643 kfree(dd_data); 728 kfree(dd_data);
@@ -680,6 +765,7 @@ lpfc_bsg_event_free(struct kref *kref)
680 kfree(ed); 765 kfree(ed);
681 } 766 }
682 767
768 kfree(evt->dd_data);
683 kfree(evt); 769 kfree(evt);
684} 770}
685 771
@@ -723,6 +809,7 @@ lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
723 evt->req_id = ev_req_id; 809 evt->req_id = ev_req_id;
724 evt->reg_id = ev_reg_id; 810 evt->reg_id = ev_reg_id;
725 evt->wait_time_stamp = jiffies; 811 evt->wait_time_stamp = jiffies;
812 evt->dd_data = NULL;
726 init_waitqueue_head(&evt->wq); 813 init_waitqueue_head(&evt->wq);
727 kref_init(&evt->kref); 814 kref_init(&evt->kref);
728 return evt; 815 return evt;
@@ -790,6 +877,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
790 struct lpfc_hbq_entry *hbqe; 877 struct lpfc_hbq_entry *hbqe;
791 struct lpfc_sli_ct_request *ct_req; 878 struct lpfc_sli_ct_request *ct_req;
792 struct fc_bsg_job *job = NULL; 879 struct fc_bsg_job *job = NULL;
880 struct bsg_job_data *dd_data = NULL;
793 unsigned long flags; 881 unsigned long flags;
794 int size = 0; 882 int size = 0;
795 883
@@ -986,10 +1074,11 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
986 } 1074 }
987 1075
988 list_move(evt->events_to_see.prev, &evt->events_to_get); 1076 list_move(evt->events_to_see.prev, &evt->events_to_get);
989 lpfc_bsg_event_unref(evt);
990 1077
991 job = evt->set_job; 1078 dd_data = (struct bsg_job_data *)evt->dd_data;
992 evt->set_job = NULL; 1079 job = dd_data->set_job;
1080 dd_data->set_job = NULL;
1081 lpfc_bsg_event_unref(evt);
993 if (job) { 1082 if (job) {
994 job->reply->reply_payload_rcv_len = size; 1083 job->reply->reply_payload_rcv_len = size;
995 /* make error code available to userspace */ 1084 /* make error code available to userspace */
@@ -1078,14 +1167,6 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
1078 goto job_error; 1167 goto job_error;
1079 } 1168 }
1080 1169
1081 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1082 if (dd_data == NULL) {
1083 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1084 "2734 Failed allocation of dd_data\n");
1085 rc = -ENOMEM;
1086 goto job_error;
1087 }
1088
1089 event_req = (struct set_ct_event *) 1170 event_req = (struct set_ct_event *)
1090 job->request->rqst_data.h_vendor.vendor_cmd; 1171 job->request->rqst_data.h_vendor.vendor_cmd;
1091 ev_mask = ((uint32_t)(unsigned long)event_req->type_mask & 1172 ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
@@ -1095,6 +1176,7 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
1095 if (evt->reg_id == event_req->ev_reg_id) { 1176 if (evt->reg_id == event_req->ev_reg_id) {
1096 lpfc_bsg_event_ref(evt); 1177 lpfc_bsg_event_ref(evt);
1097 evt->wait_time_stamp = jiffies; 1178 evt->wait_time_stamp = jiffies;
1179 dd_data = (struct bsg_job_data *)evt->dd_data;
1098 break; 1180 break;
1099 } 1181 }
1100 } 1182 }
@@ -1102,6 +1184,13 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
1102 1184
1103 if (&evt->node == &phba->ct_ev_waiters) { 1185 if (&evt->node == &phba->ct_ev_waiters) {
1104 /* no event waiting struct yet - first call */ 1186 /* no event waiting struct yet - first call */
1187 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1188 if (dd_data == NULL) {
1189 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1190 "2734 Failed allocation of dd_data\n");
1191 rc = -ENOMEM;
1192 goto job_error;
1193 }
1105 evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id, 1194 evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
1106 event_req->ev_req_id); 1195 event_req->ev_req_id);
1107 if (!evt) { 1196 if (!evt) {
@@ -1111,7 +1200,10 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
1111 rc = -ENOMEM; 1200 rc = -ENOMEM;
1112 goto job_error; 1201 goto job_error;
1113 } 1202 }
1114 1203 dd_data->type = TYPE_EVT;
1204 dd_data->set_job = NULL;
1205 dd_data->context_un.evt = evt;
1206 evt->dd_data = (void *)dd_data;
1115 spin_lock_irqsave(&phba->ct_ev_lock, flags); 1207 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1116 list_add(&evt->node, &phba->ct_ev_waiters); 1208 list_add(&evt->node, &phba->ct_ev_waiters);
1117 lpfc_bsg_event_ref(evt); 1209 lpfc_bsg_event_ref(evt);
@@ -1121,9 +1213,7 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
1121 1213
1122 spin_lock_irqsave(&phba->ct_ev_lock, flags); 1214 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1123 evt->waiting = 1; 1215 evt->waiting = 1;
1124 dd_data->type = TYPE_EVT; 1216 dd_data->set_job = job; /* for unsolicited command */
1125 dd_data->context_un.evt = evt;
1126 evt->set_job = job; /* for unsolicited command */
1127 job->dd_data = dd_data; /* for fc transport timeout callback*/ 1217 job->dd_data = dd_data; /* for fc transport timeout callback*/
1128 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 1218 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1129 return 0; /* call job done later */ 1219 return 0; /* call job done later */
@@ -1252,57 +1342,64 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
1252 struct bsg_job_data *dd_data; 1342 struct bsg_job_data *dd_data;
1253 struct fc_bsg_job *job; 1343 struct fc_bsg_job *job;
1254 IOCB_t *rsp; 1344 IOCB_t *rsp;
1255 struct lpfc_dmabuf *bmp; 1345 struct lpfc_dmabuf *bmp, *cmp;
1256 struct lpfc_nodelist *ndlp; 1346 struct lpfc_nodelist *ndlp;
1257 unsigned long flags; 1347 unsigned long flags;
1258 int rc = 0; 1348 int rc = 0;
1259 1349
1350 dd_data = cmdiocbq->context1;
1351
1352 /* Determine if job has been aborted */
1260 spin_lock_irqsave(&phba->ct_ev_lock, flags); 1353 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1261 dd_data = cmdiocbq->context2; 1354 job = dd_data->set_job;
1262 /* normal completion and timeout crossed paths, already done */ 1355 if (job) {
1263 if (!dd_data) { 1356 /* Prevent timeout handling from trying to abort job */
1264 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 1357 job->dd_data = NULL;
1265 return;
1266 } 1358 }
1359 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1267 1360
1268 job = dd_data->context_un.iocb.set_job;
1269 bmp = dd_data->context_un.iocb.bmp;
1270 rsp = &rspiocbq->iocb;
1271 ndlp = dd_data->context_un.iocb.ndlp; 1361 ndlp = dd_data->context_un.iocb.ndlp;
1362 cmp = cmdiocbq->context2;
1363 bmp = cmdiocbq->context3;
1364 rsp = &rspiocbq->iocb;
1272 1365
1273 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, 1366 /* Copy the completed job data or set the error status */
1274 job->request_payload.sg_cnt, DMA_TO_DEVICE);
1275 1367
1276 if (rsp->ulpStatus) { 1368 if (job) {
1277 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { 1369 if (rsp->ulpStatus) {
1278 switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) { 1370 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
1279 case IOERR_SEQUENCE_TIMEOUT: 1371 switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
1280 rc = -ETIMEDOUT; 1372 case IOERR_SEQUENCE_TIMEOUT:
1281 break; 1373 rc = -ETIMEDOUT;
1282 case IOERR_INVALID_RPI: 1374 break;
1283 rc = -EFAULT; 1375 case IOERR_INVALID_RPI:
1284 break; 1376 rc = -EFAULT;
1285 default: 1377 break;
1378 default:
1379 rc = -EACCES;
1380 break;
1381 }
1382 } else {
1286 rc = -EACCES; 1383 rc = -EACCES;
1287 break;
1288 } 1384 }
1289 } else 1385 } else {
1290 rc = -EACCES; 1386 job->reply->reply_payload_rcv_len = 0;
1291 } else 1387 }
1292 job->reply->reply_payload_rcv_len = 1388 }
1293 rsp->un.genreq64.bdl.bdeSize;
1294 1389
1390 lpfc_free_bsg_buffers(phba, cmp);
1295 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 1391 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1392 kfree(bmp);
1296 lpfc_sli_release_iocbq(phba, cmdiocbq); 1393 lpfc_sli_release_iocbq(phba, cmdiocbq);
1297 lpfc_nlp_put(ndlp); 1394 lpfc_nlp_put(ndlp);
1298 kfree(bmp);
1299 kfree(dd_data); 1395 kfree(dd_data);
1300 /* make error code available to userspace */ 1396
1301 job->reply->result = rc; 1397 /* Complete the job if the job is still active */
1302 job->dd_data = NULL; 1398
1303 /* complete the job back to userspace */ 1399 if (job) {
1304 job->job_done(job); 1400 job->reply->result = rc;
1305 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 1401 job->job_done(job);
1402 }
1306 return; 1403 return;
1307} 1404}
1308 1405
@@ -1316,7 +1413,8 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
1316 **/ 1413 **/
1317static int 1414static int
1318lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, 1415lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1319 struct lpfc_dmabuf *bmp, int num_entry) 1416 struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
1417 int num_entry)
1320{ 1418{
1321 IOCB_t *icmd; 1419 IOCB_t *icmd;
1322 struct lpfc_iocbq *ctiocb = NULL; 1420 struct lpfc_iocbq *ctiocb = NULL;
@@ -1377,7 +1475,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1377 1475
1378 /* Check if the ndlp is active */ 1476 /* Check if the ndlp is active */
1379 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 1477 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
1380 rc = -IOCB_ERROR; 1478 rc = IOCB_ERROR;
1381 goto issue_ct_rsp_exit; 1479 goto issue_ct_rsp_exit;
1382 } 1480 }
1383 1481
@@ -1385,7 +1483,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1385 * we respond 1483 * we respond
1386 */ 1484 */
1387 if (!lpfc_nlp_get(ndlp)) { 1485 if (!lpfc_nlp_get(ndlp)) {
1388 rc = -IOCB_ERROR; 1486 rc = IOCB_ERROR;
1389 goto issue_ct_rsp_exit; 1487 goto issue_ct_rsp_exit;
1390 } 1488 }
1391 1489
@@ -1407,17 +1505,17 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1407 ctiocb->iocb_cmpl = NULL; 1505 ctiocb->iocb_cmpl = NULL;
1408 ctiocb->iocb_flag |= LPFC_IO_LIBDFC; 1506 ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
1409 ctiocb->vport = phba->pport; 1507 ctiocb->vport = phba->pport;
1508 ctiocb->context1 = dd_data;
1509 ctiocb->context2 = cmp;
1410 ctiocb->context3 = bmp; 1510 ctiocb->context3 = bmp;
1411
1412 ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp; 1511 ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
1413 ctiocb->context2 = dd_data; 1512
1414 ctiocb->context1 = ndlp;
1415 dd_data->type = TYPE_IOCB; 1513 dd_data->type = TYPE_IOCB;
1514 dd_data->set_job = job;
1416 dd_data->context_un.iocb.cmdiocbq = ctiocb; 1515 dd_data->context_un.iocb.cmdiocbq = ctiocb;
1417 dd_data->context_un.iocb.rspiocbq = NULL;
1418 dd_data->context_un.iocb.set_job = job;
1419 dd_data->context_un.iocb.bmp = bmp;
1420 dd_data->context_un.iocb.ndlp = ndlp; 1516 dd_data->context_un.iocb.ndlp = ndlp;
1517 dd_data->context_un.iocb.rmp = NULL;
1518 job->dd_data = dd_data;
1421 1519
1422 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 1520 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
1423 if (lpfc_readl(phba->HCregaddr, &creg_val)) { 1521 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
@@ -1454,11 +1552,8 @@ lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
1454 struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *) 1552 struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
1455 job->request->rqst_data.h_vendor.vendor_cmd; 1553 job->request->rqst_data.h_vendor.vendor_cmd;
1456 struct ulp_bde64 *bpl; 1554 struct ulp_bde64 *bpl;
1457 struct lpfc_dmabuf *bmp = NULL; 1555 struct lpfc_dmabuf *bmp = NULL, *cmp = NULL;
1458 struct scatterlist *sgel = NULL; 1556 int bpl_entries;
1459 int request_nseg;
1460 int numbde;
1461 dma_addr_t busaddr;
1462 uint32_t tag = mgmt_resp->tag; 1557 uint32_t tag = mgmt_resp->tag;
1463 unsigned long reqbfrcnt = 1558 unsigned long reqbfrcnt =
1464 (unsigned long)job->request_payload.payload_len; 1559 (unsigned long)job->request_payload.payload_len;
@@ -1486,30 +1581,28 @@ lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
1486 1581
1487 INIT_LIST_HEAD(&bmp->list); 1582 INIT_LIST_HEAD(&bmp->list);
1488 bpl = (struct ulp_bde64 *) bmp->virt; 1583 bpl = (struct ulp_bde64 *) bmp->virt;
1489 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, 1584 bpl_entries = (LPFC_BPL_SIZE/sizeof(struct ulp_bde64));
1490 job->request_payload.sg_cnt, DMA_TO_DEVICE); 1585 cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
1491 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { 1586 1, bpl, &bpl_entries);
1492 busaddr = sg_dma_address(sgel); 1587 if (!cmp) {
1493 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1588 rc = -ENOMEM;
1494 bpl->tus.f.bdeSize = sg_dma_len(sgel); 1589 goto send_mgmt_rsp_free_bmp;
1495 bpl->tus.w = cpu_to_le32(bpl->tus.w);
1496 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
1497 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
1498 bpl++;
1499 } 1590 }
1591 lpfc_bsg_copy_data(cmp, &job->request_payload,
1592 job->request_payload.payload_len, 1);
1500 1593
1501 rc = lpfc_issue_ct_rsp(phba, job, tag, bmp, request_nseg); 1594 rc = lpfc_issue_ct_rsp(phba, job, tag, cmp, bmp, bpl_entries);
1502 1595
1503 if (rc == IOCB_SUCCESS) 1596 if (rc == IOCB_SUCCESS)
1504 return 0; /* done for now */ 1597 return 0; /* done for now */
1505 1598
1506 /* TBD need to handle a timeout */
1507 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
1508 job->request_payload.sg_cnt, DMA_TO_DEVICE);
1509 rc = -EACCES; 1599 rc = -EACCES;
1510 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 1600
1601 lpfc_free_bsg_buffers(phba, cmp);
1511 1602
1512send_mgmt_rsp_free_bmp: 1603send_mgmt_rsp_free_bmp:
1604 if (bmp->virt)
1605 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1513 kfree(bmp); 1606 kfree(bmp);
1514send_mgmt_rsp_exit: 1607send_mgmt_rsp_exit:
1515 /* make error code available to userspace */ 1608 /* make error code available to userspace */
@@ -1559,7 +1652,7 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
1559 scsi_block_requests(shost); 1652 scsi_block_requests(shost);
1560 } 1653 }
1561 1654
1562 while (pring->txcmplq_cnt) { 1655 while (!list_empty(&pring->txcmplq)) {
1563 if (i++ > 500) /* wait up to 5 seconds */ 1656 if (i++ > 500) /* wait up to 5 seconds */
1564 break; 1657 break;
1565 msleep(10); 1658 msleep(10);
@@ -3193,13 +3286,7 @@ lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3193 unsigned long flags; 3286 unsigned long flags;
3194 uint8_t *pmb, *pmb_buf; 3287 uint8_t *pmb, *pmb_buf;
3195 3288
3196 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3197 dd_data = pmboxq->context1; 3289 dd_data = pmboxq->context1;
3198 /* job already timed out? */
3199 if (!dd_data) {
3200 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3201 return;
3202 }
3203 3290
3204 /* 3291 /*
3205 * The outgoing buffer is readily referred from the dma buffer, 3292 * The outgoing buffer is readily referred from the dma buffer,
@@ -3209,29 +3296,33 @@ lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3209 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb; 3296 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3210 memcpy(pmb_buf, pmb, sizeof(MAILBOX_t)); 3297 memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
3211 3298
3212 job = dd_data->context_un.mbox.set_job; 3299 /* Determine if job has been aborted */
3300
3301 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3302 job = dd_data->set_job;
3303 if (job) {
3304 /* Prevent timeout handling from trying to abort job */
3305 job->dd_data = NULL;
3306 }
3307 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3308
3309 /* Copy the mailbox data to the job if it is still active */
3310
3213 if (job) { 3311 if (job) {
3214 size = job->reply_payload.payload_len; 3312 size = job->reply_payload.payload_len;
3215 job->reply->reply_payload_rcv_len = 3313 job->reply->reply_payload_rcv_len =
3216 sg_copy_from_buffer(job->reply_payload.sg_list, 3314 sg_copy_from_buffer(job->reply_payload.sg_list,
3217 job->reply_payload.sg_cnt, 3315 job->reply_payload.sg_cnt,
3218 pmb_buf, size); 3316 pmb_buf, size);
3219 /* need to hold the lock until we set job->dd_data to NULL
3220 * to hold off the timeout handler returning to the mid-layer
3221 * while we are still processing the job.
3222 */
3223 job->dd_data = NULL;
3224 dd_data->context_un.mbox.set_job = NULL;
3225 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3226 } else {
3227 dd_data->context_un.mbox.set_job = NULL;
3228 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3229 } 3317 }
3230 3318
3319 dd_data->set_job = NULL;
3231 mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool); 3320 mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
3232 lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers); 3321 lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
3233 kfree(dd_data); 3322 kfree(dd_data);
3234 3323
3324 /* Complete the job if the job is still active */
3325
3235 if (job) { 3326 if (job) {
3236 job->reply->result = 0; 3327 job->reply->result = 0;
3237 job->job_done(job); 3328 job->job_done(job);
@@ -3377,19 +3468,22 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3377 struct lpfc_sli_config_mbox *sli_cfg_mbx; 3468 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3378 uint8_t *pmbx; 3469 uint8_t *pmbx;
3379 3470
3380 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3381 dd_data = pmboxq->context1; 3471 dd_data = pmboxq->context1;
3382 /* has the job already timed out? */ 3472
3383 if (!dd_data) { 3473 /* Determine if job has been aborted */
3384 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3474 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3385 job = NULL; 3475 job = dd_data->set_job;
3386 goto job_done_out; 3476 if (job) {
3477 /* Prevent timeout handling from trying to abort job */
3478 job->dd_data = NULL;
3387 } 3479 }
3480 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3388 3481
3389 /* 3482 /*
3390 * The outgoing buffer is readily referred from the dma buffer, 3483 * The outgoing buffer is readily referred from the dma buffer,
3391 * just need to get header part from mailboxq structure. 3484 * just need to get header part from mailboxq structure.
3392 */ 3485 */
3486
3393 pmb = (uint8_t *)&pmboxq->u.mb; 3487 pmb = (uint8_t *)&pmboxq->u.mb;
3394 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb; 3488 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3395 /* Copy the byte swapped response mailbox back to the user */ 3489 /* Copy the byte swapped response mailbox back to the user */
@@ -3406,21 +3500,18 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3406 sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len); 3500 sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
3407 } 3501 }
3408 3502
3409 job = dd_data->context_un.mbox.set_job; 3503 /* Complete the job if the job is still active */
3504
3410 if (job) { 3505 if (job) {
3411 size = job->reply_payload.payload_len; 3506 size = job->reply_payload.payload_len;
3412 job->reply->reply_payload_rcv_len = 3507 job->reply->reply_payload_rcv_len =
3413 sg_copy_from_buffer(job->reply_payload.sg_list, 3508 sg_copy_from_buffer(job->reply_payload.sg_list,
3414 job->reply_payload.sg_cnt, 3509 job->reply_payload.sg_cnt,
3415 pmb_buf, size); 3510 pmb_buf, size);
3511
3416 /* result for successful */ 3512 /* result for successful */
3417 job->reply->result = 0; 3513 job->reply->result = 0;
3418 job->dd_data = NULL; 3514
3419 /* need to hold the lock util we set job->dd_data to NULL
3420 * to hold off the timeout handler from midlayer to take
3421 * any action.
3422 */
3423 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3424 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3515 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3425 "2937 SLI_CONFIG ext-buffer maibox command " 3516 "2937 SLI_CONFIG ext-buffer maibox command "
3426 "(x%x/x%x) complete bsg job done, bsize:%d\n", 3517 "(x%x/x%x) complete bsg job done, bsize:%d\n",
@@ -3431,20 +3522,18 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3431 phba->mbox_ext_buf_ctx.mboxType, 3522 phba->mbox_ext_buf_ctx.mboxType,
3432 dma_ebuf, sta_pos_addr, 3523 dma_ebuf, sta_pos_addr,
3433 phba->mbox_ext_buf_ctx.mbx_dmabuf, 0); 3524 phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
3434 } else 3525 } else {
3435 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3436
3437job_done_out:
3438 if (!job)
3439 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3526 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3440 "2938 SLI_CONFIG ext-buffer maibox " 3527 "2938 SLI_CONFIG ext-buffer maibox "
3441 "command (x%x/x%x) failure, rc:x%x\n", 3528 "command (x%x/x%x) failure, rc:x%x\n",
3442 phba->mbox_ext_buf_ctx.nembType, 3529 phba->mbox_ext_buf_ctx.nembType,
3443 phba->mbox_ext_buf_ctx.mboxType, rc); 3530 phba->mbox_ext_buf_ctx.mboxType, rc);
3531 }
3532
3533
3444 /* state change */ 3534 /* state change */
3445 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE; 3535 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
3446 kfree(dd_data); 3536 kfree(dd_data);
3447
3448 return job; 3537 return job;
3449} 3538}
3450 3539
@@ -3461,8 +3550,10 @@ lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3461{ 3550{
3462 struct fc_bsg_job *job; 3551 struct fc_bsg_job *job;
3463 3552
3553 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3554
3464 /* handle the BSG job with mailbox command */ 3555 /* handle the BSG job with mailbox command */
3465 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS) 3556 if (!job)
3466 pmboxq->u.mb.mbxStatus = MBXERR_ERROR; 3557 pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3467 3558
3468 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3559 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
@@ -3470,15 +3561,13 @@ lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3470 "complete, ctxState:x%x, mbxStatus:x%x\n", 3561 "complete, ctxState:x%x, mbxStatus:x%x\n",
3471 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus); 3562 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3472 3563
3473 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3474
3475 if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1) 3564 if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
3476 lpfc_bsg_mbox_ext_session_reset(phba); 3565 lpfc_bsg_mbox_ext_session_reset(phba);
3477 3566
3478 /* free base driver mailbox structure memory */ 3567 /* free base driver mailbox structure memory */
3479 mempool_free(pmboxq, phba->mbox_mem_pool); 3568 mempool_free(pmboxq, phba->mbox_mem_pool);
3480 3569
3481 /* complete the bsg job if we have it */ 3570 /* if the job is still active, call job done */
3482 if (job) 3571 if (job)
3483 job->job_done(job); 3572 job->job_done(job);
3484 3573
@@ -3498,8 +3587,10 @@ lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3498{ 3587{
3499 struct fc_bsg_job *job; 3588 struct fc_bsg_job *job;
3500 3589
3590 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3591
3501 /* handle the BSG job with the mailbox command */ 3592 /* handle the BSG job with the mailbox command */
3502 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS) 3593 if (!job)
3503 pmboxq->u.mb.mbxStatus = MBXERR_ERROR; 3594 pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3504 3595
3505 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3596 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
@@ -3507,13 +3598,11 @@ lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3507 "complete, ctxState:x%x, mbxStatus:x%x\n", 3598 "complete, ctxState:x%x, mbxStatus:x%x\n",
3508 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus); 3599 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3509 3600
3510 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3511
3512 /* free all memory, including dma buffers */ 3601 /* free all memory, including dma buffers */
3513 mempool_free(pmboxq, phba->mbox_mem_pool); 3602 mempool_free(pmboxq, phba->mbox_mem_pool);
3514 lpfc_bsg_mbox_ext_session_reset(phba); 3603 lpfc_bsg_mbox_ext_session_reset(phba);
3515 3604
3516 /* complete the bsg job if we have it */ 3605 /* if the job is still active, call job done */
3517 if (job) 3606 if (job)
3518 job->job_done(job); 3607 job->job_done(job);
3519 3608
@@ -3759,9 +3848,9 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3759 /* context fields to callback function */ 3848 /* context fields to callback function */
3760 pmboxq->context1 = dd_data; 3849 pmboxq->context1 = dd_data;
3761 dd_data->type = TYPE_MBOX; 3850 dd_data->type = TYPE_MBOX;
3851 dd_data->set_job = job;
3762 dd_data->context_un.mbox.pmboxq = pmboxq; 3852 dd_data->context_un.mbox.pmboxq = pmboxq;
3763 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx; 3853 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
3764 dd_data->context_un.mbox.set_job = job;
3765 job->dd_data = dd_data; 3854 job->dd_data = dd_data;
3766 3855
3767 /* state change */ 3856 /* state change */
@@ -3928,14 +4017,14 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3928 /* context fields to callback function */ 4017 /* context fields to callback function */
3929 pmboxq->context1 = dd_data; 4018 pmboxq->context1 = dd_data;
3930 dd_data->type = TYPE_MBOX; 4019 dd_data->type = TYPE_MBOX;
4020 dd_data->set_job = job;
3931 dd_data->context_un.mbox.pmboxq = pmboxq; 4021 dd_data->context_un.mbox.pmboxq = pmboxq;
3932 dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx; 4022 dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
3933 dd_data->context_un.mbox.set_job = job;
3934 job->dd_data = dd_data; 4023 job->dd_data = dd_data;
3935 4024
3936 /* state change */ 4025 /* state change */
3937 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3938 4026
4027 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3939 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 4028 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3940 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) { 4029 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3941 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4030 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
@@ -3951,6 +4040,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3951 } 4040 }
3952 4041
3953 /* wait for additoinal external buffers */ 4042 /* wait for additoinal external buffers */
4043
3954 job->reply->result = 0; 4044 job->reply->result = 0;
3955 job->job_done(job); 4045 job->job_done(job);
3956 return SLI_CONFIG_HANDLED; 4046 return SLI_CONFIG_HANDLED;
@@ -4268,9 +4358,9 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
4268 /* context fields to callback function */ 4358 /* context fields to callback function */
4269 pmboxq->context1 = dd_data; 4359 pmboxq->context1 = dd_data;
4270 dd_data->type = TYPE_MBOX; 4360 dd_data->type = TYPE_MBOX;
4361 dd_data->set_job = job;
4271 dd_data->context_un.mbox.pmboxq = pmboxq; 4362 dd_data->context_un.mbox.pmboxq = pmboxq;
4272 dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf; 4363 dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
4273 dd_data->context_un.mbox.set_job = job;
4274 job->dd_data = dd_data; 4364 job->dd_data = dd_data;
4275 4365
4276 /* state change */ 4366 /* state change */
@@ -4455,7 +4545,6 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
4455 uint8_t *from; 4545 uint8_t *from;
4456 uint32_t size; 4546 uint32_t size;
4457 4547
4458
4459 /* in case no data is transferred */ 4548 /* in case no data is transferred */
4460 job->reply->reply_payload_rcv_len = 0; 4549 job->reply->reply_payload_rcv_len = 0;
4461 4550
@@ -4681,9 +4770,9 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
4681 /* setup context field to pass wait_queue pointer to wake function */ 4770 /* setup context field to pass wait_queue pointer to wake function */
4682 pmboxq->context1 = dd_data; 4771 pmboxq->context1 = dd_data;
4683 dd_data->type = TYPE_MBOX; 4772 dd_data->type = TYPE_MBOX;
4773 dd_data->set_job = job;
4684 dd_data->context_un.mbox.pmboxq = pmboxq; 4774 dd_data->context_un.mbox.pmboxq = pmboxq;
4685 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx; 4775 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
4686 dd_data->context_un.mbox.set_job = job;
4687 dd_data->context_un.mbox.ext = ext; 4776 dd_data->context_un.mbox.ext = ext;
4688 dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset; 4777 dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
4689 dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen; 4778 dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
@@ -4797,75 +4886,79 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
4797 struct bsg_job_data *dd_data; 4886 struct bsg_job_data *dd_data;
4798 struct fc_bsg_job *job; 4887 struct fc_bsg_job *job;
4799 IOCB_t *rsp; 4888 IOCB_t *rsp;
4800 struct lpfc_dmabuf *bmp; 4889 struct lpfc_dmabuf *bmp, *cmp, *rmp;
4801 struct lpfc_bsg_menlo *menlo; 4890 struct lpfc_bsg_menlo *menlo;
4802 unsigned long flags; 4891 unsigned long flags;
4803 struct menlo_response *menlo_resp; 4892 struct menlo_response *menlo_resp;
4893 unsigned int rsp_size;
4804 int rc = 0; 4894 int rc = 0;
4805 4895
4806 spin_lock_irqsave(&phba->ct_ev_lock, flags);
4807 dd_data = cmdiocbq->context1; 4896 dd_data = cmdiocbq->context1;
4808 if (!dd_data) { 4897 cmp = cmdiocbq->context2;
4809 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 4898 bmp = cmdiocbq->context3;
4810 return;
4811 }
4812
4813 menlo = &dd_data->context_un.menlo; 4899 menlo = &dd_data->context_un.menlo;
4814 job = menlo->set_job; 4900 rmp = menlo->rmp;
4815 job->dd_data = NULL; /* so timeout handler does not reply */
4816
4817 spin_lock(&phba->hbalock);
4818 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
4819 if (cmdiocbq->context2 && rspiocbq)
4820 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
4821 &rspiocbq->iocb, sizeof(IOCB_t));
4822 spin_unlock(&phba->hbalock);
4823
4824 bmp = menlo->bmp;
4825 rspiocbq = menlo->rspiocbq;
4826 rsp = &rspiocbq->iocb; 4901 rsp = &rspiocbq->iocb;
4827 4902
4828 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, 4903 /* Determine if job has been aborted */
4829 job->request_payload.sg_cnt, DMA_TO_DEVICE); 4904 spin_lock_irqsave(&phba->ct_ev_lock, flags);
4830 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, 4905 job = dd_data->set_job;
4831 job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 4906 if (job) {
4907 /* Prevent timeout handling from trying to abort job */
4908 job->dd_data = NULL;
4909 }
4910 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
4911
4912 /* Copy the job data or set the failing status for the job */
4832 4913
4833 /* always return the xri, this would be used in the case 4914 if (job) {
4834 * of a menlo download to allow the data to be sent as a continuation 4915 /* always return the xri, this would be used in the case
4835 * of the exchange. 4916 * of a menlo download to allow the data to be sent as a
4836 */ 4917 * continuation of the exchange.
4837 menlo_resp = (struct menlo_response *) 4918 */
4838 job->reply->reply_data.vendor_reply.vendor_rsp; 4919
4839 menlo_resp->xri = rsp->ulpContext; 4920 menlo_resp = (struct menlo_response *)
4840 if (rsp->ulpStatus) { 4921 job->reply->reply_data.vendor_reply.vendor_rsp;
4841 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { 4922 menlo_resp->xri = rsp->ulpContext;
4842 switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) { 4923 if (rsp->ulpStatus) {
4843 case IOERR_SEQUENCE_TIMEOUT: 4924 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
4844 rc = -ETIMEDOUT; 4925 switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
4845 break; 4926 case IOERR_SEQUENCE_TIMEOUT:
4846 case IOERR_INVALID_RPI: 4927 rc = -ETIMEDOUT;
4847 rc = -EFAULT; 4928 break;
4848 break; 4929 case IOERR_INVALID_RPI:
4849 default: 4930 rc = -EFAULT;
4931 break;
4932 default:
4933 rc = -EACCES;
4934 break;
4935 }
4936 } else {
4850 rc = -EACCES; 4937 rc = -EACCES;
4851 break;
4852 } 4938 }
4853 } else 4939 } else {
4854 rc = -EACCES; 4940 rsp_size = rsp->un.genreq64.bdl.bdeSize;
4855 } else 4941 job->reply->reply_payload_rcv_len =
4856 job->reply->reply_payload_rcv_len = 4942 lpfc_bsg_copy_data(rmp, &job->reply_payload,
4857 rsp->un.genreq64.bdl.bdeSize; 4943 rsp_size, 0);
4944 }
4945
4946 }
4858 4947
4859 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
4860 lpfc_sli_release_iocbq(phba, rspiocbq);
4861 lpfc_sli_release_iocbq(phba, cmdiocbq); 4948 lpfc_sli_release_iocbq(phba, cmdiocbq);
4949 lpfc_free_bsg_buffers(phba, cmp);
4950 lpfc_free_bsg_buffers(phba, rmp);
4951 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
4862 kfree(bmp); 4952 kfree(bmp);
4863 kfree(dd_data); 4953 kfree(dd_data);
4864 /* make error code available to userspace */ 4954
4865 job->reply->result = rc; 4955 /* Complete the job if active */
4866 /* complete the job back to userspace */ 4956
4867 job->job_done(job); 4957 if (job) {
4868 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 4958 job->reply->result = rc;
4959 job->job_done(job);
4960 }
4961
4869 return; 4962 return;
4870} 4963}
4871 4964
@@ -4883,17 +4976,14 @@ lpfc_menlo_cmd(struct fc_bsg_job *job)
4883{ 4976{
4884 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 4977 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
4885 struct lpfc_hba *phba = vport->phba; 4978 struct lpfc_hba *phba = vport->phba;
4886 struct lpfc_iocbq *cmdiocbq, *rspiocbq; 4979 struct lpfc_iocbq *cmdiocbq;
4887 IOCB_t *cmd, *rsp; 4980 IOCB_t *cmd;
4888 int rc = 0; 4981 int rc = 0;
4889 struct menlo_command *menlo_cmd; 4982 struct menlo_command *menlo_cmd;
4890 struct menlo_response *menlo_resp; 4983 struct menlo_response *menlo_resp;
4891 struct lpfc_dmabuf *bmp = NULL; 4984 struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
4892 int request_nseg; 4985 int request_nseg;
4893 int reply_nseg; 4986 int reply_nseg;
4894 struct scatterlist *sgel = NULL;
4895 int numbde;
4896 dma_addr_t busaddr;
4897 struct bsg_job_data *dd_data; 4987 struct bsg_job_data *dd_data;
4898 struct ulp_bde64 *bpl = NULL; 4988 struct ulp_bde64 *bpl = NULL;
4899 4989
@@ -4948,50 +5038,38 @@ lpfc_menlo_cmd(struct fc_bsg_job *job)
4948 goto free_dd; 5038 goto free_dd;
4949 } 5039 }
4950 5040
4951 cmdiocbq = lpfc_sli_get_iocbq(phba); 5041 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
4952 if (!cmdiocbq) { 5042 if (!bmp->virt) {
4953 rc = -ENOMEM; 5043 rc = -ENOMEM;
4954 goto free_bmp; 5044 goto free_bmp;
4955 } 5045 }
4956 5046
4957 rspiocbq = lpfc_sli_get_iocbq(phba); 5047 INIT_LIST_HEAD(&bmp->list);
4958 if (!rspiocbq) {
4959 rc = -ENOMEM;
4960 goto free_cmdiocbq;
4961 }
4962
4963 rsp = &rspiocbq->iocb;
4964 5048
4965 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); 5049 bpl = (struct ulp_bde64 *)bmp->virt;
4966 if (!bmp->virt) { 5050 request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
5051 cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
5052 1, bpl, &request_nseg);
5053 if (!cmp) {
4967 rc = -ENOMEM; 5054 rc = -ENOMEM;
4968 goto free_rspiocbq; 5055 goto free_bmp;
4969 } 5056 }
5057 lpfc_bsg_copy_data(cmp, &job->request_payload,
5058 job->request_payload.payload_len, 1);
4970 5059
4971 INIT_LIST_HEAD(&bmp->list); 5060 bpl += request_nseg;
4972 bpl = (struct ulp_bde64 *) bmp->virt; 5061 reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
4973 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, 5062 rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
4974 job->request_payload.sg_cnt, DMA_TO_DEVICE); 5063 bpl, &reply_nseg);
4975 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { 5064 if (!rmp) {
4976 busaddr = sg_dma_address(sgel); 5065 rc = -ENOMEM;
4977 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 5066 goto free_cmp;
4978 bpl->tus.f.bdeSize = sg_dma_len(sgel);
4979 bpl->tus.w = cpu_to_le32(bpl->tus.w);
4980 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
4981 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
4982 bpl++;
4983 } 5067 }
4984 5068
4985 reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list, 5069 cmdiocbq = lpfc_sli_get_iocbq(phba);
4986 job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 5070 if (!cmdiocbq) {
4987 for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) { 5071 rc = -ENOMEM;
4988 busaddr = sg_dma_address(sgel); 5072 goto free_rmp;
4989 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
4990 bpl->tus.f.bdeSize = sg_dma_len(sgel);
4991 bpl->tus.w = cpu_to_le32(bpl->tus.w);
4992 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
4993 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
4994 bpl++;
4995 } 5073 }
4996 5074
4997 cmd = &cmdiocbq->iocb; 5075 cmd = &cmdiocbq->iocb;
@@ -5013,11 +5091,10 @@ lpfc_menlo_cmd(struct fc_bsg_job *job)
5013 cmdiocbq->vport = phba->pport; 5091 cmdiocbq->vport = phba->pport;
5014 /* We want the firmware to timeout before we do */ 5092 /* We want the firmware to timeout before we do */
5015 cmd->ulpTimeout = MENLO_TIMEOUT - 5; 5093 cmd->ulpTimeout = MENLO_TIMEOUT - 5;
5016 cmdiocbq->context3 = bmp;
5017 cmdiocbq->context2 = rspiocbq;
5018 cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp; 5094 cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
5019 cmdiocbq->context1 = dd_data; 5095 cmdiocbq->context1 = dd_data;
5020 cmdiocbq->context2 = rspiocbq; 5096 cmdiocbq->context2 = cmp;
5097 cmdiocbq->context3 = bmp;
5021 if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) { 5098 if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
5022 cmd->ulpCommand = CMD_GEN_REQUEST64_CR; 5099 cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
5023 cmd->ulpPU = MENLO_PU; /* 3 */ 5100 cmd->ulpPU = MENLO_PU; /* 3 */
@@ -5031,29 +5108,25 @@ lpfc_menlo_cmd(struct fc_bsg_job *job)
5031 } 5108 }
5032 5109
5033 dd_data->type = TYPE_MENLO; 5110 dd_data->type = TYPE_MENLO;
5111 dd_data->set_job = job;
5034 dd_data->context_un.menlo.cmdiocbq = cmdiocbq; 5112 dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
5035 dd_data->context_un.menlo.rspiocbq = rspiocbq; 5113 dd_data->context_un.menlo.rmp = rmp;
5036 dd_data->context_un.menlo.set_job = job; 5114 job->dd_data = dd_data;
5037 dd_data->context_un.menlo.bmp = bmp;
5038 5115
5039 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 5116 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
5040 MENLO_TIMEOUT - 5); 5117 MENLO_TIMEOUT - 5);
5041 if (rc == IOCB_SUCCESS) 5118 if (rc == IOCB_SUCCESS)
5042 return 0; /* done for now */ 5119 return 0; /* done for now */
5043 5120
5044 /* iocb failed so cleanup */
5045 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
5046 job->request_payload.sg_cnt, DMA_TO_DEVICE);
5047 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
5048 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
5049
5050 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
5051
5052free_rspiocbq:
5053 lpfc_sli_release_iocbq(phba, rspiocbq);
5054free_cmdiocbq:
5055 lpfc_sli_release_iocbq(phba, cmdiocbq); 5121 lpfc_sli_release_iocbq(phba, cmdiocbq);
5122
5123free_rmp:
5124 lpfc_free_bsg_buffers(phba, rmp);
5125free_cmp:
5126 lpfc_free_bsg_buffers(phba, cmp);
5056free_bmp: 5127free_bmp:
5128 if (bmp->virt)
5129 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
5057 kfree(bmp); 5130 kfree(bmp);
5058free_dd: 5131free_dd:
5059 kfree(dd_data); 5132 kfree(dd_data);
@@ -5162,70 +5235,94 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
5162 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 5235 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
5163 struct lpfc_hba *phba = vport->phba; 5236 struct lpfc_hba *phba = vport->phba;
5164 struct lpfc_iocbq *cmdiocb; 5237 struct lpfc_iocbq *cmdiocb;
5165 struct lpfc_bsg_event *evt;
5166 struct lpfc_bsg_iocb *iocb;
5167 struct lpfc_bsg_mbox *mbox;
5168 struct lpfc_bsg_menlo *menlo;
5169 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 5238 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
5170 struct bsg_job_data *dd_data; 5239 struct bsg_job_data *dd_data;
5171 unsigned long flags; 5240 unsigned long flags;
5241 int rc = 0;
5242 LIST_HEAD(completions);
5243 struct lpfc_iocbq *check_iocb, *next_iocb;
5244
5245 /* if job's driver data is NULL, the command completed or is in the
5246 * the process of completing. In this case, return status to request
5247 * so the timeout is retried. This avoids double completion issues
5248 * and the request will be pulled off the timer queue when the
5249 * command's completion handler executes. Otherwise, prevent the
5250 * command's completion handler from executing the job done callback
5251 * and continue processing to abort the outstanding the command.
5252 */
5172 5253
5173 spin_lock_irqsave(&phba->ct_ev_lock, flags); 5254 spin_lock_irqsave(&phba->ct_ev_lock, flags);
5174 dd_data = (struct bsg_job_data *)job->dd_data; 5255 dd_data = (struct bsg_job_data *)job->dd_data;
5175 /* timeout and completion crossed paths if no dd_data */ 5256 if (dd_data) {
5176 if (!dd_data) { 5257 dd_data->set_job = NULL;
5258 job->dd_data = NULL;
5259 } else {
5177 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 5260 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5178 return 0; 5261 return -EAGAIN;
5179 } 5262 }
5180 5263
5181 switch (dd_data->type) { 5264 switch (dd_data->type) {
5182 case TYPE_IOCB: 5265 case TYPE_IOCB:
5183 iocb = &dd_data->context_un.iocb; 5266 /* Check to see if IOCB was issued to the port or not. If not,
5184 cmdiocb = iocb->cmdiocbq; 5267 * remove it from the txq queue and call cancel iocbs.
5185 /* hint to completion handler that the job timed out */ 5268 * Otherwise, call abort iotag
5186 job->reply->result = -EAGAIN; 5269 */
5187 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 5270
5188 /* this will call our completion handler */ 5271 cmdiocb = dd_data->context_un.iocb.cmdiocbq;
5189 spin_lock_irq(&phba->hbalock); 5272 spin_lock_irq(&phba->hbalock);
5190 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); 5273 list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
5274 list) {
5275 if (check_iocb == cmdiocb) {
5276 list_move_tail(&check_iocb->list, &completions);
5277 break;
5278 }
5279 }
5280 if (list_empty(&completions))
5281 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
5191 spin_unlock_irq(&phba->hbalock); 5282 spin_unlock_irq(&phba->hbalock);
5283 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5284 if (!list_empty(&completions)) {
5285 lpfc_sli_cancel_iocbs(phba, &completions,
5286 IOSTAT_LOCAL_REJECT,
5287 IOERR_SLI_ABORTED);
5288 }
5192 break; 5289 break;
5290
5193 case TYPE_EVT: 5291 case TYPE_EVT:
5194 evt = dd_data->context_un.evt;
5195 /* this event has no job anymore */
5196 evt->set_job = NULL;
5197 job->dd_data = NULL;
5198 job->reply->reply_payload_rcv_len = 0;
5199 /* Return -EAGAIN which is our way of signallying the
5200 * app to retry.
5201 */
5202 job->reply->result = -EAGAIN;
5203 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 5292 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5204 job->job_done(job);
5205 break; 5293 break;
5294
5206 case TYPE_MBOX: 5295 case TYPE_MBOX:
5207 mbox = &dd_data->context_un.mbox; 5296 /* Update the ext buf ctx state if needed */
5208 /* this mbox has no job anymore */ 5297
5209 mbox->set_job = NULL;
5210 job->dd_data = NULL;
5211 job->reply->reply_payload_rcv_len = 0;
5212 job->reply->result = -EAGAIN;
5213 /* the mbox completion handler can now be run */
5214 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5215 job->job_done(job);
5216 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT) 5298 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
5217 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS; 5299 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
5300 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5218 break; 5301 break;
5219 case TYPE_MENLO: 5302 case TYPE_MENLO:
5220 menlo = &dd_data->context_un.menlo; 5303 /* Check to see if IOCB was issued to the port or not. If not,
5221 cmdiocb = menlo->cmdiocbq; 5304 * remove it from the txq queue and call cancel iocbs.
5222 /* hint to completion handler that the job timed out */ 5305 * Otherwise, call abort iotag.
5223 job->reply->result = -EAGAIN; 5306 */
5224 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 5307
5225 /* this will call our completion handler */ 5308 cmdiocb = dd_data->context_un.menlo.cmdiocbq;
5226 spin_lock_irq(&phba->hbalock); 5309 spin_lock_irq(&phba->hbalock);
5227 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); 5310 list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
5311 list) {
5312 if (check_iocb == cmdiocb) {
5313 list_move_tail(&check_iocb->list, &completions);
5314 break;
5315 }
5316 }
5317 if (list_empty(&completions))
5318 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
5228 spin_unlock_irq(&phba->hbalock); 5319 spin_unlock_irq(&phba->hbalock);
5320 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5321 if (!list_empty(&completions)) {
5322 lpfc_sli_cancel_iocbs(phba, &completions,
5323 IOSTAT_LOCAL_REJECT,
5324 IOERR_SLI_ABORTED);
5325 }
5229 break; 5326 break;
5230 default: 5327 default:
5231 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 5328 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
@@ -5236,5 +5333,5 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
5236 * otherwise an error message will be displayed on the console 5333 * otherwise an error message will be displayed on the console
5237 * so always return success (zero) 5334 * so always return success (zero)
5238 */ 5335 */
5239 return 0; 5336 return rc;
5240} 5337}
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 76ca65dae781..7631893ae005 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -106,6 +106,7 @@ void lpfc_cleanup_discovery_resources(struct lpfc_vport *);
106void lpfc_cleanup(struct lpfc_vport *); 106void lpfc_cleanup(struct lpfc_vport *);
107void lpfc_disc_timeout(unsigned long); 107void lpfc_disc_timeout(unsigned long);
108 108
109int lpfc_unregister_fcf_prep(struct lpfc_hba *);
109struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t); 110struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
110struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_vport *, uint16_t); 111struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
111void lpfc_worker_wake_up(struct lpfc_hba *); 112void lpfc_worker_wake_up(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 08d156a9094f..bbed8471bf0b 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -484,6 +484,7 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
484 vport->port_state = LPFC_FABRIC_CFG_LINK; 484 vport->port_state = LPFC_FABRIC_CFG_LINK;
485 memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam)); 485 memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
486 lpfc_reg_vfi(mboxq, vport, dmabuf->phys); 486 lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
487
487 mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi; 488 mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
488 mboxq->vport = vport; 489 mboxq->vport = vport;
489 mboxq->context1 = dmabuf; 490 mboxq->context1 = dmabuf;
@@ -700,6 +701,20 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
700 } 701 }
701 } 702 }
702 703
704 /*
705 * For FC we need to do some special processing because of the SLI
706 * Port's default settings of the Common Service Parameters.
707 */
708 if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) {
709 /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
710 if ((phba->sli_rev == LPFC_SLI_REV4) && fabric_param_changed)
711 lpfc_unregister_fcf_prep(phba);
712
713 /* This should just update the VFI CSPs*/
714 if (vport->fc_flag & FC_VFI_REGISTERED)
715 lpfc_issue_reg_vfi(vport);
716 }
717
703 if (fabric_param_changed && 718 if (fabric_param_changed &&
704 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 719 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
705 720
@@ -6225,7 +6240,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
6225 spin_unlock_irq(&phba->hbalock); 6240 spin_unlock_irq(&phba->hbalock);
6226 } 6241 }
6227 6242
6228 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) 6243 if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq))
6229 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); 6244 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
6230} 6245}
6231 6246
@@ -6279,7 +6294,6 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
6279 continue; 6294 continue;
6280 6295
6281 list_move_tail(&piocb->list, &completions); 6296 list_move_tail(&piocb->list, &completions);
6282 pring->txq_cnt--;
6283 } 6297 }
6284 6298
6285 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 6299 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
@@ -6339,7 +6353,6 @@ lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
6339 cmd->ulpCommand == CMD_ABORT_XRI_CN) 6353 cmd->ulpCommand == CMD_ABORT_XRI_CN)
6340 continue; 6354 continue;
6341 list_move_tail(&piocb->list, &completions); 6355 list_move_tail(&piocb->list, &completions);
6342 pring->txq_cnt--;
6343 } 6356 }
6344 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 6357 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
6345 if (piocb->iocb_flag & LPFC_IO_LIBDFC) 6358 if (piocb->iocb_flag & LPFC_IO_LIBDFC)
@@ -8065,7 +8078,7 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
8065 rxid, 1); 8078 rxid, 1);
8066 8079
8067 /* Check if TXQ queue needs to be serviced */ 8080 /* Check if TXQ queue needs to be serviced */
8068 if (pring->txq_cnt) 8081 if (!(list_empty(&pring->txq)))
8069 lpfc_worker_wake_up(phba); 8082 lpfc_worker_wake_up(phba);
8070 return; 8083 return;
8071 } 8084 }
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index bfda18467ee6..326e05a65a73 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -691,12 +691,15 @@ lpfc_work_done(struct lpfc_hba *phba)
691 /* Set the lpfc data pending flag */ 691 /* Set the lpfc data pending flag */
692 set_bit(LPFC_DATA_READY, &phba->data_flags); 692 set_bit(LPFC_DATA_READY, &phba->data_flags);
693 } else { 693 } else {
694 pring->flag &= ~LPFC_DEFERRED_RING_EVENT; 694 if (phba->link_state >= LPFC_LINK_UP) {
695 lpfc_sli_handle_slow_ring_event(phba, pring, 695 pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
696 (status & 696 lpfc_sli_handle_slow_ring_event(phba, pring,
697 HA_RXMASK)); 697 (status &
698 HA_RXMASK));
699 }
698 } 700 }
699 if ((phba->sli_rev == LPFC_SLI_REV4) && pring->txq_cnt) 701 if ((phba->sli_rev == LPFC_SLI_REV4) &
702 (!list_empty(&pring->txq)))
700 lpfc_drain_txq(phba); 703 lpfc_drain_txq(phba);
701 /* 704 /*
702 * Turn on Ring interrupts 705 * Turn on Ring interrupts
@@ -1792,6 +1795,8 @@ lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
1792 virt_addr = mboxq->sge_array->addr[0]; 1795 virt_addr = mboxq->sge_array->addr[0];
1793 1796
1794 shdr = (union lpfc_sli4_cfg_shdr *)virt_addr; 1797 shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
1798 lpfc_sli_pcimem_bcopy(shdr, shdr,
1799 sizeof(union lpfc_sli4_cfg_shdr));
1795 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 1800 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1796 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 1801 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
1797 if (shdr_status || shdr_add_status) { 1802 if (shdr_status || shdr_add_status) {
@@ -2888,6 +2893,11 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2888 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 2893 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2889 goto out_free_mem; 2894 goto out_free_mem;
2890 } 2895 }
2896
2897 /* If the VFI is already registered, there is nothing else to do */
2898 if (vport->fc_flag & FC_VFI_REGISTERED)
2899 goto out_free_mem;
2900
2891 /* The VPI is implicitly registered when the VFI is registered */ 2901 /* The VPI is implicitly registered when the VFI is registered */
2892 spin_lock_irq(shost->host_lock); 2902 spin_lock_irq(shost->host_lock);
2893 vport->vpi_state |= LPFC_VPI_REGISTERED; 2903 vport->vpi_state |= LPFC_VPI_REGISTERED;
@@ -2980,6 +2990,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
2980 struct lpfc_dmabuf *mp; 2990 struct lpfc_dmabuf *mp;
2981 int rc; 2991 int rc;
2982 struct fcf_record *fcf_record; 2992 struct fcf_record *fcf_record;
2993 uint32_t fc_flags = 0;
2983 2994
2984 spin_lock_irq(&phba->hbalock); 2995 spin_lock_irq(&phba->hbalock);
2985 switch (bf_get(lpfc_mbx_read_top_link_spd, la)) { 2996 switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
@@ -3011,11 +3022,8 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
3011 "1309 Link Up Event npiv not supported in loop " 3022 "1309 Link Up Event npiv not supported in loop "
3012 "topology\n"); 3023 "topology\n");
3013 /* Get Loop Map information */ 3024 /* Get Loop Map information */
3014 if (bf_get(lpfc_mbx_read_top_il, la)) { 3025 if (bf_get(lpfc_mbx_read_top_il, la))
3015 spin_lock(shost->host_lock); 3026 fc_flags |= FC_LBIT;
3016 vport->fc_flag |= FC_LBIT;
3017 spin_unlock(shost->host_lock);
3018 }
3019 3027
3020 vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la); 3028 vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
3021 i = la->lilpBde64.tus.f.bdeSize; 3029 i = la->lilpBde64.tus.f.bdeSize;
@@ -3064,12 +3072,16 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
3064 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 3072 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
3065 } 3073 }
3066 vport->fc_myDID = phba->fc_pref_DID; 3074 vport->fc_myDID = phba->fc_pref_DID;
3067 spin_lock(shost->host_lock); 3075 fc_flags |= FC_LBIT;
3068 vport->fc_flag |= FC_LBIT;
3069 spin_unlock(shost->host_lock);
3070 } 3076 }
3071 spin_unlock_irq(&phba->hbalock); 3077 spin_unlock_irq(&phba->hbalock);
3072 3078
3079 if (fc_flags) {
3080 spin_lock_irq(shost->host_lock);
3081 vport->fc_flag |= fc_flags;
3082 spin_unlock_irq(shost->host_lock);
3083 }
3084
3073 lpfc_linkup(phba); 3085 lpfc_linkup(phba);
3074 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3086 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3075 if (!sparam_mbox) 3087 if (!sparam_mbox)
@@ -3237,8 +3249,7 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3237 vport->fc_flag &= ~FC_BYPASSED_MODE; 3249 vport->fc_flag &= ~FC_BYPASSED_MODE;
3238 spin_unlock_irq(shost->host_lock); 3250 spin_unlock_irq(shost->host_lock);
3239 3251
3240 if ((phba->fc_eventTag < la->eventTag) || 3252 if (phba->fc_eventTag <= la->eventTag) {
3241 (phba->fc_eventTag == la->eventTag)) {
3242 phba->fc_stat.LinkMultiEvent++; 3253 phba->fc_stat.LinkMultiEvent++;
3243 if (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) 3254 if (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)
3244 if (phba->fc_eventTag != 0) 3255 if (phba->fc_eventTag != 0)
@@ -3246,16 +3257,18 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3246 } 3257 }
3247 3258
3248 phba->fc_eventTag = la->eventTag; 3259 phba->fc_eventTag = la->eventTag;
3249 spin_lock_irq(&phba->hbalock); 3260 if (phba->sli_rev < LPFC_SLI_REV4) {
3250 if (bf_get(lpfc_mbx_read_top_mm, la)) 3261 spin_lock_irq(&phba->hbalock);
3251 phba->sli.sli_flag |= LPFC_MENLO_MAINT; 3262 if (bf_get(lpfc_mbx_read_top_mm, la))
3252 else 3263 phba->sli.sli_flag |= LPFC_MENLO_MAINT;
3253 phba->sli.sli_flag &= ~LPFC_MENLO_MAINT; 3264 else
3254 spin_unlock_irq(&phba->hbalock); 3265 phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
3266 spin_unlock_irq(&phba->hbalock);
3267 }
3255 3268
3256 phba->link_events++; 3269 phba->link_events++;
3257 if ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) && 3270 if ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) &&
3258 (!bf_get(lpfc_mbx_read_top_mm, la))) { 3271 !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) {
3259 phba->fc_stat.LinkUp++; 3272 phba->fc_stat.LinkUp++;
3260 if (phba->link_flag & LS_LOOPBACK_MODE) { 3273 if (phba->link_flag & LS_LOOPBACK_MODE) {
3261 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3274 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -3300,8 +3313,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3300 bf_get(lpfc_mbx_read_top_fa, la)); 3313 bf_get(lpfc_mbx_read_top_fa, la));
3301 lpfc_mbx_issue_link_down(phba); 3314 lpfc_mbx_issue_link_down(phba);
3302 } 3315 }
3303 if ((bf_get(lpfc_mbx_read_top_mm, la)) && 3316 if ((phba->sli.sli_flag & LPFC_MENLO_MAINT) &&
3304 (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)) { 3317 ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP))) {
3305 if (phba->link_state != LPFC_LINK_DOWN) { 3318 if (phba->link_state != LPFC_LINK_DOWN) {
3306 phba->fc_stat.LinkDown++; 3319 phba->fc_stat.LinkDown++;
3307 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3320 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -3329,8 +3342,9 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3329 } 3342 }
3330 } 3343 }
3331 3344
3332 if (bf_get(lpfc_mbx_read_top_fa, la)) { 3345 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3333 if (bf_get(lpfc_mbx_read_top_mm, la)) 3346 bf_get(lpfc_mbx_read_top_fa, la)) {
3347 if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
3334 lpfc_issue_clear_la(phba, vport); 3348 lpfc_issue_clear_la(phba, vport);
3335 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 3349 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
3336 "1311 fa %d\n", 3350 "1311 fa %d\n",
@@ -4354,7 +4368,6 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
4354 with an error */ 4368 with an error */
4355 list_move_tail(&iocb->list, 4369 list_move_tail(&iocb->list,
4356 &completions); 4370 &completions);
4357 pring->txq_cnt--;
4358 } 4371 }
4359 } 4372 }
4360 spin_unlock_irq(&phba->hbalock); 4373 spin_unlock_irq(&phba->hbalock);
@@ -5055,7 +5068,6 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
5055 (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) { 5068 (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
5056 5069
5057 list_move_tail(&iocb->list, &completions); 5070 list_move_tail(&iocb->list, &completions);
5058 pring->txq_cnt--;
5059 } 5071 }
5060 } 5072 }
5061 5073
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 6e93b886cd4d..1dd2f6f0a127 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1958,6 +1958,9 @@ struct lpfc_mbx_init_vfi {
1958 1958
1959struct lpfc_mbx_reg_vfi { 1959struct lpfc_mbx_reg_vfi {
1960 uint32_t word1; 1960 uint32_t word1;
1961#define lpfc_reg_vfi_upd_SHIFT 29
1962#define lpfc_reg_vfi_upd_MASK 0x00000001
1963#define lpfc_reg_vfi_upd_WORD word1
1961#define lpfc_reg_vfi_vp_SHIFT 28 1964#define lpfc_reg_vfi_vp_SHIFT 28
1962#define lpfc_reg_vfi_vp_MASK 0x00000001 1965#define lpfc_reg_vfi_vp_MASK 0x00000001
1963#define lpfc_reg_vfi_vp_WORD word1 1966#define lpfc_reg_vfi_vp_WORD word1
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 314b4f61b9e3..5da297290262 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -839,7 +839,6 @@ lpfc_hba_down_post_s3(struct lpfc_hba *phba)
839 * way, nothing should be on txcmplq as it will NEVER complete. 839 * way, nothing should be on txcmplq as it will NEVER complete.
840 */ 840 */
841 list_splice_init(&pring->txcmplq, &completions); 841 list_splice_init(&pring->txcmplq, &completions);
842 pring->txcmplq_cnt = 0;
843 spin_unlock_irq(&phba->hbalock); 842 spin_unlock_irq(&phba->hbalock);
844 843
845 /* Cancel all the IOCBs from the completions list */ 844 /* Cancel all the IOCBs from the completions list */
@@ -2915,9 +2914,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
2915 sglq_entry->state = SGL_FREED; 2914 sglq_entry->state = SGL_FREED;
2916 list_add_tail(&sglq_entry->list, &els_sgl_list); 2915 list_add_tail(&sglq_entry->list, &els_sgl_list);
2917 } 2916 }
2918 spin_lock(&phba->hbalock); 2917 spin_lock_irq(&phba->hbalock);
2919 list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list); 2918 list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
2920 spin_unlock(&phba->hbalock); 2919 spin_unlock_irq(&phba->hbalock);
2921 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) { 2920 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
2922 /* els xri-sgl shrunk */ 2921 /* els xri-sgl shrunk */
2923 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt; 2922 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
@@ -3015,9 +3014,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
3015 psb->cur_iocbq.sli4_lxritag = lxri; 3014 psb->cur_iocbq.sli4_lxritag = lxri;
3016 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3015 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3017 } 3016 }
3018 spin_lock(&phba->scsi_buf_list_lock); 3017 spin_lock_irq(&phba->scsi_buf_list_lock);
3019 list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list); 3018 list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list);
3020 spin_unlock(&phba->scsi_buf_list_lock); 3019 spin_unlock_irq(&phba->scsi_buf_list_lock);
3021 3020
3022 return 0; 3021 return 0;
3023 3022
@@ -4004,6 +4003,52 @@ lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
4004} 4003}
4005 4004
4006/** 4005/**
4006 * lpfc_sli4_perform_inuse_fcf_recovery - Perform inuse fcf recovery
4007 * @phba: pointer to lpfc hba data structure.
4008 *
4009 * This routine performs FCF recovery when the in-use FCF is either dead or
4010 * has been modified.
4011 **/
4012static void
4013lpfc_sli4_perform_inuse_fcf_recovery(struct lpfc_hba *phba,
4014 struct lpfc_acqe_fip *acqe_fip)
4015{
4016 int rc;
4017
4018 spin_lock_irq(&phba->hbalock);
4019 /* Mark the fast failover process in progress */
4020 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
4021 spin_unlock_irq(&phba->hbalock);
4022
4023 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
4024 "2771 Start FCF fast failover process due to in-use "
4025 "FCF DEAD/MODIFIED event: evt_tag:x%x, index:x%x\n",
4026 acqe_fip->event_tag, acqe_fip->index);
4027 rc = lpfc_sli4_redisc_fcf_table(phba);
4028 if (rc) {
4029 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
4030 "2772 Issue FCF rediscover mailbox command "
4031 "failed, fail through to FCF dead event\n");
4032 spin_lock_irq(&phba->hbalock);
4033 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
4034 spin_unlock_irq(&phba->hbalock);
4035 /*
4036 * Last resort will fail over by treating this as a link
4037 * down to FCF registration.
4038 */
4039 lpfc_sli4_fcf_dead_failthrough(phba);
4040 } else {
4041 /* Reset FCF roundrobin bmask for new discovery */
4042 lpfc_sli4_clear_fcf_rr_bmask(phba);
4043 /*
4044 * Handling fast FCF failover to a DEAD FCF event is
4045 * considered equivalent to receiving CVL to all vports.
4046 */
4047 lpfc_sli4_perform_all_vport_cvl(phba);
4048 }
4049}
4050
4051/**
4007 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event 4052 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
4008 * @phba: pointer to lpfc hba data structure. 4053 * @phba: pointer to lpfc hba data structure.
4009 * @acqe_link: pointer to the async fcoe completion queue entry. 4054 * @acqe_link: pointer to the async fcoe completion queue entry.
@@ -4068,9 +4113,22 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
4068 break; 4113 break;
4069 } 4114 }
4070 4115
4071 /* If the FCF has been in discovered state, do nothing. */ 4116 /* If FCF has been in discovered state, perform rediscovery
4072 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { 4117 * only if the FCF with the same index of the in-use FCF got
4118 * modified during normal operation. Otherwise, do nothing.
4119 */
4120 if (phba->pport->port_state > LPFC_FLOGI) {
4073 spin_unlock_irq(&phba->hbalock); 4121 spin_unlock_irq(&phba->hbalock);
4122 if (phba->fcf.current_rec.fcf_indx ==
4123 acqe_fip->index) {
4124 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
4125 "3300 In-use FCF (%d) "
4126 "modified, perform FCF "
4127 "rediscovery\n",
4128 acqe_fip->index);
4129 lpfc_sli4_perform_inuse_fcf_recovery(phba,
4130 acqe_fip);
4131 }
4074 break; 4132 break;
4075 } 4133 }
4076 spin_unlock_irq(&phba->hbalock); 4134 spin_unlock_irq(&phba->hbalock);
@@ -4123,39 +4181,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
4123 * is no longer valid as we are not in the middle of FCF 4181 * is no longer valid as we are not in the middle of FCF
4124 * failover process already. 4182 * failover process already.
4125 */ 4183 */
4126 spin_lock_irq(&phba->hbalock); 4184 lpfc_sli4_perform_inuse_fcf_recovery(phba, acqe_fip);
4127 /* Mark the fast failover process in progress */
4128 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
4129 spin_unlock_irq(&phba->hbalock);
4130
4131 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
4132 "2771 Start FCF fast failover process due to "
4133 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
4134 "\n", acqe_fip->event_tag, acqe_fip->index);
4135 rc = lpfc_sli4_redisc_fcf_table(phba);
4136 if (rc) {
4137 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
4138 LOG_DISCOVERY,
4139 "2772 Issue FCF rediscover mabilbox "
4140 "command failed, fail through to FCF "
4141 "dead event\n");
4142 spin_lock_irq(&phba->hbalock);
4143 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
4144 spin_unlock_irq(&phba->hbalock);
4145 /*
4146 * Last resort will fail over by treating this
4147 * as a link down to FCF registration.
4148 */
4149 lpfc_sli4_fcf_dead_failthrough(phba);
4150 } else {
4151 /* Reset FCF roundrobin bmask for new discovery */
4152 lpfc_sli4_clear_fcf_rr_bmask(phba);
4153 /*
4154 * Handling fast FCF failover to a DEAD FCF event is
4155 * considered equalivant to receiving CVL to all vports.
4156 */
4157 lpfc_sli4_perform_all_vport_cvl(phba);
4158 }
4159 break; 4185 break;
4160 case LPFC_FIP_EVENT_TYPE_CVL: 4186 case LPFC_FIP_EVENT_TYPE_CVL:
4161 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 4187 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index efc9cd9def8b..a7a9fa468308 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2126,32 +2126,40 @@ void
2126lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys) 2126lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
2127{ 2127{
2128 struct lpfc_mbx_reg_vfi *reg_vfi; 2128 struct lpfc_mbx_reg_vfi *reg_vfi;
2129 struct lpfc_hba *phba = vport->phba;
2129 2130
2130 memset(mbox, 0, sizeof(*mbox)); 2131 memset(mbox, 0, sizeof(*mbox));
2131 reg_vfi = &mbox->u.mqe.un.reg_vfi; 2132 reg_vfi = &mbox->u.mqe.un.reg_vfi;
2132 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI); 2133 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
2133 bf_set(lpfc_reg_vfi_vp, reg_vfi, 1); 2134 bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
2134 bf_set(lpfc_reg_vfi_vfi, reg_vfi, 2135 bf_set(lpfc_reg_vfi_vfi, reg_vfi,
2135 vport->phba->sli4_hba.vfi_ids[vport->vfi]); 2136 phba->sli4_hba.vfi_ids[vport->vfi]);
2136 bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi); 2137 bf_set(lpfc_reg_vfi_fcfi, reg_vfi, phba->fcf.fcfi);
2137 bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->phba->vpi_ids[vport->vpi]); 2138 bf_set(lpfc_reg_vfi_vpi, reg_vfi, phba->vpi_ids[vport->vpi]);
2138 memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name)); 2139 memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
2139 reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]); 2140 reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
2140 reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]); 2141 reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
2141 reg_vfi->e_d_tov = vport->phba->fc_edtov; 2142 reg_vfi->e_d_tov = phba->fc_edtov;
2142 reg_vfi->r_a_tov = vport->phba->fc_ratov; 2143 reg_vfi->r_a_tov = phba->fc_ratov;
2143 reg_vfi->bde.addrHigh = putPaddrHigh(phys); 2144 reg_vfi->bde.addrHigh = putPaddrHigh(phys);
2144 reg_vfi->bde.addrLow = putPaddrLow(phys); 2145 reg_vfi->bde.addrLow = putPaddrLow(phys);
2145 reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam); 2146 reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
2146 reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; 2147 reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2147 bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID); 2148 bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
2149
2150 /* Only FC supports upd bit */
2151 if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) &&
2152 (vport->fc_flag & FC_VFI_REGISTERED)) {
2153 bf_set(lpfc_reg_vfi_vp, reg_vfi, 0);
2154 bf_set(lpfc_reg_vfi_upd, reg_vfi, 1);
2155 }
2148 lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX, 2156 lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
2149 "3134 Register VFI, mydid:x%x, fcfi:%d, " 2157 "3134 Register VFI, mydid:x%x, fcfi:%d, "
2150 " vfi:%d, vpi:%d, fc_pname:%x%x\n", 2158 " vfi:%d, vpi:%d, fc_pname:%x%x\n",
2151 vport->fc_myDID, 2159 vport->fc_myDID,
2152 vport->phba->fcf.fcfi, 2160 phba->fcf.fcfi,
2153 vport->phba->sli4_hba.vfi_ids[vport->vfi], 2161 phba->sli4_hba.vfi_ids[vport->vfi],
2154 vport->phba->vpi_ids[vport->vpi], 2162 phba->vpi_ids[vport->vpi],
2155 reg_vfi->wwn[0], reg_vfi->wwn[1]); 2163 reg_vfi->wwn[0], reg_vfi->wwn[1]);
2156} 2164}
2157 2165
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 46128c679202..82f4d3542289 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -226,7 +226,6 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
226 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) { 226 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
227 /* It matches, so dequeue and call compl with an error */ 227 /* It matches, so dequeue and call compl with an error */
228 list_move_tail(&iocb->list, &completions); 228 list_move_tail(&iocb->list, &completions);
229 pring->txq_cnt--;
230 } 229 }
231 } 230 }
232 231
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 98af07c6e300..74b8710e1e90 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -732,7 +732,7 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
732 psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); 732 psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
733 psb->exch_busy = 0; 733 psb->exch_busy = 0;
734 spin_unlock_irqrestore(&phba->hbalock, iflag); 734 spin_unlock_irqrestore(&phba->hbalock, iflag);
735 if (pring->txq_cnt) 735 if (!list_empty(&pring->txq))
736 lpfc_worker_wake_up(phba); 736 lpfc_worker_wake_up(phba);
737 return; 737 return;
738 738
@@ -885,9 +885,9 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
885 int num_posted, rc = 0; 885 int num_posted, rc = 0;
886 886
887 /* get all SCSI buffers need to repost to a local list */ 887 /* get all SCSI buffers need to repost to a local list */
888 spin_lock(&phba->scsi_buf_list_lock); 888 spin_lock_irq(&phba->scsi_buf_list_lock);
889 list_splice_init(&phba->lpfc_scsi_buf_list, &post_sblist); 889 list_splice_init(&phba->lpfc_scsi_buf_list, &post_sblist);
890 spin_unlock(&phba->scsi_buf_list_lock); 890 spin_unlock_irq(&phba->scsi_buf_list_lock);
891 891
892 /* post the list of scsi buffer sgls to port if available */ 892 /* post the list of scsi buffer sgls to port if available */
893 if (!list_empty(&post_sblist)) { 893 if (!list_empty(&post_sblist)) {
@@ -4246,7 +4246,7 @@ static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
4246 unsigned long poll_tmo_expires = 4246 unsigned long poll_tmo_expires =
4247 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo)); 4247 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
4248 4248
4249 if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt) 4249 if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
4250 mod_timer(&phba->fcp_poll_timer, 4250 mod_timer(&phba->fcp_poll_timer,
4251 poll_tmo_expires); 4251 poll_tmo_expires);
4252} 4252}
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index d43faf34c1e2..35dd17eb0f27 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -873,14 +873,16 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
873 xritag, rxid, ndlp->nlp_DID, send_rrq); 873 xritag, rxid, ndlp->nlp_DID, send_rrq);
874 return -EINVAL; 874 return -EINVAL;
875 } 875 }
876 rrq->send_rrq = send_rrq; 876 if (phba->cfg_enable_rrq == 1)
877 rrq->send_rrq = send_rrq;
878 else
879 rrq->send_rrq = 0;
877 rrq->xritag = xritag; 880 rrq->xritag = xritag;
878 rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1); 881 rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
879 rrq->ndlp = ndlp; 882 rrq->ndlp = ndlp;
880 rrq->nlp_DID = ndlp->nlp_DID; 883 rrq->nlp_DID = ndlp->nlp_DID;
881 rrq->vport = ndlp->vport; 884 rrq->vport = ndlp->vport;
882 rrq->rxid = rxid; 885 rrq->rxid = rxid;
883 rrq->send_rrq = send_rrq;
884 spin_lock_irqsave(&phba->hbalock, iflags); 886 spin_lock_irqsave(&phba->hbalock, iflags);
885 empty = list_empty(&phba->active_rrq_list); 887 empty = list_empty(&phba->active_rrq_list);
886 list_add_tail(&rrq->list, &phba->active_rrq_list); 888 list_add_tail(&rrq->list, &phba->active_rrq_list);
@@ -1009,6 +1011,18 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1009 else 1011 else
1010 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag); 1012 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
1011 1013
1014 /*
1015 ** This should have been removed from the txcmplq before calling
1016 ** iocbq_release. The normal completion
1017 ** path should have already done the list_del_init.
1018 */
1019 if (unlikely(!list_empty(&iocbq->list))) {
1020 if (iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)
1021 iocbq->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
1022 list_del_init(&iocbq->list);
1023 }
1024
1025
1012 if (sglq) { 1026 if (sglq) {
1013 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) && 1027 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
1014 (sglq->state != SGL_XRI_ABORTED)) { 1028 (sglq->state != SGL_XRI_ABORTED)) {
@@ -1025,7 +1039,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1025 &phba->sli4_hba.lpfc_sgl_list); 1039 &phba->sli4_hba.lpfc_sgl_list);
1026 1040
1027 /* Check if TXQ queue needs to be serviced */ 1041 /* Check if TXQ queue needs to be serviced */
1028 if (pring->txq_cnt) 1042 if (!list_empty(&pring->txq))
1029 lpfc_worker_wake_up(phba); 1043 lpfc_worker_wake_up(phba);
1030 } 1044 }
1031 } 1045 }
@@ -1057,6 +1071,14 @@ __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1057 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 1071 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1058 1072
1059 /* 1073 /*
1074 ** This should have been removed from the txcmplq before calling
1075 ** iocbq_release. The normal completion
1076 ** path should have already done the list_del_init.
1077 */
1078 if (unlikely(!list_empty(&iocbq->list)))
1079 list_del_init(&iocbq->list);
1080
1081 /*
1060 * Clean all volatile data fields, preserve iotag and node struct. 1082 * Clean all volatile data fields, preserve iotag and node struct.
1061 */ 1083 */
1062 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 1084 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
@@ -1122,7 +1144,6 @@ lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1122 1144
1123 while (!list_empty(iocblist)) { 1145 while (!list_empty(iocblist)) {
1124 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list); 1146 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
1125
1126 if (!piocb->iocb_cmpl) 1147 if (!piocb->iocb_cmpl)
1127 lpfc_sli_release_iocbq(phba, piocb); 1148 lpfc_sli_release_iocbq(phba, piocb);
1128 else { 1149 else {
@@ -1310,9 +1331,6 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1310{ 1331{
1311 list_add_tail(&piocb->list, &pring->txcmplq); 1332 list_add_tail(&piocb->list, &pring->txcmplq);
1312 piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ; 1333 piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
1313 pring->txcmplq_cnt++;
1314 if (pring->txcmplq_cnt > pring->txcmplq_max)
1315 pring->txcmplq_max = pring->txcmplq_cnt;
1316 1334
1317 if ((unlikely(pring->ringno == LPFC_ELS_RING)) && 1335 if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1318 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 1336 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
@@ -1344,8 +1362,6 @@ lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1344 struct lpfc_iocbq *cmd_iocb; 1362 struct lpfc_iocbq *cmd_iocb;
1345 1363
1346 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list); 1364 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
1347 if (cmd_iocb != NULL)
1348 pring->txq_cnt--;
1349 return cmd_iocb; 1365 return cmd_iocb;
1350} 1366}
1351 1367
@@ -1614,8 +1630,9 @@ lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1614 * (c) link attention events can be processed (fcp ring only) 1630 * (c) link attention events can be processed (fcp ring only)
1615 * (d) IOCB processing is not blocked by the outstanding mbox command. 1631 * (d) IOCB processing is not blocked by the outstanding mbox command.
1616 */ 1632 */
1617 if (pring->txq_cnt && 1633
1618 lpfc_is_link_up(phba) && 1634 if (lpfc_is_link_up(phba) &&
1635 (!list_empty(&pring->txq)) &&
1619 (pring->ringno != phba->sli.fcp_ring || 1636 (pring->ringno != phba->sli.fcp_ring ||
1620 phba->sli.sli_flag & LPFC_PROCESS_LA)) { 1637 phba->sli.sli_flag & LPFC_PROCESS_LA)) {
1621 1638
@@ -2612,7 +2629,6 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2612 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2629 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2613 list_del_init(&cmd_iocb->list); 2630 list_del_init(&cmd_iocb->list);
2614 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { 2631 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2615 pring->txcmplq_cnt--;
2616 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 2632 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2617 } 2633 }
2618 return cmd_iocb; 2634 return cmd_iocb;
@@ -2650,7 +2666,6 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2650 /* remove from txcmpl queue list */ 2666 /* remove from txcmpl queue list */
2651 list_del_init(&cmd_iocb->list); 2667 list_del_init(&cmd_iocb->list);
2652 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 2668 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2653 pring->txcmplq_cnt--;
2654 return cmd_iocb; 2669 return cmd_iocb;
2655 } 2670 }
2656 } 2671 }
@@ -3499,7 +3514,6 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3499 */ 3514 */
3500 spin_lock_irq(&phba->hbalock); 3515 spin_lock_irq(&phba->hbalock);
3501 list_splice_init(&pring->txq, &completions); 3516 list_splice_init(&pring->txq, &completions);
3502 pring->txq_cnt = 0;
3503 3517
3504 /* Next issue ABTS for everything on the txcmplq */ 3518 /* Next issue ABTS for everything on the txcmplq */
3505 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3519 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
@@ -3536,11 +3550,9 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
3536 spin_lock_irq(&phba->hbalock); 3550 spin_lock_irq(&phba->hbalock);
3537 /* Retrieve everything on txq */ 3551 /* Retrieve everything on txq */
3538 list_splice_init(&pring->txq, &txq); 3552 list_splice_init(&pring->txq, &txq);
3539 pring->txq_cnt = 0;
3540 3553
3541 /* Retrieve everything on the txcmplq */ 3554 /* Retrieve everything on the txcmplq */
3542 list_splice_init(&pring->txcmplq, &txcmplq); 3555 list_splice_init(&pring->txcmplq, &txcmplq);
3543 pring->txcmplq_cnt = 0;
3544 3556
3545 /* Indicate the I/O queues are flushed */ 3557 /* Indicate the I/O queues are flushed */
3546 phba->hba_flag |= HBA_FCP_IOQ_FLUSH; 3558 phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
@@ -5988,9 +6000,9 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
5988 LIST_HEAD(post_sgl_list); 6000 LIST_HEAD(post_sgl_list);
5989 LIST_HEAD(free_sgl_list); 6001 LIST_HEAD(free_sgl_list);
5990 6002
5991 spin_lock(&phba->hbalock); 6003 spin_lock_irq(&phba->hbalock);
5992 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list); 6004 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
5993 spin_unlock(&phba->hbalock); 6005 spin_unlock_irq(&phba->hbalock);
5994 6006
5995 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 6007 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
5996 &allc_sgl_list, list) { 6008 &allc_sgl_list, list) {
@@ -6091,10 +6103,10 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
6091 6103
6092 /* push els sgls posted to the available list */ 6104 /* push els sgls posted to the available list */
6093 if (!list_empty(&post_sgl_list)) { 6105 if (!list_empty(&post_sgl_list)) {
6094 spin_lock(&phba->hbalock); 6106 spin_lock_irq(&phba->hbalock);
6095 list_splice_init(&post_sgl_list, 6107 list_splice_init(&post_sgl_list,
6096 &phba->sli4_hba.lpfc_sgl_list); 6108 &phba->sli4_hba.lpfc_sgl_list);
6097 spin_unlock(&phba->hbalock); 6109 spin_unlock_irq(&phba->hbalock);
6098 } else { 6110 } else {
6099 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6111 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6100 "3161 Failure to post els sgl to port.\n"); 6112 "3161 Failure to post els sgl to port.\n");
@@ -7615,7 +7627,6 @@ __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7615{ 7627{
7616 /* Insert the caller's iocb in the txq tail for later processing. */ 7628 /* Insert the caller's iocb in the txq tail for later processing. */
7617 list_add_tail(&piocb->list, &pring->txq); 7629 list_add_tail(&piocb->list, &pring->txq);
7618 pring->txq_cnt++;
7619} 7630}
7620 7631
7621/** 7632/**
@@ -8387,7 +8398,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
8387 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 8398 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
8388 sglq = NULL; 8399 sglq = NULL;
8389 else { 8400 else {
8390 if (pring->txq_cnt) { 8401 if (!list_empty(&pring->txq)) {
8391 if (!(flag & SLI_IOCB_RET_IOCB)) { 8402 if (!(flag & SLI_IOCB_RET_IOCB)) {
8392 __lpfc_sli_ringtx_put(phba, 8403 __lpfc_sli_ringtx_put(phba,
8393 pring, piocb); 8404 pring, piocb);
@@ -9055,7 +9066,6 @@ lpfc_sli_host_down(struct lpfc_vport *vport)
9055 if (iocb->vport != vport) 9066 if (iocb->vport != vport)
9056 continue; 9067 continue;
9057 list_move_tail(&iocb->list, &completions); 9068 list_move_tail(&iocb->list, &completions);
9058 pring->txq_cnt--;
9059 } 9069 }
9060 9070
9061 /* Next issue ABTS for everything on the txcmplq */ 9071 /* Next issue ABTS for everything on the txcmplq */
@@ -9124,8 +9134,6 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
9124 * given to the FW yet. 9134 * given to the FW yet.
9125 */ 9135 */
9126 list_splice_init(&pring->txq, &completions); 9136 list_splice_init(&pring->txq, &completions);
9127 pring->txq_cnt = 0;
9128
9129 } 9137 }
9130 spin_unlock_irqrestore(&phba->hbalock, flags); 9138 spin_unlock_irqrestore(&phba->hbalock, flags);
9131 9139
@@ -9966,6 +9974,9 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
9966 long timeleft, timeout_req = 0; 9974 long timeleft, timeout_req = 0;
9967 int retval = IOCB_SUCCESS; 9975 int retval = IOCB_SUCCESS;
9968 uint32_t creg_val; 9976 uint32_t creg_val;
9977 struct lpfc_iocbq *iocb;
9978 int txq_cnt = 0;
9979 int txcmplq_cnt = 0;
9969 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 9980 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
9970 /* 9981 /*
9971 * If the caller has provided a response iocbq buffer, then context2 9982 * If the caller has provided a response iocbq buffer, then context2
@@ -10013,9 +10024,17 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
10013 retval = IOCB_TIMEDOUT; 10024 retval = IOCB_TIMEDOUT;
10014 } 10025 }
10015 } else if (retval == IOCB_BUSY) { 10026 } else if (retval == IOCB_BUSY) {
10016 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10027 if (phba->cfg_log_verbose & LOG_SLI) {
10017 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n", 10028 list_for_each_entry(iocb, &pring->txq, list) {
10018 phba->iocb_cnt, pring->txq_cnt, pring->txcmplq_cnt); 10029 txq_cnt++;
10030 }
10031 list_for_each_entry(iocb, &pring->txcmplq, list) {
10032 txcmplq_cnt++;
10033 }
10034 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10035 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
10036 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
10037 }
10019 return retval; 10038 return retval;
10020 } else { 10039 } else {
10021 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10040 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -11298,16 +11317,25 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11298 struct lpfc_iocbq *irspiocbq; 11317 struct lpfc_iocbq *irspiocbq;
11299 unsigned long iflags; 11318 unsigned long iflags;
11300 struct lpfc_sli_ring *pring = cq->pring; 11319 struct lpfc_sli_ring *pring = cq->pring;
11320 int txq_cnt = 0;
11321 int txcmplq_cnt = 0;
11322 int fcp_txcmplq_cnt = 0;
11301 11323
11302 /* Get an irspiocbq for later ELS response processing use */ 11324 /* Get an irspiocbq for later ELS response processing use */
11303 irspiocbq = lpfc_sli_get_iocbq(phba); 11325 irspiocbq = lpfc_sli_get_iocbq(phba);
11304 if (!irspiocbq) { 11326 if (!irspiocbq) {
11327 if (!list_empty(&pring->txq))
11328 txq_cnt++;
11329 if (!list_empty(&pring->txcmplq))
11330 txcmplq_cnt++;
11331 if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
11332 fcp_txcmplq_cnt++;
11305 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11333 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11306 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d " 11334 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
11307 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n", 11335 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
11308 pring->txq_cnt, phba->iocb_cnt, 11336 txq_cnt, phba->iocb_cnt,
11309 phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt, 11337 fcp_txcmplq_cnt,
11310 phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt); 11338 txcmplq_cnt);
11311 return false; 11339 return false;
11312 } 11340 }
11313 11341
@@ -15482,11 +15510,18 @@ lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
15482 LPFC_SLI4_FCF_TBL_INDX_MAX); 15510 LPFC_SLI4_FCF_TBL_INDX_MAX);
15483 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 15511 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
15484 "3060 Last IDX %d\n", last_index); 15512 "3060 Last IDX %d\n", last_index);
15485 if (list_empty(&phba->fcf.fcf_pri_list)) { 15513
15514 /* Verify the priority list has 2 or more entries */
15515 spin_lock_irq(&phba->hbalock);
15516 if (list_empty(&phba->fcf.fcf_pri_list) ||
15517 list_is_singular(&phba->fcf.fcf_pri_list)) {
15518 spin_unlock_irq(&phba->hbalock);
15486 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 15519 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
15487 "3061 Last IDX %d\n", last_index); 15520 "3061 Last IDX %d\n", last_index);
15488 return 0; /* Empty rr list */ 15521 return 0; /* Empty rr list */
15489 } 15522 }
15523 spin_unlock_irq(&phba->hbalock);
15524
15490 next_fcf_pri = 0; 15525 next_fcf_pri = 0;
15491 /* 15526 /*
15492 * Clear the rr_bmask and set all of the bits that are at this 15527 * Clear the rr_bmask and set all of the bits that are at this
@@ -16245,14 +16280,19 @@ lpfc_drain_txq(struct lpfc_hba *phba)
16245 char *fail_msg = NULL; 16280 char *fail_msg = NULL;
16246 struct lpfc_sglq *sglq; 16281 struct lpfc_sglq *sglq;
16247 union lpfc_wqe wqe; 16282 union lpfc_wqe wqe;
16283 int txq_cnt = 0;
16248 16284
16249 spin_lock_irqsave(&phba->hbalock, iflags); 16285 spin_lock_irqsave(&phba->hbalock, iflags);
16250 if (pring->txq_cnt > pring->txq_max) 16286 list_for_each_entry(piocbq, &pring->txq, list) {
16251 pring->txq_max = pring->txq_cnt; 16287 txq_cnt++;
16288 }
16289
16290 if (txq_cnt > pring->txq_max)
16291 pring->txq_max = txq_cnt;
16252 16292
16253 spin_unlock_irqrestore(&phba->hbalock, iflags); 16293 spin_unlock_irqrestore(&phba->hbalock, iflags);
16254 16294
16255 while (pring->txq_cnt) { 16295 while (!list_empty(&pring->txq)) {
16256 spin_lock_irqsave(&phba->hbalock, iflags); 16296 spin_lock_irqsave(&phba->hbalock, iflags);
16257 16297
16258 piocbq = lpfc_sli_ringtx_get(phba, pring); 16298 piocbq = lpfc_sli_ringtx_get(phba, pring);
@@ -16260,7 +16300,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
16260 spin_unlock_irqrestore(&phba->hbalock, iflags); 16300 spin_unlock_irqrestore(&phba->hbalock, iflags);
16261 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16301 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16262 "2823 txq empty and txq_cnt is %d\n ", 16302 "2823 txq empty and txq_cnt is %d\n ",
16263 pring->txq_cnt); 16303 txq_cnt);
16264 break; 16304 break;
16265 } 16305 }
16266 sglq = __lpfc_sli_get_sglq(phba, piocbq); 16306 sglq = __lpfc_sli_get_sglq(phba, piocbq);
@@ -16269,6 +16309,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
16269 spin_unlock_irqrestore(&phba->hbalock, iflags); 16309 spin_unlock_irqrestore(&phba->hbalock, iflags);
16270 break; 16310 break;
16271 } 16311 }
16312 txq_cnt--;
16272 16313
16273 /* The xri and iocb resources secured, 16314 /* The xri and iocb resources secured,
16274 * attempt to issue request 16315 * attempt to issue request
@@ -16300,5 +16341,5 @@ lpfc_drain_txq(struct lpfc_hba *phba)
16300 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 16341 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
16301 IOERR_SLI_ABORTED); 16342 IOERR_SLI_ABORTED);
16302 16343
16303 return pring->txq_cnt; 16344 return txq_cnt;
16304} 16345}
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index f3b7795a296b..664cd04f7cd8 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.37" 21#define LPFC_DRIVER_VERSION "8.3.38"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23 23
24/* Used for SLI 2/3 */ 24/* Used for SLI 2/3 */
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index dce7d788cdc9..c37b244cf8ae 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,6 +1,6 @@
1qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \ 1qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
2 qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \ 2 qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
3 qla_nx.o qla_target.o 3 qla_nx.o qla_mr.o qla_target.o
4 4
5obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o 5obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
6obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o 6obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index b3db9dcc2619..bf60c631abb5 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -888,7 +888,10 @@ qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
888 struct qla_hw_data *ha = vha->hw; 888 struct qla_hw_data *ha = vha->hw;
889 uint32_t sn; 889 uint32_t sn;
890 890
891 if (IS_FWI2_CAPABLE(ha)) { 891 if (IS_QLAFX00(vha->hw)) {
892 return snprintf(buf, PAGE_SIZE, "%s\n",
893 vha->hw->mr.serial_num);
894 } else if (IS_FWI2_CAPABLE(ha)) {
892 qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE); 895 qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE);
893 return snprintf(buf, PAGE_SIZE, "%s\n", buf); 896 return snprintf(buf, PAGE_SIZE, "%s\n", buf);
894 } 897 }
@@ -912,6 +915,11 @@ qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
912{ 915{
913 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 916 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
914 struct qla_hw_data *ha = vha->hw; 917 struct qla_hw_data *ha = vha->hw;
918
919 if (IS_QLAFX00(vha->hw))
920 return snprintf(buf, PAGE_SIZE, "%s\n",
921 vha->hw->mr.hw_version);
922
915 return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n", 923 return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
916 ha->product_id[0], ha->product_id[1], ha->product_id[2], 924 ha->product_id[0], ha->product_id[1], ha->product_id[2],
917 ha->product_id[3]); 925 ha->product_id[3]);
@@ -922,6 +930,11 @@ qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
922 char *buf) 930 char *buf)
923{ 931{
924 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 932 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
933
934 if (IS_QLAFX00(vha->hw))
935 return snprintf(buf, PAGE_SIZE, "%s\n",
936 vha->hw->mr.product_name);
937
925 return snprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number); 938 return snprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
926} 939}
927 940
@@ -1304,6 +1317,12 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1304 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1317 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1305 int rval = QLA_FUNCTION_FAILED; 1318 int rval = QLA_FUNCTION_FAILED;
1306 uint16_t state[5]; 1319 uint16_t state[5];
1320 uint32_t pstate;
1321
1322 if (IS_QLAFX00(vha->hw)) {
1323 pstate = qlafx00_fw_state_show(dev, attr, buf);
1324 return snprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
1325 }
1307 1326
1308 if (qla2x00_reset_active(vha)) 1327 if (qla2x00_reset_active(vha))
1309 ql_log(ql_log_warn, vha, 0x707c, 1328 ql_log(ql_log_warn, vha, 0x707c,
@@ -1454,6 +1473,11 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
1454 (shost_priv(shost)))->hw; 1473 (shost_priv(shost)))->hw;
1455 u32 speed = FC_PORTSPEED_UNKNOWN; 1474 u32 speed = FC_PORTSPEED_UNKNOWN;
1456 1475
1476 if (IS_QLAFX00(ha)) {
1477 qlafx00_get_host_speed(shost);
1478 return;
1479 }
1480
1457 switch (ha->link_data_rate) { 1481 switch (ha->link_data_rate) {
1458 case PORT_SPEED_1GB: 1482 case PORT_SPEED_1GB:
1459 speed = FC_PORTSPEED_1GBIT; 1483 speed = FC_PORTSPEED_1GBIT;
@@ -1637,6 +1661,9 @@ qla2x00_issue_lip(struct Scsi_Host *shost)
1637{ 1661{
1638 scsi_qla_host_t *vha = shost_priv(shost); 1662 scsi_qla_host_t *vha = shost_priv(shost);
1639 1663
1664 if (IS_QLAFX00(vha->hw))
1665 return 0;
1666
1640 qla2x00_loop_reset(vha); 1667 qla2x00_loop_reset(vha);
1641 return 0; 1668 return 0;
1642} 1669}
@@ -1655,6 +1682,9 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1655 pfc_host_stat = &vha->fc_host_stat; 1682 pfc_host_stat = &vha->fc_host_stat;
1656 memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics)); 1683 memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
1657 1684
1685 if (IS_QLAFX00(vha->hw))
1686 goto done;
1687
1658 if (test_bit(UNLOADING, &vha->dpc_flags)) 1688 if (test_bit(UNLOADING, &vha->dpc_flags))
1659 goto done; 1689 goto done;
1660 1690
@@ -2087,6 +2117,9 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
2087 FC_PORTSPEED_1GBIT; 2117 FC_PORTSPEED_1GBIT;
2088 else if (IS_QLA23XX(ha)) 2118 else if (IS_QLA23XX(ha))
2089 speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT; 2119 speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
2120 else if (IS_QLAFX00(ha))
2121 speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
2122 FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
2090 else 2123 else
2091 speed = FC_PORTSPEED_1GBIT; 2124 speed = FC_PORTSPEED_1GBIT;
2092 fc_host_supported_speeds(vha->host) = speed; 2125 fc_host_supported_speeds(vha->host) = speed;
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index ad54099cb805..39719f892488 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -30,14 +30,31 @@ qla2x00_bsg_sp_free(void *data, void *ptr)
30 struct scsi_qla_host *vha = sp->fcport->vha; 30 struct scsi_qla_host *vha = sp->fcport->vha;
31 struct fc_bsg_job *bsg_job = sp->u.bsg_job; 31 struct fc_bsg_job *bsg_job = sp->u.bsg_job;
32 struct qla_hw_data *ha = vha->hw; 32 struct qla_hw_data *ha = vha->hw;
33 struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
33 34
34 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 35 if (sp->type == SRB_FXIOCB_BCMD) {
35 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 36 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
37 &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
36 38
37 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, 39 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
38 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 40 dma_unmap_sg(&ha->pdev->dev,
41 bsg_job->request_payload.sg_list,
42 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
43
44 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
45 dma_unmap_sg(&ha->pdev->dev,
46 bsg_job->reply_payload.sg_list,
47 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
48 } else {
49 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
50 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
51
52 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
53 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
54 }
39 55
40 if (sp->type == SRB_CT_CMD || 56 if (sp->type == SRB_CT_CMD ||
57 sp->type == SRB_FXIOCB_BCMD ||
41 sp->type == SRB_ELS_CMD_HST) 58 sp->type == SRB_ELS_CMD_HST)
42 kfree(sp->fcport); 59 kfree(sp->fcport);
43 qla2x00_rel_sp(vha, sp); 60 qla2x00_rel_sp(vha, sp);
@@ -751,6 +768,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
751 elreq.transfer_size = req_data_len; 768 elreq.transfer_size = req_data_len;
752 769
753 elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; 770 elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
771 elreq.iteration_count =
772 bsg_job->request->rqst_data.h_vendor.vendor_cmd[2];
754 773
755 if (atomic_read(&vha->loop_state) == LOOP_READY && 774 if (atomic_read(&vha->loop_state) == LOOP_READY &&
756 (ha->current_topology == ISP_CFG_F || 775 (ha->current_topology == ISP_CFG_F ||
@@ -1883,6 +1902,128 @@ done:
1883} 1902}
1884 1903
1885static int 1904static int
1905qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job)
1906{
1907 struct Scsi_Host *host = bsg_job->shost;
1908 scsi_qla_host_t *vha = shost_priv(host);
1909 struct qla_hw_data *ha = vha->hw;
1910 int rval = (DRIVER_ERROR << 16);
1911 struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
1912 srb_t *sp;
1913 int req_sg_cnt = 0, rsp_sg_cnt = 0;
1914 struct fc_port *fcport;
1915 char *type = "FC_BSG_HST_FX_MGMT";
1916
1917 /* Copy the IOCB specific information */
1918 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
1919 &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1920
1921 /* Dump the vendor information */
1922 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose , vha, 0x70cf,
1923 (uint8_t *)piocb_rqst, sizeof(struct qla_mt_iocb_rqst_fx00));
1924
1925 if (!vha->flags.online) {
1926 ql_log(ql_log_warn, vha, 0x70d0,
1927 "Host is not online.\n");
1928 rval = -EIO;
1929 goto done;
1930 }
1931
1932 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
1933 req_sg_cnt = dma_map_sg(&ha->pdev->dev,
1934 bsg_job->request_payload.sg_list,
1935 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1936 if (!req_sg_cnt) {
1937 ql_log(ql_log_warn, vha, 0x70c7,
1938 "dma_map_sg return %d for request\n", req_sg_cnt);
1939 rval = -ENOMEM;
1940 goto done;
1941 }
1942 }
1943
1944 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
1945 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
1946 bsg_job->reply_payload.sg_list,
1947 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1948 if (!rsp_sg_cnt) {
1949 ql_log(ql_log_warn, vha, 0x70c8,
1950 "dma_map_sg return %d for reply\n", rsp_sg_cnt);
1951 rval = -ENOMEM;
1952 goto done_unmap_req_sg;
1953 }
1954 }
1955
1956 ql_dbg(ql_dbg_user, vha, 0x70c9,
1957 "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
1958 "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
1959 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
1960
1961 /* Allocate a dummy fcport structure, since functions preparing the
1962 * IOCB and mailbox command retrieves port specific information
1963 * from fcport structure. For Host based ELS commands there will be
1964 * no fcport structure allocated
1965 */
1966 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1967 if (!fcport) {
1968 ql_log(ql_log_warn, vha, 0x70ca,
1969 "Failed to allocate fcport.\n");
1970 rval = -ENOMEM;
1971 goto done_unmap_rsp_sg;
1972 }
1973
1974 /* Alloc SRB structure */
1975 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1976 if (!sp) {
1977 ql_log(ql_log_warn, vha, 0x70cb,
1978 "qla2x00_get_sp failed.\n");
1979 rval = -ENOMEM;
1980 goto done_free_fcport;
1981 }
1982
1983 /* Initialize all required fields of fcport */
1984 fcport->vha = vha;
1985 fcport->loop_id = piocb_rqst->dataword;
1986
1987 sp->type = SRB_FXIOCB_BCMD;
1988 sp->name = "bsg_fx_mgmt";
1989 sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
1990 sp->u.bsg_job = bsg_job;
1991 sp->free = qla2x00_bsg_sp_free;
1992 sp->done = qla2x00_bsg_job_done;
1993
1994 ql_dbg(ql_dbg_user, vha, 0x70cc,
1995 "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
1996 type, piocb_rqst->func_type, fcport->loop_id);
1997
1998 rval = qla2x00_start_sp(sp);
1999 if (rval != QLA_SUCCESS) {
2000 ql_log(ql_log_warn, vha, 0x70cd,
2001 "qla2x00_start_sp failed=%d.\n", rval);
2002 mempool_free(sp, ha->srb_mempool);
2003 rval = -EIO;
2004 goto done_free_fcport;
2005 }
2006 return rval;
2007
2008done_free_fcport:
2009 kfree(fcport);
2010
2011done_unmap_rsp_sg:
2012 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
2013 dma_unmap_sg(&ha->pdev->dev,
2014 bsg_job->reply_payload.sg_list,
2015 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2016done_unmap_req_sg:
2017 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
2018 dma_unmap_sg(&ha->pdev->dev,
2019 bsg_job->request_payload.sg_list,
2020 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2021
2022done:
2023 return rval;
2024}
2025
2026static int
1886qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job) 2027qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
1887{ 2028{
1888 switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) { 2029 switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
@@ -1928,6 +2069,8 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
1928 case QL_VND_DIAG_IO_CMD: 2069 case QL_VND_DIAG_IO_CMD:
1929 return qla24xx_process_bidir_cmd(bsg_job); 2070 return qla24xx_process_bidir_cmd(bsg_job);
1930 2071
2072 case QL_VND_FX00_MGMT_CMD:
2073 return qlafx00_mgmt_cmd(bsg_job);
1931 default: 2074 default:
1932 return -ENOSYS; 2075 return -ENOSYS;
1933 } 2076 }
@@ -2007,7 +2150,8 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
2007 sp = req->outstanding_cmds[cnt]; 2150 sp = req->outstanding_cmds[cnt];
2008 if (sp) { 2151 if (sp) {
2009 if (((sp->type == SRB_CT_CMD) || 2152 if (((sp->type == SRB_CT_CMD) ||
2010 (sp->type == SRB_ELS_CMD_HST)) 2153 (sp->type == SRB_ELS_CMD_HST) ||
2154 (sp->type == SRB_FXIOCB_BCMD))
2011 && (sp->u.bsg_job == bsg_job)) { 2155 && (sp->u.bsg_job == bsg_job)) {
2012 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2156 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2013 if (ha->isp_ops->abort_command(sp)) { 2157 if (ha->isp_ops->abort_command(sp)) {
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index e9f6b9bbf29a..04f770332c2b 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -22,6 +22,7 @@
22#define QL_VND_DIAG_IO_CMD 0x0A 22#define QL_VND_DIAG_IO_CMD 0x0A
23#define QL_VND_WRITE_I2C 0x10 23#define QL_VND_WRITE_I2C 0x10
24#define QL_VND_READ_I2C 0x11 24#define QL_VND_READ_I2C 0x11
25#define QL_VND_FX00_MGMT_CMD 0x12
25 26
26/* BSG Vendor specific subcode returns */ 27/* BSG Vendor specific subcode returns */
27#define EXT_STATUS_OK 0 28#define EXT_STATUS_OK 0
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index fbc305f1c87c..cfa2a20dee97 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,28 +11,31 @@
11 * ---------------------------------------------------------------------- 11 * ----------------------------------------------------------------------
12 * | Level | Last Value Used | Holes | 12 * | Level | Last Value Used | Holes |
13 * ---------------------------------------------------------------------- 13 * ----------------------------------------------------------------------
14 * | Module Init and Probe | 0x0126 | 0x4b,0xba,0xfa | 14 * | Module Init and Probe | 0x014f | 0x4b,0xba,0xfa |
15 * | Mailbox commands | 0x115b | 0x111a-0x111b | 15 * | Mailbox commands | 0x1179 | 0x111a-0x111b |
16 * | | | 0x112c-0x112e |
17 * | | | 0x113a |
18 * | | | 0x1155-0x1158 | 16 * | | | 0x1155-0x1158 |
19 * | Device Discovery | 0x2087 | 0x2020-0x2022, | 17 * | Device Discovery | 0x2095 | 0x2020-0x2022, |
20 * | | | 0x2016 | 18 * | | | 0x2016 |
21 * | Queue Command and IO tracing | 0x3031 | 0x3006-0x300b | 19 * | Queue Command and IO tracing | 0x3058 | 0x3006-0x300b |
22 * | | | 0x3027-0x3028 | 20 * | | | 0x3027-0x3028 |
23 * | | | 0x302d-0x302e | 21 * | | | 0x303d-0x3041 |
24 * | DPC Thread | 0x401d | 0x4002,0x4013 | 22 * | | | 0x302d,0x3033 |
25 * | Async Events | 0x5071 | 0x502b-0x502f | 23 * | | | 0x3036,0x3038 |
24 * | | | 0x303a |
25 * | DPC Thread | 0x4022 | 0x4002,0x4013 |
26 * | Async Events | 0x5081 | 0x502b-0x502f |
26 * | | | 0x5047,0x5052 | 27 * | | | 0x5047,0x5052 |
28 * | | | 0x5040,0x5075 |
27 * | Timer Routines | 0x6011 | | 29 * | Timer Routines | 0x6011 | |
28 * | User Space Interactions | 0x70c4 | 0x7018,0x702e, | 30 * | User Space Interactions | 0x70dd | 0x7018,0x702e, |
29 * | | | 0x7020,0x7024, | 31 * | | | 0x7020,0x7024, |
30 * | | | 0x7039,0x7045, | 32 * | | | 0x7039,0x7045, |
31 * | | | 0x7073-0x7075, | 33 * | | | 0x7073-0x7075, |
32 * | | | 0x708c, | 34 * | | | 0x707b,0x708c, |
33 * | | | 0x70a5,0x70a6, | 35 * | | | 0x70a5,0x70a6, |
34 * | | | 0x70a8,0x70ab, | 36 * | | | 0x70a8,0x70ab, |
35 * | | | 0x70ad-0x70ae | 37 * | | | 0x70ad-0x70ae, |
38 * | | | 0x70d1-0x70da |
36 * | Task Management | 0x803c | 0x8025-0x8026 | 39 * | Task Management | 0x803c | 0x8025-0x8026 |
37 * | | | 0x800b,0x8039 | 40 * | | | 0x800b,0x8039 |
38 * | AER/EEH | 0x9011 | | 41 * | AER/EEH | 0x9011 | |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 65c5ff75936b..c32efc753229 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -245,7 +245,6 @@
245 245
246#define MAX_CMDSZ 16 /* SCSI maximum CDB size. */ 246#define MAX_CMDSZ 16 /* SCSI maximum CDB size. */
247#include "qla_fw.h" 247#include "qla_fw.h"
248
249/* 248/*
250 * Timeout timer counts in seconds 249 * Timeout timer counts in seconds
251 */ 250 */
@@ -265,6 +264,7 @@
265#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/ 264#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/
266#define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/ 265#define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/
267#define ATIO_ENTRY_CNT_24XX 4096 /* Number of ATIO entries. */ 266#define ATIO_ENTRY_CNT_24XX 4096 /* Number of ATIO entries. */
267#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/
268 268
269struct req_que; 269struct req_que;
270 270
@@ -284,6 +284,7 @@ struct sd_dif_tuple {
284struct srb_cmd { 284struct srb_cmd {
285 struct scsi_cmnd *cmd; /* Linux SCSI command pkt */ 285 struct scsi_cmnd *cmd; /* Linux SCSI command pkt */
286 uint32_t request_sense_length; 286 uint32_t request_sense_length;
287 uint32_t fw_sense_length;
287 uint8_t *request_sense_ptr; 288 uint8_t *request_sense_ptr;
288 void *ctx; 289 void *ctx;
289}; 290};
@@ -321,7 +322,39 @@ struct srb_iocb {
321 uint32_t flags; 322 uint32_t flags;
322 uint32_t lun; 323 uint32_t lun;
323 uint32_t data; 324 uint32_t data;
325 struct completion comp;
326 uint32_t comp_status;
324 } tmf; 327 } tmf;
328 struct {
329#define SRB_FXDISC_REQ_DMA_VALID BIT_0
330#define SRB_FXDISC_RESP_DMA_VALID BIT_1
331#define SRB_FXDISC_REQ_DWRD_VALID BIT_2
332#define SRB_FXDISC_RSP_DWRD_VALID BIT_3
333#define FXDISC_TIMEOUT 20
334 uint8_t flags;
335 uint32_t req_len;
336 uint32_t rsp_len;
337 void *req_addr;
338 void *rsp_addr;
339 dma_addr_t req_dma_handle;
340 dma_addr_t rsp_dma_handle;
341 uint32_t adapter_id;
342 uint32_t adapter_id_hi;
343 uint32_t req_func_type;
344 uint32_t req_data;
345 uint32_t req_data_extra;
346 uint32_t result;
347 uint32_t seq_number;
348 uint32_t fw_flags;
349 struct completion fxiocb_comp;
350 uint32_t reserved_0;
351 uint8_t reserved_1;
352 } fxiocb;
353 struct {
354 uint32_t cmd_hndl;
355 uint32_t comp_status;
356 struct completion comp;
357 } abt;
325 } u; 358 } u;
326 359
327 struct timer_list timer; 360 struct timer_list timer;
@@ -338,6 +371,10 @@ struct srb_iocb {
338#define SRB_TM_CMD 7 371#define SRB_TM_CMD 7
339#define SRB_SCSI_CMD 8 372#define SRB_SCSI_CMD 8
340#define SRB_BIDI_CMD 9 373#define SRB_BIDI_CMD 9
374#define SRB_FXIOCB_DCMD 10
375#define SRB_FXIOCB_BCMD 11
376#define SRB_ABT_CMD 12
377
341 378
342typedef struct srb { 379typedef struct srb {
343 atomic_t ref_count; 380 atomic_t ref_count;
@@ -368,6 +405,10 @@ typedef struct srb {
368 (sp->u.scmd.request_sense_ptr) 405 (sp->u.scmd.request_sense_ptr)
369#define SET_CMD_SENSE_PTR(sp, ptr) \ 406#define SET_CMD_SENSE_PTR(sp, ptr) \
370 (sp->u.scmd.request_sense_ptr = ptr) 407 (sp->u.scmd.request_sense_ptr = ptr)
408#define GET_FW_SENSE_LEN(sp) \
409 (sp->u.scmd.fw_sense_length)
410#define SET_FW_SENSE_LEN(sp, len) \
411 (sp->u.scmd.fw_sense_length = len)
371 412
372struct msg_echo_lb { 413struct msg_echo_lb {
373 dma_addr_t send_dma; 414 dma_addr_t send_dma;
@@ -376,6 +417,7 @@ struct msg_echo_lb {
376 uint16_t rsp_sg_cnt; 417 uint16_t rsp_sg_cnt;
377 uint16_t options; 418 uint16_t options;
378 uint32_t transfer_size; 419 uint32_t transfer_size;
420 uint32_t iteration_count;
379}; 421};
380 422
381/* 423/*
@@ -542,11 +584,74 @@ struct device_reg_25xxmq {
542 uint32_t atio_q_out; 584 uint32_t atio_q_out;
543}; 585};
544 586
587
588struct device_reg_fx00 {
589 uint32_t mailbox0; /* 00 */
590 uint32_t mailbox1; /* 04 */
591 uint32_t mailbox2; /* 08 */
592 uint32_t mailbox3; /* 0C */
593 uint32_t mailbox4; /* 10 */
594 uint32_t mailbox5; /* 14 */
595 uint32_t mailbox6; /* 18 */
596 uint32_t mailbox7; /* 1C */
597 uint32_t mailbox8; /* 20 */
598 uint32_t mailbox9; /* 24 */
599 uint32_t mailbox10; /* 28 */
600 uint32_t mailbox11;
601 uint32_t mailbox12;
602 uint32_t mailbox13;
603 uint32_t mailbox14;
604 uint32_t mailbox15;
605 uint32_t mailbox16;
606 uint32_t mailbox17;
607 uint32_t mailbox18;
608 uint32_t mailbox19;
609 uint32_t mailbox20;
610 uint32_t mailbox21;
611 uint32_t mailbox22;
612 uint32_t mailbox23;
613 uint32_t mailbox24;
614 uint32_t mailbox25;
615 uint32_t mailbox26;
616 uint32_t mailbox27;
617 uint32_t mailbox28;
618 uint32_t mailbox29;
619 uint32_t mailbox30;
620 uint32_t mailbox31;
621 uint32_t aenmailbox0;
622 uint32_t aenmailbox1;
623 uint32_t aenmailbox2;
624 uint32_t aenmailbox3;
625 uint32_t aenmailbox4;
626 uint32_t aenmailbox5;
627 uint32_t aenmailbox6;
628 uint32_t aenmailbox7;
629 /* Request Queue. */
630 uint32_t req_q_in; /* A0 - Request Queue In-Pointer */
631 uint32_t req_q_out; /* A4 - Request Queue Out-Pointer */
632 /* Response Queue. */
633 uint32_t rsp_q_in; /* A8 - Response Queue In-Pointer */
634 uint32_t rsp_q_out; /* AC - Response Queue Out-Pointer */
635 /* Init values shadowed on FW Up Event */
636 uint32_t initval0; /* B0 */
637 uint32_t initval1; /* B4 */
638 uint32_t initval2; /* B8 */
639 uint32_t initval3; /* BC */
640 uint32_t initval4; /* C0 */
641 uint32_t initval5; /* C4 */
642 uint32_t initval6; /* C8 */
643 uint32_t initval7; /* CC */
644 uint32_t fwheartbeat; /* D0 */
645};
646
647
648
545typedef union { 649typedef union {
546 struct device_reg_2xxx isp; 650 struct device_reg_2xxx isp;
547 struct device_reg_24xx isp24; 651 struct device_reg_24xx isp24;
548 struct device_reg_25xxmq isp25mq; 652 struct device_reg_25xxmq isp25mq;
549 struct device_reg_82xx isp82; 653 struct device_reg_82xx isp82;
654 struct device_reg_fx00 ispfx00;
550} device_reg_t; 655} device_reg_t;
551 656
552#define ISP_REQ_Q_IN(ha, reg) \ 657#define ISP_REQ_Q_IN(ha, reg) \
@@ -602,6 +707,20 @@ typedef struct {
602#define IOCTL_CMD BIT_2 707#define IOCTL_CMD BIT_2
603} mbx_cmd_t; 708} mbx_cmd_t;
604 709
710struct mbx_cmd_32 {
711 uint32_t out_mb; /* outbound from driver */
712 uint32_t in_mb; /* Incoming from RISC */
713 uint32_t mb[MAILBOX_REGISTER_COUNT];
714 long buf_size;
715 void *bufp;
716 uint32_t tov;
717 uint8_t flags;
718#define MBX_DMA_IN BIT_0
719#define MBX_DMA_OUT BIT_1
720#define IOCTL_CMD BIT_2
721};
722
723
605#define MBX_TOV_SECONDS 30 724#define MBX_TOV_SECONDS 30
606 725
607/* 726/*
@@ -677,6 +796,15 @@ typedef struct {
677#define MBA_BYPASS_NOTIFICATION 0x8043 /* Auto bypass notification. */ 796#define MBA_BYPASS_NOTIFICATION 0x8043 /* Auto bypass notification. */
678#define MBA_DISCARD_RND_FRAME 0x8048 /* discard RND frame due to error. */ 797#define MBA_DISCARD_RND_FRAME 0x8048 /* discard RND frame due to error. */
679#define MBA_REJECTED_FCP_CMD 0x8049 /* rejected FCP_CMD. */ 798#define MBA_REJECTED_FCP_CMD 0x8049 /* rejected FCP_CMD. */
799#define MBA_FW_NOT_STARTED 0x8050 /* Firmware not started */
800#define MBA_FW_STARTING 0x8051 /* Firmware starting */
801#define MBA_FW_RESTART_CMPLT 0x8060 /* Firmware restart complete */
802#define MBA_INIT_REQUIRED 0x8061 /* Initialization required */
803#define MBA_SHUTDOWN_REQUESTED 0x8062 /* Shutdown Requested */
804#define MBA_FW_INIT_FAILURE 0x8401 /* Firmware initialization failure */
805#define MBA_MIRROR_LUN_CHANGE 0x8402 /* Mirror LUN State Change
806 Notification */
807#define MBA_FW_POLL_STATE 0x8600 /* Firmware in poll diagnostic state */
680 808
681/* 83XX FCoE specific */ 809/* 83XX FCoE specific */
682#define MBA_IDC_AEN 0x8200 /* FCoE: NIC Core state change AEN */ 810#define MBA_IDC_AEN 0x8200 /* FCoE: NIC Core state change AEN */
@@ -798,6 +926,12 @@ typedef struct {
798#define MBC_LUN_RESET 0x7E /* Send LUN reset */ 926#define MBC_LUN_RESET 0x7E /* Send LUN reset */
799 927
800/* 928/*
929 * all the Mt. Rainier mailbox command codes that clash with FC/FCoE ones
930 * should be defined with MBC_MR_*
931 */
932#define MBC_MR_DRV_SHUTDOWN 0x6A
933
934/*
801 * ISP24xx mailbox commands 935 * ISP24xx mailbox commands
802 */ 936 */
803#define MBC_SERDES_PARAMS 0x10 /* Serdes Tx Parameters. */ 937#define MBC_SERDES_PARAMS 0x10 /* Serdes Tx Parameters. */
@@ -1058,6 +1192,30 @@ typedef struct {
1058 uint8_t reserved_3[26]; 1192 uint8_t reserved_3[26];
1059} init_cb_t; 1193} init_cb_t;
1060 1194
1195
1196struct init_cb_fx {
1197 uint16_t version;
1198 uint16_t reserved_1[13];
1199 uint16_t request_q_outpointer;
1200 uint16_t response_q_inpointer;
1201 uint16_t reserved_2[2];
1202 uint16_t response_q_length;
1203 uint16_t request_q_length;
1204 uint16_t reserved_3[2];
1205 uint32_t request_q_address[2];
1206 uint32_t response_q_address[2];
1207 uint16_t reserved_4[4];
1208 uint8_t response_q_msivec;
1209 uint8_t reserved_5[19];
1210 uint16_t interrupt_delay_timer;
1211 uint16_t reserved_6;
1212 uint32_t fwoptions1;
1213 uint32_t fwoptions2;
1214 uint32_t fwoptions3;
1215 uint8_t reserved_7[24];
1216};
1217
1218
1061/* 1219/*
1062 * Get Link Status mailbox command return buffer. 1220 * Get Link Status mailbox command return buffer.
1063 */ 1221 */
@@ -1831,6 +1989,9 @@ typedef struct fc_port {
1831 uint16_t loop_id; 1989 uint16_t loop_id;
1832 uint16_t old_loop_id; 1990 uint16_t old_loop_id;
1833 1991
1992 uint16_t tgt_id;
1993 uint16_t old_tgt_id;
1994
1834 uint8_t fcp_prio; 1995 uint8_t fcp_prio;
1835 1996
1836 uint8_t fabric_port_name[WWN_SIZE]; 1997 uint8_t fabric_port_name[WWN_SIZE];
@@ -1848,8 +2009,15 @@ typedef struct fc_port {
1848 2009
1849 uint8_t fc4_type; 2010 uint8_t fc4_type;
1850 uint8_t scan_state; 2011 uint8_t scan_state;
2012
2013 unsigned long last_queue_full;
2014 unsigned long last_ramp_up;
2015
2016 uint16_t port_id;
1851} fc_port_t; 2017} fc_port_t;
1852 2018
2019#include "qla_mr.h"
2020
1853/* 2021/*
1854 * Fibre channel port/lun states. 2022 * Fibre channel port/lun states.
1855 */ 2023 */
@@ -2391,6 +2559,7 @@ struct isp_operations {
2391 int (*start_scsi) (srb_t *); 2559 int (*start_scsi) (srb_t *);
2392 int (*abort_isp) (struct scsi_qla_host *); 2560 int (*abort_isp) (struct scsi_qla_host *);
2393 int (*iospace_config)(struct qla_hw_data*); 2561 int (*iospace_config)(struct qla_hw_data*);
2562 int (*initialize_adapter)(struct scsi_qla_host *);
2394}; 2563};
2395 2564
2396/* MSI-X Support *************************************************************/ 2565/* MSI-X Support *************************************************************/
@@ -2429,6 +2598,7 @@ enum qla_work_type {
2429 QLA_EVT_ASYNC_ADISC, 2598 QLA_EVT_ASYNC_ADISC,
2430 QLA_EVT_ASYNC_ADISC_DONE, 2599 QLA_EVT_ASYNC_ADISC_DONE,
2431 QLA_EVT_UEVENT, 2600 QLA_EVT_UEVENT,
2601 QLA_EVT_AENFX,
2432}; 2602};
2433 2603
2434 2604
@@ -2456,7 +2626,15 @@ struct qla_work_evt {
2456 u32 code; 2626 u32 code;
2457#define QLA_UEVENT_CODE_FW_DUMP 0 2627#define QLA_UEVENT_CODE_FW_DUMP 0
2458 } uevent; 2628 } uevent;
2459 } u; 2629 struct {
2630 uint32_t evtcode;
2631 uint32_t mbx[8];
2632 uint32_t count;
2633 } aenfx;
2634 struct {
2635 srb_t *sp;
2636 } iosb;
2637 } u;
2460}; 2638};
2461 2639
2462struct qla_chip_state_84xx { 2640struct qla_chip_state_84xx {
@@ -2520,6 +2698,11 @@ struct rsp_que {
2520 struct req_que *req; 2698 struct req_que *req;
2521 srb_t *status_srb; /* status continuation entry */ 2699 srb_t *status_srb; /* status continuation entry */
2522 struct work_struct q_work; 2700 struct work_struct q_work;
2701
2702 dma_addr_t dma_fx00;
2703 response_t *ring_fx00;
2704 uint16_t length_fx00;
2705 uint8_t rsp_pkt[REQUEST_ENTRY_SIZE];
2523}; 2706};
2524 2707
2525/* Request queue data structure */ 2708/* Request queue data structure */
@@ -2544,6 +2727,11 @@ struct req_que {
2544 uint16_t num_outstanding_cmds; 2727 uint16_t num_outstanding_cmds;
2545#define MAX_Q_DEPTH 32 2728#define MAX_Q_DEPTH 32
2546 int max_q_depth; 2729 int max_q_depth;
2730
2731 dma_addr_t dma_fx00;
2732 request_t *ring_fx00;
2733 uint16_t length_fx00;
2734 uint8_t req_pkt[REQUEST_ENTRY_SIZE];
2547}; 2735};
2548 2736
2549/* Place holder for FW buffer parameters */ 2737/* Place holder for FW buffer parameters */
@@ -2633,7 +2821,10 @@ struct qla_hw_data {
2633 uint32_t isp82xx_no_md_cap:1; 2821 uint32_t isp82xx_no_md_cap:1;
2634 uint32_t host_shutting_down:1; 2822 uint32_t host_shutting_down:1;
2635 uint32_t idc_compl_status:1; 2823 uint32_t idc_compl_status:1;
2636 /* 32 bits */ 2824
2825 uint32_t mr_reset_hdlr_active:1;
2826 uint32_t mr_intr_valid:1;
2827 /* 34 bits */
2637 } flags; 2828 } flags;
2638 2829
2639 /* This spinlock is used to protect "io transactions", you must 2830 /* This spinlock is used to protect "io transactions", you must
@@ -2650,7 +2841,21 @@ struct qla_hw_data {
2650 resource_size_t pio_address; 2841 resource_size_t pio_address;
2651 2842
2652#define MIN_IOBASE_LEN 0x100 2843#define MIN_IOBASE_LEN 0x100
2653/* Multi queue data structs */ 2844 dma_addr_t bar0_hdl;
2845
2846 void __iomem *cregbase;
2847 dma_addr_t bar2_hdl;
2848#define BAR0_LEN_FX00 (1024 * 1024)
2849#define BAR2_LEN_FX00 (128 * 1024)
2850
2851 uint32_t rqstq_intr_code;
2852 uint32_t mbx_intr_code;
2853 uint32_t req_que_len;
2854 uint32_t rsp_que_len;
2855 uint32_t req_que_off;
2856 uint32_t rsp_que_off;
2857
2858 /* Multi queue data structs */
2654 device_reg_t __iomem *mqiobase; 2859 device_reg_t __iomem *mqiobase;
2655 device_reg_t __iomem *msixbase; 2860 device_reg_t __iomem *msixbase;
2656 uint16_t msix_count; 2861 uint16_t msix_count;
@@ -2729,7 +2934,8 @@ struct qla_hw_data {
2729#define DT_ISP8021 BIT_14 2934#define DT_ISP8021 BIT_14
2730#define DT_ISP2031 BIT_15 2935#define DT_ISP2031 BIT_15
2731#define DT_ISP8031 BIT_16 2936#define DT_ISP8031 BIT_16
2732#define DT_ISP_LAST (DT_ISP8031 << 1) 2937#define DT_ISPFX00 BIT_17
2938#define DT_ISP_LAST (DT_ISPFX00 << 1)
2733 2939
2734#define DT_T10_PI BIT_25 2940#define DT_T10_PI BIT_25
2735#define DT_IIDMA BIT_26 2941#define DT_IIDMA BIT_26
@@ -2757,6 +2963,7 @@ struct qla_hw_data {
2757#define IS_QLA82XX(ha) (DT_MASK(ha) & DT_ISP8021) 2963#define IS_QLA82XX(ha) (DT_MASK(ha) & DT_ISP8021)
2758#define IS_QLA2031(ha) (DT_MASK(ha) & DT_ISP2031) 2964#define IS_QLA2031(ha) (DT_MASK(ha) & DT_ISP2031)
2759#define IS_QLA8031(ha) (DT_MASK(ha) & DT_ISP8031) 2965#define IS_QLA8031(ha) (DT_MASK(ha) & DT_ISP8031)
2966#define IS_QLAFX00(ha) (DT_MASK(ha) & DT_ISPFX00)
2760 2967
2761#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \ 2968#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
2762 IS_QLA6312(ha) || IS_QLA6322(ha)) 2969 IS_QLA6312(ha) || IS_QLA6322(ha))
@@ -2821,6 +3028,7 @@ struct qla_hw_data {
2821 uint16_t r_a_tov; 3028 uint16_t r_a_tov;
2822 int port_down_retry_count; 3029 int port_down_retry_count;
2823 uint8_t mbx_count; 3030 uint8_t mbx_count;
3031 uint8_t aen_mbx_count;
2824 3032
2825 uint32_t login_retry_count; 3033 uint32_t login_retry_count;
2826 /* SNS command interfaces. */ 3034 /* SNS command interfaces. */
@@ -2868,9 +3076,13 @@ struct qla_hw_data {
2868 void *swl; 3076 void *swl;
2869 3077
2870 /* These are used by mailbox operations. */ 3078 /* These are used by mailbox operations. */
2871 volatile uint16_t mailbox_out[MAILBOX_REGISTER_COUNT]; 3079 uint16_t mailbox_out[MAILBOX_REGISTER_COUNT];
3080 uint32_t mailbox_out32[MAILBOX_REGISTER_COUNT];
3081 uint32_t aenmb[AEN_MAILBOX_REGISTER_COUNT_FX00];
2872 3082
2873 mbx_cmd_t *mcp; 3083 mbx_cmd_t *mcp;
3084 struct mbx_cmd_32 *mcp32;
3085
2874 unsigned long mbx_cmd_flags; 3086 unsigned long mbx_cmd_flags;
2875#define MBX_INTERRUPT 1 3087#define MBX_INTERRUPT 1
2876#define MBX_INTR_WAIT 2 3088#define MBX_INTR_WAIT 2
@@ -3014,6 +3226,7 @@ struct qla_hw_data {
3014 int cur_vport_count; 3226 int cur_vport_count;
3015 3227
3016 struct qla_chip_state_84xx *cs84xx; 3228 struct qla_chip_state_84xx *cs84xx;
3229 struct qla_statistics qla_stats;
3017 struct isp_operations *isp_ops; 3230 struct isp_operations *isp_ops;
3018 struct workqueue_struct *wq; 3231 struct workqueue_struct *wq;
3019 struct qlfc_fw fw_buf; 3232 struct qlfc_fw fw_buf;
@@ -3080,6 +3293,8 @@ struct qla_hw_data {
3080 unsigned long host_last_rampup_time; 3293 unsigned long host_last_rampup_time;
3081 int cfg_lun_q_depth; 3294 int cfg_lun_q_depth;
3082 3295
3296 struct mr_data_fx00 mr;
3297
3083 struct qlt_hw_data tgt; 3298 struct qlt_hw_data tgt;
3084 uint16_t thermal_support; 3299 uint16_t thermal_support;
3085#define THERMAL_SUPPORT_I2C BIT_0 3300#define THERMAL_SUPPORT_I2C BIT_0
@@ -3109,6 +3324,8 @@ typedef struct scsi_qla_host {
3109 uint32_t process_response_queue :1; 3324 uint32_t process_response_queue :1;
3110 uint32_t difdix_supported:1; 3325 uint32_t difdix_supported:1;
3111 uint32_t delete_progress:1; 3326 uint32_t delete_progress:1;
3327
3328 uint32_t fw_tgt_reported:1;
3112 } flags; 3329 } flags;
3113 3330
3114 atomic_t loop_state; 3331 atomic_t loop_state;
@@ -3144,6 +3361,9 @@ typedef struct scsi_qla_host {
3144#define SCR_PENDING 21 /* SCR in target mode */ 3361#define SCR_PENDING 21 /* SCR in target mode */
3145#define HOST_RAMP_DOWN_QUEUE_DEPTH 22 3362#define HOST_RAMP_DOWN_QUEUE_DEPTH 22
3146#define HOST_RAMP_UP_QUEUE_DEPTH 23 3363#define HOST_RAMP_UP_QUEUE_DEPTH 23
3364#define PORT_UPDATE_NEEDED 24
3365#define FX00_RESET_RECOVERY 25
3366#define FX00_TARGET_SCAN 26
3147 3367
3148 uint32_t device_flags; 3368 uint32_t device_flags;
3149#define SWITCH_FOUND BIT_0 3369#define SWITCH_FOUND BIT_0
@@ -3234,6 +3454,10 @@ struct qla_tgt_vp_map {
3234 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \ 3454 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \
3235 atomic_read(&ha->loop_state) == LOOP_DOWN) 3455 atomic_read(&ha->loop_state) == LOOP_DOWN)
3236 3456
3457#define STATE_TRANSITION(ha) \
3458 (test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || \
3459 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
3460
3237#define QLA_VHA_MARK_BUSY(__vha, __bail) do { \ 3461#define QLA_VHA_MARK_BUSY(__vha, __bail) do { \
3238 atomic_inc(&__vha->vref_count); \ 3462 atomic_inc(&__vha->vref_count); \
3239 mb(); \ 3463 mb(); \
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index b310fa97b545..026bfde33e67 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -86,6 +86,7 @@ extern int qla2xxx_mctp_dump(scsi_qla_host_t *);
86 86
87extern int 87extern int
88qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *); 88qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *);
89extern int qla2x00_init_rings(scsi_qla_host_t *);
89 90
90/* 91/*
91 * Global Data in qla_os.c source file. 92 * Global Data in qla_os.c source file.
@@ -134,7 +135,6 @@ extern int qla2x00_post_async_adisc_work(struct scsi_qla_host *, fc_port_t *,
134 uint16_t *); 135 uint16_t *);
135extern int qla2x00_post_async_adisc_done_work(struct scsi_qla_host *, 136extern int qla2x00_post_async_adisc_done_work(struct scsi_qla_host *,
136 fc_port_t *, uint16_t *); 137 fc_port_t *, uint16_t *);
137extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
138 138
139extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *); 139extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *);
140 140
@@ -158,6 +158,7 @@ extern int qla83xx_set_drv_presence(scsi_qla_host_t *vha);
158extern int __qla83xx_set_drv_presence(scsi_qla_host_t *vha); 158extern int __qla83xx_set_drv_presence(scsi_qla_host_t *vha);
159extern int qla83xx_clear_drv_presence(scsi_qla_host_t *vha); 159extern int qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
160extern int __qla83xx_clear_drv_presence(scsi_qla_host_t *vha); 160extern int __qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
161extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
161 162
162/* 163/*
163 * Global Functions in qla_mid.c source file. 164 * Global Functions in qla_mid.c source file.
@@ -211,8 +212,6 @@ extern int qla24xx_start_scsi(srb_t *sp);
211int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *, 212int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
212 uint16_t, uint16_t, uint8_t); 213 uint16_t, uint16_t, uint8_t);
213extern int qla2x00_start_sp(srb_t *); 214extern int qla2x00_start_sp(srb_t *);
214extern uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *, uint16_t);
215extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t);
216extern int qla24xx_dif_start_scsi(srb_t *); 215extern int qla24xx_dif_start_scsi(srb_t *);
217extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t); 216extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t);
218extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *); 217extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
@@ -424,6 +423,12 @@ extern void qla2x00_free_irqs(scsi_qla_host_t *);
424 423
425extern int qla2x00_get_data_rate(scsi_qla_host_t *); 424extern int qla2x00_get_data_rate(scsi_qla_host_t *);
426extern const char *qla2x00_get_link_speed_str(struct qla_hw_data *, uint16_t); 425extern const char *qla2x00_get_link_speed_str(struct qla_hw_data *, uint16_t);
426extern srb_t *
427qla2x00_get_sp_from_handle(scsi_qla_host_t *, const char *, struct req_que *,
428 void *);
429extern void
430qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *,
431 uint32_t);
427 432
428/* 433/*
429 * Global Function Prototypes in qla_sup.c source file. 434 * Global Function Prototypes in qla_sup.c source file.
@@ -561,6 +566,42 @@ extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
561extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); 566extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
562extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); 567extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
563 568
569/* qlafx00 related functions */
570extern int qlafx00_pci_config(struct scsi_qla_host *);
571extern int qlafx00_initialize_adapter(struct scsi_qla_host *);
572extern void qlafx00_soft_reset(scsi_qla_host_t *);
573extern int qlafx00_chip_diag(scsi_qla_host_t *);
574extern void qlafx00_config_rings(struct scsi_qla_host *);
575extern char *qlafx00_pci_info_str(struct scsi_qla_host *, char *);
576extern char *qlafx00_fw_version_str(struct scsi_qla_host *, char *);
577extern irqreturn_t qlafx00_intr_handler(int, void *);
578extern void qlafx00_enable_intrs(struct qla_hw_data *);
579extern void qlafx00_disable_intrs(struct qla_hw_data *);
580extern int qlafx00_abort_command(srb_t *);
581extern int qlafx00_abort_target(fc_port_t *, unsigned int, int);
582extern int qlafx00_lun_reset(fc_port_t *, unsigned int, int);
583extern int qlafx00_start_scsi(srb_t *);
584extern int qlafx00_abort_isp(scsi_qla_host_t *);
585extern int qlafx00_iospace_config(struct qla_hw_data *);
586extern int qlafx00_init_firmware(scsi_qla_host_t *, uint16_t);
587extern int qlafx00_fw_ready(scsi_qla_host_t *);
588extern int qlafx00_configure_devices(scsi_qla_host_t *);
589extern int qlafx00_reset_initialize(scsi_qla_host_t *);
590extern int qlafx00_fx_disc(scsi_qla_host_t *, fc_port_t *, uint8_t);
591extern int qlafx00_process_aen(struct scsi_qla_host *, struct qla_work_evt *);
592extern int qlafx00_post_aenfx_work(struct scsi_qla_host *, uint32_t,
593 uint32_t *, int);
594extern uint32_t qlafx00_fw_state_show(struct device *,
595 struct device_attribute *, char *);
596extern void qlafx00_get_host_speed(struct Scsi_Host *);
597extern void qlafx00_init_response_q_entries(struct rsp_que *);
598
599extern void qlafx00_tm_iocb(srb_t *, struct tsk_mgmt_entry_fx00 *);
600extern void qlafx00_abort_iocb(srb_t *, struct abort_iocb_entry_fx00 *);
601extern void qlafx00_fxdisc_iocb(srb_t *, struct fxdisc_entry_fx00 *);
602extern void qlafx00_timer_routine(scsi_qla_host_t *);
603extern int qlafx00_rescan_isp(scsi_qla_host_t *);
604
564/* qla82xx related functions */ 605/* qla82xx related functions */
565 606
566/* PCI related functions */ 607/* PCI related functions */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 9b455250c101..d0ea8b921177 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -639,9 +639,14 @@ void
639qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn) 639qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn)
640{ 640{
641 struct qla_hw_data *ha = vha->hw; 641 struct qla_hw_data *ha = vha->hw;
642 sprintf(snn, "%s FW:v%d.%02d.%02d DVR:v%s",ha->model_number, 642
643 ha->fw_major_version, ha->fw_minor_version, 643 if (IS_QLAFX00(ha))
644 ha->fw_subminor_version, qla2x00_version_str); 644 sprintf(snn, "%s FW:v%s DVR:v%s", ha->model_number,
645 ha->mr.fw_version, qla2x00_version_str);
646 else
647 sprintf(snn, "%s FW:v%d.%02d.%02d DVR:v%s", ha->model_number,
648 ha->fw_major_version, ha->fw_minor_version,
649 ha->fw_subminor_version, qla2x00_version_str);
645} 650}
646 651
647/** 652/**
@@ -923,7 +928,7 @@ qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
923 sns_cmd->p.gpn_data[9] != 0x02) { 928 sns_cmd->p.gpn_data[9] != 0x02) {
924 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e, 929 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e,
925 "GPN_ID failed, rejected request, gpn_rsp:\n"); 930 "GPN_ID failed, rejected request, gpn_rsp:\n");
926 ql_dump_buffer(ql_dbg_disc, vha, 0x207f, 931 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207f,
927 sns_cmd->p.gpn_data, 16); 932 sns_cmd->p.gpn_data, 16);
928 rval = QLA_FUNCTION_FAILED; 933 rval = QLA_FUNCTION_FAILED;
929 } else { 934 } else {
@@ -1718,7 +1723,8 @@ qla2x00_fdmi_register(scsi_qla_host_t *vha)
1718 int rval; 1723 int rval;
1719 struct qla_hw_data *ha = vha->hw; 1724 struct qla_hw_data *ha = vha->hw;
1720 1725
1721 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 1726 if (IS_QLA2100(ha) || IS_QLA2200(ha) ||
1727 IS_QLAFX00(ha))
1722 return QLA_FUNCTION_FAILED; 1728 return QLA_FUNCTION_FAILED;
1723 1729
1724 rval = qla2x00_mgmt_svr_login(vha); 1730 rval = qla2x00_mgmt_svr_login(vha);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index b59203393cb2..3565dfd8f370 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -25,7 +25,6 @@
25*/ 25*/
26static int qla2x00_isp_firmware(scsi_qla_host_t *); 26static int qla2x00_isp_firmware(scsi_qla_host_t *);
27static int qla2x00_setup_chip(scsi_qla_host_t *); 27static int qla2x00_setup_chip(scsi_qla_host_t *);
28static int qla2x00_init_rings(scsi_qla_host_t *);
29static int qla2x00_fw_ready(scsi_qla_host_t *); 28static int qla2x00_fw_ready(scsi_qla_host_t *);
30static int qla2x00_configure_hba(scsi_qla_host_t *); 29static int qla2x00_configure_hba(scsi_qla_host_t *);
31static int qla2x00_configure_loop(scsi_qla_host_t *); 30static int qla2x00_configure_loop(scsi_qla_host_t *);
@@ -83,7 +82,9 @@ qla2x00_get_async_timeout(struct scsi_qla_host *vha)
83 82
84 /* Firmware should use switch negotiated r_a_tov for timeout. */ 83 /* Firmware should use switch negotiated r_a_tov for timeout. */
85 tmo = ha->r_a_tov / 10 * 2; 84 tmo = ha->r_a_tov / 10 * 2;
86 if (!IS_FWI2_CAPABLE(ha)) { 85 if (IS_QLAFX00(ha)) {
86 tmo = FX00_DEF_RATOV * 2;
87 } else if (!IS_FWI2_CAPABLE(ha)) {
87 /* 88 /*
88 * Except for earlier ISPs where the timeout is seeded from the 89 * Except for earlier ISPs where the timeout is seeded from the
89 * initialization control block. 90 * initialization control block.
@@ -1977,7 +1978,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
1977 * 1978 *
1978 * Returns 0 on success. 1979 * Returns 0 on success.
1979 */ 1980 */
1980static int 1981int
1981qla2x00_init_rings(scsi_qla_host_t *vha) 1982qla2x00_init_rings(scsi_qla_host_t *vha)
1982{ 1983{
1983 int rval; 1984 int rval;
@@ -2012,7 +2013,10 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
2012 if (!rsp) 2013 if (!rsp)
2013 continue; 2014 continue;
2014 /* Initialize response queue entries */ 2015 /* Initialize response queue entries */
2015 qla2x00_init_response_q_entries(rsp); 2016 if (IS_QLAFX00(ha))
2017 qlafx00_init_response_q_entries(rsp);
2018 else
2019 qla2x00_init_response_q_entries(rsp);
2016 } 2020 }
2017 2021
2018 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring; 2022 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
@@ -2024,11 +2028,16 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
2024 2028
2025 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2029 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2026 2030
2031 ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
2032
2033 if (IS_QLAFX00(ha)) {
2034 rval = qlafx00_init_firmware(vha, ha->init_cb_size);
2035 goto next_check;
2036 }
2037
2027 /* Update any ISP specific firmware options before initialization. */ 2038 /* Update any ISP specific firmware options before initialization. */
2028 ha->isp_ops->update_fw_options(vha); 2039 ha->isp_ops->update_fw_options(vha);
2029 2040
2030 ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
2031
2032 if (ha->flags.npiv_supported) { 2041 if (ha->flags.npiv_supported) {
2033 if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha)) 2042 if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
2034 ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1; 2043 ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
@@ -2042,6 +2051,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
2042 } 2051 }
2043 2052
2044 rval = qla2x00_init_firmware(vha, ha->init_cb_size); 2053 rval = qla2x00_init_firmware(vha, ha->init_cb_size);
2054next_check:
2045 if (rval) { 2055 if (rval) {
2046 ql_log(ql_log_fatal, vha, 0x00d2, 2056 ql_log(ql_log_fatal, vha, 0x00d2,
2047 "Init Firmware **** FAILED ****.\n"); 2057 "Init Firmware **** FAILED ****.\n");
@@ -2069,6 +2079,9 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
2069 uint16_t state[5]; 2079 uint16_t state[5];
2070 struct qla_hw_data *ha = vha->hw; 2080 struct qla_hw_data *ha = vha->hw;
2071 2081
2082 if (IS_QLAFX00(vha->hw))
2083 return qlafx00_fw_ready(vha);
2084
2072 rval = QLA_SUCCESS; 2085 rval = QLA_SUCCESS;
2073 2086
2074 /* 20 seconds for loop down. */ 2087 /* 20 seconds for loop down. */
@@ -3134,6 +3147,12 @@ void
3134qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) 3147qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
3135{ 3148{
3136 fcport->vha = vha; 3149 fcport->vha = vha;
3150
3151 if (IS_QLAFX00(vha->hw)) {
3152 qla2x00_set_fcport_state(fcport, FCS_ONLINE);
3153 qla2x00_reg_remote_port(vha, fcport);
3154 return;
3155 }
3137 fcport->login_retry = 0; 3156 fcport->login_retry = 0;
3138 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); 3157 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
3139 3158
@@ -3894,15 +3913,24 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
3894 /* Wait at most MAX_TARGET RSCNs for a stable link. */ 3913 /* Wait at most MAX_TARGET RSCNs for a stable link. */
3895 wait_time = 256; 3914 wait_time = 256;
3896 do { 3915 do {
3897 /* Issue a marker after FW becomes ready. */ 3916 if (!IS_QLAFX00(vha->hw)) {
3898 qla2x00_marker(vha, req, rsp, 0, 0, 3917 /*
3899 MK_SYNC_ALL); 3918 * Issue a marker after FW becomes
3900 vha->marker_needed = 0; 3919 * ready.
3920 */
3921 qla2x00_marker(vha, req, rsp, 0, 0,
3922 MK_SYNC_ALL);
3923 vha->marker_needed = 0;
3924 }
3901 3925
3902 /* Remap devices on Loop. */ 3926 /* Remap devices on Loop. */
3903 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 3927 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3904 3928
3905 qla2x00_configure_loop(vha); 3929 if (IS_QLAFX00(vha->hw))
3930 qlafx00_configure_devices(vha);
3931 else
3932 qla2x00_configure_loop(vha);
3933
3906 wait_time--; 3934 wait_time--;
3907 } while (!atomic_read(&vha->loop_down_timer) && 3935 } while (!atomic_read(&vha->loop_down_timer) &&
3908 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) 3936 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
@@ -3968,9 +3996,7 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
3968 if (fcport->drport && 3996 if (fcport->drport &&
3969 atomic_read(&fcport->state) != FCS_UNCONFIGURED) { 3997 atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
3970 spin_unlock_irqrestore(&ha->vport_slock, flags); 3998 spin_unlock_irqrestore(&ha->vport_slock, flags);
3971
3972 qla2x00_rport_del(fcport); 3999 qla2x00_rport_del(fcport);
3973
3974 spin_lock_irqsave(&ha->vport_slock, flags); 4000 spin_lock_irqsave(&ha->vport_slock, flags);
3975 } 4001 }
3976 } 4002 }
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 68e2c4afc134..98ab921070d2 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -5,6 +5,28 @@
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7 7
8/**
9 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
10 * Continuation Type 1 IOCBs to allocate.
11 *
12 * @dsds: number of data segment decriptors needed
13 *
14 * Returns the number of IOCB entries needed to store @dsds.
15 */
16static inline uint16_t
17qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
18{
19 uint16_t iocbs;
20
21 iocbs = 1;
22 if (dsds > 1) {
23 iocbs += (dsds - 1) / 5;
24 if ((dsds - 1) % 5)
25 iocbs++;
26 }
27 return iocbs;
28}
29
8/* 30/*
9 * qla2x00_debounce_register 31 * qla2x00_debounce_register
10 * Debounce register. 32 * Debounce register.
@@ -58,6 +80,17 @@ host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
58} 80}
59 81
60static inline void 82static inline void
83host_to_adap(uint8_t *src, uint8_t *dst, uint32_t bsize)
84{
85 uint32_t *isrc = (uint32_t *) src;
86 uint32_t *odest = (uint32_t *) dst;
87 uint32_t iter = bsize >> 2;
88
89 for (; iter ; iter--)
90 *odest++ = cpu_to_le32(*isrc++);
91}
92
93static inline void
61qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha) 94qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
62{ 95{
63 int i; 96 int i;
@@ -213,12 +246,18 @@ qla2x00_init_timer(srb_t *sp, unsigned long tmo)
213 sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout; 246 sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout;
214 add_timer(&sp->u.iocb_cmd.timer); 247 add_timer(&sp->u.iocb_cmd.timer);
215 sp->free = qla2x00_sp_free; 248 sp->free = qla2x00_sp_free;
249 if ((IS_QLAFX00(sp->fcport->vha->hw)) &&
250 (sp->type == SRB_FXIOCB_DCMD))
251 init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
216} 252}
217 253
218static inline int 254static inline int
219qla2x00_gid_list_size(struct qla_hw_data *ha) 255qla2x00_gid_list_size(struct qla_hw_data *ha)
220{ 256{
221 return sizeof(struct gid_list_info) * ha->max_fibre_devices; 257 if (IS_QLAFX00(ha))
258 return sizeof(uint32_t) * 32;
259 else
260 return sizeof(struct gid_list_info) * ha->max_fibre_devices;
222} 261}
223 262
224static inline void 263static inline void
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index d2630317cce8..15e4080b347c 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -135,7 +135,8 @@ qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
135 cont_pkt = (cont_a64_entry_t *)req->ring_ptr; 135 cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
136 136
137 /* Load packet defaults. */ 137 /* Load packet defaults. */
138 *((uint32_t *)(&cont_pkt->entry_type)) = 138 *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
139 __constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
139 __constant_cpu_to_le32(CONTINUE_A64_TYPE); 140 __constant_cpu_to_le32(CONTINUE_A64_TYPE);
140 141
141 return (cont_pkt); 142 return (cont_pkt);
@@ -486,6 +487,10 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
486 if (ha->mqenable || IS_QLA83XX(ha)) { 487 if (ha->mqenable || IS_QLA83XX(ha)) {
487 WRT_REG_DWORD(req->req_q_in, req->ring_index); 488 WRT_REG_DWORD(req->req_q_in, req->ring_index);
488 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr); 489 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
490 } else if (IS_QLAFX00(ha)) {
491 WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
492 RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
493 QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
489 } else if (IS_FWI2_CAPABLE(ha)) { 494 } else if (IS_FWI2_CAPABLE(ha)) {
490 WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index); 495 WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
491 RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in); 496 RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
@@ -514,11 +519,12 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
514 uint16_t lun, uint8_t type) 519 uint16_t lun, uint8_t type)
515{ 520{
516 mrk_entry_t *mrk; 521 mrk_entry_t *mrk;
517 struct mrk_entry_24xx *mrk24; 522 struct mrk_entry_24xx *mrk24 = NULL;
523 struct mrk_entry_fx00 *mrkfx = NULL;
524
518 struct qla_hw_data *ha = vha->hw; 525 struct qla_hw_data *ha = vha->hw;
519 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 526 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
520 527
521 mrk24 = NULL;
522 req = ha->req_q_map[0]; 528 req = ha->req_q_map[0];
523 mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL); 529 mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
524 if (mrk == NULL) { 530 if (mrk == NULL) {
@@ -531,7 +537,15 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
531 mrk->entry_type = MARKER_TYPE; 537 mrk->entry_type = MARKER_TYPE;
532 mrk->modifier = type; 538 mrk->modifier = type;
533 if (type != MK_SYNC_ALL) { 539 if (type != MK_SYNC_ALL) {
534 if (IS_FWI2_CAPABLE(ha)) { 540 if (IS_QLAFX00(ha)) {
541 mrkfx = (struct mrk_entry_fx00 *) mrk;
542 mrkfx->handle = MAKE_HANDLE(req->id, mrkfx->handle);
543 mrkfx->handle_hi = 0;
544 mrkfx->tgt_id = cpu_to_le16(loop_id);
545 mrkfx->lun[1] = LSB(lun);
546 mrkfx->lun[2] = MSB(lun);
547 host_to_fcp_swap(mrkfx->lun, sizeof(mrkfx->lun));
548 } else if (IS_FWI2_CAPABLE(ha)) {
535 mrk24 = (struct mrk_entry_24xx *) mrk; 549 mrk24 = (struct mrk_entry_24xx *) mrk;
536 mrk24->nport_handle = cpu_to_le16(loop_id); 550 mrk24->nport_handle = cpu_to_le16(loop_id);
537 mrk24->lun[1] = LSB(lun); 551 mrk24->lun[1] = LSB(lun);
@@ -589,28 +603,6 @@ int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
589 return QLA_SUCCESS; 603 return QLA_SUCCESS;
590} 604}
591 605
592/**
593 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
594 * Continuation Type 1 IOCBs to allocate.
595 *
596 * @dsds: number of data segment decriptors needed
597 *
598 * Returns the number of IOCB entries needed to store @dsds.
599 */
600inline uint16_t
601qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
602{
603 uint16_t iocbs;
604
605 iocbs = 1;
606 if (dsds > 1) {
607 iocbs += (dsds - 1) / 5;
608 if ((dsds - 1) % 5)
609 iocbs++;
610 }
611 return iocbs;
612}
613
614static inline int 606static inline int
615qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt, 607qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
616 uint16_t tot_dsds) 608 uint16_t tot_dsds)
@@ -1583,7 +1575,6 @@ queuing_error:
1583 return QLA_FUNCTION_FAILED; 1575 return QLA_FUNCTION_FAILED;
1584} 1576}
1585 1577
1586
1587/** 1578/**
1588 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP 1579 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1589 * @sp: command to send to the ISP 1580 * @sp: command to send to the ISP
@@ -1852,6 +1843,8 @@ skip_cmd_array:
1852 cnt = RD_REG_DWORD(&reg->isp82.req_q_out); 1843 cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
1853 else if (IS_FWI2_CAPABLE(ha)) 1844 else if (IS_FWI2_CAPABLE(ha))
1854 cnt = RD_REG_DWORD(&reg->isp24.req_q_out); 1845 cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
1846 else if (IS_QLAFX00(ha))
1847 cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
1855 else 1848 else
1856 cnt = qla2x00_debounce_register( 1849 cnt = qla2x00_debounce_register(
1857 ISP_REQ_Q_OUT(ha, &reg->isp)); 1850 ISP_REQ_Q_OUT(ha, &reg->isp));
@@ -1869,8 +1862,13 @@ skip_cmd_array:
1869 req->cnt -= req_cnt; 1862 req->cnt -= req_cnt;
1870 pkt = req->ring_ptr; 1863 pkt = req->ring_ptr;
1871 memset(pkt, 0, REQUEST_ENTRY_SIZE); 1864 memset(pkt, 0, REQUEST_ENTRY_SIZE);
1872 pkt->entry_count = req_cnt; 1865 if (IS_QLAFX00(ha)) {
1873 pkt->handle = handle; 1866 WRT_REG_BYTE(&pkt->entry_count, req_cnt);
1867 WRT_REG_WORD(&pkt->handle, handle);
1868 } else {
1869 pkt->entry_count = req_cnt;
1870 pkt->handle = handle;
1871 }
1874 1872
1875queuing_error: 1873queuing_error:
1876 return pkt; 1874 return pkt;
@@ -2625,7 +2623,16 @@ qla2x00_start_sp(srb_t *sp)
2625 qla2x00_adisc_iocb(sp, pkt); 2623 qla2x00_adisc_iocb(sp, pkt);
2626 break; 2624 break;
2627 case SRB_TM_CMD: 2625 case SRB_TM_CMD:
2628 qla24xx_tm_iocb(sp, pkt); 2626 IS_QLAFX00(ha) ?
2627 qlafx00_tm_iocb(sp, pkt) :
2628 qla24xx_tm_iocb(sp, pkt);
2629 break;
2630 case SRB_FXIOCB_DCMD:
2631 case SRB_FXIOCB_BCMD:
2632 qlafx00_fxdisc_iocb(sp, pkt);
2633 break;
2634 case SRB_ABT_CMD:
2635 qlafx00_abort_iocb(sp, pkt);
2629 break; 2636 break;
2630 default: 2637 default:
2631 break; 2638 break;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index e9dbd74c20d3..259d9205d876 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -13,11 +13,7 @@
13#include <scsi/scsi_bsg_fc.h> 13#include <scsi/scsi_bsg_fc.h>
14#include <scsi/scsi_eh.h> 14#include <scsi/scsi_eh.h>
15 15
16#include "qla_target.h"
17
18static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); 16static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
19static void qla2x00_process_completed_request(struct scsi_qla_host *,
20 struct req_que *, uint32_t);
21static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); 17static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
22static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *); 18static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
23static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, 19static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
@@ -1065,9 +1061,9 @@ skip_rio:
1065 * @ha: SCSI driver HA context 1061 * @ha: SCSI driver HA context
1066 * @index: SRB index 1062 * @index: SRB index
1067 */ 1063 */
1068static void 1064void
1069qla2x00_process_completed_request(struct scsi_qla_host *vha, 1065qla2x00_process_completed_request(struct scsi_qla_host *vha,
1070 struct req_que *req, uint32_t index) 1066 struct req_que *req, uint32_t index)
1071{ 1067{
1072 srb_t *sp; 1068 srb_t *sp;
1073 struct qla_hw_data *ha = vha->hw; 1069 struct qla_hw_data *ha = vha->hw;
@@ -1101,7 +1097,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
1101 } 1097 }
1102} 1098}
1103 1099
1104static srb_t * 1100srb_t *
1105qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, 1101qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
1106 struct req_que *req, void *iocb) 1102 struct req_que *req, void *iocb)
1107{ 1103{
@@ -1994,7 +1990,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1994 return; 1990 return;
1995 } 1991 }
1996 1992
1997 lscsi_status = scsi_status & STATUS_MASK; 1993 lscsi_status = scsi_status & STATUS_MASK;
1998 1994
1999 fcport = sp->fcport; 1995 fcport = sp->fcport;
2000 1996
@@ -2939,7 +2935,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
2939 2935
2940 /* If possible, enable MSI-X. */ 2936 /* If possible, enable MSI-X. */
2941 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && 2937 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2942 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha)) 2938 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha))
2943 goto skip_msi; 2939 goto skip_msi;
2944 2940
2945 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && 2941 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
@@ -2972,7 +2968,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
2972skip_msix: 2968skip_msix:
2973 2969
2974 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && 2970 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2975 !IS_QLA8001(ha) && !IS_QLA82XX(ha)) 2971 !IS_QLA8001(ha) && !IS_QLA82XX(ha) && !IS_QLAFX00(ha))
2976 goto skip_msi; 2972 goto skip_msi;
2977 2973
2978 ret = pci_enable_msi(ha->pdev); 2974 ret = pci_enable_msi(ha->pdev);
@@ -2998,9 +2994,11 @@ skip_msi:
2998 "Failed to reserve interrupt %d already in use.\n", 2994 "Failed to reserve interrupt %d already in use.\n",
2999 ha->pdev->irq); 2995 ha->pdev->irq);
3000 goto fail; 2996 goto fail;
3001 } else if (!ha->flags.msi_enabled) 2997 } else if (!ha->flags.msi_enabled) {
3002 ql_dbg(ql_dbg_init, vha, 0x0125, 2998 ql_dbg(ql_dbg_init, vha, 0x0125,
3003 "INTa mode: Enabled.\n"); 2999 "INTa mode: Enabled.\n");
3000 ha->flags.mr_intr_valid = 1;
3001 }
3004 3002
3005clear_risc_ints: 3003clear_risc_ints:
3006 3004
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 43345af56431..9e5d89db7272 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -4113,7 +4113,6 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
4113 int rval; 4113 int rval;
4114 mbx_cmd_t mc; 4114 mbx_cmd_t mc;
4115 mbx_cmd_t *mcp = &mc; 4115 mbx_cmd_t *mcp = &mc;
4116 uint32_t iter_cnt = 0x1;
4117 4116
4118 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7, 4117 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
4119 "Entered %s.\n", __func__); 4118 "Entered %s.\n", __func__);
@@ -4139,8 +4138,8 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
4139 mcp->mb[7] = MSW(MSD(mreq->rcv_dma)); 4138 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
4140 4139
4141 /* Iteration count */ 4140 /* Iteration count */
4142 mcp->mb[18] = LSW(iter_cnt); 4141 mcp->mb[18] = LSW(mreq->iteration_count);
4143 mcp->mb[19] = MSW(iter_cnt); 4142 mcp->mb[19] = MSW(mreq->iteration_count);
4144 4143
4145 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15| 4144 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
4146 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; 4145 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
@@ -4518,7 +4517,8 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
4518 goto done; 4517 goto done;
4519 4518
4520 ql_log(ql_log_warn, vha, 0x10c9, 4519 ql_log(ql_log_warn, vha, 0x10c9,
4521 "Thermal not supported by I2C.\n"); 4520 "Thermal not supported through I2C bus, trying alternate "
4521 "method (ISP access).\n");
4522 ha->thermal_support &= ~THERMAL_SUPPORT_I2C; 4522 ha->thermal_support &= ~THERMAL_SUPPORT_I2C;
4523 } 4523 }
4524 4524
@@ -4528,7 +4528,7 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
4528 goto done; 4528 goto done;
4529 4529
4530 ql_log(ql_log_warn, vha, 0x1019, 4530 ql_log(ql_log_warn, vha, 0x1019,
4531 "Thermal not supported by ISP.\n"); 4531 "Thermal not supported through ISP.\n");
4532 ha->thermal_support &= ~THERMAL_SUPPORT_ISP; 4532 ha->thermal_support &= ~THERMAL_SUPPORT_ISP;
4533 } 4533 }
4534 4534
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
new file mode 100644
index 000000000000..729b74389f83
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -0,0 +1,3476 @@
1/*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2013 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7#include "qla_def.h"
8#include <linux/delay.h>
9#include <linux/pci.h>
10#include <linux/ratelimit.h>
11#include <linux/vmalloc.h>
12#include <scsi/scsi_tcq.h>
13#include <linux/utsname.h>
14
15
16/* QLAFX00 specific Mailbox implementation functions */
17
18/*
19 * qlafx00_mailbox_command
20 * Issue mailbox command and waits for completion.
21 *
22 * Input:
23 * ha = adapter block pointer.
24 * mcp = driver internal mbx struct pointer.
25 *
26 * Output:
27 * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
28 *
29 * Returns:
30 * 0 : QLA_SUCCESS = cmd performed success
31 * 1 : QLA_FUNCTION_FAILED (error encountered)
32 * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
33 *
34 * Context:
35 * Kernel context.
36 */
static int
qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)

{
	int rval;
	unsigned long flags = 0;
	device_reg_t __iomem *reg;
	uint8_t abort_active;
	uint8_t io_lock_on;
	uint16_t command = 0;
	uint32_t *iptr;
	uint32_t __iomem *optr;
	uint32_t cnt;
	uint32_t mboxes;
	unsigned long wait_time;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* Bail out early if the PCI channel is in an unrecoverable
	 * EEH state -- register access would be meaningless. */
	if (ha->pdev->error_state > pci_channel_io_frozen) {
		ql_log(ql_log_warn, vha, 0x115c,
		    "error_state is greater than pci_channel_io_frozen, "
		    "exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_log(ql_log_warn, vha, 0x115f,
		    "Device in failed state, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	reg = ha->iobase;
	io_lock_on = base_vha->flags.init_done;

	rval = QLA_SUCCESS;
	abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);

	if (ha->flags.pci_channel_io_perm_failure) {
		ql_log(ql_log_warn, vha, 0x1175,
		    "Perm failure on EEH timeout MBX, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (ha->flags.isp82xx_fw_hung) {
		/* Setting Link-Down error */
		mcp->mb[0] = MBS_LINK_DOWN_ERROR;
		ql_log(ql_log_warn, vha, 0x1176,
		    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
		rval = QLA_FUNCTION_FAILED;
		goto premature_exit;
	}

	/*
	 * Wait for active mailbox commands to finish by waiting at most tov
	 * seconds. This is to serialize actual issuing of mailbox cmds during
	 * non ISP abort time.
	 */
	if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
		/* Timeout occurred. Return error. */
		ql_log(ql_log_warn, vha, 0x1177,
		    "Cmd access timeout, cmd=0x%x, Exiting.\n",
		    mcp->mb[0]);
		return QLA_FUNCTION_TIMEOUT;
	}

	ha->flags.mbox_busy = 1;
	/* Save mailbox command for debug */
	ha->mcp32 = mcp;

	ql_dbg(ql_dbg_mbx, vha, 0x1178,
	    "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Load mailbox registers: copy each mb[] word whose bit is set
	 * in out_mb into the consecutive FX00 mailbox registers. */
	optr = (uint32_t __iomem *)&reg->ispfx00.mailbox0;

	iptr = mcp->mb;
	command = mcp->mb[0];
	mboxes = mcp->out_mb;

	for (cnt = 0; cnt < ha->mbx_count; cnt++) {
		if (mboxes & BIT_0)
			WRT_REG_DWORD(optr, *iptr);

		mboxes >>= 1;
		optr++;
		iptr++;
	}

	/* Issue set host interrupt command to send cmd out. */
	ha->flags.mbox_int = 0;
	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1172,
	    (uint8_t *)mcp->mb, 16);
	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1173,
	    ((uint8_t *)mcp->mb + 0x10), 16);
	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1174,
	    ((uint8_t *)mcp->mb + 0x20), 8);

	/* Unlock mbx registers and wait for interrupt */
	ql_dbg(ql_dbg_mbx, vha, 0x1179,
	    "Going to unlock irq & waiting for interrupts. "
	    "jiffies=%lx.\n", jiffies);

	/* Wait for mbx cmd completion until timeout.
	 * Interrupt-driven wait when init is done and no abort is active;
	 * otherwise fall back to polling the response queue. */
	if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
		set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);

		QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);

		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);

	} else {
		ql_dbg(ql_dbg_mbx, vha, 0x112c,
		    "Cmd=%x Polling Mode.\n", command);

		QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
		while (!ha->flags.mbox_int) {
			if (time_after(jiffies, wait_time))
				break;

			/* Check for pending interrupts. */
			qla2x00_poll(ha->rsp_q_map[0]);

			if (!ha->flags.mbox_int &&
			    !(IS_QLA2200(ha) &&
			    command == MBC_LOAD_RISC_RAM_EXTENDED))
				usleep_range(10000, 11000);
		} /* while */
		ql_dbg(ql_dbg_mbx, vha, 0x112d,
		    "Waited %d sec.\n",
		    (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
	}

	/* Check whether we timed out -- mbox_int set by the ISR means
	 * the firmware answered before the deadline. */
	if (ha->flags.mbox_int) {
		uint32_t *iptr2;

		ql_dbg(ql_dbg_mbx, vha, 0x112e,
		    "Cmd=%x completed.\n", command);

		/* Got interrupt. Clear the flag. */
		ha->flags.mbox_int = 0;
		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

		if (ha->mailbox_out32[0] != MBS_COMMAND_COMPLETE)
			rval = QLA_FUNCTION_FAILED;

		/* Load return mailbox registers (copied out of the
		 * hardware by the interrupt handler into mailbox_out32). */
		iptr2 = mcp->mb;
		iptr = (uint32_t *)&ha->mailbox_out32[0];
		mboxes = mcp->in_mb;
		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
			if (mboxes & BIT_0)
				*iptr2 = *iptr;

			mboxes >>= 1;
			iptr2++;
			iptr++;
		}
	} else {

		rval = QLA_FUNCTION_TIMEOUT;
	}

	ha->flags.mbox_busy = 0;

	/* Clean up */
	ha->mcp32 = NULL;

	if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
		ql_dbg(ql_dbg_mbx, vha, 0x113a,
		    "checking for additional resp interrupt.\n");

		/* polling mode for non isp_abort commands. */
		qla2x00_poll(ha->rsp_q_map[0]);
	}

	/* On timeout (other than a deliberate GEN_SYSTEM_ERROR) recover
	 * via ISP abort: schedule it on the DPC thread, or run it
	 * directly if we are already in DPC context. */
	if (rval == QLA_FUNCTION_TIMEOUT &&
	    mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
		if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
		    ha->flags.eeh_busy) {
			/* not in dpc. schedule it for dpc to take over. */
			ql_dbg(ql_dbg_mbx, vha, 0x115d,
			    "Timeout, schedule isp_abort_needed.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {

				ql_log(ql_log_info, base_vha, 0x115e,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
				    "abort.\n", command, mcp->mb[0],
				    ha->flags.eeh_busy);
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		} else if (!abort_active) {
			/* call abort directly since we are in the DPC thread */
			ql_dbg(ql_dbg_mbx, vha, 0x1160,
			    "Timeout, calling abort_isp.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {

				ql_log(ql_log_info, base_vha, 0x1161,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x. Scheduling ISP abort ",
				    command, mcp->mb[0]);

				set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				if (ha->isp_ops->abort_isp(vha)) {
					/* Failed. retry later. */
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}
				clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				ql_dbg(ql_dbg_mbx, vha, 0x1162,
				    "Finished abort_isp.\n");
			}
		}
	}

premature_exit:
	/* Allow next mbx cmd to come in. */
	complete(&ha->mbx_cmd_comp);

	if (rval) {
		ql_log(ql_log_warn, base_vha, 0x1163,
		    "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, "
		    "mb[3]=%x, cmd=%x ****.\n",
		    mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
	} else {
		ql_dbg(ql_dbg_mbx, base_vha, 0x1164, "Done %s.\n", __func__);
	}

	return rval;
}
286
287/*
288 * qlafx00_driver_shutdown
289 * Indicate a driver shutdown to firmware.
290 *
291 * Input:
292 * ha = adapter block pointer.
293 *
294 * Returns:
295 * local function return status code.
296 *
297 * Context:
298 * Kernel context.
299 */
300static int
301qlafx00_driver_shutdown(scsi_qla_host_t *vha, int tmo)
302{
303 int rval;
304 struct mbx_cmd_32 mc;
305 struct mbx_cmd_32 *mcp = &mc;
306
307 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1166,
308 "Entered %s.\n", __func__);
309
310 mcp->mb[0] = MBC_MR_DRV_SHUTDOWN;
311 mcp->out_mb = MBX_0;
312 mcp->in_mb = MBX_0;
313 if (tmo)
314 mcp->tov = tmo;
315 else
316 mcp->tov = MBX_TOV_SECONDS;
317 mcp->flags = 0;
318 rval = qlafx00_mailbox_command(vha, mcp);
319
320 if (rval != QLA_SUCCESS) {
321 ql_dbg(ql_dbg_mbx, vha, 0x1167,
322 "Failed=%x.\n", rval);
323 } else {
324 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1168,
325 "Done %s.\n", __func__);
326 }
327
328 return rval;
329}
330
331/*
332 * qlafx00_get_firmware_state
333 * Get adapter firmware state.
334 *
335 * Input:
336 * ha = adapter block pointer.
337 * TARGET_QUEUE_LOCK must be released.
338 * ADAPTER_STATE_LOCK must be released.
339 *
340 * Returns:
341 * qla7xxx local function return status code.
342 *
343 * Context:
344 * Kernel context.
345 */
346static int
347qlafx00_get_firmware_state(scsi_qla_host_t *vha, uint32_t *states)
348{
349 int rval;
350 struct mbx_cmd_32 mc;
351 struct mbx_cmd_32 *mcp = &mc;
352
353 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1169,
354 "Entered %s.\n", __func__);
355
356 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
357 mcp->out_mb = MBX_0;
358 mcp->in_mb = MBX_1|MBX_0;
359 mcp->tov = MBX_TOV_SECONDS;
360 mcp->flags = 0;
361 rval = qlafx00_mailbox_command(vha, mcp);
362
363 /* Return firmware states. */
364 states[0] = mcp->mb[1];
365
366 if (rval != QLA_SUCCESS) {
367 ql_dbg(ql_dbg_mbx, vha, 0x116a,
368 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
369 } else {
370 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116b,
371 "Done %s.\n", __func__);
372 }
373 return rval;
374}
375
376/*
377 * qlafx00_init_firmware
378 * Initialize adapter firmware.
379 *
380 * Input:
381 * ha = adapter block pointer.
382 * dptr = Initialization control block pointer.
383 * size = size of initialization control block.
384 * TARGET_QUEUE_LOCK must be released.
385 * ADAPTER_STATE_LOCK must be released.
386 *
387 * Returns:
388 * qlafx00 local function return status code.
389 *
390 * Context:
391 * Kernel context.
392 */
393int
394qlafx00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
395{
396 int rval;
397 struct mbx_cmd_32 mc;
398 struct mbx_cmd_32 *mcp = &mc;
399 struct qla_hw_data *ha = vha->hw;
400
401 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116c,
402 "Entered %s.\n", __func__);
403
404 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
405
406 mcp->mb[1] = 0;
407 mcp->mb[2] = MSD(ha->init_cb_dma);
408 mcp->mb[3] = LSD(ha->init_cb_dma);
409
410 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
411 mcp->in_mb = MBX_0;
412 mcp->buf_size = size;
413 mcp->flags = MBX_DMA_OUT;
414 mcp->tov = MBX_TOV_SECONDS;
415 rval = qlafx00_mailbox_command(vha, mcp);
416
417 if (rval != QLA_SUCCESS) {
418 ql_dbg(ql_dbg_mbx, vha, 0x116d,
419 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
420 } else {
421 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116e,
422 "Done %s.\n", __func__);
423 }
424 return rval;
425}
426
427/*
428 * qlafx00_mbx_reg_test
429 */
/*
 * Write 16 known bit patterns to the outgoing mailbox registers and
 * verify the values the firmware echoes back.  Basic sanity check of
 * the host<->firmware mailbox path, run from qlafx00_chip_diag().
 */
static int
qlafx00_mbx_reg_test(scsi_qla_host_t *vha)
{
	int rval;
	struct mbx_cmd_32 mc;
	struct mbx_cmd_32 *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116f,
	    "Entered %s.\n", __func__);


	mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
	mcp->mb[1] = 0xAAAA;
	mcp->mb[2] = 0x5555;
	mcp->mb[3] = 0xAA55;
	mcp->mb[4] = 0x55AA;
	mcp->mb[5] = 0xA5A5;
	mcp->mb[6] = 0x5A5A;
	mcp->mb[7] = 0x2525;
	mcp->mb[8] = 0xBBBB;
	mcp->mb[9] = 0x6666;
	mcp->mb[10] = 0xBB66;
	mcp->mb[11] = 0x66BB;
	mcp->mb[12] = 0xB6B6;
	mcp->mb[13] = 0x6B6B;
	mcp->mb[14] = 0x3636;
	mcp->mb[15] = 0xCCCC;


	mcp->out_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
			MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
			MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->buf_size = 0;
	mcp->flags = MBX_DMA_OUT;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qlafx00_mailbox_command(vha, mcp);
	if (rval == QLA_SUCCESS) {
		/* NOTE(review): the patterns written to mb[1..15] are
		 * checked at mb[17..31]; presumably the FX00 firmware
		 * returns the echoed values in the upper mailbox
		 * registers -- confirm against the ISPFx00 firmware
		 * interface specification. */
		if (mcp->mb[17] != 0xAAAA || mcp->mb[18] != 0x5555 ||
		    mcp->mb[19] != 0xAA55 || mcp->mb[20] != 0x55AA)
			rval = QLA_FUNCTION_FAILED;
		if (mcp->mb[21] != 0xA5A5 || mcp->mb[22] != 0x5A5A ||
		    mcp->mb[23] != 0x2525 || mcp->mb[24] != 0xBBBB)
			rval = QLA_FUNCTION_FAILED;
		if (mcp->mb[25] != 0x6666 || mcp->mb[26] != 0xBB66 ||
		    mcp->mb[27] != 0x66BB || mcp->mb[28] != 0xB6B6)
			rval = QLA_FUNCTION_FAILED;
		if (mcp->mb[29] != 0x6B6B || mcp->mb[30] != 0x3636 ||
		    mcp->mb[31] != 0xCCCC)
			rval = QLA_FUNCTION_FAILED;
	}

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1170,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1171,
		    "Done %s.\n", __func__);
	}
	return rval;
}
491
492/**
493 * qlafx00_pci_config() - Setup ISPFx00 PCI configuration registers.
494 * @ha: HA context
495 *
496 * Returns 0 on success.
497 */
498int
499qlafx00_pci_config(scsi_qla_host_t *vha)
500{
501 uint16_t w;
502 struct qla_hw_data *ha = vha->hw;
503
504 pci_set_master(ha->pdev);
505 pci_try_set_mwi(ha->pdev);
506
507 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
508 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
509 w &= ~PCI_COMMAND_INTX_DISABLE;
510 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
511
512 /* PCIe -- adjust Maximum Read Request Size (2048). */
513 if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
514 pcie_set_readrq(ha->pdev, 2048);
515
516 ha->chip_revision = ha->pdev->revision;
517
518 return QLA_SUCCESS;
519}
520
/**
 * qlafx00_soc_cpu_reset() - Perform warm reset of the iSA (CPU cores on SOC).
 * @vha: SCSI host context
 */
526static inline void
527qlafx00_soc_cpu_reset(scsi_qla_host_t *vha)
528{
529 unsigned long flags = 0;
530 struct qla_hw_data *ha = vha->hw;
531 int i, core;
532 uint32_t cnt;
533
534 /* Set all 4 cores in reset */
535 for (i = 0; i < 4; i++) {
536 QLAFX00_SET_HBA_SOC_REG(ha,
537 (SOC_SW_RST_CONTROL_REG_CORE0 + 8*i), (0xF01));
538 }
539
540 /* Set all 4 core Clock gating control */
541 for (i = 0; i < 4; i++) {
542 QLAFX00_SET_HBA_SOC_REG(ha,
543 (SOC_SW_RST_CONTROL_REG_CORE0 + 4 + 8*i), (0x01010101));
544 }
545
546 /* Reset all units in Fabric */
547 QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x11F0101));
548
549 /* Reset all interrupt control registers */
550 for (i = 0; i < 115; i++) {
551 QLAFX00_SET_HBA_SOC_REG(ha,
552 (SOC_INTERRUPT_SOURCE_I_CONTROL_REG + 4*i), (0x0));
553 }
554
555 /* Reset Timers control registers. per core */
556 for (core = 0; core < 4; core++)
557 for (i = 0; i < 8; i++)
558 QLAFX00_SET_HBA_SOC_REG(ha,
559 (SOC_CORE_TIMER_REG + 0x100*core + 4*i), (0x0));
560
561 /* Reset per core IRQ ack register */
562 for (core = 0; core < 4; core++)
563 QLAFX00_SET_HBA_SOC_REG(ha,
564 (SOC_IRQ_ACK_REG + 0x100*core), (0x3FF));
565
566 /* Set Fabric control and config to defaults */
567 QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONTROL_REG, (0x2));
568 QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONFIG_REG, (0x3));
569
570 spin_lock_irqsave(&ha->hardware_lock, flags);
571
572 /* Kick in Fabric units */
573 QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x0));
574
575 /* Kick in Core0 to start boot process */
576 QLAFX00_SET_HBA_SOC_REG(ha, SOC_SW_RST_CONTROL_REG_CORE0, (0xF00));
577
578 /* Wait 10secs for soft-reset to complete. */
579 for (cnt = 10; cnt; cnt--) {
580 msleep(1000);
581 barrier();
582 }
583 spin_unlock_irqrestore(&ha->hardware_lock, flags);
584}
585
/**
 * qlafx00_soft_reset() - Soft Reset ISPFx00.
 * @vha: SCSI host context
 *
 * Disables interrupts, resets the SOC CPU cores, then re-enables
 * interrupts.  No return value.
 */
592void
593qlafx00_soft_reset(scsi_qla_host_t *vha)
594{
595 struct qla_hw_data *ha = vha->hw;
596
597 if (unlikely(pci_channel_offline(ha->pdev) &&
598 ha->flags.pci_channel_io_perm_failure))
599 return;
600
601 ha->isp_ops->disable_intrs(ha);
602 qlafx00_soc_cpu_reset(vha);
603 ha->isp_ops->enable_intrs(ha);
604}
605
606/**
607 * qlafx00_chip_diag() - Test ISPFx00 for proper operation.
608 * @ha: HA context
609 *
610 * Returns 0 on success.
611 */
612int
613qlafx00_chip_diag(scsi_qla_host_t *vha)
614{
615 int rval = 0;
616 struct qla_hw_data *ha = vha->hw;
617 struct req_que *req = ha->req_q_map[0];
618
619 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
620
621 rval = qlafx00_mbx_reg_test(vha);
622 if (rval) {
623 ql_log(ql_log_warn, vha, 0x1165,
624 "Failed mailbox send register test\n");
625 } else {
626 /* Flag a successful rval */
627 rval = QLA_SUCCESS;
628 }
629 return rval;
630}
631
/*
 * Program the request/response ring parameters into the init control
 * block and zero the hardware ring pointers for ISPFx00.
 */
void
qlafx00_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
	struct init_cb_fx *icb;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	icb = (struct init_cb_fx *)ha->init_cb;
	icb->request_q_outpointer = __constant_cpu_to_le16(0);
	icb->response_q_inpointer = __constant_cpu_to_le16(0);
	icb->request_q_length = cpu_to_le16(req->length);
	icb->response_q_length = cpu_to_le16(rsp->length);
	/* 64-bit ring addresses, split into low/high 32-bit halves. */
	icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
	icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
	icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));

	/* Reset the hardware in/out ring indices to the start. */
	WRT_REG_DWORD(&reg->req_q_in, 0);
	WRT_REG_DWORD(&reg->req_q_out, 0);

	WRT_REG_DWORD(&reg->rsp_q_in, 0);
	WRT_REG_DWORD(&reg->rsp_q_out, 0);

	/* PCI posting */
	RD_REG_DWORD(&reg->rsp_q_out);
}
661
662char *
663qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str)
664{
665 struct qla_hw_data *ha = vha->hw;
666 int pcie_reg;
667
668 pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
669 if (pcie_reg) {
670 strcpy(str, "PCIe iSA");
671 return str;
672 }
673 return str;
674}
675
676char *
677qlafx00_fw_version_str(struct scsi_qla_host *vha, char *str)
678{
679 struct qla_hw_data *ha = vha->hw;
680
681 sprintf(str, "%s", ha->mr.fw_version);
682 return str;
683}
684
685void
686qlafx00_enable_intrs(struct qla_hw_data *ha)
687{
688 unsigned long flags = 0;
689
690 spin_lock_irqsave(&ha->hardware_lock, flags);
691 ha->interrupts_on = 1;
692 QLAFX00_ENABLE_ICNTRL_REG(ha);
693 spin_unlock_irqrestore(&ha->hardware_lock, flags);
694}
695
696void
697qlafx00_disable_intrs(struct qla_hw_data *ha)
698{
699 unsigned long flags = 0;
700
701 spin_lock_irqsave(&ha->hardware_lock, flags);
702 ha->interrupts_on = 0;
703 QLAFX00_DISABLE_ICNTRL_REG(ha);
704 spin_unlock_irqrestore(&ha->hardware_lock, flags);
705}
706
707static void
708qlafx00_tmf_iocb_timeout(void *data)
709{
710 srb_t *sp = (srb_t *)data;
711 struct srb_iocb *tmf = &sp->u.iocb_cmd;
712
713 tmf->u.tmf.comp_status = CS_TIMEOUT;
714 complete(&tmf->u.tmf.comp);
715}
716
717static void
718qlafx00_tmf_sp_done(void *data, void *ptr, int res)
719{
720 srb_t *sp = (srb_t *)ptr;
721 struct srb_iocb *tmf = &sp->u.iocb_cmd;
722
723 complete(&tmf->u.tmf.comp);
724}
725
/*
 * Build a task-management SRB (reset/abort per @flags), start it on
 * the ISP and block until the TMF completion fires or times out.
 * Returns QLA_SUCCESS only when the firmware reports CS_COMPLETE.
 */
static int
qlafx00_async_tm_cmd(fc_port_t *fcport, uint32_t flags,
    uint32_t lun, uint32_t tag)
{
	scsi_qla_host_t *vha = fcport->vha;
	struct srb_iocb *tm_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	/* Fill in the TMF parameters and completion hooks before
	 * handing the SRB to the firmware. */
	tm_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_TM_CMD;
	sp->name = "tmf";
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
	tm_iocb->u.tmf.flags = flags;
	tm_iocb->u.tmf.lun = lun;
	tm_iocb->u.tmf.data = tag;
	sp->done = qlafx00_tmf_sp_done;
	tm_iocb->timeout = qlafx00_tmf_iocb_timeout;
	init_completion(&tm_iocb->u.tmf.comp);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_async, vha, 0x507b,
	    "Task management command issued target_id=%x\n",
	    fcport->tgt_id);

	/* Block until sp->done or the timeout callback completes us. */
	wait_for_completion(&tm_iocb->u.tmf.comp);

	rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
	    QLA_SUCCESS : QLA_FUNCTION_FAILED;

done_free_sp:
	sp->free(vha, sp);
done:
	return rval;
}
768
769int
770qlafx00_abort_target(fc_port_t *fcport, unsigned int l, int tag)
771{
772 return qlafx00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
773}
774
775int
776qlafx00_lun_reset(fc_port_t *fcport, unsigned int l, int tag)
777{
778 return qlafx00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
779}
780
/*
 * Reserve and map the PCI BARs used by ISPFx00: BAR0 (control
 * registers, ha->cregbase) and BAR2 (device registers, ha->iobase).
 * Returns 0 on success, -ENOMEM on any validation or mapping failure.
 * NOTE(review): mappings made before a later failure are not unwound
 * here; presumably torn down by the caller's error path -- confirm.
 */
int
qlafx00_iospace_config(struct qla_hw_data *ha)
{
	if (pci_request_selected_regions(ha->pdev, ha->bars,
	    QLA2XXX_DRIVER_NAME)) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x014e,
		    "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	/* Use MMIO operations for all accesses. */
	if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x014f,
		    "Invalid pci I/O region size (%s).\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}
	if (pci_resource_len(ha->pdev, 0) < BAR0_LEN_FX00) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x0127,
		    "Invalid PCI mem BAR0 region size (%s), aborting\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	ha->cregbase =
	    ioremap_nocache(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00);
	if (!ha->cregbase) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0128,
		    "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	if (!(pci_resource_flags(ha->pdev, 2) & IORESOURCE_MEM)) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x0129,
		    "region #2 not an MMIO resource (%s), aborting\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}
	if (pci_resource_len(ha->pdev, 2) < BAR2_LEN_FX00) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x012a,
		    "Invalid PCI mem BAR2 region size (%s), aborting\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	ha->iobase =
	    ioremap_nocache(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00);
	if (!ha->iobase) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x012b,
		    "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	/* Determine queue resources */
	ha->max_req_queues = ha->max_rsp_queues = 1;

	ql_log_pci(ql_log_info, ha->pdev, 0x012c,
	    "Bars 0x%x, iobase0 0x%p, iobase2 0x%p\n",
	    ha->bars, ha->cregbase, ha->iobase);

	return 0;

iospace_error_exit:
	return -ENOMEM;
}
847
848static void
849qlafx00_save_queue_ptrs(struct scsi_qla_host *vha)
850{
851 struct qla_hw_data *ha = vha->hw;
852 struct req_que *req = ha->req_q_map[0];
853 struct rsp_que *rsp = ha->rsp_q_map[0];
854
855 req->length_fx00 = req->length;
856 req->ring_fx00 = req->ring;
857 req->dma_fx00 = req->dma;
858
859 rsp->length_fx00 = rsp->length;
860 rsp->ring_fx00 = rsp->ring;
861 rsp->dma_fx00 = rsp->dma;
862
863 ql_dbg(ql_dbg_init, vha, 0x012d,
864 "req: %p, ring_fx00: %p, length_fx00: 0x%x,"
865 "req->dma_fx00: 0x%llx\n", req, req->ring_fx00,
866 req->length_fx00, (u64)req->dma_fx00);
867
868 ql_dbg(ql_dbg_init, vha, 0x012e,
869 "rsp: %p, ring_fx00: %p, length_fx00: 0x%x,"
870 "rsp->dma_fx00: 0x%llx\n", rsp, rsp->ring_fx00,
871 rsp->length_fx00, (u64)rsp->dma_fx00);
872}
873
/*
 * Point the request/response queue descriptors at the queue areas the
 * firmware exposes inside BAR2 (offsets/lengths were read from the
 * AEN mailboxes in qlafx00_init_fw_ready()).
 */
static int
qlafx00_config_queues(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];
	dma_addr_t bar2_hdl = pci_resource_start(ha->pdev, 2);

	/* Request ring lives at req_que_off inside the BAR2 mapping. */
	req->length = ha->req_que_len;
	req->ring = (void *)ha->iobase + ha->req_que_off;
	req->dma = bar2_hdl + ha->req_que_off;
	/* NOTE(review): message says "allocate" but nothing is
	 * allocated here -- this only validates the BAR-based ring. */
	if ((!req->ring) || (req->length == 0)) {
		ql_log_pci(ql_log_info, ha->pdev, 0x012f,
		    "Unable to allocate memory for req_ring\n");
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_init, vha, 0x0130,
	    "req: %p req_ring pointer %p req len 0x%x "
	    "req off 0x%x\n, req->dma: 0x%llx",
	    req, req->ring, req->length,
	    ha->req_que_off, (u64)req->dma);

	/* Response ring lives at rsp_que_off inside the BAR2 mapping. */
	rsp->length = ha->rsp_que_len;
	rsp->ring = (void *)ha->iobase + ha->rsp_que_off;
	rsp->dma = bar2_hdl + ha->rsp_que_off;
	if ((!rsp->ring) || (rsp->length == 0)) {
		ql_log_pci(ql_log_info, ha->pdev, 0x0131,
		    "Unable to allocate memory for rsp_ring\n");
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_init, vha, 0x0132,
	    "rsp: %p rsp_ring pointer %p rsp len 0x%x "
	    "rsp off 0x%x, rsp->dma: 0x%llx\n",
	    rsp, rsp->ring, rsp->length,
	    ha->rsp_que_off, (u64)rsp->dma);

	return QLA_SUCCESS;
}
914
static int
qlafx00_init_fw_ready(scsi_qla_host_t *vha)
{
	/*
	 * Poll AEN mailbox 0 for up to 30 seconds until the ISPFx00
	 * firmware reports a usable state, capturing the interrupt codes
	 * and queue offsets/lengths it publishes along the way.
	 * Returns QLA_SUCCESS once the firmware is ready to be configured,
	 * QLA_FUNCTION_FAILED on timeout or unrecoverable error.
	 */
	int rval = 0;
	unsigned long wtime;
	uint16_t wait_time;	/* Wait time */
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
	uint32_t aenmbx, aenmbx7 = 0;
	uint32_t state[5];
	bool done = false;

	/* 30 seconds wait - Adjust if required */
	wait_time = 30;

	/* wait time before firmware ready */
	wtime = jiffies + (wait_time * HZ);
	do {
		aenmbx = RD_REG_DWORD(&reg->aenmailbox0);
		barrier();
		ql_dbg(ql_dbg_mbx, vha, 0x0133,
		    "aenmbx: 0x%x\n", aenmbx);

		switch (aenmbx) {
		case MBA_FW_NOT_STARTED:
		case MBA_FW_STARTING:
			/* Firmware still booting: keep polling. */
			break;

		case MBA_SYSTEM_ERR:
		case MBA_REQ_TRANSFER_ERR:
		case MBA_RSP_TRANSFER_ERR:
		case MBA_FW_INIT_FAILURE:
			/* Fatal firmware state: soft-reset and poll again. */
			qlafx00_soft_reset(vha);
			break;

		case MBA_FW_RESTART_CMPLT:
			/* Set the mbx and rqstq intr code */
			aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
			ha->mbx_intr_code = MSW(aenmbx7);
			ha->rqstq_intr_code = LSW(aenmbx7);
			/* Queue geometry published by firmware in the AEN
			 * mailboxes; consumed by qlafx00_config_queues(). */
			ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
			ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
			ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
			ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
			/* Ack the AEN by zeroing mailbox 0; read back to
			 * flush the posted write. */
			WRT_REG_DWORD(&reg->aenmailbox0, 0);
			RD_REG_DWORD_RELAXED(&reg->aenmailbox0);
			ql_dbg(ql_dbg_init, vha, 0x0134,
			    "f/w returned mbx_intr_code: 0x%x, "
			    "rqstq_intr_code: 0x%x\n",
			    ha->mbx_intr_code, ha->rqstq_intr_code);
			QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
			rval = QLA_SUCCESS;
			done = true;
			break;

		default:
			/* If fw is apparently not ready. In order to continue,
			 * we might need to issue Mbox cmd, but the problem is
			 * that the DoorBell vector values that come with the
			 * 8060 AEN are most likely gone by now (and thus no
			 * bell would be rung on the fw side when mbox cmd is
			 * issued). We have to therefore grab the 8060 AEN
			 * shadow regs (filled in by FW when the last 8060
			 * AEN was being posted).
			 * Do the following to determine what is needed in
			 * order to get the FW ready:
			 * 1. reload the 8060 AEN values from the shadow regs
			 * 2. clear int status to get rid of possible pending
			 *    interrupts
			 * 3. issue Get FW State Mbox cmd to determine fw state
			 * Set the mbx and rqstq intr code from Shadow Regs
			 */
			aenmbx7 = RD_REG_DWORD(&reg->initval7);
			ha->mbx_intr_code = MSW(aenmbx7);
			ha->rqstq_intr_code = LSW(aenmbx7);
			ha->req_que_off = RD_REG_DWORD(&reg->initval1);
			ha->rsp_que_off = RD_REG_DWORD(&reg->initval3);
			ha->req_que_len = RD_REG_DWORD(&reg->initval5);
			ha->rsp_que_len = RD_REG_DWORD(&reg->initval6);
			ql_dbg(ql_dbg_init, vha, 0x0135,
			    "f/w returned mbx_intr_code: 0x%x, "
			    "rqstq_intr_code: 0x%x\n",
			    ha->mbx_intr_code, ha->rqstq_intr_code);
			QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);

			/* Get the FW state */
			rval = qlafx00_get_firmware_state(vha, state);
			if (rval != QLA_SUCCESS) {
				/* Retry if timer has not expired */
				break;
			}

			if (state[0] == FSTATE_FX00_CONFIG_WAIT) {
				/* Firmware is waiting to be
				 * initialized by driver
				 */
				rval = QLA_SUCCESS;
				done = true;
				break;
			}

			/* Issue driver shutdown and wait until f/w recovers.
			 * Driver should continue to poll until 8060 AEN is
			 * received indicating firmware recovery.
			 */
			ql_dbg(ql_dbg_init, vha, 0x0136,
			    "Sending Driver shutdown fw_state 0x%x\n",
			    state[0]);

			rval = qlafx00_driver_shutdown(vha, 10);
			if (rval != QLA_SUCCESS) {
				rval = QLA_FUNCTION_FAILED;
				break;
			}
			msleep(500);

			/* Shutdown issued: restart the 30s wait window. */
			wtime = jiffies + (wait_time * HZ);
			break;
		}

		if (!done) {
			if (time_after_eq(jiffies, wtime)) {
				/* Timed out without reaching a ready state. */
				ql_dbg(ql_dbg_init, vha, 0x0137,
				    "Init f/w failed: aen[7]: 0x%x\n",
				    RD_REG_DWORD(&reg->aenmailbox7));
				rval = QLA_FUNCTION_FAILED;
				done = true;
				break;
			}
			/* Delay for a while */
			msleep(500);
		}
	} while (!done);

	if (rval)
		ql_dbg(ql_dbg_init, vha, 0x0138,
		    "%s **** FAILED ****.\n", __func__);
	else
		ql_dbg(ql_dbg_init, vha, 0x0139,
		    "%s **** SUCCESS ****.\n", __func__);

	return rval;
}
1058
1059/*
1060 * qlafx00_fw_ready() - Waits for firmware ready.
1061 * @ha: HA context
1062 *
1063 * Returns 0 on success.
1064 */
1065int
1066qlafx00_fw_ready(scsi_qla_host_t *vha)
1067{
1068 int rval;
1069 unsigned long wtime;
1070 uint16_t wait_time; /* Wait time if loop is coming ready */
1071 uint32_t state[5];
1072
1073 rval = QLA_SUCCESS;
1074
1075 wait_time = 10;
1076
1077 /* wait time before firmware ready */
1078 wtime = jiffies + (wait_time * HZ);
1079
1080 /* Wait for ISP to finish init */
1081 if (!vha->flags.init_done)
1082 ql_dbg(ql_dbg_init, vha, 0x013a,
1083 "Waiting for init to complete...\n");
1084
1085 do {
1086 rval = qlafx00_get_firmware_state(vha, state);
1087
1088 if (rval == QLA_SUCCESS) {
1089 if (state[0] == FSTATE_FX00_INITIALIZED) {
1090 ql_dbg(ql_dbg_init, vha, 0x013b,
1091 "fw_state=%x\n", state[0]);
1092 rval = QLA_SUCCESS;
1093 break;
1094 }
1095 }
1096 rval = QLA_FUNCTION_FAILED;
1097
1098 if (time_after_eq(jiffies, wtime))
1099 break;
1100
1101 /* Delay for a while */
1102 msleep(500);
1103
1104 ql_dbg(ql_dbg_init, vha, 0x013c,
1105 "fw_state=%x curr time=%lx.\n", state[0], jiffies);
1106 } while (1);
1107
1108
1109 if (rval)
1110 ql_dbg(ql_dbg_init, vha, 0x013d,
1111 "Firmware ready **** FAILED ****.\n");
1112 else
1113 ql_dbg(ql_dbg_init, vha, 0x013e,
1114 "Firmware ready **** SUCCESS ****.\n");
1115
1116 return rval;
1117}
1118
static int
qlafx00_find_all_targets(scsi_qla_host_t *vha,
	struct list_head *new_fcports)
{
	/*
	 * Walk the firmware-reported target bitmap (ha->gid_list, filled
	 * by a prior FXDISC_GET_TGT_NODE_LIST — see
	 * qlafx00_configure_all_targets()) and reconcile it with the
	 * driver's fcport database.  Newly discovered ports are queued on
	 * @new_fcports for the caller to attach.
	 */
	int rval;
	uint16_t tgt_id;
	fc_port_t *fcport, *new_fcport;
	int found;
	struct qla_hw_data *ha = vha->hw;

	rval = QLA_SUCCESS;

	/* Only meaningful while a loop resync is actually in progress. */
	if (!test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
		return QLA_FUNCTION_FAILED;

	/* Loop went down or state changed meanwhile: request another
	 * resync instead of scanning stale data now. */
	if ((atomic_read(&vha->loop_down_timer) ||
	    STATE_TRANSITION(vha))) {
		atomic_set(&vha->loop_down_timer, 0);
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x2088,
	    "Listing Target bit map...\n");
	ql_dump_buffer(ql_dbg_disc + ql_dbg_init, vha,
	    0x2089, (uint8_t *)ha->gid_list, 32);

	/* Allocate temporary rmtport for any new rmtports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL)
		return QLA_MEMORY_ALLOC_FAILED;

	/* One set bit per firmware-known target id. */
	for_each_set_bit(tgt_id, (void *)ha->gid_list,
	    QLAFX00_TGT_NODE_LIST_SIZE) {

		/* Send get target node info */
		new_fcport->tgt_id = tgt_id;
		rval = qlafx00_fx_disc(vha, new_fcport,
		    FXDISC_GET_TGT_NODE_INFO);
		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x208a,
			    "Target info scan failed -- assuming zero-entry "
			    "result...\n");
			continue;
		}

		/* Locate matching device in database (by WWPN). */
		found = 0;
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name,
			    fcport->port_name, WWN_SIZE))
				continue;

			found++;

			/*
			 * If tgt_id is same and state FCS_ONLINE, nothing
			 * changed.
			 */
			if (fcport->tgt_id == new_fcport->tgt_id &&
			    atomic_read(&fcport->state) == FCS_ONLINE)
				break;

			/*
			 * Tgt ID changed or device was marked to be updated.
			 */
			ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x208b,
			    "TGT-ID Change(%s): Present tgt id: "
			    "0x%x state: 0x%x "
			    "wwnn = %llx wwpn = %llx.\n",
			    __func__, fcport->tgt_id,
			    atomic_read(&fcport->state),
			    (unsigned long long)wwn_to_u64(fcport->node_name),
			    (unsigned long long)wwn_to_u64(fcport->port_name));

			ql_log(ql_log_info, vha, 0x208c,
			    "TGT-ID Announce(%s): Discovered tgt "
			    "id 0x%x wwnn = %llx "
			    "wwpn = %llx.\n", __func__, new_fcport->tgt_id,
			    (unsigned long long)
			    wwn_to_u64(new_fcport->node_name),
			    (unsigned long long)
			    wwn_to_u64(new_fcport->port_name));

			if (atomic_read(&fcport->state) != FCS_ONLINE) {
				/* Port came back under a new tgt_id: adopt
				 * the new id and refresh the fcport. */
				fcport->old_tgt_id = fcport->tgt_id;
				fcport->tgt_id = new_fcport->tgt_id;
				ql_log(ql_log_info, vha, 0x208d,
				    "TGT-ID: New fcport Added: %p\n", fcport);
				qla2x00_update_fcport(vha, fcport);
			} else {
				/* Still online but firmware reports a new
				 * tgt_id: we missed an offline event, so
				 * force a full resync. */
				ql_log(ql_log_info, vha, 0x208e,
				    " Existing TGT-ID %x did not get "
				    " offline event from firmware.\n",
				    fcport->old_tgt_id);
				qla2x00_mark_device_lost(vha, fcport, 0, 0);
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				kfree(new_fcport);
				return rval;
			}
			break;
		}

		if (found)
			continue;

		/* If device was not in our fcports list, then add it. */
		list_add_tail(&new_fcport->list, new_fcports);

		/* Allocate a new replacement fcport. */
		new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (new_fcport == NULL)
			return QLA_MEMORY_ALLOC_FAILED;
	}

	/* Free the spare fcport left over from the last iteration. */
	kfree(new_fcport);
	return rval;
}
1237
/*
 * qlafx00_configure_all_targets
 *	Setup target devices with node ID's.
 *
 * Input:
 *	vha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 *	BIT_0 = error
 */
static int
qlafx00_configure_all_targets(scsi_qla_host_t *vha)
{
	int rval;
	fc_port_t *fcport, *rmptemp;
	LIST_HEAD(new_fcports);

	/* Fetch the firmware's target-node bitmap (fills ha->gid_list,
	 * which qlafx00_find_all_targets() consumes). */
	rval = qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
	    FXDISC_GET_TGT_NODE_LIST);
	if (rval != QLA_SUCCESS) {
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		return rval;
	}

	rval = qlafx00_find_all_targets(vha, &new_fcports);
	if (rval != QLA_SUCCESS) {
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		return rval;
	}

	/*
	 * Delete all previous devices marked lost.
	 */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		/* Abandon the walk if another resync was requested. */
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
			if (fcport->port_type != FCT_INITIATOR)
				qla2x00_mark_device_lost(vha, fcport, 0, 0);
		}
	}

	/*
	 * Add the new devices to our devices list.
	 */
	list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		qla2x00_update_fcport(vha, fcport);
		list_move_tail(&fcport->list, &vha->vp_fcports);
		ql_log(ql_log_info, vha, 0x208f,
		    "Attach new target id 0x%x wwnn = %llx "
		    "wwpn = %llx.\n",
		    fcport->tgt_id,
		    (unsigned long long)wwn_to_u64(fcport->node_name),
		    (unsigned long long)wwn_to_u64(fcport->port_name));
	}

	/* Free all new device structures not processed. */
	list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
		list_del(&fcport->list);
		kfree(fcport);
	}

	return rval;
}
1307
1308/*
1309 * qlafx00_configure_devices
1310 * Updates Fibre Channel Device Database with what is actually on loop.
1311 *
1312 * Input:
1313 * ha = adapter block pointer.
1314 *
1315 * Returns:
1316 * 0 = success.
1317 * 1 = error.
1318 * 2 = database was full and device was not configured.
1319 */
1320int
1321qlafx00_configure_devices(scsi_qla_host_t *vha)
1322{
1323 int rval;
1324 unsigned long flags, save_flags;
1325 rval = QLA_SUCCESS;
1326
1327 save_flags = flags = vha->dpc_flags;
1328
1329 ql_dbg(ql_dbg_disc, vha, 0x2090,
1330 "Configure devices -- dpc flags =0x%lx\n", flags);
1331
1332 rval = qlafx00_configure_all_targets(vha);
1333
1334 if (rval == QLA_SUCCESS) {
1335 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
1336 rval = QLA_FUNCTION_FAILED;
1337 } else {
1338 atomic_set(&vha->loop_state, LOOP_READY);
1339 ql_log(ql_log_info, vha, 0x2091,
1340 "Device Ready\n");
1341 }
1342 }
1343
1344 if (rval) {
1345 ql_dbg(ql_dbg_disc, vha, 0x2092,
1346 "%s *** FAILED ***.\n", __func__);
1347 } else {
1348 ql_dbg(ql_dbg_disc, vha, 0x2093,
1349 "%s: exiting normally.\n", __func__);
1350 }
1351 return rval;
1352}
1353
static void
qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha)
{
	/*
	 * Take the ISPFx00 offline for error recovery: reset the chip,
	 * mark the loop and all remote ports down, flush outstanding
	 * commands and hand further recovery to the timer routine via
	 * FX00_RESET_RECOVERY.
	 */
	struct qla_hw_data *ha = vha->hw;
	fc_port_t *fcport;

	vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	/* Suspend heartbeat monitoring while the firmware is resetting. */
	ha->mr.fw_hbt_en = 0;
	clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	vha->qla_stats.total_isp_aborts++;

	ql_log(ql_log_info, vha, 0x013f,
	    "Performing ISP error recovery - ha = %p.\n", ha);

	ha->isp_ops->reset_chip(vha);

	/* Mark the loop down and (re)arm the loop-down timer. */
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		atomic_set(&vha->loop_down_timer,
		    QLAFX00_LOOP_DOWN_TIME);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer,
			    QLAFX00_LOOP_DOWN_TIME);
	}

	/* Clear all async request states across all VPs. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		fcport->flags = 0;
		if (atomic_read(&fcport->state) == FCS_ONLINE)
			qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
	}

	if (!ha->flags.eeh_busy) {
		/* Requeue all commands in outstanding command list. */
		qla2x00_abort_all_cmds(vha, DID_RESET << 16);
	}

	qla2x00_free_irqs(vha);
	/* Timer routine (qlafx00_timer_routine) drives recovery from here. */
	set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);

	/* Clear the Interrupts */
	QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);

	ql_log(ql_log_info, vha, 0x0140,
	    "%s Done done - ha=%p.\n", __func__, ha);
}
1402
/**
 * qlafx00_init_response_q_entries() - Initializes response queue entries.
 * @rsp: response queue to initialize
 *
 * Resets the ring cursor and stamps every entry with RESPONSE_PROCESSED
 * so stale entries are never mistaken for new completions.
 */
void
qlafx00_init_response_q_entries(struct rsp_que *rsp)
{
	uint16_t cnt;
	response_t *pkt;

	rsp->ring_ptr = rsp->ring;
	rsp->ring_index = 0;
	rsp->status_srb = NULL;
	pkt = rsp->ring_ptr;
	for (cnt = 0; cnt < rsp->length; cnt++) {
		/* NOTE(review): the signature is written twice — once as a
		 * plain store and once via WRT_REG_DWORD.  On FX00 the ring
		 * may live in BAR-mapped device memory (see
		 * qlafx00_config_queues()), where WRT_REG_DWORD is the
		 * portable form; confirm whether the plain store is needed. */
		pkt->signature = RESPONSE_PROCESSED;
		WRT_REG_DWORD(&pkt->signature, RESPONSE_PROCESSED);
		pkt++;
	}
}
1428
1429int
1430qlafx00_rescan_isp(scsi_qla_host_t *vha)
1431{
1432 uint32_t status = QLA_FUNCTION_FAILED;
1433 struct qla_hw_data *ha = vha->hw;
1434 struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
1435 uint32_t aenmbx7;
1436
1437 qla2x00_request_irqs(ha, ha->rsp_q_map[0]);
1438
1439 aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
1440 ha->mbx_intr_code = MSW(aenmbx7);
1441 ha->rqstq_intr_code = LSW(aenmbx7);
1442 ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
1443 ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
1444 ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
1445 ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
1446
1447 ql_dbg(ql_dbg_disc, vha, 0x2094,
1448 "fw returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x "
1449 " Req que offset 0x%x Rsp que offset 0x%x\n",
1450 ha->mbx_intr_code, ha->rqstq_intr_code,
1451 ha->req_que_off, ha->rsp_que_len);
1452
1453 /* Clear the Interrupts */
1454 QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
1455
1456 status = qla2x00_init_rings(vha);
1457 if (!status) {
1458 vha->flags.online = 1;
1459
1460 /* if no cable then assume it's good */
1461 if ((vha->device_flags & DFLG_NO_CABLE))
1462 status = 0;
1463 /* Register system information */
1464 if (qlafx00_fx_disc(vha,
1465 &vha->hw->mr.fcport, FXDISC_REG_HOST_INFO))
1466 ql_dbg(ql_dbg_disc, vha, 0x2095,
1467 "failed to register host info\n");
1468 }
1469 scsi_unblock_requests(vha->host);
1470 return status;
1471}
1472
void
qlafx00_timer_routine(scsi_qla_host_t *vha)
{
	/*
	 * Per-tick housekeeping for the ISPFx00: (1) firmware heartbeat
	 * monitoring, escalating to an ISP abort after repeated misses;
	 * (2) when FX00_RESET_RECOVERY is set, drive firmware-restart
	 * recovery by watching AEN mailbox 0.
	 */
	struct qla_hw_data *ha = vha->hw;
	uint32_t fw_heart_beat;
	uint32_t aenmbx0;
	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;

	/* Check firmware health */
	if (ha->mr.fw_hbt_cnt)
		ha->mr.fw_hbt_cnt--;
	else {
		/* Heartbeat interval elapsed: sample the firmware counter,
		 * unless a reset/unload is already in progress. */
		if ((!ha->flags.mr_reset_hdlr_active) &&
		    (!test_bit(UNLOADING, &vha->dpc_flags)) &&
		    (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
		    (ha->mr.fw_hbt_en)) {
			fw_heart_beat = RD_REG_DWORD(&reg->fwheartbeat);
			if (fw_heart_beat != ha->mr.old_fw_hbt_cnt) {
				/* Counter advanced: firmware is alive. */
				ha->mr.old_fw_hbt_cnt = fw_heart_beat;
				ha->mr.fw_hbt_miss_cnt = 0;
			} else {
				ha->mr.fw_hbt_miss_cnt++;
				if (ha->mr.fw_hbt_miss_cnt ==
				    QLAFX00_HEARTBEAT_MISS_CNT) {
					/* Too many stalled samples:
					 * request an ISP abort. */
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
					qla2xxx_wake_dpc(vha);
					ha->mr.fw_hbt_miss_cnt = 0;
				}
			}
		}
		ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
	}

	if (test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags)) {
		/* Reset recovery to be performed in timer routine */
		aenmbx0 = RD_REG_DWORD(&reg->aenmailbox0);
		if (ha->mr.fw_reset_timer_exp) {
			/* Recovery window expired: escalate to ISP abort. */
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			ha->mr.fw_reset_timer_exp = 0;
		} else if (aenmbx0 == MBA_FW_RESTART_CMPLT) {
			/* Wake up DPC to rescan the targets */
			set_bit(FX00_TARGET_SCAN, &vha->dpc_flags);
			clear_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
		} else if ((aenmbx0 == MBA_FW_STARTING) &&
		    (!ha->mr.fw_hbt_en)) {
			/* Firmware is booting again: re-enable heartbeat
			 * monitoring. */
			ha->mr.fw_hbt_en = 1;
		} else if (!ha->mr.fw_reset_timer_tick) {
			/* Interval elapsed; if mailbox 0 made no progress,
			 * arm the expiry flag so the next tick escalates. */
			if (aenmbx0 == ha->mr.old_aenmbx0_state)
				ha->mr.fw_reset_timer_exp = 1;
			ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
		} else if (aenmbx0 == 0xFFFFFFFF) {
			/* NOTE(review): an all-ones read suggests the PCI
			 * window is mis-aimed; this rebuilds the PEX0 window
			 * base from the BAR1 base register — confirm against
			 * the ISPFx00 register spec. */
			uint32_t data0, data1;

			data0 = QLAFX00_RD_REG(ha,
			    QLAFX00_BAR1_BASE_ADDR_REG);
			data1 = QLAFX00_RD_REG(ha,
			    QLAFX00_PEX0_WIN0_BASE_ADDR_REG);

			data0 &= 0xffff0000;
			data1 &= 0x0000ffff;

			QLAFX00_WR_REG(ha,
			    QLAFX00_PEX0_WIN0_BASE_ADDR_REG,
			    (data0 | data1));
		} else if ((aenmbx0 & 0xFF00) == MBA_FW_POLL_STATE) {
			/* Firmware asked for a longer polling interval. */
			ha->mr.fw_reset_timer_tick =
			    QLAFX00_MAX_RESET_INTERVAL;
		}
		ha->mr.old_aenmbx0_state = aenmbx0;
		ha->mr.fw_reset_timer_tick--;
	}
}
1549
1550/*
1551 * qlfx00a_reset_initialize
1552 * Re-initialize after a iSA device reset.
1553 *
1554 * Input:
1555 * ha = adapter block pointer.
1556 *
1557 * Returns:
1558 * 0 = success
1559 */
1560int
1561qlafx00_reset_initialize(scsi_qla_host_t *vha)
1562{
1563 struct qla_hw_data *ha = vha->hw;
1564
1565 if (vha->device_flags & DFLG_DEV_FAILED) {
1566 ql_dbg(ql_dbg_init, vha, 0x0142,
1567 "Device in failed state\n");
1568 return QLA_SUCCESS;
1569 }
1570
1571 ha->flags.mr_reset_hdlr_active = 1;
1572
1573 if (vha->flags.online) {
1574 scsi_block_requests(vha->host);
1575 qlafx00_abort_isp_cleanup(vha);
1576 }
1577
1578 ql_log(ql_log_info, vha, 0x0143,
1579 "(%s): succeeded.\n", __func__);
1580 ha->flags.mr_reset_hdlr_active = 0;
1581 return QLA_SUCCESS;
1582}
1583
1584/*
1585 * qlafx00_abort_isp
1586 * Resets ISP and aborts all outstanding commands.
1587 *
1588 * Input:
1589 * ha = adapter block pointer.
1590 *
1591 * Returns:
1592 * 0 = success
1593 */
1594int
1595qlafx00_abort_isp(scsi_qla_host_t *vha)
1596{
1597 struct qla_hw_data *ha = vha->hw;
1598
1599 if (vha->flags.online) {
1600 if (unlikely(pci_channel_offline(ha->pdev) &&
1601 ha->flags.pci_channel_io_perm_failure)) {
1602 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
1603 return QLA_SUCCESS;
1604 }
1605
1606 scsi_block_requests(vha->host);
1607 qlafx00_abort_isp_cleanup(vha);
1608 }
1609
1610 ql_log(ql_log_info, vha, 0x0145,
1611 "(%s): succeeded.\n", __func__);
1612
1613 return QLA_SUCCESS;
1614}
1615
1616static inline fc_port_t*
1617qlafx00_get_fcport(struct scsi_qla_host *vha, int tgt_id)
1618{
1619 fc_port_t *fcport;
1620
1621 /* Check for matching device in remote port list. */
1622 fcport = NULL;
1623 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1624 if (fcport->tgt_id == tgt_id) {
1625 ql_dbg(ql_dbg_async, vha, 0x5072,
1626 "Matching fcport(%p) found with TGT-ID: 0x%x "
1627 "and Remote TGT_ID: 0x%x\n",
1628 fcport, fcport->tgt_id, tgt_id);
1629 break;
1630 }
1631 }
1632 return fcport;
1633}
1634
1635static void
1636qlafx00_tgt_detach(struct scsi_qla_host *vha, int tgt_id)
1637{
1638 fc_port_t *fcport;
1639
1640 ql_log(ql_log_info, vha, 0x5073,
1641 "Detach TGT-ID: 0x%x\n", tgt_id);
1642
1643 fcport = qlafx00_get_fcport(vha, tgt_id);
1644 if (!fcport)
1645 return;
1646
1647 qla2x00_mark_device_lost(vha, fcport, 0, 0);
1648
1649 return;
1650}
1651
int
qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
{
	/*
	 * Handle a deferred ISPFx00 asynchronous event: update loop/port
	 * state as needed and relay the event to the FC transport.
	 * Always returns 0.
	 */
	int rval = 0;
	uint32_t aen_code, aen_data;

	/* Default: surface the raw event code as a vendor-unique event. */
	aen_code = FCH_EVT_VENDOR_UNIQUE;
	aen_data = evt->u.aenfx.evtcode;

	switch (evt->u.aenfx.evtcode) {
	case QLAFX00_MBA_PORT_UPDATE:		/* Port database update */
		/* NOTE(review): mbx[1] == 0 vs 0xffff appears to select
		 * per-target vs adapter-wide scope, with mbx[2] 1 = arrive
		 * and 2 = depart — confirm against the ISPFx00 AEN spec. */
		if (evt->u.aenfx.mbx[1] == 0) {
			if (evt->u.aenfx.mbx[2] == 1) {
				if (!vha->flags.fw_tgt_reported)
					vha->flags.fw_tgt_reported = 1;
				atomic_set(&vha->loop_down_timer, 0);
				atomic_set(&vha->loop_state, LOOP_UP);
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			} else if (evt->u.aenfx.mbx[2] == 2) {
				/* mbx[3] carries the departing target id. */
				qlafx00_tgt_detach(vha, evt->u.aenfx.mbx[3]);
			}
		} else if (evt->u.aenfx.mbx[1] == 0xffff) {
			if (evt->u.aenfx.mbx[2] == 1) {
				if (!vha->flags.fw_tgt_reported)
					vha->flags.fw_tgt_reported = 1;
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			} else if (evt->u.aenfx.mbx[2] == 2) {
				vha->device_flags |= DFLG_NO_CABLE;
				qla2x00_mark_all_devices_lost(vha, 1);
			}
		}
		break;
	case QLAFX00_MBA_LINK_UP:
		aen_code = FCH_EVT_LINKUP;
		aen_data = 0;
		break;
	case QLAFX00_MBA_LINK_DOWN:
		aen_code = FCH_EVT_LINKDOWN;
		aen_data = 0;
		break;
	}

	/* Relay to the FC transport for user-space event consumers. */
	fc_host_post_event(vha->host, fc_get_event_number(),
	    aen_code, aen_data);

	return rval;
}
1700
1701static void
1702qlafx00_update_host_attr(scsi_qla_host_t *vha, struct port_info_data *pinfo)
1703{
1704 u64 port_name = 0, node_name = 0;
1705
1706 port_name = (unsigned long long)wwn_to_u64(pinfo->port_name);
1707 node_name = (unsigned long long)wwn_to_u64(pinfo->node_name);
1708
1709 fc_host_node_name(vha->host) = node_name;
1710 fc_host_port_name(vha->host) = port_name;
1711 if (!pinfo->port_type)
1712 vha->hw->current_topology = ISP_CFG_F;
1713 if (pinfo->link_status == QLAFX00_LINK_STATUS_UP)
1714 atomic_set(&vha->loop_state, LOOP_READY);
1715 else if (pinfo->link_status == QLAFX00_LINK_STATUS_DOWN)
1716 atomic_set(&vha->loop_state, LOOP_DOWN);
1717 vha->hw->link_data_rate = (uint16_t)pinfo->link_config;
1718}
1719
1720static void
1721qla2x00_fxdisc_iocb_timeout(void *data)
1722{
1723 srb_t *sp = (srb_t *)data;
1724 struct srb_iocb *lio = &sp->u.iocb_cmd;
1725
1726 complete(&lio->u.fxiocb.fxiocb_comp);
1727}
1728
1729static void
1730qla2x00_fxdisc_sp_done(void *data, void *ptr, int res)
1731{
1732 srb_t *sp = (srb_t *)ptr;
1733 struct srb_iocb *lio = &sp->u.iocb_cmd;
1734
1735 complete(&lio->u.fxiocb.fxiocb_comp);
1736}
1737
/*
 * qlafx00_fx_disc
 *	Build, submit and synchronously wait for an FX-discovery IOCB
 *	(SRB_FXIOCB_DCMD), then copy the response into driver state
 *	according to @fx_type.
 *
 * Input:
 *	vha = adapter block pointer.
 *	fcport = remote port (or the pseudo port vha->hw->mr.fcport).
 *	fx_type = FXDISC_* request code.
 *
 * Returns:
 *	QLA_FUNCTION_FAILED on setup/start failure, otherwise the IOCB
 *	result from fdisc->u.fxiocb.result.
 */
int
qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t fx_type)
{
	srb_t *sp;
	struct srb_iocb *fdisc;
	int rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha = vha->hw;
	struct host_system_info *phost_info;
	struct register_host_info *preg_hsi;
	struct new_utsname *p_sysid = NULL;
	struct timeval tv;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	fdisc = &sp->u.iocb_cmd;
	/* Select DMA directions and buffer sizes per request type. */
	switch (fx_type) {
	case FXDISC_GET_CONFIG_INFO:
		fdisc->u.fxiocb.flags =
		    SRB_FXDISC_RESP_DMA_VALID;
		fdisc->u.fxiocb.rsp_len = sizeof(struct config_info_data);
		break;
	case FXDISC_GET_PORT_INFO:
		fdisc->u.fxiocb.flags =
		    SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
		fdisc->u.fxiocb.rsp_len = QLAFX00_PORT_DATA_INFO;
		fdisc->u.fxiocb.req_data = fcport->port_id;
		break;
	case FXDISC_GET_TGT_NODE_INFO:
		fdisc->u.fxiocb.flags =
		    SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
		fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_INFO;
		fdisc->u.fxiocb.req_data = fcport->tgt_id;
		break;
	case FXDISC_GET_TGT_NODE_LIST:
		fdisc->u.fxiocb.flags =
		    SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
		fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_LIST_SIZE;
		break;
	case FXDISC_REG_HOST_INFO:
		fdisc->u.fxiocb.flags = SRB_FXDISC_REQ_DMA_VALID;
		fdisc->u.fxiocb.req_len = sizeof(struct register_host_info);
		p_sysid = utsname();
		if (!p_sysid) {
			ql_log(ql_log_warn, vha, 0x303c,
			    "Not able to get the system informtion\n");
			goto done_free_sp;
		}
		break;
	default:
		break;
	}

	if (fdisc->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) {
		fdisc->u.fxiocb.req_addr = dma_alloc_coherent(&ha->pdev->dev,
		    fdisc->u.fxiocb.req_len,
		    &fdisc->u.fxiocb.req_dma_handle, GFP_KERNEL);
		if (!fdisc->u.fxiocb.req_addr)
			goto done_free_sp;

		if (fx_type == FXDISC_REG_HOST_INFO) {
			preg_hsi = (struct register_host_info *)
			    fdisc->u.fxiocb.req_addr;
			phost_info = &preg_hsi->hsi;
			memset(preg_hsi, 0, sizeof(struct register_host_info));
			phost_info->os_type = OS_TYPE_LINUX;
			/* NOTE(review): strncpy() does not NUL-terminate on
			 * truncation; termination relies on the memset()
			 * above and on sources shorter than their fields. */
			strncpy(phost_info->sysname,
			    p_sysid->sysname, SYSNAME_LENGTH);
			strncpy(phost_info->nodename,
			    p_sysid->nodename, NODENAME_LENGTH);
			strncpy(phost_info->release,
			    p_sysid->release, RELEASE_LENGTH);
			strncpy(phost_info->version,
			    p_sysid->version, VERSION_LENGTH);
			strncpy(phost_info->machine,
			    p_sysid->machine, MACHINE_LENGTH);
			strncpy(phost_info->domainname,
			    p_sysid->domainname, DOMNAME_LENGTH);
			strncpy(phost_info->hostdriver,
			    QLA2XXX_VERSION, VERSION_LENGTH);
			do_gettimeofday(&tv);
			preg_hsi->utc = (uint64_t)tv.tv_sec;
			ql_dbg(ql_dbg_init, vha, 0x0149,
			    "ISP%04X: Host registration with firmware\n",
			    ha->pdev->device);
			ql_dbg(ql_dbg_init, vha, 0x014a,
			    "os_type = '%d', sysname = '%s', nodname = '%s'\n",
			    phost_info->os_type,
			    phost_info->sysname,
			    phost_info->nodename);
			ql_dbg(ql_dbg_init, vha, 0x014b,
			    "release = '%s', version = '%s'\n",
			    phost_info->release,
			    phost_info->version);
			ql_dbg(ql_dbg_init, vha, 0x014c,
			    "machine = '%s' "
			    "domainname = '%s', hostdriver = '%s'\n",
			    phost_info->machine,
			    phost_info->domainname,
			    phost_info->hostdriver);
			ql_dump_buffer(ql_dbg_init + ql_dbg_disc, vha, 0x014d,
			    (uint8_t *)phost_info,
			    sizeof(struct host_system_info));
		}
	}

	if (fdisc->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) {
		fdisc->u.fxiocb.rsp_addr = dma_alloc_coherent(&ha->pdev->dev,
		    fdisc->u.fxiocb.rsp_len,
		    &fdisc->u.fxiocb.rsp_dma_handle, GFP_KERNEL);
		if (!fdisc->u.fxiocb.rsp_addr)
			goto done_unmap_req;
	}

	sp->type = SRB_FXIOCB_DCMD;
	sp->name = "fxdisc";
	qla2x00_init_timer(sp, FXDISC_TIMEOUT);
	fdisc->timeout = qla2x00_fxdisc_iocb_timeout;
	fdisc->u.fxiocb.req_func_type = fx_type;
	sp->done = qla2x00_fxdisc_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_unmap_dma;

	/* NOTE(review): the timeout handler also signals this completion,
	 * so the response parsing below can run after a timeout; rval still
	 * comes from fdisc->u.fxiocb.result either way — confirm the
	 * firmware leaves the response buffer consistent in that case. */
	wait_for_completion(&fdisc->u.fxiocb.fxiocb_comp);

	if (fx_type == FXDISC_GET_CONFIG_INFO) {
		struct config_info_data *pinfo =
		    (struct config_info_data *) fdisc->u.fxiocb.rsp_addr;
		/* Cache adapter identity strings for later consumers. */
		memcpy(&vha->hw->mr.product_name, pinfo->product_name,
		    sizeof(vha->hw->mr.product_name));
		memcpy(&vha->hw->mr.symbolic_name, pinfo->symbolic_name,
		    sizeof(vha->hw->mr.symbolic_name));
		memcpy(&vha->hw->mr.serial_num, pinfo->serial_num,
		    sizeof(vha->hw->mr.serial_num));
		memcpy(&vha->hw->mr.hw_version, pinfo->hw_version,
		    sizeof(vha->hw->mr.hw_version));
		memcpy(&vha->hw->mr.fw_version, pinfo->fw_version,
		    sizeof(vha->hw->mr.fw_version));
		strim(vha->hw->mr.fw_version);
		memcpy(&vha->hw->mr.uboot_version, pinfo->uboot_version,
		    sizeof(vha->hw->mr.uboot_version));
		memcpy(&vha->hw->mr.fru_serial_num, pinfo->fru_serial_num,
		    sizeof(vha->hw->mr.fru_serial_num));
	} else if (fx_type == FXDISC_GET_PORT_INFO) {
		struct port_info_data *pinfo =
		    (struct port_info_data *) fdisc->u.fxiocb.rsp_addr;
		memcpy(vha->node_name, pinfo->node_name, WWN_SIZE);
		memcpy(vha->port_name, pinfo->port_name, WWN_SIZE);
		vha->d_id.b.domain = pinfo->port_id[0];
		vha->d_id.b.area = pinfo->port_id[1];
		vha->d_id.b.al_pa = pinfo->port_id[2];
		qlafx00_update_host_attr(vha, pinfo);
		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0141,
		    (uint8_t *)pinfo, 16);
	} else if (fx_type == FXDISC_GET_TGT_NODE_INFO) {
		struct qlafx00_tgt_node_info *pinfo =
		    (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
		memcpy(fcport->node_name, pinfo->tgt_node_wwnn, WWN_SIZE);
		memcpy(fcport->port_name, pinfo->tgt_node_wwpn, WWN_SIZE);
		fcport->port_type = FCT_TARGET;
		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0144,
		    (uint8_t *)pinfo, 16);
	} else if (fx_type == FXDISC_GET_TGT_NODE_LIST) {
		struct qlafx00_tgt_node_info *pinfo =
		    (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0146,
		    (uint8_t *)pinfo, 16);
		/* Target bitmap consumed by qlafx00_find_all_targets(). */
		memcpy(vha->hw->gid_list, pinfo, QLAFX00_TGT_NODE_LIST_SIZE);
	}
	rval = fdisc->u.fxiocb.result;

done_unmap_dma:
	if (fdisc->u.fxiocb.rsp_addr)
		dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.rsp_len,
		    fdisc->u.fxiocb.rsp_addr, fdisc->u.fxiocb.rsp_dma_handle);

done_unmap_req:
	if (fdisc->u.fxiocb.req_addr)
		dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.req_len,
		    fdisc->u.fxiocb.req_addr, fdisc->u.fxiocb.req_dma_handle);
done_free_sp:
	sp->free(vha, sp);
done:
	return rval;
}
1926
1927static void
1928qlafx00_abort_iocb_timeout(void *data)
1929{
1930 srb_t *sp = (srb_t *)data;
1931 struct srb_iocb *abt = &sp->u.iocb_cmd;
1932
1933 abt->u.abt.comp_status = CS_TIMEOUT;
1934 complete(&abt->u.abt.comp);
1935}
1936
1937static void
1938qlafx00_abort_sp_done(void *data, void *ptr, int res)
1939{
1940 srb_t *sp = (srb_t *)ptr;
1941 struct srb_iocb *abt = &sp->u.iocb_cmd;
1942
1943 complete(&abt->u.abt.comp);
1944}
1945
static int
qlafx00_async_abt_cmd(srb_t *cmd_sp)
{
	/*
	 * Issue an abort IOCB for @cmd_sp and wait synchronously for the
	 * firmware's answer.  Returns QLA_SUCCESS only if the abort
	 * completed with CS_COMPLETE; a timeout or any other completion
	 * status yields QLA_FUNCTION_FAILED.
	 */
	scsi_qla_host_t *vha = cmd_sp->fcport->vha;
	fc_port_t *fcport = cmd_sp->fcport;
	struct srb_iocb *abt_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	/* A fresh srb carries the abort IOCB itself. */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	abt_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_ABT_CMD;
	sp->name = "abort";
	qla2x00_init_timer(sp, FXDISC_TIMEOUT);
	/* Firmware identifies the command to abort by its handle. */
	abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
	sp->done = qlafx00_abort_sp_done;
	abt_iocb->timeout = qlafx00_abort_iocb_timeout;
	init_completion(&abt_iocb->u.abt.comp);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_async, vha, 0x507c,
	    "Abort command issued - hdl=%x, target_id=%x\n",
	    cmd_sp->handle, fcport->tgt_id);

	/* Block until the abort IOCB completes or its timer fires (the
	 * timeout handler sets comp_status = CS_TIMEOUT first). */
	wait_for_completion(&abt_iocb->u.abt.comp);

	rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
	    QLA_SUCCESS : QLA_FUNCTION_FAILED;

done_free_sp:
	sp->free(vha, sp);
done:
	return rval;
}
1986
1987int
1988qlafx00_abort_command(srb_t *sp)
1989{
1990 unsigned long flags = 0;
1991
1992 uint32_t handle;
1993 fc_port_t *fcport = sp->fcport;
1994 struct scsi_qla_host *vha = fcport->vha;
1995 struct qla_hw_data *ha = vha->hw;
1996 struct req_que *req = vha->req;
1997
1998 spin_lock_irqsave(&ha->hardware_lock, flags);
1999 for (handle = 1; handle < DEFAULT_OUTSTANDING_COMMANDS; handle++) {
2000 if (req->outstanding_cmds[handle] == sp)
2001 break;
2002 }
2003 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2004 if (handle == DEFAULT_OUTSTANDING_COMMANDS) {
2005 /* Command not found. */
2006 return QLA_FUNCTION_FAILED;
2007 }
2008 return qlafx00_async_abt_cmd(sp);
2009}
2010
2011/*
2012 * qlafx00_initialize_adapter
2013 * Initialize board.
2014 *
2015 * Input:
2016 * ha = adapter block pointer.
2017 *
2018 * Returns:
2019 * 0 = success
2020 */
2021int
2022qlafx00_initialize_adapter(scsi_qla_host_t *vha)
2023{
2024 int rval;
2025 struct qla_hw_data *ha = vha->hw;
2026
2027 /* Clear adapter flags. */
2028 vha->flags.online = 0;
2029 ha->flags.chip_reset_done = 0;
2030 vha->flags.reset_active = 0;
2031 ha->flags.pci_channel_io_perm_failure = 0;
2032 ha->flags.eeh_busy = 0;
2033 ha->thermal_support = 0;
2034 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
2035 atomic_set(&vha->loop_state, LOOP_DOWN);
2036 vha->device_flags = DFLG_NO_CABLE;
2037 vha->dpc_flags = 0;
2038 vha->flags.management_server_logged_in = 0;
2039 vha->marker_needed = 0;
2040 ha->isp_abort_cnt = 0;
2041 ha->beacon_blink_led = 0;
2042
2043 set_bit(0, ha->req_qid_map);
2044 set_bit(0, ha->rsp_qid_map);
2045
2046 ql_dbg(ql_dbg_init, vha, 0x0147,
2047 "Configuring PCI space...\n");
2048
2049 rval = ha->isp_ops->pci_config(vha);
2050 if (rval) {
2051 ql_log(ql_log_warn, vha, 0x0148,
2052 "Unable to configure PCI space.\n");
2053 return rval;
2054 }
2055
2056 rval = qlafx00_init_fw_ready(vha);
2057 if (rval != QLA_SUCCESS)
2058 return rval;
2059
2060 qlafx00_save_queue_ptrs(vha);
2061
2062 rval = qlafx00_config_queues(vha);
2063 if (rval != QLA_SUCCESS)
2064 return rval;
2065
2066 /*
2067 * Allocate the array of outstanding commands
2068 * now that we know the firmware resources.
2069 */
2070 rval = qla2x00_alloc_outstanding_cmds(ha, vha->req);
2071 if (rval != QLA_SUCCESS)
2072 return rval;
2073
2074 rval = qla2x00_init_rings(vha);
2075 ha->flags.chip_reset_done = 1;
2076
2077 return rval;
2078}
2079
2080uint32_t
2081qlafx00_fw_state_show(struct device *dev, struct device_attribute *attr,
2082 char *buf)
2083{
2084 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2085 int rval = QLA_FUNCTION_FAILED;
2086 uint32_t state[1];
2087
2088 if (qla2x00_reset_active(vha))
2089 ql_log(ql_log_warn, vha, 0x70ce,
2090 "ISP reset active.\n");
2091 else if (!vha->hw->flags.eeh_busy) {
2092 rval = qlafx00_get_firmware_state(vha, state);
2093 }
2094 if (rval != QLA_SUCCESS)
2095 memset(state, -1, sizeof(state));
2096
2097 return state[0];
2098}
2099
2100void
2101qlafx00_get_host_speed(struct Scsi_Host *shost)
2102{
2103 struct qla_hw_data *ha = ((struct scsi_qla_host *)
2104 (shost_priv(shost)))->hw;
2105 u32 speed = FC_PORTSPEED_UNKNOWN;
2106
2107 switch (ha->link_data_rate) {
2108 case QLAFX00_PORT_SPEED_2G:
2109 speed = FC_PORTSPEED_2GBIT;
2110 break;
2111 case QLAFX00_PORT_SPEED_4G:
2112 speed = FC_PORTSPEED_4GBIT;
2113 break;
2114 case QLAFX00_PORT_SPEED_8G:
2115 speed = FC_PORTSPEED_8GBIT;
2116 break;
2117 case QLAFX00_PORT_SPEED_10G:
2118 speed = FC_PORTSPEED_10GBIT;
2119 break;
2120 }
2121 fc_host_speed(shost) = speed;
2122}
2123
2124/** QLAFX00 specific ISR implementation functions */
2125
/*
 * qlafx00_handle_sense() - copy the first chunk of sense data from a
 * status IOCB into the SCSI command's sense buffer.
 *
 * Tracks three lengths: the total firmware-reported sense length
 * (FW_SENSE_LEN), the amount still to be stored in cp->sense_buffer
 * (CMD_SENSE_LEN/PTR), and the bytes available in this entry
 * (@par_sense_len). If firmware has more sense bytes than fit in this
 * entry, the SRB is parked on rsp->status_srb so the remainder can be
 * consumed by qlafx00_status_cont_entry().
 */
static inline void
qlafx00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
		     uint32_t sense_len, struct rsp_que *rsp, int res)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cp = GET_CMD_SP(sp);
	uint32_t track_sense_len;

	/* Remember the full firmware-reported length before clamping. */
	SET_FW_SENSE_LEN(sp, sense_len);

	/* Never store more than the midlayer's sense buffer can hold. */
	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	SET_CMD_SENSE_LEN(sp, sense_len);
	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
	track_sense_len = sense_len;

	/* Only @par_sense_len bytes are present in this IOCB entry. */
	if (sense_len > par_sense_len)
		sense_len = par_sense_len;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	SET_FW_SENSE_LEN(sp, GET_FW_SENSE_LEN(sp) - sense_len);

	/* Advance the cursor for any continuation entries. */
	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
	track_sense_len -= sense_len;
	SET_CMD_SENSE_LEN(sp, track_sense_len);

	ql_dbg(ql_dbg_io, vha, 0x304d,
	    "sense_len=0x%x par_sense_len=0x%x track_sense_len=0x%x.\n",
	    sense_len, par_sense_len, track_sense_len);
	/* More sense pending: defer completion to the continuation path. */
	if (GET_FW_SENSE_LEN(sp) > 0) {
		rsp->status_srb = sp;
		cp->result = res;
	}

	if (sense_len) {
		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3039,
		    "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
		    sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
		    cp);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3049,
		    cp->sense_buffer, sense_len);
	}
}
2171
2172static void
2173qlafx00_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2174 struct tsk_mgmt_entry_fx00 *pkt, srb_t *sp,
2175 uint16_t sstatus, uint16_t cpstatus)
2176{
2177 struct srb_iocb *tmf;
2178
2179 tmf = &sp->u.iocb_cmd;
2180 if (cpstatus != CS_COMPLETE ||
2181 (sstatus & SS_RESPONSE_INFO_LEN_VALID))
2182 cpstatus = CS_INCOMPLETE;
2183 tmf->u.tmf.comp_status = cpstatus;
2184 sp->done(vha, sp, 0);
2185}
2186
2187static void
2188qlafx00_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2189 struct abort_iocb_entry_fx00 *pkt)
2190{
2191 const char func[] = "ABT_IOCB";
2192 srb_t *sp;
2193 struct srb_iocb *abt;
2194
2195 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2196 if (!sp)
2197 return;
2198
2199 abt = &sp->u.iocb_cmd;
2200 abt->u.abt.comp_status = le32_to_cpu(pkt->tgt_id_sts);
2201 sp->done(vha, sp, 0);
2202}
2203
2204static void
2205qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
2206 struct ioctl_iocb_entry_fx00 *pkt)
2207{
2208 const char func[] = "IOSB_IOCB";
2209 srb_t *sp;
2210 struct fc_bsg_job *bsg_job;
2211 struct srb_iocb *iocb_job;
2212 int res;
2213 struct qla_mt_iocb_rsp_fx00 fstatus;
2214 uint8_t *fw_sts_ptr;
2215
2216 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2217 if (!sp)
2218 return;
2219
2220 if (sp->type == SRB_FXIOCB_DCMD) {
2221 iocb_job = &sp->u.iocb_cmd;
2222 iocb_job->u.fxiocb.seq_number = le32_to_cpu(pkt->seq_no);
2223 iocb_job->u.fxiocb.fw_flags = le32_to_cpu(pkt->fw_iotcl_flags);
2224 iocb_job->u.fxiocb.result = le32_to_cpu(pkt->status);
2225 if (iocb_job->u.fxiocb.flags & SRB_FXDISC_RSP_DWRD_VALID)
2226 iocb_job->u.fxiocb.req_data =
2227 le32_to_cpu(pkt->dataword_r);
2228 } else {
2229 bsg_job = sp->u.bsg_job;
2230
2231 memset(&fstatus, 0, sizeof(struct qla_mt_iocb_rsp_fx00));
2232
2233 fstatus.reserved_1 = pkt->reserved_0;
2234 fstatus.func_type = pkt->comp_func_num;
2235 fstatus.ioctl_flags = pkt->fw_iotcl_flags;
2236 fstatus.ioctl_data = pkt->dataword_r;
2237 fstatus.adapid = pkt->adapid;
2238 fstatus.adapid_hi = pkt->adapid_hi;
2239 fstatus.reserved_2 = pkt->reserved_1;
2240 fstatus.res_count = pkt->residuallen;
2241 fstatus.status = pkt->status;
2242 fstatus.seq_number = pkt->seq_no;
2243 memcpy(fstatus.reserved_3,
2244 pkt->reserved_2, 20 * sizeof(uint8_t));
2245
2246 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
2247 sizeof(struct fc_bsg_reply);
2248
2249 memcpy(fw_sts_ptr, (uint8_t *)&fstatus,
2250 sizeof(struct qla_mt_iocb_rsp_fx00));
2251 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
2252 sizeof(struct qla_mt_iocb_rsp_fx00) + sizeof(uint8_t);
2253
2254 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
2255 sp->fcport->vha, 0x5080,
2256 (uint8_t *)pkt, sizeof(struct ioctl_iocb_entry_fx00));
2257
2258 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
2259 sp->fcport->vha, 0x5074,
2260 (uint8_t *)fw_sts_ptr, sizeof(struct qla_mt_iocb_rsp_fx00));
2261
2262 res = bsg_job->reply->result = DID_OK << 16;
2263 bsg_job->reply->reply_payload_rcv_len =
2264 bsg_job->reply_payload.payload_len;
2265 }
2266 sp->done(vha, sp, res);
2267}
2268
2269/**
2270 * qlafx00_status_entry() - Process a Status IOCB entry.
2271 * @ha: SCSI driver HA context
2272 * @pkt: Entry pointer
2273 */
2274static void
2275qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
2276{
2277 srb_t *sp;
2278 fc_port_t *fcport;
2279 struct scsi_cmnd *cp;
2280 struct sts_entry_fx00 *sts;
2281 uint16_t comp_status;
2282 uint16_t scsi_status;
2283 uint16_t ox_id;
2284 uint8_t lscsi_status;
2285 int32_t resid;
2286 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
2287 fw_resid_len;
2288 uint8_t *rsp_info = NULL, *sense_data = NULL;
2289 struct qla_hw_data *ha = vha->hw;
2290 uint32_t hindex, handle;
2291 uint16_t que;
2292 struct req_que *req;
2293 int logit = 1;
2294 int res = 0;
2295
2296 sts = (struct sts_entry_fx00 *) pkt;
2297
2298 comp_status = le16_to_cpu(sts->comp_status);
2299 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
2300 hindex = sts->handle;
2301 handle = LSW(hindex);
2302
2303 que = MSW(hindex);
2304 req = ha->req_q_map[que];
2305
2306 /* Validate handle. */
2307 if (handle < req->num_outstanding_cmds)
2308 sp = req->outstanding_cmds[handle];
2309 else
2310 sp = NULL;
2311
2312 if (sp == NULL) {
2313 ql_dbg(ql_dbg_io, vha, 0x3034,
2314 "Invalid status handle (0x%x).\n", handle);
2315
2316 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2317 qla2xxx_wake_dpc(vha);
2318 return;
2319 }
2320
2321 if (sp->type == SRB_TM_CMD) {
2322 req->outstanding_cmds[handle] = NULL;
2323 qlafx00_tm_iocb_entry(vha, req, pkt, sp,
2324 scsi_status, comp_status);
2325 return;
2326 }
2327
2328 /* Fast path completion. */
2329 if (comp_status == CS_COMPLETE && scsi_status == 0) {
2330 qla2x00_do_host_ramp_up(vha);
2331 qla2x00_process_completed_request(vha, req, handle);
2332 return;
2333 }
2334
2335 req->outstanding_cmds[handle] = NULL;
2336 cp = GET_CMD_SP(sp);
2337 if (cp == NULL) {
2338 ql_dbg(ql_dbg_io, vha, 0x3048,
2339 "Command already returned (0x%x/%p).\n",
2340 handle, sp);
2341
2342 return;
2343 }
2344
2345 lscsi_status = scsi_status & STATUS_MASK;
2346
2347 fcport = sp->fcport;
2348
2349 ox_id = 0;
2350 sense_len = par_sense_len = rsp_info_len = resid_len =
2351 fw_resid_len = 0;
2352 if (scsi_status & SS_SENSE_LEN_VALID)
2353 sense_len = le32_to_cpu(sts->sense_len);
2354 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
2355 resid_len = le32_to_cpu(sts->residual_len);
2356 if (comp_status == CS_DATA_UNDERRUN)
2357 fw_resid_len = le32_to_cpu(sts->residual_len);
2358 rsp_info = sense_data = sts->data;
2359 par_sense_len = sizeof(sts->data);
2360
2361 /* Check for overrun. */
2362 if (comp_status == CS_COMPLETE &&
2363 scsi_status & SS_RESIDUAL_OVER)
2364 comp_status = CS_DATA_OVERRUN;
2365
2366 /*
2367 * Based on Host and scsi status generate status code for Linux
2368 */
2369 switch (comp_status) {
2370 case CS_COMPLETE:
2371 case CS_QUEUE_FULL:
2372 if (scsi_status == 0) {
2373 res = DID_OK << 16;
2374 break;
2375 }
2376 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
2377 resid = resid_len;
2378 scsi_set_resid(cp, resid);
2379
2380 if (!lscsi_status &&
2381 ((unsigned)(scsi_bufflen(cp) - resid) <
2382 cp->underflow)) {
2383 ql_dbg(ql_dbg_io, fcport->vha, 0x3050,
2384 "Mid-layer underflow "
2385 "detected (0x%x of 0x%x bytes).\n",
2386 resid, scsi_bufflen(cp));
2387
2388 res = DID_ERROR << 16;
2389 break;
2390 }
2391 }
2392 res = DID_OK << 16 | lscsi_status;
2393
2394 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
2395 ql_dbg(ql_dbg_io, fcport->vha, 0x3051,
2396 "QUEUE FULL detected.\n");
2397 break;
2398 }
2399 logit = 0;
2400 if (lscsi_status != SS_CHECK_CONDITION)
2401 break;
2402
2403 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2404 if (!(scsi_status & SS_SENSE_LEN_VALID))
2405 break;
2406
2407 qlafx00_handle_sense(sp, sense_data, par_sense_len, sense_len,
2408 rsp, res);
2409 break;
2410
2411 case CS_DATA_UNDERRUN:
2412 /* Use F/W calculated residual length. */
2413 if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
2414 resid = fw_resid_len;
2415 else
2416 resid = resid_len;
2417 scsi_set_resid(cp, resid);
2418 if (scsi_status & SS_RESIDUAL_UNDER) {
2419 if ((IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
2420 && fw_resid_len != resid_len) {
2421 ql_dbg(ql_dbg_io, fcport->vha, 0x3052,
2422 "Dropped frame(s) detected "
2423 "(0x%x of 0x%x bytes).\n",
2424 resid, scsi_bufflen(cp));
2425
2426 res = DID_ERROR << 16 | lscsi_status;
2427 goto check_scsi_status;
2428 }
2429
2430 if (!lscsi_status &&
2431 ((unsigned)(scsi_bufflen(cp) - resid) <
2432 cp->underflow)) {
2433 ql_dbg(ql_dbg_io, fcport->vha, 0x3053,
2434 "Mid-layer underflow "
2435 "detected (0x%x of 0x%x bytes, "
2436 "cp->underflow: 0x%x).\n",
2437 resid, scsi_bufflen(cp), cp->underflow);
2438
2439 res = DID_ERROR << 16;
2440 break;
2441 }
2442 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
2443 lscsi_status != SAM_STAT_BUSY) {
2444 /*
2445 * scsi status of task set and busy are considered
2446 * to be task not completed.
2447 */
2448
2449 ql_dbg(ql_dbg_io, fcport->vha, 0x3054,
2450 "Dropped frame(s) detected (0x%x "
2451 "of 0x%x bytes).\n", resid,
2452 scsi_bufflen(cp));
2453
2454 res = DID_ERROR << 16 | lscsi_status;
2455 goto check_scsi_status;
2456 } else {
2457 ql_dbg(ql_dbg_io, fcport->vha, 0x3055,
2458 "scsi_status: 0x%x, lscsi_status: 0x%x\n",
2459 scsi_status, lscsi_status);
2460 }
2461
2462 res = DID_OK << 16 | lscsi_status;
2463 logit = 0;
2464
2465check_scsi_status:
2466 /*
2467 * Check to see if SCSI Status is non zero. If so report SCSI
2468 * Status.
2469 */
2470 if (lscsi_status != 0) {
2471 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
2472 ql_dbg(ql_dbg_io, fcport->vha, 0x3056,
2473 "QUEUE FULL detected.\n");
2474 logit = 1;
2475 break;
2476 }
2477 if (lscsi_status != SS_CHECK_CONDITION)
2478 break;
2479
2480 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2481 if (!(scsi_status & SS_SENSE_LEN_VALID))
2482 break;
2483
2484 qlafx00_handle_sense(sp, sense_data, par_sense_len,
2485 sense_len, rsp, res);
2486 }
2487 break;
2488
2489 case CS_PORT_LOGGED_OUT:
2490 case CS_PORT_CONFIG_CHG:
2491 case CS_PORT_BUSY:
2492 case CS_INCOMPLETE:
2493 case CS_PORT_UNAVAILABLE:
2494 case CS_TIMEOUT:
2495 case CS_RESET:
2496
2497 /*
2498 * We are going to have the fc class block the rport
2499 * while we try to recover so instruct the mid layer
2500 * to requeue until the class decides how to handle this.
2501 */
2502 res = DID_TRANSPORT_DISRUPTED << 16;
2503
2504 ql_dbg(ql_dbg_io, fcport->vha, 0x3057,
2505 "Port down status: port-state=0x%x.\n",
2506 atomic_read(&fcport->state));
2507
2508 if (atomic_read(&fcport->state) == FCS_ONLINE)
2509 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
2510 break;
2511
2512 case CS_ABORTED:
2513 res = DID_RESET << 16;
2514 break;
2515
2516 default:
2517 res = DID_ERROR << 16;
2518 break;
2519 }
2520
2521 if (logit)
2522 ql_dbg(ql_dbg_io, fcport->vha, 0x3058,
2523 "FCP command status: 0x%x-0x%x (0x%x) "
2524 "nexus=%ld:%d:%d tgt_id: 0x%x lscsi_status: 0x%x"
2525 "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
2526 "rsp_info=0x%x resid=0x%x fw_resid=0x%x "
2527 "sense_len=0x%x, par_sense_len=0x%x, rsp_info_len=0x%x\n",
2528 comp_status, scsi_status, res, vha->host_no,
2529 cp->device->id, cp->device->lun, fcport->tgt_id,
2530 lscsi_status, cp->cmnd[0], cp->cmnd[1], cp->cmnd[2],
2531 cp->cmnd[3], cp->cmnd[4], cp->cmnd[5], cp->cmnd[6],
2532 cp->cmnd[7], cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp),
2533 rsp_info_len, resid_len, fw_resid_len, sense_len,
2534 par_sense_len, rsp_info_len);
2535
2536 if (!res)
2537 qla2x00_do_host_ramp_up(vha);
2538
2539 if (rsp->status_srb == NULL)
2540 sp->done(ha, sp, res);
2541}
2542
2543/**
2544 * qlafx00_status_cont_entry() - Process a Status Continuations entry.
2545 * @ha: SCSI driver HA context
2546 * @pkt: Entry pointer
2547 *
2548 * Extended sense data.
2549 */
2550static void
2551qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
2552{
2553 uint8_t sense_sz = 0;
2554 struct qla_hw_data *ha = rsp->hw;
2555 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
2556 srb_t *sp = rsp->status_srb;
2557 struct scsi_cmnd *cp;
2558 uint32_t sense_len;
2559 uint8_t *sense_ptr;
2560
2561 if (!sp) {
2562 ql_dbg(ql_dbg_io, vha, 0x3037,
2563 "no SP, sp = %p\n", sp);
2564 return;
2565 }
2566
2567 if (!GET_FW_SENSE_LEN(sp)) {
2568 ql_dbg(ql_dbg_io, vha, 0x304b,
2569 "no fw sense data, sp = %p\n", sp);
2570 return;
2571 }
2572 cp = GET_CMD_SP(sp);
2573 if (cp == NULL) {
2574 ql_log(ql_log_warn, vha, 0x303b,
2575 "cmd is NULL: already returned to OS (sp=%p).\n", sp);
2576
2577 rsp->status_srb = NULL;
2578 return;
2579 }
2580
2581 if (!GET_CMD_SENSE_LEN(sp)) {
2582 ql_dbg(ql_dbg_io, vha, 0x304c,
2583 "no sense data, sp = %p\n", sp);
2584 } else {
2585 sense_len = GET_CMD_SENSE_LEN(sp);
2586 sense_ptr = GET_CMD_SENSE_PTR(sp);
2587 ql_dbg(ql_dbg_io, vha, 0x304f,
2588 "sp=%p sense_len=0x%x sense_ptr=%p.\n",
2589 sp, sense_len, sense_ptr);
2590
2591 if (sense_len > sizeof(pkt->data))
2592 sense_sz = sizeof(pkt->data);
2593 else
2594 sense_sz = sense_len;
2595
2596 /* Move sense data. */
2597 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304e,
2598 (uint8_t *)pkt, sizeof(sts_cont_entry_t));
2599 memcpy(sense_ptr, pkt->data, sense_sz);
2600 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304a,
2601 sense_ptr, sense_sz);
2602
2603 sense_len -= sense_sz;
2604 sense_ptr += sense_sz;
2605
2606 SET_CMD_SENSE_PTR(sp, sense_ptr);
2607 SET_CMD_SENSE_LEN(sp, sense_len);
2608 }
2609 sense_len = GET_FW_SENSE_LEN(sp);
2610 sense_len = (sense_len > sizeof(pkt->data)) ?
2611 (sense_len - sizeof(pkt->data)) : 0;
2612 SET_FW_SENSE_LEN(sp, sense_len);
2613
2614 /* Place command on done queue. */
2615 if (sense_len == 0) {
2616 rsp->status_srb = NULL;
2617 sp->done(ha, sp, cp->result);
2618 }
2619}
2620
2621/**
2622 * qlafx00_multistatus_entry() - Process Multi response queue entries.
2623 * @ha: SCSI driver HA context
2624 */
2625static void
2626qlafx00_multistatus_entry(struct scsi_qla_host *vha,
2627 struct rsp_que *rsp, void *pkt)
2628{
2629 srb_t *sp;
2630 struct multi_sts_entry_fx00 *stsmfx;
2631 struct qla_hw_data *ha = vha->hw;
2632 uint32_t handle, hindex, handle_count, i;
2633 uint16_t que;
2634 struct req_que *req;
2635 uint32_t *handle_ptr;
2636
2637 stsmfx = (struct multi_sts_entry_fx00 *) pkt;
2638
2639 handle_count = stsmfx->handle_count;
2640
2641 if (handle_count > MAX_HANDLE_COUNT) {
2642 ql_dbg(ql_dbg_io, vha, 0x3035,
2643 "Invalid handle count (0x%x).\n", handle_count);
2644 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2645 qla2xxx_wake_dpc(vha);
2646 return;
2647 }
2648
2649 handle_ptr = (uint32_t *) &stsmfx->handles[0];
2650
2651 for (i = 0; i < handle_count; i++) {
2652 hindex = le32_to_cpu(*handle_ptr);
2653 handle = LSW(hindex);
2654 que = MSW(hindex);
2655 req = ha->req_q_map[que];
2656
2657 /* Validate handle. */
2658 if (handle < req->num_outstanding_cmds)
2659 sp = req->outstanding_cmds[handle];
2660 else
2661 sp = NULL;
2662
2663 if (sp == NULL) {
2664 ql_dbg(ql_dbg_io, vha, 0x3044,
2665 "Invalid status handle (0x%x).\n", handle);
2666 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2667 qla2xxx_wake_dpc(vha);
2668 return;
2669 }
2670 qla2x00_process_completed_request(vha, req, handle);
2671 handle_ptr++;
2672 }
2673}
2674
2675/**
2676 * qlafx00_error_entry() - Process an error entry.
2677 * @ha: SCSI driver HA context
2678 * @pkt: Entry pointer
2679 */
2680static void
2681qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp,
2682 struct sts_entry_fx00 *pkt, uint8_t estatus, uint8_t etype)
2683{
2684 srb_t *sp;
2685 struct qla_hw_data *ha = vha->hw;
2686 const char func[] = "ERROR-IOCB";
2687 uint16_t que = MSW(pkt->handle);
2688 struct req_que *req = NULL;
2689 int res = DID_ERROR << 16;
2690
2691 ql_dbg(ql_dbg_async, vha, 0x507f,
2692 "type of error status in response: 0x%x\n", estatus);
2693
2694 req = ha->req_q_map[que];
2695
2696 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2697 if (sp) {
2698 sp->done(ha, sp, res);
2699 return;
2700 }
2701
2702 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2703 qla2xxx_wake_dpc(vha);
2704}
2705
2706/**
2707 * qlafx00_process_response_queue() - Process response queue entries.
2708 * @ha: SCSI driver HA context
2709 */
2710static void
2711qlafx00_process_response_queue(struct scsi_qla_host *vha,
2712 struct rsp_que *rsp)
2713{
2714 struct sts_entry_fx00 *pkt;
2715 response_t *lptr;
2716
2717 if (!vha->flags.online)
2718 return;
2719
2720 while (RD_REG_DWORD(&(rsp->ring_ptr->signature)) !=
2721 RESPONSE_PROCESSED) {
2722 lptr = rsp->ring_ptr;
2723 memcpy_fromio(rsp->rsp_pkt, lptr, sizeof(rsp->rsp_pkt));
2724 pkt = (struct sts_entry_fx00 *)rsp->rsp_pkt;
2725
2726 rsp->ring_index++;
2727 if (rsp->ring_index == rsp->length) {
2728 rsp->ring_index = 0;
2729 rsp->ring_ptr = rsp->ring;
2730 } else {
2731 rsp->ring_ptr++;
2732 }
2733
2734 if (pkt->entry_status != 0 &&
2735 pkt->entry_type != IOCTL_IOSB_TYPE_FX00) {
2736 qlafx00_error_entry(vha, rsp,
2737 (struct sts_entry_fx00 *)pkt, pkt->entry_status,
2738 pkt->entry_type);
2739 goto next_iter;
2740 continue;
2741 }
2742
2743 switch (pkt->entry_type) {
2744 case STATUS_TYPE_FX00:
2745 qlafx00_status_entry(vha, rsp, pkt);
2746 break;
2747
2748 case STATUS_CONT_TYPE_FX00:
2749 qlafx00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
2750 break;
2751
2752 case MULTI_STATUS_TYPE_FX00:
2753 qlafx00_multistatus_entry(vha, rsp, pkt);
2754 break;
2755
2756 case ABORT_IOCB_TYPE_FX00:
2757 qlafx00_abort_iocb_entry(vha, rsp->req,
2758 (struct abort_iocb_entry_fx00 *)pkt);
2759 break;
2760
2761 case IOCTL_IOSB_TYPE_FX00:
2762 qlafx00_ioctl_iosb_entry(vha, rsp->req,
2763 (struct ioctl_iocb_entry_fx00 *)pkt);
2764 break;
2765 default:
2766 /* Type Not Supported. */
2767 ql_dbg(ql_dbg_async, vha, 0x5081,
2768 "Received unknown response pkt type %x "
2769 "entry status=%x.\n",
2770 pkt->entry_type, pkt->entry_status);
2771 break;
2772 }
2773next_iter:
2774 WRT_REG_DWORD(&lptr->signature, RESPONSE_PROCESSED);
2775 wmb();
2776 }
2777
2778 /* Adjust ring index */
2779 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
2780}
2781
2782/**
2783 * qlafx00_async_event() - Process aynchronous events.
2784 * @ha: SCSI driver HA context
2785 */
2786static void
2787qlafx00_async_event(scsi_qla_host_t *vha)
2788{
2789 struct qla_hw_data *ha = vha->hw;
2790 struct device_reg_fx00 __iomem *reg;
2791 int data_size = 1;
2792
2793 reg = &ha->iobase->ispfx00;
2794 /* Setup to process RIO completion. */
2795 switch (ha->aenmb[0]) {
2796 case QLAFX00_MBA_SYSTEM_ERR: /* System Error */
2797 ql_log(ql_log_warn, vha, 0x5079,
2798 "ISP System Error - mbx1=%x\n", ha->aenmb[0]);
2799 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2800 break;
2801
2802 case QLAFX00_MBA_SHUTDOWN_RQSTD: /* Shutdown requested */
2803 ql_dbg(ql_dbg_async, vha, 0x5076,
2804 "Asynchronous FW shutdown requested.\n");
2805 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2806 qla2xxx_wake_dpc(vha);
2807 break;
2808
2809 case QLAFX00_MBA_PORT_UPDATE: /* Port database update */
2810 ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
2811 ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
2812 ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
2813 ql_dbg(ql_dbg_async, vha, 0x5077,
2814 "Asynchronous port Update received "
2815 "aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n",
2816 ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3]);
2817 data_size = 4;
2818 break;
2819 default:
2820 ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
2821 ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
2822 ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
2823 ha->aenmb[4] = RD_REG_WORD(&reg->aenmailbox4);
2824 ha->aenmb[5] = RD_REG_WORD(&reg->aenmailbox5);
2825 ha->aenmb[6] = RD_REG_WORD(&reg->aenmailbox6);
2826 ha->aenmb[7] = RD_REG_WORD(&reg->aenmailbox7);
2827 ql_dbg(ql_dbg_async, vha, 0x5078,
2828 "AEN:%04x %04x %04x %04x :%04x %04x %04x %04x\n",
2829 ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3],
2830 ha->aenmb[4], ha->aenmb[5], ha->aenmb[6], ha->aenmb[7]);
2831 break;
2832 }
2833 qlafx00_post_aenfx_work(vha, ha->aenmb[0],
2834 (uint32_t *)ha->aenmb, data_size);
2835}
2836
2837/**
2838 *
2839 * qlafx00x_mbx_completion() - Process mailbox command completions.
2840 * @ha: SCSI driver HA context
2841 * @mb16: Mailbox16 register
2842 */
2843static void
2844qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
2845{
2846 uint16_t cnt;
2847 uint16_t __iomem *wptr;
2848 struct qla_hw_data *ha = vha->hw;
2849 struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
2850
2851 if (!ha->mcp32)
2852 ql_dbg(ql_dbg_async, vha, 0x507e, "MBX pointer ERROR.\n");
2853
2854 /* Load return mailbox registers. */
2855 ha->flags.mbox_int = 1;
2856 ha->mailbox_out32[0] = mb0;
2857 wptr = (uint16_t __iomem *)&reg->mailbox17;
2858
2859 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
2860 ha->mailbox_out32[cnt] = RD_REG_WORD(wptr);
2861 wptr++;
2862 }
2863}
2864
2865/**
2866 * qlafx00_intr_handler() - Process interrupts for the ISPFX00.
2867 * @irq:
2868 * @dev_id: SCSI driver HA context
2869 *
2870 * Called by system whenever the host adapter generates an interrupt.
2871 *
2872 * Returns handled flag.
2873 */
2874irqreturn_t
2875qlafx00_intr_handler(int irq, void *dev_id)
2876{
2877 scsi_qla_host_t *vha;
2878 struct qla_hw_data *ha;
2879 struct device_reg_fx00 __iomem *reg;
2880 int status;
2881 unsigned long iter;
2882 uint32_t stat;
2883 uint32_t mb[8];
2884 struct rsp_que *rsp;
2885 unsigned long flags;
2886 uint32_t clr_intr = 0;
2887
2888 rsp = (struct rsp_que *) dev_id;
2889 if (!rsp) {
2890 ql_log(ql_log_info, NULL, 0x507d,
2891 "%s: NULL response queue pointer.\n", __func__);
2892 return IRQ_NONE;
2893 }
2894
2895 ha = rsp->hw;
2896 reg = &ha->iobase->ispfx00;
2897 status = 0;
2898
2899 if (unlikely(pci_channel_offline(ha->pdev)))
2900 return IRQ_HANDLED;
2901
2902 spin_lock_irqsave(&ha->hardware_lock, flags);
2903 vha = pci_get_drvdata(ha->pdev);
2904 for (iter = 50; iter--; clr_intr = 0) {
2905 stat = QLAFX00_RD_INTR_REG(ha);
2906 if ((stat & QLAFX00_HST_INT_STS_BITS) == 0)
2907 break;
2908
2909 switch (stat & QLAFX00_HST_INT_STS_BITS) {
2910 case QLAFX00_INTR_MB_CMPLT:
2911 case QLAFX00_INTR_MB_RSP_CMPLT:
2912 case QLAFX00_INTR_MB_ASYNC_CMPLT:
2913 case QLAFX00_INTR_ALL_CMPLT:
2914 mb[0] = RD_REG_WORD(&reg->mailbox16);
2915 qlafx00_mbx_completion(vha, mb[0]);
2916 status |= MBX_INTERRUPT;
2917 clr_intr |= QLAFX00_INTR_MB_CMPLT;
2918 break;
2919 case QLAFX00_INTR_ASYNC_CMPLT:
2920 case QLAFX00_INTR_RSP_ASYNC_CMPLT:
2921 ha->aenmb[0] = RD_REG_WORD(&reg->aenmailbox0);
2922 qlafx00_async_event(vha);
2923 clr_intr |= QLAFX00_INTR_ASYNC_CMPLT;
2924 break;
2925 case QLAFX00_INTR_RSP_CMPLT:
2926 qlafx00_process_response_queue(vha, rsp);
2927 clr_intr |= QLAFX00_INTR_RSP_CMPLT;
2928 break;
2929 default:
2930 ql_dbg(ql_dbg_async, vha, 0x507a,
2931 "Unrecognized interrupt type (%d).\n", stat);
2932 break;
2933 }
2934 QLAFX00_CLR_INTR_REG(ha, clr_intr);
2935 QLAFX00_RD_INTR_REG(ha);
2936 }
2937 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2938
2939 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2940 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2941 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2942 complete(&ha->mbx_intr_comp);
2943 }
2944 return IRQ_HANDLED;
2945}
2946
2947/** QLAFX00 specific IOCB implementation functions */
2948
2949static inline cont_a64_entry_t *
2950qlafx00_prep_cont_type1_iocb(struct req_que *req,
2951 cont_a64_entry_t *lcont_pkt)
2952{
2953 cont_a64_entry_t *cont_pkt;
2954
2955 /* Adjust ring index. */
2956 req->ring_index++;
2957 if (req->ring_index == req->length) {
2958 req->ring_index = 0;
2959 req->ring_ptr = req->ring;
2960 } else {
2961 req->ring_ptr++;
2962 }
2963
2964 cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
2965
2966 /* Load packet defaults. */
2967 *((uint32_t *)(&lcont_pkt->entry_type)) =
2968 __constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00);
2969
2970 return cont_pkt;
2971}
2972
/*
 * qlafx00_build_scsi_iocbs() - populate direction, byte count and data
 * segment descriptors (DSDs) for an FX00 Command Type 7 IOCB.
 * @sp: SRB being built
 * @cmd_pkt: ring-resident command packet (used for continuation placement)
 * @tot_dsds: number of mapped scatter/gather entries
 * @lcmd_pkt: local (staging) copy of the command packet being filled in
 *
 * The command IOCB holds one DSD; each Continuation Type 1 IOCB holds
 * five more. Continuations are staged locally and copied to the ring
 * with memcpy_toio() once full (or after the final segment).
 */
static inline void
qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
			 uint16_t tot_dsds, struct cmd_type_7_fx00 *lcmd_pkt)
{
	uint16_t        avail_dsds;
	uint32_t        *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i, cont;
	struct req_que *req;
	cont_a64_entry_t lcont_pkt;
	cont_a64_entry_t *cont_pkt;

	vha = sp->fcport->vha;
	req = vha->req;

	cmd = GET_CMD_SP(sp);
	cont = 0;
	cont_pkt = NULL;

	/* Update entry type to indicate FX00 Command Type 7 IOCB */
	*((uint32_t *)(&lcmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(FX00_COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		lcmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		lcmd_pkt->cntrl_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		lcmd_pkt->cntrl_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&lcmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			memset(&lcont_pkt, 0, REQUEST_ENTRY_SIZE);
			cont_pkt =
			    qlafx00_prep_cont_type1_iocb(req, &lcont_pkt);
			cur_dsd = (uint32_t *)lcont_pkt.dseg_0_address;
			avail_dsds = 5;
			cont = 1;
		}

		/* Each DSD is a 64-bit DMA address plus a 32-bit length. */
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
		/* Continuation packet is full: flush it to the ring. */
		if (avail_dsds == 0 && cont == 1) {
			cont = 0;
			memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
			    REQUEST_ENTRY_SIZE);
		}

	}
	/* Flush a partially-filled continuation packet, if any. */
	if (avail_dsds != 0 && cont == 1) {
		memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
		    REQUEST_ENTRY_SIZE);
	}
}
3054
/**
 * qlafx00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Builds a Command Type 7 IOCB in a local (stack) buffer, reserves a
 * request-queue slot and an outstanding-command handle under
 * ha->hardware_lock, copies the IOCB onto the ring and notifies the
 * firmware.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qlafx00_start_scsi(srb_t *sp)
{
	int ret, nseg;
	unsigned long flags;
	uint32_t index;
	uint32_t handle;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct cmd_type_7_fx00 *cmd_pkt;
	struct cmd_type_7_fx00 lcmd_pkt;	/* staging copy built off-ring */
	struct scsi_lun llun;
	char tag[2];

	/* Setup device pointers. */
	ret = 0;	/* NOTE(review): set but never used in this function */

	rsp = ha->rsp_q_map[0];
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/*
	 * Forcing marker needed for now.  marker_needed is cleared right
	 * here, so the "Send marker" branch below is currently dead code.
	 */
	vha->marker_needed = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/*
	 * Check for room in outstanding command list.  Scan circularly
	 * from the last used handle; the wrap goes to 1, so handle 0 is
	 * never handed out by this loop.
	 */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	/*
	 * Make sure the ring has room for req_cnt entries plus slack;
	 * re-read the out pointer from hardware before giving up.
	 */
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7_fx00 *)req->ring_ptr;

	memset(&lcmd_pkt, 0, REQUEST_ENTRY_SIZE);

	lcmd_pkt.handle = MAKE_HANDLE(req->id, sp->handle);
	lcmd_pkt.handle_hi = 0;
	lcmd_pkt.dseg_count = cpu_to_le16(tot_dsds);
	lcmd_pkt.tgt_idx = cpu_to_le16(sp->fcport->tgt_id);

	int_to_scsilun(cmd->device->lun, &llun);
	host_to_adap((uint8_t *)&llun, (uint8_t *)&lcmd_pkt.lun,
	    sizeof(lcmd_pkt.lun));

	/* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			lcmd_pkt.task = TSK_HEAD_OF_QUEUE;
			break;
		case ORDERED_QUEUE_TAG:
			lcmd_pkt.task = TSK_ORDERED;
			break;
		}
	}

	/* Load SCSI command packet. */
	host_to_adap(cmd->cmnd, lcmd_pkt.fcp_cdb, sizeof(lcmd_pkt.fcp_cdb));
	lcmd_pkt.byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qlafx00_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, &lcmd_pkt);

	/* Set total data segment count. */
	lcmd_pkt.entry_count = (uint8_t)req_cnt;

	/*
	 * Specify response queue number where completion should happen.
	 * NOTE(review): the rsp id is carried in the entry_status field —
	 * confirm against the FX00 firmware interface.
	 */
	lcmd_pkt.entry_status = (uint8_t) rsp->id;

	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
	    (uint8_t *)cmd->cmnd, cmd->cmd_len);
	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3032,
	    (uint8_t *)&lcmd_pkt, REQUEST_ENTRY_SIZE);

	/* Publish the staged IOCB to the ring slot. */
	memcpy_toio((void __iomem *)cmd_pkt, &lcmd_pkt, REQUEST_ENTRY_SIZE);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	/* tot_dsds != 0 implies dma_map_sg succeeded above — undo it. */
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
3217
3218void
3219qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
3220{
3221 struct srb_iocb *fxio = &sp->u.iocb_cmd;
3222 scsi_qla_host_t *vha = sp->fcport->vha;
3223 struct req_que *req = vha->req;
3224 struct tsk_mgmt_entry_fx00 tm_iocb;
3225 struct scsi_lun llun;
3226
3227 memset(&tm_iocb, 0, sizeof(struct tsk_mgmt_entry_fx00));
3228 tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00;
3229 tm_iocb.entry_count = 1;
3230 tm_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
3231 tm_iocb.handle_hi = 0;
3232 tm_iocb.timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3233 tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id);
3234 tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags);
3235 if (tm_iocb.control_flags == TCF_LUN_RESET) {
3236 int_to_scsilun(fxio->u.tmf.lun, &llun);
3237 host_to_adap((uint8_t *)&llun, (uint8_t *)&tm_iocb.lun,
3238 sizeof(struct scsi_lun));
3239 }
3240
3241 memcpy((void __iomem *)ptm_iocb, &tm_iocb,
3242 sizeof(struct tsk_mgmt_entry_fx00));
3243 wmb();
3244}
3245
3246void
3247qlafx00_abort_iocb(srb_t *sp, struct abort_iocb_entry_fx00 *pabt_iocb)
3248{
3249 struct srb_iocb *fxio = &sp->u.iocb_cmd;
3250 scsi_qla_host_t *vha = sp->fcport->vha;
3251 struct req_que *req = vha->req;
3252 struct abort_iocb_entry_fx00 abt_iocb;
3253
3254 memset(&abt_iocb, 0, sizeof(struct abort_iocb_entry_fx00));
3255 abt_iocb.entry_type = ABORT_IOCB_TYPE_FX00;
3256 abt_iocb.entry_count = 1;
3257 abt_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
3258 abt_iocb.abort_handle =
3259 cpu_to_le32(MAKE_HANDLE(req->id, fxio->u.abt.cmd_hndl));
3260 abt_iocb.tgt_id_sts = cpu_to_le16(sp->fcport->tgt_id);
3261 abt_iocb.req_que_no = cpu_to_le16(req->id);
3262
3263 memcpy((void __iomem *)pabt_iocb, &abt_iocb,
3264 sizeof(struct abort_iocb_entry_fx00));
3265 wmb();
3266}
3267
3268void
3269qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
3270{
3271 struct srb_iocb *fxio = &sp->u.iocb_cmd;
3272 struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
3273 struct fc_bsg_job *bsg_job;
3274 struct fxdisc_entry_fx00 fx_iocb;
3275 uint8_t entry_cnt = 1;
3276
3277 memset(&fx_iocb, 0, sizeof(struct fxdisc_entry_fx00));
3278 fx_iocb.entry_type = FX00_IOCB_TYPE;
3279 fx_iocb.handle = cpu_to_le32(sp->handle);
3280 fx_iocb.entry_count = entry_cnt;
3281
3282 if (sp->type == SRB_FXIOCB_DCMD) {
3283 fx_iocb.func_num =
3284 cpu_to_le16(sp->u.iocb_cmd.u.fxiocb.req_func_type);
3285 fx_iocb.adapid = cpu_to_le32(fxio->u.fxiocb.adapter_id);
3286 fx_iocb.adapid_hi = cpu_to_le32(fxio->u.fxiocb.adapter_id_hi);
3287 fx_iocb.reserved_0 = cpu_to_le32(fxio->u.fxiocb.reserved_0);
3288 fx_iocb.reserved_1 = cpu_to_le32(fxio->u.fxiocb.reserved_1);
3289 fx_iocb.dataword_extra =
3290 cpu_to_le32(fxio->u.fxiocb.req_data_extra);
3291
3292 if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) {
3293 fx_iocb.req_dsdcnt = cpu_to_le16(1);
3294 fx_iocb.req_xfrcnt =
3295 cpu_to_le16(fxio->u.fxiocb.req_len);
3296 fx_iocb.dseg_rq_address[0] =
3297 cpu_to_le32(LSD(fxio->u.fxiocb.req_dma_handle));
3298 fx_iocb.dseg_rq_address[1] =
3299 cpu_to_le32(MSD(fxio->u.fxiocb.req_dma_handle));
3300 fx_iocb.dseg_rq_len =
3301 cpu_to_le32(fxio->u.fxiocb.req_len);
3302 }
3303
3304 if (fxio->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) {
3305 fx_iocb.rsp_dsdcnt = cpu_to_le16(1);
3306 fx_iocb.rsp_xfrcnt =
3307 cpu_to_le16(fxio->u.fxiocb.rsp_len);
3308 fx_iocb.dseg_rsp_address[0] =
3309 cpu_to_le32(LSD(fxio->u.fxiocb.rsp_dma_handle));
3310 fx_iocb.dseg_rsp_address[1] =
3311 cpu_to_le32(MSD(fxio->u.fxiocb.rsp_dma_handle));
3312 fx_iocb.dseg_rsp_len =
3313 cpu_to_le32(fxio->u.fxiocb.rsp_len);
3314 }
3315
3316 if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DWRD_VALID) {
3317 fx_iocb.dataword =
3318 cpu_to_le32(fxio->u.fxiocb.req_data);
3319 }
3320 fx_iocb.flags = fxio->u.fxiocb.flags;
3321 } else {
3322 struct scatterlist *sg;
3323 bsg_job = sp->u.bsg_job;
3324 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
3325 &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
3326
3327 fx_iocb.func_num = piocb_rqst->func_type;
3328 fx_iocb.adapid = piocb_rqst->adapid;
3329 fx_iocb.adapid_hi = piocb_rqst->adapid_hi;
3330 fx_iocb.reserved_0 = piocb_rqst->reserved_0;
3331 fx_iocb.reserved_1 = piocb_rqst->reserved_1;
3332 fx_iocb.dataword_extra = piocb_rqst->dataword_extra;
3333 fx_iocb.dataword = piocb_rqst->dataword;
3334 fx_iocb.req_xfrcnt = cpu_to_le16(piocb_rqst->req_len);
3335 fx_iocb.rsp_xfrcnt = cpu_to_le16(piocb_rqst->rsp_len);
3336
3337 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
3338 int avail_dsds, tot_dsds;
3339 cont_a64_entry_t lcont_pkt;
3340 cont_a64_entry_t *cont_pkt = NULL;
3341 uint32_t *cur_dsd;
3342 int index = 0, cont = 0;
3343
3344 fx_iocb.req_dsdcnt =
3345 cpu_to_le16(bsg_job->request_payload.sg_cnt);
3346 tot_dsds =
3347 cpu_to_le32(bsg_job->request_payload.sg_cnt);
3348 cur_dsd = (uint32_t *)&fx_iocb.dseg_rq_address[0];
3349 avail_dsds = 1;
3350 for_each_sg(bsg_job->request_payload.sg_list, sg,
3351 tot_dsds, index) {
3352 dma_addr_t sle_dma;
3353
3354 /* Allocate additional continuation packets? */
3355 if (avail_dsds == 0) {
3356 /*
3357 * Five DSDs are available in the Cont.
3358 * Type 1 IOCB.
3359 */
3360 memset(&lcont_pkt, 0,
3361 REQUEST_ENTRY_SIZE);
3362 cont_pkt =
3363 qlafx00_prep_cont_type1_iocb(
3364 sp->fcport->vha->req,
3365 &lcont_pkt);
3366 cur_dsd = (uint32_t *)
3367 lcont_pkt.dseg_0_address;
3368 avail_dsds = 5;
3369 cont = 1;
3370 entry_cnt++;
3371 }
3372
3373 sle_dma = sg_dma_address(sg);
3374 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3375 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3376 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3377 avail_dsds--;
3378
3379 if (avail_dsds == 0 && cont == 1) {
3380 cont = 0;
3381 memcpy_toio(
3382 (void __iomem *)cont_pkt,
3383 &lcont_pkt, REQUEST_ENTRY_SIZE);
3384 ql_dump_buffer(
3385 ql_dbg_user + ql_dbg_verbose,
3386 sp->fcport->vha, 0x3042,
3387 (uint8_t *)&lcont_pkt,
3388 REQUEST_ENTRY_SIZE);
3389 }
3390 }
3391 if (avail_dsds != 0 && cont == 1) {
3392 memcpy_toio((void __iomem *)cont_pkt,
3393 &lcont_pkt, REQUEST_ENTRY_SIZE);
3394 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
3395 sp->fcport->vha, 0x3043,
3396 (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
3397 }
3398 }
3399
3400 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
3401 int avail_dsds, tot_dsds;
3402 cont_a64_entry_t lcont_pkt;
3403 cont_a64_entry_t *cont_pkt = NULL;
3404 uint32_t *cur_dsd;
3405 int index = 0, cont = 0;
3406
3407 fx_iocb.rsp_dsdcnt =
3408 cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3409 tot_dsds = cpu_to_le32(bsg_job->reply_payload.sg_cnt);
3410 cur_dsd = (uint32_t *)&fx_iocb.dseg_rsp_address[0];
3411 avail_dsds = 1;
3412
3413 for_each_sg(bsg_job->reply_payload.sg_list, sg,
3414 tot_dsds, index) {
3415 dma_addr_t sle_dma;
3416
3417 /* Allocate additional continuation packets? */
3418 if (avail_dsds == 0) {
3419 /*
3420 * Five DSDs are available in the Cont.
3421 * Type 1 IOCB.
3422 */
3423 memset(&lcont_pkt, 0,
3424 REQUEST_ENTRY_SIZE);
3425 cont_pkt =
3426 qlafx00_prep_cont_type1_iocb(
3427 sp->fcport->vha->req,
3428 &lcont_pkt);
3429 cur_dsd = (uint32_t *)
3430 lcont_pkt.dseg_0_address;
3431 avail_dsds = 5;
3432 cont = 1;
3433 entry_cnt++;
3434 }
3435
3436 sle_dma = sg_dma_address(sg);
3437 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3438 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3439 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3440 avail_dsds--;
3441
3442 if (avail_dsds == 0 && cont == 1) {
3443 cont = 0;
3444 memcpy_toio((void __iomem *)cont_pkt,
3445 &lcont_pkt,
3446 REQUEST_ENTRY_SIZE);
3447 ql_dump_buffer(
3448 ql_dbg_user + ql_dbg_verbose,
3449 sp->fcport->vha, 0x3045,
3450 (uint8_t *)&lcont_pkt,
3451 REQUEST_ENTRY_SIZE);
3452 }
3453 }
3454 if (avail_dsds != 0 && cont == 1) {
3455 memcpy_toio((void __iomem *)cont_pkt,
3456 &lcont_pkt, REQUEST_ENTRY_SIZE);
3457 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
3458 sp->fcport->vha, 0x3046,
3459 (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
3460 }
3461 }
3462
3463 if (piocb_rqst->flags & SRB_FXDISC_REQ_DWRD_VALID)
3464 fx_iocb.dataword = cpu_to_le32(piocb_rqst->dataword);
3465 fx_iocb.flags = piocb_rqst->flags;
3466 fx_iocb.entry_count = entry_cnt;
3467 }
3468
3469 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
3470 sp->fcport->vha, 0x3047,
3471 (uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00));
3472
3473 memcpy((void __iomem *)pfxiocb, &fx_iocb,
3474 sizeof(struct fxdisc_entry_fx00));
3475 wmb();
3476}
diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h
new file mode 100644
index 000000000000..cc327dc2fd10
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_mr.h
@@ -0,0 +1,510 @@
1/*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2013 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7#ifndef __QLA_MR_H
8#define __QLA_MR_H
9
10/*
11 * The PCI VendorID and DeviceID for our board.
12 */
13#define PCI_DEVICE_ID_QLOGIC_ISPF001 0xF001
14
15/* FX00 specific definitions */
16
#define FX00_COMMAND_TYPE_7	0x07	/* Command Type 7 entry for 7XXX */
/*
 * SCSI command IOCB placed on the FX00 request queue by
 * qlafx00_start_scsi().  Multi-byte counts are written with
 * cpu_to_le*() by the builder.
 */
struct cmd_type_7_fx00 {
	uint8_t entry_type;		/* Entry type. */
	uint8_t entry_count;		/* Entry count. */
	uint8_t sys_define;		/* System defined. */
	uint8_t entry_status;		/* Entry Status. */

	uint32_t handle;		/* System handle. */
	uint32_t handle_hi;

	uint16_t tgt_idx;		/* Target Idx. */
	uint16_t timeout;		/* Command timeout. */

	uint16_t dseg_count;		/* Data segment count. */
	uint16_t scsi_rsp_dsd_len;

	struct scsi_lun lun;		/* LUN (LE). */

	uint8_t cntrl_flags;

	uint8_t task_mgmt_flags;	/* Task management flags. */

	uint8_t task;			/* Queue tag: TSK_SIMPLE/HEAD/ORDERED */

	uint8_t crn;

	uint8_t fcp_cdb[MAX_CMDSZ];	/* SCSI command words. */
	uint32_t byte_count;		/* Total byte count. */

	uint32_t dseg_0_address[2];	/* Data segment 0 address. */
	uint32_t dseg_0_len;		/* Data segment 0 length. */
};
49
/*
 * ISP queue - marker entry structure definition.
 */
struct mrk_entry_fx00 {
	uint8_t entry_type;		/* Entry type. */
	uint8_t entry_count;		/* Entry count. */
	uint8_t handle_count;		/* Handle count. */
	uint8_t entry_status;		/* Entry Status. */

	uint32_t handle;		/* System handle. */
	uint32_t handle_hi;		/* System handle (upper 32 bits). */

	uint16_t tgt_id;		/* Target ID. */

	uint8_t modifier;		/* Modifier (7-0). */
	uint8_t reserved_1;

	uint8_t reserved_2[5];

	uint8_t lun[8];			/* FCP LUN (BE). */
	uint8_t reserved_3[36];
};
72
73
#define STATUS_TYPE_FX00	0x01	/* Status entry. */
/* Completion status IOCB returned on the FX00 response queue. */
struct sts_entry_fx00 {
	uint8_t entry_type;		/* Entry type. */
	uint8_t entry_count;		/* Entry count. */
	uint8_t sys_define;		/* System defined. */
	uint8_t entry_status;		/* Entry Status. */

	uint32_t handle;		/* System handle. */
	uint32_t handle_hi;		/* System handle (upper 32 bits). */

	uint16_t comp_status;		/* Completion status. */
	uint16_t reserved_0;		/* OX_ID used by the firmware. */

	uint32_t residual_len;		/* FW calc residual transfer length. */

	uint16_t reserved_1;
	uint16_t state_flags;		/* State flags. */

	uint16_t reserved_2;
	uint16_t scsi_status;		/* SCSI status. */

	uint32_t sense_len;		/* FCP SENSE length. */
	uint8_t data[32];		/* FCP response/sense information. */
};
98
99
#define MAX_HANDLE_COUNT	15
#define MULTI_STATUS_TYPE_FX00	0x0D

/* Batched completion: up to MAX_HANDLE_COUNT handles in one entry. */
struct multi_sts_entry_fx00 {
	uint8_t entry_type;		/* Entry type. */
	uint8_t sys_define;		/* System defined. */
	uint8_t handle_count;		/* Number of valid entries in handles[] */
	uint8_t entry_status;

	uint32_t handles[MAX_HANDLE_COUNT];
};
111
#define TSK_MGMT_IOCB_TYPE_FX00		0x05
/* Task-management IOCB; built by qlafx00_tm_iocb(). */
struct tsk_mgmt_entry_fx00 {
	uint8_t entry_type;		/* Entry type. */
	uint8_t entry_count;		/* Entry count. */
	uint8_t sys_define;
	uint8_t entry_status;		/* Entry Status. */

	uint32_t handle;		/* System handle. */

	uint32_t handle_hi;		/* System handle (upper 32 bits). */

	uint16_t tgt_id;		/* Target Idx. */

	uint16_t reserved_1;

	uint16_t delay;			/* Activity delay in seconds. */

	uint16_t timeout;		/* Command timeout. */

	struct scsi_lun lun;		/* LUN (LE). */

	uint32_t control_flags;		/* Control Flags (TCF_*, LE). */

	uint8_t reserved_2[32];
};
137
138
#define ABORT_IOCB_TYPE_FX00	0x08	/* Abort IOCB status. */
/* Abort IOCB; built by qlafx00_abort_iocb(). */
struct abort_iocb_entry_fx00 {
	uint8_t entry_type;		/* Entry type. */
	uint8_t entry_count;		/* Entry count. */
	uint8_t sys_define;		/* System defined. */
	uint8_t entry_status;		/* Entry Status. */

	uint32_t handle;		/* System handle (this entry). */
	uint32_t handle_hi;		/* System handle (upper 32 bits). */

	uint16_t tgt_id_sts;		/* Completion status. */
	uint16_t options;

	uint32_t abort_handle;		/* Handle of the command to abort. */
	uint32_t abort_handle_hi;	/* System handle (upper 32 bits). */

	uint16_t req_que_no;		/* Request queue id. */
	uint8_t reserved_1[38];
};
158
#define IOCTL_IOSB_TYPE_FX00	0x0C
/* Completion entry for an FXDISC (IOCTL pass-through) request. */
struct ioctl_iocb_entry_fx00 {
	uint8_t entry_type;		/* Entry type. */
	uint8_t entry_count;		/* Entry count. */
	uint8_t sys_define;		/* System defined. */
	uint8_t entry_status;		/* Entry Status. */

	uint32_t handle;		/* System handle. */
	uint32_t reserved_0;		/* System handle. */

	uint16_t comp_func_num;		/* Function code being completed. */
	uint16_t fw_iotcl_flags;

	uint32_t dataword_r;		/* Data word returned */
	uint32_t adapid;		/* Adapter ID */
	uint32_t adapid_hi;		/* Adapter ID high */
	uint32_t reserved_1;

	uint32_t seq_no;
	uint8_t reserved_2[20];
	uint32_t residuallen;
	uint32_t status;
};
182
#define STATUS_CONT_TYPE_FX00	0x04

#define FX00_IOCB_TYPE		0x0B
/* FXDISC request entry; built by qlafx00_fxdisc_iocb(). */
struct fxdisc_entry_fx00 {
	uint8_t entry_type;		/* Entry type. */
	uint8_t entry_count;		/* Entry count. */
	uint8_t sys_define;		/* System Defined. */
	uint8_t entry_status;		/* Entry Status. */

	uint32_t handle;		/* System handle. */
	uint32_t reserved_0;		/* System handle. */

	uint16_t func_num;		/* FXDISC_* function code. */
	uint16_t req_xfrcnt;		/* Request transfer count. */
	uint16_t req_dsdcnt;		/* Request DSD count. */
	uint16_t rsp_xfrcnt;		/* Response transfer count. */
	uint16_t rsp_dsdcnt;		/* Response DSD count. */
	uint8_t flags;			/* SRB_FXDISC_* validity flags. */
	uint8_t reserved_1;

	uint32_t dseg_rq_address[2];	/* Data segment 0 address. */
	uint32_t dseg_rq_len;		/* Data segment 0 length. */
	uint32_t dseg_rsp_address[2];	/* Data segment 1 address. */
	uint32_t dseg_rsp_len;		/* Data segment 1 length. */

	uint32_t dataword;
	uint32_t adapid;
	uint32_t adapid_hi;
	uint32_t dataword_extra;
};
213
/* Target node record returned by the firmware (packed, no padding). */
struct qlafx00_tgt_node_info {
	uint8_t tgt_node_wwpn[WWN_SIZE];
	uint8_t tgt_node_wwnn[WWN_SIZE];
	uint32_t tgt_node_state;
	uint8_t reserved[128];
	uint32_t reserved_1[8];
	uint64_t reserved_2[4];
} __packed;
222
#define QLAFX00_TGT_NODE_INFO sizeof(struct qlafx00_tgt_node_info)

/* Values reported in port_info_data.link_status */
#define QLAFX00_LINK_STATUS_DOWN	0x10
#define QLAFX00_LINK_STATUS_UP		0x11

/* Values reported in port_info_data.plink_rate */
#define QLAFX00_PORT_SPEED_2G	0x2
#define QLAFX00_PORT_SPEED_4G	0x4
#define QLAFX00_PORT_SPEED_8G	0x8
#define QLAFX00_PORT_SPEED_10G	0xa
/* Per-port information block returned by the firmware (packed). */
struct port_info_data {
	uint8_t         port_state;
	uint8_t         port_type;
	uint16_t        port_identifier;
	uint32_t        up_port_state;
	uint8_t         fw_ver_num[32];
	uint8_t         portal_attrib;
	uint16_t        host_option;
	uint8_t         reset_delay;
	uint8_t         pdwn_retry_cnt;
	uint16_t        max_luns2tgt;
	uint8_t         risc_ver;
	uint8_t         pconn_option;
	uint16_t        risc_option;
	uint16_t        max_frame_len;
	uint16_t        max_iocb_alloc;
	uint16_t        exec_throttle;
	uint8_t         retry_cnt;
	uint8_t         retry_delay;
	uint8_t         port_name[8];
	uint8_t         port_id[3];
	uint8_t         link_status;	/* QLAFX00_LINK_STATUS_* */
	uint8_t         plink_rate;	/* QLAFX00_PORT_SPEED_* */
	uint32_t        link_config;
	uint16_t        adap_haddr;
	uint8_t         tgt_disc;
	uint8_t         log_tout;
	uint8_t         node_name[8];
	uint16_t        erisc_opt1;
	uint8_t         resp_acc_tmr;
	uint8_t         intr_del_tmr;
	uint8_t         erisc_opt2;
	uint8_t         alt_port_name[8];
	uint8_t         alt_node_name[8];
	uint8_t         link_down_tout;
	uint8_t         conn_type;
	uint8_t         fc_fw_mode;
	uint32_t        uiReserved[48];
} __packed;
271
/* OS Type Designations (host_system_info.os_type) */
#define OS_TYPE_UNKNOWN             0
#define OS_TYPE_LINUX               2

/* Linux Info: fixed field sizes inside struct host_system_info */
#define SYSNAME_LENGTH              128
#define NODENAME_LENGTH             64
#define RELEASE_LENGTH              64
#define VERSION_LENGTH              64
#define MACHINE_LENGTH              64
#define DOMNAME_LENGTH              64
283
/* Host OS description sent to the firmware (packed). */
struct host_system_info {
	uint32_t os_type;		/* OS_TYPE_* */
	char    sysname[SYSNAME_LENGTH];
	char    nodename[NODENAME_LENGTH];
	char    release[RELEASE_LENGTH];
	char    version[VERSION_LENGTH];
	char    machine[MACHINE_LENGTH];
	char    domainname[DOMNAME_LENGTH];
	char    hostdriver[VERSION_LENGTH];
	uint32_t reserved[64];
} __packed;

/* Host registration payload: system info plus current time (packed). */
struct register_host_info {
	struct host_system_info     hsi;	/* host system info */
	uint64_t        utc;			/* UTC (system time) */
	uint32_t        reserved[64];		/* future additions */
} __packed;
301
302
#define QLAFX00_PORT_DATA_INFO (sizeof(struct port_info_data))
#define QLAFX00_TGT_NODE_LIST_SIZE (sizeof(uint32_t) * 32)

/* Adapter configuration block returned by the firmware (packed). */
struct config_info_data {
	uint8_t		product_name[256];
	uint8_t		symbolic_name[64];
	uint8_t		serial_num[32];
	uint8_t		hw_version[16];
	uint8_t		fw_version[16];
	uint8_t		uboot_version[16];
	uint8_t		fru_serial_num[32];

	uint8_t		fc_port_count;
	uint8_t		iscsi_port_count;
	uint8_t		reserved1[2];

	uint8_t		mode;
	uint8_t		log_level;
	uint8_t		reserved2[2];

	uint32_t	log_size;

	uint8_t		tgt_pres_mode;
	uint8_t		iqn_flags;
	uint8_t		lun_mapping;

	uint64_t	adapter_id;

	uint32_t	cluster_key_len;
	uint8_t		cluster_key[10];

	uint64_t	cluster_master_id;
	uint64_t	cluster_slave_id;
	uint8_t		cluster_flags;
} __packed;
338
/* FXDISC request function codes (fxdisc_entry_fx00.func_num) */
#define FXDISC_GET_CONFIG_INFO	0x01
#define FXDISC_GET_PORT_INFO	0x02
#define FXDISC_GET_TGT_NODE_INFO	0x80
#define FXDISC_GET_TGT_NODE_LIST	0x81
#define FXDISC_REG_HOST_INFO	0x99

/* SoC register byte offsets (relative to ha->cregbase; see macros below) */
#define QLAFX00_HBA_ICNTRL_REG		0x21B08
#define QLAFX00_ICR_ENB_MASK            0x80000000
#define QLAFX00_ICR_DIS_MASK            0x7fffffff
#define QLAFX00_HST_RST_REG		0x18264
#define QLAFX00_HST_TO_HBA_REG		0x20A04
#define QLAFX00_HBA_TO_HOST_REG		0x21B70
#define QLAFX00_HST_INT_STS_BITS	0x7
#define QLAFX00_BAR1_BASE_ADDR_REG	0x40018
#define QLAFX00_PEX0_WIN0_BASE_ADDR_REG	0x41824

/* Interrupt-status completion bits */
#define QLAFX00_INTR_MB_CMPLT		0x1
#define QLAFX00_INTR_RSP_CMPLT		0x2
#define QLAFX00_INTR_MB_RSP_CMPLT	0x3
#define QLAFX00_INTR_ASYNC_CMPLT	0x4
#define QLAFX00_INTR_MB_ASYNC_CMPLT	0x5
#define QLAFX00_INTR_RSP_ASYNC_CMPLT	0x6
#define QLAFX00_INTR_ALL_CMPLT		0x7

/* Asynchronous mailbox event codes */
#define QLAFX00_MBA_SYSTEM_ERR		0x8002
#define QLAFX00_MBA_LINK_UP		0x8011
#define QLAFX00_MBA_LINK_DOWN		0x8012
#define QLAFX00_MBA_PORT_UPDATE		0x8014
#define QLAFX00_MBA_SHUTDOWN_RQSTD	0x8062

/* SoC reset/fabric control register offsets */
#define SOC_SW_RST_CONTROL_REG_CORE0     0x0020800
#define SOC_FABRIC_RST_CONTROL_REG       0x0020840
#define SOC_FABRIC_CONTROL_REG           0x0020200
#define SOC_FABRIC_CONFIG_REG            0x0020204

#define SOC_INTERRUPT_SOURCE_I_CONTROL_REG     0x0020B00
#define SOC_CORE_TIMER_REG                     0x0021850
#define SOC_IRQ_ACK_REG                        0x00218b4

#define CONTINUE_A64_TYPE_FX00	0x03	/* Continuation entry. */

/* Accessors: all go through the cregbase I/O mapping. */
#define QLAFX00_SET_HST_INTR(ha, value) \
	WRT_REG_DWORD((ha)->cregbase + QLAFX00_HST_TO_HBA_REG, \
	value)

/* Note: CLR macros write the complement of the bits to clear. */
#define QLAFX00_CLR_HST_INTR(ha, value) \
	WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \
	~value)

#define QLAFX00_RD_INTR_REG(ha) \
	RD_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG)

#define QLAFX00_CLR_INTR_REG(ha, value) \
	WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \
	~value)

#define QLAFX00_SET_HBA_SOC_REG(ha, off, val)\
	WRT_REG_DWORD((ha)->cregbase + off, val)

#define QLAFX00_GET_HBA_SOC_REG(ha, off)\
	RD_REG_DWORD((ha)->cregbase + off)

#define QLAFX00_HBA_RST_REG(ha, val)\
	WRT_REG_DWORD((ha)->cregbase + QLAFX00_HST_RST_REG, val)

#define QLAFX00_RD_ICNTRL_REG(ha) \
	RD_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG)

/* Enable/disable interrupts via read-modify-write of the ICR enable bit. */
#define QLAFX00_ENABLE_ICNTRL_REG(ha) \
	WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \
	(QLAFX00_GET_HBA_SOC_REG(ha, QLAFX00_HBA_ICNTRL_REG) | \
	 QLAFX00_ICR_ENB_MASK))

#define QLAFX00_DISABLE_ICNTRL_REG(ha) \
	WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \
	(QLAFX00_GET_HBA_SOC_REG(ha, QLAFX00_HBA_ICNTRL_REG) & \
	 QLAFX00_ICR_DIS_MASK))

#define QLAFX00_RD_REG(ha, off) \
	RD_REG_DWORD((ha)->cregbase + off)

#define QLAFX00_WR_REG(ha, off, val) \
	WRT_REG_DWORD((ha)->cregbase + off, val)
422
/* bsg vendor-command request header parsed by qlafx00_fxdisc_iocb(). */
struct qla_mt_iocb_rqst_fx00 {
	uint32_t reserved_0;

	uint16_t func_type;		/* FXDISC_* function code */
	uint8_t flags;			/* SRB_FXDISC_* validity flags */
	uint8_t reserved_1;

	uint32_t dataword;

	uint32_t adapid;
	uint32_t adapid_hi;

	uint32_t dataword_extra;

	uint32_t req_len;		/* request payload length */

	uint32_t rsp_len;		/* response payload length */
};

/* bsg vendor-command reply returned for an FXDISC request. */
struct qla_mt_iocb_rsp_fx00 {
	uint32_t reserved_1;

	uint16_t func_type;
	uint16_t ioctl_flags;

	uint32_t ioctl_data;

	uint32_t adapid;
	uint32_t adapid_hi;

	uint32_t reserved_2;
	uint32_t seq_number;

	uint8_t reserved_3[20];

	int32_t res_count;		/* residual count (may be negative) */

	uint32_t status;
};
462
463
/* Adapter capacity limits for QLAFX00 */
#define MAILBOX_REGISTER_COUNT_FX00	16
#define AEN_MAILBOX_REGISTER_COUNT_FX00	8
#define MAX_FIBRE_DEVICES_FX00	512
/* NOTE(review): 0x1024 (= 4132) looks odd for a LUN count — confirm
 * against the firmware spec whether decimal 1024 was intended. */
#define MAX_LUNS_FX00		0x1024
#define MAX_TARGETS_FX00	MAX_ISA_DEVICES
#define REQUEST_ENTRY_CNT_FX00		512	/* Number of request entries. */
#define RESPONSE_ENTRY_CNT_FX00		256	/* Number of response entries.*/

/*
 * Firmware state codes for QLAFX00 adapters
 */
#define FSTATE_FX00_CONFIG_WAIT     0x0000	/* Waiting for driver to issue
						 * Initialize FW Mbox cmd
						 */
#define FSTATE_FX00_INITIALIZED     0x1000	/* FW has been initialized by
						 * the driver
						 */

#define FX00_DEF_RATOV	10
483
/* Per-HBA state specific to QLAFX00 adapters. */
struct mr_data_fx00 {
	uint8_t	product_name[256];
	uint8_t	symbolic_name[64];
	uint8_t	serial_num[32];
	uint8_t	hw_version[16];
	uint8_t	fw_version[16];
	uint8_t	uboot_version[16];
	uint8_t	fru_serial_num[32];
	fc_port_t       fcport;		/* fcport used for requests
					 * that are not linked
					 * to a particular target
					 */
	uint8_t fw_hbt_en;		/* firmware heartbeat enabled */
	uint8_t fw_hbt_cnt;		/* current heartbeat counter */
	uint8_t fw_hbt_miss_cnt;	/* consecutive missed heartbeats */
	uint32_t old_fw_hbt_cnt;	/* last observed heartbeat value */
	uint16_t fw_reset_timer_tick;
	uint8_t fw_reset_timer_exp;
	uint32_t old_aenmbx0_state;
};
504
/* Timer intervals (seconds unless noted) */
#define QLAFX00_LOOP_DOWN_TIME		615	/* 600 */
#define QLAFX00_HEARTBEAT_INTERVAL	6	/* number of seconds */
#define QLAFX00_HEARTBEAT_MISS_CNT	3	/* number of miss */
#define QLAFX00_RESET_INTERVAL		120	/* number of seconds */
#define QLAFX00_MAX_RESET_INTERVAL	600	/* number of seconds */
#endif
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 2c6dd3dfe0f4..a083715843bd 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -47,6 +47,7 @@ MODULE_PARM_DESC(ql2xenableclass2,
47 "Specify if Class 2 operations are supported from the very " 47 "Specify if Class 2 operations are supported from the very "
48 "beginning. Default is 0 - class 2 not supported."); 48 "beginning. Default is 0 - class 2 not supported.");
49 49
50
50int ql2xlogintimeout = 20; 51int ql2xlogintimeout = 20;
51module_param(ql2xlogintimeout, int, S_IRUGO); 52module_param(ql2xlogintimeout, int, S_IRUGO);
52MODULE_PARM_DESC(ql2xlogintimeout, 53MODULE_PARM_DESC(ql2xlogintimeout,
@@ -354,7 +355,12 @@ fail_req_map:
354 355
355static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req) 356static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
356{ 357{
357 if (req && req->ring) 358 if (IS_QLAFX00(ha)) {
359 if (req && req->ring_fx00)
360 dma_free_coherent(&ha->pdev->dev,
361 (req->length_fx00 + 1) * sizeof(request_t),
362 req->ring_fx00, req->dma_fx00);
363 } else if (req && req->ring)
358 dma_free_coherent(&ha->pdev->dev, 364 dma_free_coherent(&ha->pdev->dev,
359 (req->length + 1) * sizeof(request_t), 365 (req->length + 1) * sizeof(request_t),
360 req->ring, req->dma); 366 req->ring, req->dma);
@@ -368,11 +374,16 @@ static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
368 374
369static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp) 375static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
370{ 376{
371 if (rsp && rsp->ring) 377 if (IS_QLAFX00(ha)) {
378 if (rsp && rsp->ring)
379 dma_free_coherent(&ha->pdev->dev,
380 (rsp->length_fx00 + 1) * sizeof(request_t),
381 rsp->ring_fx00, rsp->dma_fx00);
382 } else if (rsp && rsp->ring) {
372 dma_free_coherent(&ha->pdev->dev, 383 dma_free_coherent(&ha->pdev->dev,
373 (rsp->length + 1) * sizeof(response_t), 384 (rsp->length + 1) * sizeof(response_t),
374 rsp->ring, rsp->dma); 385 rsp->ring, rsp->dma);
375 386 }
376 kfree(rsp); 387 kfree(rsp);
377 rsp = NULL; 388 rsp = NULL;
378} 389}
@@ -633,7 +644,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr)
633 qla2x00_rel_sp(sp->fcport->vha, sp); 644 qla2x00_rel_sp(sp->fcport->vha, sp);
634} 645}
635 646
636static void 647void
637qla2x00_sp_compl(void *data, void *ptr, int res) 648qla2x00_sp_compl(void *data, void *ptr, int res)
638{ 649{
639 struct qla_hw_data *ha = (struct qla_hw_data *)data; 650 struct qla_hw_data *ha = (struct qla_hw_data *)data;
@@ -657,6 +668,9 @@ qla2x00_sp_compl(void *data, void *ptr, int res)
657 cmd->scsi_done(cmd); 668 cmd->scsi_done(cmd);
658} 669}
659 670
671/* If we are SP1 here, we need to still take and release the host_lock as SP1
672 * does not have the changes necessary to avoid taking host->host_lock.
673 */
660static int 674static int
661qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) 675qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
662{ 676{
@@ -1304,6 +1318,9 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1304 } 1318 }
1305 } 1319 }
1306 1320
1321 if (IS_QLAFX00(ha))
1322 return QLA_SUCCESS;
1323
1307 if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) { 1324 if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
1308 atomic_set(&vha->loop_state, LOOP_DOWN); 1325 atomic_set(&vha->loop_state, LOOP_DOWN);
1309 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 1326 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
@@ -1858,6 +1875,7 @@ static struct isp_operations qla2100_isp_ops = {
1858 .start_scsi = qla2x00_start_scsi, 1875 .start_scsi = qla2x00_start_scsi,
1859 .abort_isp = qla2x00_abort_isp, 1876 .abort_isp = qla2x00_abort_isp,
1860 .iospace_config = qla2x00_iospace_config, 1877 .iospace_config = qla2x00_iospace_config,
1878 .initialize_adapter = qla2x00_initialize_adapter,
1861}; 1879};
1862 1880
1863static struct isp_operations qla2300_isp_ops = { 1881static struct isp_operations qla2300_isp_ops = {
@@ -1895,6 +1913,7 @@ static struct isp_operations qla2300_isp_ops = {
1895 .start_scsi = qla2x00_start_scsi, 1913 .start_scsi = qla2x00_start_scsi,
1896 .abort_isp = qla2x00_abort_isp, 1914 .abort_isp = qla2x00_abort_isp,
1897 .iospace_config = qla2x00_iospace_config, 1915 .iospace_config = qla2x00_iospace_config,
1916 .initialize_adapter = qla2x00_initialize_adapter,
1898}; 1917};
1899 1918
1900static struct isp_operations qla24xx_isp_ops = { 1919static struct isp_operations qla24xx_isp_ops = {
@@ -1932,6 +1951,7 @@ static struct isp_operations qla24xx_isp_ops = {
1932 .start_scsi = qla24xx_start_scsi, 1951 .start_scsi = qla24xx_start_scsi,
1933 .abort_isp = qla2x00_abort_isp, 1952 .abort_isp = qla2x00_abort_isp,
1934 .iospace_config = qla2x00_iospace_config, 1953 .iospace_config = qla2x00_iospace_config,
1954 .initialize_adapter = qla2x00_initialize_adapter,
1935}; 1955};
1936 1956
1937static struct isp_operations qla25xx_isp_ops = { 1957static struct isp_operations qla25xx_isp_ops = {
@@ -1969,6 +1989,7 @@ static struct isp_operations qla25xx_isp_ops = {
1969 .start_scsi = qla24xx_dif_start_scsi, 1989 .start_scsi = qla24xx_dif_start_scsi,
1970 .abort_isp = qla2x00_abort_isp, 1990 .abort_isp = qla2x00_abort_isp,
1971 .iospace_config = qla2x00_iospace_config, 1991 .iospace_config = qla2x00_iospace_config,
1992 .initialize_adapter = qla2x00_initialize_adapter,
1972}; 1993};
1973 1994
1974static struct isp_operations qla81xx_isp_ops = { 1995static struct isp_operations qla81xx_isp_ops = {
@@ -2006,6 +2027,7 @@ static struct isp_operations qla81xx_isp_ops = {
2006 .start_scsi = qla24xx_dif_start_scsi, 2027 .start_scsi = qla24xx_dif_start_scsi,
2007 .abort_isp = qla2x00_abort_isp, 2028 .abort_isp = qla2x00_abort_isp,
2008 .iospace_config = qla2x00_iospace_config, 2029 .iospace_config = qla2x00_iospace_config,
2030 .initialize_adapter = qla2x00_initialize_adapter,
2009}; 2031};
2010 2032
2011static struct isp_operations qla82xx_isp_ops = { 2033static struct isp_operations qla82xx_isp_ops = {
@@ -2043,6 +2065,7 @@ static struct isp_operations qla82xx_isp_ops = {
2043 .start_scsi = qla82xx_start_scsi, 2065 .start_scsi = qla82xx_start_scsi,
2044 .abort_isp = qla82xx_abort_isp, 2066 .abort_isp = qla82xx_abort_isp,
2045 .iospace_config = qla82xx_iospace_config, 2067 .iospace_config = qla82xx_iospace_config,
2068 .initialize_adapter = qla2x00_initialize_adapter,
2046}; 2069};
2047 2070
2048static struct isp_operations qla83xx_isp_ops = { 2071static struct isp_operations qla83xx_isp_ops = {
@@ -2080,6 +2103,45 @@ static struct isp_operations qla83xx_isp_ops = {
2080 .start_scsi = qla24xx_dif_start_scsi, 2103 .start_scsi = qla24xx_dif_start_scsi,
2081 .abort_isp = qla2x00_abort_isp, 2104 .abort_isp = qla2x00_abort_isp,
2082 .iospace_config = qla83xx_iospace_config, 2105 .iospace_config = qla83xx_iospace_config,
2106 .initialize_adapter = qla2x00_initialize_adapter,
2107};
2108
2109static struct isp_operations qlafx00_isp_ops = {
2110 .pci_config = qlafx00_pci_config,
2111 .reset_chip = qlafx00_soft_reset,
2112 .chip_diag = qlafx00_chip_diag,
2113 .config_rings = qlafx00_config_rings,
2114 .reset_adapter = qlafx00_soft_reset,
2115 .nvram_config = NULL,
2116 .update_fw_options = NULL,
2117 .load_risc = NULL,
2118 .pci_info_str = qlafx00_pci_info_str,
2119 .fw_version_str = qlafx00_fw_version_str,
2120 .intr_handler = qlafx00_intr_handler,
2121 .enable_intrs = qlafx00_enable_intrs,
2122 .disable_intrs = qlafx00_disable_intrs,
2123 .abort_command = qlafx00_abort_command,
2124 .target_reset = qlafx00_abort_target,
2125 .lun_reset = qlafx00_lun_reset,
2126 .fabric_login = NULL,
2127 .fabric_logout = NULL,
2128 .calc_req_entries = NULL,
2129 .build_iocbs = NULL,
2130 .prep_ms_iocb = qla24xx_prep_ms_iocb,
2131 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
2132 .read_nvram = qla24xx_read_nvram_data,
2133 .write_nvram = qla24xx_write_nvram_data,
2134 .fw_dump = NULL,
2135 .beacon_on = qla24xx_beacon_on,
2136 .beacon_off = qla24xx_beacon_off,
2137 .beacon_blink = NULL,
2138 .read_optrom = qla24xx_read_optrom_data,
2139 .write_optrom = qla24xx_write_optrom_data,
2140 .get_flash_version = qla24xx_get_flash_version,
2141 .start_scsi = qlafx00_start_scsi,
2142 .abort_isp = qlafx00_abort_isp,
2143 .iospace_config = qlafx00_iospace_config,
2144 .initialize_adapter = qlafx00_initialize_adapter,
2083}; 2145};
2084 2146
2085static inline void 2147static inline void
@@ -2192,6 +2254,9 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
2192 ha->device_type |= DT_T10_PI; 2254 ha->device_type |= DT_T10_PI;
2193 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2255 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2194 break; 2256 break;
2257 case PCI_DEVICE_ID_QLOGIC_ISPF001:
2258 ha->device_type |= DT_ISPFX00;
2259 break;
2195 } 2260 }
2196 2261
2197 if (IS_QLA82XX(ha)) 2262 if (IS_QLA82XX(ha))
@@ -2265,7 +2330,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2265 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 || 2330 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
2266 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 || 2331 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 ||
2267 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 || 2332 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 ||
2268 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031) { 2333 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 ||
2334 pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001) {
2269 bars = pci_select_bars(pdev, IORESOURCE_MEM); 2335 bars = pci_select_bars(pdev, IORESOURCE_MEM);
2270 mem_only = 1; 2336 mem_only = 1;
2271 ql_dbg_pci(ql_dbg_init, pdev, 0x0007, 2337 ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
@@ -2436,6 +2502,18 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2436 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX; 2502 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
2437 ha->nvram_conf_off = ~0; 2503 ha->nvram_conf_off = ~0;
2438 ha->nvram_data_off = ~0; 2504 ha->nvram_data_off = ~0;
2505 } else if (IS_QLAFX00(ha)) {
2506 ha->max_fibre_devices = MAX_FIBRE_DEVICES_FX00;
2507 ha->mbx_count = MAILBOX_REGISTER_COUNT_FX00;
2508 ha->aen_mbx_count = AEN_MAILBOX_REGISTER_COUNT_FX00;
2509 req_length = REQUEST_ENTRY_CNT_FX00;
2510 rsp_length = RESPONSE_ENTRY_CNT_FX00;
2511 ha->init_cb_size = sizeof(struct init_cb_fx);
2512 ha->isp_ops = &qlafx00_isp_ops;
2513 ha->port_down_retry_count = 30; /* default value */
2514 ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
2515 ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
2516 ha->mr.fw_hbt_en = 1;
2439 } 2517 }
2440 2518
2441 ql_dbg_pci(ql_dbg_init, pdev, 0x001e, 2519 ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
@@ -2500,13 +2578,24 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2500 2578
2501 host = base_vha->host; 2579 host = base_vha->host;
2502 base_vha->req = req; 2580 base_vha->req = req;
2503 host->can_queue = req->length + 128; 2581 if (IS_QLAFX00(ha))
2582 host->can_queue = 1024;
2583 else
2584 host->can_queue = req->length + 128;
2504 if (IS_QLA2XXX_MIDTYPE(ha)) 2585 if (IS_QLA2XXX_MIDTYPE(ha))
2505 base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx; 2586 base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
2506 else 2587 else
2507 base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER + 2588 base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
2508 base_vha->vp_idx; 2589 base_vha->vp_idx;
2509 2590
2591 /* Setup fcport template structure. */
2592 ha->mr.fcport.vha = base_vha;
2593 ha->mr.fcport.port_type = FCT_UNKNOWN;
2594 ha->mr.fcport.loop_id = FC_NO_LOOP_ID;
2595 qla2x00_set_fcport_state(&ha->mr.fcport, FCS_UNCONFIGURED);
2596 ha->mr.fcport.supported_classes = FC_COS_UNSPECIFIED;
2597 ha->mr.fcport.scan_state = 1;
2598
2510 /* Set the SG table size based on ISP type */ 2599 /* Set the SG table size based on ISP type */
2511 if (!IS_FWI2_CAPABLE(ha)) { 2600 if (!IS_FWI2_CAPABLE(ha)) {
2512 if (IS_QLA2100(ha)) 2601 if (IS_QLA2100(ha))
@@ -2562,6 +2651,13 @@ que_init:
2562 rsp->req = req; 2651 rsp->req = req;
2563 req->rsp = rsp; 2652 req->rsp = rsp;
2564 2653
2654 if (IS_QLAFX00(ha)) {
2655 ha->rsp_q_map[0] = rsp;
2656 ha->req_q_map[0] = req;
2657 set_bit(0, ha->req_qid_map);
2658 set_bit(0, ha->rsp_qid_map);
2659 }
2660
2565 /* FWI2-capable only. */ 2661 /* FWI2-capable only. */
2566 req->req_q_in = &ha->iobase->isp24.req_q_in; 2662 req->req_q_in = &ha->iobase->isp24.req_q_in;
2567 req->req_q_out = &ha->iobase->isp24.req_q_out; 2663 req->req_q_out = &ha->iobase->isp24.req_q_out;
@@ -2574,6 +2670,13 @@ que_init:
2574 rsp->rsp_q_out = &ha->mqiobase->isp25mq.rsp_q_out; 2670 rsp->rsp_q_out = &ha->mqiobase->isp25mq.rsp_q_out;
2575 } 2671 }
2576 2672
2673 if (IS_QLAFX00(ha)) {
2674 req->req_q_in = &ha->iobase->ispfx00.req_q_in;
2675 req->req_q_out = &ha->iobase->ispfx00.req_q_out;
2676 rsp->rsp_q_in = &ha->iobase->ispfx00.rsp_q_in;
2677 rsp->rsp_q_out = &ha->iobase->ispfx00.rsp_q_out;
2678 }
2679
2577 if (IS_QLA82XX(ha)) { 2680 if (IS_QLA82XX(ha)) {
2578 req->req_q_out = &ha->iobase->isp82.req_q_out[0]; 2681 req->req_q_out = &ha->iobase->isp82.req_q_out[0];
2579 rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0]; 2682 rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0];
@@ -2595,7 +2698,7 @@ que_init:
2595 "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n", 2698 "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
2596 req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out); 2699 req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
2597 2700
2598 if (qla2x00_initialize_adapter(base_vha)) { 2701 if (ha->isp_ops->initialize_adapter(base_vha)) {
2599 ql_log(ql_log_fatal, base_vha, 0x00d6, 2702 ql_log(ql_log_fatal, base_vha, 0x00d6,
2600 "Failed to initialize adapter - Adapter flags %x.\n", 2703 "Failed to initialize adapter - Adapter flags %x.\n",
2601 base_vha->device_flags); 2704 base_vha->device_flags);
@@ -2720,6 +2823,18 @@ skip_dpc:
2720 2823
2721 qla2x00_alloc_sysfs_attr(base_vha); 2824 qla2x00_alloc_sysfs_attr(base_vha);
2722 2825
2826 if (IS_QLAFX00(ha)) {
2827 ret = qlafx00_fx_disc(base_vha,
2828 &base_vha->hw->mr.fcport, FXDISC_GET_CONFIG_INFO);
2829
2830 ret = qlafx00_fx_disc(base_vha,
2831 &base_vha->hw->mr.fcport, FXDISC_GET_PORT_INFO);
2832
2833 /* Register system information */
2834 ret = qlafx00_fx_disc(base_vha,
2835 &base_vha->hw->mr.fcport, FXDISC_REG_HOST_INFO);
2836 }
2837
2723 qla2x00_init_host_attr(base_vha); 2838 qla2x00_init_host_attr(base_vha);
2724 2839
2725 qla2x00_dfs_setup(base_vha); 2840 qla2x00_dfs_setup(base_vha);
@@ -2777,6 +2892,8 @@ iospace_config_failed:
2777 } else { 2892 } else {
2778 if (ha->iobase) 2893 if (ha->iobase)
2779 iounmap(ha->iobase); 2894 iounmap(ha->iobase);
2895 if (ha->cregbase)
2896 iounmap(ha->cregbase);
2780 } 2897 }
2781 pci_release_selected_regions(ha->pdev, ha->bars); 2898 pci_release_selected_regions(ha->pdev, ha->bars);
2782 kfree(ha); 2899 kfree(ha);
@@ -2960,6 +3077,9 @@ qla2x00_remove_one(struct pci_dev *pdev)
2960 if (ha->iobase) 3077 if (ha->iobase)
2961 iounmap(ha->iobase); 3078 iounmap(ha->iobase);
2962 3079
3080 if (ha->cregbase)
3081 iounmap(ha->cregbase);
3082
2963 if (ha->mqiobase) 3083 if (ha->mqiobase)
2964 iounmap(ha->mqiobase); 3084 iounmap(ha->mqiobase);
2965 3085
@@ -3068,6 +3188,12 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
3068void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport, 3188void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
3069 int do_login, int defer) 3189 int do_login, int defer)
3070{ 3190{
3191 if (IS_QLAFX00(vha->hw)) {
3192 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
3193 qla2x00_schedule_rport_del(vha, fcport, defer);
3194 return;
3195 }
3196
3071 if (atomic_read(&fcport->state) == FCS_ONLINE && 3197 if (atomic_read(&fcport->state) == FCS_ONLINE &&
3072 vha->vp_idx == fcport->vha->vp_idx) { 3198 vha->vp_idx == fcport->vha->vp_idx) {
3073 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); 3199 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
@@ -3710,6 +3836,22 @@ qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code)
3710 kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp); 3836 kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp);
3711} 3837}
3712 3838
3839int
3840qlafx00_post_aenfx_work(struct scsi_qla_host *vha, uint32_t evtcode,
3841 uint32_t *data, int cnt)
3842{
3843 struct qla_work_evt *e;
3844
3845 e = qla2x00_alloc_work(vha, QLA_EVT_AENFX);
3846 if (!e)
3847 return QLA_FUNCTION_FAILED;
3848
3849 e->u.aenfx.evtcode = evtcode;
3850 e->u.aenfx.count = cnt;
3851 memcpy(e->u.aenfx.mbx, data, sizeof(*data) * cnt);
3852 return qla2x00_post_work(vha, e);
3853}
3854
3713void 3855void
3714qla2x00_do_work(struct scsi_qla_host *vha) 3856qla2x00_do_work(struct scsi_qla_host *vha)
3715{ 3857{
@@ -3758,6 +3900,9 @@ qla2x00_do_work(struct scsi_qla_host *vha)
3758 case QLA_EVT_UEVENT: 3900 case QLA_EVT_UEVENT:
3759 qla2x00_uevent_emit(vha, e->u.uevent.code); 3901 qla2x00_uevent_emit(vha, e->u.uevent.code);
3760 break; 3902 break;
3903 case QLA_EVT_AENFX:
3904 qlafx00_process_aen(vha, e);
3905 break;
3761 } 3906 }
3762 if (e->flags & QLA_EVT_FLAG_FREE) 3907 if (e->flags & QLA_EVT_FLAG_FREE)
3763 kfree(e); 3908 kfree(e);
@@ -4592,6 +4737,38 @@ qla2x00_do_dpc(void *data)
4592 ql_dbg(ql_dbg_dpc, base_vha, 0x4006, 4737 ql_dbg(ql_dbg_dpc, base_vha, 0x4006,
4593 "FCoE context reset end.\n"); 4738 "FCoE context reset end.\n");
4594 } 4739 }
4740 } else if (IS_QLAFX00(ha)) {
4741 if (test_and_clear_bit(ISP_UNRECOVERABLE,
4742 &base_vha->dpc_flags)) {
4743 ql_dbg(ql_dbg_dpc, base_vha, 0x4020,
4744 "Firmware Reset Recovery\n");
4745 if (qlafx00_reset_initialize(base_vha)) {
4746 /* Failed. Abort isp later. */
4747 if (!test_bit(UNLOADING,
4748 &base_vha->dpc_flags))
4749 set_bit(ISP_UNRECOVERABLE,
4750 &base_vha->dpc_flags);
4751 ql_dbg(ql_dbg_dpc, base_vha,
4752 0x4021,
4753 "Reset Recovery Failed\n");
4754 }
4755 }
4756
4757 if (test_and_clear_bit(FX00_TARGET_SCAN,
4758 &base_vha->dpc_flags)) {
4759 ql_dbg(ql_dbg_dpc, base_vha, 0x4022,
4760 "ISPFx00 Target Scan scheduled\n");
4761 if (qlafx00_rescan_isp(base_vha)) {
4762 if (!test_bit(UNLOADING,
4763 &base_vha->dpc_flags))
4764 set_bit(ISP_UNRECOVERABLE,
4765 &base_vha->dpc_flags);
4766 ql_dbg(ql_dbg_dpc, base_vha, 0x401e,
4767 "ISPFx00 Target Scan Failed\n");
4768 }
4769 ql_dbg(ql_dbg_dpc, base_vha, 0x401f,
4770 "ISPFx00 Target Scan End\n");
4771 }
4595 } 4772 }
4596 4773
4597 if (test_and_clear_bit(ISP_ABORT_NEEDED, 4774 if (test_and_clear_bit(ISP_ABORT_NEEDED,
@@ -4630,6 +4807,9 @@ qla2x00_do_dpc(void *data)
4630 clear_bit(SCR_PENDING, &base_vha->dpc_flags); 4807 clear_bit(SCR_PENDING, &base_vha->dpc_flags);
4631 } 4808 }
4632 4809
4810 if (IS_QLAFX00(ha))
4811 goto loop_resync_check;
4812
4633 if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) { 4813 if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
4634 ql_dbg(ql_dbg_dpc, base_vha, 0x4009, 4814 ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
4635 "Quiescence mode scheduled.\n"); 4815 "Quiescence mode scheduled.\n");
@@ -4654,7 +4834,7 @@ qla2x00_do_dpc(void *data)
4654 } 4834 }
4655 4835
4656 if (test_and_clear_bit(RESET_MARKER_NEEDED, 4836 if (test_and_clear_bit(RESET_MARKER_NEEDED,
4657 &base_vha->dpc_flags) && 4837 &base_vha->dpc_flags) &&
4658 (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) { 4838 (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
4659 4839
4660 ql_dbg(ql_dbg_dpc, base_vha, 0x400b, 4840 ql_dbg(ql_dbg_dpc, base_vha, 0x400b,
@@ -4677,9 +4857,9 @@ qla2x00_do_dpc(void *data)
4677 ql_dbg(ql_dbg_dpc, base_vha, 0x400e, 4857 ql_dbg(ql_dbg_dpc, base_vha, 0x400e,
4678 "Relogin end.\n"); 4858 "Relogin end.\n");
4679 } 4859 }
4680 4860loop_resync_check:
4681 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, 4861 if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
4682 &base_vha->dpc_flags)) { 4862 &base_vha->dpc_flags)) {
4683 4863
4684 ql_dbg(ql_dbg_dpc, base_vha, 0x400f, 4864 ql_dbg(ql_dbg_dpc, base_vha, 0x400f,
4685 "Loop resync scheduled.\n"); 4865 "Loop resync scheduled.\n");
@@ -4697,6 +4877,9 @@ qla2x00_do_dpc(void *data)
4697 "Loop resync end.\n"); 4877 "Loop resync end.\n");
4698 } 4878 }
4699 4879
4880 if (IS_QLAFX00(ha))
4881 goto intr_on_check;
4882
4700 if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) && 4883 if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
4701 atomic_read(&base_vha->loop_state) == LOOP_READY) { 4884 atomic_read(&base_vha->loop_state) == LOOP_READY) {
4702 clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags); 4885 clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags);
@@ -4714,7 +4897,7 @@ qla2x00_do_dpc(void *data)
4714 if (test_and_clear_bit(HOST_RAMP_UP_QUEUE_DEPTH, 4897 if (test_and_clear_bit(HOST_RAMP_UP_QUEUE_DEPTH,
4715 &base_vha->dpc_flags)) 4898 &base_vha->dpc_flags))
4716 qla2x00_host_ramp_up_queuedepth(base_vha); 4899 qla2x00_host_ramp_up_queuedepth(base_vha);
4717 4900intr_on_check:
4718 if (!ha->interrupts_on) 4901 if (!ha->interrupts_on)
4719 ha->isp_ops->enable_intrs(ha); 4902 ha->isp_ops->enable_intrs(ha);
4720 4903
@@ -4722,7 +4905,8 @@ qla2x00_do_dpc(void *data)
4722 &base_vha->dpc_flags)) 4905 &base_vha->dpc_flags))
4723 ha->isp_ops->beacon_blink(base_vha); 4906 ha->isp_ops->beacon_blink(base_vha);
4724 4907
4725 qla2x00_do_dpc_all_vps(base_vha); 4908 if (!IS_QLAFX00(ha))
4909 qla2x00_do_dpc_all_vps(base_vha);
4726 4910
4727 ha->dpc_active = 0; 4911 ha->dpc_active = 0;
4728end_loop: 4912end_loop:
@@ -4818,6 +5002,9 @@ qla2x00_timer(scsi_qla_host_t *vha)
4818 qla82xx_watchdog(vha); 5002 qla82xx_watchdog(vha);
4819 } 5003 }
4820 5004
5005 if (!vha->vp_idx && IS_QLAFX00(ha))
5006 qlafx00_timer_routine(vha);
5007
4821 /* Loop down handler. */ 5008 /* Loop down handler. */
4822 if (atomic_read(&vha->loop_down_timer) > 0 && 5009 if (atomic_read(&vha->loop_down_timer) > 0 &&
4823 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) && 5010 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
@@ -5335,6 +5522,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
5335 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) }, 5522 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
5336 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) }, 5523 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
5337 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) }, 5524 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) },
5525 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) },
5338 { 0 }, 5526 { 0 },
5339}; 5527};
5340MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); 5528MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index ec54036d1e12..6c66d22eb1b1 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.04.00.13-k" 10#define QLA2XXX_VERSION "8.05.00.03-k"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 4 13#define QLA_DRIVER_MINOR_VER 5
14#define QLA_DRIVER_PATCH_VER 0 14#define QLA_DRIVER_PATCH_VER 0
15#define QLA_DRIVER_BETA_VER 0 15#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c b/drivers/scsi/qla4xxx/ql4_83xx.c
index 5d8fe4f75650..d607eb8e24cb 100644
--- a/drivers/scsi/qla4xxx/ql4_83xx.c
+++ b/drivers/scsi/qla4xxx/ql4_83xx.c
@@ -1629,9 +1629,37 @@ static void __qla4_83xx_disable_pause(struct scsi_qla_host *ha)
1629 ql4_printk(KERN_INFO, ha, "Disabled pause frames successfully.\n"); 1629 ql4_printk(KERN_INFO, ha, "Disabled pause frames successfully.\n");
1630} 1630}
1631 1631
1632/**
1633 * qla4_83xx_eport_init - Initialize EPort.
1634 * @ha: Pointer to host adapter structure.
1635 *
1636 * If EPort hardware is in reset state before disabling pause, there would be
1637 * serious hardware wedging issues. To prevent this, perform eport init every time
1638 * before disabling pause frames.
1639 **/
1640static void qla4_83xx_eport_init(struct scsi_qla_host *ha)
1641{
1642 /* Clear the 8 registers */
1643 qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_REG, 0x0);
1644 qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT0, 0x0);
1645 qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT1, 0x0);
1646 qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT2, 0x0);
1647 qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT3, 0x0);
1648 qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_SRE_SHIM, 0x0);
1649 qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_EPG_SHIM, 0x0);
1650 qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_ETHER_PCS, 0x0);
1651
1652 /* Write any value to Reset Control register */
1653 qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_CONTROL, 0xFF);
1654
1655 ql4_printk(KERN_INFO, ha, "EPORT is out of reset.\n");
1656}
1657
1632void qla4_83xx_disable_pause(struct scsi_qla_host *ha) 1658void qla4_83xx_disable_pause(struct scsi_qla_host *ha)
1633{ 1659{
1634 ha->isp_ops->idc_lock(ha); 1660 ha->isp_ops->idc_lock(ha);
1661 /* Before disabling pause frames, ensure that eport is not in reset */
1662 qla4_83xx_eport_init(ha);
1635 qla4_83xx_dump_pause_control_regs(ha); 1663 qla4_83xx_dump_pause_control_regs(ha);
1636 __qla4_83xx_disable_pause(ha); 1664 __qla4_83xx_disable_pause(ha);
1637 ha->isp_ops->idc_unlock(ha); 1665 ha->isp_ops->idc_unlock(ha);
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.h b/drivers/scsi/qla4xxx/ql4_83xx.h
index 6a00f903f2a6..fab237fa32cc 100644
--- a/drivers/scsi/qla4xxx/ql4_83xx.h
+++ b/drivers/scsi/qla4xxx/ql4_83xx.h
@@ -55,6 +55,16 @@
55#define QLA83XX_SET_PAUSE_VAL 0x0 55#define QLA83XX_SET_PAUSE_VAL 0x0
56#define QLA83XX_SET_TC_MAX_CELL_VAL 0x03FF03FF 56#define QLA83XX_SET_TC_MAX_CELL_VAL 0x03FF03FF
57 57
58#define QLA83XX_RESET_CONTROL 0x28084E50
59#define QLA83XX_RESET_REG 0x28084E60
60#define QLA83XX_RESET_PORT0 0x28084E70
61#define QLA83XX_RESET_PORT1 0x28084E80
62#define QLA83XX_RESET_PORT2 0x28084E90
63#define QLA83XX_RESET_PORT3 0x28084EA0
64#define QLA83XX_RESET_SRE_SHIM 0x28084EB0
65#define QLA83XX_RESET_EPG_SHIM 0x28084EC0
66#define QLA83XX_RESET_ETHER_PCS 0x28084ED0
67
58/* qla_83xx_reg_tbl registers */ 68/* qla_83xx_reg_tbl registers */
59#define QLA83XX_PEG_HALT_STATUS1 0x34A8 69#define QLA83XX_PEG_HALT_STATUS1 0x34A8
60#define QLA83XX_PEG_HALT_STATUS2 0x34AC 70#define QLA83XX_PEG_HALT_STATUS2 0x34AC
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.h b/drivers/scsi/qla4xxx/ql4_dbg.h
index 5b0afc18ef18..51c365bcf912 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.h
+++ b/drivers/scsi/qla4xxx/ql4_dbg.h
@@ -12,6 +12,7 @@
12/* #define QL_DEBUG_LEVEL_3 */ /* Output function tracing */ 12/* #define QL_DEBUG_LEVEL_3 */ /* Output function tracing */
13/* #define QL_DEBUG_LEVEL_4 */ 13/* #define QL_DEBUG_LEVEL_4 */
14/* #define QL_DEBUG_LEVEL_5 */ 14/* #define QL_DEBUG_LEVEL_5 */
15/* #define QL_DEBUG_LEVEL_7 */
15/* #define QL_DEBUG_LEVEL_9 */ 16/* #define QL_DEBUG_LEVEL_9 */
16 17
17#define QL_DEBUG_LEVEL_2 /* Always enable error messages */ 18#define QL_DEBUG_LEVEL_2 /* Always enable error messages */
@@ -48,6 +49,12 @@
48#define DEBUG5(x) do {} while (0); 49#define DEBUG5(x) do {} while (0);
49#endif /* */ 50#endif /* */
50 51
52#if defined(QL_DEBUG_LEVEL_7)
53#define DEBUG7(x) do {x; } while (0)
54#else /* */
55#define DEBUG7(x) do {} while (0)
56#endif /* */
57
51#if defined(QL_DEBUG_LEVEL_9) 58#if defined(QL_DEBUG_LEVEL_9)
52#define DEBUG9(x) do {x;} while (0); 59#define DEBUG9(x) do {x;} while (0);
53#else /* */ 60#else /* */
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 129f5dd02822..ddf16a86bbf5 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -159,6 +159,22 @@
159#define LSDW(x) ((u32)((u64)(x))) 159#define LSDW(x) ((u32)((u64)(x)))
160#define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16)) 160#define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16))
161 161
162#define DEV_DB_NON_PERSISTENT 0
163#define DEV_DB_PERSISTENT 1
164
165#define COPY_ISID(dst_isid, src_isid) { \
166 int i, j; \
167 for (i = 0, j = ISID_SIZE - 1; i < ISID_SIZE;) \
168 dst_isid[i++] = src_isid[j--]; \
169}
170
171#define SET_BITVAL(o, n, v) { \
172 if (o) \
173 n |= v; \
174 else \
175 n &= ~v; \
176}
177
162/* 178/*
163 * Retry & Timeout Values 179 * Retry & Timeout Values
164 */ 180 */
@@ -363,6 +379,8 @@ struct ql82xx_hw_data {
363 uint32_t flt_iscsi_param; 379 uint32_t flt_iscsi_param;
364 uint32_t flt_region_chap; 380 uint32_t flt_region_chap;
365 uint32_t flt_chap_size; 381 uint32_t flt_chap_size;
382 uint32_t flt_region_ddb;
383 uint32_t flt_ddb_size;
366}; 384};
367 385
368struct qla4_8xxx_legacy_intr_set { 386struct qla4_8xxx_legacy_intr_set {
@@ -501,6 +519,7 @@ struct scsi_qla_host {
501#define AF_INIT_DONE 1 /* 0x00000002 */ 519#define AF_INIT_DONE 1 /* 0x00000002 */
502#define AF_MBOX_COMMAND 2 /* 0x00000004 */ 520#define AF_MBOX_COMMAND 2 /* 0x00000004 */
503#define AF_MBOX_COMMAND_DONE 3 /* 0x00000008 */ 521#define AF_MBOX_COMMAND_DONE 3 /* 0x00000008 */
522#define AF_ST_DISCOVERY_IN_PROGRESS 4 /* 0x00000010 */
504#define AF_INTERRUPTS_ON 6 /* 0x00000040 */ 523#define AF_INTERRUPTS_ON 6 /* 0x00000040 */
505#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */ 524#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */
506#define AF_LINK_UP 8 /* 0x00000100 */ 525#define AF_LINK_UP 8 /* 0x00000100 */
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index ad9d2e2d370f..c7b8892b5a83 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -288,6 +288,8 @@ union external_hw_config_reg {
288#define FA_GOLD_RISC_CODE_ADDR_82 0x80000 288#define FA_GOLD_RISC_CODE_ADDR_82 0x80000
289#define FA_FLASH_ISCSI_CHAP 0x540000 289#define FA_FLASH_ISCSI_CHAP 0x540000
290#define FA_FLASH_CHAP_SIZE 0xC0000 290#define FA_FLASH_CHAP_SIZE 0xC0000
291#define FA_FLASH_ISCSI_DDB 0x420000
292#define FA_FLASH_DDB_SIZE 0x080000
291 293
292/* Flash Description Table */ 294/* Flash Description Table */
293struct qla_fdt_layout { 295struct qla_fdt_layout {
@@ -348,6 +350,7 @@ struct qla_flt_header {
348#define FLT_REG_BOOT_CODE_82 0x78 350#define FLT_REG_BOOT_CODE_82 0x78
349#define FLT_REG_ISCSI_PARAM 0x65 351#define FLT_REG_ISCSI_PARAM 0x65
350#define FLT_REG_ISCSI_CHAP 0x63 352#define FLT_REG_ISCSI_CHAP 0x63
353#define FLT_REG_ISCSI_DDB 0x6A
351 354
352struct qla_flt_region { 355struct qla_flt_region {
353 uint32_t code; 356 uint32_t code;
@@ -490,12 +493,16 @@ struct qla_flt_region {
490#define MBOX_ASTS_SUBNET_STATE_CHANGE 0x8027 493#define MBOX_ASTS_SUBNET_STATE_CHANGE 0x8027
491#define MBOX_ASTS_RESPONSE_QUEUE_FULL 0x8028 494#define MBOX_ASTS_RESPONSE_QUEUE_FULL 0x8028
492#define MBOX_ASTS_IP_ADDR_STATE_CHANGED 0x8029 495#define MBOX_ASTS_IP_ADDR_STATE_CHANGED 0x8029
496#define MBOX_ASTS_IPV6_DEFAULT_ROUTER_CHANGED 0x802A
493#define MBOX_ASTS_IPV6_PREFIX_EXPIRED 0x802B 497#define MBOX_ASTS_IPV6_PREFIX_EXPIRED 0x802B
494#define MBOX_ASTS_IPV6_ND_PREFIX_IGNORED 0x802C 498#define MBOX_ASTS_IPV6_ND_PREFIX_IGNORED 0x802C
495#define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED 0x802D 499#define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED 0x802D
496#define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD 0x802E 500#define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD 0x802E
501#define MBOX_ASTS_INITIALIZATION_FAILED 0x8031
502#define MBOX_ASTS_SYSTEM_WARNING_EVENT 0x8036
497#define MBOX_ASTS_IDC_COMPLETE 0x8100 503#define MBOX_ASTS_IDC_COMPLETE 0x8100
498#define MBOX_ASTS_IDC_REQUEST_NOTIFICATION 0x8101 504#define MBOX_ASTS_IDC_REQUEST_NOTIFICATION 0x8101
505#define MBOX_ASTS_DCBX_CONF_CHANGE 0x8110
499#define MBOX_ASTS_TXSCVR_INSERTED 0x8130 506#define MBOX_ASTS_TXSCVR_INSERTED 0x8130
500#define MBOX_ASTS_TXSCVR_REMOVED 0x8131 507#define MBOX_ASTS_TXSCVR_REMOVED 0x8131
501 508
@@ -779,12 +786,41 @@ struct dev_db_entry {
779#define DDB_OPT_IPV6_NULL_LINK_LOCAL 0x800 /* post connection */ 786#define DDB_OPT_IPV6_NULL_LINK_LOCAL 0x800 /* post connection */
780#define DDB_OPT_IPV6_FW_DEFINED_LINK_LOCAL 0x800 /* pre connection */ 787#define DDB_OPT_IPV6_FW_DEFINED_LINK_LOCAL 0x800 /* pre connection */
781 788
789#define OPT_IS_FW_ASSIGNED_IPV6 11
790#define OPT_IPV6_DEVICE 8
791#define OPT_AUTO_SENDTGTS_DISABLE 6
792#define OPT_DISC_SESSION 4
793#define OPT_ENTRY_STATE 3
782 uint16_t exec_throttle; /* 02-03 */ 794 uint16_t exec_throttle; /* 02-03 */
783 uint16_t exec_count; /* 04-05 */ 795 uint16_t exec_count; /* 04-05 */
784 uint16_t res0; /* 06-07 */ 796 uint16_t res0; /* 06-07 */
785 uint16_t iscsi_options; /* 08-09 */ 797 uint16_t iscsi_options; /* 08-09 */
798#define ISCSIOPT_HEADER_DIGEST_EN 13
799#define ISCSIOPT_DATA_DIGEST_EN 12
800#define ISCSIOPT_IMMEDIATE_DATA_EN 11
801#define ISCSIOPT_INITIAL_R2T_EN 10
802#define ISCSIOPT_DATA_SEQ_IN_ORDER 9
803#define ISCSIOPT_DATA_PDU_IN_ORDER 8
804#define ISCSIOPT_CHAP_AUTH_EN 7
805#define ISCSIOPT_SNACK_REQ_EN 6
806#define ISCSIOPT_DISCOVERY_LOGOUT_EN 5
807#define ISCSIOPT_BIDI_CHAP_EN 4
808#define ISCSIOPT_DISCOVERY_AUTH_OPTIONAL 3
809#define ISCSIOPT_ERL1 1
810#define ISCSIOPT_ERL0 0
811
786 uint16_t tcp_options; /* 0A-0B */ 812 uint16_t tcp_options; /* 0A-0B */
813#define TCPOPT_TIMESTAMP_STAT 6
814#define TCPOPT_NAGLE_DISABLE 5
815#define TCPOPT_WSF_DISABLE 4
816#define TCPOPT_TIMER_SCALE3 3
817#define TCPOPT_TIMER_SCALE2 2
818#define TCPOPT_TIMER_SCALE1 1
819#define TCPOPT_TIMESTAMP_EN 0
820
787 uint16_t ip_options; /* 0C-0D */ 821 uint16_t ip_options; /* 0C-0D */
822#define IPOPT_FRAGMENT_DISABLE 4
823
788 uint16_t iscsi_max_rcv_data_seg_len; /* 0E-0F */ 824 uint16_t iscsi_max_rcv_data_seg_len; /* 0E-0F */
789#define BYTE_UNITS 512 825#define BYTE_UNITS 512
790 uint32_t res1; /* 10-13 */ 826 uint32_t res1; /* 10-13 */
@@ -816,6 +852,8 @@ struct dev_db_entry {
816 * much RAM */ 852 * much RAM */
817 uint8_t link_local_ipv6_addr[0x10]; /* 1A0-1AF */ 853 uint8_t link_local_ipv6_addr[0x10]; /* 1A0-1AF */
818 uint8_t res5[0x10]; /* 1B0-1BF */ 854 uint8_t res5[0x10]; /* 1B0-1BF */
855#define DDB_NO_LINK 0xFFFF
856#define DDB_ISNS 0xFFFD
819 uint16_t ddb_link; /* 1C0-1C1 */ 857 uint16_t ddb_link; /* 1C0-1C1 */
820 uint16_t chap_tbl_idx; /* 1C2-1C3 */ 858 uint16_t chap_tbl_idx; /* 1C2-1C3 */
821 uint16_t tgt_portal_grp; /* 1C4-1C5 */ 859 uint16_t tgt_portal_grp; /* 1C4-1C5 */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 982293edf02c..4a428009f699 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -191,6 +191,9 @@ int qla4xxx_ping_iocb(struct scsi_qla_host *ha, uint32_t options,
191int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha, 191int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha,
192 uint32_t status, uint32_t pid, 192 uint32_t status, uint32_t pid,
193 uint32_t data_size, uint8_t *data); 193 uint32_t data_size, uint8_t *data);
194int qla4xxx_flashdb_by_index(struct scsi_qla_host *ha,
195 struct dev_db_entry *fw_ddb_entry,
196 dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index);
194 197
195/* BSG Functions */ 198/* BSG Functions */
196int qla4xxx_bsg_request(struct bsg_job *bsg_job); 199int qla4xxx_bsg_request(struct bsg_job *bsg_job);
@@ -224,8 +227,6 @@ void qla4_83xx_interrupt_service_routine(struct scsi_qla_host *ha,
224int qla4_83xx_isp_reset(struct scsi_qla_host *ha); 227int qla4_83xx_isp_reset(struct scsi_qla_host *ha);
225void qla4_83xx_queue_iocb(struct scsi_qla_host *ha); 228void qla4_83xx_queue_iocb(struct scsi_qla_host *ha);
226void qla4_83xx_complete_iocb(struct scsi_qla_host *ha); 229void qla4_83xx_complete_iocb(struct scsi_qla_host *ha);
227uint16_t qla4_83xx_rd_shdw_req_q_out(struct scsi_qla_host *ha);
228uint16_t qla4_83xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha);
229uint32_t qla4_83xx_rd_reg(struct scsi_qla_host *ha, ulong addr); 230uint32_t qla4_83xx_rd_reg(struct scsi_qla_host *ha, ulong addr);
230void qla4_83xx_wr_reg(struct scsi_qla_host *ha, ulong addr, uint32_t val); 231void qla4_83xx_wr_reg(struct scsi_qla_host *ha, ulong addr, uint32_t val);
231int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr, 232int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
@@ -261,6 +262,10 @@ int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha);
261void qla4_83xx_disable_pause(struct scsi_qla_host *ha); 262void qla4_83xx_disable_pause(struct scsi_qla_host *ha);
262void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha); 263void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha);
263int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha); 264int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha);
265int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, uint32_t options,
266 dma_addr_t dma_addr);
267int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username,
268 char *password, uint16_t chap_index);
264 269
265extern int ql4xextended_error_logging; 270extern int ql4xextended_error_logging;
266extern int ql4xdontresethba; 271extern int ql4xdontresethba;
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 1b83dc283d2e..482287f4005f 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -396,7 +396,6 @@ static void qla4xxx_passthru_status_entry(struct scsi_qla_host *ha,
396 396
397 task_data = task->dd_data; 397 task_data = task->dd_data;
398 memcpy(&task_data->sts, sts_entry, sizeof(struct passthru_status)); 398 memcpy(&task_data->sts, sts_entry, sizeof(struct passthru_status));
399 ha->req_q_count += task_data->iocb_req_cnt;
400 ha->iocb_cnt -= task_data->iocb_req_cnt; 399 ha->iocb_cnt -= task_data->iocb_req_cnt;
401 queue_work(ha->task_wq, &task_data->task_work); 400 queue_work(ha->task_wq, &task_data->task_work);
402} 401}
@@ -416,7 +415,6 @@ static struct mrb *qla4xxx_del_mrb_from_active_array(struct scsi_qla_host *ha,
416 return mrb; 415 return mrb;
417 416
418 /* update counters */ 417 /* update counters */
419 ha->req_q_count += mrb->iocb_cnt;
420 ha->iocb_cnt -= mrb->iocb_cnt; 418 ha->iocb_cnt -= mrb->iocb_cnt;
421 419
422 return mrb; 420 return mrb;
@@ -877,6 +875,43 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
877 } 875 }
878 break; 876 break;
879 877
878 case MBOX_ASTS_IPV6_DEFAULT_ROUTER_CHANGED:
879 DEBUG2(ql4_printk(KERN_INFO, ha,
880 "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
881 ha->host_no, mbox_sts[0], mbox_sts[1],
882 mbox_sts[2], mbox_sts[3], mbox_sts[4],
883 mbox_sts[5]));
884 DEBUG2(ql4_printk(KERN_INFO, ha,
885 "scsi%ld: AEN %04x Received IPv6 default router changed notification\n",
886 ha->host_no, mbox_sts[0]));
887 break;
888
889 case MBOX_ASTS_INITIALIZATION_FAILED:
890 DEBUG2(ql4_printk(KERN_INFO, ha,
891 "scsi%ld: AEN %04x, mbox_sts[3]=%08x\n",
892 ha->host_no, mbox_sts[0],
893 mbox_sts[3]));
894 break;
895
896 case MBOX_ASTS_SYSTEM_WARNING_EVENT:
897 DEBUG2(ql4_printk(KERN_WARNING, ha,
898 "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
899 ha->host_no, mbox_sts[0], mbox_sts[1],
900 mbox_sts[2], mbox_sts[3], mbox_sts[4],
901 mbox_sts[5]));
902 break;
903
904 case MBOX_ASTS_DCBX_CONF_CHANGE:
905 DEBUG2(ql4_printk(KERN_INFO, ha,
906 "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
907 ha->host_no, mbox_sts[0], mbox_sts[1],
908 mbox_sts[2], mbox_sts[3], mbox_sts[4],
909 mbox_sts[5]));
910 DEBUG2(ql4_printk(KERN_INFO, ha,
911 "scsi%ld: AEN %04x Received DCBX configuration changed notification\n",
912 ha->host_no, mbox_sts[0]));
913 break;
914
880 default: 915 default:
881 DEBUG2(printk(KERN_WARNING 916 DEBUG2(printk(KERN_WARNING
882 "scsi%ld: AEN %04x UNKNOWN\n", 917 "scsi%ld: AEN %04x UNKNOWN\n",
@@ -1099,8 +1134,8 @@ irqreturn_t qla4_82xx_intr_handler(int irq, void *dev_id)
1099 1134
1100 status = qla4_82xx_rd_32(ha, ISR_INT_STATE_REG); 1135 status = qla4_82xx_rd_32(ha, ISR_INT_STATE_REG);
1101 if (!ISR_IS_LEGACY_INTR_TRIGGERED(status)) { 1136 if (!ISR_IS_LEGACY_INTR_TRIGGERED(status)) {
1102 DEBUG2(ql4_printk(KERN_INFO, ha, 1137 DEBUG7(ql4_printk(KERN_INFO, ha,
1103 "%s legacy Int not triggered\n", __func__)); 1138 "%s legacy Int not triggered\n", __func__));
1104 return IRQ_NONE; 1139 return IRQ_NONE;
1105 } 1140 }
1106 1141
@@ -1158,7 +1193,7 @@ irqreturn_t qla4_83xx_intr_handler(int irq, void *dev_id)
1158 1193
1159 /* Legacy interrupt is valid if bit31 of leg_int_ptr is set */ 1194 /* Legacy interrupt is valid if bit31 of leg_int_ptr is set */
1160 if (!(leg_int_ptr & LEG_INT_PTR_B31)) { 1195 if (!(leg_int_ptr & LEG_INT_PTR_B31)) {
1161 DEBUG2(ql4_printk(KERN_ERR, ha, 1196 DEBUG7(ql4_printk(KERN_ERR, ha,
1162 "%s: Legacy Interrupt Bit 31 not set, spurious interrupt!\n", 1197 "%s: Legacy Interrupt Bit 31 not set, spurious interrupt!\n",
1163 __func__)); 1198 __func__));
1164 return IRQ_NONE; 1199 return IRQ_NONE;
@@ -1166,7 +1201,7 @@ irqreturn_t qla4_83xx_intr_handler(int irq, void *dev_id)
1166 1201
1167 /* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */ 1202 /* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */
1168 if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit) { 1203 if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit) {
1169 DEBUG2(ql4_printk(KERN_ERR, ha, 1204 DEBUG7(ql4_printk(KERN_ERR, ha,
1170 "%s: Incorrect function ID 0x%x in legacy interrupt register, ha->pf_bit = 0x%x\n", 1205 "%s: Incorrect function ID 0x%x in legacy interrupt register, ha->pf_bit = 0x%x\n",
1171 __func__, (leg_int_ptr & PF_BITS_MASK), 1206 __func__, (leg_int_ptr & PF_BITS_MASK),
1172 ha->pf_bit)); 1207 ha->pf_bit));
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 160d33697216..a501beab3ffe 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -1129,6 +1129,7 @@ int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry,
1129{ 1129{
1130 uint32_t mbox_cmd[MBOX_REG_COUNT]; 1130 uint32_t mbox_cmd[MBOX_REG_COUNT];
1131 uint32_t mbox_sts[MBOX_REG_COUNT]; 1131 uint32_t mbox_sts[MBOX_REG_COUNT];
1132 uint32_t scsi_lun[2];
1132 int status = QLA_SUCCESS; 1133 int status = QLA_SUCCESS;
1133 1134
1134 DEBUG2(printk("scsi%ld:%d:%d: lun reset issued\n", ha->host_no, 1135 DEBUG2(printk("scsi%ld:%d:%d: lun reset issued\n", ha->host_no,
@@ -1140,10 +1141,16 @@ int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry,
1140 */ 1141 */
1141 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 1142 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
1142 memset(&mbox_sts, 0, sizeof(mbox_sts)); 1143 memset(&mbox_sts, 0, sizeof(mbox_sts));
1144 int_to_scsilun(lun, (struct scsi_lun *) scsi_lun);
1143 1145
1144 mbox_cmd[0] = MBOX_CMD_LUN_RESET; 1146 mbox_cmd[0] = MBOX_CMD_LUN_RESET;
1145 mbox_cmd[1] = ddb_entry->fw_ddb_index; 1147 mbox_cmd[1] = ddb_entry->fw_ddb_index;
1146 mbox_cmd[2] = lun << 8; 1148 /* FW expects LUN bytes 0-3 in Incoming Mailbox 2
1149 * (LUN byte 0 is LSByte, byte 3 is MSByte) */
1150 mbox_cmd[2] = cpu_to_le32(scsi_lun[0]);
1151 /* FW expects LUN bytes 4-7 in Incoming Mailbox 3
1152 * (LUN byte 4 is LSByte, byte 7 is MSByte) */
1153 mbox_cmd[3] = cpu_to_le32(scsi_lun[1]);
1147 mbox_cmd[5] = 0x01; /* Immediate Command Enable */ 1154 mbox_cmd[5] = 0x01; /* Immediate Command Enable */
1148 1155
1149 qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]); 1156 qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]);
@@ -1281,8 +1288,8 @@ exit_about_fw:
1281 return status; 1288 return status;
1282} 1289}
1283 1290
1284static int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, uint32_t options, 1291int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, uint32_t options,
1285 dma_addr_t dma_addr) 1292 dma_addr_t dma_addr)
1286{ 1293{
1287 uint32_t mbox_cmd[MBOX_REG_COUNT]; 1294 uint32_t mbox_cmd[MBOX_REG_COUNT];
1288 uint32_t mbox_sts[MBOX_REG_COUNT]; 1295 uint32_t mbox_sts[MBOX_REG_COUNT];
@@ -1410,6 +1417,55 @@ exit_bootdb_failed:
1410 return status; 1417 return status;
1411} 1418}
1412 1419
1420int qla4xxx_flashdb_by_index(struct scsi_qla_host *ha,
1421 struct dev_db_entry *fw_ddb_entry,
1422 dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index)
1423{
1424 uint32_t dev_db_start_offset;
1425 uint32_t dev_db_end_offset;
1426 int status = QLA_ERROR;
1427
1428 memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry));
1429
1430 if (is_qla40XX(ha)) {
1431 dev_db_start_offset = FLASH_OFFSET_DB_INFO;
1432 dev_db_end_offset = FLASH_OFFSET_DB_END;
1433 } else {
1434 dev_db_start_offset = FLASH_RAW_ACCESS_ADDR +
1435 (ha->hw.flt_region_ddb << 2);
1436 /* flt_ddb_size is DDB table size for both ports
1437 * so divide it by 2 to calculate the offset for second port
1438 */
1439 if (ha->port_num == 1)
1440 dev_db_start_offset += (ha->hw.flt_ddb_size / 2);
1441
1442 dev_db_end_offset = dev_db_start_offset +
1443 (ha->hw.flt_ddb_size / 2);
1444 }
1445
1446 dev_db_start_offset += (ddb_index * sizeof(*fw_ddb_entry));
1447
1448 if (dev_db_start_offset > dev_db_end_offset) {
1449 DEBUG2(ql4_printk(KERN_ERR, ha,
1450 "%s:Invalid DDB index %d", __func__,
1451 ddb_index));
1452 goto exit_fdb_failed;
1453 }
1454
1455 if (qla4xxx_get_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
1456 sizeof(*fw_ddb_entry)) != QLA_SUCCESS) {
1457 ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash failed\n",
1458 ha->host_no, __func__);
1459 goto exit_fdb_failed;
1460 }
1461
1462 if (fw_ddb_entry->cookie == DDB_VALID_COOKIE)
1463 status = QLA_SUCCESS;
1464
1465exit_fdb_failed:
1466 return status;
1467}
1468
1413int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password, 1469int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password,
1414 uint16_t idx) 1470 uint16_t idx)
1415{ 1471{
@@ -1503,6 +1559,62 @@ exit_set_chap:
1503 return ret; 1559 return ret;
1504} 1560}
1505 1561
1562
1563int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username,
1564 char *password, uint16_t chap_index)
1565{
1566 int rval = QLA_ERROR;
1567 struct ql4_chap_table *chap_table = NULL;
1568 int max_chap_entries;
1569
1570 if (!ha->chap_list) {
1571 ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
1572 rval = QLA_ERROR;
1573 goto exit_uni_chap;
1574 }
1575
1576 if (!username || !password) {
1577 ql4_printk(KERN_ERR, ha, "No memory for username & secret\n");
1578 rval = QLA_ERROR;
1579 goto exit_uni_chap;
1580 }
1581
1582 if (is_qla80XX(ha))
1583 max_chap_entries = (ha->hw.flt_chap_size / 2) /
1584 sizeof(struct ql4_chap_table);
1585 else
1586 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
1587
1588 if (chap_index > max_chap_entries) {
1589 ql4_printk(KERN_ERR, ha, "Invalid Chap index\n");
1590 rval = QLA_ERROR;
1591 goto exit_uni_chap;
1592 }
1593
1594 mutex_lock(&ha->chap_sem);
1595 chap_table = (struct ql4_chap_table *)ha->chap_list + chap_index;
1596 if (chap_table->cookie != __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
1597 rval = QLA_ERROR;
1598 goto exit_unlock_uni_chap;
1599 }
1600
1601 if (!(chap_table->flags & BIT_6)) {
1602 ql4_printk(KERN_ERR, ha, "Unidirectional entry not set\n");
1603 rval = QLA_ERROR;
1604 goto exit_unlock_uni_chap;
1605 }
1606
1607 strncpy(password, chap_table->secret, MAX_CHAP_SECRET_LEN);
1608 strncpy(username, chap_table->name, MAX_CHAP_NAME_LEN);
1609
1610 rval = QLA_SUCCESS;
1611
1612exit_unlock_uni_chap:
1613 mutex_unlock(&ha->chap_sem);
1614exit_uni_chap:
1615 return rval;
1616}
1617
1506/** 1618/**
1507 * qla4xxx_get_chap_index - Get chap index given username and secret 1619 * qla4xxx_get_chap_index - Get chap index given username and secret
1508 * @ha: pointer to adapter structure 1620 * @ha: pointer to adapter structure
@@ -1524,7 +1636,7 @@ int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,
1524 int max_chap_entries = 0; 1636 int max_chap_entries = 0;
1525 struct ql4_chap_table *chap_table; 1637 struct ql4_chap_table *chap_table;
1526 1638
1527 if (is_qla8022(ha)) 1639 if (is_qla80XX(ha))
1528 max_chap_entries = (ha->hw.flt_chap_size / 2) / 1640 max_chap_entries = (ha->hw.flt_chap_size / 2) /
1529 sizeof(struct ql4_chap_table); 1641 sizeof(struct ql4_chap_table);
1530 else 1642 else
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 9299400d3c9e..eaf00c162eb2 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -3154,6 +3154,10 @@ qla4_8xxx_get_flt_info(struct scsi_qla_host *ha, uint32_t flt_addr)
3154 hw->flt_region_chap = start; 3154 hw->flt_region_chap = start;
3155 hw->flt_chap_size = le32_to_cpu(region->size); 3155 hw->flt_chap_size = le32_to_cpu(region->size);
3156 break; 3156 break;
3157 case FLT_REG_ISCSI_DDB:
3158 hw->flt_region_ddb = start;
3159 hw->flt_ddb_size = le32_to_cpu(region->size);
3160 break;
3157 } 3161 }
3158 } 3162 }
3159 goto done; 3163 goto done;
@@ -3166,14 +3170,19 @@ no_flash_data:
3166 hw->flt_region_boot = FA_BOOT_CODE_ADDR_82; 3170 hw->flt_region_boot = FA_BOOT_CODE_ADDR_82;
3167 hw->flt_region_bootload = FA_BOOT_LOAD_ADDR_82; 3171 hw->flt_region_bootload = FA_BOOT_LOAD_ADDR_82;
3168 hw->flt_region_fw = FA_RISC_CODE_ADDR_82; 3172 hw->flt_region_fw = FA_RISC_CODE_ADDR_82;
3169 hw->flt_region_chap = FA_FLASH_ISCSI_CHAP; 3173 hw->flt_region_chap = FA_FLASH_ISCSI_CHAP >> 2;
3170 hw->flt_chap_size = FA_FLASH_CHAP_SIZE; 3174 hw->flt_chap_size = FA_FLASH_CHAP_SIZE;
3175 hw->flt_region_ddb = FA_FLASH_ISCSI_DDB >> 2;
3176 hw->flt_ddb_size = FA_FLASH_DDB_SIZE;
3171 3177
3172done: 3178done:
3173 DEBUG2(ql4_printk(KERN_INFO, ha, "FLT[%s]: flt=0x%x fdt=0x%x " 3179 DEBUG2(ql4_printk(KERN_INFO, ha,
3174 "boot=0x%x bootload=0x%x fw=0x%x\n", loc, hw->flt_region_flt, 3180 "FLT[%s]: flt=0x%x fdt=0x%x boot=0x%x bootload=0x%x fw=0x%x chap=0x%x chap_size=0x%x ddb=0x%x ddb_size=0x%x\n",
3175 hw->flt_region_fdt, hw->flt_region_boot, hw->flt_region_bootload, 3181 loc, hw->flt_region_flt, hw->flt_region_fdt,
3176 hw->flt_region_fw)); 3182 hw->flt_region_boot, hw->flt_region_bootload,
3183 hw->flt_region_fw, hw->flt_region_chap,
3184 hw->flt_chap_size, hw->flt_region_ddb,
3185 hw->flt_ddb_size));
3177} 3186}
3178 3187
3179static void 3188static void
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 6142729167f4..a47f99957ba8 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -166,6 +166,26 @@ static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
166static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth, 166static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
167 int reason); 167 int reason);
168 168
169/*
170 * iSCSI Flash DDB sysfs entry points
171 */
172static int
173qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
174 struct iscsi_bus_flash_conn *fnode_conn,
175 void *data, int len);
176static int
177qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
178 int param, char *buf);
179static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
180 int len);
181static int
182qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess);
183static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
184 struct iscsi_bus_flash_conn *fnode_conn);
185static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
186 struct iscsi_bus_flash_conn *fnode_conn);
187static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess);
188
169static struct qla4_8xxx_legacy_intr_set legacy_intr[] = 189static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
170 QLA82XX_LEGACY_INTR_CONFIG; 190 QLA82XX_LEGACY_INTR_CONFIG;
171 191
@@ -232,6 +252,13 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
232 .send_ping = qla4xxx_send_ping, 252 .send_ping = qla4xxx_send_ping,
233 .get_chap = qla4xxx_get_chap_list, 253 .get_chap = qla4xxx_get_chap_list,
234 .delete_chap = qla4xxx_delete_chap, 254 .delete_chap = qla4xxx_delete_chap,
255 .get_flashnode_param = qla4xxx_sysfs_ddb_get_param,
256 .set_flashnode_param = qla4xxx_sysfs_ddb_set_param,
257 .new_flashnode = qla4xxx_sysfs_ddb_add,
258 .del_flashnode = qla4xxx_sysfs_ddb_delete,
259 .login_flashnode = qla4xxx_sysfs_ddb_login,
260 .logout_flashnode = qla4xxx_sysfs_ddb_logout,
261 .logout_flashnode_sid = qla4xxx_sysfs_ddb_logout_sid,
235}; 262};
236 263
237static struct scsi_transport_template *qla4xxx_scsi_transport; 264static struct scsi_transport_template *qla4xxx_scsi_transport;
@@ -376,6 +403,68 @@ static umode_t qla4_attr_is_visible(int param_type, int param)
376 default: 403 default:
377 return 0; 404 return 0;
378 } 405 }
406 case ISCSI_FLASHNODE_PARAM:
407 switch (param) {
408 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
409 case ISCSI_FLASHNODE_PORTAL_TYPE:
410 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
411 case ISCSI_FLASHNODE_DISCOVERY_SESS:
412 case ISCSI_FLASHNODE_ENTRY_EN:
413 case ISCSI_FLASHNODE_HDR_DGST_EN:
414 case ISCSI_FLASHNODE_DATA_DGST_EN:
415 case ISCSI_FLASHNODE_IMM_DATA_EN:
416 case ISCSI_FLASHNODE_INITIAL_R2T_EN:
417 case ISCSI_FLASHNODE_DATASEQ_INORDER:
418 case ISCSI_FLASHNODE_PDU_INORDER:
419 case ISCSI_FLASHNODE_CHAP_AUTH_EN:
420 case ISCSI_FLASHNODE_SNACK_REQ_EN:
421 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
422 case ISCSI_FLASHNODE_BIDI_CHAP_EN:
423 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
424 case ISCSI_FLASHNODE_ERL:
425 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
426 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
427 case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
428 case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
429 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
430 case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
431 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
432 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
433 case ISCSI_FLASHNODE_FIRST_BURST:
434 case ISCSI_FLASHNODE_DEF_TIME2WAIT:
435 case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
436 case ISCSI_FLASHNODE_MAX_R2T:
437 case ISCSI_FLASHNODE_KEEPALIVE_TMO:
438 case ISCSI_FLASHNODE_ISID:
439 case ISCSI_FLASHNODE_TSID:
440 case ISCSI_FLASHNODE_PORT:
441 case ISCSI_FLASHNODE_MAX_BURST:
442 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
443 case ISCSI_FLASHNODE_IPADDR:
444 case ISCSI_FLASHNODE_ALIAS:
445 case ISCSI_FLASHNODE_REDIRECT_IPADDR:
446 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
447 case ISCSI_FLASHNODE_LOCAL_PORT:
448 case ISCSI_FLASHNODE_IPV4_TOS:
449 case ISCSI_FLASHNODE_IPV6_TC:
450 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
451 case ISCSI_FLASHNODE_NAME:
452 case ISCSI_FLASHNODE_TPGT:
453 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
454 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
455 case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
456 case ISCSI_FLASHNODE_TCP_XMIT_WSF:
457 case ISCSI_FLASHNODE_TCP_RECV_WSF:
458 case ISCSI_FLASHNODE_CHAP_OUT_IDX:
459 case ISCSI_FLASHNODE_USERNAME:
460 case ISCSI_FLASHNODE_PASSWORD:
461 case ISCSI_FLASHNODE_STATSN:
462 case ISCSI_FLASHNODE_EXP_STATSN:
463 case ISCSI_FLASHNODE_IS_BOOT_TGT:
464 return S_IRUGO;
465 default:
466 return 0;
467 }
379 } 468 }
380 469
381 return 0; 470 return 0;
@@ -391,7 +480,7 @@ static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
391 int valid_chap_entries = 0; 480 int valid_chap_entries = 0;
392 int ret = 0, i; 481 int ret = 0, i;
393 482
394 if (is_qla8022(ha)) 483 if (is_qla80XX(ha))
395 max_chap_entries = (ha->hw.flt_chap_size / 2) / 484 max_chap_entries = (ha->hw.flt_chap_size / 2) /
396 sizeof(struct ql4_chap_table); 485 sizeof(struct ql4_chap_table);
397 else 486 else
@@ -495,7 +584,7 @@ static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx)
495 584
496 memset(chap_table, 0, sizeof(struct ql4_chap_table)); 585 memset(chap_table, 0, sizeof(struct ql4_chap_table));
497 586
498 if (is_qla8022(ha)) 587 if (is_qla80XX(ha))
499 max_chap_entries = (ha->hw.flt_chap_size / 2) / 588 max_chap_entries = (ha->hw.flt_chap_size / 2) /
500 sizeof(struct ql4_chap_table); 589 sizeof(struct ql4_chap_table);
501 else 590 else
@@ -1922,6 +2011,252 @@ static int qla4xxx_task_xmit(struct iscsi_task *task)
1922 return -ENOSYS; 2011 return -ENOSYS;
1923} 2012}
1924 2013
2014static int qla4xxx_copy_from_fwddb_param(struct iscsi_bus_flash_session *sess,
2015 struct iscsi_bus_flash_conn *conn,
2016 struct dev_db_entry *fw_ddb_entry)
2017{
2018 unsigned long options = 0;
2019 int rc = 0;
2020
2021 options = le16_to_cpu(fw_ddb_entry->options);
2022 conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options);
2023 if (test_bit(OPT_IPV6_DEVICE, &options)) {
2024 rc = iscsi_switch_str_param(&sess->portal_type,
2025 PORTAL_TYPE_IPV6);
2026 if (rc)
2027 goto exit_copy;
2028 } else {
2029 rc = iscsi_switch_str_param(&sess->portal_type,
2030 PORTAL_TYPE_IPV4);
2031 if (rc)
2032 goto exit_copy;
2033 }
2034
2035 sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE,
2036 &options);
2037 sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options);
2038 sess->entry_state = test_bit(OPT_ENTRY_STATE, &options);
2039
2040 options = le16_to_cpu(fw_ddb_entry->iscsi_options);
2041 conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options);
2042 conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options);
2043 sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options);
2044 sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options);
2045 sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER,
2046 &options);
2047 sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options);
2048 sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options);
2049 conn->snack_req_en = test_bit(ISCSIOPT_SNACK_REQ_EN, &options);
2050 sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN,
2051 &options);
2052 sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options);
2053 sess->discovery_auth_optional =
2054 test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options);
2055 if (test_bit(ISCSIOPT_ERL1, &options))
2056 sess->erl |= BIT_1;
2057 if (test_bit(ISCSIOPT_ERL0, &options))
2058 sess->erl |= BIT_0;
2059
2060 options = le16_to_cpu(fw_ddb_entry->tcp_options);
2061 conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options);
2062 conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options);
2063 conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options);
2064 if (test_bit(TCPOPT_TIMER_SCALE3, &options))
2065 conn->tcp_timer_scale |= BIT_3;
2066 if (test_bit(TCPOPT_TIMER_SCALE2, &options))
2067 conn->tcp_timer_scale |= BIT_2;
2068 if (test_bit(TCPOPT_TIMER_SCALE1, &options))
2069 conn->tcp_timer_scale |= BIT_1;
2070
2071 conn->tcp_timer_scale >>= 1;
2072 conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options);
2073
2074 options = le16_to_cpu(fw_ddb_entry->ip_options);
2075 conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options);
2076
2077 conn->max_recv_dlength = BYTE_UNITS *
2078 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
2079 conn->max_xmit_dlength = BYTE_UNITS *
2080 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
2081 sess->first_burst = BYTE_UNITS *
2082 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
2083 sess->max_burst = BYTE_UNITS *
2084 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
2085 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
2086 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
2087 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
2088 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
2089 conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss);
2090 conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf;
2091 conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf;
2092 conn->ipv6_flow_label = le16_to_cpu(fw_ddb_entry->ipv6_flow_lbl);
2093 conn->keepalive_timeout = le16_to_cpu(fw_ddb_entry->ka_timeout);
2094 conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port);
2095 conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn);
2096 conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn);
2097 sess->discovery_parent_idx = le16_to_cpu(fw_ddb_entry->ddb_link);
2098 sess->discovery_parent_type = le16_to_cpu(fw_ddb_entry->ddb_link);
2099 sess->chap_out_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
2100 sess->tsid = le16_to_cpu(fw_ddb_entry->tsid);
2101
2102 sess->default_taskmgmt_timeout =
2103 le16_to_cpu(fw_ddb_entry->def_timeout);
2104 conn->port = le16_to_cpu(fw_ddb_entry->port);
2105
2106 options = le16_to_cpu(fw_ddb_entry->options);
2107 conn->ipaddress = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
2108 if (!conn->ipaddress) {
2109 rc = -ENOMEM;
2110 goto exit_copy;
2111 }
2112
2113 conn->redirect_ipaddr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
2114 if (!conn->redirect_ipaddr) {
2115 rc = -ENOMEM;
2116 goto exit_copy;
2117 }
2118
2119 memcpy(conn->ipaddress, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
2120 memcpy(conn->redirect_ipaddr, fw_ddb_entry->tgt_addr, IPv6_ADDR_LEN);
2121
2122 if (test_bit(OPT_IPV6_DEVICE, &options)) {
2123 conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos;
2124
2125 conn->link_local_ipv6_addr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
2126 if (!conn->link_local_ipv6_addr) {
2127 rc = -ENOMEM;
2128 goto exit_copy;
2129 }
2130
2131 memcpy(conn->link_local_ipv6_addr,
2132 fw_ddb_entry->link_local_ipv6_addr, IPv6_ADDR_LEN);
2133 } else {
2134 conn->ipv4_tos = fw_ddb_entry->ipv4_tos;
2135 }
2136
2137 if (fw_ddb_entry->iscsi_name[0]) {
2138 rc = iscsi_switch_str_param(&sess->targetname,
2139 (char *)fw_ddb_entry->iscsi_name);
2140 if (rc)
2141 goto exit_copy;
2142 }
2143
2144 if (fw_ddb_entry->iscsi_alias[0]) {
2145 rc = iscsi_switch_str_param(&sess->targetalias,
2146 (char *)fw_ddb_entry->iscsi_alias);
2147 if (rc)
2148 goto exit_copy;
2149 }
2150
2151 COPY_ISID(sess->isid, fw_ddb_entry->isid);
2152
2153exit_copy:
2154 return rc;
2155}
2156
2157static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess,
2158 struct iscsi_bus_flash_conn *conn,
2159 struct dev_db_entry *fw_ddb_entry)
2160{
2161 uint16_t options;
2162 int rc = 0;
2163
2164 options = le16_to_cpu(fw_ddb_entry->options);
2165 SET_BITVAL(conn->is_fw_assigned_ipv6, options, BIT_11);
2166 if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4))
2167 options |= BIT_8;
2168 else
2169 options &= ~BIT_8;
2170
2171 SET_BITVAL(sess->auto_snd_tgt_disable, options, BIT_6);
2172 SET_BITVAL(sess->discovery_sess, options, BIT_4);
2173 SET_BITVAL(sess->entry_state, options, BIT_3);
2174 fw_ddb_entry->options = cpu_to_le16(options);
2175
2176 options = le16_to_cpu(fw_ddb_entry->iscsi_options);
2177 SET_BITVAL(conn->hdrdgst_en, options, BIT_13);
2178 SET_BITVAL(conn->datadgst_en, options, BIT_12);
2179 SET_BITVAL(sess->imm_data_en, options, BIT_11);
2180 SET_BITVAL(sess->initial_r2t_en, options, BIT_10);
2181 SET_BITVAL(sess->dataseq_inorder_en, options, BIT_9);
2182 SET_BITVAL(sess->pdu_inorder_en, options, BIT_8);
2183 SET_BITVAL(sess->chap_auth_en, options, BIT_7);
2184 SET_BITVAL(conn->snack_req_en, options, BIT_6);
2185 SET_BITVAL(sess->discovery_logout_en, options, BIT_5);
2186 SET_BITVAL(sess->bidi_chap_en, options, BIT_4);
2187 SET_BITVAL(sess->discovery_auth_optional, options, BIT_3);
2188 SET_BITVAL(sess->erl & BIT_1, options, BIT_1);
2189 SET_BITVAL(sess->erl & BIT_0, options, BIT_0);
2190 fw_ddb_entry->iscsi_options = cpu_to_le16(options);
2191
2192 options = le16_to_cpu(fw_ddb_entry->tcp_options);
2193 SET_BITVAL(conn->tcp_timestamp_stat, options, BIT_6);
2194 SET_BITVAL(conn->tcp_nagle_disable, options, BIT_5);
2195 SET_BITVAL(conn->tcp_wsf_disable, options, BIT_4);
2196 SET_BITVAL(conn->tcp_timer_scale & BIT_2, options, BIT_3);
2197 SET_BITVAL(conn->tcp_timer_scale & BIT_1, options, BIT_2);
2198 SET_BITVAL(conn->tcp_timer_scale & BIT_0, options, BIT_1);
2199 SET_BITVAL(conn->tcp_timestamp_en, options, BIT_0);
2200 fw_ddb_entry->tcp_options = cpu_to_le16(options);
2201
2202 options = le16_to_cpu(fw_ddb_entry->ip_options);
2203 SET_BITVAL(conn->fragment_disable, options, BIT_4);
2204 fw_ddb_entry->ip_options = cpu_to_le16(options);
2205
2206 fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t);
2207 fw_ddb_entry->iscsi_max_rcv_data_seg_len =
2208 cpu_to_le16(conn->max_recv_dlength / BYTE_UNITS);
2209 fw_ddb_entry->iscsi_max_snd_data_seg_len =
2210 cpu_to_le16(conn->max_xmit_dlength / BYTE_UNITS);
2211 fw_ddb_entry->iscsi_first_burst_len =
2212 cpu_to_le16(sess->first_burst / BYTE_UNITS);
2213 fw_ddb_entry->iscsi_max_burst_len = cpu_to_le16(sess->max_burst /
2214 BYTE_UNITS);
2215 fw_ddb_entry->iscsi_def_time2wait = cpu_to_le16(sess->time2wait);
2216 fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain);
2217 fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt);
2218 fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size);
2219 fw_ddb_entry->tcp_xmt_wsf = cpu_to_le16(conn->tcp_xmit_wsf);
2220 fw_ddb_entry->tcp_rcv_wsf = cpu_to_le16(conn->tcp_recv_wsf);
2221 fw_ddb_entry->ipv4_tos = conn->ipv4_tos;
2222 fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label);
2223 fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout);
2224 fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port);
2225 fw_ddb_entry->stat_sn = cpu_to_le16(conn->statsn);
2226 fw_ddb_entry->exp_stat_sn = cpu_to_le16(conn->exp_statsn);
2227 fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_type);
2228 fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx);
2229 fw_ddb_entry->tsid = cpu_to_le16(sess->tsid);
2230 fw_ddb_entry->port = cpu_to_le16(conn->port);
2231 fw_ddb_entry->def_timeout =
2232 cpu_to_le16(sess->default_taskmgmt_timeout);
2233
2234 if (conn->ipaddress)
2235 memcpy(fw_ddb_entry->ip_addr, conn->ipaddress,
2236 sizeof(fw_ddb_entry->ip_addr));
2237
2238 if (conn->redirect_ipaddr)
2239 memcpy(fw_ddb_entry->tgt_addr, conn->redirect_ipaddr,
2240 sizeof(fw_ddb_entry->tgt_addr));
2241
2242 if (conn->link_local_ipv6_addr)
2243 memcpy(fw_ddb_entry->link_local_ipv6_addr,
2244 conn->link_local_ipv6_addr,
2245 sizeof(fw_ddb_entry->link_local_ipv6_addr));
2246
2247 if (sess->targetname)
2248 memcpy(fw_ddb_entry->iscsi_name, sess->targetname,
2249 sizeof(fw_ddb_entry->iscsi_name));
2250
2251 if (sess->targetalias)
2252 memcpy(fw_ddb_entry->iscsi_alias, sess->targetalias,
2253 sizeof(fw_ddb_entry->iscsi_alias));
2254
2255 COPY_ISID(fw_ddb_entry->isid, sess->isid);
2256
2257 return rc;
2258}
2259
1925static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha, 2260static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
1926 struct dev_db_entry *fw_ddb_entry, 2261 struct dev_db_entry *fw_ddb_entry,
1927 struct iscsi_cls_session *cls_sess, 2262 struct iscsi_cls_session *cls_sess,
@@ -2543,6 +2878,7 @@ static void qla4_8xxx_process_fw_error(struct scsi_qla_host *ha)
2543void qla4_8xxx_watchdog(struct scsi_qla_host *ha) 2878void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
2544{ 2879{
2545 uint32_t dev_state; 2880 uint32_t dev_state;
2881 uint32_t idc_ctrl;
2546 2882
2547 /* don't poll if reset is going on */ 2883 /* don't poll if reset is going on */
2548 if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) || 2884 if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
@@ -2561,10 +2897,23 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
2561 qla4xxx_wake_dpc(ha); 2897 qla4xxx_wake_dpc(ha);
2562 } else if (dev_state == QLA8XXX_DEV_NEED_RESET && 2898 } else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
2563 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 2899 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
2900
2901 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET!\n",
2902 __func__);
2903
2904 if (is_qla8032(ha)) {
2905 idc_ctrl = qla4_83xx_rd_reg(ha,
2906 QLA83XX_IDC_DRV_CTRL);
2907 if (!(idc_ctrl & GRACEFUL_RESET_BIT1)) {
2908 ql4_printk(KERN_INFO, ha, "%s: Graceful reset bit is not set\n",
2909 __func__);
2910 qla4xxx_mailbox_premature_completion(
2911 ha);
2912 }
2913 }
2914
2564 if (is_qla8032(ha) || 2915 if (is_qla8032(ha) ||
2565 (is_qla8022(ha) && !ql4xdontresethba)) { 2916 (is_qla8022(ha) && !ql4xdontresethba)) {
2566 ql4_printk(KERN_INFO, ha, "%s: HW State: "
2567 "NEED RESET!\n", __func__);
2568 set_bit(DPC_RESET_HA, &ha->dpc_flags); 2917 set_bit(DPC_RESET_HA, &ha->dpc_flags);
2569 qla4xxx_wake_dpc(ha); 2918 qla4xxx_wake_dpc(ha);
2570 } 2919 }
@@ -3737,8 +4086,8 @@ static struct isp_operations qla4_83xx_isp_ops = {
3737 .reset_firmware = qla4_8xxx_stop_firmware, 4086 .reset_firmware = qla4_8xxx_stop_firmware,
3738 .queue_iocb = qla4_83xx_queue_iocb, 4087 .queue_iocb = qla4_83xx_queue_iocb,
3739 .complete_iocb = qla4_83xx_complete_iocb, 4088 .complete_iocb = qla4_83xx_complete_iocb,
3740 .rd_shdw_req_q_out = qla4_83xx_rd_shdw_req_q_out, 4089 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out,
3741 .rd_shdw_rsp_q_in = qla4_83xx_rd_shdw_rsp_q_in, 4090 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in,
3742 .get_sys_info = qla4_8xxx_get_sys_info, 4091 .get_sys_info = qla4_8xxx_get_sys_info,
3743 .rd_reg_direct = qla4_83xx_rd_reg, 4092 .rd_reg_direct = qla4_83xx_rd_reg,
3744 .wr_reg_direct = qla4_83xx_wr_reg, 4093 .wr_reg_direct = qla4_83xx_wr_reg,
@@ -3761,11 +4110,6 @@ uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
3761 return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->req_q_out)); 4110 return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->req_q_out));
3762} 4111}
3763 4112
3764uint16_t qla4_83xx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
3765{
3766 return (uint16_t)le32_to_cpu(readl(&ha->qla4_83xx_reg->req_q_out));
3767}
3768
3769uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha) 4113uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
3770{ 4114{
3771 return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in); 4115 return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
@@ -3776,11 +4120,6 @@ uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
3776 return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->rsp_q_in)); 4120 return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->rsp_q_in));
3777} 4121}
3778 4122
3779uint16_t qla4_83xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
3780{
3781 return (uint16_t)le32_to_cpu(readl(&ha->qla4_83xx_reg->rsp_q_in));
3782}
3783
3784static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf) 4123static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
3785{ 4124{
3786 struct scsi_qla_host *ha = data; 4125 struct scsi_qla_host *ha = data;
@@ -4005,7 +4344,7 @@ static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
4005 if (val & BIT_7) 4344 if (val & BIT_7)
4006 ddb_index[1] = (val & 0x7f); 4345 ddb_index[1] = (val & 0x7f);
4007 4346
4008 } else if (is_qla8022(ha)) { 4347 } else if (is_qla80XX(ha)) {
4009 buf = dma_alloc_coherent(&ha->pdev->dev, size, 4348 buf = dma_alloc_coherent(&ha->pdev->dev, size,
4010 &buf_dma, GFP_KERNEL); 4349 &buf_dma, GFP_KERNEL);
4011 if (!buf) { 4350 if (!buf) {
@@ -4083,7 +4422,7 @@ static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username,
4083 int max_chap_entries = 0; 4422 int max_chap_entries = 0;
4084 struct ql4_chap_table *chap_table; 4423 struct ql4_chap_table *chap_table;
4085 4424
4086 if (is_qla8022(ha)) 4425 if (is_qla80XX(ha))
4087 max_chap_entries = (ha->hw.flt_chap_size / 2) / 4426 max_chap_entries = (ha->hw.flt_chap_size / 2) /
4088 sizeof(struct ql4_chap_table); 4427 sizeof(struct ql4_chap_table);
4089 else 4428 else
@@ -5058,6 +5397,1342 @@ exit_nt_list:
5058 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 5397 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
5059} 5398}
5060 5399
/**
 * qla4xxx_build_new_nt_list - set up sessions for newly discovered NT entries
 * @ha: pointer to adapter structure
 * @list_nt: list head on which the indexes of newly added NT DDBs are
 *	collected; caller owns (and must free) the qla_ddb_index nodes
 *
 * Walks the firmware DDB table and, for every normal-target (NT) entry that
 * is in DDB_DS_NO_CONNECTION_ACTIVE state and not already represented by an
 * existing session, records its index on @list_nt and sets up a session for
 * it via qla4xxx_sess_conn_setup().
 **/
static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha,
				      struct list_head *list_nt)
{
	struct dev_db_entry *fw_ddb_entry;
	dma_addr_t fw_ddb_dma;
	int max_ddbs;
	int fw_idx_size;
	int ret;
	uint32_t idx = 0, next_idx = 0;
	uint32_t state = 0, conn_err = 0;
	uint16_t conn_id = 0;
	struct qla_ddb_index *nt_ddb_idx;

	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
				      &fw_ddb_dma);
	if (fw_ddb_entry == NULL) {
		DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
		goto exit_new_nt_list;
	}
	/* 40XX adapters expose a smaller firmware DDB table */
	max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
				    MAX_DEV_DB_ENTRIES;
	fw_idx_size = sizeof(struct qla_ddb_index);

	/* next_idx from the firmware drives the iteration, not idx+1 */
	for (idx = 0; idx < max_ddbs; idx = next_idx) {
		ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
					      NULL, &next_idx, &state,
					      &conn_err, NULL, &conn_id);
		if (ret == QLA_ERROR)
			break;

		/* Check if NT, then add it to list */
		/* An empty iscsi_name means this is not a normal target */
		if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
			goto continue_next_new_nt;

		if (!(state == DDB_DS_NO_CONNECTION_ACTIVE))
			goto continue_next_new_nt;

		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "Adding DDB to session = 0x%x\n", idx));

		nt_ddb_idx = vmalloc(fw_idx_size);
		if (!nt_ddb_idx)
			break;

		nt_ddb_idx->fw_ddb_idx = idx;

		ret = qla4xxx_is_session_exists(ha, fw_ddb_entry);
		if (ret == QLA_SUCCESS) {
			/* free nt_ddb_idx and do not add to list_nt */
			vfree(nt_ddb_idx);
			goto continue_next_new_nt;
		}

		list_add_tail(&nt_ddb_idx->list, list_nt);

		ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
					      idx);
		if (ret == QLA_ERROR)
			goto exit_new_nt_list;

continue_next_new_nt:
		/* next_idx == 0 means the firmware has no further entries */
		if (next_idx == 0)
			break;
	}

exit_new_nt_list:
	if (fw_ddb_entry)
		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
}
5469
/**
 * qla4xxx_sysfs_ddb_is_non_persistent - check for non-persistence of ddb entry
 * @dev: dev associated with the sysfs entry
 * @data: pointer to flashnode session object
 *
 * Match function passed to the iscsi transport class device iterator.
 *
 * Returns:
 * 1: if flashnode entry is non-persistent
 * 0: if flashnode entry is persistent
 **/
static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data)
{
	struct iscsi_bus_flash_session *fnode_sess;

	/* Skip devices that are not flash node sessions on this bus */
	if (!iscsi_flashnode_bus_match(dev, NULL))
		return 0;

	fnode_sess = iscsi_dev_to_flash_session(dev);

	return (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT);
}
5490
/**
 * qla4xxx_sysfs_ddb_tgt_create - Create sysfs entry for target
 * @ha: pointer to host
 * @fw_ddb_entry: flash ddb data
 * @idx: target index
 * @user: if set then this call is made from userland else from kernel
 *
 * Returns:
 * On success: QLA_SUCCESS
 * On failure: QLA_ERROR
 *
 * This creates separate sysfs entries for session and connection attributes of
 * the given fw ddb entry.
 * If this is invoked as a result of a userspace call then the entry is marked
 * as nonpersistent using flash_state field.
 **/
int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
				 struct dev_db_entry *fw_ddb_entry,
				 uint16_t *idx, int user)
{
	struct iscsi_bus_flash_session *fnode_sess = NULL;
	struct iscsi_bus_flash_conn *fnode_conn = NULL;
	int rc = QLA_ERROR;

	fnode_sess = iscsi_create_flashnode_sess(ha->host, *idx,
						 &qla4xxx_iscsi_transport, 0);
	if (!fnode_sess) {
		ql4_printk(KERN_ERR, ha,
			   "%s: Unable to create session sysfs entry for flashnode %d of host%lu\n",
			   __func__, *idx, ha->host_no);
		goto exit_tgt_create;
	}

	fnode_conn = iscsi_create_flashnode_conn(ha->host, fnode_sess,
						 &qla4xxx_iscsi_transport, 0);
	if (!fnode_conn) {
		ql4_printk(KERN_ERR, ha,
			   "%s: Unable to create conn sysfs entry for flashnode %d of host%lu\n",
			   __func__, *idx, ha->host_no);
		goto free_sess;
	}

	if (user) {
		/* Userspace-created entries stay non-persistent until the
		 * user applies them to flash (qla4xxx_sysfs_ddb_apply). */
		fnode_sess->flash_state = DEV_DB_NON_PERSISTENT;
	} else {
		fnode_sess->flash_state = DEV_DB_PERSISTENT;

		/* Mark boot targets so logout from them can be refused */
		if (*idx == ha->pri_ddb_idx || *idx == ha->sec_ddb_idx)
			fnode_sess->is_boot_target = 1;
		else
			fnode_sess->is_boot_target = 0;
	}

	/* NOTE(review): rc from the copy below is ignored; the function
	 * returns QLA_SUCCESS even if the copy fails - confirm intent. */
	rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn,
					   fw_ddb_entry);

	ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
		   __func__, fnode_sess->dev.kobj.name);

	ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
		   __func__, fnode_conn->dev.kobj.name);

	return QLA_SUCCESS;

free_sess:
	/* Destroying the session also removes any child connection nodes */
	iscsi_destroy_flashnode_sess(fnode_sess);

exit_tgt_create:
	return QLA_ERROR;
}
5561
/**
 * qla4xxx_sysfs_ddb_add - Add new ddb entry in flash
 * @shost: pointer to host
 * @buf: type of ddb entry (ipv4/ipv6)
 * @len: length of buf
 *
 * Returns: index of the newly created flash ddb entry on success,
 * -EIO on failure.
 *
 * This creates new ddb entry in the flash by finding first free index and
 * storing default ddb there. And then create sysfs entry for the new ddb entry.
 **/
static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
				 int len)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct dev_db_entry *fw_ddb_entry = NULL;
	dma_addr_t fw_ddb_entry_dma;
	struct device *dev;
	uint16_t idx = 0;
	uint16_t max_ddbs = 0;
	uint32_t options = 0;
	uint32_t rval = QLA_ERROR;

	if (strncasecmp(PORTAL_TYPE_IPV4, buf, 4) &&
	    strncasecmp(PORTAL_TYPE_IPV6, buf, 4)) {
		DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Invalid portal type\n",
				  __func__));
		goto exit_ddb_add;
	}

	max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
				    MAX_DEV_DB_ENTRIES;

	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
					  &fw_ddb_entry_dma, GFP_KERNEL);
	if (!fw_ddb_entry) {
		DEBUG2(ql4_printk(KERN_ERR, ha,
				  "%s: Unable to allocate dma buffer\n",
				  __func__));
		goto exit_ddb_add;
	}

	/* Only one non-persistent (not yet applied) entry may exist at a
	 * time; refuse to add another until it has been applied or removed. */
	dev = iscsi_find_flashnode_sess(ha->host, NULL,
					qla4xxx_sysfs_ddb_is_non_persistent);
	if (dev) {
		/* NOTE(review): iscsi_find_flashnode_sess() returns a device
		 * with an elevated reference; a put_device() appears to be
		 * missing here - confirm against the transport class API. */
		ql4_printk(KERN_ERR, ha,
			   "%s: A non-persistent entry %s found\n",
			   __func__, dev->kobj.name);
		goto exit_ddb_add;
	}

	/* Find the first flash index that holds no valid ddb */
	for (idx = 0; idx < max_ddbs; idx++) {
		if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry,
					     fw_ddb_entry_dma, idx))
			break;
	}

	if (idx == max_ddbs)
		goto exit_ddb_add;

	if (!strncasecmp("ipv6", buf, 4))
		options |= IPV6_DEFAULT_DDB_ENTRY;

	rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
	if (rval == QLA_ERROR)
		goto exit_ddb_add;

	/* user == 1: entry is created non-persistent until applied */
	rval = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 1);

exit_ddb_add:
	if (fw_ddb_entry)
		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
				  fw_ddb_entry, fw_ddb_entry_dma);
	if (rval == QLA_SUCCESS)
		return idx;
	else
		return -EIO;
}
5638
/**
 * qla4xxx_sysfs_ddb_apply - write the target ddb contents to Flash
 * @fnode_sess: pointer to session attrs of flash ddb entry
 * @fnode_conn: pointer to connection attrs of flash ddb entry
 *
 * This writes the contents of target ddb buffer to Flash with a valid cookie
 * value in order to make the ddb entry persistent.
 *
 * Returns: 0 on success, -ENOMEM/-EIO on failure.
 **/
static int qla4xxx_sysfs_ddb_apply(struct iscsi_bus_flash_session *fnode_sess,
				   struct iscsi_bus_flash_conn *fnode_conn)
{
	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
	struct scsi_qla_host *ha = to_qla_host(shost);
	uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO;
	struct dev_db_entry *fw_ddb_entry = NULL;
	dma_addr_t fw_ddb_entry_dma;
	uint32_t options = 0;
	int rval = 0;

	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
					  &fw_ddb_entry_dma, GFP_KERNEL);
	if (!fw_ddb_entry) {
		DEBUG2(ql4_printk(KERN_ERR, ha,
				  "%s: Unable to allocate dma buffer\n",
				  __func__));
		rval = -ENOMEM;
		goto exit_ddb_apply;
	}

	if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
		options |= IPV6_DEFAULT_DDB_ENTRY;

	/* Start from firmware defaults, then overlay the sysfs values */
	rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
	if (rval == QLA_ERROR)
		goto exit_ddb_apply;
		/* NOTE(review): this path returns the raw QLA_ERROR value
		 * rather than a negative errno - confirm callers' needs. */

	/* Each flash ddb slot is one dev_db_entry within the DB region */
	dev_db_start_offset += (fnode_sess->target_id *
				sizeof(*fw_ddb_entry));

	qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry);
	/* A valid cookie is what makes the entry persistent across reboots */
	fw_ddb_entry->cookie = DDB_VALID_COOKIE;

	rval = qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
				 sizeof(*fw_ddb_entry), FLASH_OPT_RMW_COMMIT);

	if (rval == QLA_SUCCESS) {
		fnode_sess->flash_state = DEV_DB_PERSISTENT;
		ql4_printk(KERN_INFO, ha,
			   "%s: flash node %u of host %lu written to flash\n",
			   __func__, fnode_sess->target_id, ha->host_no);
	} else {
		rval = -EIO;
		ql4_printk(KERN_ERR, ha,
			   "%s: Error while writing flash node %u of host %lu to flash\n",
			   __func__, fnode_sess->target_id, ha->host_no);
	}

exit_ddb_apply:
	if (fw_ddb_entry)
		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
				  fw_ddb_entry, fw_ddb_entry_dma);
	return rval;
}
5702
/*
 * qla4xxx_sysfs_ddb_conn_open - open a connection on a firmware ddb slot
 * @ha: pointer to adapter structure
 * @fw_ddb_entry: ddb contents to program into slot @idx
 * @idx: firmware ddb index to use
 *
 * Programs @fw_ddb_entry into firmware slot @idx, opens the connection and
 * polls the ddb state until the login settles (or the login timeout,
 * bounded between LOGIN_TOV and LOGIN_TOV * 10, expires).
 * Returns a QLA_* status.
 */
static ssize_t qla4xxx_sysfs_ddb_conn_open(struct scsi_qla_host *ha,
					   struct dev_db_entry *fw_ddb_entry,
					   uint16_t idx)
{
	struct dev_db_entry *ddb_entry = NULL;
	dma_addr_t ddb_entry_dma;
	unsigned long wtime;
	uint32_t mbx_sts = 0;
	uint32_t state = 0, conn_err = 0;
	uint16_t tmo = 0;
	int ret = 0;

	ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*ddb_entry),
				       &ddb_entry_dma, GFP_KERNEL);
	if (!ddb_entry) {
		DEBUG2(ql4_printk(KERN_ERR, ha,
				  "%s: Unable to allocate dma buffer\n",
				  __func__));
		return QLA_ERROR;
	}

	/* Work on a DMA-able copy of the caller's entry */
	memcpy(ddb_entry, fw_ddb_entry, sizeof(*ddb_entry));

	ret = qla4xxx_set_ddb_entry(ha, idx, ddb_entry_dma, &mbx_sts);
	if (ret != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_ERR, ha,
				  "%s: Unable to set ddb entry for index %d\n",
				  __func__, idx));
		goto exit_ddb_conn_open;
	}

	qla4xxx_conn_open(ha, idx);

	/* To ensure that sendtargets is done, wait for at least 12 secs */
	tmo = ((ha->def_timeout > LOGIN_TOV) &&
	       (ha->def_timeout < LOGIN_TOV * 10) ?
	       ha->def_timeout : LOGIN_TOV);

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "Default time to wait for login to ddb %d\n", tmo));

	wtime = jiffies + (HZ * tmo);
	do {
		ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL,
					      NULL, &state, &conn_err, NULL,
					      NULL);
		if (ret == QLA_ERROR)
			continue;	/* NOTE(review): retries without
					 * sleeping until wtime - confirm
					 * this busy-wait is intended. */

		if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
		    state == DDB_DS_SESSION_FAILED)
			break;

		schedule_timeout_uninterruptible(HZ / 10);
	} while (time_after(wtime, jiffies));

exit_ddb_conn_open:
	if (ddb_entry)
		dma_free_coherent(&ha->pdev->dev, sizeof(*ddb_entry),
				  ddb_entry, ddb_entry_dma);
	return ret;
}
5765
/*
 * qla4xxx_ddb_login_st - perform a SendTargets (ST) discovery login
 * @ha: pointer to adapter structure
 * @fw_ddb_entry: the ST ddb contents to log in with
 *
 * Opens a temporary connection on a free ddb index, creates sessions for
 * any newly discovered NT entries, then tears the discovery ddb down.
 * Only one ST discovery may run at a time per adapter (serialized via
 * AF_ST_DISCOVERY_IN_PROGRESS).  Returns a QLA_* status.
 */
static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha,
				struct dev_db_entry *fw_ddb_entry)
{
	struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
	struct list_head list_nt;
	uint16_t ddb_index;
	int ret = 0;

	if (test_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags)) {
		ql4_printk(KERN_WARNING, ha,
			   "%s: A discovery already in progress!\n", __func__);
		return QLA_ERROR;
	}

	INIT_LIST_HEAD(&list_nt);

	set_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags);

	ret = qla4xxx_get_ddb_index(ha, &ddb_index);
	if (ret == QLA_ERROR)
		goto exit_login_st_clr_bit;

	ret = qla4xxx_sysfs_ddb_conn_open(ha, fw_ddb_entry, ddb_index);
	if (ret == QLA_ERROR)
		goto exit_login_st;

	qla4xxx_build_new_nt_list(ha, &list_nt);

	/* The index bookkeeping nodes are only needed during discovery;
	 * clear their temporary firmware entries and free them now. */
	list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) {
		list_del_init(&ddb_idx->list);
		qla4xxx_clear_ddb_entry(ha, ddb_idx->fw_ddb_idx);
		vfree(ddb_idx);
	}

exit_login_st:
	if (qla4xxx_clear_ddb_entry(ha, ddb_index) == QLA_ERROR) {
		ql4_printk(KERN_ERR, ha,
			   "Unable to clear DDB index = 0x%x\n", ddb_index);
	}

	/* Release the index reserved by qla4xxx_get_ddb_index() */
	clear_bit(ddb_index, ha->ddb_idx_map);

exit_login_st_clr_bit:
	clear_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags);
	return ret;
}
5812
/*
 * qla4xxx_ddb_login_nt - log in to a normal-target (NT) flash entry
 * @ha: pointer to adapter structure
 * @fw_ddb_entry: ddb contents of the target
 * @idx: target index
 *
 * Returns -EPERM if a session for this target already exists; otherwise
 * the QLA_* status of the session/connection setup.
 */
static int qla4xxx_ddb_login_nt(struct scsi_qla_host *ha,
				struct dev_db_entry *fw_ddb_entry,
				uint16_t idx)
{
	int ret = QLA_ERROR;

	ret = qla4xxx_is_session_exists(ha, fw_ddb_entry);
	if (ret != QLA_SUCCESS)
		ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
					      idx);
	else
		ret = -EPERM;

	return ret;
}
5828
/**
 * qla4xxx_sysfs_ddb_login - Login to the specified target
 * @fnode_sess: pointer to session attrs of flash ddb entry
 * @fnode_conn: pointer to connection attrs of flash ddb entry
 *
 * This logs in to the specified target
 *
 * Returns: 0 on success, negative errno on failure.  Only persistent
 * (applied) flash entries may be logged in to.
 **/
static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
				   struct iscsi_bus_flash_conn *fnode_conn)
{
	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct dev_db_entry *fw_ddb_entry = NULL;
	dma_addr_t fw_ddb_entry_dma;
	uint32_t options = 0;
	int ret = 0;

	if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) {
		ql4_printk(KERN_ERR, ha,
			   "%s: Target info is not persistent\n", __func__);
		ret = -EIO;
		goto exit_ddb_login;
	}

	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
					  &fw_ddb_entry_dma, GFP_KERNEL);
	if (!fw_ddb_entry) {
		DEBUG2(ql4_printk(KERN_ERR, ha,
				  "%s: Unable to allocate dma buffer\n",
				  __func__));
		ret = -ENOMEM;
		goto exit_ddb_login;
	}

	if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
		options |= IPV6_DEFAULT_DDB_ENTRY;

	/* Start from firmware defaults, then overlay the sysfs values */
	ret = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
	if (ret == QLA_ERROR)
		goto exit_ddb_login;

	qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry);
	fw_ddb_entry->cookie = DDB_VALID_COOKIE;

	/* Empty iscsi_name means a SendTargets entry; otherwise NT login */
	if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
		ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry);
	else
		ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry,
					   fnode_sess->target_id);

	/* Map positive QLA_* failure codes onto -EIO for sysfs callers */
	if (ret > 0)
		ret = -EIO;

exit_ddb_login:
	if (fw_ddb_entry)
		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
				  fw_ddb_entry, fw_ddb_entry_dma);
	return ret;
}
5888
/**
 * qla4xxx_sysfs_ddb_logout_sid - Logout session for the specified target
 * @cls_sess: pointer to session to be logged out
 *
 * This performs session log out from the specified target
 *
 * Returns: QLA_SUCCESS (0) on success, negative errno on failure.
 * Logout from flash boot-target sessions is refused with -EPERM.
 **/
static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess)
{
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry = NULL;
	struct scsi_qla_host *ha;
	struct dev_db_entry *fw_ddb_entry = NULL;
	dma_addr_t fw_ddb_entry_dma;
	unsigned long flags;
	unsigned long wtime;
	uint32_t ddb_state;
	int options;
	int ret = 0;

	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;

	if (ddb_entry->ddb_type != FLASH_DDB) {
		ql4_printk(KERN_ERR, ha, "%s: Not a flash node session\n",
			   __func__);
		ret = -ENXIO;
		goto exit_ddb_logout;
	}

	if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
		ql4_printk(KERN_ERR, ha,
			   "%s: Logout from boot target entry is not permitted.\n",
			   __func__);
		ret = -EPERM;
		goto exit_ddb_logout;
	}

	options = LOGOUT_OPTION_CLOSE_SESSION;
	if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) {
		ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
		ret = -EIO;
		goto exit_ddb_logout;
	}

	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
					  &fw_ddb_entry_dma, GFP_KERNEL);
	if (!fw_ddb_entry) {
		ql4_printk(KERN_ERR, ha,
			   "%s: Unable to allocate dma buffer\n", __func__);
		ret = -ENOMEM;
		goto exit_ddb_logout;
	}

	/* Poll until firmware reports the connection torn down, or the
	 * logout timeout expires; either way proceed with teardown. */
	wtime = jiffies + (HZ * LOGOUT_TOV);
	do {
		ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
					      fw_ddb_entry, fw_ddb_entry_dma,
					      NULL, NULL, &ddb_state, NULL,
					      NULL, NULL);
		if (ret == QLA_ERROR)
			goto ddb_logout_clr_sess;

		if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
		    (ddb_state == DDB_DS_SESSION_FAILED))
			goto ddb_logout_clr_sess;

		schedule_timeout_uninterruptible(HZ);
	} while ((time_after(wtime, jiffies)));

ddb_logout_clr_sess:
	qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
	/*
	 * we have decremented the reference count of the driver
	 * when we setup the session to have the driver unload
	 * to be seamless without actually destroying the
	 * session
	 **/
	try_module_get(qla4xxx_iscsi_transport.owner);
	iscsi_destroy_endpoint(ddb_entry->conn->ep);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qla4xxx_free_ddb(ha, ddb_entry);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	iscsi_session_teardown(ddb_entry->sess);

	ret = QLA_SUCCESS;

exit_ddb_logout:
	if (fw_ddb_entry)
		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
				  fw_ddb_entry, fw_ddb_entry_dma);
	return ret;
}
5984
/**
 * qla4xxx_sysfs_ddb_logout - Logout from the specified target
 * @fnode_sess: pointer to session attrs of flash ddb entry
 * @fnode_conn: pointer to connection attrs of flash ddb entry
 *
 * This performs log out from the specified target
 *
 * Returns: 0 on success, negative errno on failure (-ESRCH when no
 * active session matches the flash node).
 **/
static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
				    struct iscsi_bus_flash_conn *fnode_conn)
{
	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct ql4_tuple_ddb *flash_tddb = NULL;
	struct ql4_tuple_ddb *tmp_tddb = NULL;
	struct dev_db_entry *fw_ddb_entry = NULL;
	struct ddb_entry *ddb_entry = NULL;
	dma_addr_t fw_ddb_dma;
	uint32_t next_idx = 0;
	uint32_t state = 0, conn_err = 0;
	uint16_t conn_id = 0;
	int idx, index;
	int status, ret = 0;

	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
				      &fw_ddb_dma);
	if (fw_ddb_entry == NULL) {
		ql4_printk(KERN_ERR, ha, "%s:Out of memory\n", __func__);
		ret = -ENOMEM;
		goto exit_ddb_logout;
	}

	flash_tddb = vzalloc(sizeof(*flash_tddb));
	if (!flash_tddb) {
		ql4_printk(KERN_WARNING, ha,
			   "%s:Memory Allocation failed.\n", __func__);
		ret = -ENOMEM;
		goto exit_ddb_logout;
	}

	tmp_tddb = vzalloc(sizeof(*tmp_tddb));
	if (!tmp_tddb) {
		ql4_printk(KERN_WARNING, ha,
			   "%s:Memory Allocation failed.\n", __func__);
		ret = -ENOMEM;
		goto exit_ddb_logout;
	}

	/* A flash node without a target name is a SendTargets entry */
	if (!fnode_sess->targetname) {
		ql4_printk(KERN_ERR, ha,
			   "%s:Cannot logout from SendTarget entry\n",
			   __func__);
		ret = -EPERM;
		goto exit_ddb_logout;
	}

	if (fnode_sess->is_boot_target) {
		ql4_printk(KERN_ERR, ha,
			   "%s: Logout from boot target entry is not permitted.\n",
			   __func__);
		ret = -EPERM;
		goto exit_ddb_logout;
	}

	/* Build the tuple (name, address, port, tpgt, isid) describing the
	 * flash node so it can be matched against live sessions. */
	strncpy(flash_tddb->iscsi_name, fnode_sess->targetname,
		ISCSI_NAME_SIZE);

	if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
		sprintf(flash_tddb->ip_addr, "%pI6", fnode_conn->ipaddress);
	else
		sprintf(flash_tddb->ip_addr, "%pI4", fnode_conn->ipaddress);

	flash_tddb->tpgt = fnode_sess->tpgt;
	flash_tddb->port = fnode_conn->port;

	COPY_ISID(flash_tddb->isid, fnode_sess->isid);

	/* Scan all active ddbs for a flash session matching the tuple */
	for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
		if (ddb_entry == NULL)
			continue;

		if (ddb_entry->ddb_type != FLASH_DDB)
			continue;

		index = ddb_entry->sess->target_id;
		status = qla4xxx_get_fwddb_entry(ha, index, fw_ddb_entry,
						 fw_ddb_dma, NULL, &next_idx,
						 &state, &conn_err, NULL,
						 &conn_id);
		if (status == QLA_ERROR) {
			/* NOTE(review): -ENOMEM looks like the wrong errno
			 * for a firmware read failure (-EIO seems more apt)
			 * - confirm before changing. */
			ret = -ENOMEM;
			break;
		}

		qla4xxx_convert_param_ddb(fw_ddb_entry, tmp_tddb, NULL);

		status = qla4xxx_compare_tuple_ddb(ha, flash_tddb, tmp_tddb,
						   true);
		if (status == QLA_SUCCESS) {
			ret = qla4xxx_sysfs_ddb_logout_sid(ddb_entry->sess);
			break;
		}
	}

	/* Loop ran to completion without finding a matching session */
	if (idx == MAX_DDB_ENTRIES)
		ret = -ESRCH;

exit_ddb_logout:
	if (flash_tddb)
		vfree(flash_tddb);
	if (tmp_tddb)
		vfree(tmp_tddb);
	if (fw_ddb_entry)
		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);

	return ret;
}
6102
/*
 * qla4xxx_sysfs_ddb_get_param - read one flash node attribute into @buf
 * @fnode_sess: pointer to session attrs of flash ddb entry
 * @param: ISCSI_FLASHNODE_* attribute selector
 * @buf: output buffer for the sysfs show
 *
 * Returns the number of bytes written to @buf, -EIO if the flash node has
 * no connection device, or -ENOSYS for an unknown @param.
 */
static int
qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
			    int param, char *buf)
{
	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct iscsi_bus_flash_conn *fnode_conn;
	struct ql4_chap_table chap_tbl;
	struct device *dev;
	int parent_type, parent_index = 0xffff;
	int rc = 0;

	/* Connection-level attributes live on the child flashnode conn
	 * device; look it up first.
	 * NOTE(review): the device reference returned here does not appear
	 * to be dropped with put_device() - confirm against the transport
	 * class API. */
	dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
					iscsi_is_flashnode_conn_dev);
	if (!dev)
		return -EIO;

	fnode_conn = iscsi_dev_to_flash_conn(dev);

	switch (param) {
	case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
		rc = sprintf(buf, "%u\n", fnode_conn->is_fw_assigned_ipv6);
		break;
	case ISCSI_FLASHNODE_PORTAL_TYPE:
		rc = sprintf(buf, "%s\n", fnode_sess->portal_type);
		break;
	case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
		rc = sprintf(buf, "%u\n", fnode_sess->auto_snd_tgt_disable);
		break;
	case ISCSI_FLASHNODE_DISCOVERY_SESS:
		rc = sprintf(buf, "%u\n", fnode_sess->discovery_sess);
		break;
	case ISCSI_FLASHNODE_ENTRY_EN:
		rc = sprintf(buf, "%u\n", fnode_sess->entry_state);
		break;
	case ISCSI_FLASHNODE_HDR_DGST_EN:
		rc = sprintf(buf, "%u\n", fnode_conn->hdrdgst_en);
		break;
	case ISCSI_FLASHNODE_DATA_DGST_EN:
		rc = sprintf(buf, "%u\n", fnode_conn->datadgst_en);
		break;
	case ISCSI_FLASHNODE_IMM_DATA_EN:
		rc = sprintf(buf, "%u\n", fnode_sess->imm_data_en);
		break;
	case ISCSI_FLASHNODE_INITIAL_R2T_EN:
		rc = sprintf(buf, "%u\n", fnode_sess->initial_r2t_en);
		break;
	case ISCSI_FLASHNODE_DATASEQ_INORDER:
		rc = sprintf(buf, "%u\n", fnode_sess->dataseq_inorder_en);
		break;
	case ISCSI_FLASHNODE_PDU_INORDER:
		rc = sprintf(buf, "%u\n", fnode_sess->pdu_inorder_en);
		break;
	case ISCSI_FLASHNODE_CHAP_AUTH_EN:
		rc = sprintf(buf, "%u\n", fnode_sess->chap_auth_en);
		break;
	case ISCSI_FLASHNODE_SNACK_REQ_EN:
		rc = sprintf(buf, "%u\n", fnode_conn->snack_req_en);
		break;
	case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
		rc = sprintf(buf, "%u\n", fnode_sess->discovery_logout_en);
		break;
	case ISCSI_FLASHNODE_BIDI_CHAP_EN:
		rc = sprintf(buf, "%u\n", fnode_sess->bidi_chap_en);
		break;
	case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
		rc = sprintf(buf, "%u\n", fnode_sess->discovery_auth_optional);
		break;
	case ISCSI_FLASHNODE_ERL:
		rc = sprintf(buf, "%u\n", fnode_sess->erl);
		break;
	case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
		rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_stat);
		break;
	case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
		rc = sprintf(buf, "%u\n", fnode_conn->tcp_nagle_disable);
		break;
	case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
		rc = sprintf(buf, "%u\n", fnode_conn->tcp_wsf_disable);
		break;
	case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
		rc = sprintf(buf, "%u\n", fnode_conn->tcp_timer_scale);
		break;
	case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
		rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_en);
		break;
	case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
		rc = sprintf(buf, "%u\n", fnode_conn->fragment_disable);
		break;
	case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
		rc = sprintf(buf, "%u\n", fnode_conn->max_recv_dlength);
		break;
	case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
		rc = sprintf(buf, "%u\n", fnode_conn->max_xmit_dlength);
		break;
	case ISCSI_FLASHNODE_FIRST_BURST:
		rc = sprintf(buf, "%u\n", fnode_sess->first_burst);
		break;
	case ISCSI_FLASHNODE_DEF_TIME2WAIT:
		rc = sprintf(buf, "%u\n", fnode_sess->time2wait);
		break;
	case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
		rc = sprintf(buf, "%u\n", fnode_sess->time2retain);
		break;
	case ISCSI_FLASHNODE_MAX_R2T:
		rc = sprintf(buf, "%u\n", fnode_sess->max_r2t);
		break;
	case ISCSI_FLASHNODE_KEEPALIVE_TMO:
		rc = sprintf(buf, "%u\n", fnode_conn->keepalive_timeout);
		break;
	case ISCSI_FLASHNODE_ISID:
		/* ISID is printed as 6 hex bytes with no separators */
		rc = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n",
			     fnode_sess->isid[0], fnode_sess->isid[1],
			     fnode_sess->isid[2], fnode_sess->isid[3],
			     fnode_sess->isid[4], fnode_sess->isid[5]);
		break;
	case ISCSI_FLASHNODE_TSID:
		rc = sprintf(buf, "%u\n", fnode_sess->tsid);
		break;
	case ISCSI_FLASHNODE_PORT:
		rc = sprintf(buf, "%d\n", fnode_conn->port);
		break;
	case ISCSI_FLASHNODE_MAX_BURST:
		rc = sprintf(buf, "%u\n", fnode_sess->max_burst);
		break;
	case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
		rc = sprintf(buf, "%u\n",
			     fnode_sess->default_taskmgmt_timeout);
		break;
	case ISCSI_FLASHNODE_IPADDR:
		/* Address format depends on the node's portal type */
		if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
			rc = sprintf(buf, "%pI6\n", fnode_conn->ipaddress);
		else
			rc = sprintf(buf, "%pI4\n", fnode_conn->ipaddress);
		break;
	case ISCSI_FLASHNODE_ALIAS:
		if (fnode_sess->targetalias)
			rc = sprintf(buf, "%s\n", fnode_sess->targetalias);
		else
			rc = sprintf(buf, "\n");
		break;
	case ISCSI_FLASHNODE_REDIRECT_IPADDR:
		if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
			rc = sprintf(buf, "%pI6\n",
				     fnode_conn->redirect_ipaddr);
		else
			rc = sprintf(buf, "%pI4\n",
				     fnode_conn->redirect_ipaddr);
		break;
	case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
		rc = sprintf(buf, "%u\n", fnode_conn->max_segment_size);
		break;
	case ISCSI_FLASHNODE_LOCAL_PORT:
		rc = sprintf(buf, "%u\n", fnode_conn->local_port);
		break;
	case ISCSI_FLASHNODE_IPV4_TOS:
		rc = sprintf(buf, "%u\n", fnode_conn->ipv4_tos);
		break;
	case ISCSI_FLASHNODE_IPV6_TC:
		/* IPv6-only attribute; empty line for IPv4 nodes */
		if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
			rc = sprintf(buf, "%u\n",
				     fnode_conn->ipv6_traffic_class);
		else
			rc = sprintf(buf, "\n");
		break;
	case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
		rc = sprintf(buf, "%u\n", fnode_conn->ipv6_flow_label);
		break;
	case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
		if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
			rc = sprintf(buf, "%pI6\n",
				     fnode_conn->link_local_ipv6_addr);
		else
			rc = sprintf(buf, "\n");
		break;
	case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
		/* 0xffff is reported when the parent index is out of range */
		if ((fnode_sess->discovery_parent_idx) >= 0 &&
		    (fnode_sess->discovery_parent_idx < MAX_DDB_ENTRIES))
			parent_index = fnode_sess->discovery_parent_idx;

		rc = sprintf(buf, "%u\n", parent_index);
		break;
	case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
		if (fnode_sess->discovery_parent_type == DDB_ISNS)
			parent_type = ISCSI_DISC_PARENT_ISNS;
		else if (fnode_sess->discovery_parent_type == DDB_NO_LINK)
			parent_type = ISCSI_DISC_PARENT_UNKNOWN;
		else if (fnode_sess->discovery_parent_type >= 0 &&
			 fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES)
			parent_type = ISCSI_DISC_PARENT_SENDTGT;
		else
			parent_type = ISCSI_DISC_PARENT_UNKNOWN;

		rc = sprintf(buf, "%s\n",
			     iscsi_get_discovery_parent_name(parent_type));
		break;
	case ISCSI_FLASHNODE_NAME:
		if (fnode_sess->targetname)
			rc = sprintf(buf, "%s\n", fnode_sess->targetname);
		else
			rc = sprintf(buf, "\n");
		break;
	case ISCSI_FLASHNODE_TPGT:
		rc = sprintf(buf, "%u\n", fnode_sess->tpgt);
		break;
	case ISCSI_FLASHNODE_TCP_XMIT_WSF:
		rc = sprintf(buf, "%u\n", fnode_conn->tcp_xmit_wsf);
		break;
	case ISCSI_FLASHNODE_TCP_RECV_WSF:
		rc = sprintf(buf, "%u\n", fnode_conn->tcp_recv_wsf);
		break;
	case ISCSI_FLASHNODE_CHAP_OUT_IDX:
		rc = sprintf(buf, "%u\n", fnode_sess->chap_out_idx);
		break;
	case ISCSI_FLASHNODE_USERNAME:
		if (fnode_sess->chap_auth_en) {
			/* NOTE(review): return value of the CHAP lookup is
			 * not checked; chap_tbl may be used uninitialized on
			 * failure - confirm. */
			qla4xxx_get_uni_chap_at_index(ha,
						      chap_tbl.name,
						      chap_tbl.secret,
						      fnode_sess->chap_out_idx);
			rc = sprintf(buf, "%s\n", chap_tbl.name);
		} else {
			rc = sprintf(buf, "\n");
		}
		break;
	case ISCSI_FLASHNODE_PASSWORD:
		if (fnode_sess->chap_auth_en) {
			/* NOTE(review): same unchecked CHAP lookup as the
			 * USERNAME case above - confirm. */
			qla4xxx_get_uni_chap_at_index(ha,
						      chap_tbl.name,
						      chap_tbl.secret,
						      fnode_sess->chap_out_idx);
			rc = sprintf(buf, "%s\n", chap_tbl.secret);
		} else {
			rc = sprintf(buf, "\n");
		}
		break;
	case ISCSI_FLASHNODE_STATSN:
		rc = sprintf(buf, "%u\n", fnode_conn->statsn);
		break;
	case ISCSI_FLASHNODE_EXP_STATSN:
		rc = sprintf(buf, "%u\n", fnode_conn->exp_statsn);
		break;
	case ISCSI_FLASHNODE_IS_BOOT_TGT:
		rc = sprintf(buf, "%u\n", fnode_sess->is_boot_target);
		break;
	default:
		rc = -ENOSYS;
		break;
	}
	return rc;
}
6354
6355/**
6356 * qla4xxx_sysfs_ddb_set_param - Set parameter for firmware DDB entry
6357 * @fnode_sess: pointer to session attrs of flash ddb entry
6358 * @fnode_conn: pointer to connection attrs of flash ddb entry
6359 * @data: Parameters and their values to update
6360 * @len: len of data
6361 *
6362 * This sets the parameter of flash ddb entry and writes them to flash
6363 **/
6364static int
6365qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
6366 struct iscsi_bus_flash_conn *fnode_conn,
6367 void *data, int len)
6368{
6369 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
6370 struct scsi_qla_host *ha = to_qla_host(shost);
6371 struct dev_db_entry *fw_ddb_entry = NULL;
6372 struct iscsi_flashnode_param_info *fnode_param;
6373 struct nlattr *attr;
6374 int rc = QLA_ERROR;
6375 uint32_t rem = len;
6376
6377 fw_ddb_entry = kzalloc(sizeof(*fw_ddb_entry), GFP_KERNEL);
6378 if (!fw_ddb_entry) {
6379 DEBUG2(ql4_printk(KERN_ERR, ha,
6380 "%s: Unable to allocate ddb buffer\n",
6381 __func__));
6382 return -ENOMEM;
6383 }
6384
6385 nla_for_each_attr(attr, data, len, rem) {
6386 fnode_param = nla_data(attr);
6387
6388 switch (fnode_param->param) {
6389 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
6390 fnode_conn->is_fw_assigned_ipv6 = fnode_param->value[0];
6391 break;
6392 case ISCSI_FLASHNODE_PORTAL_TYPE:
6393 memcpy(fnode_sess->portal_type, fnode_param->value,
6394 strlen(fnode_sess->portal_type));
6395 break;
6396 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
6397 fnode_sess->auto_snd_tgt_disable =
6398 fnode_param->value[0];
6399 break;
6400 case ISCSI_FLASHNODE_DISCOVERY_SESS:
6401 fnode_sess->discovery_sess = fnode_param->value[0];
6402 break;
6403 case ISCSI_FLASHNODE_ENTRY_EN:
6404 fnode_sess->entry_state = fnode_param->value[0];
6405 break;
6406 case ISCSI_FLASHNODE_HDR_DGST_EN:
6407 fnode_conn->hdrdgst_en = fnode_param->value[0];
6408 break;
6409 case ISCSI_FLASHNODE_DATA_DGST_EN:
6410 fnode_conn->datadgst_en = fnode_param->value[0];
6411 break;
6412 case ISCSI_FLASHNODE_IMM_DATA_EN:
6413 fnode_sess->imm_data_en = fnode_param->value[0];
6414 break;
6415 case ISCSI_FLASHNODE_INITIAL_R2T_EN:
6416 fnode_sess->initial_r2t_en = fnode_param->value[0];
6417 break;
6418 case ISCSI_FLASHNODE_DATASEQ_INORDER:
6419 fnode_sess->dataseq_inorder_en = fnode_param->value[0];
6420 break;
6421 case ISCSI_FLASHNODE_PDU_INORDER:
6422 fnode_sess->pdu_inorder_en = fnode_param->value[0];
6423 break;
6424 case ISCSI_FLASHNODE_CHAP_AUTH_EN:
6425 fnode_sess->chap_auth_en = fnode_param->value[0];
6426 break;
6427 case ISCSI_FLASHNODE_SNACK_REQ_EN:
6428 fnode_conn->snack_req_en = fnode_param->value[0];
6429 break;
6430 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
6431 fnode_sess->discovery_logout_en = fnode_param->value[0];
6432 break;
6433 case ISCSI_FLASHNODE_BIDI_CHAP_EN:
6434 fnode_sess->bidi_chap_en = fnode_param->value[0];
6435 break;
6436 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
6437 fnode_sess->discovery_auth_optional =
6438 fnode_param->value[0];
6439 break;
6440 case ISCSI_FLASHNODE_ERL:
6441 fnode_sess->erl = fnode_param->value[0];
6442 break;
6443 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
6444 fnode_conn->tcp_timestamp_stat = fnode_param->value[0];
6445 break;
6446 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
6447 fnode_conn->tcp_nagle_disable = fnode_param->value[0];
6448 break;
6449 case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
6450 fnode_conn->tcp_wsf_disable = fnode_param->value[0];
6451 break;
6452 case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
6453 fnode_conn->tcp_timer_scale = fnode_param->value[0];
6454 break;
6455 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
6456 fnode_conn->tcp_timestamp_en = fnode_param->value[0];
6457 break;
6458 case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
6459 fnode_conn->fragment_disable = fnode_param->value[0];
6460 break;
6461 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
6462 fnode_conn->max_recv_dlength =
6463 *(unsigned *)fnode_param->value;
6464 break;
6465 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
6466 fnode_conn->max_xmit_dlength =
6467 *(unsigned *)fnode_param->value;
6468 break;
6469 case ISCSI_FLASHNODE_FIRST_BURST:
6470 fnode_sess->first_burst =
6471 *(unsigned *)fnode_param->value;
6472 break;
6473 case ISCSI_FLASHNODE_DEF_TIME2WAIT:
6474 fnode_sess->time2wait = *(uint16_t *)fnode_param->value;
6475 break;
6476 case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
6477 fnode_sess->time2retain =
6478 *(uint16_t *)fnode_param->value;
6479 break;
6480 case ISCSI_FLASHNODE_MAX_R2T:
6481 fnode_sess->max_r2t =
6482 *(uint16_t *)fnode_param->value;
6483 break;
6484 case ISCSI_FLASHNODE_KEEPALIVE_TMO:
6485 fnode_conn->keepalive_timeout =
6486 *(uint16_t *)fnode_param->value;
6487 break;
6488 case ISCSI_FLASHNODE_ISID:
6489 memcpy(fnode_sess->isid, fnode_param->value,
6490 sizeof(fnode_sess->isid));
6491 break;
6492 case ISCSI_FLASHNODE_TSID:
6493 fnode_sess->tsid = *(uint16_t *)fnode_param->value;
6494 break;
6495 case ISCSI_FLASHNODE_PORT:
6496 fnode_conn->port = *(uint16_t *)fnode_param->value;
6497 break;
6498 case ISCSI_FLASHNODE_MAX_BURST:
6499 fnode_sess->max_burst = *(unsigned *)fnode_param->value;
6500 break;
6501 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
6502 fnode_sess->default_taskmgmt_timeout =
6503 *(uint16_t *)fnode_param->value;
6504 break;
6505 case ISCSI_FLASHNODE_IPADDR:
6506 memcpy(fnode_conn->ipaddress, fnode_param->value,
6507 IPv6_ADDR_LEN);
6508 break;
6509 case ISCSI_FLASHNODE_ALIAS:
6510 rc = iscsi_switch_str_param(&fnode_sess->targetalias,
6511 (char *)fnode_param->value);
6512 break;
6513 case ISCSI_FLASHNODE_REDIRECT_IPADDR:
6514 memcpy(fnode_conn->redirect_ipaddr, fnode_param->value,
6515 IPv6_ADDR_LEN);
6516 break;
6517 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
6518 fnode_conn->max_segment_size =
6519 *(unsigned *)fnode_param->value;
6520 break;
6521 case ISCSI_FLASHNODE_LOCAL_PORT:
6522 fnode_conn->local_port =
6523 *(uint16_t *)fnode_param->value;
6524 break;
6525 case ISCSI_FLASHNODE_IPV4_TOS:
6526 fnode_conn->ipv4_tos = fnode_param->value[0];
6527 break;
6528 case ISCSI_FLASHNODE_IPV6_TC:
6529 fnode_conn->ipv6_traffic_class = fnode_param->value[0];
6530 break;
6531 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
6532 fnode_conn->ipv6_flow_label = fnode_param->value[0];
6533 break;
6534 case ISCSI_FLASHNODE_NAME:
6535 rc = iscsi_switch_str_param(&fnode_sess->targetname,
6536 (char *)fnode_param->value);
6537 break;
6538 case ISCSI_FLASHNODE_TPGT:
6539 fnode_sess->tpgt = *(uint16_t *)fnode_param->value;
6540 break;
6541 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
6542 memcpy(fnode_conn->link_local_ipv6_addr,
6543 fnode_param->value, IPv6_ADDR_LEN);
6544 break;
6545 case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
6546 fnode_sess->discovery_parent_type =
6547 *(uint16_t *)fnode_param->value;
6548 break;
6549 case ISCSI_FLASHNODE_TCP_XMIT_WSF:
6550 fnode_conn->tcp_xmit_wsf =
6551 *(uint8_t *)fnode_param->value;
6552 break;
6553 case ISCSI_FLASHNODE_TCP_RECV_WSF:
6554 fnode_conn->tcp_recv_wsf =
6555 *(uint8_t *)fnode_param->value;
6556 break;
6557 case ISCSI_FLASHNODE_STATSN:
6558 fnode_conn->statsn = *(uint32_t *)fnode_param->value;
6559 break;
6560 case ISCSI_FLASHNODE_EXP_STATSN:
6561 fnode_conn->exp_statsn =
6562 *(uint32_t *)fnode_param->value;
6563 break;
6564 default:
6565 ql4_printk(KERN_ERR, ha,
6566 "%s: No such sysfs attribute\n", __func__);
6567 rc = -ENOSYS;
6568 goto exit_set_param;
6569 }
6570 }
6571
6572 rc = qla4xxx_sysfs_ddb_apply(fnode_sess, fnode_conn);
6573
6574exit_set_param:
6575 return rc;
6576}
6577
6578/**
6579 * qla4xxx_sysfs_ddb_delete - Delete firmware DDB entry
6580 * @fnode_sess: pointer to session attrs of flash ddb entry
6581 *
6582 * This invalidates the flash ddb entry at the given index
6583 **/
6584static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess)
6585{
6586 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
6587 struct scsi_qla_host *ha = to_qla_host(shost);
6588 uint32_t dev_db_start_offset;
6589 uint32_t dev_db_end_offset;
6590 struct dev_db_entry *fw_ddb_entry = NULL;
6591 dma_addr_t fw_ddb_entry_dma;
6592 uint16_t *ddb_cookie = NULL;
6593 size_t ddb_size;
6594 void *pddb = NULL;
6595 int target_id;
6596 int rc = 0;
6597
6598 if (!fnode_sess) {
6599 rc = -EINVAL;
6600 goto exit_ddb_del;
6601 }
6602
6603 if (fnode_sess->is_boot_target) {
6604 rc = -EPERM;
6605 DEBUG2(ql4_printk(KERN_ERR, ha,
6606 "%s: Deletion of boot target entry is not permitted.\n",
6607 __func__));
6608 goto exit_ddb_del;
6609 }
6610
6611 if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT)
6612 goto sysfs_ddb_del;
6613
6614 if (is_qla40XX(ha)) {
6615 dev_db_start_offset = FLASH_OFFSET_DB_INFO;
6616 dev_db_end_offset = FLASH_OFFSET_DB_END;
6617 dev_db_start_offset += (fnode_sess->target_id *
6618 sizeof(*fw_ddb_entry));
6619 ddb_size = sizeof(*fw_ddb_entry);
6620 } else {
6621 dev_db_start_offset = FLASH_RAW_ACCESS_ADDR +
6622 (ha->hw.flt_region_ddb << 2);
6623 /* flt_ddb_size is DDB table size for both ports
6624 * so divide it by 2 to calculate the offset for second port
6625 */
6626 if (ha->port_num == 1)
6627 dev_db_start_offset += (ha->hw.flt_ddb_size / 2);
6628
6629 dev_db_end_offset = dev_db_start_offset +
6630 (ha->hw.flt_ddb_size / 2);
6631
6632 dev_db_start_offset += (fnode_sess->target_id *
6633 sizeof(*fw_ddb_entry));
6634 dev_db_start_offset += (void *)&(fw_ddb_entry->cookie) -
6635 (void *)fw_ddb_entry;
6636
6637 ddb_size = sizeof(*ddb_cookie);
6638 }
6639
6640 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: start offset=%u, end offset=%u\n",
6641 __func__, dev_db_start_offset, dev_db_end_offset));
6642
6643 if (dev_db_start_offset > dev_db_end_offset) {
6644 rc = -EIO;
6645 DEBUG2(ql4_printk(KERN_ERR, ha, "%s:Invalid DDB index %u\n",
6646 __func__, fnode_sess->target_id));
6647 goto exit_ddb_del;
6648 }
6649
6650 pddb = dma_alloc_coherent(&ha->pdev->dev, ddb_size,
6651 &fw_ddb_entry_dma, GFP_KERNEL);
6652 if (!pddb) {
6653 rc = -ENOMEM;
6654 DEBUG2(ql4_printk(KERN_ERR, ha,
6655 "%s: Unable to allocate dma buffer\n",
6656 __func__));
6657 goto exit_ddb_del;
6658 }
6659
6660 if (is_qla40XX(ha)) {
6661 fw_ddb_entry = pddb;
6662 memset(fw_ddb_entry, 0, ddb_size);
6663 ddb_cookie = &fw_ddb_entry->cookie;
6664 } else {
6665 ddb_cookie = pddb;
6666 }
6667
6668 /* invalidate the cookie */
6669 *ddb_cookie = 0xFFEE;
6670 qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
6671 ddb_size, FLASH_OPT_RMW_COMMIT);
6672
6673sysfs_ddb_del:
6674 target_id = fnode_sess->target_id;
6675 iscsi_destroy_flashnode_sess(fnode_sess);
6676 ql4_printk(KERN_INFO, ha,
6677 "%s: session and conn entries for flashnode %u of host %lu deleted\n",
6678 __func__, target_id, ha->host_no);
6679exit_ddb_del:
6680 if (pddb)
6681 dma_free_coherent(&ha->pdev->dev, ddb_size, pddb,
6682 fw_ddb_entry_dma);
6683 return rc;
6684}
6685
6686/**
6687 * qla4xxx_sysfs_ddb_export - Create sysfs entries for firmware DDBs
6688 * @ha: pointer to adapter structure
6689 *
6690 * Export the firmware DDB for all send targets and normal targets to sysfs.
6691 **/
6692static int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha)
6693{
6694 struct dev_db_entry *fw_ddb_entry = NULL;
6695 dma_addr_t fw_ddb_entry_dma;
6696 uint16_t max_ddbs;
6697 uint16_t idx = 0;
6698 int ret = QLA_SUCCESS;
6699
6700 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
6701 sizeof(*fw_ddb_entry),
6702 &fw_ddb_entry_dma, GFP_KERNEL);
6703 if (!fw_ddb_entry) {
6704 DEBUG2(ql4_printk(KERN_ERR, ha,
6705 "%s: Unable to allocate dma buffer\n",
6706 __func__));
6707 return -ENOMEM;
6708 }
6709
6710 max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
6711 MAX_DEV_DB_ENTRIES;
6712
6713 for (idx = 0; idx < max_ddbs; idx++) {
6714 if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, fw_ddb_entry_dma,
6715 idx))
6716 continue;
6717
6718 ret = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 0);
6719 if (ret) {
6720 ret = -EIO;
6721 break;
6722 }
6723 }
6724
6725 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry,
6726 fw_ddb_entry_dma);
6727
6728 return ret;
6729}
6730
/**
 * qla4xxx_sysfs_ddb_remove - Remove all flashnode sysfs entries for a host
 * @ha: pointer to adapter structure
 *
 * Companion to qla4xxx_sysfs_ddb_export(): tears down every flashnode
 * session/connection sysfs object attached to this host.
 **/
static void qla4xxx_sysfs_ddb_remove(struct scsi_qla_host *ha)
{
	iscsi_destroy_all_flashnode(ha->host);
}
6735
5061/** 6736/**
5062 * qla4xxx_build_ddb_list - Build ddb list and setup sessions 6737 * qla4xxx_build_ddb_list - Build ddb list and setup sessions
5063 * @ha: pointer to adapter structure 6738 * @ha: pointer to adapter structure
@@ -5341,8 +7016,11 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev,
5341 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER); 7016 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
5342 7017
5343 /* Dont retry adapter initialization if IRQ allocation failed */ 7018 /* Dont retry adapter initialization if IRQ allocation failed */
5344 if (!test_bit(AF_IRQ_ATTACHED, &ha->flags)) 7019 if (is_qla80XX(ha) && !test_bit(AF_IRQ_ATTACHED, &ha->flags)) {
7020 ql4_printk(KERN_WARNING, ha, "%s: Skipping retry of adapter initialization\n",
7021 __func__);
5345 goto skip_retry_init; 7022 goto skip_retry_init;
7023 }
5346 7024
5347 while ((!test_bit(AF_ONLINE, &ha->flags)) && 7025 while ((!test_bit(AF_ONLINE, &ha->flags)) &&
5348 init_retry_count++ < MAX_INIT_RETRIES) { 7026 init_retry_count++ < MAX_INIT_RETRIES) {
@@ -5445,6 +7123,10 @@ skip_retry_init:
5445 ql4_printk(KERN_ERR, ha, 7123 ql4_printk(KERN_ERR, ha,
5446 "%s: No iSCSI boot target configured\n", __func__); 7124 "%s: No iSCSI boot target configured\n", __func__);
5447 7125
7126 if (qla4xxx_sysfs_ddb_export(ha))
7127 ql4_printk(KERN_ERR, ha,
7128 "%s: Error exporting ddb to sysfs\n", __func__);
7129
5448 /* Perform the build ddb list and login to each */ 7130 /* Perform the build ddb list and login to each */
5449 qla4xxx_build_ddb_list(ha, INIT_ADAPTER); 7131 qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
5450 iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb); 7132 iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
@@ -5570,6 +7252,7 @@ static void qla4xxx_remove_adapter(struct pci_dev *pdev)
5570 qla4xxx_destroy_fw_ddb_session(ha); 7252 qla4xxx_destroy_fw_ddb_session(ha);
5571 qla4_8xxx_free_sysfs_attr(ha); 7253 qla4_8xxx_free_sysfs_attr(ha);
5572 7254
7255 qla4xxx_sysfs_ddb_remove(ha);
5573 scsi_remove_host(ha->host); 7256 scsi_remove_host(ha->host);
5574 7257
5575 qla4xxx_free_adapter(ha); 7258 qla4xxx_free_adapter(ha);
@@ -5669,7 +7352,6 @@ struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
5669 7352
5670 /* update counters */ 7353 /* update counters */
5671 if (srb->flags & SRB_DMA_VALID) { 7354 if (srb->flags & SRB_DMA_VALID) {
5672 ha->req_q_count += srb->iocb_cnt;
5673 ha->iocb_cnt -= srb->iocb_cnt; 7355 ha->iocb_cnt -= srb->iocb_cnt;
5674 if (srb->cmd) 7356 if (srb->cmd)
5675 srb->cmd->host_scribble = 7357 srb->cmd->host_scribble =
@@ -6081,6 +7763,7 @@ static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
6081{ 7763{
6082 struct scsi_qla_host *ha = to_qla_host(shost); 7764 struct scsi_qla_host *ha = to_qla_host(shost);
6083 int rval = QLA_SUCCESS; 7765 int rval = QLA_SUCCESS;
7766 uint32_t idc_ctrl;
6084 7767
6085 if (ql4xdontresethba) { 7768 if (ql4xdontresethba) {
6086 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n", 7769 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n",
@@ -6111,6 +7794,14 @@ static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
6111 } 7794 }
6112 7795
6113recover_adapter: 7796recover_adapter:
7797 /* For ISP83XX set graceful reset bit in IDC_DRV_CTRL if
7798 * reset is issued by application */
7799 if (is_qla8032(ha) && test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
7800 idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
7801 qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
7802 (idc_ctrl | GRACEFUL_RESET_BIT1));
7803 }
7804
6114 rval = qla4xxx_recover_adapter(ha); 7805 rval = qla4xxx_recover_adapter(ha);
6115 if (rval != QLA_SUCCESS) { 7806 if (rval != QLA_SUCCESS) {
6116 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n", 7807 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n",
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 6775a45af315..83e0fec35d56 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7 7
8#define QLA4XXX_DRIVER_VERSION "5.03.00-k4" 8#define QLA4XXX_DRIVER_VERSION "5.03.00-k8"
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 0a74b975efdf..ce06e8772f3a 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -25,6 +25,7 @@
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/bsg-lib.h> 26#include <linux/bsg-lib.h>
27#include <linux/idr.h> 27#include <linux/idr.h>
28#include <linux/list.h>
28#include <net/tcp.h> 29#include <net/tcp.h>
29#include <scsi/scsi.h> 30#include <scsi/scsi.h>
30#include <scsi/scsi_host.h> 31#include <scsi/scsi_host.h>
@@ -460,6 +461,689 @@ void iscsi_destroy_iface(struct iscsi_iface *iface)
460EXPORT_SYMBOL_GPL(iscsi_destroy_iface); 461EXPORT_SYMBOL_GPL(iscsi_destroy_iface);
461 462
462/* 463/*
464 * Interface to display flash node params to sysfs
465 */
466
467#define ISCSI_FLASHNODE_ATTR(_prefix, _name, _mode, _show, _store) \
468struct device_attribute dev_attr_##_prefix##_##_name = \
469 __ATTR(_name, _mode, _show, _store)
470
471/* flash node session attrs show */
472#define iscsi_flashnode_sess_attr_show(type, name, param) \
473static ssize_t \
474show_##type##_##name(struct device *dev, struct device_attribute *attr, \
475 char *buf) \
476{ \
477 struct iscsi_bus_flash_session *fnode_sess = \
478 iscsi_dev_to_flash_session(dev);\
479 struct iscsi_transport *t = fnode_sess->transport; \
480 return t->get_flashnode_param(fnode_sess, param, buf); \
481} \
482
483
484#define iscsi_flashnode_sess_attr(type, name, param) \
485 iscsi_flashnode_sess_attr_show(type, name, param) \
486static ISCSI_FLASHNODE_ATTR(type, name, S_IRUGO, \
487 show_##type##_##name, NULL);
488
489/* Flash node session attributes */
490
491iscsi_flashnode_sess_attr(fnode, auto_snd_tgt_disable,
492 ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE);
493iscsi_flashnode_sess_attr(fnode, discovery_session,
494 ISCSI_FLASHNODE_DISCOVERY_SESS);
495iscsi_flashnode_sess_attr(fnode, portal_type, ISCSI_FLASHNODE_PORTAL_TYPE);
496iscsi_flashnode_sess_attr(fnode, entry_enable, ISCSI_FLASHNODE_ENTRY_EN);
497iscsi_flashnode_sess_attr(fnode, immediate_data, ISCSI_FLASHNODE_IMM_DATA_EN);
498iscsi_flashnode_sess_attr(fnode, initial_r2t, ISCSI_FLASHNODE_INITIAL_R2T_EN);
499iscsi_flashnode_sess_attr(fnode, data_seq_in_order,
500 ISCSI_FLASHNODE_DATASEQ_INORDER);
501iscsi_flashnode_sess_attr(fnode, data_pdu_in_order,
502 ISCSI_FLASHNODE_PDU_INORDER);
503iscsi_flashnode_sess_attr(fnode, chap_auth, ISCSI_FLASHNODE_CHAP_AUTH_EN);
504iscsi_flashnode_sess_attr(fnode, discovery_logout,
505 ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN);
506iscsi_flashnode_sess_attr(fnode, bidi_chap, ISCSI_FLASHNODE_BIDI_CHAP_EN);
507iscsi_flashnode_sess_attr(fnode, discovery_auth_optional,
508 ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL);
509iscsi_flashnode_sess_attr(fnode, erl, ISCSI_FLASHNODE_ERL);
510iscsi_flashnode_sess_attr(fnode, first_burst_len, ISCSI_FLASHNODE_FIRST_BURST);
511iscsi_flashnode_sess_attr(fnode, def_time2wait, ISCSI_FLASHNODE_DEF_TIME2WAIT);
512iscsi_flashnode_sess_attr(fnode, def_time2retain,
513 ISCSI_FLASHNODE_DEF_TIME2RETAIN);
514iscsi_flashnode_sess_attr(fnode, max_outstanding_r2t, ISCSI_FLASHNODE_MAX_R2T);
515iscsi_flashnode_sess_attr(fnode, isid, ISCSI_FLASHNODE_ISID);
516iscsi_flashnode_sess_attr(fnode, tsid, ISCSI_FLASHNODE_TSID);
517iscsi_flashnode_sess_attr(fnode, max_burst_len, ISCSI_FLASHNODE_MAX_BURST);
518iscsi_flashnode_sess_attr(fnode, def_taskmgmt_tmo,
519 ISCSI_FLASHNODE_DEF_TASKMGMT_TMO);
520iscsi_flashnode_sess_attr(fnode, targetalias, ISCSI_FLASHNODE_ALIAS);
521iscsi_flashnode_sess_attr(fnode, targetname, ISCSI_FLASHNODE_NAME);
522iscsi_flashnode_sess_attr(fnode, tpgt, ISCSI_FLASHNODE_TPGT);
523iscsi_flashnode_sess_attr(fnode, discovery_parent_idx,
524 ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX);
525iscsi_flashnode_sess_attr(fnode, discovery_parent_type,
526 ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE);
527iscsi_flashnode_sess_attr(fnode, chap_in_idx, ISCSI_FLASHNODE_CHAP_IN_IDX);
528iscsi_flashnode_sess_attr(fnode, chap_out_idx, ISCSI_FLASHNODE_CHAP_OUT_IDX);
529iscsi_flashnode_sess_attr(fnode, username, ISCSI_FLASHNODE_USERNAME);
530iscsi_flashnode_sess_attr(fnode, username_in, ISCSI_FLASHNODE_USERNAME_IN);
531iscsi_flashnode_sess_attr(fnode, password, ISCSI_FLASHNODE_PASSWORD);
532iscsi_flashnode_sess_attr(fnode, password_in, ISCSI_FLASHNODE_PASSWORD_IN);
533iscsi_flashnode_sess_attr(fnode, is_boot_target, ISCSI_FLASHNODE_IS_BOOT_TGT);
534
/* Sysfs attribute table for a flashnode session device.  Every entry here
 * must have a matching branch in iscsi_flashnode_sess_attr_is_visible(),
 * which maps the attribute back to its ISCSI_FLASHNODE_* parameter. */
static struct attribute *iscsi_flashnode_sess_attrs[] = {
	&dev_attr_fnode_auto_snd_tgt_disable.attr,
	&dev_attr_fnode_discovery_session.attr,
	&dev_attr_fnode_portal_type.attr,
	&dev_attr_fnode_entry_enable.attr,
	&dev_attr_fnode_immediate_data.attr,
	&dev_attr_fnode_initial_r2t.attr,
	&dev_attr_fnode_data_seq_in_order.attr,
	&dev_attr_fnode_data_pdu_in_order.attr,
	&dev_attr_fnode_chap_auth.attr,
	&dev_attr_fnode_discovery_logout.attr,
	&dev_attr_fnode_bidi_chap.attr,
	&dev_attr_fnode_discovery_auth_optional.attr,
	&dev_attr_fnode_erl.attr,
	&dev_attr_fnode_first_burst_len.attr,
	&dev_attr_fnode_def_time2wait.attr,
	&dev_attr_fnode_def_time2retain.attr,
	&dev_attr_fnode_max_outstanding_r2t.attr,
	&dev_attr_fnode_isid.attr,
	&dev_attr_fnode_tsid.attr,
	&dev_attr_fnode_max_burst_len.attr,
	&dev_attr_fnode_def_taskmgmt_tmo.attr,
	&dev_attr_fnode_targetalias.attr,
	&dev_attr_fnode_targetname.attr,
	&dev_attr_fnode_tpgt.attr,
	&dev_attr_fnode_discovery_parent_idx.attr,
	&dev_attr_fnode_discovery_parent_type.attr,
	&dev_attr_fnode_chap_in_idx.attr,
	&dev_attr_fnode_chap_out_idx.attr,
	&dev_attr_fnode_username.attr,
	&dev_attr_fnode_username_in.attr,
	&dev_attr_fnode_password.attr,
	&dev_attr_fnode_password_in.attr,
	&dev_attr_fnode_is_boot_target.attr,
	NULL,
};
571
/* Sysfs visibility callback for flashnode session attributes.  Translates
 * the attribute pointer to its ISCSI_FLASHNODE_* parameter number and
 * delegates the visibility decision to the owning transport, so each LLD
 * exposes only the parameters it supports. */
static umode_t iscsi_flashnode_sess_attr_is_visible(struct kobject *kobj,
						    struct attribute *attr,
						    int i)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct iscsi_bus_flash_session *fnode_sess =
						iscsi_dev_to_flash_session(dev);
	struct iscsi_transport *t = fnode_sess->transport;
	int param;

	/* One branch per entry in iscsi_flashnode_sess_attrs[] */
	if (attr == &dev_attr_fnode_auto_snd_tgt_disable.attr) {
		param = ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE;
	} else if (attr == &dev_attr_fnode_discovery_session.attr) {
		param = ISCSI_FLASHNODE_DISCOVERY_SESS;
	} else if (attr == &dev_attr_fnode_portal_type.attr) {
		param = ISCSI_FLASHNODE_PORTAL_TYPE;
	} else if (attr == &dev_attr_fnode_entry_enable.attr) {
		param = ISCSI_FLASHNODE_ENTRY_EN;
	} else if (attr == &dev_attr_fnode_immediate_data.attr) {
		param = ISCSI_FLASHNODE_IMM_DATA_EN;
	} else if (attr == &dev_attr_fnode_initial_r2t.attr) {
		param = ISCSI_FLASHNODE_INITIAL_R2T_EN;
	} else if (attr == &dev_attr_fnode_data_seq_in_order.attr) {
		param = ISCSI_FLASHNODE_DATASEQ_INORDER;
	} else if (attr == &dev_attr_fnode_data_pdu_in_order.attr) {
		param = ISCSI_FLASHNODE_PDU_INORDER;
	} else if (attr == &dev_attr_fnode_chap_auth.attr) {
		param = ISCSI_FLASHNODE_CHAP_AUTH_EN;
	} else if (attr == &dev_attr_fnode_discovery_logout.attr) {
		param = ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN;
	} else if (attr == &dev_attr_fnode_bidi_chap.attr) {
		param = ISCSI_FLASHNODE_BIDI_CHAP_EN;
	} else if (attr == &dev_attr_fnode_discovery_auth_optional.attr) {
		param = ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL;
	} else if (attr == &dev_attr_fnode_erl.attr) {
		param = ISCSI_FLASHNODE_ERL;
	} else if (attr == &dev_attr_fnode_first_burst_len.attr) {
		param = ISCSI_FLASHNODE_FIRST_BURST;
	} else if (attr == &dev_attr_fnode_def_time2wait.attr) {
		param = ISCSI_FLASHNODE_DEF_TIME2WAIT;
	} else if (attr == &dev_attr_fnode_def_time2retain.attr) {
		param = ISCSI_FLASHNODE_DEF_TIME2RETAIN;
	} else if (attr == &dev_attr_fnode_max_outstanding_r2t.attr) {
		param = ISCSI_FLASHNODE_MAX_R2T;
	} else if (attr == &dev_attr_fnode_isid.attr) {
		param = ISCSI_FLASHNODE_ISID;
	} else if (attr == &dev_attr_fnode_tsid.attr) {
		param = ISCSI_FLASHNODE_TSID;
	} else if (attr == &dev_attr_fnode_max_burst_len.attr) {
		param = ISCSI_FLASHNODE_MAX_BURST;
	} else if (attr == &dev_attr_fnode_def_taskmgmt_tmo.attr) {
		param = ISCSI_FLASHNODE_DEF_TASKMGMT_TMO;
	} else if (attr == &dev_attr_fnode_targetalias.attr) {
		param = ISCSI_FLASHNODE_ALIAS;
	} else if (attr == &dev_attr_fnode_targetname.attr) {
		param = ISCSI_FLASHNODE_NAME;
	} else if (attr == &dev_attr_fnode_tpgt.attr) {
		param = ISCSI_FLASHNODE_TPGT;
	} else if (attr == &dev_attr_fnode_discovery_parent_idx.attr) {
		param = ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX;
	} else if (attr == &dev_attr_fnode_discovery_parent_type.attr) {
		param = ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE;
	} else if (attr == &dev_attr_fnode_chap_in_idx.attr) {
		param = ISCSI_FLASHNODE_CHAP_IN_IDX;
	} else if (attr == &dev_attr_fnode_chap_out_idx.attr) {
		param = ISCSI_FLASHNODE_CHAP_OUT_IDX;
	} else if (attr == &dev_attr_fnode_username.attr) {
		param = ISCSI_FLASHNODE_USERNAME;
	} else if (attr == &dev_attr_fnode_username_in.attr) {
		param = ISCSI_FLASHNODE_USERNAME_IN;
	} else if (attr == &dev_attr_fnode_password.attr) {
		param = ISCSI_FLASHNODE_PASSWORD;
	} else if (attr == &dev_attr_fnode_password_in.attr) {
		param = ISCSI_FLASHNODE_PASSWORD_IN;
	} else if (attr == &dev_attr_fnode_is_boot_target.attr) {
		param = ISCSI_FLASHNODE_IS_BOOT_TGT;
	} else {
		/* Table and this chain are out of sync — programming error */
		WARN_ONCE(1, "Invalid flashnode session attr");
		return 0;
	}

	return t->attr_is_visible(ISCSI_FLASHNODE_PARAM, param);
}
655
/* Attribute group wiring the session attribute table to the per-transport
 * visibility callback. */
static struct attribute_group iscsi_flashnode_sess_attr_group = {
	.attrs = iscsi_flashnode_sess_attrs,
	.is_visible = iscsi_flashnode_sess_attr_is_visible,
};

/* NULL-terminated group list consumed by the device_type below. */
static const struct attribute_group *iscsi_flashnode_sess_attr_groups[] = {
	&iscsi_flashnode_sess_attr_group,
	NULL,
};
665
666static void iscsi_flashnode_sess_release(struct device *dev)
667{
668 struct iscsi_bus_flash_session *fnode_sess =
669 iscsi_dev_to_flash_session(dev);
670
671 kfree(fnode_sess->targetname);
672 kfree(fnode_sess->targetalias);
673 kfree(fnode_sess->portal_type);
674 kfree(fnode_sess);
675}
676
/* device_type for flashnode session devices: attaches the sysfs attribute
 * groups and the release handler that frees the session. */
struct device_type iscsi_flashnode_sess_dev_type = {
	.name = "iscsi_flashnode_sess_dev_type",
	.groups = iscsi_flashnode_sess_attr_groups,
	.release = iscsi_flashnode_sess_release,
};
682
683/* flash node connection attrs show */
684#define iscsi_flashnode_conn_attr_show(type, name, param) \
685static ssize_t \
686show_##type##_##name(struct device *dev, struct device_attribute *attr, \
687 char *buf) \
688{ \
689 struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev);\
690 struct iscsi_bus_flash_session *fnode_sess = \
691 iscsi_flash_conn_to_flash_session(fnode_conn);\
692 struct iscsi_transport *t = fnode_conn->transport; \
693 return t->get_flashnode_param(fnode_sess, param, buf); \
694} \
695
696
697#define iscsi_flashnode_conn_attr(type, name, param) \
698 iscsi_flashnode_conn_attr_show(type, name, param) \
699static ISCSI_FLASHNODE_ATTR(type, name, S_IRUGO, \
700 show_##type##_##name, NULL);
701
702/* Flash node connection attributes */
703
704iscsi_flashnode_conn_attr(fnode, is_fw_assigned_ipv6,
705 ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6);
706iscsi_flashnode_conn_attr(fnode, header_digest, ISCSI_FLASHNODE_HDR_DGST_EN);
707iscsi_flashnode_conn_attr(fnode, data_digest, ISCSI_FLASHNODE_DATA_DGST_EN);
708iscsi_flashnode_conn_attr(fnode, snack_req, ISCSI_FLASHNODE_SNACK_REQ_EN);
709iscsi_flashnode_conn_attr(fnode, tcp_timestamp_stat,
710 ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT);
711iscsi_flashnode_conn_attr(fnode, tcp_nagle_disable,
712 ISCSI_FLASHNODE_TCP_NAGLE_DISABLE);
713iscsi_flashnode_conn_attr(fnode, tcp_wsf_disable,
714 ISCSI_FLASHNODE_TCP_WSF_DISABLE);
715iscsi_flashnode_conn_attr(fnode, tcp_timer_scale,
716 ISCSI_FLASHNODE_TCP_TIMER_SCALE);
717iscsi_flashnode_conn_attr(fnode, tcp_timestamp_enable,
718 ISCSI_FLASHNODE_TCP_TIMESTAMP_EN);
719iscsi_flashnode_conn_attr(fnode, fragment_disable,
720 ISCSI_FLASHNODE_IP_FRAG_DISABLE);
721iscsi_flashnode_conn_attr(fnode, keepalive_tmo, ISCSI_FLASHNODE_KEEPALIVE_TMO);
722iscsi_flashnode_conn_attr(fnode, port, ISCSI_FLASHNODE_PORT);
723iscsi_flashnode_conn_attr(fnode, ipaddress, ISCSI_FLASHNODE_IPADDR);
724iscsi_flashnode_conn_attr(fnode, max_recv_dlength,
725 ISCSI_FLASHNODE_MAX_RECV_DLENGTH);
726iscsi_flashnode_conn_attr(fnode, max_xmit_dlength,
727 ISCSI_FLASHNODE_MAX_XMIT_DLENGTH);
728iscsi_flashnode_conn_attr(fnode, local_port, ISCSI_FLASHNODE_LOCAL_PORT);
729iscsi_flashnode_conn_attr(fnode, ipv4_tos, ISCSI_FLASHNODE_IPV4_TOS);
730iscsi_flashnode_conn_attr(fnode, ipv6_traffic_class, ISCSI_FLASHNODE_IPV6_TC);
731iscsi_flashnode_conn_attr(fnode, ipv6_flow_label,
732 ISCSI_FLASHNODE_IPV6_FLOW_LABEL);
733iscsi_flashnode_conn_attr(fnode, redirect_ipaddr,
734 ISCSI_FLASHNODE_REDIRECT_IPADDR);
735iscsi_flashnode_conn_attr(fnode, max_segment_size,
736 ISCSI_FLASHNODE_MAX_SEGMENT_SIZE);
737iscsi_flashnode_conn_attr(fnode, link_local_ipv6,
738 ISCSI_FLASHNODE_LINK_LOCAL_IPV6);
739iscsi_flashnode_conn_attr(fnode, tcp_xmit_wsf, ISCSI_FLASHNODE_TCP_XMIT_WSF);
740iscsi_flashnode_conn_attr(fnode, tcp_recv_wsf, ISCSI_FLASHNODE_TCP_RECV_WSF);
741iscsi_flashnode_conn_attr(fnode, statsn, ISCSI_FLASHNODE_STATSN);
742iscsi_flashnode_conn_attr(fnode, exp_statsn, ISCSI_FLASHNODE_EXP_STATSN);
743
/* Sysfs attribute table for a flashnode connection device.  Every entry
 * here must have a matching branch in
 * iscsi_flashnode_conn_attr_is_visible(). */
static struct attribute *iscsi_flashnode_conn_attrs[] = {
	&dev_attr_fnode_is_fw_assigned_ipv6.attr,
	&dev_attr_fnode_header_digest.attr,
	&dev_attr_fnode_data_digest.attr,
	&dev_attr_fnode_snack_req.attr,
	&dev_attr_fnode_tcp_timestamp_stat.attr,
	&dev_attr_fnode_tcp_nagle_disable.attr,
	&dev_attr_fnode_tcp_wsf_disable.attr,
	&dev_attr_fnode_tcp_timer_scale.attr,
	&dev_attr_fnode_tcp_timestamp_enable.attr,
	&dev_attr_fnode_fragment_disable.attr,
	&dev_attr_fnode_max_recv_dlength.attr,
	&dev_attr_fnode_max_xmit_dlength.attr,
	&dev_attr_fnode_keepalive_tmo.attr,
	&dev_attr_fnode_port.attr,
	&dev_attr_fnode_ipaddress.attr,
	&dev_attr_fnode_redirect_ipaddr.attr,
	&dev_attr_fnode_max_segment_size.attr,
	&dev_attr_fnode_local_port.attr,
	&dev_attr_fnode_ipv4_tos.attr,
	&dev_attr_fnode_ipv6_traffic_class.attr,
	&dev_attr_fnode_ipv6_flow_label.attr,
	&dev_attr_fnode_link_local_ipv6.attr,
	&dev_attr_fnode_tcp_xmit_wsf.attr,
	&dev_attr_fnode_tcp_recv_wsf.attr,
	&dev_attr_fnode_statsn.attr,
	&dev_attr_fnode_exp_statsn.attr,
	NULL,
};
773
/* Sysfs visibility callback for flashnode connection attributes.  Maps the
 * attribute pointer to its ISCSI_FLASHNODE_* parameter and asks the owning
 * transport whether to expose it. */
static umode_t iscsi_flashnode_conn_attr_is_visible(struct kobject *kobj,
						    struct attribute *attr,
						    int i)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev);
	struct iscsi_transport *t = fnode_conn->transport;
	int param;

	/* One branch per entry in iscsi_flashnode_conn_attrs[] */
	if (attr == &dev_attr_fnode_is_fw_assigned_ipv6.attr) {
		param = ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6;
	} else if (attr == &dev_attr_fnode_header_digest.attr) {
		param = ISCSI_FLASHNODE_HDR_DGST_EN;
	} else if (attr == &dev_attr_fnode_data_digest.attr) {
		param = ISCSI_FLASHNODE_DATA_DGST_EN;
	} else if (attr == &dev_attr_fnode_snack_req.attr) {
		param = ISCSI_FLASHNODE_SNACK_REQ_EN;
	} else if (attr == &dev_attr_fnode_tcp_timestamp_stat.attr) {
		param = ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT;
	} else if (attr == &dev_attr_fnode_tcp_nagle_disable.attr) {
		param = ISCSI_FLASHNODE_TCP_NAGLE_DISABLE;
	} else if (attr == &dev_attr_fnode_tcp_wsf_disable.attr) {
		param = ISCSI_FLASHNODE_TCP_WSF_DISABLE;
	} else if (attr == &dev_attr_fnode_tcp_timer_scale.attr) {
		param = ISCSI_FLASHNODE_TCP_TIMER_SCALE;
	} else if (attr == &dev_attr_fnode_tcp_timestamp_enable.attr) {
		param = ISCSI_FLASHNODE_TCP_TIMESTAMP_EN;
	} else if (attr == &dev_attr_fnode_fragment_disable.attr) {
		param = ISCSI_FLASHNODE_IP_FRAG_DISABLE;
	} else if (attr == &dev_attr_fnode_max_recv_dlength.attr) {
		param = ISCSI_FLASHNODE_MAX_RECV_DLENGTH;
	} else if (attr == &dev_attr_fnode_max_xmit_dlength.attr) {
		param = ISCSI_FLASHNODE_MAX_XMIT_DLENGTH;
	} else if (attr == &dev_attr_fnode_keepalive_tmo.attr) {
		param = ISCSI_FLASHNODE_KEEPALIVE_TMO;
	} else if (attr == &dev_attr_fnode_port.attr) {
		param = ISCSI_FLASHNODE_PORT;
	} else if (attr == &dev_attr_fnode_ipaddress.attr) {
		param = ISCSI_FLASHNODE_IPADDR;
	} else if (attr == &dev_attr_fnode_redirect_ipaddr.attr) {
		param = ISCSI_FLASHNODE_REDIRECT_IPADDR;
	} else if (attr == &dev_attr_fnode_max_segment_size.attr) {
		param = ISCSI_FLASHNODE_MAX_SEGMENT_SIZE;
	} else if (attr == &dev_attr_fnode_local_port.attr) {
		param = ISCSI_FLASHNODE_LOCAL_PORT;
	} else if (attr == &dev_attr_fnode_ipv4_tos.attr) {
		param = ISCSI_FLASHNODE_IPV4_TOS;
	} else if (attr == &dev_attr_fnode_ipv6_traffic_class.attr) {
		param = ISCSI_FLASHNODE_IPV6_TC;
	} else if (attr == &dev_attr_fnode_ipv6_flow_label.attr) {
		param = ISCSI_FLASHNODE_IPV6_FLOW_LABEL;
	} else if (attr == &dev_attr_fnode_link_local_ipv6.attr) {
		param = ISCSI_FLASHNODE_LINK_LOCAL_IPV6;
	} else if (attr == &dev_attr_fnode_tcp_xmit_wsf.attr) {
		param = ISCSI_FLASHNODE_TCP_XMIT_WSF;
	} else if (attr == &dev_attr_fnode_tcp_recv_wsf.attr) {
		param = ISCSI_FLASHNODE_TCP_RECV_WSF;
	} else if (attr == &dev_attr_fnode_statsn.attr) {
		param = ISCSI_FLASHNODE_STATSN;
	} else if (attr == &dev_attr_fnode_exp_statsn.attr) {
		param = ISCSI_FLASHNODE_EXP_STATSN;
	} else {
		/* Table and this chain are out of sync — programming error */
		WARN_ONCE(1, "Invalid flashnode connection attr");
		return 0;
	}

	return t->attr_is_visible(ISCSI_FLASHNODE_PARAM, param);
}
842
/* sysfs attribute group for flashnode connection devices; visibility of
 * each attribute is delegated to the transport via ->attr_is_visible(). */
static struct attribute_group iscsi_flashnode_conn_attr_group = {
	.attrs = iscsi_flashnode_conn_attrs,
	.is_visible = iscsi_flashnode_conn_attr_is_visible,
};

/* NULL-terminated group list wired into iscsi_flashnode_conn_dev_type */
static const struct attribute_group *iscsi_flashnode_conn_attr_groups[] = {
	&iscsi_flashnode_conn_attr_group,
	NULL,
};
852
853static void iscsi_flashnode_conn_release(struct device *dev)
854{
855 struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev);
856
857 kfree(fnode_conn->ipaddress);
858 kfree(fnode_conn->redirect_ipaddr);
859 kfree(fnode_conn->link_local_ipv6_addr);
860 kfree(fnode_conn);
861}
862
/* device_type for flashnode connection devices: ties the sysfs attribute
 * groups and the release callback to every connection created by
 * iscsi_create_flashnode_conn(). */
struct device_type iscsi_flashnode_conn_dev_type = {
	.name = "iscsi_flashnode_conn_dev_type",
	.groups = iscsi_flashnode_conn_attr_groups,
	.release = iscsi_flashnode_conn_release,
};
868
869struct bus_type iscsi_flashnode_bus;
870
871int iscsi_flashnode_bus_match(struct device *dev,
872 struct device_driver *drv)
873{
874 if (dev->bus == &iscsi_flashnode_bus)
875 return 1;
876 return 0;
877}
878EXPORT_SYMBOL_GPL(iscsi_flashnode_bus_match);
879
880struct bus_type iscsi_flashnode_bus = {
881 .name = "iscsi_flashnode",
882 .match = &iscsi_flashnode_bus_match,
883};
884
885/**
886 * iscsi_create_flashnode_sess - Add flashnode session entry in sysfs
887 * @shost: pointer to host data
888 * @index: index of flashnode to add in sysfs
889 * @transport: pointer to transport data
890 * @dd_size: total size to allocate
891 *
892 * Adds a sysfs entry for the flashnode session attributes
893 *
894 * Returns:
895 * pointer to allocated flashnode sess on sucess
896 * %NULL on failure
897 */
898struct iscsi_bus_flash_session *
899iscsi_create_flashnode_sess(struct Scsi_Host *shost, int index,
900 struct iscsi_transport *transport,
901 int dd_size)
902{
903 struct iscsi_bus_flash_session *fnode_sess;
904 int err;
905
906 fnode_sess = kzalloc(sizeof(*fnode_sess) + dd_size, GFP_KERNEL);
907 if (!fnode_sess)
908 return NULL;
909
910 fnode_sess->transport = transport;
911 fnode_sess->target_id = index;
912 fnode_sess->dev.type = &iscsi_flashnode_sess_dev_type;
913 fnode_sess->dev.bus = &iscsi_flashnode_bus;
914 fnode_sess->dev.parent = &shost->shost_gendev;
915 dev_set_name(&fnode_sess->dev, "flashnode_sess-%u:%u",
916 shost->host_no, index);
917
918 err = device_register(&fnode_sess->dev);
919 if (err)
920 goto free_fnode_sess;
921
922 if (dd_size)
923 fnode_sess->dd_data = &fnode_sess[1];
924
925 return fnode_sess;
926
927free_fnode_sess:
928 kfree(fnode_sess);
929 return NULL;
930}
931EXPORT_SYMBOL_GPL(iscsi_create_flashnode_sess);
932
933/**
934 * iscsi_create_flashnode_conn - Add flashnode conn entry in sysfs
935 * @shost: pointer to host data
936 * @fnode_sess: pointer to the parent flashnode session entry
937 * @transport: pointer to transport data
938 * @dd_size: total size to allocate
939 *
940 * Adds a sysfs entry for the flashnode connection attributes
941 *
942 * Returns:
943 * pointer to allocated flashnode conn on success
944 * %NULL on failure
945 */
946struct iscsi_bus_flash_conn *
947iscsi_create_flashnode_conn(struct Scsi_Host *shost,
948 struct iscsi_bus_flash_session *fnode_sess,
949 struct iscsi_transport *transport,
950 int dd_size)
951{
952 struct iscsi_bus_flash_conn *fnode_conn;
953 int err;
954
955 fnode_conn = kzalloc(sizeof(*fnode_conn) + dd_size, GFP_KERNEL);
956 if (!fnode_conn)
957 return NULL;
958
959 fnode_conn->transport = transport;
960 fnode_conn->dev.type = &iscsi_flashnode_conn_dev_type;
961 fnode_conn->dev.bus = &iscsi_flashnode_bus;
962 fnode_conn->dev.parent = &fnode_sess->dev;
963 dev_set_name(&fnode_conn->dev, "flashnode_conn-%u:%u:0",
964 shost->host_no, fnode_sess->target_id);
965
966 err = device_register(&fnode_conn->dev);
967 if (err)
968 goto free_fnode_conn;
969
970 if (dd_size)
971 fnode_conn->dd_data = &fnode_conn[1];
972
973 return fnode_conn;
974
975free_fnode_conn:
976 kfree(fnode_conn);
977 return NULL;
978}
979EXPORT_SYMBOL_GPL(iscsi_create_flashnode_conn);
980
/**
 * iscsi_is_flashnode_conn_dev - verify passed device is to be flashnode conn
 * @dev: device to verify
 * @data: pointer to data containing value to use for verification (unused)
 *
 * Verifies if the passed device is flashnode conn device
 *
 * NOTE(review): this only checks membership of the flashnode bus, so a
 * flashnode *session* device would match as well; callers apply it to
 * children of a flashnode session, which are all connections - confirm
 * before using it in any other context.
 *
 * Returns:
 *  1 on success
 *  0 on failure
 */
int iscsi_is_flashnode_conn_dev(struct device *dev, void *data)
{
	return dev->bus == &iscsi_flashnode_bus;
}
EXPORT_SYMBOL_GPL(iscsi_is_flashnode_conn_dev);
997
/* Remove a flashnode connection device from sysfs; the memory is freed
 * by iscsi_flashnode_conn_release() when the last reference drops.
 * Always returns 0 so it can terminate device_for_each_child() cleanly. */
static int iscsi_destroy_flashnode_conn(struct iscsi_bus_flash_conn *fnode_conn)
{
	device_unregister(&fnode_conn->dev);
	return 0;
}
1003
1004static int flashnode_match_index(struct device *dev, void *data)
1005{
1006 struct iscsi_bus_flash_session *fnode_sess = NULL;
1007 int ret = 0;
1008
1009 if (!iscsi_flashnode_bus_match(dev, NULL))
1010 goto exit_match_index;
1011
1012 fnode_sess = iscsi_dev_to_flash_session(dev);
1013 ret = (fnode_sess->target_id == *((int *)data)) ? 1 : 0;
1014
1015exit_match_index:
1016 return ret;
1017}
1018
/**
 * iscsi_get_flashnode_by_index - finds flashnode session entry by index
 * @shost: pointer to host data
 * @data: pointer to data containing value to use for comparison
 * @fn: function pointer that does actual comparison
 *
 * Finds the flashnode session object for the passed index
 *
 * NOTE(review): device_find_child() returns the matching device with a
 * reference taken; the caller owns that reference and must drop it with
 * put_device(&fnode_sess->dev) when done with the session.
 *
 * Returns:
 *  pointer to found flashnode session object on success
 *  %NULL on failure
 */
static struct iscsi_bus_flash_session *
iscsi_get_flashnode_by_index(struct Scsi_Host *shost, void *data,
			     int (*fn)(struct device *dev, void *data))
{
	struct iscsi_bus_flash_session *fnode_sess = NULL;
	struct device *dev;

	dev = device_find_child(&shost->shost_gendev, data, fn);
	if (dev)
		fnode_sess = iscsi_dev_to_flash_session(dev);

	return fnode_sess;
}
1044
1045/**
1046 * iscsi_find_flashnode_sess - finds flashnode session entry
1047 * @shost: pointer to host data
1048 * @data: pointer to data containing value to use for comparison
1049 * @fn: function pointer that does actual comparison
1050 *
1051 * Finds the flashnode session object comparing the data passed using logic
1052 * defined in passed function pointer
1053 *
1054 * Returns:
1055 * pointer to found flashnode session device object on success
1056 * %NULL on failure
1057 */
1058struct device *
1059iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data,
1060 int (*fn)(struct device *dev, void *data))
1061{
1062 struct device *dev;
1063
1064 dev = device_find_child(&shost->shost_gendev, data, fn);
1065 return dev;
1066}
1067EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess);
1068
1069/**
1070 * iscsi_find_flashnode_conn - finds flashnode connection entry
1071 * @fnode_sess: pointer to parent flashnode session entry
1072 * @data: pointer to data containing value to use for comparison
1073 * @fn: function pointer that does actual comparison
1074 *
1075 * Finds the flashnode connection object comparing the data passed using logic
1076 * defined in passed function pointer
1077 *
1078 * Returns:
1079 * pointer to found flashnode connection device object on success
1080 * %NULL on failure
1081 */
1082struct device *
1083iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess,
1084 void *data,
1085 int (*fn)(struct device *dev, void *data))
1086{
1087 struct device *dev;
1088
1089 dev = device_find_child(&fnode_sess->dev, data, fn);
1090 return dev;
1091}
1092EXPORT_SYMBOL_GPL(iscsi_find_flashnode_conn);
1093
1094static int iscsi_iter_destroy_flashnode_conn_fn(struct device *dev, void *data)
1095{
1096 if (!iscsi_is_flashnode_conn_dev(dev, NULL))
1097 return 0;
1098
1099 return iscsi_destroy_flashnode_conn(iscsi_dev_to_flash_conn(dev));
1100}
1101
/**
 * iscsi_destroy_flashnode_sess - destroy flashnode session entry
 * @fnode_sess: pointer to flashnode session entry to be destroyed
 *
 * Deletes the flashnode session entry and all children flashnode connection
 * entries from sysfs
 */
void iscsi_destroy_flashnode_sess(struct iscsi_bus_flash_session *fnode_sess)
{
	int err;

	/* tear down every child connection device before the session itself */
	err = device_for_each_child(&fnode_sess->dev, NULL,
				    iscsi_iter_destroy_flashnode_conn_fn);
	if (err)
		pr_err("Could not delete all connections for %s. Error %d.\n",
		       fnode_sess->dev.kobj.name, err);

	device_unregister(&fnode_sess->dev);
}
EXPORT_SYMBOL_GPL(iscsi_destroy_flashnode_sess);
1122
1123static int iscsi_iter_destroy_flashnode_fn(struct device *dev, void *data)
1124{
1125 if (!iscsi_flashnode_bus_match(dev, NULL))
1126 return 0;
1127
1128 iscsi_destroy_flashnode_sess(iscsi_dev_to_flash_session(dev));
1129 return 0;
1130}
1131
/**
 * iscsi_destroy_all_flashnode - destroy all flashnode session entries
 * @shost: pointer to host data
 *
 * Destroys all the flashnode session entries and all corresponding children
 * flashnode connection entries from sysfs
 */
void iscsi_destroy_all_flashnode(struct Scsi_Host *shost)
{
	device_for_each_child(&shost->shost_gendev, NULL,
			      iscsi_iter_destroy_flashnode_fn);
}
EXPORT_SYMBOL_GPL(iscsi_destroy_all_flashnode);
1145
1146/*
463 * BSG support 1147 * BSG support
464 */ 1148 */
465/** 1149/**
@@ -2092,6 +2776,294 @@ static int iscsi_delete_chap(struct iscsi_transport *transport,
2092 return err; 2776 return err;
2093} 2777}
2094 2778
/* Human-readable names for enum iscsi_discovery_parent_type values,
 * consumed by iscsi_get_discovery_parent_name(). */
static const struct {
	enum iscsi_discovery_parent_type value;
	char *name;
} iscsi_discovery_parent_names[] = {
	{ISCSI_DISC_PARENT_UNKNOWN,	"Unknown" },
	{ISCSI_DISC_PARENT_SENDTGT,	"Sendtarget" },
	{ISCSI_DISC_PARENT_ISNS,	"isns" },
};
2787
2788char *iscsi_get_discovery_parent_name(int parent_type)
2789{
2790 int i;
2791 char *state = "Unknown!";
2792
2793 for (i = 0; i < ARRAY_SIZE(iscsi_discovery_parent_names); i++) {
2794 if (iscsi_discovery_parent_names[i].value & parent_type) {
2795 state = iscsi_discovery_parent_names[i].name;
2796 break;
2797 }
2798 }
2799 return state;
2800}
2801EXPORT_SYMBOL_GPL(iscsi_get_discovery_parent_name);
2802
2803static int iscsi_set_flashnode_param(struct iscsi_transport *transport,
2804 struct iscsi_uevent *ev, uint32_t len)
2805{
2806 char *data = (char *)ev + sizeof(*ev);
2807 struct Scsi_Host *shost;
2808 struct iscsi_bus_flash_session *fnode_sess;
2809 struct iscsi_bus_flash_conn *fnode_conn;
2810 struct device *dev;
2811 uint32_t *idx;
2812 int err = 0;
2813
2814 if (!transport->set_flashnode_param) {
2815 err = -ENOSYS;
2816 goto exit_set_fnode;
2817 }
2818
2819 shost = scsi_host_lookup(ev->u.set_flashnode.host_no);
2820 if (!shost) {
2821 pr_err("%s could not find host no %u\n",
2822 __func__, ev->u.set_flashnode.host_no);
2823 err = -ENODEV;
2824 goto put_host;
2825 }
2826
2827 idx = &ev->u.set_flashnode.flashnode_idx;
2828 fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
2829 flashnode_match_index);
2830 if (!fnode_sess) {
2831 pr_err("%s could not find flashnode %u for host no %u\n",
2832 __func__, *idx, ev->u.set_flashnode.host_no);
2833 err = -ENODEV;
2834 goto put_host;
2835 }
2836
2837 dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
2838 iscsi_is_flashnode_conn_dev);
2839 if (!dev) {
2840 err = -ENODEV;
2841 goto put_host;
2842 }
2843
2844 fnode_conn = iscsi_dev_to_flash_conn(dev);
2845 err = transport->set_flashnode_param(fnode_sess, fnode_conn, data, len);
2846
2847put_host:
2848 scsi_host_put(shost);
2849
2850exit_set_fnode:
2851 return err;
2852}
2853
2854static int iscsi_new_flashnode(struct iscsi_transport *transport,
2855 struct iscsi_uevent *ev, uint32_t len)
2856{
2857 char *data = (char *)ev + sizeof(*ev);
2858 struct Scsi_Host *shost;
2859 int index;
2860 int err = 0;
2861
2862 if (!transport->new_flashnode) {
2863 err = -ENOSYS;
2864 goto exit_new_fnode;
2865 }
2866
2867 shost = scsi_host_lookup(ev->u.new_flashnode.host_no);
2868 if (!shost) {
2869 pr_err("%s could not find host no %u\n",
2870 __func__, ev->u.new_flashnode.host_no);
2871 err = -ENODEV;
2872 goto put_host;
2873 }
2874
2875 index = transport->new_flashnode(shost, data, len);
2876
2877 if (index >= 0)
2878 ev->r.new_flashnode_ret.flashnode_idx = index;
2879 else
2880 err = -EIO;
2881
2882put_host:
2883 scsi_host_put(shost);
2884
2885exit_new_fnode:
2886 return err;
2887}
2888
2889static int iscsi_del_flashnode(struct iscsi_transport *transport,
2890 struct iscsi_uevent *ev)
2891{
2892 struct Scsi_Host *shost;
2893 struct iscsi_bus_flash_session *fnode_sess;
2894 uint32_t *idx;
2895 int err = 0;
2896
2897 if (!transport->del_flashnode) {
2898 err = -ENOSYS;
2899 goto exit_del_fnode;
2900 }
2901
2902 shost = scsi_host_lookup(ev->u.del_flashnode.host_no);
2903 if (!shost) {
2904 pr_err("%s could not find host no %u\n",
2905 __func__, ev->u.del_flashnode.host_no);
2906 err = -ENODEV;
2907 goto put_host;
2908 }
2909
2910 idx = &ev->u.del_flashnode.flashnode_idx;
2911 fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
2912 flashnode_match_index);
2913 if (!fnode_sess) {
2914 pr_err("%s could not find flashnode %u for host no %u\n",
2915 __func__, *idx, ev->u.del_flashnode.host_no);
2916 err = -ENODEV;
2917 goto put_host;
2918 }
2919
2920 err = transport->del_flashnode(fnode_sess);
2921
2922put_host:
2923 scsi_host_put(shost);
2924
2925exit_del_fnode:
2926 return err;
2927}
2928
2929static int iscsi_login_flashnode(struct iscsi_transport *transport,
2930 struct iscsi_uevent *ev)
2931{
2932 struct Scsi_Host *shost;
2933 struct iscsi_bus_flash_session *fnode_sess;
2934 struct iscsi_bus_flash_conn *fnode_conn;
2935 struct device *dev;
2936 uint32_t *idx;
2937 int err = 0;
2938
2939 if (!transport->login_flashnode) {
2940 err = -ENOSYS;
2941 goto exit_login_fnode;
2942 }
2943
2944 shost = scsi_host_lookup(ev->u.login_flashnode.host_no);
2945 if (!shost) {
2946 pr_err("%s could not find host no %u\n",
2947 __func__, ev->u.login_flashnode.host_no);
2948 err = -ENODEV;
2949 goto put_host;
2950 }
2951
2952 idx = &ev->u.login_flashnode.flashnode_idx;
2953 fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
2954 flashnode_match_index);
2955 if (!fnode_sess) {
2956 pr_err("%s could not find flashnode %u for host no %u\n",
2957 __func__, *idx, ev->u.login_flashnode.host_no);
2958 err = -ENODEV;
2959 goto put_host;
2960 }
2961
2962 dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
2963 iscsi_is_flashnode_conn_dev);
2964 if (!dev) {
2965 err = -ENODEV;
2966 goto put_host;
2967 }
2968
2969 fnode_conn = iscsi_dev_to_flash_conn(dev);
2970 err = transport->login_flashnode(fnode_sess, fnode_conn);
2971
2972put_host:
2973 scsi_host_put(shost);
2974
2975exit_login_fnode:
2976 return err;
2977}
2978
2979static int iscsi_logout_flashnode(struct iscsi_transport *transport,
2980 struct iscsi_uevent *ev)
2981{
2982 struct Scsi_Host *shost;
2983 struct iscsi_bus_flash_session *fnode_sess;
2984 struct iscsi_bus_flash_conn *fnode_conn;
2985 struct device *dev;
2986 uint32_t *idx;
2987 int err = 0;
2988
2989 if (!transport->logout_flashnode) {
2990 err = -ENOSYS;
2991 goto exit_logout_fnode;
2992 }
2993
2994 shost = scsi_host_lookup(ev->u.logout_flashnode.host_no);
2995 if (!shost) {
2996 pr_err("%s could not find host no %u\n",
2997 __func__, ev->u.logout_flashnode.host_no);
2998 err = -ENODEV;
2999 goto put_host;
3000 }
3001
3002 idx = &ev->u.logout_flashnode.flashnode_idx;
3003 fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
3004 flashnode_match_index);
3005 if (!fnode_sess) {
3006 pr_err("%s could not find flashnode %u for host no %u\n",
3007 __func__, *idx, ev->u.logout_flashnode.host_no);
3008 err = -ENODEV;
3009 goto put_host;
3010 }
3011
3012 dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
3013 iscsi_is_flashnode_conn_dev);
3014 if (!dev) {
3015 err = -ENODEV;
3016 goto put_host;
3017 }
3018
3019 fnode_conn = iscsi_dev_to_flash_conn(dev);
3020
3021 err = transport->logout_flashnode(fnode_sess, fnode_conn);
3022
3023put_host:
3024 scsi_host_put(shost);
3025
3026exit_logout_fnode:
3027 return err;
3028}
3029
3030static int iscsi_logout_flashnode_sid(struct iscsi_transport *transport,
3031 struct iscsi_uevent *ev)
3032{
3033 struct Scsi_Host *shost;
3034 struct iscsi_cls_session *session;
3035 int err = 0;
3036
3037 if (!transport->logout_flashnode_sid) {
3038 err = -ENOSYS;
3039 goto exit_logout_sid;
3040 }
3041
3042 shost = scsi_host_lookup(ev->u.logout_flashnode_sid.host_no);
3043 if (!shost) {
3044 pr_err("%s could not find host no %u\n",
3045 __func__, ev->u.logout_flashnode.host_no);
3046 err = -ENODEV;
3047 goto put_host;
3048 }
3049
3050 session = iscsi_session_lookup(ev->u.logout_flashnode_sid.sid);
3051 if (!session) {
3052 pr_err("%s could not find session id %u\n",
3053 __func__, ev->u.logout_flashnode_sid.sid);
3054 err = -EINVAL;
3055 goto put_host;
3056 }
3057
3058 err = transport->logout_flashnode_sid(session);
3059
3060put_host:
3061 scsi_host_put(shost);
3062
3063exit_logout_sid:
3064 return err;
3065}
3066
2095static int 3067static int
2096iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) 3068iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
2097{ 3069{
@@ -2246,6 +3218,27 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
2246 case ISCSI_UEVENT_DELETE_CHAP: 3218 case ISCSI_UEVENT_DELETE_CHAP:
2247 err = iscsi_delete_chap(transport, ev); 3219 err = iscsi_delete_chap(transport, ev);
2248 break; 3220 break;
3221 case ISCSI_UEVENT_SET_FLASHNODE_PARAMS:
3222 err = iscsi_set_flashnode_param(transport, ev,
3223 nlmsg_attrlen(nlh,
3224 sizeof(*ev)));
3225 break;
3226 case ISCSI_UEVENT_NEW_FLASHNODE:
3227 err = iscsi_new_flashnode(transport, ev,
3228 nlmsg_attrlen(nlh, sizeof(*ev)));
3229 break;
3230 case ISCSI_UEVENT_DEL_FLASHNODE:
3231 err = iscsi_del_flashnode(transport, ev);
3232 break;
3233 case ISCSI_UEVENT_LOGIN_FLASHNODE:
3234 err = iscsi_login_flashnode(transport, ev);
3235 break;
3236 case ISCSI_UEVENT_LOGOUT_FLASHNODE:
3237 err = iscsi_logout_flashnode(transport, ev);
3238 break;
3239 case ISCSI_UEVENT_LOGOUT_FLASHNODE_SID:
3240 err = iscsi_logout_flashnode_sid(transport, ev);
3241 break;
2249 default: 3242 default:
2250 err = -ENOSYS; 3243 err = -ENOSYS;
2251 break; 3244 break;
@@ -2981,10 +3974,14 @@ static __init int iscsi_transport_init(void)
2981 if (err) 3974 if (err)
2982 goto unregister_conn_class; 3975 goto unregister_conn_class;
2983 3976
3977 err = bus_register(&iscsi_flashnode_bus);
3978 if (err)
3979 goto unregister_session_class;
3980
2984 nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, &cfg); 3981 nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, &cfg);
2985 if (!nls) { 3982 if (!nls) {
2986 err = -ENOBUFS; 3983 err = -ENOBUFS;
2987 goto unregister_session_class; 3984 goto unregister_flashnode_bus;
2988 } 3985 }
2989 3986
2990 iscsi_eh_timer_workq = create_singlethread_workqueue("iscsi_eh"); 3987 iscsi_eh_timer_workq = create_singlethread_workqueue("iscsi_eh");
@@ -2995,6 +3992,8 @@ static __init int iscsi_transport_init(void)
2995 3992
2996release_nls: 3993release_nls:
2997 netlink_kernel_release(nls); 3994 netlink_kernel_release(nls);
3995unregister_flashnode_bus:
3996 bus_unregister(&iscsi_flashnode_bus);
2998unregister_session_class: 3997unregister_session_class:
2999 transport_class_unregister(&iscsi_session_class); 3998 transport_class_unregister(&iscsi_session_class);
3000unregister_conn_class: 3999unregister_conn_class:
@@ -3014,6 +4013,7 @@ static void __exit iscsi_transport_exit(void)
3014{ 4013{
3015 destroy_workqueue(iscsi_eh_timer_workq); 4014 destroy_workqueue(iscsi_eh_timer_workq);
3016 netlink_kernel_release(nls); 4015 netlink_kernel_release(nls);
4016 bus_unregister(&iscsi_flashnode_bus);
3017 transport_class_unregister(&iscsi_connection_class); 4017 transport_class_unregister(&iscsi_connection_class);
3018 transport_class_unregister(&iscsi_session_class); 4018 transport_class_unregister(&iscsi_session_class);
3019 transport_class_unregister(&iscsi_host_class); 4019 transport_class_unregister(&iscsi_host_class);
diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
index 917741bb8e11..fe7f06c86f68 100644
--- a/include/scsi/iscsi_if.h
+++ b/include/scsi/iscsi_if.h
@@ -63,6 +63,12 @@ enum iscsi_uevent_e {
63 ISCSI_UEVENT_PING = UEVENT_BASE + 22, 63 ISCSI_UEVENT_PING = UEVENT_BASE + 22,
64 ISCSI_UEVENT_GET_CHAP = UEVENT_BASE + 23, 64 ISCSI_UEVENT_GET_CHAP = UEVENT_BASE + 23,
65 ISCSI_UEVENT_DELETE_CHAP = UEVENT_BASE + 24, 65 ISCSI_UEVENT_DELETE_CHAP = UEVENT_BASE + 24,
66 ISCSI_UEVENT_SET_FLASHNODE_PARAMS = UEVENT_BASE + 25,
67 ISCSI_UEVENT_NEW_FLASHNODE = UEVENT_BASE + 26,
68 ISCSI_UEVENT_DEL_FLASHNODE = UEVENT_BASE + 27,
69 ISCSI_UEVENT_LOGIN_FLASHNODE = UEVENT_BASE + 28,
70 ISCSI_UEVENT_LOGOUT_FLASHNODE = UEVENT_BASE + 29,
71 ISCSI_UEVENT_LOGOUT_FLASHNODE_SID = UEVENT_BASE + 30,
66 72
67 /* up events */ 73 /* up events */
68 ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1, 74 ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1,
@@ -210,6 +216,31 @@ struct iscsi_uevent {
210 uint32_t host_no; 216 uint32_t host_no;
211 uint16_t chap_tbl_idx; 217 uint16_t chap_tbl_idx;
212 } delete_chap; 218 } delete_chap;
219 struct msg_set_flashnode_param {
220 uint32_t host_no;
221 uint32_t flashnode_idx;
222 uint32_t count;
223 } set_flashnode;
224 struct msg_new_flashnode {
225 uint32_t host_no;
226 uint32_t len;
227 } new_flashnode;
228 struct msg_del_flashnode {
229 uint32_t host_no;
230 uint32_t flashnode_idx;
231 } del_flashnode;
232 struct msg_login_flashnode {
233 uint32_t host_no;
234 uint32_t flashnode_idx;
235 } login_flashnode;
236 struct msg_logout_flashnode {
237 uint32_t host_no;
238 uint32_t flashnode_idx;
239 } logout_flashnode;
240 struct msg_logout_flashnode_sid {
241 uint32_t host_no;
242 uint32_t sid;
243 } logout_flashnode_sid;
213 } u; 244 } u;
214 union { 245 union {
215 /* messages k -> u */ 246 /* messages k -> u */
@@ -267,6 +298,9 @@ struct iscsi_uevent {
267 with each ping request */ 298 with each ping request */
268 uint32_t data_size; 299 uint32_t data_size;
269 } ping_comp; 300 } ping_comp;
301 struct msg_new_flashnode_ret {
302 uint32_t flashnode_idx;
303 } new_flashnode_ret;
270 } r; 304 } r;
271} __attribute__ ((aligned (sizeof(uint64_t)))); 305} __attribute__ ((aligned (sizeof(uint64_t))));
272 306
@@ -274,6 +308,7 @@ enum iscsi_param_type {
274 ISCSI_PARAM, /* iscsi_param (session, conn, target, LU) */ 308 ISCSI_PARAM, /* iscsi_param (session, conn, target, LU) */
275 ISCSI_HOST_PARAM, /* iscsi_host_param */ 309 ISCSI_HOST_PARAM, /* iscsi_host_param */
276 ISCSI_NET_PARAM, /* iscsi_net_param */ 310 ISCSI_NET_PARAM, /* iscsi_net_param */
311 ISCSI_FLASHNODE_PARAM, /* iscsi_flashnode_param */
277}; 312};
278 313
279struct iscsi_iface_param_info { 314struct iscsi_iface_param_info {
@@ -469,6 +504,88 @@ enum iscsi_host_param {
469 ISCSI_HOST_PARAM_MAX, 504 ISCSI_HOST_PARAM_MAX,
470}; 505};
471 506
507/* portal type */
508#define PORTAL_TYPE_IPV4 "ipv4"
509#define PORTAL_TYPE_IPV6 "ipv6"
510
511/* iSCSI Flash Target params */
512enum iscsi_flashnode_param {
513 ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6,
514 ISCSI_FLASHNODE_PORTAL_TYPE,
515 ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE,
516 ISCSI_FLASHNODE_DISCOVERY_SESS,
517 ISCSI_FLASHNODE_ENTRY_EN,
518 ISCSI_FLASHNODE_HDR_DGST_EN,
519 ISCSI_FLASHNODE_DATA_DGST_EN,
520 ISCSI_FLASHNODE_IMM_DATA_EN,
521 ISCSI_FLASHNODE_INITIAL_R2T_EN,
522 ISCSI_FLASHNODE_DATASEQ_INORDER,
523 ISCSI_FLASHNODE_PDU_INORDER,
524 ISCSI_FLASHNODE_CHAP_AUTH_EN,
525 ISCSI_FLASHNODE_SNACK_REQ_EN,
526 ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN,
527 ISCSI_FLASHNODE_BIDI_CHAP_EN,
528 /* make authentication for discovery sessions optional */
529 ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL,
530 ISCSI_FLASHNODE_ERL,
531 ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT,
532 ISCSI_FLASHNODE_TCP_NAGLE_DISABLE,
533 ISCSI_FLASHNODE_TCP_WSF_DISABLE,
534 ISCSI_FLASHNODE_TCP_TIMER_SCALE,
535 ISCSI_FLASHNODE_TCP_TIMESTAMP_EN,
536 ISCSI_FLASHNODE_IP_FRAG_DISABLE,
537 ISCSI_FLASHNODE_MAX_RECV_DLENGTH,
538 ISCSI_FLASHNODE_MAX_XMIT_DLENGTH,
539 ISCSI_FLASHNODE_FIRST_BURST,
540 ISCSI_FLASHNODE_DEF_TIME2WAIT,
541 ISCSI_FLASHNODE_DEF_TIME2RETAIN,
542 ISCSI_FLASHNODE_MAX_R2T,
543 ISCSI_FLASHNODE_KEEPALIVE_TMO,
544 ISCSI_FLASHNODE_ISID,
545 ISCSI_FLASHNODE_TSID,
546 ISCSI_FLASHNODE_PORT,
547 ISCSI_FLASHNODE_MAX_BURST,
548 ISCSI_FLASHNODE_DEF_TASKMGMT_TMO,
549 ISCSI_FLASHNODE_IPADDR,
550 ISCSI_FLASHNODE_ALIAS,
551 ISCSI_FLASHNODE_REDIRECT_IPADDR,
552 ISCSI_FLASHNODE_MAX_SEGMENT_SIZE,
553 ISCSI_FLASHNODE_LOCAL_PORT,
554 ISCSI_FLASHNODE_IPV4_TOS,
555 ISCSI_FLASHNODE_IPV6_TC,
556 ISCSI_FLASHNODE_IPV6_FLOW_LABEL,
557 ISCSI_FLASHNODE_NAME,
558 ISCSI_FLASHNODE_TPGT,
559 ISCSI_FLASHNODE_LINK_LOCAL_IPV6,
560 ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX,
561 ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE,
562 ISCSI_FLASHNODE_TCP_XMIT_WSF,
563 ISCSI_FLASHNODE_TCP_RECV_WSF,
564 ISCSI_FLASHNODE_CHAP_IN_IDX,
565 ISCSI_FLASHNODE_CHAP_OUT_IDX,
566 ISCSI_FLASHNODE_USERNAME,
567 ISCSI_FLASHNODE_USERNAME_IN,
568 ISCSI_FLASHNODE_PASSWORD,
569 ISCSI_FLASHNODE_PASSWORD_IN,
570 ISCSI_FLASHNODE_STATSN,
571 ISCSI_FLASHNODE_EXP_STATSN,
572 ISCSI_FLASHNODE_IS_BOOT_TGT,
573
574 ISCSI_FLASHNODE_MAX,
575};
576
577struct iscsi_flashnode_param_info {
578 uint32_t len; /* Actual length of the param */
579 uint16_t param; /* iscsi param value */
580 uint8_t value[0]; /* length sized value follows */
581} __packed;
582
583enum iscsi_discovery_parent_type {
584 ISCSI_DISC_PARENT_UNKNOWN = 0x1,
585 ISCSI_DISC_PARENT_SENDTGT = 0x2,
586 ISCSI_DISC_PARENT_ISNS = 0x3,
587};
588
472/* iSCSI port Speed */ 589/* iSCSI port Speed */
473enum iscsi_port_speed { 590enum iscsi_port_speed {
474 ISCSI_PORT_SPEED_UNKNOWN = 0x1, 591 ISCSI_PORT_SPEED_UNKNOWN = 0x1,
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 6e33386a3898..09c041e6c35f 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -427,6 +427,7 @@ extern void iscsi_complete_scsi_task(struct iscsi_task *task,
427 */ 427 */
428extern void iscsi_pool_free(struct iscsi_pool *); 428extern void iscsi_pool_free(struct iscsi_pool *);
429extern int iscsi_pool_init(struct iscsi_pool *, int, void ***, int); 429extern int iscsi_pool_init(struct iscsi_pool *, int, void ***, int);
430extern int iscsi_switch_str_param(char **, char *);
430 431
431/* 432/*
432 * inline functions to deal with padding. 433 * inline functions to deal with padding.
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 53f0b361d668..4a58cca2ecc1 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -39,6 +39,8 @@ struct iscsi_task;
39struct sockaddr; 39struct sockaddr;
40struct iscsi_iface; 40struct iscsi_iface;
41struct bsg_job; 41struct bsg_job;
42struct iscsi_bus_flash_session;
43struct iscsi_bus_flash_conn;
42 44
43/** 45/**
44 * struct iscsi_transport - iSCSI Transport template 46 * struct iscsi_transport - iSCSI Transport template
@@ -150,6 +152,19 @@ struct iscsi_transport {
150 int (*get_chap) (struct Scsi_Host *shost, uint16_t chap_tbl_idx, 152 int (*get_chap) (struct Scsi_Host *shost, uint16_t chap_tbl_idx,
151 uint32_t *num_entries, char *buf); 153 uint32_t *num_entries, char *buf);
152 int (*delete_chap) (struct Scsi_Host *shost, uint16_t chap_tbl_idx); 154 int (*delete_chap) (struct Scsi_Host *shost, uint16_t chap_tbl_idx);
155 int (*get_flashnode_param) (struct iscsi_bus_flash_session *fnode_sess,
156 int param, char *buf);
157 int (*set_flashnode_param) (struct iscsi_bus_flash_session *fnode_sess,
158 struct iscsi_bus_flash_conn *fnode_conn,
159 void *data, int len);
160 int (*new_flashnode) (struct Scsi_Host *shost, const char *buf,
161 int len);
162 int (*del_flashnode) (struct iscsi_bus_flash_session *fnode_sess);
163 int (*login_flashnode) (struct iscsi_bus_flash_session *fnode_sess,
164 struct iscsi_bus_flash_conn *fnode_conn);
165 int (*logout_flashnode) (struct iscsi_bus_flash_session *fnode_sess,
166 struct iscsi_bus_flash_conn *fnode_conn);
167 int (*logout_flashnode_sid) (struct iscsi_cls_session *cls_sess);
153}; 168};
154 169
155/* 170/*
@@ -286,6 +301,112 @@ struct iscsi_iface {
286#define iscsi_iface_to_shost(_iface) \ 301#define iscsi_iface_to_shost(_iface) \
287 dev_to_shost(_iface->dev.parent) 302 dev_to_shost(_iface->dev.parent)
288 303
304
305struct iscsi_bus_flash_conn {
306 struct list_head conn_list; /* item in connlist */
307 void *dd_data; /* LLD private data */
308 struct iscsi_transport *transport;
309 struct device dev; /* sysfs transport/container device */
310 /* iscsi connection parameters */
311 uint32_t exp_statsn;
312 uint32_t statsn;
313 unsigned max_recv_dlength; /* initiator_max_recv_dsl*/
314 unsigned max_xmit_dlength; /* target_max_recv_dsl */
315 unsigned max_segment_size;
316 unsigned tcp_xmit_wsf;
317 unsigned tcp_recv_wsf;
318 int hdrdgst_en;
319 int datadgst_en;
320 int port;
321 char *ipaddress;
322 char *link_local_ipv6_addr;
323 char *redirect_ipaddr;
324 uint16_t keepalive_timeout;
325 uint16_t local_port;
326 uint8_t snack_req_en;
327 /* tcp timestamp negotiation status */
328 uint8_t tcp_timestamp_stat;
329 uint8_t tcp_nagle_disable;
330 /* tcp window scale factor */
331 uint8_t tcp_wsf_disable;
332 uint8_t tcp_timer_scale;
333 uint8_t tcp_timestamp_en;
334 uint8_t ipv4_tos;
335 uint8_t ipv6_traffic_class;
336 uint8_t ipv6_flow_label;
337 uint8_t fragment_disable;
338 /* Link local IPv6 address is assigned by firmware or driver */
339 uint8_t is_fw_assigned_ipv6;
340};
341
342#define iscsi_dev_to_flash_conn(_dev) \
343 container_of(_dev, struct iscsi_bus_flash_conn, dev)
344
345#define iscsi_flash_conn_to_flash_session(_conn) \
346 iscsi_dev_to_flash_session(_conn->dev.parent)
347
348#define ISID_SIZE 6
349
350struct iscsi_bus_flash_session {
351 struct list_head sess_list; /* item in session_list */
352 struct iscsi_transport *transport;
353 unsigned int target_id;
354 int flash_state; /* persistent or non-persistent */
355 void *dd_data; /* LLD private data */
356 struct device dev; /* sysfs transport/container device */
357 /* iscsi session parameters */
358 unsigned first_burst;
359 unsigned max_burst;
360 unsigned short max_r2t;
361 int default_taskmgmt_timeout;
362 int initial_r2t_en;
363 int imm_data_en;
364 int time2wait;
365 int time2retain;
366 int pdu_inorder_en;
367 int dataseq_inorder_en;
368 int erl;
369 int tpgt;
370 char *username;
371 char *username_in;
372 char *password;
373 char *password_in;
374 char *targetname;
375 char *targetalias;
376 char *portal_type;
377 uint16_t tsid;
378 uint16_t chap_in_idx;
379 uint16_t chap_out_idx;
380 /* index of iSCSI discovery session if the entry is
381 * discovered by iSCSI discovery session
382 */
383 uint16_t discovery_parent_idx;
384 /* indicates if discovery was done through iSNS discovery service
385 * or through sendTarget */
386 uint16_t discovery_parent_type;
387 /* Firmware auto sendtarget discovery disable */
388 uint8_t auto_snd_tgt_disable;
389 uint8_t discovery_sess;
390 /* indicates if this flashnode entry is enabled or disabled */
391 uint8_t entry_state;
392 uint8_t chap_auth_en;
393 /* enables firmware to auto logout the discovery session on discovery
394 * completion
395 */
396 uint8_t discovery_logout_en;
397 uint8_t bidi_chap_en;
398 /* makes authentication for discovery session optional */
399 uint8_t discovery_auth_optional;
400 uint8_t isid[ISID_SIZE];
401 uint8_t is_boot_target;
402};
403
404#define iscsi_dev_to_flash_session(_dev) \
405 container_of(_dev, struct iscsi_bus_flash_session, dev)
406
407#define iscsi_flash_session_to_shost(_session) \
408 dev_to_shost(_session->dev.parent)
409
289/* 410/*
290 * session and connection functions that can be used by HW iSCSI LLDs 411 * session and connection functions that can be used by HW iSCSI LLDs
291 */ 412 */
@@ -330,4 +451,34 @@ extern char *iscsi_get_port_speed_name(struct Scsi_Host *shost);
330extern char *iscsi_get_port_state_name(struct Scsi_Host *shost); 451extern char *iscsi_get_port_state_name(struct Scsi_Host *shost);
331extern int iscsi_is_session_dev(const struct device *dev); 452extern int iscsi_is_session_dev(const struct device *dev);
332 453
454extern char *iscsi_get_discovery_parent_name(int parent_type);
455extern struct device *
456iscsi_find_flashnode(struct Scsi_Host *shost, void *data,
457 int (*fn)(struct device *dev, void *data));
458
459extern struct iscsi_bus_flash_session *
460iscsi_create_flashnode_sess(struct Scsi_Host *shost, int index,
461 struct iscsi_transport *transport, int dd_size);
462
463extern struct iscsi_bus_flash_conn *
464iscsi_create_flashnode_conn(struct Scsi_Host *shost,
465 struct iscsi_bus_flash_session *fnode_sess,
466 struct iscsi_transport *transport, int dd_size);
467
468extern void
469iscsi_destroy_flashnode_sess(struct iscsi_bus_flash_session *fnode_sess);
470
471extern void iscsi_destroy_all_flashnode(struct Scsi_Host *shost);
472extern int iscsi_flashnode_bus_match(struct device *dev,
473 struct device_driver *drv);
474extern int iscsi_is_flashnode_conn_dev(struct device *dev, void *data);
475
476extern struct device *
477iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data,
478 int (*fn)(struct device *dev, void *data));
479
480extern struct device *
481iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess,
482 void *data,
483 int (*fn)(struct device *dev, void *data));
333#endif 484#endif