author		James Smart <James.Smart@Emulex.Com>	2006-08-18 17:47:08 -0400
committer	James Bottomley <jejb@mulgrave.il.steeleye.com>	2006-09-04 22:25:21 -0400
commit		c01f32087960edd60a302ad62ad6b8b525e4aeec (patch)
tree		abefc5afc051d379802de42175e14df37d79b4ae /drivers
parent		0f29b966d60e9a4f5ecff9f3832257b38aea4f13 (diff)
[SCSI] lpfc 8.1.10 : Add support for dev_loss_tmo_callbk and fast_io_fail_tmo_callbk
Add support for the new dev_loss_tmo callback. A nice side effect is that
it removes the driver's parallel nodev timer code.

Add support for the new fast_io_fail callback.

Signed-off-by: James Smart <James.Smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
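The transport hooks added here are registered through the driver's struct fc_function_template. The sketch below shows only that wiring, as a hedged illustration: the .dev_loss_tmo_callbk and .terminate_rport_io member names come straight from this patch, while the my_* handlers and template name are illustrative stand-ins, not the actual lpfc routines.

#include <scsi/scsi_transport_fc.h>

/* Invoked by the FC transport when dev_loss_tmo expires for an rport:
 * drop the LLD's references to the node behind the rport. */
static void my_dev_loss_tmo_callbk(struct fc_rport *rport)
{
}

/* Invoked when fast_io_fail_tmo expires: abort outstanding I/O to the
 * rport early instead of waiting for dev_loss_tmo. */
static void my_terminate_rport_io(struct fc_rport *rport)
{
}

static struct fc_function_template my_transport_functions = {
	/* ... show_/get_/set_ members elided ... */
	.dev_loss_tmo_callbk	= my_dev_loss_tmo_callbk,
	.terminate_rport_io	= my_terminate_rport_io,
};

The template would then be handed to fc_attach_transport() when the driver sets up its Scsi_Host, which is how the midlayer learns to call these handlers.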
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/scsi/lpfc/lpfc.h		|   3
-rw-r--r--	drivers/scsi/lpfc/lpfc_attr.c		| 155
-rw-r--r--	drivers/scsi/lpfc/lpfc_crtn.h		|   3
-rw-r--r--	drivers/scsi/lpfc/lpfc_ct.c		|  25
-rw-r--r--	drivers/scsi/lpfc/lpfc_disc.h		|   6
-rw-r--r--	drivers/scsi/lpfc/lpfc_hbadisc.c	| 178
-rw-r--r--	drivers/scsi/lpfc/lpfc_nportdisc.c	|   2
-rw-r--r--	drivers/scsi/lpfc/lpfc_scsi.c		|  10
8 files changed, 209 insertions, 173 deletions
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 4a4048d7ba91..efec44d267c7 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -285,6 +285,7 @@ struct lpfc_hba {
 	uint32_t cfg_log_verbose;
 	uint32_t cfg_lun_queue_depth;
 	uint32_t cfg_nodev_tmo;
+	uint32_t cfg_devloss_tmo;
 	uint32_t cfg_hba_queue_depth;
 	uint32_t cfg_fcp_class;
 	uint32_t cfg_use_adisc;
@@ -303,6 +304,8 @@ struct lpfc_hba {
 	uint32_t cfg_sg_seg_cnt;
 	uint32_t cfg_sg_dma_buf_size;
 
+	uint32_t dev_loss_tmo_changed;
+
 	lpfc_vpd_t vpd;		/* vital product data */
 
 	struct Scsi_Host *host;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index c6d683d86cff..0de69324212e 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -39,6 +39,9 @@
 #include "lpfc_compat.h"
 #include "lpfc_crtn.h"
 
+#define LPFC_DEF_DEVLOSS_TMO 30
+#define LPFC_MIN_DEVLOSS_TMO 1
+#define LPFC_MAX_DEVLOSS_TMO 255
 
 static void
 lpfc_jedec_to_ascii(int incr, char hdw[])
@@ -559,6 +562,123 @@ static CLASS_DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,
 		lpfc_poll_show, lpfc_poll_store);
 
 /*
+# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
+# until the timer expires. Value range is [0,255]. Default value is 30.
+*/
+static int lpfc_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
+static int lpfc_devloss_tmo = LPFC_DEF_DEVLOSS_TMO;
+module_param(lpfc_nodev_tmo, int, 0);
+MODULE_PARM_DESC(lpfc_nodev_tmo,
+		 "Seconds driver will hold I/O waiting "
+		 "for a device to come back");
+static ssize_t
+lpfc_nodev_tmo_show(struct class_device *cdev, char *buf)
+{
+	struct Scsi_Host *host = class_to_shost(cdev);
+	struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+	int val = 0;
+	val = phba->cfg_devloss_tmo;
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			phba->cfg_devloss_tmo);
+}
+
+static int
+lpfc_nodev_tmo_init(struct lpfc_hba *phba, int val)
+{
+	static int warned;
+	if (phba->cfg_devloss_tmo != LPFC_DEF_DEVLOSS_TMO) {
+		phba->cfg_nodev_tmo = phba->cfg_devloss_tmo;
+		if (!warned && val != LPFC_DEF_DEVLOSS_TMO) {
+			warned = 1;
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"%d:0402 Ignoring nodev_tmo module "
+					"parameter because devloss_tmo is"
+					" set.\n",
+					phba->brd_no);
+		}
+		return 0;
+	}
+
+	if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
+		phba->cfg_nodev_tmo = val;
+		phba->cfg_devloss_tmo = val;
+		return 0;
+	}
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"%d:0400 lpfc_nodev_tmo attribute cannot be set to %d, "
+			"allowed range is [%d, %d]\n",
+			phba->brd_no, val,
+			LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
+	phba->cfg_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
+	return -EINVAL;
+}
+
+static int
+lpfc_nodev_tmo_set(struct lpfc_hba *phba, int val)
+{
+	if (phba->dev_loss_tmo_changed ||
+	    (lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"%d:0401 Ignoring change to nodev_tmo "
+				"because devloss_tmo is set.\n",
+				phba->brd_no);
+		return 0;
+	}
+
+	if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
+		phba->cfg_nodev_tmo = val;
+		phba->cfg_devloss_tmo = val;
+		return 0;
+	}
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"%d:0403 lpfc_nodev_tmo attribute cannot be set to %d, "
+			"allowed range is [%d, %d]\n",
+			phba->brd_no, val, LPFC_MIN_DEVLOSS_TMO,
+			LPFC_MAX_DEVLOSS_TMO);
+	return -EINVAL;
+}
+
+lpfc_param_store(nodev_tmo)
+
+static CLASS_DEVICE_ATTR(lpfc_nodev_tmo, S_IRUGO | S_IWUSR,
+			 lpfc_nodev_tmo_show, lpfc_nodev_tmo_store);
+
+/*
+# lpfc_devloss_tmo: If set, it will hold all I/O errors on devices that
+# disappear until the timer expires. Value range is [0,255]. Default
+# value is 30.
+*/
+module_param(lpfc_devloss_tmo, int, 0);
+MODULE_PARM_DESC(lpfc_devloss_tmo,
+		 "Seconds driver will hold I/O waiting "
+		 "for a device to come back");
+lpfc_param_init(devloss_tmo, LPFC_DEF_DEVLOSS_TMO,
+		LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO)
+lpfc_param_show(devloss_tmo)
+static int
+lpfc_devloss_tmo_set(struct lpfc_hba *phba, int val)
+{
+	if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
+		phba->cfg_nodev_tmo = val;
+		phba->cfg_devloss_tmo = val;
+		phba->dev_loss_tmo_changed = 1;
+		return 0;
+	}
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"%d:0404 lpfc_devloss_tmo attribute cannot be set to"
+			" %d, allowed range is [%d, %d]\n",
+			phba->brd_no, val, LPFC_MIN_DEVLOSS_TMO,
+			LPFC_MAX_DEVLOSS_TMO);
+	return -EINVAL;
+}
+
+lpfc_param_store(devloss_tmo)
+static CLASS_DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
+			 lpfc_devloss_tmo_show, lpfc_devloss_tmo_store);
+
+/*
 # lpfc_log_verbose: Only turn this flag on if you are willing to risk being
 # deluged with LOTS of information.
 # You can set a bit mask to record specific types of verbose messages:
@@ -617,14 +737,6 @@ LPFC_ATTR_R(scan_down, 1, 0, 1,
617 "Start scanning for devices from highest ALPA to lowest"); 737 "Start scanning for devices from highest ALPA to lowest");
618 738
619/* 739/*
620# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
621# until the timer expires. Value range is [0,255]. Default value is 30.
622# NOTE: this MUST be less then the SCSI Layer command timeout - 1.
623*/
624LPFC_ATTR_RW(nodev_tmo, 30, 0, 255,
625 "Seconds driver will hold I/O waiting for a device to come back");
626
627/*
628# lpfc_topology: link topology for init link 740# lpfc_topology: link topology for init link
629# 0x0 = attempt loop mode then point-to-point 741# 0x0 = attempt loop mode then point-to-point
630# 0x01 = internal loopback mode 742# 0x01 = internal loopback mode
@@ -737,6 +849,7 @@ struct class_device_attribute *lpfc_host_attrs[] = {
 	&class_device_attr_lpfc_lun_queue_depth,
 	&class_device_attr_lpfc_hba_queue_depth,
 	&class_device_attr_lpfc_nodev_tmo,
+	&class_device_attr_lpfc_devloss_tmo,
 	&class_device_attr_lpfc_fcp_class,
 	&class_device_attr_lpfc_use_adisc,
 	&class_device_attr_lpfc_ack0,
@@ -1450,27 +1563,12 @@ lpfc_get_starget_port_name(struct scsi_target *starget)
 }
 
 static void
-lpfc_get_rport_loss_tmo(struct fc_rport *rport)
-{
-	/*
-	 * Return the driver's global value for device loss timeout plus
-	 * five seconds to allow the driver's nodev timer to run.
-	 */
-	rport->dev_loss_tmo = lpfc_nodev_tmo + 5;
-}
-
-static void
 lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
 {
-	/*
-	 * The driver doesn't have a per-target timeout setting. Set
-	 * this value globally. lpfc_nodev_tmo should be greater then 0.
-	 */
 	if (timeout)
-		lpfc_nodev_tmo = timeout;
+		rport->dev_loss_tmo = timeout;
 	else
-		lpfc_nodev_tmo = 1;
-	rport->dev_loss_tmo = lpfc_nodev_tmo + 5;
+		rport->dev_loss_tmo = 1;
 }
 
 
@@ -1532,7 +1630,6 @@ struct fc_function_template lpfc_transport_functions = {
 	.show_rport_maxframe_size = 1,
 	.show_rport_supported_classes = 1,
 
-	.get_rport_dev_loss_tmo = lpfc_get_rport_loss_tmo,
 	.set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
 	.show_rport_dev_loss_tmo = 1,
 
@@ -1546,6 +1643,8 @@ struct fc_function_template lpfc_transport_functions = {
 	.show_starget_port_name = 1,
 
 	.issue_fc_host_lip = lpfc_issue_lip,
+	.dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
+	.terminate_rport_io = lpfc_terminate_rport_io,
 };
 
 void
@@ -1561,13 +1660,13 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	lpfc_ack0_init(phba, lpfc_ack0);
 	lpfc_topology_init(phba, lpfc_topology);
 	lpfc_scan_down_init(phba, lpfc_scan_down);
-	lpfc_nodev_tmo_init(phba, lpfc_nodev_tmo);
 	lpfc_link_speed_init(phba, lpfc_link_speed);
 	lpfc_fdmi_on_init(phba, lpfc_fdmi_on);
 	lpfc_discovery_threads_init(phba, lpfc_discovery_threads);
 	lpfc_max_luns_init(phba, lpfc_max_luns);
 	lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
-
+	lpfc_devloss_tmo_init(phba, lpfc_devloss_tmo);
+	lpfc_nodev_tmo_init(phba, lpfc_nodev_tmo);
 	phba->cfg_poll = lpfc_poll;
 
 	/*
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 2a176467f71b..3d684496acde 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -18,6 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
+struct fc_rport;
 void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
 void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
 int lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
@@ -200,6 +201,8 @@ extern struct scsi_host_template lpfc_template;
 extern struct fc_function_template lpfc_transport_functions;
 
 void lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp);
+void lpfc_terminate_rport_io(struct fc_rport *);
+void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport);
 
 #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
 #define HBA_EVENT_RSCN 5
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index bbb7310210b0..ae4106458991 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -324,7 +324,6 @@ lpfc_ns_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mp, uint32_t Size)
 	struct lpfc_sli_ct_request *Response =
 		(struct lpfc_sli_ct_request *) mp->virt;
 	struct lpfc_nodelist *ndlp = NULL;
-	struct lpfc_nodelist *next_ndlp;
 	struct lpfc_dmabuf *mlast, *next_mp;
 	uint32_t *ctptr = (uint32_t *) & Response->un.gid.PortType;
 	uint32_t Did;
@@ -399,30 +398,6 @@ nsout1:
 	 * current driver state.
 	 */
 	if (phba->hba_state == LPFC_HBA_READY) {
-
-		/*
-		 * Switch ports that connect a loop of multiple targets need
-		 * special consideration. The driver wants to unregister the
-		 * rpi only on the target that was pulled from the loop. On
-		 * RSCN, the driver wants to rediscover an NPort only if the
-		 * driver flagged it as NLP_NPR_2B_DISC. Provided adisc is
-		 * not enabled and the NPort is not capable of retransmissions
-		 * (FC Tape) prevent timing races with the scsi error handler by
-		 * unregistering the Nport's RPI. This action causes all
-		 * outstanding IO to flush back to the midlayer.
-		 */
-		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
-					 nlp_listp) {
-			if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
-			   (lpfc_rscn_payload_check(phba, ndlp->nlp_DID))) {
-				if ((phba->cfg_use_adisc == 0) &&
-				    !(ndlp->nlp_fcp_info &
-				      NLP_FCP_2_DEVICE)) {
-					lpfc_unreg_rpi(phba, ndlp);
-					ndlp->nlp_flag &= ~NLP_NPR_ADISC;
-				}
-			}
-		}
 		lpfc_els_flush_rscn(phba);
 		spin_lock_irq(phba->host->host_lock);
 		phba->fc_flag |= FC_RSCN_MODE; /* we are still in RSCN mode */
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 41cf5d3ea6ce..9766f909c9c6 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -30,7 +30,6 @@
 
 /* worker thread events */
 enum lpfc_work_type {
-	LPFC_EVT_NODEV_TMO,
 	LPFC_EVT_ONLINE,
 	LPFC_EVT_OFFLINE,
 	LPFC_EVT_WARM_START,
@@ -74,11 +73,9 @@ struct lpfc_nodelist {
 #define NLP_FCP_2_DEVICE   0x10		/* FCP-2 device */
 
 	struct timer_list nlp_delayfunc;	/* Used for delayed ELS cmds */
-	struct timer_list nlp_tmofunc;		/* Used for nodev tmo */
 	struct fc_rport *rport;			/* Corresponding FC transport
 						   port structure */
 	struct lpfc_hba *nlp_phba;
-	struct lpfc_work_evt nodev_timeout_evt;
 	struct lpfc_work_evt els_retry_evt;
 	unsigned long last_ramp_up_time;	/* jiffy of last ramp up */
 	unsigned long last_q_full_time;		/* jiffy of last queue full */
@@ -102,7 +99,6 @@ struct lpfc_nodelist {
 #define NLP_LOGO_SND       0x100	/* sent LOGO request for this entry */
 #define NLP_RNID_SND       0x400	/* sent RNID request for this entry */
 #define NLP_ELS_SND_MASK   0x7e0	/* sent ELS request for this entry */
-#define NLP_NODEV_TMO      0x10000	/* nodev timeout is running for node */
 #define NLP_DELAY_TMO      0x20000	/* delay timeout is running for node */
 #define NLP_NPR_2B_DISC    0x40000	/* node is included in num_disc_nodes */
 #define NLP_RCV_PLOGI      0x80000	/* Rcv'ed PLOGI from remote system */
@@ -169,7 +165,7 @@ struct lpfc_nodelist {
  */
 /*
  * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
- * lists will receive a DEVICE_RECOVERY event. If the linkdown or nodev timers
+ * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers
  * expire, all effected nodes will receive a DEVICE_RM event.
  */
 /*
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 53821e5778b3..97973af980a0 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -56,28 +56,63 @@ static uint8_t lpfcAlpaArray[] = {
 
 static void lpfc_disc_timeout_handler(struct lpfc_hba *);
 
-static void
-lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+void
+lpfc_terminate_rport_io(struct fc_rport *rport)
 {
-	uint8_t *name = (uint8_t *)&ndlp->nlp_portname;
-	int warn_on = 0;
+	struct lpfc_rport_data *rdata;
+	struct lpfc_nodelist * ndlp;
+	struct lpfc_hba *phba;
 
-	spin_lock_irq(phba->host->host_lock);
-	if (!(ndlp->nlp_flag & NLP_NODEV_TMO)) {
-		spin_unlock_irq(phba->host->host_lock);
+	rdata = rport->dd_data;
+	ndlp = rdata->pnode;
+
+	if (!ndlp) {
+		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
+			printk(KERN_ERR "Cannot find remote node"
+			" to terminate I/O Data x%x\n",
+			rport->port_id);
 		return;
 	}
 
-	/*
-	 * If a discovery event readded nodev_timer after timer
-	 * firing and before processing the timer, cancel the
-	 * nlp_tmofunc.
-	 */
-	spin_unlock_irq(phba->host->host_lock);
-	del_timer_sync(&ndlp->nlp_tmofunc);
+	phba = ndlp->nlp_phba;
+
 	spin_lock_irq(phba->host->host_lock);
+	if (ndlp->nlp_sid != NLP_NO_SID) {
+		lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
+			ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
+	}
+	spin_unlock_irq(phba->host->host_lock);
+
+	return;
+}
+
+/*
+ * This function will be called when dev_loss_tmo fire.
+ */
+void
+lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
+{
+	struct lpfc_rport_data *rdata;
+	struct lpfc_nodelist * ndlp;
+	uint8_t *name;
+	int warn_on = 0;
+	struct lpfc_hba *phba;
+
+	rdata = rport->dd_data;
+	ndlp = rdata->pnode;
 
-	ndlp->nlp_flag &= ~NLP_NODEV_TMO;
+	if (!ndlp) {
+		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
+			printk(KERN_ERR "Cannot find remote node"
+			" for rport in dev_loss_tmo_callbk x%x\n",
+			rport->port_id);
+		return;
+	}
+
+	name = (uint8_t *)&ndlp->nlp_portname;
+	phba = ndlp->nlp_phba;
+
+	spin_lock_irq(phba->host->host_lock);
 
 	if (ndlp->nlp_sid != NLP_NO_SID) {
 		warn_on = 1;
@@ -85,11 +120,14 @@ lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 		lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
 			ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
 	}
+	if (phba->fc_flag & FC_UNLOADING)
+		warn_on = 0;
+
 	spin_unlock_irq(phba->host->host_lock);
 
 	if (warn_on) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-				"%d:0203 Nodev timeout on "
+				"%d:0203 Devloss timeout on "
 				"WWPN %x:%x:%x:%x:%x:%x:%x:%x "
 				"NPort x%x Data: x%x x%x x%x\n",
 				phba->brd_no,
@@ -99,7 +137,7 @@ lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 				ndlp->nlp_state, ndlp->nlp_rpi);
 	} else {
 		lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
-				"%d:0204 Nodev timeout on "
+				"%d:0204 Devloss timeout on "
 				"WWPN %x:%x:%x:%x:%x:%x:%x:%x "
 				"NPort x%x Data: x%x x%x x%x\n",
 				phba->brd_no,
@@ -109,7 +147,12 @@ lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 				ndlp->nlp_state, ndlp->nlp_rpi);
 	}
 
-	lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM);
+	ndlp->rport = NULL;
+	rdata->pnode = NULL;
+
+	if (!(phba->fc_flag & FC_UNLOADING))
+		lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM);
+
 	return;
 }
 
@@ -127,11 +170,6 @@ lpfc_work_list_done(struct lpfc_hba * phba)
 		spin_unlock_irq(phba->host->host_lock);
 		free_evt = 1;
 		switch (evtp->evt) {
-		case LPFC_EVT_NODEV_TMO:
-			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
-			lpfc_process_nodev_timeout(phba, ndlp);
-			free_evt = 0;
-			break;
 		case LPFC_EVT_ELS_RETRY:
 			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
 			lpfc_els_retry_delay_handler(ndlp);
@@ -377,16 +415,6 @@ lpfc_linkdown(struct lpfc_hba * phba)
 			rc = lpfc_disc_state_machine(phba, ndlp, NULL,
 						     NLP_EVT_DEVICE_RECOVERY);
 
-			/* Check config parameter use-adisc or FCP-2 */
-			if ((rc != NLP_STE_FREED_NODE) &&
-				(phba->cfg_use_adisc == 0) &&
-				!(ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE)) {
-				/* We know we will have to relogin, so
-				 * unreglogin the rpi right now to fail
-				 * any outstanding I/Os quickly.
-				 */
-				lpfc_unreg_rpi(phba, ndlp);
-			}
 		}
 	}
 
@@ -1104,8 +1132,11 @@ lpfc_unregister_remote_port(struct lpfc_hba * phba,
 	struct fc_rport *rport = ndlp->rport;
 	struct lpfc_rport_data *rdata = rport->dd_data;
 
-	ndlp->rport = NULL;
-	rdata->pnode = NULL;
+	if (rport->scsi_target_id == -1) {
+		ndlp->rport = NULL;
+		rdata->pnode = NULL;
+	}
+
 	fc_remote_port_delete(rport);
 
 	return;
@@ -1233,17 +1264,6 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
 		list_add_tail(&nlp->nlp_listp, &phba->fc_nlpunmap_list);
 		phba->fc_unmap_cnt++;
 		phba->nport_event_cnt++;
-		/* stop nodev tmo if running */
-		if (nlp->nlp_flag & NLP_NODEV_TMO) {
-			nlp->nlp_flag &= ~NLP_NODEV_TMO;
-			spin_unlock_irq(phba->host->host_lock);
-			del_timer_sync(&nlp->nlp_tmofunc);
-			spin_lock_irq(phba->host->host_lock);
-			if (!list_empty(&nlp->nodev_timeout_evt.evt_listp))
-				list_del_init(&nlp->nodev_timeout_evt.
-						evt_listp);
-
-		}
 		nlp->nlp_flag &= ~NLP_NODEV_REMOVE;
 		nlp->nlp_type |= NLP_FC_NODE;
 		break;
@@ -1254,17 +1274,6 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
 		list_add_tail(&nlp->nlp_listp, &phba->fc_nlpmap_list);
 		phba->fc_map_cnt++;
 		phba->nport_event_cnt++;
-		/* stop nodev tmo if running */
-		if (nlp->nlp_flag & NLP_NODEV_TMO) {
-			nlp->nlp_flag &= ~NLP_NODEV_TMO;
-			spin_unlock_irq(phba->host->host_lock);
-			del_timer_sync(&nlp->nlp_tmofunc);
-			spin_lock_irq(phba->host->host_lock);
-			if (!list_empty(&nlp->nodev_timeout_evt.evt_listp))
-				list_del_init(&nlp->nodev_timeout_evt.
-						evt_listp);
-
-		}
 		nlp->nlp_flag &= ~NLP_NODEV_REMOVE;
 		break;
 	case NLP_NPR_LIST:
@@ -1273,11 +1282,6 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
 		list_add_tail(&nlp->nlp_listp, &phba->fc_npr_list);
 		phba->fc_npr_cnt++;
 
-		if (!(nlp->nlp_flag & NLP_NODEV_TMO))
-			mod_timer(&nlp->nlp_tmofunc,
-				jiffies + HZ * phba->cfg_nodev_tmo);
-
-		nlp->nlp_flag |= NLP_NODEV_TMO;
 		nlp->nlp_flag &= ~NLP_RCV_PLOGI;
 		break;
 	case NLP_JUST_DQ:
@@ -1307,7 +1311,8 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
 		 * already. If we have, and it's a scsi entity, be
 		 * sure to unblock any attached scsi devices
 		 */
-		if (!nlp->rport)
+		if ((!nlp->rport) || (nlp->rport->port_state ==
+					FC_PORTSTATE_BLOCKED))
 			lpfc_register_remote_port(phba, nlp);
 
 		/*
@@ -1581,15 +1586,12 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
 
 	lpfc_els_abort(phba,ndlp,0);
 	spin_lock_irq(phba->host->host_lock);
-	ndlp->nlp_flag &= ~(NLP_NODEV_TMO|NLP_DELAY_TMO);
+	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
 	spin_unlock_irq(phba->host->host_lock);
-	del_timer_sync(&ndlp->nlp_tmofunc);
 
 	ndlp->nlp_last_elscmd = 0;
 	del_timer_sync(&ndlp->nlp_delayfunc);
 
-	if (!list_empty(&ndlp->nodev_timeout_evt.evt_listp))
-		list_del_init(&ndlp->nodev_timeout_evt.evt_listp);
 	if (!list_empty(&ndlp->els_retry_evt.evt_listp))
 		list_del_init(&ndlp->els_retry_evt.evt_listp);
 
@@ -1606,16 +1608,6 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
 int
 lpfc_nlp_remove(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
 {
-	if (ndlp->nlp_flag & NLP_NODEV_TMO) {
-		spin_lock_irq(phba->host->host_lock);
-		ndlp->nlp_flag &= ~NLP_NODEV_TMO;
-		spin_unlock_irq(phba->host->host_lock);
-		del_timer_sync(&ndlp->nlp_tmofunc);
-		if (!list_empty(&ndlp->nodev_timeout_evt.evt_listp))
-			list_del_init(&ndlp->nodev_timeout_evt.evt_listp);
-
-	}
-
 
 	if (ndlp->nlp_flag & NLP_DELAY_TMO) {
 		lpfc_cancel_retry_delay_tmo(phba, ndlp);
@@ -2430,34 +2422,6 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
 	return;
 }
 
-static void
-lpfc_nodev_timeout(unsigned long ptr)
-{
-	struct lpfc_hba *phba;
-	struct lpfc_nodelist *ndlp;
-	unsigned long iflag;
-	struct lpfc_work_evt *evtp;
-
-	ndlp = (struct lpfc_nodelist *)ptr;
-	phba = ndlp->nlp_phba;
-	evtp = &ndlp->nodev_timeout_evt;
-	spin_lock_irqsave(phba->host->host_lock, iflag);
-
-	if (!list_empty(&evtp->evt_listp)) {
-		spin_unlock_irqrestore(phba->host->host_lock, iflag);
-		return;
-	}
-	evtp->evt_arg1 = ndlp;
-	evtp->evt = LPFC_EVT_NODEV_TMO;
-	list_add_tail(&evtp->evt_listp, &phba->work_list);
-	if (phba->work_wait)
-		wake_up(phba->work_wait);
-
-	spin_unlock_irqrestore(phba->host->host_lock, iflag);
-	return;
-}
-
-
 /*
  * This routine handles processing a NameServer REG_LOGIN mailbox
  * command upon completion. It is setup in the LPFC_MBOXQ
@@ -2581,11 +2545,7 @@ lpfc_nlp_init(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
 	      uint32_t did)
 {
 	memset(ndlp, 0, sizeof (struct lpfc_nodelist));
-	INIT_LIST_HEAD(&ndlp->nodev_timeout_evt.evt_listp);
 	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
-	init_timer(&ndlp->nlp_tmofunc);
-	ndlp->nlp_tmofunc.function = lpfc_nodev_timeout;
-	ndlp->nlp_tmofunc.data = (unsigned long)ndlp;
 	init_timer(&ndlp->nlp_delayfunc);
 	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
 	ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 20449a8dd53d..d5f415007db2 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1813,7 +1813,7 @@ lpfc_device_recov_npr_node(struct lpfc_hba * phba,
  */
 /*
  * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
- * lists will receive a DEVICE_RECOVERY event. If the linkdown or nodev timers
+ * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers
  * expire, all effected nodes will receive a DEVICE_RM event.
  */
 /*
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index a8816a8738f8..97ae98dc95d0 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -935,7 +935,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 		schedule_timeout_uninterruptible(LPFC_ABORT_WAIT*HZ);
 		spin_lock_irq(phba->host->host_lock);
 		if (++loop_count
-		    > (2 * phba->cfg_nodev_tmo)/LPFC_ABORT_WAIT)
+		    > (2 * phba->cfg_devloss_tmo)/LPFC_ABORT_WAIT)
 			break;
 	}
 
@@ -978,7 +978,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
 	spin_lock_irq(shost->host_lock);
 	/*
 	 * If target is not in a MAPPED state, delay the reset until
-	 * target is rediscovered or nodev timeout expires.
+	 * target is rediscovered or devloss timeout expires.
 	 */
 	while ( 1 ) {
 		if (!pnode)
@@ -1050,7 +1050,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
 		spin_lock_irq(phba->host->host_lock);
 
 		if (++loopcnt
-		    > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT)
+		    > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
 			break;
 
 		cnt = lpfc_sli_sum_iocb(phba,
@@ -1151,7 +1151,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
 		spin_lock_irq(phba->host->host_lock);
 
 		if (++loopcnt
-		    > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT)
+		    > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
 			break;
 
 		cnt = lpfc_sli_sum_iocb(phba,
@@ -1249,7 +1249,7 @@ lpfc_slave_configure(struct scsi_device *sdev)
 	 * target pointer is stored in the starget_data for the
 	 * driver's sysfs entry point functions.
 	 */
-	rport->dev_loss_tmo = phba->cfg_nodev_tmo + 5;
+	rport->dev_loss_tmo = phba->cfg_devloss_tmo;
 
 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
 		lpfc_sli_poll_fcp_ring(phba);