Diffstat (limited to 'drivers/scsi/lpfc/lpfc_scsi.c')
 drivers/scsi/lpfc/lpfc_scsi.c | 279 ++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 278 insertions(+), 1 deletion(-)
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 7b17f52660b4..bd1867411821 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -32,6 +32,7 @@
 #include "lpfc_version.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
 #include "lpfc.h"
@@ -42,6 +43,111 @@
 #define LPFC_RESET_WAIT  2
 #define LPFC_ABORT_WAIT  2
 
+/**
+ * lpfc_update_stats: Update statistical data for the command completion.
+ * @phba: Pointer to HBA object.
+ * @lpfc_cmd: lpfc scsi command object pointer.
+ *
+ * This function is called when there is a command completion and this
+ * function updates the statistical data for the command completion.
+ **/
+static void
+lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
+	struct lpfc_nodelist *pnode = rdata->pnode;
+	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
+	unsigned long flags;
+	struct Scsi_Host *shost = cmd->device->host;
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	unsigned long latency;
+	int i;
+
+	if (cmd->result)
+		return;
+
+	spin_lock_irqsave(shost->host_lock, flags);
+	if (!vport->stat_data_enabled ||
+	    vport->stat_data_blocked ||
+	    !pnode || !pnode->lat_data ||
+	    (phba->bucket_type == LPFC_NO_BUCKET)) {
+		spin_unlock_irqrestore(shost->host_lock, flags);
+		return;
+	}
+	latency = jiffies_to_msecs(jiffies - lpfc_cmd->start_time);
+
+	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
+		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
+			phba->bucket_step;
+		if (i >= LPFC_MAX_BUCKET_COUNT)
+			i = LPFC_MAX_BUCKET_COUNT - 1; /* keep lat_data[i] in bounds */
+	} else {
+		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
+			if (latency <= (phba->bucket_base +
+				((1<<i)*phba->bucket_step)))
+				break;
+	}
+
+	pnode->lat_data[i].cmd_count++;
+	spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
+
+/**
+ * lpfc_send_sdev_queuedepth_change_event: Posts a queuedepth change
+ * event.
+ * @phba: Pointer to HBA context object.
+ * @vport: Pointer to vport object.
+ * @ndlp: Pointer to FC node associated with the target.
+ * @lun: Lun number of the scsi device.
+ * @old_val: Old value of the queue depth.
+ * @new_val: New value of the queue depth.
+ *
+ * This function sends an event to the mgmt application indicating
+ * there is a change in the scsi device queue depth.
+ **/
+static void
+lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
+		struct lpfc_vport *vport,
+		struct lpfc_nodelist *ndlp,
+		uint32_t lun,
+		uint32_t old_val,
+		uint32_t new_val)
+{
+	struct lpfc_fast_path_event *fast_path_evt;
+	unsigned long flags;
+
+	fast_path_evt = lpfc_alloc_fast_evt(phba);
+	if (!fast_path_evt)
+		return;
+
+	fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
+		FC_REG_SCSI_EVENT;
+	fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
+		LPFC_EVENT_VARQUEDEPTH;
+
+	/* Report all luns with change in queue depth */
+	fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
+			&ndlp->nlp_portname, sizeof(struct lpfc_name));
+		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
+			&ndlp->nlp_nodename, sizeof(struct lpfc_name));
+	}
+
+	fast_path_evt->un.queue_depth_evt.oldval = old_val;
+	fast_path_evt->un.queue_depth_evt.newval = new_val;
+	fast_path_evt->vport = vport;
+
+	fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
+	spin_lock_irqsave(&phba->hbalock, flags);
+	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+	lpfc_worker_wake_up(phba);
+
+	return;
+}
+
 /*
  * This function is called with no lock held when there is a resource
  * error in driver or in firmware.
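
The lpfc_update_stats() helper added above maps a completion latency in milliseconds to a histogram slot in one of two ways: LPFC_LINEAR_BUCKET divides the range beyond bucket_base into fixed bucket_step-wide slots, while the power-of-2 mode lets each successive bucket cover twice the latency span of the previous one. A minimal standalone sketch of that selection logic, assuming illustrative values for the base, step, and bucket count (in the driver these come from phba->bucket_*):

/*
 * Standalone model of the bucket selection in lpfc_update_stats().
 * MAX_BUCKET_COUNT and the base/step values are assumptions for the
 * example, not the driver's configuration.
 */
#include <stdio.h>

#define MAX_BUCKET_COUNT 16

static int linear_bucket(unsigned long latency_ms,
			 unsigned long base, unsigned long step)
{
	int i = (latency_ms + step - 1 - base) / step;

	if (i >= MAX_BUCKET_COUNT)
		i = MAX_BUCKET_COUNT - 1;	/* clamp to the last slot */
	return i;
}

static int pow2_bucket(unsigned long latency_ms,
		       unsigned long base, unsigned long step)
{
	int i;

	/* bucket i covers latencies up to base + (1 << i) * step ms */
	for (i = 0; i < MAX_BUCKET_COUNT - 1; i++)
		if (latency_ms <= base + ((1UL << i) * step))
			break;
	return i;	/* anything slower lands in the final bucket */
}

int main(void)
{
	/* base = 0 ms, step = 10 ms, latency = 25 ms */
	printf("linear bucket: %d\n", linear_bucket(25, 0, 10));	/* 3 */
	printf("pow2 bucket:   %d\n", pow2_bucket(25, 0, 10));		/* 2 */
	return 0;
}
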
@@ -117,9 +223,10 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
 	struct lpfc_vport **vports;
 	struct Scsi_Host  *shost;
 	struct scsi_device *sdev;
-	unsigned long new_queue_depth;
+	unsigned long new_queue_depth, old_queue_depth;
 	unsigned long num_rsrc_err, num_cmd_success;
 	int i;
+	struct lpfc_rport_data *rdata;
 
 	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
 	num_cmd_success = atomic_read(&phba->num_cmd_success);
@@ -137,6 +244,7 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
 			else
 				new_queue_depth = sdev->queue_depth -
 						  new_queue_depth;
+			old_queue_depth = sdev->queue_depth;
 			if (sdev->ordered_tags)
 				scsi_adjust_queue_depth(sdev,
 						MSG_ORDERED_TAG,
@@ -145,6 +253,13 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
 				scsi_adjust_queue_depth(sdev,
 						MSG_SIMPLE_TAG,
 						new_queue_depth);
+			rdata = sdev->hostdata;
+			if (rdata)
+				lpfc_send_sdev_queuedepth_change_event(
+					phba, vports[i],
+					rdata->pnode,
+					sdev->lun, old_queue_depth,
+					new_queue_depth);
 		}
 	}
 	lpfc_destroy_vport_work_array(phba, vports);
@@ -159,6 +274,7 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
 	struct Scsi_Host  *shost;
 	struct scsi_device *sdev;
 	int i;
+	struct lpfc_rport_data *rdata;
 
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL)
@@ -176,6 +292,14 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
 				scsi_adjust_queue_depth(sdev,
 						MSG_SIMPLE_TAG,
 						sdev->queue_depth+1);
+			rdata = sdev->hostdata;
+			if (rdata)
+				lpfc_send_sdev_queuedepth_change_event(
+					phba, vports[i],
+					rdata->pnode,
+					sdev->lun,
+					sdev->queue_depth - 1,
+					sdev->queue_depth);
 		}
 	}
 	lpfc_destroy_vport_work_array(phba, vports);
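
Both ramp handlers reuse the deferral pattern this patch introduces: the fast path allocates an event with lpfc_alloc_fast_evt(), fills it in, links it onto phba->work_list under hbalock, and wakes the worker thread, so event delivery never blocks the I/O path. A portable pthreads sketch of that producer/worker shape; the names here (struct evt, post_queuedepth_event) are invented for the example, and the driver's list_add_tail() keeps FIFO order where this sketch pushes LIFO:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct evt {
	struct evt *next;
	unsigned int old_depth, new_depth;
};

static struct evt *work_list;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;

/* Fast path: only takes the short list lock, never does the delivery. */
static void post_queuedepth_event(unsigned int oldv, unsigned int newv)
{
	struct evt *e = malloc(sizeof(*e));

	if (!e)
		return;			/* allocation failure drops the event */
	e->old_depth = oldv;
	e->new_depth = newv;
	pthread_mutex_lock(&lock);
	e->next = work_list;		/* driver uses list_add_tail() (FIFO) */
	work_list = e;
	pthread_cond_signal(&wake);	/* lpfc_worker_wake_up() analogue */
	pthread_mutex_unlock(&lock);
}

/* Worker: dequeues one event and does the (slower) reporting. */
static void *worker(void *arg)
{
	struct evt *e;

	(void)arg;
	pthread_mutex_lock(&lock);
	while (!work_list)
		pthread_cond_wait(&wake, &lock);
	e = work_list;
	work_list = e->next;
	pthread_mutex_unlock(&lock);

	printf("queue depth changed: %u -> %u\n", e->old_depth, e->new_depth);
	free(e);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	post_queuedepth_event(32, 16);
	pthread_join(t, NULL);
	return 0;
}
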
@@ -466,6 +590,97 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 	return 0;
 }
 
+/**
+ * lpfc_send_scsi_error_event: Posts an event when there is SCSI error.
+ * @phba: Pointer to hba context object.
+ * @vport: Pointer to vport object.
+ * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
+ * @rsp_iocb: Pointer to response iocb object which reported error.
+ *
+ * This function posts an event when there is a SCSI command reporting
+ * error from the scsi device.
+ **/
+static void
+lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
+		struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
+	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
+	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
+	uint32_t resp_info = fcprsp->rspStatus2;
+	uint32_t scsi_status = fcprsp->rspStatus3;
+	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
+	struct lpfc_fast_path_event *fast_path_evt = NULL;
+	struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
+	unsigned long flags;
+
+	/* If there is queuefull or busy condition send a scsi event */
+	if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
+		(cmnd->result == SAM_STAT_BUSY)) {
+		fast_path_evt = lpfc_alloc_fast_evt(phba);
+		if (!fast_path_evt)
+			return;
+		fast_path_evt->un.scsi_evt.event_type =
+			FC_REG_SCSI_EVENT;
+		fast_path_evt->un.scsi_evt.subcategory =
+			(cmnd->result == SAM_STAT_TASK_SET_FULL) ?
+			LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
+		fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
+		memcpy(&fast_path_evt->un.scsi_evt.wwpn,
+			&pnode->nlp_portname, sizeof(struct lpfc_name));
+		memcpy(&fast_path_evt->un.scsi_evt.wwnn,
+			&pnode->nlp_nodename, sizeof(struct lpfc_name));
+	} else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
+		((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
+		fast_path_evt = lpfc_alloc_fast_evt(phba);
+		if (!fast_path_evt)
+			return;
+		fast_path_evt->un.check_cond_evt.scsi_event.event_type =
+			FC_REG_SCSI_EVENT;
+		fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
+			LPFC_EVENT_CHECK_COND;
+		fast_path_evt->un.check_cond_evt.scsi_event.lun =
+			cmnd->device->lun;
+		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
+			&pnode->nlp_portname, sizeof(struct lpfc_name));
+		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
+			&pnode->nlp_nodename, sizeof(struct lpfc_name));
+		fast_path_evt->un.check_cond_evt.sense_key =
+			cmnd->sense_buffer[2] & 0xf;
+		fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
+		fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
+	} else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
+		     fcpi_parm &&
+		     ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
+		     ((scsi_status == SAM_STAT_GOOD) &&
+		     !(resp_info & (RESID_UNDER | RESID_OVER))))) {
+		/*
+		 * If status is good or resid does not match with fcp_param and
+		 * there is valid fcpi_parm, then there is a read_check error
+		 */
+		fast_path_evt = lpfc_alloc_fast_evt(phba);
+		if (!fast_path_evt)
+			return;
+		fast_path_evt->un.read_check_error.header.event_type =
+			FC_REG_FABRIC_EVENT;
+		fast_path_evt->un.read_check_error.header.subcategory =
+			LPFC_EVENT_FCPRDCHKERR;
+		memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
+			&pnode->nlp_portname, sizeof(struct lpfc_name));
+		memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
+			&pnode->nlp_nodename, sizeof(struct lpfc_name));
+		fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
+		fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
+		fast_path_evt->un.read_check_error.fcpiparam =
+			fcpi_parm;
+	} else
+		return;
+
+	fast_path_evt->vport = vport;
+	spin_lock_irqsave(&phba->hbalock, flags);
+	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+	lpfc_worker_wake_up(phba);
+	return;
+}
 static void
 lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
 {
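
The third branch of lpfc_send_scsi_error_event() is the subtle one: for a read (DMA_FROM_DEVICE) whose response IOCB carries a nonzero fcpi_parm (the byte count the HBA actually moved), the command is flagged as a read-check error either when the target's reported residual disagrees with fcpi_parm, or when the status is GOOD yet neither RESID_UNDER nor RESID_OVER is set. Restated as a standalone predicate; the flag values below are illustrative stand-ins for the driver's definitions:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins for the driver's FCP response flags. */
#define RESID_UNDER	0x08
#define RESID_OVER	0x04
#define STAT_GOOD	0x00

static bool is_read_check_error(bool read_cmd, uint32_t fcpi_parm,
				uint32_t rsp_resid, uint32_t resp_info,
				uint8_t scsi_status)
{
	if (!read_cmd || !fcpi_parm)
		return false;
	/* residual mismatch, or GOOD status with no residual flag at all */
	return rsp_resid != fcpi_parm ||
	       (scsi_status == STAT_GOOD &&
		!(resp_info & (RESID_UNDER | RESID_OVER)));
}
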
@@ -494,6 +709,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 	uint32_t rsplen = 0;
 	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
 
+
 	/*
 	 * If this is a task management command, there is no
 	 * scsi packet associated with this lpfc_cmd.  The driver
@@ -609,6 +825,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 
  out:
 	cmnd->result = ScsiResult(host_status, scsi_status);
+	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
 }
 
 static void
@@ -625,6 +842,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	struct scsi_device *sdev, *tmp_sdev;
 	int depth = 0;
 	unsigned long flags;
+	struct lpfc_fast_path_event *fast_path_evt;
 
 	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
 	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
@@ -655,6 +873,30 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	case IOSTAT_NPORT_BSY:
 	case IOSTAT_FABRIC_BSY:
 		cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
+		fast_path_evt = lpfc_alloc_fast_evt(phba);
+		if (!fast_path_evt)
+			break;
+		fast_path_evt->un.fabric_evt.event_type =
+			FC_REG_FABRIC_EVENT;
+		fast_path_evt->un.fabric_evt.subcategory =
+			(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
+			LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
+		if (pnode && NLP_CHK_NODE_ACT(pnode)) {
+			memcpy(&fast_path_evt->un.fabric_evt.wwpn,
+				&pnode->nlp_portname,
+				sizeof(struct lpfc_name));
+			memcpy(&fast_path_evt->un.fabric_evt.wwnn,
+				&pnode->nlp_nodename,
+				sizeof(struct lpfc_name));
+		}
+		fast_path_evt->vport = vport;
+		fast_path_evt->work_evt.evt =
+			LPFC_EVT_FASTPATH_MGMT_EVT;
+		spin_lock_irqsave(&phba->hbalock, flags);
+		list_add_tail(&fast_path_evt->work_evt.evt_listp,
+			&phba->work_list);
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		lpfc_worker_wake_up(phba);
 		break;
 	case IOSTAT_LOCAL_REJECT:
 		if (lpfc_cmd->result == IOERR_INVALID_RPI ||
@@ -687,6 +929,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 				 scsi_get_resid(cmd));
 	}
 
+	lpfc_update_stats(phba, lpfc_cmd);
 	result = cmd->result;
 	sdev = cmd->device;
 	if (vport->cfg_max_scsicmpl_time &&
@@ -755,6 +998,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 				pnode->last_ramp_up_time = jiffies;
 			}
 		}
+		lpfc_send_sdev_queuedepth_change_event(phba, vport, pnode,
+			0xFFFFFFFF,
+			sdev->queue_depth - 1, sdev->queue_depth);
 	}
 
 	/*
@@ -784,6 +1030,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 			lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
 					 "0711 detected queue full - lun queue "
 					 "depth adjusted to %d.\n", depth);
+			lpfc_send_sdev_queuedepth_change_event(phba, vport,
+					pnode, 0xFFFFFFFF,
+					depth+1, depth);
 		}
 	}
 
@@ -1112,6 +1361,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 		goto out_host_busy;
 	}
 
+	lpfc_cmd->start_time = jiffies;
 	/*
 	 * Store the midlayer's command structure for the completion phase
 	 * and complete the command initialization.
@@ -1280,6 +1530,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
 	int ret = SUCCESS;
 	int status;
 	int cnt;
+	struct lpfc_scsi_event_header scsi_event;
 
 	lpfc_block_error_handler(cmnd);
 	/*
@@ -1298,6 +1549,19 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
 			break;
 		pnode = rdata->pnode;
 	}
+
+	scsi_event.event_type = FC_REG_SCSI_EVENT;
+	scsi_event.subcategory = LPFC_EVENT_TGTRESET;
+	scsi_event.lun = 0;
+	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
+	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
+
+	fc_host_post_vendor_event(shost,
+		fc_get_event_number(),
+		sizeof(scsi_event),
+		(char *)&scsi_event,
+		SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
+
 	if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
 				 "0721 LUN Reset rport "
@@ -1381,6 +1645,19 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
 	int cnt;
 	struct lpfc_scsi_buf * lpfc_cmd;
 	unsigned long later;
+	struct lpfc_scsi_event_header scsi_event;
+
+	scsi_event.event_type = FC_REG_SCSI_EVENT;
+	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
+	scsi_event.lun = 0;
+	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
+	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
+
+	fc_host_post_vendor_event(shost,
+		fc_get_event_number(),
+		sizeof(scsi_event),
+		(char *)&scsi_event,
+		SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
 
 	lpfc_block_error_handler(cmnd);
 	/*
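
The device-reset and bus-reset hunks above post structurally identical events, differing only in the subcategory and in whether the WWNs come from the rport node or the vport. A hypothetical consolidation, not part of this patch; lpfc_post_scsi_reset_event() is an invented name, while the types and the fc_host_post_vendor_event() call are exactly as used above, so this assumes the same lpfc headers as the rest of the file:

/* Hypothetical helper folding the duplicated reset-event posting. */
static void
lpfc_post_scsi_reset_event(struct Scsi_Host *shost, uint32_t subcategory,
			   struct lpfc_name *wwpn, struct lpfc_name *wwnn)
{
	struct lpfc_scsi_event_header scsi_event;

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = subcategory;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, wwpn, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, wwnn, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost,
		fc_get_event_number(),
		sizeof(scsi_event),
		(char *)&scsi_event,
		SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}
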