author    | Linus Torvalds <torvalds@linux-foundation.org> | 2008-12-30 20:43:10 -0500
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-12-30 20:43:10 -0500
commit    | 590cf28580c999c8ba70dc39b40bab09d69e2630
tree      | 22b9aa4b148bea8a310b760521d1032eef7d743f /drivers/scsi/lpfc
parent    | f54a6ec0fd85002d94d05b4bb679508eeb066683
parent    | fb5edd020fa0fbe991f4a473611ad530d2237425
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (104 commits)
[SCSI] fcoe: fix configuration problems
[SCSI] cxgb3i: fix select/depend problem
[SCSI] fcoe: fix incorrect use of struct module
[SCSI] cxgb3i: remove use of skb->sp
[SCSI] cxgb3i: Add cxgb3i iSCSI driver.
[SCSI] zfcp: Remove unnecessary warning message
[SCSI] zfcp: Add support for unchained FSF requests
[SCSI] zfcp: Remove busid macro
[SCSI] zfcp: remove DID_DID flag
[SCSI] zfcp: Simplify mask lookups for incoming RSCNs
[SCSI] zfcp: Remove initial device data from zfcp_data
[SCSI] zfcp: fix compile warning
[SCSI] zfcp: Remove adapter list
[SCSI] zfcp: Simplify SBAL allocation to fix sparse warnings
[SCSI] zfcp: register with SCSI layer on ccw registration
[SCSI] zfcp: Fix message line break
[SCSI] qla2xxx: changes in multiq code
[SCSI] eata: fix the data buffer accessors conversion regression
[SCSI] ibmvfc: Improve async event handling
[SCSI] lpfc : correct printk types on PPC compiles
...
Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r-- | drivers/scsi/lpfc/lpfc.h           |   22
-rw-r--r-- | drivers/scsi/lpfc/lpfc_attr.c      |  169
-rw-r--r-- | drivers/scsi/lpfc/lpfc_crtn.h      |   15
-rw-r--r-- | drivers/scsi/lpfc/lpfc_ct.c        |   17
-rw-r--r-- | drivers/scsi/lpfc/lpfc_debugfs.c   |  164
-rw-r--r-- | drivers/scsi/lpfc/lpfc_debugfs.h   |    2
-rw-r--r-- | drivers/scsi/lpfc/lpfc_els.c       |  160
-rw-r--r-- | drivers/scsi/lpfc/lpfc_hbadisc.c   |   69
-rw-r--r-- | drivers/scsi/lpfc/lpfc_hw.h        |  249
-rw-r--r-- | drivers/scsi/lpfc/lpfc_init.c      |  685
-rw-r--r-- | drivers/scsi/lpfc/lpfc_logmsg.h    |    1
-rw-r--r-- | drivers/scsi/lpfc/lpfc_mbox.c      |   54
-rw-r--r-- | drivers/scsi/lpfc/lpfc_nl.h        |   30
-rw-r--r-- | drivers/scsi/lpfc/lpfc_nportdisc.c |    2
-rw-r--r-- | drivers/scsi/lpfc/lpfc_scsi.c      | 1235
-rw-r--r-- | drivers/scsi/lpfc/lpfc_scsi.h      |    2
-rw-r--r-- | drivers/scsi/lpfc/lpfc_sli.c       |  159
-rw-r--r-- | drivers/scsi/lpfc/lpfc_version.h   |    2
-rw-r--r-- | drivers/scsi/lpfc/lpfc_vport.c     |   28
19 files changed, 2623 insertions, 442 deletions
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 60a9e6e9384b..dcba267db711 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -29,8 +29,10 @@ struct lpfc_sli2_slim; | |||
29 | #define LPFC_MAX_NS_RETRY 3 /* Number of retry attempts to contact | 29 | #define LPFC_MAX_NS_RETRY 3 /* Number of retry attempts to contact |
30 | the NameServer before giving up. */ | 30 | the NameServer before giving up. */ |
31 | #define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */ | 31 | #define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */ |
32 | #define LPFC_DEFAULT_SG_SEG_CNT 64 /* sg element count per scsi cmnd */ | 32 | #define LPFC_DEFAULT_SG_SEG_CNT 64 /* sg element count per scsi cmnd */ |
33 | #define LPFC_MAX_SG_SEG_CNT 256 /* sg element count per scsi cmnd */ | 33 | #define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */ |
34 | #define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */ | ||
35 | #define LPFC_MAX_PROT_SG_SEG_CNT 4096 /* prot sg element count per scsi cmd*/ | ||
34 | #define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */ | 36 | #define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */ |
35 | #define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */ | 37 | #define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */ |
36 | #define LPFC_VNAME_LEN 100 /* vport symbolic name length */ | 38 | #define LPFC_VNAME_LEN 100 /* vport symbolic name length */ |
@@ -354,8 +356,6 @@ struct lpfc_vport { | |||
354 | uint8_t load_flag; | 356 | uint8_t load_flag; |
355 | #define FC_LOADING 0x1 /* HBA in process of loading drvr */ | 357 | #define FC_LOADING 0x1 /* HBA in process of loading drvr */ |
356 | #define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */ | 358 | #define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */ |
357 | char *vname; /* Application assigned name */ | ||
358 | |||
359 | /* Vport Config Parameters */ | 359 | /* Vport Config Parameters */ |
360 | uint32_t cfg_scan_down; | 360 | uint32_t cfg_scan_down; |
361 | uint32_t cfg_lun_queue_depth; | 361 | uint32_t cfg_lun_queue_depth; |
@@ -376,7 +376,7 @@ struct lpfc_vport { | |||
376 | 376 | ||
377 | struct fc_vport *fc_vport; | 377 | struct fc_vport *fc_vport; |
378 | 378 | ||
379 | #ifdef CONFIG_LPFC_DEBUG_FS | 379 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
380 | struct dentry *debug_disc_trc; | 380 | struct dentry *debug_disc_trc; |
381 | struct dentry *debug_nodelist; | 381 | struct dentry *debug_nodelist; |
382 | struct dentry *vport_debugfs_root; | 382 | struct dentry *vport_debugfs_root; |
@@ -428,6 +428,7 @@ struct lpfc_hba { | |||
428 | #define LPFC_SLI3_VPORT_TEARDOWN 0x04 | 428 | #define LPFC_SLI3_VPORT_TEARDOWN 0x04 |
429 | #define LPFC_SLI3_CRP_ENABLED 0x08 | 429 | #define LPFC_SLI3_CRP_ENABLED 0x08 |
430 | #define LPFC_SLI3_INB_ENABLED 0x10 | 430 | #define LPFC_SLI3_INB_ENABLED 0x10 |
431 | #define LPFC_SLI3_BG_ENABLED 0x20 | ||
431 | uint32_t iocb_cmd_size; | 432 | uint32_t iocb_cmd_size; |
432 | uint32_t iocb_rsp_size; | 433 | uint32_t iocb_rsp_size; |
433 | 434 | ||
@@ -501,12 +502,14 @@ struct lpfc_hba { | |||
501 | uint32_t cfg_poll_tmo; | 502 | uint32_t cfg_poll_tmo; |
502 | uint32_t cfg_use_msi; | 503 | uint32_t cfg_use_msi; |
503 | uint32_t cfg_sg_seg_cnt; | 504 | uint32_t cfg_sg_seg_cnt; |
505 | uint32_t cfg_prot_sg_seg_cnt; | ||
504 | uint32_t cfg_sg_dma_buf_size; | 506 | uint32_t cfg_sg_dma_buf_size; |
505 | uint64_t cfg_soft_wwnn; | 507 | uint64_t cfg_soft_wwnn; |
506 | uint64_t cfg_soft_wwpn; | 508 | uint64_t cfg_soft_wwpn; |
507 | uint32_t cfg_hba_queue_depth; | 509 | uint32_t cfg_hba_queue_depth; |
508 | uint32_t cfg_enable_hba_reset; | 510 | uint32_t cfg_enable_hba_reset; |
509 | uint32_t cfg_enable_hba_heartbeat; | 511 | uint32_t cfg_enable_hba_heartbeat; |
512 | uint32_t cfg_enable_bg; | ||
510 | 513 | ||
511 | lpfc_vpd_t vpd; /* vital product data */ | 514 | lpfc_vpd_t vpd; /* vital product data */ |
512 | 515 | ||
@@ -572,6 +575,9 @@ struct lpfc_hba { | |||
572 | uint64_t fc4InputRequests; | 575 | uint64_t fc4InputRequests; |
573 | uint64_t fc4OutputRequests; | 576 | uint64_t fc4OutputRequests; |
574 | uint64_t fc4ControlRequests; | 577 | uint64_t fc4ControlRequests; |
578 | uint64_t bg_guard_err_cnt; | ||
579 | uint64_t bg_apptag_err_cnt; | ||
580 | uint64_t bg_reftag_err_cnt; | ||
575 | 581 | ||
576 | struct lpfc_sysfs_mbox sysfs_mbox; | 582 | struct lpfc_sysfs_mbox sysfs_mbox; |
577 | 583 | ||
@@ -594,6 +600,8 @@ struct lpfc_hba { | |||
594 | 600 | ||
595 | struct fc_host_statistics link_stats; | 601 | struct fc_host_statistics link_stats; |
596 | enum intr_type_t intr_type; | 602 | enum intr_type_t intr_type; |
603 | uint32_t intr_mode; | ||
604 | #define LPFC_INTR_ERROR 0xFFFFFFFF | ||
597 | struct msix_entry msix_entries[LPFC_MSIX_VECTORS]; | 605 | struct msix_entry msix_entries[LPFC_MSIX_VECTORS]; |
598 | 606 | ||
599 | struct list_head port_list; | 607 | struct list_head port_list; |
@@ -613,12 +621,14 @@ struct lpfc_hba { | |||
613 | unsigned long last_rsrc_error_time; | 621 | unsigned long last_rsrc_error_time; |
614 | unsigned long last_ramp_down_time; | 622 | unsigned long last_ramp_down_time; |
615 | unsigned long last_ramp_up_time; | 623 | unsigned long last_ramp_up_time; |
616 | #ifdef CONFIG_LPFC_DEBUG_FS | 624 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
617 | struct dentry *hba_debugfs_root; | 625 | struct dentry *hba_debugfs_root; |
618 | atomic_t debugfs_vport_count; | 626 | atomic_t debugfs_vport_count; |
619 | struct dentry *debug_hbqinfo; | 627 | struct dentry *debug_hbqinfo; |
620 | struct dentry *debug_dumpHostSlim; | 628 | struct dentry *debug_dumpHostSlim; |
621 | struct dentry *debug_dumpHBASlim; | 629 | struct dentry *debug_dumpHBASlim; |
630 | struct dentry *debug_dumpData; /* BlockGuard BPL*/ | ||
631 | struct dentry *debug_dumpDif; /* BlockGuard BPL*/ | ||
622 | struct dentry *debug_slow_ring_trc; | 632 | struct dentry *debug_slow_ring_trc; |
623 | struct lpfc_debugfs_trc *slow_ring_trc; | 633 | struct lpfc_debugfs_trc *slow_ring_trc; |
624 | atomic_t slow_ring_trc_cnt; | 634 | atomic_t slow_ring_trc_cnt; |
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index aa3d6277581d..40cf0f4f327f 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -96,6 +96,61 @@ lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr, | |||
96 | return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n"); | 96 | return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n"); |
97 | } | 97 | } |
98 | 98 | ||
99 | static ssize_t | ||
100 | lpfc_bg_info_show(struct device *dev, struct device_attribute *attr, | ||
101 | char *buf) | ||
102 | { | ||
103 | struct Scsi_Host *shost = class_to_shost(dev); | ||
104 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | ||
105 | struct lpfc_hba *phba = vport->phba; | ||
106 | |||
107 | if (phba->cfg_enable_bg) | ||
108 | if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) | ||
109 | return snprintf(buf, PAGE_SIZE, "BlockGuard Enabled\n"); | ||
110 | else | ||
111 | return snprintf(buf, PAGE_SIZE, | ||
112 | "BlockGuard Not Supported\n"); | ||
113 | else | ||
114 | return snprintf(buf, PAGE_SIZE, | ||
115 | "BlockGuard Disabled\n"); | ||
116 | } | ||
117 | |||
118 | static ssize_t | ||
119 | lpfc_bg_guard_err_show(struct device *dev, struct device_attribute *attr, | ||
120 | char *buf) | ||
121 | { | ||
122 | struct Scsi_Host *shost = class_to_shost(dev); | ||
123 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | ||
124 | struct lpfc_hba *phba = vport->phba; | ||
125 | |||
126 | return snprintf(buf, PAGE_SIZE, "%llu\n", | ||
127 | (unsigned long long)phba->bg_guard_err_cnt); | ||
128 | } | ||
129 | |||
130 | static ssize_t | ||
131 | lpfc_bg_apptag_err_show(struct device *dev, struct device_attribute *attr, | ||
132 | char *buf) | ||
133 | { | ||
134 | struct Scsi_Host *shost = class_to_shost(dev); | ||
135 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | ||
136 | struct lpfc_hba *phba = vport->phba; | ||
137 | |||
138 | return snprintf(buf, PAGE_SIZE, "%llu\n", | ||
139 | (unsigned long long)phba->bg_apptag_err_cnt); | ||
140 | } | ||
141 | |||
142 | static ssize_t | ||
143 | lpfc_bg_reftag_err_show(struct device *dev, struct device_attribute *attr, | ||
144 | char *buf) | ||
145 | { | ||
146 | struct Scsi_Host *shost = class_to_shost(dev); | ||
147 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | ||
148 | struct lpfc_hba *phba = vport->phba; | ||
149 | |||
150 | return snprintf(buf, PAGE_SIZE, "%llu\n", | ||
151 | (unsigned long long)phba->bg_reftag_err_cnt); | ||
152 | } | ||
153 | |||
99 | /** | 154 | /** |
100 | * lpfc_info_show: Return some pci info about the host in ascii. | 155 | * lpfc_info_show: Return some pci info about the host in ascii. |
101 | * @dev: class converted to a Scsi_host structure. | 156 | * @dev: class converted to a Scsi_host structure. |
@@ -1485,6 +1540,10 @@ lpfc_vport_param_store(name)\ | |||
1485 | static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ | 1540 | static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ |
1486 | lpfc_##name##_show, lpfc_##name##_store) | 1541 | lpfc_##name##_show, lpfc_##name##_store) |
1487 | 1542 | ||
1543 | static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL); | ||
1544 | static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL); | ||
1545 | static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL); | ||
1546 | static DEVICE_ATTR(bg_reftag_err, S_IRUGO, lpfc_bg_reftag_err_show, NULL); | ||
1488 | static DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL); | 1547 | static DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL); |
1489 | static DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL); | 1548 | static DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL); |
1490 | static DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL); | 1549 | static DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL); |
@@ -1970,6 +2029,7 @@ static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR, | |||
1970 | # LOG_LINK_EVENT 0x10 Link events | 2029 | # LOG_LINK_EVENT 0x10 Link events |
1971 | # LOG_FCP 0x40 FCP traffic history | 2030 | # LOG_FCP 0x40 FCP traffic history |
1972 | # LOG_NODE 0x80 Node table events | 2031 | # LOG_NODE 0x80 Node table events |
2032 | # LOG_BG 0x200 BlockGuard events | ||
1973 | # LOG_MISC 0x400 Miscellaneous events | 2033 | # LOG_MISC 0x400 Miscellaneous events |
1974 | # LOG_SLI 0x800 SLI events | 2034 | # LOG_SLI 0x800 SLI events |
1975 | # LOG_FCP_ERROR 0x1000 Only log FCP errors | 2035 | # LOG_FCP_ERROR 0x1000 Only log FCP errors |
@@ -2769,6 +2829,42 @@ LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver."); | |||
2769 | LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat."); | 2829 | LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat."); |
2770 | 2830 | ||
2771 | /* | 2831 | /* |
2832 | # lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF) | ||
2833 | # 0 = BlockGuard disabled (default) | ||
2834 | # 1 = BlockGuard enabled | ||
2835 | # Value range is [0,1]. Default value is 0. | ||
2836 | */ | ||
2837 | LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support"); | ||
2838 | |||
2839 | |||
2840 | /* | ||
2841 | # lpfc_prot_mask: i | ||
2842 | # - Bit mask of host protection capabilities used to register with the | ||
2843 | # SCSI mid-layer | ||
2844 | # - Only meaningful if BG is turned on (lpfc_enable_bg=1). | ||
2845 | # - Allows you to ultimately specify which profiles to use | ||
2846 | # - Default will result in registering capabilities for all profiles. | ||
2847 | # | ||
2848 | */ | ||
2849 | unsigned int lpfc_prot_mask = SHOST_DIX_TYPE0_PROTECTION; | ||
2850 | |||
2851 | module_param(lpfc_prot_mask, uint, 0); | ||
2852 | MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask"); | ||
2853 | |||
2854 | /* | ||
2855 | # lpfc_prot_guard: i | ||
2856 | # - Bit mask of protection guard types to register with the SCSI mid-layer | ||
2857 | # - Guard types are currently either 1) IP checksum 2) T10-DIF CRC | ||
2858 | # - Allows you to ultimately specify which profiles to use | ||
2859 | # - Default will result in registering capabilities for all guard types | ||
2860 | # | ||
2861 | */ | ||
2862 | unsigned char lpfc_prot_guard = SHOST_DIX_GUARD_IP; | ||
2863 | module_param(lpfc_prot_guard, byte, 0); | ||
2864 | MODULE_PARM_DESC(lpfc_prot_guard, "host protection guard type"); | ||
2865 | |||
2866 | |||
2867 | /* | ||
2772 | * lpfc_sg_seg_cnt: Initial Maximum DMA Segment Count | 2868 | * lpfc_sg_seg_cnt: Initial Maximum DMA Segment Count |
2773 | * This value can be set to values between 64 and 256. The default value is | 2869 | * This value can be set to values between 64 and 256. The default value is |
2774 | * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer | 2870 | * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer |
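The two module parameters added above, lpfc_prot_mask and lpfc_prot_guard, describe which T10 DIF/DIX protection profiles and which guard-tag format (IP checksum or T10-DIF CRC) the driver should advertise to the SCSI midlayer; the actual registration is expected to live in the driver's host-setup code (lpfc_init.c is part of this series, but its hunks are not shown in this excerpt). The sketch below is illustrative only and not taken from the patch: scsi_host_set_prot() and scsi_host_set_guard() are the standard midlayer helpers, while example_register_prot() is a hypothetical wrapper.

```c
/*
 * Illustrative sketch only -- not from this patch.  Shows how a mask such
 * as lpfc_prot_mask and a guard type such as lpfc_prot_guard are typically
 * handed to the SCSI midlayer.  example_register_prot() is hypothetical.
 */
#include <scsi/scsi_host.h>

static void example_register_prot(struct Scsi_Host *shost,
				  unsigned int prot_mask,
				  unsigned char prot_guard)
{
	/* Advertise the DIF/DIX profiles the HBA is willing to handle. */
	scsi_host_set_prot(shost, prot_mask);

	/* Advertise the guard-tag format: IP checksum or T10-DIF CRC. */
	scsi_host_set_guard(shost, prot_guard);
}
```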
@@ -2777,7 +2873,15 @@ LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat."); | |||
2777 | LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT, | 2873 | LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT, |
2778 | LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count"); | 2874 | LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count"); |
2779 | 2875 | ||
2876 | LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_PROT_SG_SEG_CNT, | ||
2877 | LPFC_DEFAULT_PROT_SG_SEG_CNT, LPFC_MAX_PROT_SG_SEG_CNT, | ||
2878 | "Max Protection Scatter Gather Segment Count"); | ||
2879 | |||
2780 | struct device_attribute *lpfc_hba_attrs[] = { | 2880 | struct device_attribute *lpfc_hba_attrs[] = { |
2881 | &dev_attr_bg_info, | ||
2882 | &dev_attr_bg_guard_err, | ||
2883 | &dev_attr_bg_apptag_err, | ||
2884 | &dev_attr_bg_reftag_err, | ||
2781 | &dev_attr_info, | 2885 | &dev_attr_info, |
2782 | &dev_attr_serialnum, | 2886 | &dev_attr_serialnum, |
2783 | &dev_attr_modeldesc, | 2887 | &dev_attr_modeldesc, |
@@ -2825,6 +2929,7 @@ struct device_attribute *lpfc_hba_attrs[] = { | |||
2825 | &dev_attr_lpfc_poll, | 2929 | &dev_attr_lpfc_poll, |
2826 | &dev_attr_lpfc_poll_tmo, | 2930 | &dev_attr_lpfc_poll_tmo, |
2827 | &dev_attr_lpfc_use_msi, | 2931 | &dev_attr_lpfc_use_msi, |
2932 | &dev_attr_lpfc_enable_bg, | ||
2828 | &dev_attr_lpfc_soft_wwnn, | 2933 | &dev_attr_lpfc_soft_wwnn, |
2829 | &dev_attr_lpfc_soft_wwpn, | 2934 | &dev_attr_lpfc_soft_wwpn, |
2830 | &dev_attr_lpfc_soft_wwn_enable, | 2935 | &dev_attr_lpfc_soft_wwn_enable, |
@@ -2833,6 +2938,7 @@ struct device_attribute *lpfc_hba_attrs[] = { | |||
2833 | &dev_attr_lpfc_sg_seg_cnt, | 2938 | &dev_attr_lpfc_sg_seg_cnt, |
2834 | &dev_attr_lpfc_max_scsicmpl_time, | 2939 | &dev_attr_lpfc_max_scsicmpl_time, |
2835 | &dev_attr_lpfc_stat_data_ctrl, | 2940 | &dev_attr_lpfc_stat_data_ctrl, |
2941 | &dev_attr_lpfc_prot_sg_seg_cnt, | ||
2836 | NULL, | 2942 | NULL, |
2837 | }; | 2943 | }; |
2838 | 2944 | ||
@@ -3282,25 +3388,28 @@ lpfc_alloc_sysfs_attr(struct lpfc_vport *vport) | |||
3282 | int error; | 3388 | int error; |
3283 | 3389 | ||
3284 | error = sysfs_create_bin_file(&shost->shost_dev.kobj, | 3390 | error = sysfs_create_bin_file(&shost->shost_dev.kobj, |
3285 | &sysfs_ctlreg_attr); | 3391 | &sysfs_drvr_stat_data_attr); |
3286 | if (error) | 3392 | |
3393 | /* Virtual ports do not need ctrl_reg and mbox */ | ||
3394 | if (error || vport->port_type == LPFC_NPIV_PORT) | ||
3287 | goto out; | 3395 | goto out; |
3288 | 3396 | ||
3289 | error = sysfs_create_bin_file(&shost->shost_dev.kobj, | 3397 | error = sysfs_create_bin_file(&shost->shost_dev.kobj, |
3290 | &sysfs_mbox_attr); | 3398 | &sysfs_ctlreg_attr); |
3291 | if (error) | 3399 | if (error) |
3292 | goto out_remove_ctlreg_attr; | 3400 | goto out_remove_stat_attr; |
3293 | 3401 | ||
3294 | error = sysfs_create_bin_file(&shost->shost_dev.kobj, | 3402 | error = sysfs_create_bin_file(&shost->shost_dev.kobj, |
3295 | &sysfs_drvr_stat_data_attr); | 3403 | &sysfs_mbox_attr); |
3296 | if (error) | 3404 | if (error) |
3297 | goto out_remove_mbox_attr; | 3405 | goto out_remove_ctlreg_attr; |
3298 | 3406 | ||
3299 | return 0; | 3407 | return 0; |
3300 | out_remove_mbox_attr: | ||
3301 | sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr); | ||
3302 | out_remove_ctlreg_attr: | 3408 | out_remove_ctlreg_attr: |
3303 | sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); | 3409 | sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); |
3410 | out_remove_stat_attr: | ||
3411 | sysfs_remove_bin_file(&shost->shost_dev.kobj, | ||
3412 | &sysfs_drvr_stat_data_attr); | ||
3304 | out: | 3413 | out: |
3305 | return error; | 3414 | return error; |
3306 | } | 3415 | } |
@@ -3315,6 +3424,9 @@ lpfc_free_sysfs_attr(struct lpfc_vport *vport) | |||
3315 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | 3424 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
3316 | sysfs_remove_bin_file(&shost->shost_dev.kobj, | 3425 | sysfs_remove_bin_file(&shost->shost_dev.kobj, |
3317 | &sysfs_drvr_stat_data_attr); | 3426 | &sysfs_drvr_stat_data_attr); |
3427 | /* Virtual ports do not need ctrl_reg and mbox */ | ||
3428 | if (vport->port_type == LPFC_NPIV_PORT) | ||
3429 | return; | ||
3318 | sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr); | 3430 | sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr); |
3319 | sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); | 3431 | sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); |
3320 | } | 3432 | } |
@@ -3792,6 +3904,23 @@ lpfc_show_rport_##field (struct device *dev, \ | |||
3792 | lpfc_rport_show_function(field, format_string, sz, ) \ | 3904 | lpfc_rport_show_function(field, format_string, sz, ) \ |
3793 | static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL) | 3905 | static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL) |
3794 | 3906 | ||
3907 | /** | ||
3908 | * lpfc_set_vport_symbolic_name: Set the vport's symbolic name. | ||
3909 | * @fc_vport: The fc_vport whose symbolic name has been changed. | ||
3910 | * | ||
3911 | * Description: | ||
3912 | * This function is called by the transport after the @fc_vport's symbolic name | ||
3913 | * has been changed. This function re-registers the symbolic name with the | ||
3914 | * switch to propagate the change into the fabric if the vport is active. | ||
3915 | **/ | ||
3916 | static void | ||
3917 | lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport) | ||
3918 | { | ||
3919 | struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; | ||
3920 | |||
3921 | if (vport->port_state == LPFC_VPORT_READY) | ||
3922 | lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0); | ||
3923 | } | ||
3795 | 3924 | ||
3796 | struct fc_function_template lpfc_transport_functions = { | 3925 | struct fc_function_template lpfc_transport_functions = { |
3797 | /* fixed attributes the driver supports */ | 3926 | /* fixed attributes the driver supports */ |
@@ -3801,6 +3930,7 @@ struct fc_function_template lpfc_transport_functions = { | |||
3801 | .show_host_supported_fc4s = 1, | 3930 | .show_host_supported_fc4s = 1, |
3802 | .show_host_supported_speeds = 1, | 3931 | .show_host_supported_speeds = 1, |
3803 | .show_host_maxframe_size = 1, | 3932 | .show_host_maxframe_size = 1, |
3933 | .show_host_symbolic_name = 1, | ||
3804 | 3934 | ||
3805 | /* dynamic attributes the driver supports */ | 3935 | /* dynamic attributes the driver supports */ |
3806 | .get_host_port_id = lpfc_get_host_port_id, | 3936 | .get_host_port_id = lpfc_get_host_port_id, |
@@ -3850,6 +3980,10 @@ struct fc_function_template lpfc_transport_functions = { | |||
3850 | .terminate_rport_io = lpfc_terminate_rport_io, | 3980 | .terminate_rport_io = lpfc_terminate_rport_io, |
3851 | 3981 | ||
3852 | .dd_fcvport_size = sizeof(struct lpfc_vport *), | 3982 | .dd_fcvport_size = sizeof(struct lpfc_vport *), |
3983 | |||
3984 | .vport_disable = lpfc_vport_disable, | ||
3985 | |||
3986 | .set_vport_symbolic_name = lpfc_set_vport_symbolic_name, | ||
3853 | }; | 3987 | }; |
3854 | 3988 | ||
3855 | struct fc_function_template lpfc_vport_transport_functions = { | 3989 | struct fc_function_template lpfc_vport_transport_functions = { |
@@ -3860,6 +3994,7 @@ struct fc_function_template lpfc_vport_transport_functions = { | |||
3860 | .show_host_supported_fc4s = 1, | 3994 | .show_host_supported_fc4s = 1, |
3861 | .show_host_supported_speeds = 1, | 3995 | .show_host_supported_speeds = 1, |
3862 | .show_host_maxframe_size = 1, | 3996 | .show_host_maxframe_size = 1, |
3997 | .show_host_symbolic_name = 1, | ||
3863 | 3998 | ||
3864 | /* dynamic attributes the driver supports */ | 3999 | /* dynamic attributes the driver supports */ |
3865 | .get_host_port_id = lpfc_get_host_port_id, | 4000 | .get_host_port_id = lpfc_get_host_port_id, |
@@ -3908,6 +4043,8 @@ struct fc_function_template lpfc_vport_transport_functions = { | |||
3908 | .terminate_rport_io = lpfc_terminate_rport_io, | 4043 | .terminate_rport_io = lpfc_terminate_rport_io, |
3909 | 4044 | ||
3910 | .vport_disable = lpfc_vport_disable, | 4045 | .vport_disable = lpfc_vport_disable, |
4046 | |||
4047 | .set_vport_symbolic_name = lpfc_set_vport_symbolic_name, | ||
3911 | }; | 4048 | }; |
3912 | 4049 | ||
3913 | /** | 4050 | /** |
@@ -3930,13 +4067,12 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) | |||
3930 | lpfc_use_msi_init(phba, lpfc_use_msi); | 4067 | lpfc_use_msi_init(phba, lpfc_use_msi); |
3931 | lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); | 4068 | lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); |
3932 | lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); | 4069 | lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); |
4070 | lpfc_enable_bg_init(phba, lpfc_enable_bg); | ||
3933 | phba->cfg_poll = lpfc_poll; | 4071 | phba->cfg_poll = lpfc_poll; |
3934 | phba->cfg_soft_wwnn = 0L; | 4072 | phba->cfg_soft_wwnn = 0L; |
3935 | phba->cfg_soft_wwpn = 0L; | 4073 | phba->cfg_soft_wwpn = 0L; |
3936 | lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); | 4074 | lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); |
3937 | /* Also reinitialize the host templates with new values. */ | 4075 | lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt); |
3938 | lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; | ||
3939 | lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; | ||
3940 | /* | 4076 | /* |
3941 | * Since the sg_tablesize is module parameter, the sg_dma_buf_size | 4077 | * Since the sg_tablesize is module parameter, the sg_dma_buf_size |
3942 | * used to create the sg_dma_buf_pool must be dynamically calculated. | 4078 | * used to create the sg_dma_buf_pool must be dynamically calculated. |
@@ -3945,6 +4081,17 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) | |||
3945 | phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + | 4081 | phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + |
3946 | sizeof(struct fcp_rsp) + | 4082 | sizeof(struct fcp_rsp) + |
3947 | ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); | 4083 | ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); |
4084 | |||
4085 | if (phba->cfg_enable_bg) { | ||
4086 | phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT; | ||
4087 | phba->cfg_sg_dma_buf_size += | ||
4088 | phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64); | ||
4089 | } | ||
4090 | |||
4091 | /* Also reinitialize the host templates with new values. */ | ||
4092 | lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; | ||
4093 | lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; | ||
4094 | |||
3948 | lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); | 4095 | lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); |
3949 | return; | 4096 | return; |
3950 | } | 4097 | } |
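The lpfc_get_cfgparam() hunk above grows the per-command DMA buffer when BlockGuard is enabled: on top of the FCP command, FCP response, and one BDE per data scatter-gather segment, it reserves one additional BDE per protection segment, and it also raises cfg_sg_seg_cnt to LPFC_MAX_SG_SEG_CNT. The sketch below merely restates that arithmetic; the helper name is hypothetical, and the struct definitions are assumed to come from the lpfc/FC headers.

```c
/*
 * Illustrative sketch only -- restates the sizing arithmetic from
 * lpfc_get_cfgparam() above.  example_sg_dma_buf_size() is hypothetical;
 * struct fcp_cmnd, struct fcp_rsp and struct ulp_bde64 are assumed to come
 * from the lpfc/FC headers.
 */
static size_t example_sg_dma_buf_size(unsigned int sg_seg_cnt,
				      unsigned int prot_sg_seg_cnt,
				      bool enable_bg)
{
	/* Command + response + one BDE per data segment, plus two extras. */
	size_t sz = sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
		    (sg_seg_cnt + 2) * sizeof(struct ulp_bde64);

	/* With BlockGuard enabled, add one BDE per protection segment. */
	if (enable_bg)
		sz += prot_sg_seg_cnt * sizeof(struct ulp_bde64);

	return sz;
}
```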
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 044ef4057d28..07f4976319a5 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -22,6 +22,7 @@ typedef int (*node_filter)(struct lpfc_nodelist *, void *); | |||
22 | 22 | ||
23 | struct fc_rport; | 23 | struct fc_rport; |
24 | void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); | 24 | void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); |
25 | void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *); | ||
25 | void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *); | 26 | void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *); |
26 | void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); | 27 | void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); |
27 | 28 | ||
@@ -284,12 +285,24 @@ extern void lpfc_debugfs_slow_ring_trc(struct lpfc_hba *, char *, uint32_t, | |||
284 | uint32_t, uint32_t); | 285 | uint32_t, uint32_t); |
285 | extern struct lpfc_hbq_init *lpfc_hbq_defs[]; | 286 | extern struct lpfc_hbq_init *lpfc_hbq_defs[]; |
286 | 287 | ||
288 | /* externs BlockGuard */ | ||
289 | extern char *_dump_buf_data; | ||
290 | extern unsigned long _dump_buf_data_order; | ||
291 | extern char *_dump_buf_dif; | ||
292 | extern unsigned long _dump_buf_dif_order; | ||
293 | extern spinlock_t _dump_buf_lock; | ||
294 | extern int _dump_buf_done; | ||
295 | extern spinlock_t pgcnt_lock; | ||
296 | extern unsigned int pgcnt; | ||
297 | extern unsigned int lpfc_prot_mask; | ||
298 | extern unsigned char lpfc_prot_guard; | ||
299 | |||
287 | /* Interface exported by fabric iocb scheduler */ | 300 | /* Interface exported by fabric iocb scheduler */ |
288 | void lpfc_fabric_abort_nport(struct lpfc_nodelist *); | 301 | void lpfc_fabric_abort_nport(struct lpfc_nodelist *); |
289 | void lpfc_fabric_abort_hba(struct lpfc_hba *); | 302 | void lpfc_fabric_abort_hba(struct lpfc_hba *); |
290 | void lpfc_fabric_block_timeout(unsigned long); | 303 | void lpfc_fabric_block_timeout(unsigned long); |
291 | void lpfc_unblock_fabric_iocbs(struct lpfc_hba *); | 304 | void lpfc_unblock_fabric_iocbs(struct lpfc_hba *); |
292 | void lpfc_adjust_queue_depth(struct lpfc_hba *); | 305 | void lpfc_rampdown_queue_depth(struct lpfc_hba *); |
293 | void lpfc_ramp_down_queue_handler(struct lpfc_hba *); | 306 | void lpfc_ramp_down_queue_handler(struct lpfc_hba *); |
294 | void lpfc_ramp_up_queue_handler(struct lpfc_hba *); | 307 | void lpfc_ramp_up_queue_handler(struct lpfc_hba *); |
295 | void lpfc_scsi_dev_block(struct lpfc_hba *); | 308 | void lpfc_scsi_dev_block(struct lpfc_hba *); |
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 26dae8bae2d1..896c7b0351e5 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -560,18 +560,25 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
560 | irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_ns_retry); | 560 | irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_ns_retry); |
561 | 561 | ||
562 | /* Don't bother processing response if vport is being torn down. */ | 562 | /* Don't bother processing response if vport is being torn down. */ |
563 | if (vport->load_flag & FC_UNLOADING) | 563 | if (vport->load_flag & FC_UNLOADING) { |
564 | if (vport->fc_flag & FC_RSCN_MODE) | ||
565 | lpfc_els_flush_rscn(vport); | ||
564 | goto out; | 566 | goto out; |
567 | } | ||
565 | 568 | ||
566 | if (lpfc_els_chk_latt(vport)) { | 569 | if (lpfc_els_chk_latt(vport)) { |
567 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, | 570 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
568 | "0216 Link event during NS query\n"); | 571 | "0216 Link event during NS query\n"); |
572 | if (vport->fc_flag & FC_RSCN_MODE) | ||
573 | lpfc_els_flush_rscn(vport); | ||
569 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); | 574 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); |
570 | goto out; | 575 | goto out; |
571 | } | 576 | } |
572 | if (lpfc_error_lost_link(irsp)) { | 577 | if (lpfc_error_lost_link(irsp)) { |
573 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, | 578 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
574 | "0226 NS query failed due to link event\n"); | 579 | "0226 NS query failed due to link event\n"); |
580 | if (vport->fc_flag & FC_RSCN_MODE) | ||
581 | lpfc_els_flush_rscn(vport); | ||
575 | goto out; | 582 | goto out; |
576 | } | 583 | } |
577 | if (irsp->ulpStatus) { | 584 | if (irsp->ulpStatus) { |
@@ -587,6 +594,8 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
587 | if (rc == 0) | 594 | if (rc == 0) |
588 | goto out; | 595 | goto out; |
589 | } | 596 | } |
597 | if (vport->fc_flag & FC_RSCN_MODE) | ||
598 | lpfc_els_flush_rscn(vport); | ||
590 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); | 599 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); |
591 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, | 600 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, |
592 | "0257 GID_FT Query error: 0x%x 0x%x\n", | 601 | "0257 GID_FT Query error: 0x%x 0x%x\n", |
@@ -1008,8 +1017,10 @@ lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol, | |||
1008 | if (n < size) | 1017 | if (n < size) |
1009 | n += snprintf(symbol + n, size - n, " VPort-%d", vport->vpi); | 1018 | n += snprintf(symbol + n, size - n, " VPort-%d", vport->vpi); |
1010 | 1019 | ||
1011 | if (n < size && vport->vname) | 1020 | if (n < size && |
1012 | n += snprintf(symbol + n, size - n, " VName-%s", vport->vname); | 1021 | strlen(vport->fc_vport->symbolic_name)) |
1022 | n += snprintf(symbol + n, size - n, " VName-%s", | ||
1023 | vport->fc_vport->symbolic_name); | ||
1013 | return n; | 1024 | return n; |
1014 | } | 1025 | } |
1015 | 1026 | ||
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 771920bdde44..b615eda361d5 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -46,7 +46,7 @@ | |||
46 | #include "lpfc_compat.h" | 46 | #include "lpfc_compat.h" |
47 | #include "lpfc_debugfs.h" | 47 | #include "lpfc_debugfs.h" |
48 | 48 | ||
49 | #ifdef CONFIG_LPFC_DEBUG_FS | 49 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
50 | /** | 50 | /** |
51 | * debugfs interface | 51 | * debugfs interface |
52 | * | 52 | * |
@@ -618,7 +618,7 @@ inline void | |||
618 | lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt, | 618 | lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt, |
619 | uint32_t data1, uint32_t data2, uint32_t data3) | 619 | uint32_t data1, uint32_t data2, uint32_t data3) |
620 | { | 620 | { |
621 | #ifdef CONFIG_LPFC_DEBUG_FS | 621 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
622 | struct lpfc_debugfs_trc *dtp; | 622 | struct lpfc_debugfs_trc *dtp; |
623 | int index; | 623 | int index; |
624 | 624 | ||
@@ -659,7 +659,7 @@ inline void | |||
659 | lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt, | 659 | lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt, |
660 | uint32_t data1, uint32_t data2, uint32_t data3) | 660 | uint32_t data1, uint32_t data2, uint32_t data3) |
661 | { | 661 | { |
662 | #ifdef CONFIG_LPFC_DEBUG_FS | 662 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
663 | struct lpfc_debugfs_trc *dtp; | 663 | struct lpfc_debugfs_trc *dtp; |
664 | int index; | 664 | int index; |
665 | 665 | ||
@@ -680,7 +680,7 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt, | |||
680 | return; | 680 | return; |
681 | } | 681 | } |
682 | 682 | ||
683 | #ifdef CONFIG_LPFC_DEBUG_FS | 683 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
684 | /** | 684 | /** |
685 | * lpfc_debugfs_disc_trc_open - Open the discovery trace log. | 685 | * lpfc_debugfs_disc_trc_open - Open the discovery trace log. |
686 | * @inode: The inode pointer that contains a vport pointer. | 686 | * @inode: The inode pointer that contains a vport pointer. |
@@ -907,6 +907,91 @@ out: | |||
907 | return rc; | 907 | return rc; |
908 | } | 908 | } |
909 | 909 | ||
910 | static int | ||
911 | lpfc_debugfs_dumpData_open(struct inode *inode, struct file *file) | ||
912 | { | ||
913 | struct lpfc_debug *debug; | ||
914 | int rc = -ENOMEM; | ||
915 | |||
916 | if (!_dump_buf_data) | ||
917 | return -EBUSY; | ||
918 | |||
919 | debug = kmalloc(sizeof(*debug), GFP_KERNEL); | ||
920 | if (!debug) | ||
921 | goto out; | ||
922 | |||
923 | /* Round to page boundary */ | ||
924 | printk(KERN_ERR "BLKGRD %s: _dump_buf_data=0x%p\n", | ||
925 | __func__, _dump_buf_data); | ||
926 | debug->buffer = _dump_buf_data; | ||
927 | if (!debug->buffer) { | ||
928 | kfree(debug); | ||
929 | goto out; | ||
930 | } | ||
931 | |||
932 | debug->len = (1 << _dump_buf_data_order) << PAGE_SHIFT; | ||
933 | file->private_data = debug; | ||
934 | |||
935 | rc = 0; | ||
936 | out: | ||
937 | return rc; | ||
938 | } | ||
939 | |||
940 | static int | ||
941 | lpfc_debugfs_dumpDif_open(struct inode *inode, struct file *file) | ||
942 | { | ||
943 | struct lpfc_debug *debug; | ||
944 | int rc = -ENOMEM; | ||
945 | |||
946 | if (!_dump_buf_dif) | ||
947 | return -EBUSY; | ||
948 | |||
949 | debug = kmalloc(sizeof(*debug), GFP_KERNEL); | ||
950 | if (!debug) | ||
951 | goto out; | ||
952 | |||
953 | /* Round to page boundary */ | ||
954 | printk(KERN_ERR "BLKGRD %s: _dump_buf_dif=0x%p file=%s\n", __func__, | ||
955 | _dump_buf_dif, file->f_dentry->d_name.name); | ||
956 | debug->buffer = _dump_buf_dif; | ||
957 | if (!debug->buffer) { | ||
958 | kfree(debug); | ||
959 | goto out; | ||
960 | } | ||
961 | |||
962 | debug->len = (1 << _dump_buf_dif_order) << PAGE_SHIFT; | ||
963 | file->private_data = debug; | ||
964 | |||
965 | rc = 0; | ||
966 | out: | ||
967 | return rc; | ||
968 | } | ||
969 | |||
970 | static ssize_t | ||
971 | lpfc_debugfs_dumpDataDif_write(struct file *file, const char __user *buf, | ||
972 | size_t nbytes, loff_t *ppos) | ||
973 | { | ||
974 | /* | ||
975 | * The Data/DIF buffers only save one failing IO | ||
976 | * The write op is used as a reset mechanism after an IO has | ||
977 | * already been saved to the next one can be saved | ||
978 | */ | ||
979 | spin_lock(&_dump_buf_lock); | ||
980 | |||
981 | memset((void *)_dump_buf_data, 0, | ||
982 | ((1 << PAGE_SHIFT) << _dump_buf_data_order)); | ||
983 | memset((void *)_dump_buf_dif, 0, | ||
984 | ((1 << PAGE_SHIFT) << _dump_buf_dif_order)); | ||
985 | |||
986 | _dump_buf_done = 0; | ||
987 | |||
988 | spin_unlock(&_dump_buf_lock); | ||
989 | |||
990 | return nbytes; | ||
991 | } | ||
992 | |||
993 | |||
994 | |||
910 | /** | 995 | /** |
911 | * lpfc_debugfs_nodelist_open - Open the nodelist debugfs file. | 996 | * lpfc_debugfs_nodelist_open - Open the nodelist debugfs file. |
912 | * @inode: The inode pointer that contains a vport pointer. | 997 | * @inode: The inode pointer that contains a vport pointer. |
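The dumpData and dumpDif handlers added above expose the single saved failing I/O (data payload and DIF blocks respectively), and, as the comment in lpfc_debugfs_dumpDataDif_write() notes, any write to either file clears both buffers so the next failure can be captured. A minimal userspace sketch of that read-then-reset cycle follows; the debugfs path is a placeholder, not taken from the patch, so check the actual lpfc directory layout under your debugfs mount.

```c
/* Illustrative userspace sketch only; the debugfs path is a placeholder. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/debug/lpfc/<hba>/dumpData", O_RDWR);

	if (fd < 0) {
		perror("open dumpData");
		return 1;
	}

	/* Dump the data payload of the last saved failing I/O. */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);

	/* Any write resets both the data and the DIF capture buffers. */
	if (write(fd, "1", 1) < 0)
		perror("reset");

	close(fd);
	return 0;
}
```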
@@ -1035,6 +1120,17 @@ lpfc_debugfs_release(struct inode *inode, struct file *file) | |||
1035 | return 0; | 1120 | return 0; |
1036 | } | 1121 | } |
1037 | 1122 | ||
1123 | static int | ||
1124 | lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file) | ||
1125 | { | ||
1126 | struct lpfc_debug *debug = file->private_data; | ||
1127 | |||
1128 | debug->buffer = NULL; | ||
1129 | kfree(debug); | ||
1130 | |||
1131 | return 0; | ||
1132 | } | ||
1133 | |||
1038 | #undef lpfc_debugfs_op_disc_trc | 1134 | #undef lpfc_debugfs_op_disc_trc |
1039 | static struct file_operations lpfc_debugfs_op_disc_trc = { | 1135 | static struct file_operations lpfc_debugfs_op_disc_trc = { |
1040 | .owner = THIS_MODULE, | 1136 | .owner = THIS_MODULE, |
@@ -1080,6 +1176,26 @@ static struct file_operations lpfc_debugfs_op_dumpHostSlim = { | |||
1080 | .release = lpfc_debugfs_release, | 1176 | .release = lpfc_debugfs_release, |
1081 | }; | 1177 | }; |
1082 | 1178 | ||
1179 | #undef lpfc_debugfs_op_dumpData | ||
1180 | static struct file_operations lpfc_debugfs_op_dumpData = { | ||
1181 | .owner = THIS_MODULE, | ||
1182 | .open = lpfc_debugfs_dumpData_open, | ||
1183 | .llseek = lpfc_debugfs_lseek, | ||
1184 | .read = lpfc_debugfs_read, | ||
1185 | .write = lpfc_debugfs_dumpDataDif_write, | ||
1186 | .release = lpfc_debugfs_dumpDataDif_release, | ||
1187 | }; | ||
1188 | |||
1189 | #undef lpfc_debugfs_op_dumpDif | ||
1190 | static struct file_operations lpfc_debugfs_op_dumpDif = { | ||
1191 | .owner = THIS_MODULE, | ||
1192 | .open = lpfc_debugfs_dumpDif_open, | ||
1193 | .llseek = lpfc_debugfs_lseek, | ||
1194 | .read = lpfc_debugfs_read, | ||
1195 | .write = lpfc_debugfs_dumpDataDif_write, | ||
1196 | .release = lpfc_debugfs_dumpDataDif_release, | ||
1197 | }; | ||
1198 | |||
1083 | #undef lpfc_debugfs_op_slow_ring_trc | 1199 | #undef lpfc_debugfs_op_slow_ring_trc |
1084 | static struct file_operations lpfc_debugfs_op_slow_ring_trc = { | 1200 | static struct file_operations lpfc_debugfs_op_slow_ring_trc = { |
1085 | .owner = THIS_MODULE, | 1201 | .owner = THIS_MODULE, |
@@ -1106,7 +1222,7 @@ static atomic_t lpfc_debugfs_hba_count; | |||
1106 | inline void | 1222 | inline void |
1107 | lpfc_debugfs_initialize(struct lpfc_vport *vport) | 1223 | lpfc_debugfs_initialize(struct lpfc_vport *vport) |
1108 | { | 1224 | { |
1109 | #ifdef CONFIG_LPFC_DEBUG_FS | 1225 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
1110 | struct lpfc_hba *phba = vport->phba; | 1226 | struct lpfc_hba *phba = vport->phba; |
1111 | char name[64]; | 1227 | char name[64]; |
1112 | uint32_t num, i; | 1228 | uint32_t num, i; |
@@ -1176,6 +1292,32 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) | |||
1176 | goto debug_failed; | 1292 | goto debug_failed; |
1177 | } | 1293 | } |
1178 | 1294 | ||
1295 | /* Setup dumpData */ | ||
1296 | snprintf(name, sizeof(name), "dumpData"); | ||
1297 | phba->debug_dumpData = | ||
1298 | debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, | ||
1299 | phba->hba_debugfs_root, | ||
1300 | phba, &lpfc_debugfs_op_dumpData); | ||
1301 | if (!phba->debug_dumpData) { | ||
1302 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, | ||
1303 | "0800 Cannot create debugfs dumpData\n"); | ||
1304 | goto debug_failed; | ||
1305 | } | ||
1306 | |||
1307 | /* Setup dumpDif */ | ||
1308 | snprintf(name, sizeof(name), "dumpDif"); | ||
1309 | phba->debug_dumpDif = | ||
1310 | debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, | ||
1311 | phba->hba_debugfs_root, | ||
1312 | phba, &lpfc_debugfs_op_dumpDif); | ||
1313 | if (!phba->debug_dumpDif) { | ||
1314 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, | ||
1315 | "0801 Cannot create debugfs dumpDif\n"); | ||
1316 | goto debug_failed; | ||
1317 | } | ||
1318 | |||
1319 | |||
1320 | |||
1179 | /* Setup slow ring trace */ | 1321 | /* Setup slow ring trace */ |
1180 | if (lpfc_debugfs_max_slow_ring_trc) { | 1322 | if (lpfc_debugfs_max_slow_ring_trc) { |
1181 | num = lpfc_debugfs_max_slow_ring_trc - 1; | 1323 | num = lpfc_debugfs_max_slow_ring_trc - 1; |
@@ -1305,7 +1447,7 @@ debug_failed: | |||
1305 | inline void | 1447 | inline void |
1306 | lpfc_debugfs_terminate(struct lpfc_vport *vport) | 1448 | lpfc_debugfs_terminate(struct lpfc_vport *vport) |
1307 | { | 1449 | { |
1308 | #ifdef CONFIG_LPFC_DEBUG_FS | 1450 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
1309 | struct lpfc_hba *phba = vport->phba; | 1451 | struct lpfc_hba *phba = vport->phba; |
1310 | 1452 | ||
1311 | if (vport->disc_trc) { | 1453 | if (vport->disc_trc) { |
@@ -1340,6 +1482,16 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport) | |||
1340 | debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */ | 1482 | debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */ |
1341 | phba->debug_dumpHostSlim = NULL; | 1483 | phba->debug_dumpHostSlim = NULL; |
1342 | } | 1484 | } |
1485 | if (phba->debug_dumpData) { | ||
1486 | debugfs_remove(phba->debug_dumpData); /* dumpData */ | ||
1487 | phba->debug_dumpData = NULL; | ||
1488 | } | ||
1489 | |||
1490 | if (phba->debug_dumpDif) { | ||
1491 | debugfs_remove(phba->debug_dumpDif); /* dumpDif */ | ||
1492 | phba->debug_dumpDif = NULL; | ||
1493 | } | ||
1494 | |||
1343 | if (phba->slow_ring_trc) { | 1495 | if (phba->slow_ring_trc) { |
1344 | kfree(phba->slow_ring_trc); | 1496 | kfree(phba->slow_ring_trc); |
1345 | phba->slow_ring_trc = NULL; | 1497 | phba->slow_ring_trc = NULL; |
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 31e86a55391d..03c7313a1012 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -21,7 +21,7 @@ | |||
21 | #ifndef _H_LPFC_DEBUG_FS | 21 | #ifndef _H_LPFC_DEBUG_FS |
22 | #define _H_LPFC_DEBUG_FS | 22 | #define _H_LPFC_DEBUG_FS |
23 | 23 | ||
24 | #ifdef CONFIG_LPFC_DEBUG_FS | 24 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
25 | struct lpfc_debugfs_trc { | 25 | struct lpfc_debugfs_trc { |
26 | char *fmt; | 26 | char *fmt; |
27 | uint32_t data1; | 27 | uint32_t data1; |
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 630bd28fb997..a8f30bdaff69 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -221,7 +221,11 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, | |||
221 | /* For ELS_REQUEST64_CR, use the VPI by default */ | 221 | /* For ELS_REQUEST64_CR, use the VPI by default */ |
222 | icmd->ulpContext = vport->vpi; | 222 | icmd->ulpContext = vport->vpi; |
223 | icmd->ulpCt_h = 0; | 223 | icmd->ulpCt_h = 0; |
224 | icmd->ulpCt_l = 1; | 224 | /* The CT field must be 0=INVALID_RPI for the ECHO cmd */ |
225 | if (elscmd == ELS_CMD_ECHO) | ||
226 | icmd->ulpCt_l = 0; /* context = invalid RPI */ | ||
227 | else | ||
228 | icmd->ulpCt_l = 1; /* context = VPI */ | ||
225 | } | 229 | } |
226 | 230 | ||
227 | bpl = (struct ulp_bde64 *) pbuflist->virt; | 231 | bpl = (struct ulp_bde64 *) pbuflist->virt; |
@@ -271,7 +275,8 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, | |||
271 | return elsiocb; | 275 | return elsiocb; |
272 | 276 | ||
273 | els_iocb_free_pbuf_exit: | 277 | els_iocb_free_pbuf_exit: |
274 | lpfc_mbuf_free(phba, prsp->virt, prsp->phys); | 278 | if (expectRsp) |
279 | lpfc_mbuf_free(phba, prsp->virt, prsp->phys); | ||
275 | kfree(pbuflist); | 280 | kfree(pbuflist); |
276 | 281 | ||
277 | els_iocb_free_prsp_exit: | 282 | els_iocb_free_prsp_exit: |
@@ -2468,6 +2473,15 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
2468 | case IOSTAT_LOCAL_REJECT: | 2473 | case IOSTAT_LOCAL_REJECT: |
2469 | switch ((irsp->un.ulpWord[4] & 0xff)) { | 2474 | switch ((irsp->un.ulpWord[4] & 0xff)) { |
2470 | case IOERR_LOOP_OPEN_FAILURE: | 2475 | case IOERR_LOOP_OPEN_FAILURE: |
2476 | if (cmd == ELS_CMD_FLOGI) { | ||
2477 | if (PCI_DEVICE_ID_HORNET == | ||
2478 | phba->pcidev->device) { | ||
2479 | phba->fc_topology = TOPOLOGY_LOOP; | ||
2480 | phba->pport->fc_myDID = 0; | ||
2481 | phba->alpa_map[0] = 0; | ||
2482 | phba->alpa_map[1] = 0; | ||
2483 | } | ||
2484 | } | ||
2471 | if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0) | 2485 | if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0) |
2472 | delay = 1000; | 2486 | delay = 1000; |
2473 | retry = 1; | 2487 | retry = 1; |
@@ -3823,27 +3837,21 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) | |||
3823 | while (payload_len) { | 3837 | while (payload_len) { |
3824 | rscn_did.un.word = be32_to_cpu(*lp++); | 3838 | rscn_did.un.word = be32_to_cpu(*lp++); |
3825 | payload_len -= sizeof(uint32_t); | 3839 | payload_len -= sizeof(uint32_t); |
3826 | switch (rscn_did.un.b.resv) { | 3840 | switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { |
3827 | case 0: /* Single N_Port ID effected */ | 3841 | case RSCN_ADDRESS_FORMAT_PORT: |
3828 | if (ns_did.un.word == rscn_did.un.word) | 3842 | if (ns_did.un.word == rscn_did.un.word) |
3829 | goto return_did_out; | 3843 | goto return_did_out; |
3830 | break; | 3844 | break; |
3831 | case 1: /* Whole N_Port Area effected */ | 3845 | case RSCN_ADDRESS_FORMAT_AREA: |
3832 | if ((ns_did.un.b.domain == rscn_did.un.b.domain) | 3846 | if ((ns_did.un.b.domain == rscn_did.un.b.domain) |
3833 | && (ns_did.un.b.area == rscn_did.un.b.area)) | 3847 | && (ns_did.un.b.area == rscn_did.un.b.area)) |
3834 | goto return_did_out; | 3848 | goto return_did_out; |
3835 | break; | 3849 | break; |
3836 | case 2: /* Whole N_Port Domain effected */ | 3850 | case RSCN_ADDRESS_FORMAT_DOMAIN: |
3837 | if (ns_did.un.b.domain == rscn_did.un.b.domain) | 3851 | if (ns_did.un.b.domain == rscn_did.un.b.domain) |
3838 | goto return_did_out; | 3852 | goto return_did_out; |
3839 | break; | 3853 | break; |
3840 | default: | 3854 | case RSCN_ADDRESS_FORMAT_FABRIC: |
3841 | /* Unknown Identifier in RSCN node */ | ||
3842 | lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, | ||
3843 | "0217 Unknown Identifier in " | ||
3844 | "RSCN payload Data: x%x\n", | ||
3845 | rscn_did.un.word); | ||
3846 | case 3: /* Whole Fabric effected */ | ||
3847 | goto return_did_out; | 3855 | goto return_did_out; |
3848 | } | 3856 | } |
3849 | } | 3857 | } |
@@ -3887,6 +3895,49 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport) | |||
3887 | } | 3895 | } |
3888 | 3896 | ||
3889 | /** | 3897 | /** |
3898 | * lpfc_send_rscn_event: Send an RSCN event to management application. | ||
3899 | * @vport: pointer to a host virtual N_Port data structure. | ||
3900 | * @cmdiocb: pointer to lpfc command iocb data structure. | ||
3901 | * | ||
3902 | * lpfc_send_rscn_event sends an RSCN netlink event to management | ||
3903 | * applications. | ||
3904 | */ | ||
3905 | static void | ||
3906 | lpfc_send_rscn_event(struct lpfc_vport *vport, | ||
3907 | struct lpfc_iocbq *cmdiocb) | ||
3908 | { | ||
3909 | struct lpfc_dmabuf *pcmd; | ||
3910 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
3911 | uint32_t *payload_ptr; | ||
3912 | uint32_t payload_len; | ||
3913 | struct lpfc_rscn_event_header *rscn_event_data; | ||
3914 | |||
3915 | pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; | ||
3916 | payload_ptr = (uint32_t *) pcmd->virt; | ||
3917 | payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK); | ||
3918 | |||
3919 | rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) + | ||
3920 | payload_len, GFP_KERNEL); | ||
3921 | if (!rscn_event_data) { | ||
3922 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, | ||
3923 | "0147 Failed to allocate memory for RSCN event\n"); | ||
3924 | return; | ||
3925 | } | ||
3926 | rscn_event_data->event_type = FC_REG_RSCN_EVENT; | ||
3927 | rscn_event_data->payload_length = payload_len; | ||
3928 | memcpy(rscn_event_data->rscn_payload, payload_ptr, | ||
3929 | payload_len); | ||
3930 | |||
3931 | fc_host_post_vendor_event(shost, | ||
3932 | fc_get_event_number(), | ||
3933 | sizeof(struct lpfc_els_event_header) + payload_len, | ||
3934 | (char *)rscn_event_data, | ||
3935 | LPFC_NL_VENDOR_ID); | ||
3936 | |||
3937 | kfree(rscn_event_data); | ||
3938 | } | ||
3939 | |||
3940 | /** | ||
3890 | * lpfc_els_rcv_rscn: Process an unsolicited rscn iocb. | 3941 | * lpfc_els_rcv_rscn: Process an unsolicited rscn iocb. |
3891 | * @vport: pointer to a host virtual N_Port data structure. | 3942 | * @vport: pointer to a host virtual N_Port data structure. |
3892 | * @cmdiocb: pointer to lpfc command iocb data structure. | 3943 | * @cmdiocb: pointer to lpfc command iocb data structure. |
@@ -3933,6 +3984,10 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
3933 | "0214 RSCN received Data: x%x x%x x%x x%x\n", | 3984 | "0214 RSCN received Data: x%x x%x x%x x%x\n", |
3934 | vport->fc_flag, payload_len, *lp, | 3985 | vport->fc_flag, payload_len, *lp, |
3935 | vport->fc_rscn_id_cnt); | 3986 | vport->fc_rscn_id_cnt); |
3987 | |||
3988 | /* Send an RSCN event to the management application */ | ||
3989 | lpfc_send_rscn_event(vport, cmdiocb); | ||
3990 | |||
3936 | for (i = 0; i < payload_len/sizeof(uint32_t); i++) | 3991 | for (i = 0; i < payload_len/sizeof(uint32_t); i++) |
3937 | fc_host_post_event(shost, fc_get_event_number(), | 3992 | fc_host_post_event(shost, fc_get_event_number(), |
3938 | FCH_EVT_RSCN, lp[i]); | 3993 | FCH_EVT_RSCN, lp[i]); |
@@ -4884,10 +4939,6 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport) | |||
4884 | uint32_t timeout; | 4939 | uint32_t timeout; |
4885 | uint32_t remote_ID = 0xffffffff; | 4940 | uint32_t remote_ID = 0xffffffff; |
4886 | 4941 | ||
4887 | /* If the timer is already canceled do nothing */ | ||
4888 | if ((vport->work_port_events & WORKER_ELS_TMO) == 0) { | ||
4889 | return; | ||
4890 | } | ||
4891 | spin_lock_irq(&phba->hbalock); | 4942 | spin_lock_irq(&phba->hbalock); |
4892 | timeout = (uint32_t)(phba->fc_ratov << 1); | 4943 | timeout = (uint32_t)(phba->fc_ratov << 1); |
4893 | 4944 | ||
@@ -5128,7 +5179,7 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba, | |||
5128 | fc_get_event_number(), | 5179 | fc_get_event_number(), |
5129 | sizeof(lsrjt_event), | 5180 | sizeof(lsrjt_event), |
5130 | (char *)&lsrjt_event, | 5181 | (char *)&lsrjt_event, |
5131 | SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); | 5182 | LPFC_NL_VENDOR_ID); |
5132 | return; | 5183 | return; |
5133 | } | 5184 | } |
5134 | if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) || | 5185 | if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) || |
@@ -5146,7 +5197,7 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba, | |||
5146 | fc_get_event_number(), | 5197 | fc_get_event_number(), |
5147 | sizeof(fabric_event), | 5198 | sizeof(fabric_event), |
5148 | (char *)&fabric_event, | 5199 | (char *)&fabric_event, |
5149 | SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); | 5200 | LPFC_NL_VENDOR_ID); |
5150 | return; | 5201 | return; |
5151 | } | 5202 | } |
5152 | 5203 | ||
@@ -5164,32 +5215,68 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba, | |||
5164 | static void | 5215 | static void |
5165 | lpfc_send_els_event(struct lpfc_vport *vport, | 5216 | lpfc_send_els_event(struct lpfc_vport *vport, |
5166 | struct lpfc_nodelist *ndlp, | 5217 | struct lpfc_nodelist *ndlp, |
5167 | uint32_t cmd) | 5218 | uint32_t *payload) |
5168 | { | 5219 | { |
5169 | struct lpfc_els_event_header els_data; | 5220 | struct lpfc_els_event_header *els_data = NULL; |
5221 | struct lpfc_logo_event *logo_data = NULL; | ||
5170 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | 5222 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
5171 | 5223 | ||
5172 | els_data.event_type = FC_REG_ELS_EVENT; | 5224 | if (*payload == ELS_CMD_LOGO) { |
5173 | switch (cmd) { | 5225 | logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL); |
5226 | if (!logo_data) { | ||
5227 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, | ||
5228 | "0148 Failed to allocate memory " | ||
5229 | "for LOGO event\n"); | ||
5230 | return; | ||
5231 | } | ||
5232 | els_data = &logo_data->header; | ||
5233 | } else { | ||
5234 | els_data = kmalloc(sizeof(struct lpfc_els_event_header), | ||
5235 | GFP_KERNEL); | ||
5236 | if (!els_data) { | ||
5237 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, | ||
5238 | "0149 Failed to allocate memory " | ||
5239 | "for ELS event\n"); | ||
5240 | return; | ||
5241 | } | ||
5242 | } | ||
5243 | els_data->event_type = FC_REG_ELS_EVENT; | ||
5244 | switch (*payload) { | ||
5174 | case ELS_CMD_PLOGI: | 5245 | case ELS_CMD_PLOGI: |
5175 | els_data.subcategory = LPFC_EVENT_PLOGI_RCV; | 5246 | els_data->subcategory = LPFC_EVENT_PLOGI_RCV; |
5176 | break; | 5247 | break; |
5177 | case ELS_CMD_PRLO: | 5248 | case ELS_CMD_PRLO: |
5178 | els_data.subcategory = LPFC_EVENT_PRLO_RCV; | 5249 | els_data->subcategory = LPFC_EVENT_PRLO_RCV; |
5179 | break; | 5250 | break; |
5180 | case ELS_CMD_ADISC: | 5251 | case ELS_CMD_ADISC: |
5181 | els_data.subcategory = LPFC_EVENT_ADISC_RCV; | 5252 | els_data->subcategory = LPFC_EVENT_ADISC_RCV; |
5253 | break; | ||
5254 | case ELS_CMD_LOGO: | ||
5255 | els_data->subcategory = LPFC_EVENT_LOGO_RCV; | ||
5256 | /* Copy the WWPN in the LOGO payload */ | ||
5257 | memcpy(logo_data->logo_wwpn, &payload[2], | ||
5258 | sizeof(struct lpfc_name)); | ||
5182 | break; | 5259 | break; |
5183 | default: | 5260 | default: |
5184 | return; | 5261 | return; |
5185 | } | 5262 | } |
5186 | memcpy(els_data.wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); | 5263 | memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); |
5187 | memcpy(els_data.wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); | 5264 | memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); |
5188 | fc_host_post_vendor_event(shost, | 5265 | if (*payload == ELS_CMD_LOGO) { |
5189 | fc_get_event_number(), | 5266 | fc_host_post_vendor_event(shost, |
5190 | sizeof(els_data), | 5267 | fc_get_event_number(), |
5191 | (char *)&els_data, | 5268 | sizeof(struct lpfc_logo_event), |
5192 | SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); | 5269 | (char *)logo_data, |
5270 | LPFC_NL_VENDOR_ID); | ||
5271 | kfree(logo_data); | ||
5272 | } else { | ||
5273 | fc_host_post_vendor_event(shost, | ||
5274 | fc_get_event_number(), | ||
5275 | sizeof(struct lpfc_els_event_header), | ||
5276 | (char *)els_data, | ||
5277 | LPFC_NL_VENDOR_ID); | ||
5278 | kfree(els_data); | ||
5279 | } | ||
5193 | 5280 | ||
5194 | return; | 5281 | return; |
5195 | } | 5282 | } |
@@ -5296,7 +5383,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
5296 | phba->fc_stat.elsRcvPLOGI++; | 5383 | phba->fc_stat.elsRcvPLOGI++; |
5297 | ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); | 5384 | ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); |
5298 | 5385 | ||
5299 | lpfc_send_els_event(vport, ndlp, cmd); | 5386 | lpfc_send_els_event(vport, ndlp, payload); |
5300 | if (vport->port_state < LPFC_DISC_AUTH) { | 5387 | if (vport->port_state < LPFC_DISC_AUTH) { |
5301 | if (!(phba->pport->fc_flag & FC_PT2PT) || | 5388 | if (!(phba->pport->fc_flag & FC_PT2PT) || |
5302 | (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { | 5389 | (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { |
@@ -5334,6 +5421,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
5334 | did, vport->port_state, ndlp->nlp_flag); | 5421 | did, vport->port_state, ndlp->nlp_flag); |
5335 | 5422 | ||
5336 | phba->fc_stat.elsRcvLOGO++; | 5423 | phba->fc_stat.elsRcvLOGO++; |
5424 | lpfc_send_els_event(vport, ndlp, payload); | ||
5337 | if (vport->port_state < LPFC_DISC_AUTH) { | 5425 | if (vport->port_state < LPFC_DISC_AUTH) { |
5338 | rjt_err = LSRJT_UNABLE_TPC; | 5426 | rjt_err = LSRJT_UNABLE_TPC; |
5339 | break; | 5427 | break; |
@@ -5346,7 +5434,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
5346 | did, vport->port_state, ndlp->nlp_flag); | 5434 | did, vport->port_state, ndlp->nlp_flag); |
5347 | 5435 | ||
5348 | phba->fc_stat.elsRcvPRLO++; | 5436 | phba->fc_stat.elsRcvPRLO++; |
5349 | lpfc_send_els_event(vport, ndlp, cmd); | 5437 | lpfc_send_els_event(vport, ndlp, payload); |
5350 | if (vport->port_state < LPFC_DISC_AUTH) { | 5438 | if (vport->port_state < LPFC_DISC_AUTH) { |
5351 | rjt_err = LSRJT_UNABLE_TPC; | 5439 | rjt_err = LSRJT_UNABLE_TPC; |
5352 | break; | 5440 | break; |
@@ -5364,7 +5452,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
5364 | "RCV ADISC: did:x%x/ste:x%x flg:x%x", | 5452 | "RCV ADISC: did:x%x/ste:x%x flg:x%x", |
5365 | did, vport->port_state, ndlp->nlp_flag); | 5453 | did, vport->port_state, ndlp->nlp_flag); |
5366 | 5454 | ||
5367 | lpfc_send_els_event(vport, ndlp, cmd); | 5455 | lpfc_send_els_event(vport, ndlp, payload); |
5368 | phba->fc_stat.elsRcvADISC++; | 5456 | phba->fc_stat.elsRcvADISC++; |
5369 | if (vport->port_state < LPFC_DISC_AUTH) { | 5457 | if (vport->port_state < LPFC_DISC_AUTH) { |
5370 | rjt_err = LSRJT_UNABLE_TPC; | 5458 | rjt_err = LSRJT_UNABLE_TPC; |
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index a1a70d9ffc2a..8c64494444bf 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c | |||
@@ -350,7 +350,7 @@ lpfc_send_fastpath_evt(struct lpfc_hba *phba, | |||
350 | evt_data_size = sizeof(fast_evt_data->un. | 350 | evt_data_size = sizeof(fast_evt_data->un. |
351 | read_check_error); | 351 | read_check_error); |
352 | } else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) || | 352 | } else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) || |
353 | (evt_sub_category == IOSTAT_NPORT_BSY)) { | 353 | (evt_sub_category == LPFC_EVENT_PORT_BUSY)) { |
354 | evt_data = (char *) &fast_evt_data->un.fabric_evt; | 354 | evt_data = (char *) &fast_evt_data->un.fabric_evt; |
355 | evt_data_size = sizeof(fast_evt_data->un.fabric_evt); | 355 | evt_data_size = sizeof(fast_evt_data->un.fabric_evt); |
356 | } else { | 356 | } else { |
@@ -387,7 +387,7 @@ lpfc_send_fastpath_evt(struct lpfc_hba *phba, | |||
387 | fc_get_event_number(), | 387 | fc_get_event_number(), |
388 | evt_data_size, | 388 | evt_data_size, |
389 | evt_data, | 389 | evt_data, |
390 | SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); | 390 | LPFC_NL_VENDOR_ID); |
391 | 391 | ||
392 | lpfc_free_fast_evt(phba, fast_evt_data); | 392 | lpfc_free_fast_evt(phba, fast_evt_data); |
393 | return; | 393 | return; |
@@ -585,20 +585,25 @@ lpfc_do_work(void *p) | |||
585 | set_user_nice(current, -20); | 585 | set_user_nice(current, -20); |
586 | phba->data_flags = 0; | 586 | phba->data_flags = 0; |
587 | 587 | ||
588 | while (1) { | 588 | while (!kthread_should_stop()) { |
589 | /* wait and check worker queue activities */ | 589 | /* wait and check worker queue activities */ |
590 | rc = wait_event_interruptible(phba->work_waitq, | 590 | rc = wait_event_interruptible(phba->work_waitq, |
591 | (test_and_clear_bit(LPFC_DATA_READY, | 591 | (test_and_clear_bit(LPFC_DATA_READY, |
592 | &phba->data_flags) | 592 | &phba->data_flags) |
593 | || kthread_should_stop())); | 593 | || kthread_should_stop())); |
594 | BUG_ON(rc); | 594 | /* Signal wakeup shall terminate the worker thread */ |
595 | 595 | if (rc) { | |
596 | if (kthread_should_stop()) | 596 | lpfc_printf_log(phba, KERN_ERR, LOG_ELS, |
597 | "0433 Wakeup on signal: rc=x%x\n", rc); | ||
597 | break; | 598 | break; |
599 | } | ||
598 | 600 | ||
599 | /* Attend pending lpfc data processing */ | 601 | /* Attend pending lpfc data processing */ |
600 | lpfc_work_done(phba); | 602 | lpfc_work_done(phba); |
601 | } | 603 | } |
604 | phba->worker_thread = NULL; | ||
605 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, | ||
606 | "0432 Worker thread stopped.\n"); | ||
602 | return 0; | 607 | return 0; |
603 | } | 608 | } |
604 | 609 | ||
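Editorial note: the hunk above changes lpfc_do_work() to leave its loop cleanly on kthread_should_stop() or on an unexpected signal instead of hitting BUG_ON(). A rough standalone sketch of that worker-thread pattern, with hypothetical example_* names:

#include <linux/kthread.h>
#include <linux/wait.h>

struct example_ctx {
	wait_queue_head_t waitq;
	unsigned long flags;
#define EXAMPLE_DATA_READY 0
};

static int example_worker(void *p)
{
	struct example_ctx *ctx = p;

	while (!kthread_should_stop()) {
		/* Sleep until work is flagged or a stop is requested */
		if (wait_event_interruptible(ctx->waitq,
				test_and_clear_bit(EXAMPLE_DATA_READY,
						   &ctx->flags) ||
				kthread_should_stop()))
			break;	/* woken by a signal: bail out */
		/* ... process the pending work here ... */
	}
	return 0;
}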
@@ -1852,6 +1857,32 @@ lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
1852 | lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state, | 1857 | lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state, |
1853 | NLP_STE_UNUSED_NODE); | 1858 | NLP_STE_UNUSED_NODE); |
1854 | } | 1859 | } |
1860 | /** | ||
1861 | * lpfc_initialize_node: Initialize all fields of node object. | ||
1862 | * @vport: Pointer to Virtual Port object. | ||
1863 | * @ndlp: Pointer to FC node object. | ||
1864 | * @did: FC_ID of the node. | ||
1865 | * This function is always called when a node object needs to | ||
1866 | * be initialized. It initializes all the fields of the node | ||
1867 | * object. | ||
1868 | **/ | ||
1869 | static inline void | ||
1870 | lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | ||
1871 | uint32_t did) | ||
1872 | { | ||
1873 | INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); | ||
1874 | INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp); | ||
1875 | init_timer(&ndlp->nlp_delayfunc); | ||
1876 | ndlp->nlp_delayfunc.function = lpfc_els_retry_delay; | ||
1877 | ndlp->nlp_delayfunc.data = (unsigned long)ndlp; | ||
1878 | ndlp->nlp_DID = did; | ||
1879 | ndlp->vport = vport; | ||
1880 | ndlp->nlp_sid = NLP_NO_SID; | ||
1881 | kref_init(&ndlp->kref); | ||
1882 | NLP_INT_NODE_ACT(ndlp); | ||
1883 | atomic_set(&ndlp->cmd_pending, 0); | ||
1884 | ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH; | ||
1885 | } | ||
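Editorial note: the nlp_delayfunc timer wired up in lpfc_initialize_node() above is armed later by the ELS retry path. A minimal sketch of that arming; the one-second delay and helper name are illustrative, not taken from the driver.

static void example_arm_retry_timer(struct lpfc_nodelist *ndlp)
{
	/* lpfc_els_retry_delay(ndlp) runs in timer context when this fires */
	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
}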
1855 | 1886 | ||
1856 | struct lpfc_nodelist * | 1887 | struct lpfc_nodelist * |
1857 | lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | 1888 | lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
@@ -1892,17 +1923,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
1892 | /* re-initialize ndlp except of ndlp linked list pointer */ | 1923 | /* re-initialize ndlp except of ndlp linked list pointer */ |
1893 | memset((((char *)ndlp) + sizeof (struct list_head)), 0, | 1924 | memset((((char *)ndlp) + sizeof (struct list_head)), 0, |
1894 | sizeof (struct lpfc_nodelist) - sizeof (struct list_head)); | 1925 | sizeof (struct lpfc_nodelist) - sizeof (struct list_head)); |
1895 | INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); | 1926 | lpfc_initialize_node(vport, ndlp, did); |
1896 | INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp); | ||
1897 | init_timer(&ndlp->nlp_delayfunc); | ||
1898 | ndlp->nlp_delayfunc.function = lpfc_els_retry_delay; | ||
1899 | ndlp->nlp_delayfunc.data = (unsigned long)ndlp; | ||
1900 | ndlp->nlp_DID = did; | ||
1901 | ndlp->vport = vport; | ||
1902 | ndlp->nlp_sid = NLP_NO_SID; | ||
1903 | /* ndlp management re-initialize */ | ||
1904 | kref_init(&ndlp->kref); | ||
1905 | NLP_INT_NODE_ACT(ndlp); | ||
1906 | 1927 | ||
1907 | spin_unlock_irqrestore(&phba->ndlp_lock, flags); | 1928 | spin_unlock_irqrestore(&phba->ndlp_lock, flags); |
1908 | 1929 | ||
@@ -3116,19 +3137,9 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
3116 | uint32_t did) | 3137 | uint32_t did) |
3117 | { | 3138 | { |
3118 | memset(ndlp, 0, sizeof (struct lpfc_nodelist)); | 3139 | memset(ndlp, 0, sizeof (struct lpfc_nodelist)); |
3119 | INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); | 3140 | |
3120 | INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp); | 3141 | lpfc_initialize_node(vport, ndlp, did); |
3121 | init_timer(&ndlp->nlp_delayfunc); | ||
3122 | ndlp->nlp_delayfunc.function = lpfc_els_retry_delay; | ||
3123 | ndlp->nlp_delayfunc.data = (unsigned long)ndlp; | ||
3124 | ndlp->nlp_DID = did; | ||
3125 | ndlp->vport = vport; | ||
3126 | ndlp->nlp_sid = NLP_NO_SID; | ||
3127 | INIT_LIST_HEAD(&ndlp->nlp_listp); | 3142 | INIT_LIST_HEAD(&ndlp->nlp_listp); |
3128 | kref_init(&ndlp->kref); | ||
3129 | NLP_INT_NODE_ACT(ndlp); | ||
3130 | atomic_set(&ndlp->cmd_pending, 0); | ||
3131 | ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH; | ||
3132 | 3143 | ||
3133 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, | 3144 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, |
3134 | "node init: did:x%x", | 3145 | "node init: did:x%x", |
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 5de5dabbbee6..4168c7b498b8 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h | |||
@@ -65,6 +65,9 @@ | |||
65 | #define SLI3_IOCB_RSP_SIZE 64 | 65 | #define SLI3_IOCB_RSP_SIZE 64 |
66 | 66 | ||
67 | 67 | ||
68 | /* vendor ID used in SCSI netlink calls */ | ||
69 | #define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX) | ||
70 | |||
68 | /* Common Transport structures and definitions */ | 71 | /* Common Transport structures and definitions */ |
69 | 72 | ||
70 | union CtRevisionId { | 73 | union CtRevisionId { |
@@ -866,6 +869,12 @@ typedef struct _D_ID { /* Structure is in Big Endian format */ | |||
866 | } un; | 869 | } un; |
867 | } D_ID; | 870 | } D_ID; |
868 | 871 | ||
872 | #define RSCN_ADDRESS_FORMAT_PORT 0x0 | ||
873 | #define RSCN_ADDRESS_FORMAT_AREA 0x1 | ||
874 | #define RSCN_ADDRESS_FORMAT_DOMAIN 0x2 | ||
875 | #define RSCN_ADDRESS_FORMAT_FABRIC 0x3 | ||
876 | #define RSCN_ADDRESS_FORMAT_MASK 0x3 | ||
877 | |||
869 | /* | 878 | /* |
870 | * Structure to define all ELS Payload types | 879 | * Structure to define all ELS Payload types |
871 | */ | 880 | */ |
@@ -1535,6 +1544,108 @@ typedef struct ULP_BDL { /* SLI-2 */ | |||
1535 | uint32_t ulpIoTag32; /* Can be used for 32 bit I/O Tag */ | 1544 | uint32_t ulpIoTag32; /* Can be used for 32 bit I/O Tag */ |
1536 | } ULP_BDL; | 1545 | } ULP_BDL; |
1537 | 1546 | ||
1547 | /* | ||
1548 | * BlockGuard Definitions | ||
1549 | */ | ||
1550 | |||
1551 | enum lpfc_protgrp_type { | ||
1552 | LPFC_PG_TYPE_INVALID = 0, /* used to indicate errors */ | ||
1553 | LPFC_PG_TYPE_NO_DIF, /* no DIF data pointed to by prot grp */ | ||
1554 | LPFC_PG_TYPE_EMBD_DIF, /* DIF is embedded (inline) with data */ | ||
1555 | LPFC_PG_TYPE_DIF_BUF /* DIF has its own scatter/gather list */ | ||
1556 | }; | ||
1557 | |||
1558 | /* PDE Descriptors */ | ||
1559 | #define LPFC_PDE1_DESCRIPTOR 0x81 | ||
1560 | #define LPFC_PDE2_DESCRIPTOR 0x82 | ||
1561 | #define LPFC_PDE3_DESCRIPTOR 0x83 | ||
1562 | |||
1563 | /* BlockGuard Profiles */ | ||
1564 | enum lpfc_bg_prof_codes { | ||
1565 | LPFC_PROF_INVALID, | ||
1566 | LPFC_PROF_A1 = 128, /* Full Protection */ | ||
1567 | LPFC_PROF_A2, /* Disabled Protection Checks:A2~A4 */ | ||
1568 | LPFC_PROF_A3, | ||
1569 | LPFC_PROF_A4, | ||
1570 | LPFC_PROF_B1, /* Embedded DIFs: B1~B3 */ | ||
1571 | LPFC_PROF_B2, | ||
1572 | LPFC_PROF_B3, | ||
1573 | LPFC_PROF_C1, /* Separate DIFs: C1~C3 */ | ||
1574 | LPFC_PROF_C2, | ||
1575 | LPFC_PROF_C3, | ||
1576 | LPFC_PROF_D1, /* Full Protection */ | ||
1577 | LPFC_PROF_D2, /* Partial Protection & Check Disabling */ | ||
1578 | LPFC_PROF_D3, | ||
1579 | LPFC_PROF_E1, /* E1~E4:out - check-only, in - update apptag */ | ||
1580 | LPFC_PROF_E2, | ||
1581 | LPFC_PROF_E3, | ||
1582 | LPFC_PROF_E4, | ||
1583 | LPFC_PROF_F1, /* Full Translation - F1 Prot Descriptor */ | ||
1584 | /* F1 Translation BDE */ | ||
1585 | LPFC_PROF_ANT1, /* TCP checksum, DIF inline with data buffers */ | ||
1586 | LPFC_PROF_AST1, /* TCP checksum, DIF split from data buffer */ | ||
1587 | LPFC_PROF_ANT2, | ||
1588 | LPFC_PROF_AST2 | ||
1589 | }; | ||
1590 | |||
1591 | /* BlockGuard error-control defines */ | ||
1592 | #define BG_EC_STOP_ERR 0x00 | ||
1593 | #define BG_EC_CONT_ERR 0x01 | ||
1594 | #define BG_EC_IGN_UNINIT_STOP_ERR 0x10 | ||
1595 | #define BG_EC_IGN_UNINIT_CONT_ERR 0x11 | ||
1596 | |||
1597 | /* PDE (Protection Descriptor Entry) word 0 bit masks and shifts */ | ||
1598 | #define PDE_DESC_TYPE_MASK 0xff000000 | ||
1599 | #define PDE_DESC_TYPE_SHIFT 24 | ||
1600 | #define PDE_BG_PROFILE_MASK 0x00ff0000 | ||
1601 | #define PDE_BG_PROFILE_SHIFT 16 | ||
1602 | #define PDE_BLOCK_LEN_MASK 0x0000fffc | ||
1603 | #define PDE_BLOCK_LEN_SHIFT 2 | ||
1604 | #define PDE_ERR_CTRL_MASK 0x00000003 | ||
1605 | #define PDE_ERR_CTRL_SHIFT 0 | ||
1606 | /* PDE word 1 bit masks and shifts */ | ||
1607 | #define PDE_APPTAG_MASK_MASK 0xffff0000 | ||
1608 | #define PDE_APPTAG_MASK_SHIFT 16 | ||
1609 | #define PDE_APPTAG_VAL_MASK 0x0000ffff | ||
1610 | #define PDE_APPTAG_VAL_SHIFT 0 | ||
1611 | struct lpfc_pde { | ||
1612 | uint32_t parms; /* bitfields of descriptor, prof, len, and ec */ | ||
1613 | uint32_t apptag; /* bitfields of app tag mask and app tag value */ | ||
1614 | uint32_t reftag; /* reference tag occupying all 32 bits */ | ||
1615 | }; | ||
1616 | |||
1617 | /* inline function to set fields in parms of PDE */ | ||
1618 | static inline void | ||
1619 | lpfc_pde_set_bg_parms(struct lpfc_pde *p, u8 desc, u8 prof, u16 len, u8 ec) | ||
1620 | { | ||
1621 | uint32_t *wp = &p->parms; | ||
1622 | |||
1623 | /* spec indicates that adapter appends two 0's to length field */ | ||
1624 | len = len >> 2; | ||
1625 | |||
1626 | *wp &= 0; | ||
1627 | *wp |= ((desc << PDE_DESC_TYPE_SHIFT) & PDE_DESC_TYPE_MASK); | ||
1628 | *wp |= ((prof << PDE_BG_PROFILE_SHIFT) & PDE_BG_PROFILE_MASK); | ||
1629 | *wp |= ((len << PDE_BLOCK_LEN_SHIFT) & PDE_BLOCK_LEN_MASK); | ||
1630 | *wp |= ((ec << PDE_ERR_CTRL_SHIFT) & PDE_ERR_CTRL_MASK); | ||
1631 | *wp = le32_to_cpu(*wp); | ||
1632 | } | ||
1633 | |||
1634 | /* inline function to set apptag and reftag fields of PDE */ | ||
1635 | static inline void | ||
1636 | lpfc_pde_set_dif_parms(struct lpfc_pde *p, u16 apptagmask, u16 apptagval, | ||
1637 | u32 reftag) | ||
1638 | { | ||
1639 | uint32_t *wp = &p->apptag; | ||
1640 | *wp &= 0; | ||
1641 | *wp |= ((apptagmask << PDE_APPTAG_MASK_SHIFT) & PDE_APPTAG_MASK_MASK); | ||
1642 | *wp |= ((apptagval << PDE_APPTAG_VAL_SHIFT) & PDE_APPTAG_VAL_MASK); | ||
1643 | *wp = le32_to_cpu(*wp); | ||
1644 | wp = &p->reftag; | ||
1645 | *wp = le32_to_cpu(reftag); | ||
1646 | } | ||
1647 | |||
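Editorial note: a minimal sketch of how the two PDE helpers above might be combined to fill one protection descriptor. The profile, block length, app tag and error-control values are illustrative, not taken from the driver.

static void example_fill_pde(struct lpfc_pde *pde, u32 reftag)
{
	/* word 0: descriptor type, BlockGuard profile, block len, err ctrl */
	lpfc_pde_set_bg_parms(pde, LPFC_PDE1_DESCRIPTOR, LPFC_PROF_A1,
			      512, BG_EC_STOP_ERR);
	/* words 1-2: app tag mask/value plus the 32-bit reference tag */
	lpfc_pde_set_dif_parms(pde, 0xffff, 0, reftag);
}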
1648 | |||
1538 | /* Structure for MB Command LOAD_SM and DOWN_LOAD */ | 1649 | /* Structure for MB Command LOAD_SM and DOWN_LOAD */ |
1539 | 1650 | ||
1540 | typedef struct { | 1651 | typedef struct { |
@@ -2359,6 +2470,30 @@ typedef struct { | |||
2359 | #define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */ | 2470 | #define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */ |
2360 | #define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */ | 2471 | #define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */ |
2361 | 2472 | ||
2473 | #define WAKE_UP_PARMS_REGION_ID 4 | ||
2474 | #define WAKE_UP_PARMS_WORD_SIZE 15 | ||
2475 | |||
2476 | /* Option rom version structure */ | ||
2477 | struct prog_id { | ||
2478 | #ifdef __BIG_ENDIAN_BITFIELD | ||
2479 | uint8_t type; | ||
2480 | uint8_t id; | ||
2481 | uint32_t ver:4; /* Major Version */ | ||
2482 | uint32_t rev:4; /* Revision */ | ||
2483 | uint32_t lev:2; /* Level */ | ||
2484 | uint32_t dist:2; /* Dist Type */ | ||
2485 | uint32_t num:4; /* number after dist type */ | ||
2486 | #else /* __LITTLE_ENDIAN_BITFIELD */ | ||
2487 | uint32_t num:4; /* number after dist type */ | ||
2488 | uint32_t dist:2; /* Dist Type */ | ||
2489 | uint32_t lev:2; /* Level */ | ||
2490 | uint32_t rev:4; /* Revision */ | ||
2491 | uint32_t ver:4; /* Major Version */ | ||
2492 | uint8_t id; | ||
2493 | uint8_t type; | ||
2494 | #endif | ||
2495 | }; | ||
2496 | |||
2362 | /* Structure for MB Command UPDATE_CFG (0x1B) */ | 2497 | /* Structure for MB Command UPDATE_CFG (0x1B) */ |
2363 | 2498 | ||
2364 | struct update_cfg_var { | 2499 | struct update_cfg_var { |
@@ -2552,11 +2687,19 @@ typedef struct { | |||
2552 | 2687 | ||
2553 | uint32_t pcbLow; /* bit 31:0 of memory based port config block */ | 2688 | uint32_t pcbLow; /* bit 31:0 of memory based port config block */ |
2554 | uint32_t pcbHigh; /* bit 63:32 of memory based port config block */ | 2689 | uint32_t pcbHigh; /* bit 63:32 of memory based port config block */ |
2555 | uint32_t hbainit[6]; | 2690 | uint32_t hbainit[5]; |
2691 | #ifdef __BIG_ENDIAN_BITFIELD | ||
2692 | uint32_t hps : 1; /* bit 31 word9 Host Pointer in slim */ | ||
2693 | uint32_t rsvd : 31; /* least significant 31 bits of word 9 */ | ||
2694 | #else /* __LITTLE_ENDIAN */ | ||
2695 | uint32_t rsvd : 31; /* least significant 31 bits of word 9 */ | ||
2696 | uint32_t hps : 1; /* bit 31 word9 Host Pointer in slim */ | ||
2697 | #endif | ||
2556 | 2698 | ||
2557 | #ifdef __BIG_ENDIAN_BITFIELD | 2699 | #ifdef __BIG_ENDIAN_BITFIELD |
2558 | uint32_t rsvd : 24; /* Reserved */ | 2700 | uint32_t rsvd1 : 23; /* Reserved */ |
2559 | uint32_t cmv : 1; /* Configure Max VPIs */ | 2701 | uint32_t cbg : 1; /* Configure BlockGuard */ |
2702 | uint32_t cmv : 1; /* Configure Max VPIs */ | ||
2560 | uint32_t ccrp : 1; /* Config Command Ring Polling */ | 2703 | uint32_t ccrp : 1; /* Config Command Ring Polling */ |
2561 | uint32_t csah : 1; /* Configure Synchronous Abort Handling */ | 2704 | uint32_t csah : 1; /* Configure Synchronous Abort Handling */ |
2562 | uint32_t chbs : 1; /* Cofigure Host Backing store */ | 2705 | uint32_t chbs : 1; /* Cofigure Host Backing store */ |
@@ -2573,10 +2716,12 @@ typedef struct { | |||
2573 | uint32_t csah : 1; /* Configure Synchronous Abort Handling */ | 2716 | uint32_t csah : 1; /* Configure Synchronous Abort Handling */ |
2574 | uint32_t ccrp : 1; /* Config Command Ring Polling */ | 2717 | uint32_t ccrp : 1; /* Config Command Ring Polling */ |
2575 | uint32_t cmv : 1; /* Configure Max VPIs */ | 2718 | uint32_t cmv : 1; /* Configure Max VPIs */ |
2576 | uint32_t rsvd : 24; /* Reserved */ | 2719 | uint32_t cbg : 1; /* Configure BlockGuard */ |
2720 | uint32_t rsvd1 : 23; /* Reserved */ | ||
2577 | #endif | 2721 | #endif |
2578 | #ifdef __BIG_ENDIAN_BITFIELD | 2722 | #ifdef __BIG_ENDIAN_BITFIELD |
2579 | uint32_t rsvd2 : 24; /* Reserved */ | 2723 | uint32_t rsvd2 : 23; /* Reserved */ |
2724 | uint32_t gbg : 1; /* Grant BlockGuard */ | ||
2580 | uint32_t gmv : 1; /* Grant Max VPIs */ | 2725 | uint32_t gmv : 1; /* Grant Max VPIs */ |
2581 | uint32_t gcrp : 1; /* Grant Command Ring Polling */ | 2726 | uint32_t gcrp : 1; /* Grant Command Ring Polling */ |
2582 | uint32_t gsah : 1; /* Grant Synchronous Abort Handling */ | 2727 | uint32_t gsah : 1; /* Grant Synchronous Abort Handling */ |
@@ -2594,7 +2739,8 @@ typedef struct { | |||
2594 | uint32_t gsah : 1; /* Grant Synchronous Abort Handling */ | 2739 | uint32_t gsah : 1; /* Grant Synchronous Abort Handling */ |
2595 | uint32_t gcrp : 1; /* Grant Command Ring Polling */ | 2740 | uint32_t gcrp : 1; /* Grant Command Ring Polling */ |
2596 | uint32_t gmv : 1; /* Grant Max VPIs */ | 2741 | uint32_t gmv : 1; /* Grant Max VPIs */ |
2597 | uint32_t rsvd2 : 24; /* Reserved */ | 2742 | uint32_t gbg : 1; /* Grant BlockGuard */ |
2743 | uint32_t rsvd2 : 23; /* Reserved */ | ||
2598 | #endif | 2744 | #endif |
2599 | 2745 | ||
2600 | #ifdef __BIG_ENDIAN_BITFIELD | 2746 | #ifdef __BIG_ENDIAN_BITFIELD |
@@ -3214,6 +3360,94 @@ struct que_xri64cx_ext_fields { | |||
3214 | struct lpfc_hbq_entry buff[5]; | 3360 | struct lpfc_hbq_entry buff[5]; |
3215 | }; | 3361 | }; |
3216 | 3362 | ||
3363 | struct sli3_bg_fields { | ||
3364 | uint32_t filler[6]; /* word 8-13 in IOCB */ | ||
3365 | uint32_t bghm; /* word 14 - BlockGuard High Water Mark */ | ||
3366 | /* Bitfields for bgstat (BlockGuard Status - word 15 of IOCB) */ | ||
3367 | #define BGS_BIDIR_BG_PROF_MASK 0xff000000 | ||
3368 | #define BGS_BIDIR_BG_PROF_SHIFT 24 | ||
3369 | #define BGS_BIDIR_ERR_COND_FLAGS_MASK 0x003f0000 | ||
3370 | #define BGS_BIDIR_ERR_COND_SHIFT 16 | ||
3371 | #define BGS_BG_PROFILE_MASK 0x0000ff00 | ||
3372 | #define BGS_BG_PROFILE_SHIFT 8 | ||
3373 | #define BGS_INVALID_PROF_MASK 0x00000020 | ||
3374 | #define BGS_INVALID_PROF_SHIFT 5 | ||
3375 | #define BGS_UNINIT_DIF_BLOCK_MASK 0x00000010 | ||
3376 | #define BGS_UNINIT_DIF_BLOCK_SHIFT 4 | ||
3377 | #define BGS_HI_WATER_MARK_PRESENT_MASK 0x00000008 | ||
3378 | #define BGS_HI_WATER_MARK_PRESENT_SHIFT 3 | ||
3379 | #define BGS_REFTAG_ERR_MASK 0x00000004 | ||
3380 | #define BGS_REFTAG_ERR_SHIFT 2 | ||
3381 | #define BGS_APPTAG_ERR_MASK 0x00000002 | ||
3382 | #define BGS_APPTAG_ERR_SHIFT 1 | ||
3383 | #define BGS_GUARD_ERR_MASK 0x00000001 | ||
3384 | #define BGS_GUARD_ERR_SHIFT 0 | ||
3385 | uint32_t bgstat; /* word 15 - BlockGuard Status */ | ||
3386 | }; | ||
3387 | |||
3388 | static inline uint32_t | ||
3389 | lpfc_bgs_get_bidir_bg_prof(uint32_t bgstat) | ||
3390 | { | ||
3391 | return (le32_to_cpu(bgstat) & BGS_BIDIR_BG_PROF_MASK) >> | ||
3392 | BGS_BIDIR_BG_PROF_SHIFT; | ||
3393 | } | ||
3394 | |||
3395 | static inline uint32_t | ||
3396 | lpfc_bgs_get_bidir_err_cond(uint32_t bgstat) | ||
3397 | { | ||
3398 | return (le32_to_cpu(bgstat) & BGS_BIDIR_ERR_COND_FLAGS_MASK) >> | ||
3399 | BGS_BIDIR_ERR_COND_SHIFT; | ||
3400 | } | ||
3401 | |||
3402 | static inline uint32_t | ||
3403 | lpfc_bgs_get_bg_prof(uint32_t bgstat) | ||
3404 | { | ||
3405 | return (le32_to_cpu(bgstat) & BGS_BG_PROFILE_MASK) >> | ||
3406 | BGS_BG_PROFILE_SHIFT; | ||
3407 | } | ||
3408 | |||
3409 | static inline uint32_t | ||
3410 | lpfc_bgs_get_invalid_prof(uint32_t bgstat) | ||
3411 | { | ||
3412 | return (le32_to_cpu(bgstat) & BGS_INVALID_PROF_MASK) >> | ||
3413 | BGS_INVALID_PROF_SHIFT; | ||
3414 | } | ||
3415 | |||
3416 | static inline uint32_t | ||
3417 | lpfc_bgs_get_uninit_dif_block(uint32_t bgstat) | ||
3418 | { | ||
3419 | return (le32_to_cpu(bgstat) & BGS_UNINIT_DIF_BLOCK_MASK) >> | ||
3420 | BGS_UNINIT_DIF_BLOCK_SHIFT; | ||
3421 | } | ||
3422 | |||
3423 | static inline uint32_t | ||
3424 | lpfc_bgs_get_hi_water_mark_present(uint32_t bgstat) | ||
3425 | { | ||
3426 | return (le32_to_cpu(bgstat) & BGS_HI_WATER_MARK_PRESENT_MASK) >> | ||
3427 | BGS_HI_WATER_MARK_PRESENT_SHIFT; | ||
3428 | } | ||
3429 | |||
3430 | static inline uint32_t | ||
3431 | lpfc_bgs_get_reftag_err(uint32_t bgstat) | ||
3432 | { | ||
3433 | return (le32_to_cpu(bgstat) & BGS_REFTAG_ERR_MASK) >> | ||
3434 | BGS_REFTAG_ERR_SHIFT; | ||
3435 | } | ||
3436 | |||
3437 | static inline uint32_t | ||
3438 | lpfc_bgs_get_apptag_err(uint32_t bgstat) | ||
3439 | { | ||
3440 | return (le32_to_cpu(bgstat) & BGS_APPTAG_ERR_MASK) >> | ||
3441 | BGS_APPTAG_ERR_SHIFT; | ||
3442 | } | ||
3443 | |||
3444 | static inline uint32_t | ||
3445 | lpfc_bgs_get_guard_err(uint32_t bgstat) | ||
3446 | { | ||
3447 | return (le32_to_cpu(bgstat) & BGS_GUARD_ERR_MASK) >> | ||
3448 | BGS_GUARD_ERR_SHIFT; | ||
3449 | } | ||
3450 | |||
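Editorial note: a small sketch of decoding a returned bgstat word with the accessors above; the reporting shown is illustrative only.

static void example_check_bgstat(uint32_t bgstat)
{
	if (lpfc_bgs_get_guard_err(bgstat))
		printk(KERN_ERR "BLKGRD guard (CRC/checksum) error\n");
	if (lpfc_bgs_get_apptag_err(bgstat))
		printk(KERN_ERR "BLKGRD application tag mismatch\n");
	if (lpfc_bgs_get_reftag_err(bgstat))
		printk(KERN_ERR "BLKGRD reference tag mismatch\n");
	if (lpfc_bgs_get_hi_water_mark_present(bgstat))
		printk(KERN_ERR "BLKGRD high water mark valid in word 14\n");
}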
3217 | #define LPFC_EXT_DATA_BDE_COUNT 3 | 3451 | #define LPFC_EXT_DATA_BDE_COUNT 3 |
3218 | struct fcp_irw_ext { | 3452 | struct fcp_irw_ext { |
3219 | uint32_t io_tag64_low; | 3453 | uint32_t io_tag64_low; |
@@ -3322,6 +3556,9 @@ typedef struct _IOCB { /* IOCB structure */ | |||
3322 | struct que_xri64cx_ext_fields que_xri64cx_ext_words; | 3556 | struct que_xri64cx_ext_fields que_xri64cx_ext_words; |
3323 | struct fcp_irw_ext fcp_ext; | 3557 | struct fcp_irw_ext fcp_ext; |
3324 | uint32_t sli3Words[24]; /* 96 extra bytes for SLI-3 */ | 3558 | uint32_t sli3Words[24]; /* 96 extra bytes for SLI-3 */ |
3559 | |||
3560 | /* words 8-15 for BlockGuard */ | ||
3561 | struct sli3_bg_fields sli3_bg; | ||
3325 | } unsli3; | 3562 | } unsli3; |
3326 | 3563 | ||
3327 | #define ulpCt_h ulpXS | 3564 | #define ulpCt_h ulpXS |
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 909be3301bba..4c77038c8f1c 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c | |||
@@ -45,6 +45,12 @@ | |||
45 | #include "lpfc_vport.h" | 45 | #include "lpfc_vport.h" |
46 | #include "lpfc_version.h" | 46 | #include "lpfc_version.h" |
47 | 47 | ||
48 | char *_dump_buf_data; | ||
49 | unsigned long _dump_buf_data_order; | ||
50 | char *_dump_buf_dif; | ||
51 | unsigned long _dump_buf_dif_order; | ||
52 | spinlock_t _dump_buf_lock; | ||
53 | |||
48 | static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); | 54 | static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); |
49 | static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); | 55 | static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); |
50 | static int lpfc_post_rcv_buf(struct lpfc_hba *); | 56 | static int lpfc_post_rcv_buf(struct lpfc_hba *); |
@@ -236,6 +242,51 @@ lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) | |||
236 | } | 242 | } |
237 | 243 | ||
238 | /** | 244 | /** |
245 | * lpfc_dump_wakeup_param_cmpl: Completion handler for dump memory mailbox | ||
246 | * command used for getting wake up parameters. | ||
247 | * @phba: pointer to lpfc hba data structure. | ||
248 | * @pmboxq: pointer to the driver internal queue element for mailbox command. | ||
249 | * | ||
250 | * This is the completion handler for dump mailbox command for getting | ||
251 | * wake up parameters. When this command completes, the response contains the | ||
252 | * option ROM version of the HBA. This function translates the version number | ||
253 | * into a human readable string and stores it in OptionROMVersion. | ||
254 | **/ | ||
255 | static void | ||
256 | lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) | ||
257 | { | ||
258 | struct prog_id *prg; | ||
259 | uint32_t prog_id_word; | ||
260 | char dist = ' '; | ||
261 | /* character array used for decoding dist type. */ | ||
262 | char dist_char[] = "nabx"; | ||
263 | |||
264 | if (pmboxq->mb.mbxStatus != MBX_SUCCESS) { | ||
265 | mempool_free(pmboxq, phba->mbox_mem_pool); | ||
266 | return; | ||
267 | } | ||
268 | |||
269 | prg = (struct prog_id *) &prog_id_word; | ||
270 | |||
271 | /* word 7 contain option rom version */ | ||
272 | prog_id_word = pmboxq->mb.un.varWords[7]; | ||
273 | |||
274 | /* Decode the Option rom version word to a readable string */ | ||
275 | if (prg->dist < 4) | ||
276 | dist = dist_char[prg->dist]; | ||
277 | |||
278 | if ((prg->dist == 3) && (prg->num == 0)) | ||
279 | sprintf(phba->OptionROMVersion, "%d.%d%d", | ||
280 | prg->ver, prg->rev, prg->lev); | ||
281 | else | ||
282 | sprintf(phba->OptionROMVersion, "%d.%d%d%c%d", | ||
283 | prg->ver, prg->rev, prg->lev, | ||
284 | dist, prg->num); | ||
285 | mempool_free(pmboxq, phba->mbox_mem_pool); | ||
286 | return; | ||
287 | } | ||
288 | |||
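Editorial note: a worked example of the decode above, using illustrative bitfield values rather than real adapter output.

static void example_decode_optrom(char *buf)
{
	struct prog_id p = { .ver = 5, .rev = 0, .lev = 2,
			     .dist = 1, .num = 10 };
	char dist_char[] = "nabx";

	if ((p.dist == 3) && (p.num == 0))
		sprintf(buf, "%d.%d%d", p.ver, p.rev, p.lev);
	else
		sprintf(buf, "%d.%d%d%c%d", p.ver, p.rev, p.lev,
			dist_char[p.dist], p.num);	/* -> "5.02a10" */
}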
289 | /** | ||
239 | * lpfc_config_port_post: Perform lpfc initialization after config port. | 290 | * lpfc_config_port_post: Perform lpfc initialization after config port. |
240 | * @phba: pointer to lpfc hba data structure. | 291 | * @phba: pointer to lpfc hba data structure. |
241 | * | 292 | * |
@@ -482,6 +533,20 @@ lpfc_config_port_post(struct lpfc_hba *phba) | |||
482 | rc); | 533 | rc); |
483 | mempool_free(pmb, phba->mbox_mem_pool); | 534 | mempool_free(pmb, phba->mbox_mem_pool); |
484 | } | 535 | } |
536 | |||
537 | /* Get Option rom version */ | ||
538 | pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
539 | lpfc_dump_wakeup_param(phba, pmb); | ||
540 | pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl; | ||
541 | pmb->vport = phba->pport; | ||
542 | rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); | ||
543 | |||
544 | if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { | ||
545 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed " | ||
546 | "to get Option ROM version status x%x\n.", rc); | ||
547 | mempool_free(pmb, phba->mbox_mem_pool); | ||
548 | } | ||
549 | |||
485 | return 0; | 550 | return 0; |
486 | } | 551 | } |
487 | 552 | ||
@@ -686,11 +751,6 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) | |||
686 | return; | 751 | return; |
687 | 752 | ||
688 | spin_lock_irq(&phba->pport->work_port_lock); | 753 | spin_lock_irq(&phba->pport->work_port_lock); |
689 | /* If the timer is already canceled do nothing */ | ||
690 | if (!(phba->pport->work_port_events & WORKER_HB_TMO)) { | ||
691 | spin_unlock_irq(&phba->pport->work_port_lock); | ||
692 | return; | ||
693 | } | ||
694 | 754 | ||
695 | if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ, | 755 | if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ, |
696 | jiffies)) { | 756 | jiffies)) { |
@@ -833,8 +893,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba) | |||
833 | fc_host_post_vendor_event(shost, fc_get_event_number(), | 893 | fc_host_post_vendor_event(shost, fc_get_event_number(), |
834 | sizeof(board_event), | 894 | sizeof(board_event), |
835 | (char *) &board_event, | 895 | (char *) &board_event, |
836 | SCSI_NL_VID_TYPE_PCI | 896 | LPFC_NL_VENDOR_ID); |
837 | | PCI_VENDOR_ID_EMULEX); | ||
838 | 897 | ||
839 | if (phba->work_hs & HS_FFER6) { | 898 | if (phba->work_hs & HS_FFER6) { |
840 | /* Re-establishing Link */ | 899 | /* Re-establishing Link */ |
@@ -1984,6 +2043,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) | |||
1984 | shost->max_lun = vport->cfg_max_luns; | 2043 | shost->max_lun = vport->cfg_max_luns; |
1985 | shost->this_id = -1; | 2044 | shost->this_id = -1; |
1986 | shost->max_cmd_len = 16; | 2045 | shost->max_cmd_len = 16; |
2046 | |||
1987 | /* | 2047 | /* |
1988 | * Set initial can_queue value since 0 is no longer supported and | 2048 | * Set initial can_queue value since 0 is no longer supported and |
1989 | * scsi_add_host will fail. This will be adjusted later based on the | 2049 | * scsi_add_host will fail. This will be adjusted later based on the |
@@ -2042,8 +2102,6 @@ destroy_port(struct lpfc_vport *vport) | |||
2042 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | 2102 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
2043 | struct lpfc_hba *phba = vport->phba; | 2103 | struct lpfc_hba *phba = vport->phba; |
2044 | 2104 | ||
2045 | kfree(vport->vname); | ||
2046 | |||
2047 | lpfc_debugfs_terminate(vport); | 2105 | lpfc_debugfs_terminate(vport); |
2048 | fc_remove_host(shost); | 2106 | fc_remove_host(shost); |
2049 | scsi_remove_host(shost); | 2107 | scsi_remove_host(shost); |
@@ -2226,8 +2284,7 @@ lpfc_enable_msix(struct lpfc_hba *phba) | |||
2226 | ARRAY_SIZE(phba->msix_entries)); | 2284 | ARRAY_SIZE(phba->msix_entries)); |
2227 | if (rc) { | 2285 | if (rc) { |
2228 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | 2286 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
2229 | "0420 Enable MSI-X failed (%d), continuing " | 2287 | "0420 PCI enable MSI-X failed (%d)\n", rc); |
2230 | "with MSI\n", rc); | ||
2231 | goto msi_fail_out; | 2288 | goto msi_fail_out; |
2232 | } else | 2289 | } else |
2233 | for (i = 0; i < LPFC_MSIX_VECTORS; i++) | 2290 | for (i = 0; i < LPFC_MSIX_VECTORS; i++) |
@@ -2244,9 +2301,9 @@ lpfc_enable_msix(struct lpfc_hba *phba) | |||
2244 | rc = request_irq(phba->msix_entries[0].vector, &lpfc_sp_intr_handler, | 2301 | rc = request_irq(phba->msix_entries[0].vector, &lpfc_sp_intr_handler, |
2245 | IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba); | 2302 | IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba); |
2246 | if (rc) { | 2303 | if (rc) { |
2247 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 2304 | lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, |
2248 | "0421 MSI-X slow-path request_irq failed " | 2305 | "0421 MSI-X slow-path request_irq failed " |
2249 | "(%d), continuing with MSI\n", rc); | 2306 | "(%d)\n", rc); |
2250 | goto msi_fail_out; | 2307 | goto msi_fail_out; |
2251 | } | 2308 | } |
2252 | 2309 | ||
@@ -2255,9 +2312,9 @@ lpfc_enable_msix(struct lpfc_hba *phba) | |||
2255 | IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, phba); | 2312 | IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, phba); |
2256 | 2313 | ||
2257 | if (rc) { | 2314 | if (rc) { |
2258 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 2315 | lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, |
2259 | "0429 MSI-X fast-path request_irq failed " | 2316 | "0429 MSI-X fast-path request_irq failed " |
2260 | "(%d), continuing with MSI\n", rc); | 2317 | "(%d)\n", rc); |
2261 | goto irq_fail_out; | 2318 | goto irq_fail_out; |
2262 | } | 2319 | } |
2263 | 2320 | ||
@@ -2278,7 +2335,7 @@ lpfc_enable_msix(struct lpfc_hba *phba) | |||
2278 | goto mbx_fail_out; | 2335 | goto mbx_fail_out; |
2279 | rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); | 2336 | rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); |
2280 | if (rc != MBX_SUCCESS) { | 2337 | if (rc != MBX_SUCCESS) { |
2281 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, | 2338 | lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, |
2282 | "0351 Config MSI mailbox command failed, " | 2339 | "0351 Config MSI mailbox command failed, " |
2283 | "mbxCmd x%x, mbxStatus x%x\n", | 2340 | "mbxCmd x%x, mbxStatus x%x\n", |
2284 | pmb->mb.mbxCommand, pmb->mb.mbxStatus); | 2341 | pmb->mb.mbxCommand, pmb->mb.mbxStatus); |
@@ -2327,6 +2384,195 @@ lpfc_disable_msix(struct lpfc_hba *phba) | |||
2327 | } | 2384 | } |
2328 | 2385 | ||
2329 | /** | 2386 | /** |
2387 | * lpfc_enable_msi: Enable MSI interrupt mode. | ||
2388 | * @phba: pointer to lpfc hba data structure. | ||
2389 | * | ||
2390 | * This routine is invoked to enable the MSI interrupt mode. The kernel | ||
2391 | * function pci_enable_msi() is called to enable the MSI vector. The | ||
2392 | * device driver is responsible for calling request_irq() to register the | ||
2393 | * MSI vector with an interrupt handler, which is done in this function. | ||
2394 | * | ||
2395 | * Return codes | ||
2396 | * 0 - successful | ||
2397 | * other values - error | ||
2398 | */ | ||
2399 | static int | ||
2400 | lpfc_enable_msi(struct lpfc_hba *phba) | ||
2401 | { | ||
2402 | int rc; | ||
2403 | |||
2404 | rc = pci_enable_msi(phba->pcidev); | ||
2405 | if (!rc) | ||
2406 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
2407 | "0462 PCI enable MSI mode success.\n"); | ||
2408 | else { | ||
2409 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
2410 | "0471 PCI enable MSI mode failed (%d)\n", rc); | ||
2411 | return rc; | ||
2412 | } | ||
2413 | |||
2414 | rc = request_irq(phba->pcidev->irq, lpfc_intr_handler, | ||
2415 | IRQF_SHARED, LPFC_DRIVER_NAME, phba); | ||
2416 | if (rc) { | ||
2417 | pci_disable_msi(phba->pcidev); | ||
2418 | lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, | ||
2419 | "0478 MSI request_irq failed (%d)\n", rc); | ||
2420 | } | ||
2421 | return rc; | ||
2422 | } | ||
2423 | |||
2424 | /** | ||
2425 | * lpfc_disable_msi: Disable MSI interrupt mode. | ||
2426 | * @phba: pointer to lpfc hba data structure. | ||
2427 | * | ||
2428 | * This routine is invoked to disable the MSI interrupt mode. The driver | ||
2429 | * calls free_irq() on the MSI vector on which it has done request_irq() | ||
2430 | * before calling pci_disable_msi(). Failure to do so results in a BUG_ON() | ||
2431 | * and the device is left with MSI enabled, leaking its vector. | ||
2432 | */ | ||
2433 | |||
2434 | static void | ||
2435 | lpfc_disable_msi(struct lpfc_hba *phba) | ||
2436 | { | ||
2437 | free_irq(phba->pcidev->irq, phba); | ||
2438 | pci_disable_msi(phba->pcidev); | ||
2439 | return; | ||
2440 | } | ||
2441 | |||
2442 | /** | ||
2443 | * lpfc_log_intr_mode: Log the active interrupt mode | ||
2444 | * @phba: pointer to lpfc hba data structure. | ||
2445 | * @intr_mode: active interrupt mode adopted. | ||
2446 | * | ||
2447 | * This routine is invoked to log the interrupt mode currently in use | ||
2448 | * by the device. | ||
2449 | */ | ||
2450 | static void | ||
2451 | lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) | ||
2452 | { | ||
2453 | switch (intr_mode) { | ||
2454 | case 0: | ||
2455 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
2456 | "0470 Enable INTx interrupt mode.\n"); | ||
2457 | break; | ||
2458 | case 1: | ||
2459 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
2460 | "0481 Enabled MSI interrupt mode.\n"); | ||
2461 | break; | ||
2462 | case 2: | ||
2463 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
2464 | "0480 Enabled MSI-X interrupt mode.\n"); | ||
2465 | break; | ||
2466 | default: | ||
2467 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
2468 | "0482 Illegal interrupt mode.\n"); | ||
2469 | break; | ||
2470 | } | ||
2471 | return; | ||
2472 | } | ||
2473 | |||
2474 | static void | ||
2475 | lpfc_stop_port(struct lpfc_hba *phba) | ||
2476 | { | ||
2477 | /* Clear all interrupt enable conditions */ | ||
2478 | writel(0, phba->HCregaddr); | ||
2479 | readl(phba->HCregaddr); /* flush */ | ||
2480 | /* Clear all pending interrupts */ | ||
2481 | writel(0xffffffff, phba->HAregaddr); | ||
2482 | readl(phba->HAregaddr); /* flush */ | ||
2483 | |||
2484 | /* Reset some HBA SLI setup states */ | ||
2485 | lpfc_stop_phba_timers(phba); | ||
2486 | phba->pport->work_port_events = 0; | ||
2487 | |||
2488 | return; | ||
2489 | } | ||
2490 | |||
2491 | /** | ||
2492 | * lpfc_enable_intr: Enable device interrupt. | ||
2493 | * @phba: pointer to lpfc hba data structure. | ||
2494 | * | ||
2495 | * This routine is invoked to enable device interrupt and associate driver's | ||
2496 | * interrupt handler(s) to interrupt vector(s). Depending on the interrupt | ||
2497 | * mode configured for the driver, the driver will try to fall back from the | ||
2498 | * configured interrupt mode to an interrupt mode which is supported by the | ||
2499 | * platform, kernel, and device in the order of: MSI-X -> MSI -> IRQ. | ||
2500 | * | ||
2501 | * Return codes | ||
2502 | * 0 - successful | ||
2503 | * other values - error | ||
2504 | **/ | ||
2505 | static uint32_t | ||
2506 | lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) | ||
2507 | { | ||
2508 | uint32_t intr_mode = LPFC_INTR_ERROR; | ||
2509 | int retval; | ||
2510 | |||
2511 | if (cfg_mode == 2) { | ||
2512 | /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ | ||
2513 | retval = lpfc_sli_config_port(phba, 3); | ||
2514 | if (!retval) { | ||
2515 | /* Now, try to enable MSI-X interrupt mode */ | ||
2516 | retval = lpfc_enable_msix(phba); | ||
2517 | if (!retval) { | ||
2518 | /* Indicate initialization to MSI-X mode */ | ||
2519 | phba->intr_type = MSIX; | ||
2520 | intr_mode = 2; | ||
2521 | } | ||
2522 | } | ||
2523 | } | ||
2524 | |||
2525 | /* Fallback to MSI if MSI-X initialization failed */ | ||
2526 | if (cfg_mode >= 1 && phba->intr_type == NONE) { | ||
2527 | retval = lpfc_enable_msi(phba); | ||
2528 | if (!retval) { | ||
2529 | /* Indicate initialization to MSI mode */ | ||
2530 | phba->intr_type = MSI; | ||
2531 | intr_mode = 1; | ||
2532 | } | ||
2533 | } | ||
2534 | |||
2535 | /* Fallback to INTx if both MSI-X/MSI initialization failed */ | ||
2536 | if (phba->intr_type == NONE) { | ||
2537 | retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, | ||
2538 | IRQF_SHARED, LPFC_DRIVER_NAME, phba); | ||
2539 | if (!retval) { | ||
2540 | /* Indicate initialization to INTx mode */ | ||
2541 | phba->intr_type = INTx; | ||
2542 | intr_mode = 0; | ||
2543 | } | ||
2544 | } | ||
2545 | return intr_mode; | ||
2546 | } | ||
2547 | |||
2548 | /** | ||
2549 | * lpfc_disable_intr: Disable device interrupt. | ||
2550 | * @phba: pointer to lpfc hba data structure. | ||
2551 | * | ||
2552 | * This routine is invoked to disable device interrupt and disassociate the | ||
2553 | * driver's interrupt handler(s) from interrupt vector(s). Depending on the | ||
2554 | * interrupt mode, the driver will release the interrupt vector(s) for the | ||
2555 | * message signaled interrupt. | ||
2556 | **/ | ||
2557 | static void | ||
2558 | lpfc_disable_intr(struct lpfc_hba *phba) | ||
2559 | { | ||
2560 | /* Disable the currently initialized interrupt mode */ | ||
2561 | if (phba->intr_type == MSIX) | ||
2562 | lpfc_disable_msix(phba); | ||
2563 | else if (phba->intr_type == MSI) | ||
2564 | lpfc_disable_msi(phba); | ||
2565 | else if (phba->intr_type == INTx) | ||
2566 | free_irq(phba->pcidev->irq, phba); | ||
2567 | |||
2568 | /* Reset interrupt management states */ | ||
2569 | phba->intr_type = NONE; | ||
2570 | phba->sli.slistat.sli_intr = 0; | ||
2571 | |||
2572 | return; | ||
2573 | } | ||
2574 | |||
2575 | /** | ||
2330 | * lpfc_pci_probe_one: lpfc PCI probe func to register device to PCI subsystem. | 2576 | * lpfc_pci_probe_one: lpfc PCI probe func to register device to PCI subsystem. |
2331 | * @pdev: pointer to PCI device | 2577 | * @pdev: pointer to PCI device |
2332 | * @pid: pointer to PCI device identifier | 2578 | * @pid: pointer to PCI device identifier |
@@ -2356,6 +2602,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
2356 | int error = -ENODEV, retval; | 2602 | int error = -ENODEV, retval; |
2357 | int i, hbq_count; | 2603 | int i, hbq_count; |
2358 | uint16_t iotag; | 2604 | uint16_t iotag; |
2605 | uint32_t cfg_mode, intr_mode; | ||
2359 | int bars = pci_select_bars(pdev, IORESOURCE_MEM); | 2606 | int bars = pci_select_bars(pdev, IORESOURCE_MEM); |
2360 | struct lpfc_adapter_event_header adapter_event; | 2607 | struct lpfc_adapter_event_header adapter_event; |
2361 | 2608 | ||
@@ -2409,6 +2656,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
2409 | phba->eratt_poll.data = (unsigned long) phba; | 2656 | phba->eratt_poll.data = (unsigned long) phba; |
2410 | 2657 | ||
2411 | pci_set_master(pdev); | 2658 | pci_set_master(pdev); |
2659 | pci_save_state(pdev); | ||
2412 | pci_try_set_mwi(pdev); | 2660 | pci_try_set_mwi(pdev); |
2413 | 2661 | ||
2414 | if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0) | 2662 | if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0) |
@@ -2557,7 +2805,6 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
2557 | lpfc_debugfs_initialize(vport); | 2805 | lpfc_debugfs_initialize(vport); |
2558 | 2806 | ||
2559 | pci_set_drvdata(pdev, shost); | 2807 | pci_set_drvdata(pdev, shost); |
2560 | phba->intr_type = NONE; | ||
2561 | 2808 | ||
2562 | phba->MBslimaddr = phba->slim_memmap_p; | 2809 | phba->MBslimaddr = phba->slim_memmap_p; |
2563 | phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; | 2810 | phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; |
@@ -2565,63 +2812,58 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
2565 | phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; | 2812 | phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; |
2566 | phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; | 2813 | phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; |
2567 | 2814 | ||
2568 | /* Configure and enable interrupt */ | 2815 | /* Configure sysfs attributes */ |
2569 | if (phba->cfg_use_msi == 2) { | ||
2570 | /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ | ||
2571 | error = lpfc_sli_config_port(phba, 3); | ||
2572 | if (error) | ||
2573 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
2574 | "0427 Firmware not capable of SLI 3 mode.\n"); | ||
2575 | else { | ||
2576 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
2577 | "0426 Firmware capable of SLI 3 mode.\n"); | ||
2578 | /* Now, try to enable MSI-X interrupt mode */ | ||
2579 | error = lpfc_enable_msix(phba); | ||
2580 | if (!error) { | ||
2581 | phba->intr_type = MSIX; | ||
2582 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
2583 | "0430 enable MSI-X mode.\n"); | ||
2584 | } | ||
2585 | } | ||
2586 | } | ||
2587 | |||
2588 | /* Fallback to MSI if MSI-X initialization failed */ | ||
2589 | if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) { | ||
2590 | retval = pci_enable_msi(phba->pcidev); | ||
2591 | if (!retval) { | ||
2592 | phba->intr_type = MSI; | ||
2593 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
2594 | "0473 enable MSI mode.\n"); | ||
2595 | } else | ||
2596 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
2597 | "0452 enable IRQ mode.\n"); | ||
2598 | } | ||
2599 | |||
2600 | /* MSI-X is the only case the doesn't need to call request_irq */ | ||
2601 | if (phba->intr_type != MSIX) { | ||
2602 | retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, | ||
2603 | IRQF_SHARED, LPFC_DRIVER_NAME, phba); | ||
2604 | if (retval) { | ||
2605 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0451 Enable " | ||
2606 | "interrupt handler failed\n"); | ||
2607 | error = retval; | ||
2608 | goto out_disable_msi; | ||
2609 | } else if (phba->intr_type != MSI) | ||
2610 | phba->intr_type = INTx; | ||
2611 | } | ||
2612 | |||
2613 | if (lpfc_alloc_sysfs_attr(vport)) { | 2816 | if (lpfc_alloc_sysfs_attr(vport)) { |
2614 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 2817 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
2615 | "1476 Failed to allocate sysfs attr\n"); | 2818 | "1476 Failed to allocate sysfs attr\n"); |
2616 | error = -ENOMEM; | 2819 | error = -ENOMEM; |
2617 | goto out_free_irq; | 2820 | goto out_destroy_port; |
2618 | } | 2821 | } |
2619 | 2822 | ||
2620 | if (lpfc_sli_hba_setup(phba)) { | 2823 | cfg_mode = phba->cfg_use_msi; |
2621 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 2824 | while (true) { |
2622 | "1477 Failed to set up hba\n"); | 2825 | /* Configure and enable interrupt */ |
2623 | error = -ENODEV; | 2826 | intr_mode = lpfc_enable_intr(phba, cfg_mode); |
2624 | goto out_remove_device; | 2827 | if (intr_mode == LPFC_INTR_ERROR) { |
2828 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
2829 | "0426 Failed to enable interrupt.\n"); | ||
2830 | goto out_free_sysfs_attr; | ||
2831 | } | ||
2832 | /* HBA SLI setup */ | ||
2833 | if (lpfc_sli_hba_setup(phba)) { | ||
2834 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
2835 | "1477 Failed to set up hba\n"); | ||
2836 | error = -ENODEV; | ||
2837 | goto out_remove_device; | ||
2838 | } | ||
2839 | |||
2840 | /* Wait 50ms for the interrupts of previous mailbox commands */ | ||
2841 | msleep(50); | ||
2842 | /* Check active interrupts received */ | ||
2843 | if (phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { | ||
2844 | /* Log the current active interrupt mode */ | ||
2845 | phba->intr_mode = intr_mode; | ||
2846 | lpfc_log_intr_mode(phba, intr_mode); | ||
2847 | break; | ||
2848 | } else { | ||
2849 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
2850 | "0451 Configure interrupt mode (%d) " | ||
2851 | "failed active interrupt test.\n", | ||
2852 | intr_mode); | ||
2853 | if (intr_mode == 0) { | ||
2854 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
2855 | "0479 Failed to enable " | ||
2856 | "interrupt.\n"); | ||
2857 | error = -ENODEV; | ||
2858 | goto out_remove_device; | ||
2859 | } | ||
2860 | /* Stop HBA SLI setups */ | ||
2861 | lpfc_stop_port(phba); | ||
2862 | /* Disable the current interrupt mode */ | ||
2863 | lpfc_disable_intr(phba); | ||
2864 | /* Try next level of interrupt mode */ | ||
2865 | cfg_mode = --intr_mode; | ||
2866 | } | ||
2625 | } | 2867 | } |
2626 | 2868 | ||
2627 | /* | 2869 | /* |
@@ -2629,6 +2871,75 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
2629 | * the value of can_queue. | 2871 | * the value of can_queue. |
2630 | */ | 2872 | */ |
2631 | shost->can_queue = phba->cfg_hba_queue_depth - 10; | 2873 | shost->can_queue = phba->cfg_hba_queue_depth - 10; |
2874 | if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { | ||
2875 | |||
2876 | if (lpfc_prot_mask && lpfc_prot_guard) { | ||
2877 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
2878 | "1478 Registering BlockGuard with the " | ||
2879 | "SCSI layer\n"); | ||
2880 | |||
2881 | scsi_host_set_prot(shost, lpfc_prot_mask); | ||
2882 | scsi_host_set_guard(shost, lpfc_prot_guard); | ||
2883 | } | ||
2884 | } | ||
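Editorial note: the lpfc_prot_mask/lpfc_prot_guard values registered above come from module-level settings. The values shown here only illustrate the kind of DIF/DIX capabilities and guard type that might be passed; they are not the driver's actual defaults.

unsigned int  lpfc_prot_mask  = SHOST_DIF_TYPE1_PROTECTION |
				SHOST_DIX_TYPE1_PROTECTION;
unsigned char lpfc_prot_guard = SHOST_DIX_GUARD_CRC;

/* scsi_host_set_prot()/scsi_host_set_guard() record these in the Scsi_Host
 * so the block layer knows which protection types the HBA can offload and
 * which guard-tag format (CRC vs IP checksum) it expects. */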
2885 | |||
2886 | if (!_dump_buf_data) { | ||
2887 | int pagecnt = 10; | ||
2888 | while (pagecnt) { | ||
2889 | spin_lock_init(&_dump_buf_lock); | ||
2890 | _dump_buf_data = | ||
2891 | (char *) __get_free_pages(GFP_KERNEL, pagecnt); | ||
2892 | if (_dump_buf_data) { | ||
2893 | printk(KERN_ERR "BLKGRD allocated %d pages for " | ||
2894 | "_dump_buf_data at 0x%p\n", | ||
2895 | (1 << pagecnt), _dump_buf_data); | ||
2896 | _dump_buf_data_order = pagecnt; | ||
2897 | memset(_dump_buf_data, 0, ((1 << PAGE_SHIFT) | ||
2898 | << pagecnt)); | ||
2899 | break; | ||
2900 | } else { | ||
2901 | --pagecnt; | ||
2902 | } | ||
2903 | |||
2904 | } | ||
2905 | |||
2906 | if (!_dump_buf_data_order) | ||
2907 | printk(KERN_ERR "BLKGRD ERROR unable to allocate " | ||
2908 | "memory for hexdump\n"); | ||
2909 | |||
2910 | } else { | ||
2911 | printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p" | ||
2912 | "\n", _dump_buf_data); | ||
2913 | } | ||
2914 | |||
2915 | |||
2916 | if (!_dump_buf_dif) { | ||
2917 | int pagecnt = 10; | ||
2918 | while (pagecnt) { | ||
2919 | _dump_buf_dif = | ||
2920 | (char *) __get_free_pages(GFP_KERNEL, pagecnt); | ||
2921 | if (_dump_buf_dif) { | ||
2922 | printk(KERN_ERR "BLKGRD allocated %d pages for " | ||
2923 | "_dump_buf_dif at 0x%p\n", | ||
2924 | (1 << pagecnt), _dump_buf_dif); | ||
2925 | _dump_buf_dif_order = pagecnt; | ||
2926 | memset(_dump_buf_dif, 0, ((1 << PAGE_SHIFT) | ||
2927 | << pagecnt)); | ||
2928 | break; | ||
2929 | } else { | ||
2930 | --pagecnt; | ||
2931 | } | ||
2932 | |||
2933 | } | ||
2934 | |||
2935 | if (!_dump_buf_dif_order) | ||
2936 | printk(KERN_ERR "BLKGRD ERROR unable to allocate " | ||
2937 | "memory for hexdump\n"); | ||
2938 | |||
2939 | } else { | ||
2940 | printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n", | ||
2941 | _dump_buf_dif); | ||
2942 | } | ||
2632 | 2943 | ||
2633 | lpfc_host_attrib_init(shost); | 2944 | lpfc_host_attrib_init(shost); |
2634 | 2945 | ||
@@ -2646,29 +2957,22 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
2646 | fc_host_post_vendor_event(shost, fc_get_event_number(), | 2957 | fc_host_post_vendor_event(shost, fc_get_event_number(), |
2647 | sizeof(adapter_event), | 2958 | sizeof(adapter_event), |
2648 | (char *) &adapter_event, | 2959 | (char *) &adapter_event, |
2649 | SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); | 2960 | LPFC_NL_VENDOR_ID); |
2650 | |||
2651 | scsi_scan_host(shost); | ||
2652 | 2961 | ||
2653 | return 0; | 2962 | return 0; |
2654 | 2963 | ||
2655 | out_remove_device: | 2964 | out_remove_device: |
2656 | lpfc_free_sysfs_attr(vport); | ||
2657 | spin_lock_irq(shost->host_lock); | 2965 | spin_lock_irq(shost->host_lock); |
2658 | vport->load_flag |= FC_UNLOADING; | 2966 | vport->load_flag |= FC_UNLOADING; |
2659 | spin_unlock_irq(shost->host_lock); | 2967 | spin_unlock_irq(shost->host_lock); |
2660 | out_free_irq: | ||
2661 | lpfc_stop_phba_timers(phba); | 2968 | lpfc_stop_phba_timers(phba); |
2662 | phba->pport->work_port_events = 0; | 2969 | phba->pport->work_port_events = 0; |
2663 | 2970 | lpfc_disable_intr(phba); | |
2664 | if (phba->intr_type == MSIX) | 2971 | lpfc_sli_hba_down(phba); |
2665 | lpfc_disable_msix(phba); | 2972 | lpfc_sli_brdrestart(phba); |
2666 | else | 2973 | out_free_sysfs_attr: |
2667 | free_irq(phba->pcidev->irq, phba); | 2974 | lpfc_free_sysfs_attr(vport); |
2668 | 2975 | out_destroy_port: | |
2669 | out_disable_msi: | ||
2670 | if (phba->intr_type == MSI) | ||
2671 | pci_disable_msi(phba->pcidev); | ||
2672 | destroy_port(vport); | 2976 | destroy_port(vport); |
2673 | out_kthread_stop: | 2977 | out_kthread_stop: |
2674 | kthread_stop(phba->worker_thread); | 2978 | kthread_stop(phba->worker_thread); |
@@ -2709,7 +3013,7 @@ out: | |||
2709 | * @pdev: pointer to PCI device | 3013 | * @pdev: pointer to PCI device |
2710 | * | 3014 | * |
2711 | * This routine is to be registered to the kernel's PCI subsystem. When an | 3015 | * This routine is to be registered to the kernel's PCI subsystem. When an |
2712 | * Emulex HBA is removed from PCI bus. It perform all the necessary cleanup | 3016 | * Emulex HBA is removed from PCI bus, it performs all the necessary cleanup |
2713 | * for the HBA device to be removed from the PCI subsystem properly. | 3017 | * for the HBA device to be removed from the PCI subsystem properly. |
2714 | **/ | 3018 | **/ |
2715 | static void __devexit | 3019 | static void __devexit |
@@ -2717,18 +3021,27 @@ lpfc_pci_remove_one(struct pci_dev *pdev) | |||
2717 | { | 3021 | { |
2718 | struct Scsi_Host *shost = pci_get_drvdata(pdev); | 3022 | struct Scsi_Host *shost = pci_get_drvdata(pdev); |
2719 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | 3023 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; |
3024 | struct lpfc_vport **vports; | ||
2720 | struct lpfc_hba *phba = vport->phba; | 3025 | struct lpfc_hba *phba = vport->phba; |
3026 | int i; | ||
2721 | int bars = pci_select_bars(pdev, IORESOURCE_MEM); | 3027 | int bars = pci_select_bars(pdev, IORESOURCE_MEM); |
2722 | 3028 | ||
2723 | spin_lock_irq(&phba->hbalock); | 3029 | spin_lock_irq(&phba->hbalock); |
2724 | vport->load_flag |= FC_UNLOADING; | 3030 | vport->load_flag |= FC_UNLOADING; |
2725 | spin_unlock_irq(&phba->hbalock); | 3031 | spin_unlock_irq(&phba->hbalock); |
2726 | 3032 | ||
2727 | kfree(vport->vname); | ||
2728 | lpfc_free_sysfs_attr(vport); | 3033 | lpfc_free_sysfs_attr(vport); |
2729 | 3034 | ||
2730 | kthread_stop(phba->worker_thread); | 3035 | kthread_stop(phba->worker_thread); |
2731 | 3036 | ||
3037 | /* Release all the vports against this physical port */ | ||
3038 | vports = lpfc_create_vport_work_array(phba); | ||
3039 | if (vports != NULL) | ||
3040 | for (i = 1; i <= phba->max_vpi && vports[i] != NULL; i++) | ||
3041 | fc_vport_terminate(vports[i]->fc_vport); | ||
3042 | lpfc_destroy_vport_work_array(phba, vports); | ||
3043 | |||
3044 | /* Remove FC host and then SCSI host with the physical port */ | ||
2732 | fc_remove_host(shost); | 3045 | fc_remove_host(shost); |
2733 | scsi_remove_host(shost); | 3046 | scsi_remove_host(shost); |
2734 | lpfc_cleanup(vport); | 3047 | lpfc_cleanup(vport); |
@@ -2748,13 +3061,8 @@ lpfc_pci_remove_one(struct pci_dev *pdev) | |||
2748 | 3061 | ||
2749 | lpfc_debugfs_terminate(vport); | 3062 | lpfc_debugfs_terminate(vport); |
2750 | 3063 | ||
2751 | if (phba->intr_type == MSIX) | 3064 | /* Disable interrupt */ |
2752 | lpfc_disable_msix(phba); | 3065 | lpfc_disable_intr(phba); |
2753 | else { | ||
2754 | free_irq(phba->pcidev->irq, phba); | ||
2755 | if (phba->intr_type == MSI) | ||
2756 | pci_disable_msi(phba->pcidev); | ||
2757 | } | ||
2758 | 3066 | ||
2759 | pci_set_drvdata(pdev, NULL); | 3067 | pci_set_drvdata(pdev, NULL); |
2760 | scsi_host_put(shost); | 3068 | scsi_host_put(shost); |
@@ -2786,6 +3094,115 @@ lpfc_pci_remove_one(struct pci_dev *pdev) | |||
2786 | } | 3094 | } |
2787 | 3095 | ||
2788 | /** | 3096 | /** |
3097 | * lpfc_pci_suspend_one: lpfc PCI func to suspend device for power management. | ||
3098 | * @pdev: pointer to PCI device | ||
3099 | * @msg: power management message | ||
3100 | * | ||
3101 | * This routine is to be registered to the kernel's PCI subsystem to support | ||
3102 | * system Power Management (PM). When PM invokes this method, it quiesces the | ||
3103 | * device by stopping the driver's worker thread for the device, turning off | ||
3104 | * device's interrupt and DMA, and bringing the device offline. Note that as the | ||
3105 | * driver implements the minimum PM requirements to a power-aware driver's PM | ||
3106 | * support for suspend/resume -- all the possible PM messages (SUSPEND, | ||
3107 | * HIBERNATE, FREEZE) to the suspend() method call will be treated as SUSPEND | ||
3108 | * and the driver will fully reinitialize its device during resume() method | ||
3109 | * call, the driver will set device to PCI_D3hot state in PCI config space | ||
3110 | * instead of setting it according to the @msg provided by the PM. | ||
3111 | * | ||
3112 | * Return code | ||
3113 | * 0 - driver suspended the device | ||
3114 | * Error otherwise | ||
3115 | **/ | ||
3116 | static int | ||
3117 | lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) | ||
3118 | { | ||
3119 | struct Scsi_Host *shost = pci_get_drvdata(pdev); | ||
3120 | struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; | ||
3121 | |||
3122 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
3123 | "0473 PCI device Power Management suspend.\n"); | ||
3124 | |||
3125 | /* Bring down the device */ | ||
3126 | lpfc_offline_prep(phba); | ||
3127 | lpfc_offline(phba); | ||
3128 | kthread_stop(phba->worker_thread); | ||
3129 | |||
3130 | /* Disable interrupt from device */ | ||
3131 | lpfc_disable_intr(phba); | ||
3132 | |||
3133 | /* Save device state to PCI config space */ | ||
3134 | pci_save_state(pdev); | ||
3135 | pci_set_power_state(pdev, PCI_D3hot); | ||
3136 | |||
3137 | return 0; | ||
3138 | } | ||
3139 | |||
3140 | /** | ||
3141 | * lpfc_pci_resume_one: lpfc PCI func to resume device for power management. | ||
3142 | * @pdev: pointer to PCI device | ||
3143 | * | ||
3144 | * This routine is to be registered to the kernel's PCI subsystem to support | ||
3145 | * system Power Management (PM). When PM invokes this method, it restores | ||
3146 | * the device's PCI config space state and fully reinitializes the device | ||
3147 | * and brings it online. Note that as the driver implements the minimum PM | ||
3148 | * requirements to a power-aware driver's PM for suspend/resume -- all | ||
3149 | * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() | ||
3150 | * method call will be treated as SUSPEND and the driver will fully | ||
3151 | * reinitialize its device during resume() method call, the device will be | ||
3152 | * set to PCI_D0 directly in PCI config space before restoring the state. | ||
3153 | * | ||
3154 | * Return code | ||
3155 | * 0 - driver resumed the device | ||
3156 | * Error otherwise | ||
3157 | **/ | ||
3158 | static int | ||
3159 | lpfc_pci_resume_one(struct pci_dev *pdev) | ||
3160 | { | ||
3161 | struct Scsi_Host *shost = pci_get_drvdata(pdev); | ||
3162 | struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; | ||
3163 | uint32_t intr_mode; | ||
3164 | int error; | ||
3165 | |||
3166 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
3167 | "0452 PCI device Power Management resume.\n"); | ||
3168 | |||
3169 | /* Restore device state from PCI config space */ | ||
3170 | pci_set_power_state(pdev, PCI_D0); | ||
3171 | pci_restore_state(pdev); | ||
3172 | if (pdev->is_busmaster) | ||
3173 | pci_set_master(pdev); | ||
3174 | |||
3175 | /* Startup the kernel thread for this host adapter. */ | ||
3176 | phba->worker_thread = kthread_run(lpfc_do_work, phba, | ||
3177 | "lpfc_worker_%d", phba->brd_no); | ||
3178 | if (IS_ERR(phba->worker_thread)) { | ||
3179 | error = PTR_ERR(phba->worker_thread); | ||
3180 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
3181 | "0434 PM resume failed to start worker " | ||
3182 | "thread: error=x%x.\n", error); | ||
3183 | return error; | ||
3184 | } | ||
3185 | |||
3186 | /* Configure and enable interrupt */ | ||
3187 | intr_mode = lpfc_enable_intr(phba, phba->intr_mode); | ||
3188 | if (intr_mode == LPFC_INTR_ERROR) { | ||
3189 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
3190 | "0430 PM resume Failed to enable interrupt\n"); | ||
3191 | return -EIO; | ||
3192 | } else | ||
3193 | phba->intr_mode = intr_mode; | ||
3194 | |||
3195 | /* Restart HBA and bring it online */ | ||
3196 | lpfc_sli_brdrestart(phba); | ||
3197 | lpfc_online(phba); | ||
3198 | |||
3199 | /* Log the current active interrupt mode */ | ||
3200 | lpfc_log_intr_mode(phba, phba->intr_mode); | ||
3201 | |||
3202 | return 0; | ||
3203 | } | ||
3204 | |||
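The suspend/resume pair above follows the minimal power-management pattern the comments describe: every PM message is treated as a full suspend, config space is saved and the function is put into PCI_D3hot, and resume returns to PCI_D0, restores config space and reinitializes the device from scratch. A bare-bones sketch of that pattern, using only generic PCI calls and placeholder foo_* names (not lpfc code):

#include <linux/pci.h>

static int foo_suspend(struct pci_dev *pdev, pm_message_t msg)
{
	/* quiesce the device here: stop worker threads, IRQs and DMA */
	pci_save_state(pdev);			/* keep config space for resume */
	pci_set_power_state(pdev, PCI_D3hot);	/* ignore @msg, always D3hot */
	return 0;
}

static int foo_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);	/* full power first */
	pci_restore_state(pdev);		/* then restore config space */
	if (pdev->is_busmaster)
		pci_set_master(pdev);
	/* fully reinitialize the device here, as the comments require */
	return 0;
}

static struct pci_driver foo_driver = {
	.name    = "foo",
	.suspend = foo_suspend,
	.resume  = foo_resume,
	/* .probe, .remove and .id_table omitted from this sketch */
};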
3205 | /** | ||
2789 | * lpfc_io_error_detected: Driver method for handling PCI I/O error detected. | 3206 | * lpfc_io_error_detected: Driver method for handling PCI I/O error detected. |
2790 | * @pdev: pointer to PCI device. | 3207 | * @pdev: pointer to PCI device. |
2791 | * @state: the current PCI connection state. | 3208 | * @state: the current PCI connection state. |
@@ -2828,13 +3245,8 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev, | |||
2828 | pring = &psli->ring[psli->fcp_ring]; | 3245 | pring = &psli->ring[psli->fcp_ring]; |
2829 | lpfc_sli_abort_iocb_ring(phba, pring); | 3246 | lpfc_sli_abort_iocb_ring(phba, pring); |
2830 | 3247 | ||
2831 | if (phba->intr_type == MSIX) | 3248 | /* Disable interrupt */ |
2832 | lpfc_disable_msix(phba); | 3249 | lpfc_disable_intr(phba); |
2833 | else { | ||
2834 | free_irq(phba->pcidev->irq, phba); | ||
2835 | if (phba->intr_type == MSI) | ||
2836 | pci_disable_msi(phba->pcidev); | ||
2837 | } | ||
2838 | 3250 | ||
2839 | /* Request a slot reset. */ | 3251 | /* Request a slot reset. */ |
2840 | return PCI_ERS_RESULT_NEED_RESET; | 3252 | return PCI_ERS_RESULT_NEED_RESET; |
@@ -2862,7 +3274,7 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) | |||
2862 | struct Scsi_Host *shost = pci_get_drvdata(pdev); | 3274 | struct Scsi_Host *shost = pci_get_drvdata(pdev); |
2863 | struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; | 3275 | struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; |
2864 | struct lpfc_sli *psli = &phba->sli; | 3276 | struct lpfc_sli *psli = &phba->sli; |
2865 | int error, retval; | 3277 | uint32_t intr_mode; |
2866 | 3278 | ||
2867 | dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); | 3279 | dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); |
2868 | if (pci_enable_device_mem(pdev)) { | 3280 | if (pci_enable_device_mem(pdev)) { |
@@ -2871,61 +3283,31 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) | |||
2871 | return PCI_ERS_RESULT_DISCONNECT; | 3283 | return PCI_ERS_RESULT_DISCONNECT; |
2872 | } | 3284 | } |
2873 | 3285 | ||
2874 | pci_set_master(pdev); | 3286 | pci_restore_state(pdev); |
3287 | if (pdev->is_busmaster) | ||
3288 | pci_set_master(pdev); | ||
2875 | 3289 | ||
2876 | spin_lock_irq(&phba->hbalock); | 3290 | spin_lock_irq(&phba->hbalock); |
2877 | psli->sli_flag &= ~LPFC_SLI2_ACTIVE; | 3291 | psli->sli_flag &= ~LPFC_SLI2_ACTIVE; |
2878 | spin_unlock_irq(&phba->hbalock); | 3292 | spin_unlock_irq(&phba->hbalock); |
2879 | 3293 | ||
2880 | /* Enable configured interrupt method */ | 3294 | /* Configure and enable interrupt */ |
2881 | phba->intr_type = NONE; | 3295 | intr_mode = lpfc_enable_intr(phba, phba->intr_mode); |
2882 | if (phba->cfg_use_msi == 2) { | 3296 | if (intr_mode == LPFC_INTR_ERROR) { |
2883 | /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ | 3297 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
2884 | error = lpfc_sli_config_port(phba, 3); | 3298 | "0427 Cannot re-enable interrupt after " |
2885 | if (error) | 3299 | "slot reset.\n"); |
2886 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | 3300 | return PCI_ERS_RESULT_DISCONNECT; |
2887 | "0478 Firmware not capable of SLI 3 mode.\n"); | 3301 | } else |
2888 | else { | 3302 | phba->intr_mode = intr_mode; |
2889 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
2890 | "0479 Firmware capable of SLI 3 mode.\n"); | ||
2891 | /* Now, try to enable MSI-X interrupt mode */ | ||
2892 | error = lpfc_enable_msix(phba); | ||
2893 | if (!error) { | ||
2894 | phba->intr_type = MSIX; | ||
2895 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
2896 | "0480 enable MSI-X mode.\n"); | ||
2897 | } | ||
2898 | } | ||
2899 | } | ||
2900 | |||
2901 | /* Fallback to MSI if MSI-X initialization failed */ | ||
2902 | if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) { | ||
2903 | retval = pci_enable_msi(phba->pcidev); | ||
2904 | if (!retval) { | ||
2905 | phba->intr_type = MSI; | ||
2906 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
2907 | "0481 enable MSI mode.\n"); | ||
2908 | } else | ||
2909 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
2910 | "0470 enable IRQ mode.\n"); | ||
2911 | } | ||
2912 | |||
2913 | /* MSI-X is the only case the doesn't need to call request_irq */ | ||
2914 | if (phba->intr_type != MSIX) { | ||
2915 | retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, | ||
2916 | IRQF_SHARED, LPFC_DRIVER_NAME, phba); | ||
2917 | if (retval) { | ||
2918 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
2919 | "0471 Enable interrupt handler " | ||
2920 | "failed\n"); | ||
2921 | } else if (phba->intr_type != MSI) | ||
2922 | phba->intr_type = INTx; | ||
2923 | } | ||
2924 | 3303 | ||
2925 | /* Take device offline; this will perform cleanup */ | 3304 | /* Take device offline; this will perform cleanup */ |
2926 | lpfc_offline(phba); | 3305 | lpfc_offline(phba); |
2927 | lpfc_sli_brdrestart(phba); | 3306 | lpfc_sli_brdrestart(phba); |
2928 | 3307 | ||
3308 | /* Log the current active interrupt mode */ | ||
3309 | lpfc_log_intr_mode(phba, phba->intr_mode); | ||
3310 | |||
2929 | return PCI_ERS_RESULT_RECOVERED; | 3311 | return PCI_ERS_RESULT_RECOVERED; |
2930 | } | 3312 | } |
2931 | 3313 | ||
@@ -3037,6 +3419,8 @@ static struct pci_driver lpfc_driver = { | |||
3037 | .id_table = lpfc_id_table, | 3419 | .id_table = lpfc_id_table, |
3038 | .probe = lpfc_pci_probe_one, | 3420 | .probe = lpfc_pci_probe_one, |
3039 | .remove = __devexit_p(lpfc_pci_remove_one), | 3421 | .remove = __devexit_p(lpfc_pci_remove_one), |
3422 | .suspend = lpfc_pci_suspend_one, | ||
3423 | .resume = lpfc_pci_resume_one, | ||
3040 | .err_handler = &lpfc_err_handler, | 3424 | .err_handler = &lpfc_err_handler, |
3041 | }; | 3425 | }; |
3042 | 3426 | ||
@@ -3100,6 +3484,19 @@ lpfc_exit(void) | |||
3100 | fc_release_transport(lpfc_transport_template); | 3484 | fc_release_transport(lpfc_transport_template); |
3101 | if (lpfc_enable_npiv) | 3485 | if (lpfc_enable_npiv) |
3102 | fc_release_transport(lpfc_vport_transport_template); | 3486 | fc_release_transport(lpfc_vport_transport_template); |
3487 | if (_dump_buf_data) { | ||
3488 | printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_data " | ||
3489 | "at 0x%p\n", | ||
3490 | (1L << _dump_buf_data_order), _dump_buf_data); | ||
3491 | free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order); | ||
3492 | } | ||
3493 | |||
3494 | if (_dump_buf_dif) { | ||
3495 | printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_dif " | ||
3496 | "at 0x%p\n", | ||
3497 | (1L << _dump_buf_dif_order), _dump_buf_dif); | ||
3498 | free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order); | ||
3499 | } | ||
3103 | } | 3500 | } |
3104 | 3501 | ||
3105 | module_init(lpfc_init); | 3502 | module_init(lpfc_init); |
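The cleanup added above frees the BlockGuard debug capture buffers with free_pages(), passing back the allocation order recorded when the buffers were set up; an order-n allocation covers 2^n contiguous pages, which is why the message prints (1L << order) as the page count. A generic sketch of that alloc/free pairing, with placeholder names rather than the real _dump_buf_* globals:

#include <linux/errno.h>
#include <linux/gfp.h>

static unsigned long ex_buf;
static unsigned int ex_buf_order = 2;	/* 2^2 = 4 contiguous pages */

static int ex_alloc(void)
{
	ex_buf = __get_free_pages(GFP_KERNEL, ex_buf_order);
	return ex_buf ? 0 : -ENOMEM;
}

static void ex_free(void)
{
	if (ex_buf) {
		/* free_pages() must get the same order used to allocate */
		free_pages(ex_buf, ex_buf_order);
		ex_buf = 0;
	}
}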
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h index 39fd2b843bec..a85b7c196bbc 100644 --- a/drivers/scsi/lpfc/lpfc_logmsg.h +++ b/drivers/scsi/lpfc/lpfc_logmsg.h | |||
@@ -27,6 +27,7 @@ | |||
27 | #define LOG_FCP 0x40 /* FCP traffic history */ | 27 | #define LOG_FCP 0x40 /* FCP traffic history */ |
28 | #define LOG_NODE 0x80 /* Node table events */ | 28 | #define LOG_NODE 0x80 /* Node table events */ |
29 | #define LOG_TEMP 0x100 /* Temperature sensor events */ | 29 | #define LOG_TEMP 0x100 /* Temperature sensor events */ |
30 | #define LOG_BG 0x200 /* BlockGuard events */ | ||
30 | #define LOG_MISC 0x400 /* Miscellaneous events */ | 31 | #define LOG_MISC 0x400 /* Miscellaneous events */ |
31 | #define LOG_SLI 0x800 /* SLI events */ | 32 | #define LOG_SLI 0x800 /* SLI events */ |
32 | #define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */ | 33 | #define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */ |
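LOG_BG slots into the existing verbosity mask as an independent bit, so BlockGuard messages can be switched on or off without touching the other classes. A hedged illustration of how such a mask gates messages (the mask variable and helper below are invented for the example; the driver's real gating lives in its logging macro):

#include <linux/kernel.h>

#define EX_LOG_TEMP	0x100	/* values mirror the table above */
#define EX_LOG_BG	0x200
#define EX_LOG_MISC	0x400

static unsigned int ex_log_verbose = EX_LOG_BG | EX_LOG_MISC;

/* Print only when the message class bit is set in the verbosity mask. */
static void ex_log(unsigned int class, const char *msg)
{
	if (ex_log_verbose & class)
		printk(KERN_INFO "example: %s\n", msg);
}

/* ex_log(EX_LOG_BG, "BlockGuard event") prints;
 * ex_log(EX_LOG_TEMP, "temperature event") is filtered out.
 */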
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 7465fe746fe9..34eeb086a667 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c | |||
@@ -77,6 +77,38 @@ lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset) | |||
77 | } | 77 | } |
78 | 78 | ||
79 | /** | 79 | /** |
80 | * lpfc_dump_wakeup_param: Prepare a mailbox command for retrieving wakeup params. | ||
81 | * @phba: pointer to lpfc hba data structure. | ||
82 | * @pmb: pointer to the driver internal queue element for mailbox command. | ||
83 | * This function creates a dump memory mailbox command to dump wake up | ||
84 | * parameters. | ||
85 | */ | ||
86 | void | ||
87 | lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | ||
88 | { | ||
89 | MAILBOX_t *mb; | ||
90 | void *ctx; | ||
91 | |||
92 | mb = &pmb->mb; | ||
93 | /* Save context so that we can restore after memset */ | ||
94 | ctx = pmb->context2; | ||
95 | |||
96 | /* Setup to dump the wakeup parameters region */ | ||
97 | memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); | ||
98 | mb->mbxCommand = MBX_DUMP_MEMORY; | ||
99 | mb->mbxOwner = OWN_HOST; | ||
100 | mb->un.varDmp.cv = 1; | ||
101 | mb->un.varDmp.type = DMP_NV_PARAMS; | ||
102 | mb->un.varDmp.entry_index = 0; | ||
103 | mb->un.varDmp.region_id = WAKE_UP_PARMS_REGION_ID; | ||
104 | mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE; | ||
105 | mb->un.varDmp.co = 0; | ||
106 | mb->un.varDmp.resp_offset = 0; | ||
107 | pmb->context2 = ctx; | ||
108 | return; | ||
109 | } | ||
110 | |||
111 | /** | ||
80 | * lpfc_read_nv: Prepare a mailbox command for reading HBA's NVRAM param. | 112 | * lpfc_read_nv: Prepare a mailbox command for reading HBA's NVRAM param. |
81 | * @phba: pointer to lpfc hba data structure. | 113 | * @phba: pointer to lpfc hba data structure. |
82 | * @pmb: pointer to the driver internal queue element for mailbox command. | 114 | * @pmb: pointer to the driver internal queue element for mailbox command. |
@@ -1061,9 +1093,14 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
1061 | mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr); | 1093 | mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr); |
1062 | mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr); | 1094 | mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr); |
1063 | 1095 | ||
1096 | /* The Host Group Pointer is always in SLIM */ | ||
1097 | mb->un.varCfgPort.hps = 1; | ||
1098 | |||
1064 | /* If HBA supports SLI=3 ask for it */ | 1099 | /* If HBA supports SLI=3 ask for it */ |
1065 | 1100 | ||
1066 | if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) { | 1101 | if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) { |
1102 | if (phba->cfg_enable_bg) | ||
1103 | mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */ | ||
1067 | mb->un.varCfgPort.cerbm = 1; /* Request HBQs */ | 1104 | mb->un.varCfgPort.cerbm = 1; /* Request HBQs */ |
1068 | mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */ | 1105 | mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */ |
1069 | mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */ | 1106 | mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */ |
@@ -1163,16 +1200,11 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
1163 | sizeof(*phba->host_gp)); | 1200 | sizeof(*phba->host_gp)); |
1164 | } | 1201 | } |
1165 | 1202 | ||
1166 | /* Setup Port Group ring pointer */ | 1203 | /* Setup Port Group offset */ |
1167 | if (phba->sli3_options & LPFC_SLI3_INB_ENABLED) { | 1204 | if (phba->sli_rev == 3) |
1168 | pgp_offset = offsetof(struct lpfc_sli2_slim, | ||
1169 | mbx.us.s3_inb_pgp.port); | ||
1170 | phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get; | ||
1171 | } else if (phba->sli_rev == 3) { | ||
1172 | pgp_offset = offsetof(struct lpfc_sli2_slim, | 1205 | pgp_offset = offsetof(struct lpfc_sli2_slim, |
1173 | mbx.us.s3_pgp.port); | 1206 | mbx.us.s3_pgp.port); |
1174 | phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get; | 1207 | else |
1175 | } else | ||
1176 | pgp_offset = offsetof(struct lpfc_sli2_slim, mbx.us.s2.port); | 1208 | pgp_offset = offsetof(struct lpfc_sli2_slim, mbx.us.s2.port); |
1177 | pdma_addr = phba->slim2p.phys + pgp_offset; | 1209 | pdma_addr = phba->slim2p.phys + pgp_offset; |
1178 | phba->pcb->pgpAddrHigh = putPaddrHigh(pdma_addr); | 1210 | phba->pcb->pgpAddrHigh = putPaddrHigh(pdma_addr); |
@@ -1285,10 +1317,12 @@ lpfc_mbox_get(struct lpfc_hba * phba) | |||
1285 | void | 1317 | void |
1286 | lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq) | 1318 | lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq) |
1287 | { | 1319 | { |
1320 | unsigned long iflag; | ||
1321 | |||
1288 | /* This function expects to be called from interrupt context */ | 1322 | /* This function expects to be called from interrupt context */ |
1289 | spin_lock(&phba->hbalock); | 1323 | spin_lock_irqsave(&phba->hbalock, iflag); |
1290 | list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl); | 1324 | list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl); |
1291 | spin_unlock(&phba->hbalock); | 1325 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
1292 | return; | 1326 | return; |
1293 | } | 1327 | } |
1294 | 1328 | ||
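The switch above from spin_lock() to spin_lock_irqsave() makes the completion-queue helper safe to call whether or not interrupts are already disabled at the call site, since the saved flags restore whatever IRQ state the caller had. A generic sketch of the pattern with placeholder names (not the lpfc mailbox structures):

#include <linux/list.h>
#include <linux/spinlock.h>

struct ex_item {
	struct list_head list;
};

static LIST_HEAD(ex_cmpl_list);
static DEFINE_SPINLOCK(ex_cmpl_lock);

/* Safe from process or interrupt context: interrupts are disabled only
 * while the lock is held, and the caller's IRQ state is restored after.
 */
static void ex_cmpl_put(struct ex_item *item)
{
	unsigned long flags;

	spin_lock_irqsave(&ex_cmpl_lock, flags);
	list_add_tail(&item->list, &ex_cmpl_list);
	spin_unlock_irqrestore(&ex_cmpl_lock, flags);
}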
diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h index 1accb5a9f4e6..27d1a88a98fe 100644 --- a/drivers/scsi/lpfc/lpfc_nl.h +++ b/drivers/scsi/lpfc/lpfc_nl.h | |||
@@ -22,18 +22,20 @@ | |||
22 | #define FC_REG_LINK_EVENT 0x0001 /* link up / down events */ | 22 | #define FC_REG_LINK_EVENT 0x0001 /* link up / down events */ |
23 | #define FC_REG_RSCN_EVENT 0x0002 /* RSCN events */ | 23 | #define FC_REG_RSCN_EVENT 0x0002 /* RSCN events */ |
24 | #define FC_REG_CT_EVENT 0x0004 /* CT request events */ | 24 | #define FC_REG_CT_EVENT 0x0004 /* CT request events */ |
25 | #define FC_REG_DUMP_EVENT 0x0008 /* Dump events */ | 25 | #define FC_REG_DUMP_EVENT 0x0010 /* Dump events */ |
26 | #define FC_REG_TEMPERATURE_EVENT 0x0010 /* temperature events */ | 26 | #define FC_REG_TEMPERATURE_EVENT 0x0020 /* temperature events */ |
27 | #define FC_REG_ELS_EVENT 0x0020 /* lpfc els events */ | 27 | #define FC_REG_VPORTRSCN_EVENT 0x0040 /* Vport RSCN events */ |
28 | #define FC_REG_FABRIC_EVENT 0x0040 /* lpfc fabric events */ | 28 | #define FC_REG_ELS_EVENT 0x0080 /* lpfc els events */ |
29 | #define FC_REG_SCSI_EVENT 0x0080 /* lpfc scsi events */ | 29 | #define FC_REG_FABRIC_EVENT 0x0100 /* lpfc fabric events */ |
30 | #define FC_REG_BOARD_EVENT 0x0100 /* lpfc board events */ | 30 | #define FC_REG_SCSI_EVENT 0x0200 /* lpfc scsi events */ |
31 | #define FC_REG_ADAPTER_EVENT 0x0200 /* lpfc adapter events */ | 31 | #define FC_REG_BOARD_EVENT 0x0400 /* lpfc board events */ |
32 | #define FC_REG_ADAPTER_EVENT 0x0800 /* lpfc adapter events */ | ||
32 | #define FC_REG_EVENT_MASK (FC_REG_LINK_EVENT | \ | 33 | #define FC_REG_EVENT_MASK (FC_REG_LINK_EVENT | \ |
33 | FC_REG_RSCN_EVENT | \ | 34 | FC_REG_RSCN_EVENT | \ |
34 | FC_REG_CT_EVENT | \ | 35 | FC_REG_CT_EVENT | \ |
35 | FC_REG_DUMP_EVENT | \ | 36 | FC_REG_DUMP_EVENT | \ |
36 | FC_REG_TEMPERATURE_EVENT | \ | 37 | FC_REG_TEMPERATURE_EVENT | \ |
38 | FC_REG_VPORTRSCN_EVENT | \ | ||
37 | FC_REG_ELS_EVENT | \ | 39 | FC_REG_ELS_EVENT | \ |
38 | FC_REG_FABRIC_EVENT | \ | 40 | FC_REG_FABRIC_EVENT | \ |
39 | FC_REG_SCSI_EVENT | \ | 41 | FC_REG_SCSI_EVENT | \ |
@@ -52,6 +54,13 @@ | |||
52 | * The payload sent via the fc transport is one-way driver->application. | 54 | * The payload sent via the fc transport is one-way driver->application. |
53 | */ | 55 | */ |
54 | 56 | ||
57 | /* RSCN event header */ | ||
58 | struct lpfc_rscn_event_header { | ||
59 | uint32_t event_type; | ||
60 | uint32_t payload_length; /* RSCN data length in bytes */ | ||
61 | uint32_t rscn_payload[]; | ||
62 | }; | ||
63 | |||
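lpfc_rscn_event_header ends in a flexible array member, so the RSCN payload travels inline after the fixed header and payload_length tells the consumer how many bytes follow. A hedged sketch of building such an event in one allocation (the build helper is hypothetical; the driver actually hands these events to the FC transport interface):

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct ex_rscn_event_header {
	uint32_t event_type;
	uint32_t payload_length;	/* RSCN data length in bytes */
	uint32_t rscn_payload[];
};

/* Allocate header plus inline payload together, then copy the data in. */
static struct ex_rscn_event_header *
ex_build_rscn_event(uint32_t type, const uint32_t *data, uint32_t len_bytes)
{
	struct ex_rscn_event_header *ev;

	ev = kzalloc(sizeof(*ev) + len_bytes, GFP_KERNEL);
	if (!ev)
		return NULL;

	ev->event_type = type;
	ev->payload_length = len_bytes;
	memcpy(ev->rscn_payload, data, len_bytes);
	return ev;
}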
55 | /* els event header */ | 64 | /* els event header */ |
56 | struct lpfc_els_event_header { | 65 | struct lpfc_els_event_header { |
57 | uint32_t event_type; | 66 | uint32_t event_type; |
@@ -65,6 +74,7 @@ struct lpfc_els_event_header { | |||
65 | #define LPFC_EVENT_PRLO_RCV 0x02 | 74 | #define LPFC_EVENT_PRLO_RCV 0x02 |
66 | #define LPFC_EVENT_ADISC_RCV 0x04 | 75 | #define LPFC_EVENT_ADISC_RCV 0x04 |
67 | #define LPFC_EVENT_LSRJT_RCV 0x08 | 76 | #define LPFC_EVENT_LSRJT_RCV 0x08 |
77 | #define LPFC_EVENT_LOGO_RCV 0x10 | ||
68 | 78 | ||
69 | /* special els lsrjt event */ | 79 | /* special els lsrjt event */ |
70 | struct lpfc_lsrjt_event { | 80 | struct lpfc_lsrjt_event { |
@@ -74,6 +84,11 @@ struct lpfc_lsrjt_event { | |||
74 | uint32_t explanation; | 84 | uint32_t explanation; |
75 | }; | 85 | }; |
76 | 86 | ||
87 | /* special els logo event */ | ||
88 | struct lpfc_logo_event { | ||
89 | struct lpfc_els_event_header header; | ||
90 | uint8_t logo_wwpn[8]; | ||
91 | }; | ||
77 | 92 | ||
78 | /* fabric event header */ | 93 | /* fabric event header */ |
79 | struct lpfc_fabric_event_header { | 94 | struct lpfc_fabric_event_header { |
@@ -125,6 +140,7 @@ struct lpfc_scsi_varqueuedepth_event { | |||
125 | /* special case scsi check condition event */ | 140 | /* special case scsi check condition event */ |
126 | struct lpfc_scsi_check_condition_event { | 141 | struct lpfc_scsi_check_condition_event { |
127 | struct lpfc_scsi_event_header scsi_event; | 142 | struct lpfc_scsi_event_header scsi_event; |
143 | uint8_t opcode; | ||
128 | uint8_t sense_key; | 144 | uint8_t sense_key; |
129 | uint8_t asc; | 145 | uint8_t asc; |
130 | uint8_t ascq; | 146 | uint8_t ascq; |
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 0c25d97acb42..8f548adae9cc 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c | |||
@@ -1929,10 +1929,10 @@ lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
1929 | if (vport->fc_flag & FC_RSCN_DEFERRED) | 1929 | if (vport->fc_flag & FC_RSCN_DEFERRED) |
1930 | return ndlp->nlp_state; | 1930 | return ndlp->nlp_state; |
1931 | 1931 | ||
1932 | lpfc_cancel_retry_delay_tmo(vport, ndlp); | ||
1932 | spin_lock_irq(shost->host_lock); | 1933 | spin_lock_irq(shost->host_lock); |
1933 | ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); | 1934 | ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); |
1934 | spin_unlock_irq(shost->host_lock); | 1935 | spin_unlock_irq(shost->host_lock); |
1935 | lpfc_cancel_retry_delay_tmo(vport, ndlp); | ||
1936 | return ndlp->nlp_state; | 1936 | return ndlp->nlp_state; |
1937 | } | 1937 | } |
1938 | 1938 | ||
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index bd1867411821..b103b6ed4970 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c | |||
@@ -18,13 +18,14 @@ | |||
18 | * more details, a copy of which can be found in the file COPYING * | 18 | * more details, a copy of which can be found in the file COPYING * |
19 | * included with this package. * | 19 | * included with this package. * |
20 | *******************************************************************/ | 20 | *******************************************************************/ |
21 | |||
22 | #include <linux/pci.h> | 21 | #include <linux/pci.h> |
23 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
24 | #include <linux/delay.h> | 23 | #include <linux/delay.h> |
24 | #include <asm/unaligned.h> | ||
25 | 25 | ||
26 | #include <scsi/scsi.h> | 26 | #include <scsi/scsi.h> |
27 | #include <scsi/scsi_device.h> | 27 | #include <scsi/scsi_device.h> |
28 | #include <scsi/scsi_eh.h> | ||
28 | #include <scsi/scsi_host.h> | 29 | #include <scsi/scsi_host.h> |
29 | #include <scsi/scsi_tcq.h> | 30 | #include <scsi/scsi_tcq.h> |
30 | #include <scsi/scsi_transport_fc.h> | 31 | #include <scsi/scsi_transport_fc.h> |
@@ -43,6 +44,73 @@ | |||
43 | #define LPFC_RESET_WAIT 2 | 44 | #define LPFC_RESET_WAIT 2 |
44 | #define LPFC_ABORT_WAIT 2 | 45 | #define LPFC_ABORT_WAIT 2 |
45 | 46 | ||
47 | int _dump_buf_done; | ||
48 | |||
49 | static char *dif_op_str[] = { | ||
50 | "SCSI_PROT_NORMAL", | ||
51 | "SCSI_PROT_READ_INSERT", | ||
52 | "SCSI_PROT_WRITE_STRIP", | ||
53 | "SCSI_PROT_READ_STRIP", | ||
54 | "SCSI_PROT_WRITE_INSERT", | ||
55 | "SCSI_PROT_READ_PASS", | ||
56 | "SCSI_PROT_WRITE_PASS", | ||
57 | "SCSI_PROT_READ_CONVERT", | ||
58 | "SCSI_PROT_WRITE_CONVERT" | ||
59 | }; | ||
60 | |||
61 | static void | ||
62 | lpfc_debug_save_data(struct scsi_cmnd *cmnd) | ||
63 | { | ||
64 | void *src, *dst; | ||
65 | struct scatterlist *sgde = scsi_sglist(cmnd); | ||
66 | |||
67 | if (!_dump_buf_data) { | ||
68 | printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_data is NULL\n", | ||
69 | __func__); | ||
70 | return; | ||
71 | } | ||
72 | |||
73 | |||
74 | if (!sgde) { | ||
75 | printk(KERN_ERR "BLKGRD ERROR: data scatterlist is null\n"); | ||
76 | return; | ||
77 | } | ||
78 | |||
79 | dst = (void *) _dump_buf_data; | ||
80 | while (sgde) { | ||
81 | src = sg_virt(sgde); | ||
82 | memcpy(dst, src, sgde->length); | ||
83 | dst += sgde->length; | ||
84 | sgde = sg_next(sgde); | ||
85 | } | ||
86 | } | ||
87 | |||
88 | static void | ||
89 | lpfc_debug_save_dif(struct scsi_cmnd *cmnd) | ||
90 | { | ||
91 | void *src, *dst; | ||
92 | struct scatterlist *sgde = scsi_prot_sglist(cmnd); | ||
93 | |||
94 | if (!_dump_buf_dif) { | ||
95 | printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_dif is NULL\n", | ||
96 | __func__); | ||
97 | return; | ||
98 | } | ||
99 | |||
100 | if (!sgde) { | ||
101 | printk(KERN_ERR "BLKGRD ERROR: prot scatterlist is null\n"); | ||
102 | return; | ||
103 | } | ||
104 | |||
105 | dst = _dump_buf_dif; | ||
106 | while (sgde) { | ||
107 | src = sg_virt(sgde); | ||
108 | memcpy(dst, src, sgde->length); | ||
109 | dst += sgde->length; | ||
110 | sgde = sg_next(sgde); | ||
111 | } | ||
112 | } | ||
113 | |||
46 | /** | 114 | /** |
47 | * lpfc_update_stats: Update statistical data for the command completion. | 115 | * lpfc_update_stats: Update statistical data for the command completion. |
48 | * @phba: Pointer to HBA object. | 116 | * @phba: Pointer to HBA object. |
@@ -66,6 +134,8 @@ lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) | |||
66 | if (cmd->result) | 134 | if (cmd->result) |
67 | return; | 135 | return; |
68 | 136 | ||
137 | latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time); | ||
138 | |||
69 | spin_lock_irqsave(shost->host_lock, flags); | 139 | spin_lock_irqsave(shost->host_lock, flags); |
70 | if (!vport->stat_data_enabled || | 140 | if (!vport->stat_data_enabled || |
71 | vport->stat_data_blocked || | 141 | vport->stat_data_blocked || |
@@ -74,13 +144,15 @@ lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) | |||
74 | spin_unlock_irqrestore(shost->host_lock, flags); | 144 | spin_unlock_irqrestore(shost->host_lock, flags); |
75 | return; | 145 | return; |
76 | } | 146 | } |
77 | latency = jiffies_to_msecs(jiffies - lpfc_cmd->start_time); | ||
78 | 147 | ||
79 | if (phba->bucket_type == LPFC_LINEAR_BUCKET) { | 148 | if (phba->bucket_type == LPFC_LINEAR_BUCKET) { |
80 | i = (latency + phba->bucket_step - 1 - phba->bucket_base)/ | 149 | i = (latency + phba->bucket_step - 1 - phba->bucket_base)/ |
81 | phba->bucket_step; | 150 | phba->bucket_step; |
82 | if (i >= LPFC_MAX_BUCKET_COUNT) | 151 | /* check array subscript bounds */ |
83 | i = LPFC_MAX_BUCKET_COUNT; | 152 | if (i < 0) |
153 | i = 0; | ||
154 | else if (i >= LPFC_MAX_BUCKET_COUNT) | ||
155 | i = LPFC_MAX_BUCKET_COUNT - 1; | ||
84 | } else { | 156 | } else { |
85 | for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++) | 157 | for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++) |
86 | if (latency <= (phba->bucket_base + | 158 | if (latency <= (phba->bucket_base + |
@@ -92,7 +164,6 @@ lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) | |||
92 | spin_unlock_irqrestore(shost->host_lock, flags); | 164 | spin_unlock_irqrestore(shost->host_lock, flags); |
93 | } | 165 | } |
94 | 166 | ||
95 | |||
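The bounds check added above keeps the computed linear-bucket index inside [0, LPFC_MAX_BUCKET_COUNT - 1] instead of letting an out-of-range latency walk off the end of the histogram array. A small worked sketch of the same arithmetic with made-up parameters (base 0 ms, step 50 ms, 16 buckets):

#define EX_BUCKET_BASE	0	/* example values only */
#define EX_BUCKET_STEP	50
#define EX_BUCKET_COUNT	16

/* 10 ms   -> (10 + 50 - 1 - 0) / 50 = 1
 * 220 ms  -> (220 + 49) / 50        = 5
 * 5000 ms -> 100, clamped to 15 (the last bucket)
 */
static int ex_bucket_index(long latency_ms)
{
	long i = (latency_ms + EX_BUCKET_STEP - 1 - EX_BUCKET_BASE) /
		 EX_BUCKET_STEP;

	if (i < 0)
		i = 0;
	else if (i >= EX_BUCKET_COUNT)
		i = EX_BUCKET_COUNT - 1;
	return i;
}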
96 | /** | 167 | /** |
97 | * lpfc_send_sdev_queuedepth_change_event: Posts a queuedepth change | 168 | * lpfc_send_sdev_queuedepth_change_event: Posts a queuedepth change |
98 | * event. | 169 | * event. |
@@ -148,12 +219,19 @@ lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba, | |||
148 | return; | 219 | return; |
149 | } | 220 | } |
150 | 221 | ||
151 | /* | 222 | /** |
152 | * This function is called with no lock held when there is a resource | 223 | * lpfc_rampdown_queue_depth: Post RAMP_DOWN_QUEUE event to worker thread. |
153 | * error in driver or in firmware. | 224 | * @phba: The Hba for which this call is being executed. |
154 | */ | 225 | * |
226 | * This routine is called when there is a resource error in the driver or firmware. | ||
227 | * This routine posts WORKER_RAMP_DOWN_QUEUE event for @phba. This routine | ||
228 | * posts at most 1 event each second. This routine wakes up worker thread of | ||
229 | * @phba to process the WORKER_RAMP_DOWN_QUEUE event. | ||
230 | * | ||
231 | * This routine should be called with no lock held. | ||
232 | **/ | ||
155 | void | 233 | void |
156 | lpfc_adjust_queue_depth(struct lpfc_hba *phba) | 234 | lpfc_rampdown_queue_depth(struct lpfc_hba *phba) |
157 | { | 235 | { |
158 | unsigned long flags; | 236 | unsigned long flags; |
159 | uint32_t evt_posted; | 237 | uint32_t evt_posted; |
@@ -182,10 +260,17 @@ lpfc_adjust_queue_depth(struct lpfc_hba *phba) | |||
182 | return; | 260 | return; |
183 | } | 261 | } |
184 | 262 | ||
185 | /* | 263 | /** |
186 | * This function is called with no lock held when there is a successful | 264 | * lpfc_rampup_queue_depth: Post RAMP_UP_QUEUE event for worker thread. |
187 | * SCSI command completion. | 265 | * @phba: The Hba for which this call is being executed. |
188 | */ | 266 | * |
267 | * This routine posts a WORKER_RAMP_UP_QUEUE event for @phba vport. This routine | ||
268 | * posts at most 1 event every 5 minutes after last_ramp_up_time or | ||
269 | * last_rsrc_error_time. This routine wakes up the worker thread of @phba | ||
270 | * to process the WORKER_RAMP_UP_QUEUE event. | ||
271 | * | ||
272 | * This routine should be called with no lock held. | ||
273 | **/ | ||
189 | static inline void | 274 | static inline void |
190 | lpfc_rampup_queue_depth(struct lpfc_vport *vport, | 275 | lpfc_rampup_queue_depth(struct lpfc_vport *vport, |
191 | struct scsi_device *sdev) | 276 | struct scsi_device *sdev) |
@@ -217,6 +302,14 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport, | |||
217 | return; | 302 | return; |
218 | } | 303 | } |
219 | 304 | ||
305 | /** | ||
306 | * lpfc_ramp_down_queue_handler: WORKER_RAMP_DOWN_QUEUE event handler. | ||
307 | * @phba: The Hba for which this call is being executed. | ||
308 | * | ||
309 | * This routine is called to process WORKER_RAMP_DOWN_QUEUE event for worker | ||
310 | * thread. This routine reduces the queue depth for all scsi devices on each vport | ||
311 | * associated with @phba. | ||
312 | **/ | ||
220 | void | 313 | void |
221 | lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) | 314 | lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) |
222 | { | 315 | { |
@@ -267,6 +360,15 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) | |||
267 | atomic_set(&phba->num_cmd_success, 0); | 360 | atomic_set(&phba->num_cmd_success, 0); |
268 | } | 361 | } |
269 | 362 | ||
363 | /** | ||
364 | * lpfc_ramp_up_queue_handler: WORKER_RAMP_UP_QUEUE event handler. | ||
365 | * @phba: The Hba for which this call is being executed. | ||
366 | * | ||
367 | * This routine is called to process WORKER_RAMP_UP_QUEUE event for worker | ||
368 | * thread. This routine increases the queue depth for all scsi devices on each vport | ||
369 | * associated with @phba by 1. This routine also sets @phba num_rsrc_err and | ||
370 | * num_cmd_success to zero. | ||
371 | **/ | ||
270 | void | 372 | void |
271 | lpfc_ramp_up_queue_handler(struct lpfc_hba *phba) | 373 | lpfc_ramp_up_queue_handler(struct lpfc_hba *phba) |
272 | { | 374 | { |
@@ -336,14 +438,21 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba) | |||
336 | lpfc_destroy_vport_work_array(phba, vports); | 438 | lpfc_destroy_vport_work_array(phba, vports); |
337 | } | 439 | } |
338 | 440 | ||
339 | /* | 441 | /** |
442 | * lpfc_new_scsi_buf: Scsi buffer allocator. | ||
443 | * @vport: The virtual port for which this call being executed. | ||
444 | * | ||
340 | * This routine allocates a scsi buffer, which contains all the necessary | 445 | * This routine allocates a scsi buffer, which contains all the necessary |
341 | * information needed to initiate a SCSI I/O. The non-DMAable buffer region | 446 | * information needed to initiate a SCSI I/O. The non-DMAable buffer region |
342 | * contains information to build the IOCB. The DMAable region contains | 447 | * contains information to build the IOCB. The DMAable region contains |
343 | * memory for the FCP CMND, FCP RSP, and the inital BPL. In addition to | 448 | * memory for the FCP CMND, FCP RSP, and the initial BPL. In addition to |
344 | * allocating memeory, the FCP CMND and FCP RSP BDEs are setup in the BPL | 449 | * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL |
345 | * and the BPL BDE is setup in the IOCB. | 450 | * and the BPL BDE is setup in the IOCB. |
346 | */ | 451 | * |
452 | * Return codes: | ||
453 | * NULL - Error | ||
454 | * Pointer to lpfc_scsi_buf data structure - Success | ||
455 | **/ | ||
347 | static struct lpfc_scsi_buf * | 456 | static struct lpfc_scsi_buf * |
348 | lpfc_new_scsi_buf(struct lpfc_vport *vport) | 457 | lpfc_new_scsi_buf(struct lpfc_vport *vport) |
349 | { | 458 | { |
@@ -407,14 +516,14 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport) | |||
407 | bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd)); | 516 | bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd)); |
408 | bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd); | 517 | bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd); |
409 | bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64; | 518 | bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64; |
410 | bpl[0].tus.w = le32_to_cpu(bpl->tus.w); | 519 | bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w); |
411 | 520 | ||
412 | /* Setup the physical region for the FCP RSP */ | 521 | /* Setup the physical region for the FCP RSP */ |
413 | bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp)); | 522 | bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp)); |
414 | bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp)); | 523 | bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp)); |
415 | bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp); | 524 | bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp); |
416 | bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64; | 525 | bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64; |
417 | bpl[1].tus.w = le32_to_cpu(bpl->tus.w); | 526 | bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w); |
418 | 527 | ||
419 | /* | 528 | /* |
420 | * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf, | 529 | * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf, |
@@ -422,7 +531,8 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport) | |||
422 | */ | 531 | */ |
423 | iocb = &psb->cur_iocbq.iocb; | 532 | iocb = &psb->cur_iocbq.iocb; |
424 | iocb->un.fcpi64.bdl.ulpIoTag32 = 0; | 533 | iocb->un.fcpi64.bdl.ulpIoTag32 = 0; |
425 | if (phba->sli_rev == 3) { | 534 | if ((phba->sli_rev == 3) && |
535 | !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { | ||
426 | /* fill in immediate fcp command BDE */ | 536 | /* fill in immediate fcp command BDE */ |
427 | iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED; | 537 | iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED; |
428 | iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); | 538 | iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); |
@@ -452,6 +562,17 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport) | |||
452 | return psb; | 562 | return psb; |
453 | } | 563 | } |
454 | 564 | ||
565 | /** | ||
566 | * lpfc_get_scsi_buf: Get a scsi buffer from lpfc_scsi_buf_list list of Hba. | ||
567 | * @phba: The Hba for which this call is being executed. | ||
568 | * | ||
569 | * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list | ||
570 | * and returns it to the caller. | ||
571 | * | ||
572 | * Return codes: | ||
573 | * NULL - Error | ||
574 | * Pointer to lpfc_scsi_buf - Success | ||
575 | **/ | ||
455 | static struct lpfc_scsi_buf* | 576 | static struct lpfc_scsi_buf* |
456 | lpfc_get_scsi_buf(struct lpfc_hba * phba) | 577 | lpfc_get_scsi_buf(struct lpfc_hba * phba) |
457 | { | 578 | { |
@@ -464,11 +585,20 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba) | |||
464 | if (lpfc_cmd) { | 585 | if (lpfc_cmd) { |
465 | lpfc_cmd->seg_cnt = 0; | 586 | lpfc_cmd->seg_cnt = 0; |
466 | lpfc_cmd->nonsg_phys = 0; | 587 | lpfc_cmd->nonsg_phys = 0; |
588 | lpfc_cmd->prot_seg_cnt = 0; | ||
467 | } | 589 | } |
468 | spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); | 590 | spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); |
469 | return lpfc_cmd; | 591 | return lpfc_cmd; |
470 | } | 592 | } |
471 | 593 | ||
594 | /** | ||
595 | * lpfc_release_scsi_buf: Return a scsi buffer back to hba lpfc_scsi_buf_list list. | ||
596 | * @phba: The Hba for which this call is being executed. | ||
597 | * @psb: The scsi buffer which is being released. | ||
598 | * | ||
599 | * This routine releases @psb scsi buffer by adding it to tail of @phba | ||
600 | * lpfc_scsi_buf_list list. | ||
601 | **/ | ||
472 | static void | 602 | static void |
473 | lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) | 603 | lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) |
474 | { | 604 | { |
@@ -480,6 +610,20 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) | |||
480 | spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); | 610 | spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); |
481 | } | 611 | } |
482 | 612 | ||
613 | /** | ||
614 | * lpfc_scsi_prep_dma_buf: Routine to do DMA mapping for scsi buffer. | ||
615 | * @phba: The Hba for which this call is being executed. | ||
616 | * @lpfc_cmd: The scsi buffer which is going to be mapped. | ||
617 | * | ||
618 | * This routine does the pci dma mapping for scatter-gather list of scsi cmnd | ||
619 | * field of @lpfc_cmd. This routine scans through the sg elements and formats the | ||
620 | * BDEs. This routine also initializes all IOCB fields which are dependent on the | ||
621 | * scsi command request buffer. | ||
622 | * | ||
623 | * Return codes: | ||
624 | * 1 - Error | ||
625 | * 0 - Success | ||
626 | **/ | ||
483 | static int | 627 | static int |
484 | lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) | 628 | lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) |
485 | { | 629 | { |
@@ -516,7 +660,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) | |||
516 | lpfc_cmd->seg_cnt = nseg; | 660 | lpfc_cmd->seg_cnt = nseg; |
517 | if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { | 661 | if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { |
518 | printk(KERN_ERR "%s: Too many sg segments from " | 662 | printk(KERN_ERR "%s: Too many sg segments from " |
519 | "dma_map_sg. Config %d, seg_cnt %d", | 663 | "dma_map_sg. Config %d, seg_cnt %d\n", |
520 | __func__, phba->cfg_sg_seg_cnt, | 664 | __func__, phba->cfg_sg_seg_cnt, |
521 | lpfc_cmd->seg_cnt); | 665 | lpfc_cmd->seg_cnt); |
522 | scsi_dma_unmap(scsi_cmnd); | 666 | scsi_dma_unmap(scsi_cmnd); |
@@ -535,6 +679,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) | |||
535 | scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) { | 679 | scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) { |
536 | physaddr = sg_dma_address(sgel); | 680 | physaddr = sg_dma_address(sgel); |
537 | if (phba->sli_rev == 3 && | 681 | if (phba->sli_rev == 3 && |
682 | !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && | ||
538 | nseg <= LPFC_EXT_DATA_BDE_COUNT) { | 683 | nseg <= LPFC_EXT_DATA_BDE_COUNT) { |
539 | data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; | 684 | data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; |
540 | data_bde->tus.f.bdeSize = sg_dma_len(sgel); | 685 | data_bde->tus.f.bdeSize = sg_dma_len(sgel); |
@@ -560,7 +705,8 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) | |||
560 | * explicitly reinitialized and for SLI-3 the extended bde count is | 705 | * explicitly reinitialized and for SLI-3 the extended bde count is |
561 | * explicitly reinitialized since all iocb memory resources are reused. | 706 | * explicitly reinitialized since all iocb memory resources are reused. |
562 | */ | 707 | */ |
563 | if (phba->sli_rev == 3) { | 708 | if (phba->sli_rev == 3 && |
709 | !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { | ||
564 | if (num_bde > LPFC_EXT_DATA_BDE_COUNT) { | 710 | if (num_bde > LPFC_EXT_DATA_BDE_COUNT) { |
565 | /* | 711 | /* |
566 | * The extended IOCB format can only fit 3 BDE or a BPL. | 712 | * The extended IOCB format can only fit 3 BDE or a BPL. |
@@ -587,7 +733,683 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) | |||
587 | ((num_bde + 2) * sizeof(struct ulp_bde64)); | 733 | ((num_bde + 2) * sizeof(struct ulp_bde64)); |
588 | } | 734 | } |
589 | fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); | 735 | fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); |
736 | |||
737 | /* | ||
738 | * Due to difference in data length between DIF/non-DIF paths, | ||
739 | * we need to set word 4 of IOCB here | ||
740 | */ | ||
741 | iocb_cmd->un.fcpi.fcpi_parm = le32_to_cpu(scsi_bufflen(scsi_cmnd)); | ||
742 | return 0; | ||
743 | } | ||
744 | |||
745 | /* | ||
746 | * Given a scsi cmnd, determine the BlockGuard profile to be used | ||
747 | * with the cmd | ||
748 | */ | ||
749 | static int | ||
750 | lpfc_sc_to_sli_prof(struct scsi_cmnd *sc) | ||
751 | { | ||
752 | uint8_t guard_type = scsi_host_get_guard(sc->device->host); | ||
753 | uint8_t ret_prof = LPFC_PROF_INVALID; | ||
754 | |||
755 | if (guard_type == SHOST_DIX_GUARD_IP) { | ||
756 | switch (scsi_get_prot_op(sc)) { | ||
757 | case SCSI_PROT_READ_INSERT: | ||
758 | case SCSI_PROT_WRITE_STRIP: | ||
759 | ret_prof = LPFC_PROF_AST2; | ||
760 | break; | ||
761 | |||
762 | case SCSI_PROT_READ_STRIP: | ||
763 | case SCSI_PROT_WRITE_INSERT: | ||
764 | ret_prof = LPFC_PROF_A1; | ||
765 | break; | ||
766 | |||
767 | case SCSI_PROT_READ_CONVERT: | ||
768 | case SCSI_PROT_WRITE_CONVERT: | ||
769 | ret_prof = LPFC_PROF_AST1; | ||
770 | break; | ||
771 | |||
772 | case SCSI_PROT_READ_PASS: | ||
773 | case SCSI_PROT_WRITE_PASS: | ||
774 | case SCSI_PROT_NORMAL: | ||
775 | default: | ||
776 | printk(KERN_ERR "Bad op/guard:%d/%d combination\n", | ||
777 | scsi_get_prot_op(sc), guard_type); | ||
778 | break; | ||
779 | |||
780 | } | ||
781 | } else if (guard_type == SHOST_DIX_GUARD_CRC) { | ||
782 | switch (scsi_get_prot_op(sc)) { | ||
783 | case SCSI_PROT_READ_STRIP: | ||
784 | case SCSI_PROT_WRITE_INSERT: | ||
785 | ret_prof = LPFC_PROF_A1; | ||
786 | break; | ||
787 | |||
788 | case SCSI_PROT_READ_PASS: | ||
789 | case SCSI_PROT_WRITE_PASS: | ||
790 | ret_prof = LPFC_PROF_C1; | ||
791 | break; | ||
792 | |||
793 | case SCSI_PROT_READ_CONVERT: | ||
794 | case SCSI_PROT_WRITE_CONVERT: | ||
795 | case SCSI_PROT_READ_INSERT: | ||
796 | case SCSI_PROT_WRITE_STRIP: | ||
797 | case SCSI_PROT_NORMAL: | ||
798 | default: | ||
799 | printk(KERN_ERR "Bad op/guard:%d/%d combination\n", | ||
800 | scsi_get_prot_op(sc), guard_type); | ||
801 | break; | ||
802 | } | ||
803 | } else { | ||
804 | /* unsupported format */ | ||
805 | BUG(); | ||
806 | } | ||
807 | |||
808 | return ret_prof; | ||
809 | } | ||
810 | |||
811 | struct scsi_dif_tuple { | ||
812 | __be16 guard_tag; /* Checksum */ | ||
813 | __be16 app_tag; /* Opaque storage */ | ||
814 | __be32 ref_tag; /* Target LBA or indirect LBA */ | ||
815 | }; | ||
816 | |||
817 | static inline unsigned | ||
818 | lpfc_cmd_blksize(struct scsi_cmnd *sc) | ||
819 | { | ||
820 | return sc->device->sector_size; | ||
821 | } | ||
822 | |||
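Each protection tuple in the layout above is 8 bytes (guard, app and ref tags), one per logical block, so when DIF travels on the wire the FCP data length grows by 8 bytes per block; the DIF-aware DMA prep further down makes exactly this adjustment for Type 1. A standalone sketch of the calculation with a worked example in the comment (helper name invented):

/* 32768-byte transfer of 512-byte blocks:
 *   64 blocks * 8 bytes of DIF = 512 extra bytes -> 33280 on the wire.
 */
static unsigned int ex_dif_adjusted_len(unsigned int fcpdl, unsigned int blksize)
{
	unsigned int diflen = (fcpdl / blksize) * 8;	/* one 8-byte tuple per block */

	return fcpdl + diflen;
}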
823 | /** | ||
824 | * lpfc_get_cmd_dif_parms - Extract DIF parameters from SCSI command | ||
825 | * @sc: in: SCSI command | ||
826 | * @apptagmask: out: app tag mask | ||
827 | * @apptagval: out: app tag value | ||
828 | * @reftag: out: ref tag (reference tag) | ||
829 | * | ||
830 | * Description: | ||
831 | * Extract DIF parameters from the command if possible. Otherwise, | ||
832 | * use default parameters. | ||
833 | * | ||
834 | **/ | ||
835 | static inline void | ||
836 | lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask, | ||
837 | uint16_t *apptagval, uint32_t *reftag) | ||
838 | { | ||
839 | struct scsi_dif_tuple *spt; | ||
840 | unsigned char op = scsi_get_prot_op(sc); | ||
841 | unsigned int protcnt = scsi_prot_sg_count(sc); | ||
842 | static int cnt; | ||
843 | |||
844 | if (protcnt && (op == SCSI_PROT_WRITE_STRIP || | ||
845 | op == SCSI_PROT_WRITE_PASS || | ||
846 | op == SCSI_PROT_WRITE_CONVERT)) { | ||
847 | |||
848 | cnt++; | ||
849 | spt = page_address(sg_page(scsi_prot_sglist(sc))) + | ||
850 | scsi_prot_sglist(sc)[0].offset; | ||
851 | *apptagmask = 0; | ||
852 | *apptagval = 0; | ||
853 | *reftag = cpu_to_be32(spt->ref_tag); | ||
854 | |||
855 | } else { | ||
856 | /* SBC defines ref tag to be lower 32bits of LBA */ | ||
857 | *reftag = (uint32_t) (0xffffffff & scsi_get_lba(sc)); | ||
858 | *apptagmask = 0; | ||
859 | *apptagval = 0; | ||
860 | } | ||
861 | } | ||
862 | |||
863 | /* | ||
864 | * This function sets up buffer list for protection groups of | ||
865 | * type LPFC_PG_TYPE_NO_DIF | ||
866 | * | ||
867 | * This is usually used when the HBA is instructed to generate | ||
868 | * DIFs and insert them into data stream (or strip DIF from | ||
869 | * incoming data stream) | ||
870 | * | ||
871 | * The buffer list consists of just one protection group described | ||
872 | * below: | ||
873 | * +-------------------------+ | ||
874 | * start of prot group --> | PDE_1 | | ||
875 | * +-------------------------+ | ||
876 | * | Data BDE | | ||
877 | * +-------------------------+ | ||
878 | * |more Data BDE's ... (opt)| | ||
879 | * +-------------------------+ | ||
880 | * | ||
881 | * @sc: pointer to scsi command we're working on | ||
882 | * @bpl: pointer to buffer list for protection groups | ||
883 | * @datacnt: number of segments of data that have been dma mapped | ||
884 | * | ||
885 | * Note: Data s/g buffers have been dma mapped | ||
886 | */ | ||
887 | static int | ||
888 | lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, | ||
889 | struct ulp_bde64 *bpl, int datasegcnt) | ||
890 | { | ||
891 | struct scatterlist *sgde = NULL; /* s/g data entry */ | ||
892 | struct lpfc_pde *pde1 = NULL; | ||
893 | dma_addr_t physaddr; | ||
894 | int i = 0, num_bde = 0; | ||
895 | int datadir = sc->sc_data_direction; | ||
896 | int prof = LPFC_PROF_INVALID; | ||
897 | unsigned blksize; | ||
898 | uint32_t reftag; | ||
899 | uint16_t apptagmask, apptagval; | ||
900 | |||
901 | pde1 = (struct lpfc_pde *) bpl; | ||
902 | prof = lpfc_sc_to_sli_prof(sc); | ||
903 | |||
904 | if (prof == LPFC_PROF_INVALID) | ||
905 | goto out; | ||
906 | |||
907 | /* extract some info from the scsi command for PDE1*/ | ||
908 | blksize = lpfc_cmd_blksize(sc); | ||
909 | lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag); | ||
910 | |||
911 | /* setup PDE1 with what we have */ | ||
912 | lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize, | ||
913 | BG_EC_STOP_ERR); | ||
914 | lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag); | ||
915 | |||
916 | num_bde++; | ||
917 | bpl++; | ||
918 | |||
919 | /* assumption: caller has already run dma_map_sg on command data */ | ||
920 | scsi_for_each_sg(sc, sgde, datasegcnt, i) { | ||
921 | physaddr = sg_dma_address(sgde); | ||
922 | bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr)); | ||
923 | bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); | ||
924 | bpl->tus.f.bdeSize = sg_dma_len(sgde); | ||
925 | if (datadir == DMA_TO_DEVICE) | ||
926 | bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; | ||
927 | else | ||
928 | bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; | ||
929 | bpl->tus.w = le32_to_cpu(bpl->tus.w); | ||
930 | bpl++; | ||
931 | num_bde++; | ||
932 | } | ||
933 | |||
934 | out: | ||
935 | return num_bde; | ||
936 | } | ||
937 | |||
938 | /* | ||
939 | * This function sets up buffer list for protection groups of | ||
940 | * type LPFC_PG_TYPE_DIF_BUF | ||
941 | * | ||
942 | * This is usually used when DIFs are in their own buffers, | ||
943 | * separate from the data. The HBA can then be instructed | ||
944 | * to place the DIFs in the outgoing stream. For read operations, | ||
945 | * the HBA could extract the DIFs and place them in DIF buffers. | ||
946 | * | ||
947 | * The buffer list for this type consists of one or more of the | ||
948 | * protection groups described below: | ||
949 | * +-------------------------+ | ||
950 | * start of first prot group --> | PDE_1 | | ||
951 | * +-------------------------+ | ||
952 | * | PDE_3 (Prot BDE) | | ||
953 | * +-------------------------+ | ||
954 | * | Data BDE | | ||
955 | * +-------------------------+ | ||
956 | * |more Data BDE's ... (opt)| | ||
957 | * +-------------------------+ | ||
958 | * start of new prot group --> | PDE_1 | | ||
959 | * +-------------------------+ | ||
960 | * | ... | | ||
961 | * +-------------------------+ | ||
962 | * | ||
963 | * @sc: pointer to scsi command we're working on | ||
964 | * @bpl: pointer to buffer list for protection groups | ||
965 | * @datacnt: number of segments of data that have been dma mapped | ||
966 | * @protcnt: number of segment of protection data that have been dma mapped | ||
967 | * | ||
968 | * Note: It is assumed that both data and protection s/g buffers have been | ||
969 | * mapped for DMA | ||
970 | */ | ||
971 | static int | ||
972 | lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, | ||
973 | struct ulp_bde64 *bpl, int datacnt, int protcnt) | ||
974 | { | ||
975 | struct scatterlist *sgde = NULL; /* s/g data entry */ | ||
976 | struct scatterlist *sgpe = NULL; /* s/g prot entry */ | ||
977 | struct lpfc_pde *pde1 = NULL; | ||
978 | struct ulp_bde64 *prot_bde = NULL; | ||
979 | dma_addr_t dataphysaddr, protphysaddr; | ||
980 | unsigned short curr_data = 0, curr_prot = 0; | ||
981 | unsigned int split_offset, protgroup_len; | ||
982 | unsigned int protgrp_blks, protgrp_bytes; | ||
983 | unsigned int remainder, subtotal; | ||
984 | int prof = LPFC_PROF_INVALID; | ||
985 | int datadir = sc->sc_data_direction; | ||
986 | unsigned char pgdone = 0, alldone = 0; | ||
987 | unsigned blksize; | ||
988 | uint32_t reftag; | ||
989 | uint16_t apptagmask, apptagval; | ||
990 | int num_bde = 0; | ||
991 | |||
992 | sgpe = scsi_prot_sglist(sc); | ||
993 | sgde = scsi_sglist(sc); | ||
994 | |||
995 | if (!sgpe || !sgde) { | ||
996 | lpfc_printf_log(phba, KERN_ERR, LOG_FCP, | ||
997 | "9020 Invalid s/g entry: data=0x%p prot=0x%p\n", | ||
998 | sgpe, sgde); | ||
999 | return 0; | ||
1000 | } | ||
1001 | |||
1002 | prof = lpfc_sc_to_sli_prof(sc); | ||
1003 | if (prof == LPFC_PROF_INVALID) | ||
1004 | goto out; | ||
1005 | |||
1006 | /* extract some info from the scsi command for PDE1*/ | ||
1007 | blksize = lpfc_cmd_blksize(sc); | ||
1008 | lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag); | ||
1009 | |||
1010 | split_offset = 0; | ||
1011 | do { | ||
1012 | /* setup the first PDE_1 */ | ||
1013 | pde1 = (struct lpfc_pde *) bpl; | ||
1014 | |||
1015 | lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize, | ||
1016 | BG_EC_STOP_ERR); | ||
1017 | lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag); | ||
1018 | |||
1019 | num_bde++; | ||
1020 | bpl++; | ||
1021 | |||
1022 | /* setup the first BDE that points to protection buffer */ | ||
1023 | prot_bde = (struct ulp_bde64 *) bpl; | ||
1024 | protphysaddr = sg_dma_address(sgpe); | ||
1025 | prot_bde->addrLow = le32_to_cpu(putPaddrLow(protphysaddr)); | ||
1026 | prot_bde->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr)); | ||
1027 | protgroup_len = sg_dma_len(sgpe); | ||
1028 | |||
1029 | |||
1030 | /* must be integer multiple of the DIF block length */ | ||
1031 | BUG_ON(protgroup_len % 8); | ||
1032 | |||
1033 | protgrp_blks = protgroup_len / 8; | ||
1034 | protgrp_bytes = protgrp_blks * blksize; | ||
1035 | |||
1036 | prot_bde->tus.f.bdeSize = protgroup_len; | ||
1037 | if (datadir == DMA_TO_DEVICE) | ||
1038 | prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; | ||
1039 | else | ||
1040 | prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; | ||
1041 | prot_bde->tus.w = le32_to_cpu(bpl->tus.w); | ||
1042 | |||
1043 | curr_prot++; | ||
1044 | num_bde++; | ||
1045 | |||
1046 | /* setup BDE's for data blocks associated with DIF data */ | ||
1047 | pgdone = 0; | ||
1048 | subtotal = 0; /* total bytes processed for current prot grp */ | ||
1049 | while (!pgdone) { | ||
1050 | if (!sgde) { | ||
1051 | printk(KERN_ERR "%s Invalid data segment\n", | ||
1052 | __func__); | ||
1053 | return 0; | ||
1054 | } | ||
1055 | bpl++; | ||
1056 | dataphysaddr = sg_dma_address(sgde) + split_offset; | ||
1057 | bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr)); | ||
1058 | bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr)); | ||
1059 | |||
1060 | remainder = sg_dma_len(sgde) - split_offset; | ||
1061 | |||
1062 | if ((subtotal + remainder) <= protgrp_bytes) { | ||
1063 | /* we can use this whole buffer */ | ||
1064 | bpl->tus.f.bdeSize = remainder; | ||
1065 | split_offset = 0; | ||
1066 | |||
1067 | if ((subtotal + remainder) == protgrp_bytes) | ||
1068 | pgdone = 1; | ||
1069 | } else { | ||
1070 | /* must split this buffer with next prot grp */ | ||
1071 | bpl->tus.f.bdeSize = protgrp_bytes - subtotal; | ||
1072 | split_offset += bpl->tus.f.bdeSize; | ||
1073 | } | ||
1074 | |||
1075 | subtotal += bpl->tus.f.bdeSize; | ||
1076 | |||
1077 | if (datadir == DMA_TO_DEVICE) | ||
1078 | bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; | ||
1079 | else | ||
1080 | bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; | ||
1081 | bpl->tus.w = le32_to_cpu(bpl->tus.w); | ||
1082 | |||
1083 | num_bde++; | ||
1084 | curr_data++; | ||
1085 | |||
1086 | if (split_offset) | ||
1087 | break; | ||
1088 | |||
1089 | /* Move to the next s/g segment if possible */ | ||
1090 | sgde = sg_next(sgde); | ||
1091 | } | ||
1092 | |||
1093 | /* are we done ? */ | ||
1094 | if (curr_prot == protcnt) { | ||
1095 | alldone = 1; | ||
1096 | } else if (curr_prot < protcnt) { | ||
1097 | /* advance to next prot buffer */ | ||
1098 | sgpe = sg_next(sgpe); | ||
1099 | bpl++; | ||
1100 | |||
1101 | /* update the reference tag */ | ||
1102 | reftag += protgrp_blks; | ||
1103 | } else { | ||
1104 | /* if we're here, we have a bug */ | ||
1105 | printk(KERN_ERR "BLKGRD: bug in %s\n", __func__); | ||
1106 | } | ||
1107 | |||
1108 | } while (!alldone); | ||
1109 | |||
1110 | out: | ||
1111 | |||
1112 | |||
1113 | return num_bde; | ||
1114 | } | ||
1115 | /* | ||
1116 | * Given a SCSI command that supports DIF, determine composition of protection | ||
1117 | * groups involved in setting up buffer lists | ||
1118 | * | ||
1119 | * Returns: | ||
1120 | * the LPFC_PG_TYPE_* protection group type for DIF (for both read and write) | ||
1121 | */ | ||
1122 | static int | ||
1123 | lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc) | ||
1124 | { | ||
1125 | int ret = LPFC_PG_TYPE_INVALID; | ||
1126 | unsigned char op = scsi_get_prot_op(sc); | ||
1127 | |||
1128 | switch (op) { | ||
1129 | case SCSI_PROT_READ_STRIP: | ||
1130 | case SCSI_PROT_WRITE_INSERT: | ||
1131 | ret = LPFC_PG_TYPE_NO_DIF; | ||
1132 | break; | ||
1133 | case SCSI_PROT_READ_INSERT: | ||
1134 | case SCSI_PROT_WRITE_STRIP: | ||
1135 | case SCSI_PROT_READ_PASS: | ||
1136 | case SCSI_PROT_WRITE_PASS: | ||
1137 | case SCSI_PROT_WRITE_CONVERT: | ||
1138 | case SCSI_PROT_READ_CONVERT: | ||
1139 | ret = LPFC_PG_TYPE_DIF_BUF; | ||
1140 | break; | ||
1141 | default: | ||
1142 | lpfc_printf_log(phba, KERN_ERR, LOG_FCP, | ||
1143 | "9021 Unsupported protection op:%d\n", op); | ||
1144 | break; | ||
1145 | } | ||
1146 | |||
1147 | return ret; | ||
1148 | } | ||
1149 | |||
1150 | /* | ||
1151 | * This is the protection/DIF aware version of | ||
1152 | * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the | ||
1153 | * two functions eventually, but for now, it's here | ||
1154 | */ | ||
1155 | static int | ||
1156 | lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, | ||
1157 | struct lpfc_scsi_buf *lpfc_cmd) | ||
1158 | { | ||
1159 | struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; | ||
1160 | struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; | ||
1161 | struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl; | ||
1162 | IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; | ||
1163 | uint32_t num_bde = 0; | ||
1164 | int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; | ||
1165 | int prot_group_type = 0; | ||
1166 | int diflen, fcpdl; | ||
1167 | unsigned blksize; | ||
1168 | |||
1169 | /* | ||
1170 | * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd | ||
1171 | * fcp_rsp regions to the first data bde entry | ||
1172 | */ | ||
1173 | bpl += 2; | ||
1174 | if (scsi_sg_count(scsi_cmnd)) { | ||
1175 | /* | ||
1176 | * The driver stores the segment count returned from pci_map_sg | ||
1177 | * because this a count of dma-mappings used to map the use_sg | ||
1178 | * pages. They are not guaranteed to be the same for those | ||
1179 | * architectures that implement an IOMMU. | ||
1180 | */ | ||
1181 | datasegcnt = dma_map_sg(&phba->pcidev->dev, | ||
1182 | scsi_sglist(scsi_cmnd), | ||
1183 | scsi_sg_count(scsi_cmnd), datadir); | ||
1184 | if (unlikely(!datasegcnt)) | ||
1185 | return 1; | ||
1186 | |||
1187 | lpfc_cmd->seg_cnt = datasegcnt; | ||
1188 | if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { | ||
1189 | printk(KERN_ERR "%s: Too many sg segments from " | ||
1190 | "dma_map_sg. Config %d, seg_cnt %d\n", | ||
1191 | __func__, phba->cfg_sg_seg_cnt, | ||
1192 | lpfc_cmd->seg_cnt); | ||
1193 | scsi_dma_unmap(scsi_cmnd); | ||
1194 | return 1; | ||
1195 | } | ||
1196 | |||
1197 | prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); | ||
1198 | |||
1199 | switch (prot_group_type) { | ||
1200 | case LPFC_PG_TYPE_NO_DIF: | ||
1201 | num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl, | ||
1202 | datasegcnt); | ||
1203 | /* we should have 2 or more entries in buffer list */ | ||
1204 | if (num_bde < 2) | ||
1205 | goto err; | ||
1206 | break; | ||
1207 | case LPFC_PG_TYPE_DIF_BUF:{ | ||
1208 | /* | ||
1209 | * This type indicates that protection buffers are | ||
1210 | * passed to the driver, so that needs to be prepared | ||
1211 | * for DMA | ||
1212 | */ | ||
1213 | protsegcnt = dma_map_sg(&phba->pcidev->dev, | ||
1214 | scsi_prot_sglist(scsi_cmnd), | ||
1215 | scsi_prot_sg_count(scsi_cmnd), datadir); | ||
1216 | if (unlikely(!protsegcnt)) { | ||
1217 | scsi_dma_unmap(scsi_cmnd); | ||
1218 | return 1; | ||
1219 | } | ||
1220 | |||
1221 | lpfc_cmd->prot_seg_cnt = protsegcnt; | ||
1222 | if (lpfc_cmd->prot_seg_cnt | ||
1223 | > phba->cfg_prot_sg_seg_cnt) { | ||
1224 | printk(KERN_ERR "%s: Too many prot sg segments " | ||
1225 | "from dma_map_sg. Config %d," | ||
1226 | "prot_seg_cnt %d\n", __func__, | ||
1227 | phba->cfg_prot_sg_seg_cnt, | ||
1228 | lpfc_cmd->prot_seg_cnt); | ||
1229 | dma_unmap_sg(&phba->pcidev->dev, | ||
1230 | scsi_prot_sglist(scsi_cmnd), | ||
1231 | scsi_prot_sg_count(scsi_cmnd), | ||
1232 | datadir); | ||
1233 | scsi_dma_unmap(scsi_cmnd); | ||
1234 | return 1; | ||
1235 | } | ||
1236 | |||
1237 | num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl, | ||
1238 | datasegcnt, protsegcnt); | ||
1239 | /* we should have 3 or more entries in buffer list */ | ||
1240 | if (num_bde < 3) | ||
1241 | goto err; | ||
1242 | break; | ||
1243 | } | ||
1244 | case LPFC_PG_TYPE_INVALID: | ||
1245 | default: | ||
1246 | lpfc_printf_log(phba, KERN_ERR, LOG_FCP, | ||
1247 | "9022 Unexpected protection group %i\n", | ||
1248 | prot_group_type); | ||
1249 | return 1; | ||
1250 | } | ||
1251 | } | ||
1252 | |||
1253 | /* | ||
1254 | * Finish initializing those IOCB fields that are dependent on the | ||
1255 | * scsi_cmnd request_buffer. Note that the bdeSize is explicitly | ||
1256 | * reinitialized since all iocb memory resources are used many times | ||
1257 | * for transmit, receive, and continuation bpl's. | ||
1258 | */ | ||
1259 | iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64)); | ||
1260 | iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64)); | ||
1261 | iocb_cmd->ulpBdeCount = 1; | ||
1262 | iocb_cmd->ulpLe = 1; | ||
1263 | |||
1264 | fcpdl = scsi_bufflen(scsi_cmnd); | ||
1265 | |||
1266 | if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) { | ||
1267 | /* | ||
1268 | * We are in DIF Type 1 mode | ||
1269 | * Every data block has an 8 byte DIF (trailer) | ||
1270 | * attached to it. Must adjust FCP data length | ||
1271 | */ | ||
1272 | blksize = lpfc_cmd_blksize(scsi_cmnd); | ||
1273 | diflen = (fcpdl / blksize) * 8; | ||
1274 | fcpdl += diflen; | ||
1275 | } | ||
1276 | fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); | ||
1277 | |||
1278 | /* | ||
1279 | * Due to difference in data length between DIF/non-DIF paths, | ||
1280 | * we need to set word 4 of IOCB here | ||
1281 | */ | ||
1282 | iocb_cmd->un.fcpi.fcpi_parm = fcpdl; | ||
1283 | |||
590 | return 0; | 1284 | return 0; |
1285 | err: | ||
1286 | lpfc_printf_log(phba, KERN_ERR, LOG_FCP, | ||
1287 | "9023 Could not setup all needed BDE's" | ||
1288 | "prot_group_type=%d, num_bde=%d\n", | ||
1289 | prot_group_type, num_bde); | ||
1290 | return 1; | ||
1291 | } | ||
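The Type 1 branch above grows the wire-level FCP data length to cover the 8-byte DIF tuple appended to every logical block. A minimal standalone sketch of that arithmetic (the block size and transfer length below are illustrative values, not taken from this patch):

    /* Sketch only: fcpDl adjustment under DIF Type 1. Each logical block
     * carries an extra 8-byte protection tuple on the wire.
     */
    unsigned int blksize = 512;                   /* illustrative lpfc_cmd_blksize() result */
    unsigned int fcpdl = 4096;                    /* illustrative scsi_bufflen() result */
    unsigned int diflen = (fcpdl / blksize) * 8;  /* 8 blocks * 8 bytes = 64 */

    fcpdl += diflen;                              /* 4160 bytes reported in fcpDl */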
1292 | |||
1293 | /* | ||
1294 | * This function checks for BlockGuard errors detected by | ||
1295 | * the HBA. In case of errors, the ASC/ASCQ fields in the | ||
1296 | * sense buffer will be set accordingly, paired with | ||
1297 | * ILLEGAL_REQUEST to signal to the kernel that the HBA | ||
1298 | * detected corruption. | ||
1299 | * | ||
1300 | * Returns: | ||
1301 | * 0 - No error found | ||
1302 | * 1 - BlockGuard error found | ||
1303 | * -1 - Internal error (bad profile, ...etc) | ||
1304 | */ | ||
1305 | static int | ||
1306 | lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, | ||
1307 | struct lpfc_iocbq *pIocbOut) | ||
1308 | { | ||
1309 | struct scsi_cmnd *cmd = lpfc_cmd->pCmd; | ||
1310 | struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg; | ||
1311 | int ret = 0; | ||
1312 | uint32_t bghm = bgf->bghm; | ||
1313 | uint32_t bgstat = bgf->bgstat; | ||
1314 | uint64_t failing_sector = 0; | ||
1315 | |||
1316 | printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%lx " | ||
1317 | "bgstat=0x%x bghm=0x%x\n", | ||
1318 | cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd), | ||
1319 | cmd->request->nr_sectors, bgstat, bghm); | ||
1320 | |||
1321 | spin_lock(&_dump_buf_lock); | ||
1322 | if (!_dump_buf_done) { | ||
1323 | printk(KERN_ERR "Saving Data for %u blocks to debugfs\n", | ||
1324 | (cmd->cmnd[7] << 8 | cmd->cmnd[8])); | ||
1325 | lpfc_debug_save_data(cmd); | ||
1326 | |||
1327 | /* If we have a prot sgl, save the DIF buffer */ | ||
1328 | if (lpfc_prot_group_type(phba, cmd) == | ||
1329 | LPFC_PG_TYPE_DIF_BUF) { | ||
1330 | printk(KERN_ERR "Saving DIF for %u blocks to debugfs\n", | ||
1331 | (cmd->cmnd[7] << 8 | cmd->cmnd[8])); | ||
1332 | lpfc_debug_save_dif(cmd); | ||
1333 | } | ||
1334 | |||
1335 | _dump_buf_done = 1; | ||
1336 | } | ||
1337 | spin_unlock(&_dump_buf_lock); | ||
1338 | |||
1339 | if (lpfc_bgs_get_invalid_prof(bgstat)) { | ||
1340 | cmd->result = ScsiResult(DID_ERROR, 0); | ||
1341 | printk(KERN_ERR "Invalid BlockGuard profile. bgstat:0x%x\n", | ||
1342 | bgstat); | ||
1343 | ret = (-1); | ||
1344 | goto out; | ||
1345 | } | ||
1346 | |||
1347 | if (lpfc_bgs_get_uninit_dif_block(bgstat)) { | ||
1348 | cmd->result = ScsiResult(DID_ERROR, 0); | ||
1349 | printk(KERN_ERR "Invalid BlockGuard DIF Block. bgstat:0x%x\n", | ||
1350 | bgstat); | ||
1351 | ret = (-1); | ||
1352 | goto out; | ||
1353 | } | ||
1354 | |||
1355 | if (lpfc_bgs_get_guard_err(bgstat)) { | ||
1356 | ret = 1; | ||
1357 | |||
1358 | scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, | ||
1359 | 0x10, 0x1); | ||
1360 | cmd->result = (DRIVER_SENSE|SUGGEST_DIE) << 24 | ||
1361 | | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); | ||
1362 | phba->bg_guard_err_cnt++; | ||
1363 | printk(KERN_ERR "BLKGRD: guard_tag error\n"); | ||
1364 | } | ||
1365 | |||
1366 | if (lpfc_bgs_get_reftag_err(bgstat)) { | ||
1367 | ret = 1; | ||
1368 | |||
1369 | scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, | ||
1370 | 0x10, 0x3); | ||
1371 | cmd->result = (DRIVER_SENSE|SUGGEST_DIE) << 24 | ||
1372 | | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); | ||
1373 | |||
1374 | phba->bg_reftag_err_cnt++; | ||
1375 | printk(KERN_ERR "BLKGRD: ref_tag error\n"); | ||
1376 | } | ||
1377 | |||
1378 | if (lpfc_bgs_get_apptag_err(bgstat)) { | ||
1379 | ret = 1; | ||
1380 | |||
1381 | scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, | ||
1382 | 0x10, 0x2); | ||
1383 | cmd->result = (DRIVER_SENSE|SUGGEST_DIE) << 24 | ||
1384 | | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); | ||
1385 | |||
1386 | phba->bg_apptag_err_cnt++; | ||
1387 | printk(KERN_ERR "BLKGRD: app_tag error\n"); | ||
1388 | } | ||
1389 | |||
1390 | if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { | ||
1391 | /* | ||
1392 | * setup sense data descriptor 0 per SPC-4 as an information | ||
1393 | * field, and put the failing LBA in it | ||
1394 | */ | ||
1395 | cmd->sense_buffer[8] = 0; /* Information */ | ||
1396 | cmd->sense_buffer[9] = 0xa; /* Add. length */ | ||
1397 | do_div(bghm, cmd->device->sector_size); | ||
1398 | |||
1399 | failing_sector = scsi_get_lba(cmd); | ||
1400 | failing_sector += bghm; | ||
1401 | |||
1402 | put_unaligned_be64(failing_sector, &cmd->sense_buffer[10]); | ||
1403 | } | ||
1404 | |||
1405 | if (!ret) { | ||
1406 | /* No error was reported - problem in FW? */ | ||
1407 | cmd->result = ScsiResult(DID_ERROR, 0); | ||
1408 | printk(KERN_ERR "BLKGRD: no errors reported!\n"); | ||
1409 | } | ||
1410 | |||
1411 | out: | ||
1412 | return ret; | ||
591 | } | 1413 | } |
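When a high-water mark is present, the routine above folds it into the sense data as a failing LBA. A small sketch of that conversion, assuming (as the do_div() by the sector size suggests) that the mark is a byte count into the transfer; all values are illustrative:

    /* Sketch only: deriving the failing sector placed in sense bytes 10..17. */
    unsigned long long bghm = 2048;         /* illustrative high-water mark from the HBA */
    unsigned int sector_size = 512;         /* illustrative cmd->device->sector_size */
    unsigned long long start_lba = 0x1000;  /* illustrative scsi_get_lba(cmd) result */

    unsigned long long failing_sector = start_lba + bghm / sector_size;  /* 0x1004 */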
592 | 1414 | ||
593 | /** | 1415 | /** |
@@ -681,6 +1503,15 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport, | |||
681 | lpfc_worker_wake_up(phba); | 1503 | lpfc_worker_wake_up(phba); |
682 | return; | 1504 | return; |
683 | } | 1505 | } |
1506 | |||
1507 | /** | ||
1508 | * lpfc_scsi_unprep_dma_buf: Routine to un-map DMA mapping of scatter gather. | ||
1509 | * @phba: The Hba for which this call is being executed. | ||
1510 | * @psb: The scsi buffer which is going to be un-mapped. | ||
1511 | * | ||
1512 | * This routine un-maps the DMA mapping of the scatter-gather list attached | ||
1513 | * to the scsi command in @psb. | ||
1514 | **/ | ||
684 | static void | 1515 | static void |
685 | lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) | 1516 | lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) |
686 | { | 1517 | { |
@@ -692,8 +1523,22 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) | |||
692 | */ | 1523 | */ |
693 | if (psb->seg_cnt > 0) | 1524 | if (psb->seg_cnt > 0) |
694 | scsi_dma_unmap(psb->pCmd); | 1525 | scsi_dma_unmap(psb->pCmd); |
1526 | if (psb->prot_seg_cnt > 0) | ||
1527 | dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd), | ||
1528 | scsi_prot_sg_count(psb->pCmd), | ||
1529 | psb->pCmd->sc_data_direction); | ||
695 | } | 1530 | } |
696 | 1531 | ||
1532 | /** | ||
1533 | * lpfc_handle_fcp_err: FCP response handler. | ||
1534 | * @vport: The virtual port for which this call is being executed. | ||
1535 | * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. | ||
1536 | * @rsp_iocb: The response IOCB which contains FCP error. | ||
1537 | * | ||
1538 | * This routine is called to process response IOCB with status field | ||
1539 | * IOSTAT_FCP_RSP_ERROR. It sets the result field of the scsi command | ||
1540 | * based upon the SCSI and FCP error. | ||
1541 | **/ | ||
697 | static void | 1542 | static void |
698 | lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | 1543 | lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, |
699 | struct lpfc_iocbq *rsp_iocb) | 1544 | struct lpfc_iocbq *rsp_iocb) |
@@ -735,7 +1580,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | |||
735 | logit = LOG_FCP; | 1580 | logit = LOG_FCP; |
736 | 1581 | ||
737 | lpfc_printf_vlog(vport, KERN_WARNING, logit, | 1582 | lpfc_printf_vlog(vport, KERN_WARNING, logit, |
738 | "0730 FCP command x%x failed: x%x SNS x%x x%x " | 1583 | "9024 FCP command x%x failed: x%x SNS x%x x%x " |
739 | "Data: x%x x%x x%x x%x x%x\n", | 1584 | "Data: x%x x%x x%x x%x x%x\n", |
740 | cmnd->cmnd[0], scsi_status, | 1585 | cmnd->cmnd[0], scsi_status, |
741 | be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info, | 1586 | be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info, |
@@ -758,7 +1603,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | |||
758 | scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId)); | 1603 | scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId)); |
759 | 1604 | ||
760 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, | 1605 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, |
761 | "0716 FCP Read Underrun, expected %d, " | 1606 | "9025 FCP Read Underrun, expected %d, " |
762 | "residual %d Data: x%x x%x x%x\n", | 1607 | "residual %d Data: x%x x%x x%x\n", |
763 | be32_to_cpu(fcpcmd->fcpDl), | 1608 | be32_to_cpu(fcpcmd->fcpDl), |
764 | scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0], | 1609 | scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0], |
@@ -774,7 +1619,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | |||
774 | (scsi_get_resid(cmnd) != fcpi_parm)) { | 1619 | (scsi_get_resid(cmnd) != fcpi_parm)) { |
775 | lpfc_printf_vlog(vport, KERN_WARNING, | 1620 | lpfc_printf_vlog(vport, KERN_WARNING, |
776 | LOG_FCP | LOG_FCP_ERROR, | 1621 | LOG_FCP | LOG_FCP_ERROR, |
777 | "0735 FCP Read Check Error " | 1622 | "9026 FCP Read Check Error " |
778 | "and Underrun Data: x%x x%x x%x x%x\n", | 1623 | "and Underrun Data: x%x x%x x%x x%x\n", |
779 | be32_to_cpu(fcpcmd->fcpDl), | 1624 | be32_to_cpu(fcpcmd->fcpDl), |
780 | scsi_get_resid(cmnd), fcpi_parm, | 1625 | scsi_get_resid(cmnd), fcpi_parm, |
@@ -793,7 +1638,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | |||
793 | (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) | 1638 | (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) |
794 | < cmnd->underflow)) { | 1639 | < cmnd->underflow)) { |
795 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, | 1640 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, |
796 | "0717 FCP command x%x residual " | 1641 | "9027 FCP command x%x residual " |
797 | "underrun converted to error " | 1642 | "underrun converted to error " |
798 | "Data: x%x x%x x%x\n", | 1643 | "Data: x%x x%x x%x\n", |
799 | cmnd->cmnd[0], scsi_bufflen(cmnd), | 1644 | cmnd->cmnd[0], scsi_bufflen(cmnd), |
@@ -802,7 +1647,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | |||
802 | } | 1647 | } |
803 | } else if (resp_info & RESID_OVER) { | 1648 | } else if (resp_info & RESID_OVER) { |
804 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, | 1649 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, |
805 | "0720 FCP command x%x residual overrun error. " | 1650 | "9028 FCP command x%x residual overrun error. " |
806 | "Data: x%x x%x \n", cmnd->cmnd[0], | 1651 | "Data: x%x x%x \n", cmnd->cmnd[0], |
807 | scsi_bufflen(cmnd), scsi_get_resid(cmnd)); | 1652 | scsi_bufflen(cmnd), scsi_get_resid(cmnd)); |
808 | host_status = DID_ERROR; | 1653 | host_status = DID_ERROR; |
@@ -814,7 +1659,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | |||
814 | } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm && | 1659 | } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm && |
815 | (cmnd->sc_data_direction == DMA_FROM_DEVICE)) { | 1660 | (cmnd->sc_data_direction == DMA_FROM_DEVICE)) { |
816 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR, | 1661 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR, |
817 | "0734 FCP Read Check Error Data: " | 1662 | "9029 FCP Read Check Error Data: " |
818 | "x%x x%x x%x x%x\n", | 1663 | "x%x x%x x%x x%x\n", |
819 | be32_to_cpu(fcpcmd->fcpDl), | 1664 | be32_to_cpu(fcpcmd->fcpDl), |
820 | be32_to_cpu(fcprsp->rspResId), | 1665 | be32_to_cpu(fcprsp->rspResId), |
@@ -828,6 +1673,16 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | |||
828 | lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb); | 1673 | lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb); |
829 | } | 1674 | } |
830 | 1675 | ||
1676 | /** | ||
1677 | * lpfc_scsi_cmd_iocb_cmpl: Scsi cmnd IOCB completion routine. | ||
1678 | * @phba: The Hba for which this call is being executed. | ||
1679 | * @pIocbIn: The command IOCBQ for the scsi cmnd. | ||
1680 | * @pIocbOut: The response IOCBQ for the scsi cmnd. | ||
1681 | * | ||
1682 | * This routine assigns the scsi command result by examining the response | ||
1683 | * IOCB status field. It also handles the QUEUE FULL condition by ramping | ||
1684 | * down the device queue depth. | ||
1685 | **/ | ||
831 | static void | 1686 | static void |
832 | lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | 1687 | lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, |
833 | struct lpfc_iocbq *pIocbOut) | 1688 | struct lpfc_iocbq *pIocbOut) |
@@ -846,7 +1701,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
846 | 1701 | ||
847 | lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4]; | 1702 | lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4]; |
848 | lpfc_cmd->status = pIocbOut->iocb.ulpStatus; | 1703 | lpfc_cmd->status = pIocbOut->iocb.ulpStatus; |
849 | atomic_dec(&pnode->cmd_pending); | 1704 | if (pnode && NLP_CHK_NODE_ACT(pnode)) |
1705 | atomic_dec(&pnode->cmd_pending); | ||
850 | 1706 | ||
851 | if (lpfc_cmd->status) { | 1707 | if (lpfc_cmd->status) { |
852 | if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && | 1708 | if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && |
@@ -856,7 +1712,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
856 | lpfc_cmd->status = IOSTAT_DEFAULT; | 1712 | lpfc_cmd->status = IOSTAT_DEFAULT; |
857 | 1713 | ||
858 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, | 1714 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, |
859 | "0729 FCP cmd x%x failed <%d/%d> " | 1715 | "9030 FCP cmd x%x failed <%d/%d> " |
860 | "status: x%x result: x%x Data: x%x x%x\n", | 1716 | "status: x%x result: x%x Data: x%x x%x\n", |
861 | cmd->cmnd[0], | 1717 | cmd->cmnd[0], |
862 | cmd->device ? cmd->device->id : 0xffff, | 1718 | cmd->device ? cmd->device->id : 0xffff, |
@@ -904,7 +1760,28 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
904 | lpfc_cmd->result == IOERR_ABORT_REQUESTED) { | 1760 | lpfc_cmd->result == IOERR_ABORT_REQUESTED) { |
905 | cmd->result = ScsiResult(DID_REQUEUE, 0); | 1761 | cmd->result = ScsiResult(DID_REQUEUE, 0); |
906 | break; | 1762 | break; |
907 | } /* else: fall through */ | 1763 | } |
1764 | |||
1765 | if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED || | ||
1766 | lpfc_cmd->result == IOERR_TX_DMA_FAILED) && | ||
1767 | pIocbOut->iocb.unsli3.sli3_bg.bgstat) { | ||
1768 | if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { | ||
1769 | /* | ||
1770 | * This is a response for a BG enabled | ||
1771 | * cmd. Parse BG error | ||
1772 | */ | ||
1773 | lpfc_parse_bg_err(phba, lpfc_cmd, | ||
1774 | pIocbOut); | ||
1775 | break; | ||
1776 | } else { | ||
1777 | lpfc_printf_vlog(vport, KERN_WARNING, | ||
1778 | LOG_BG, | ||
1779 | "9031 non-zero BGSTAT " | ||
1780 | "on unprotected cmd"); | ||
1781 | } | ||
1782 | } | ||
1783 | |||
1784 | /* else: fall through */ | ||
908 | default: | 1785 | default: |
909 | cmd->result = ScsiResult(DID_ERROR, 0); | 1786 | cmd->result = ScsiResult(DID_ERROR, 0); |
910 | break; | 1787 | break; |
@@ -936,23 +1813,31 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
936 | time_after(jiffies, lpfc_cmd->start_time + | 1813 | time_after(jiffies, lpfc_cmd->start_time + |
937 | msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { | 1814 | msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { |
938 | spin_lock_irqsave(sdev->host->host_lock, flags); | 1815 | spin_lock_irqsave(sdev->host->host_lock, flags); |
939 | if ((pnode->cmd_qdepth > atomic_read(&pnode->cmd_pending) && | 1816 | if (pnode && NLP_CHK_NODE_ACT(pnode)) { |
940 | (atomic_read(&pnode->cmd_pending) > LPFC_MIN_TGT_QDEPTH) && | 1817 | if (pnode->cmd_qdepth > |
941 | ((cmd->cmnd[0] == READ_10) || (cmd->cmnd[0] == WRITE_10)))) | 1818 | atomic_read(&pnode->cmd_pending) && |
942 | pnode->cmd_qdepth = atomic_read(&pnode->cmd_pending); | 1819 | (atomic_read(&pnode->cmd_pending) > |
943 | 1820 | LPFC_MIN_TGT_QDEPTH) && | |
944 | pnode->last_change_time = jiffies; | 1821 | ((cmd->cmnd[0] == READ_10) || |
1822 | (cmd->cmnd[0] == WRITE_10))) | ||
1823 | pnode->cmd_qdepth = | ||
1824 | atomic_read(&pnode->cmd_pending); | ||
1825 | |||
1826 | pnode->last_change_time = jiffies; | ||
1827 | } | ||
945 | spin_unlock_irqrestore(sdev->host->host_lock, flags); | 1828 | spin_unlock_irqrestore(sdev->host->host_lock, flags); |
946 | } else if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) && | 1829 | } else if (pnode && NLP_CHK_NODE_ACT(pnode)) { |
1830 | if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) && | ||
947 | time_after(jiffies, pnode->last_change_time + | 1831 | time_after(jiffies, pnode->last_change_time + |
948 | msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) { | 1832 | msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) { |
949 | spin_lock_irqsave(sdev->host->host_lock, flags); | 1833 | spin_lock_irqsave(sdev->host->host_lock, flags); |
950 | pnode->cmd_qdepth += pnode->cmd_qdepth * | 1834 | pnode->cmd_qdepth += pnode->cmd_qdepth * |
951 | LPFC_TGTQ_RAMPUP_PCENT / 100; | 1835 | LPFC_TGTQ_RAMPUP_PCENT / 100; |
952 | if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH) | 1836 | if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH) |
953 | pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH; | 1837 | pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH; |
954 | pnode->last_change_time = jiffies; | 1838 | pnode->last_change_time = jiffies; |
955 | spin_unlock_irqrestore(sdev->host->host_lock, flags); | 1839 | spin_unlock_irqrestore(sdev->host->host_lock, flags); |
1840 | } | ||
956 | } | 1841 | } |
957 | 1842 | ||
958 | lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); | 1843 | lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); |
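The ramp-up branch in the hunk above grows the target queue depth by a fixed percentage at most once per LPFC_TGTQ_INTERVAL and clamps it to a maximum. A sketch of that step; the percentage and cap stand in for LPFC_TGTQ_RAMPUP_PCENT and LPFC_MAX_TGT_QDEPTH, whose values do not appear in this diff:

    /* Sketch only: queue-depth ramp-up from the completion path. */
    unsigned int cmd_qdepth = 100;              /* current pnode->cmd_qdepth */
    const unsigned int rampup_pcent = 5;        /* assumed stand-in for LPFC_TGTQ_RAMPUP_PCENT */
    const unsigned int max_tgt_qdepth = 0xfff;  /* assumed stand-in for LPFC_MAX_TGT_QDEPTH */

    cmd_qdepth += cmd_qdepth * rampup_pcent / 100;  /* 100 -> 105 */
    if (cmd_qdepth > max_tgt_qdepth)
            cmd_qdepth = max_tgt_qdepth;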
@@ -1067,6 +1952,15 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd) | |||
1067 | } | 1952 | } |
1068 | } | 1953 | } |
1069 | 1954 | ||
1955 | /** | ||
1956 | * lpfc_scsi_prep_cmnd: Routine to convert scsi cmnd to FCP information unit. | ||
1957 | * @vport: The virtual port for which this call is being executed. | ||
1958 | * @lpfc_cmd: The scsi command which needs to be sent. | ||
1959 | * @pnode: Pointer to lpfc_nodelist. | ||
1960 | * | ||
1961 | * This routine initializes the fcp_cmnd and iocb data structures from the | ||
1962 | * scsi command to be transferred. | ||
1963 | **/ | ||
1070 | static void | 1964 | static void |
1071 | lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | 1965 | lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, |
1072 | struct lpfc_nodelist *pnode) | 1966 | struct lpfc_nodelist *pnode) |
@@ -1122,7 +2016,6 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | |||
1122 | } else { | 2016 | } else { |
1123 | iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; | 2017 | iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; |
1124 | iocb_cmd->ulpPU = PARM_READ_CHECK; | 2018 | iocb_cmd->ulpPU = PARM_READ_CHECK; |
1125 | iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd); | ||
1126 | fcp_cmnd->fcpCntl3 = READ_DATA; | 2019 | fcp_cmnd->fcpCntl3 = READ_DATA; |
1127 | phba->fc4InputRequests++; | 2020 | phba->fc4InputRequests++; |
1128 | } | 2021 | } |
@@ -1133,7 +2026,8 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | |||
1133 | fcp_cmnd->fcpCntl3 = 0; | 2026 | fcp_cmnd->fcpCntl3 = 0; |
1134 | phba->fc4ControlRequests++; | 2027 | phba->fc4ControlRequests++; |
1135 | } | 2028 | } |
1136 | if (phba->sli_rev == 3) | 2029 | if (phba->sli_rev == 3 && |
2030 | !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) | ||
1137 | lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd); | 2031 | lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd); |
1138 | /* | 2032 | /* |
1139 | * Finish initializing those IOCB fields that are independent | 2033 | * Finish initializing those IOCB fields that are independent |
@@ -1152,6 +2046,19 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | |||
1152 | piocbq->vport = vport; | 2046 | piocbq->vport = vport; |
1153 | } | 2047 | } |
1154 | 2048 | ||
2049 | /** | ||
2050 | * lpfc_scsi_prep_task_mgmt_cmd: Convert scsi TM cmnd to FCP information unit. | ||
2051 | * @vport: The virtual port for which this call is being executed. | ||
2052 | * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. | ||
2053 | * @lun: Logical unit number. | ||
2054 | * @task_mgmt_cmd: SCSI task management command. | ||
2055 | * | ||
2056 | * This routine creates an FCP information unit corresponding to @task_mgmt_cmd. | ||
2057 | * | ||
2058 | * Return codes: | ||
2059 | * 0 - Error | ||
2060 | * 1 - Success | ||
2061 | **/ | ||
1155 | static int | 2062 | static int |
1156 | lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, | 2063 | lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, |
1157 | struct lpfc_scsi_buf *lpfc_cmd, | 2064 | struct lpfc_scsi_buf *lpfc_cmd, |
@@ -1178,7 +2085,8 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, | |||
1178 | memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd)); | 2085 | memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd)); |
1179 | int_to_scsilun(lun, &fcp_cmnd->fcp_lun); | 2086 | int_to_scsilun(lun, &fcp_cmnd->fcp_lun); |
1180 | fcp_cmnd->fcpCntl2 = task_mgmt_cmd; | 2087 | fcp_cmnd->fcpCntl2 = task_mgmt_cmd; |
1181 | if (vport->phba->sli_rev == 3) | 2088 | if (vport->phba->sli_rev == 3 && |
2089 | !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED)) | ||
1182 | lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd); | 2090 | lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd); |
1183 | piocb->ulpCommand = CMD_FCP_ICMND64_CR; | 2091 | piocb->ulpCommand = CMD_FCP_ICMND64_CR; |
1184 | piocb->ulpContext = ndlp->nlp_rpi; | 2092 | piocb->ulpContext = ndlp->nlp_rpi; |
@@ -1201,6 +2109,15 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, | |||
1201 | return 1; | 2109 | return 1; |
1202 | } | 2110 | } |
1203 | 2111 | ||
2112 | /** | ||
2113 | * lpfc_tskmgmt_def_cmpl: IOCB completion routine for task management command. | ||
2114 | * @phba: The Hba for which this call is being executed. | ||
2115 | * @cmdiocbq: Pointer to lpfc_iocbq data structure. | ||
2116 | * @rspiocbq: Pointer to lpfc_iocbq data structure. | ||
2117 | * | ||
2118 | * This routine is the IOCB completion routine for device reset and target | ||
2119 | * reset. It releases the scsi buffer associated with lpfc_cmd. | ||
2120 | **/ | ||
1204 | static void | 2121 | static void |
1205 | lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba, | 2122 | lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba, |
1206 | struct lpfc_iocbq *cmdiocbq, | 2123 | struct lpfc_iocbq *cmdiocbq, |
@@ -1213,6 +2130,20 @@ lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba, | |||
1213 | return; | 2130 | return; |
1214 | } | 2131 | } |
1215 | 2132 | ||
2133 | /** | ||
2134 | * lpfc_scsi_tgt_reset: Target reset handler. | ||
2135 | * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure | ||
2136 | * @vport: The virtual port for which this call is being executed. | ||
2137 | * @tgt_id: Target ID. | ||
2138 | * @lun: Lun number. | ||
2139 | * @rdata: Pointer to lpfc_rport_data. | ||
2140 | * | ||
2141 | * This routine issues a TARGET RESET iocb to reset a target with @tgt_id ID. | ||
2142 | * | ||
2143 | * Return Code: | ||
2144 | * 0x2003 - Error | ||
2145 | * 0x2002 - Success. | ||
2146 | **/ | ||
1216 | static int | 2147 | static int |
1217 | lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport, | 2148 | lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport, |
1218 | unsigned tgt_id, unsigned int lun, | 2149 | unsigned tgt_id, unsigned int lun, |
@@ -1266,6 +2197,15 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport, | |||
1266 | return ret; | 2197 | return ret; |
1267 | } | 2198 | } |
1268 | 2199 | ||
2200 | /** | ||
2201 | * lpfc_info: Info entry point of scsi_host_template data structure. | ||
2202 | * @host: The scsi host for which this call is being executed. | ||
2203 | * | ||
2204 | * This routine provides module information about the hba. | ||
2205 | * | ||
2206 | * Return code: | ||
2207 | * Pointer to char - Success. | ||
2208 | **/ | ||
1269 | const char * | 2209 | const char * |
1270 | lpfc_info(struct Scsi_Host *host) | 2210 | lpfc_info(struct Scsi_Host *host) |
1271 | { | 2211 | { |
@@ -1295,6 +2235,13 @@ lpfc_info(struct Scsi_Host *host) | |||
1295 | return lpfcinfobuf; | 2235 | return lpfcinfobuf; |
1296 | } | 2236 | } |
1297 | 2237 | ||
2238 | /** | ||
2239 | * lpfc_poll_rearm_timer: Routine to modify the fcp_poll timer of the hba. | ||
2240 | * @phba: The Hba for which this call is being executed. | ||
2241 | * | ||
2242 | * This routine modifies fcp_poll_timer field of @phba by cfg_poll_tmo. | ||
2243 | * The default value of cfg_poll_tmo is 10 milliseconds. | ||
2244 | **/ | ||
1298 | static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba) | 2245 | static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba) |
1299 | { | 2246 | { |
1300 | unsigned long poll_tmo_expires = | 2247 | unsigned long poll_tmo_expires = |
@@ -1305,11 +2252,25 @@ static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba) | |||
1305 | poll_tmo_expires); | 2252 | poll_tmo_expires); |
1306 | } | 2253 | } |
1307 | 2254 | ||
2255 | /** | ||
2256 | * lpfc_poll_start_timer: Routine to start fcp_poll_timer of HBA. | ||
2257 | * @phba: The Hba for which this call is being executed. | ||
2258 | * | ||
2259 | * This routine starts the fcp_poll_timer of @phba. | ||
2260 | **/ | ||
1308 | void lpfc_poll_start_timer(struct lpfc_hba * phba) | 2261 | void lpfc_poll_start_timer(struct lpfc_hba * phba) |
1309 | { | 2262 | { |
1310 | lpfc_poll_rearm_timer(phba); | 2263 | lpfc_poll_rearm_timer(phba); |
1311 | } | 2264 | } |
1312 | 2265 | ||
2266 | /** | ||
2267 | * lpfc_poll_timeout: Restart polling timer. | ||
2268 | * @ptr: Pointer to lpfc_hba data structure, passed as an unsigned long. | ||
2269 | * | ||
2270 | * This routine restarts the fcp_poll timer when FCP ring polling is enabled | ||
2271 | * and the FCP ring interrupt is disabled. | ||
2272 | **/ | ||
2273 | |||
1313 | void lpfc_poll_timeout(unsigned long ptr) | 2274 | void lpfc_poll_timeout(unsigned long ptr) |
1314 | { | 2275 | { |
1315 | struct lpfc_hba *phba = (struct lpfc_hba *) ptr; | 2276 | struct lpfc_hba *phba = (struct lpfc_hba *) ptr; |
@@ -1321,6 +2282,20 @@ void lpfc_poll_timeout(unsigned long ptr) | |||
1321 | } | 2282 | } |
1322 | } | 2283 | } |
1323 | 2284 | ||
2285 | /** | ||
2286 | * lpfc_queuecommand: Queuecommand entry point of Scsi Host Template data | ||
2287 | * structure. | ||
2288 | * @cmnd: Pointer to scsi_cmnd data structure. | ||
2289 | * @done: Pointer to done routine. | ||
2290 | * | ||
2291 | * The driver registers this routine with the scsi midlayer to submit a @cmnd. | ||
2292 | * This routine prepares an IOCB from the scsi command and hands it to firmware. | ||
2293 | * The @done callback is invoked after the driver finishes processing the command. | ||
2294 | * | ||
2295 | * Return value : | ||
2296 | * 0 - Success | ||
2297 | * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily. | ||
2298 | **/ | ||
1324 | static int | 2299 | static int |
1325 | lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | 2300 | lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) |
1326 | { | 2301 | { |
@@ -1340,6 +2315,17 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | |||
1340 | goto out_fail_command; | 2315 | goto out_fail_command; |
1341 | } | 2316 | } |
1342 | 2317 | ||
2318 | if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && | ||
2319 | scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { | ||
2320 | |||
2321 | printk(KERN_ERR "BLKGRD ERROR: rcvd protected cmd:%02x op:%02x " | ||
2322 | "str=%s without registering for BlockGuard - " | ||
2323 | "Rejecting command\n", | ||
2324 | cmnd->cmnd[0], scsi_get_prot_op(cmnd), | ||
2325 | dif_op_str[scsi_get_prot_op(cmnd)]); | ||
2326 | goto out_fail_command; | ||
2327 | } | ||
2328 | |||
1343 | /* | 2329 | /* |
1344 | * Catch race where our node has transitioned, but the | 2330 | * Catch race where our node has transitioned, but the |
1345 | * transport is still transitioning. | 2331 | * transport is still transitioning. |
@@ -1348,12 +2334,13 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | |||
1348 | cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0); | 2334 | cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0); |
1349 | goto out_fail_command; | 2335 | goto out_fail_command; |
1350 | } | 2336 | } |
1351 | if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) | 2337 | if (vport->cfg_max_scsicmpl_time && |
2338 | (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)) | ||
1352 | goto out_host_busy; | 2339 | goto out_host_busy; |
1353 | 2340 | ||
1354 | lpfc_cmd = lpfc_get_scsi_buf(phba); | 2341 | lpfc_cmd = lpfc_get_scsi_buf(phba); |
1355 | if (lpfc_cmd == NULL) { | 2342 | if (lpfc_cmd == NULL) { |
1356 | lpfc_adjust_queue_depth(phba); | 2343 | lpfc_rampdown_queue_depth(phba); |
1357 | 2344 | ||
1358 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, | 2345 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, |
1359 | "0707 driver's buffer pool is empty, " | 2346 | "0707 driver's buffer pool is empty, " |
@@ -1361,7 +2348,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | |||
1361 | goto out_host_busy; | 2348 | goto out_host_busy; |
1362 | } | 2349 | } |
1363 | 2350 | ||
1364 | lpfc_cmd->start_time = jiffies; | ||
1365 | /* | 2351 | /* |
1366 | * Store the midlayer's command structure for the completion phase | 2352 | * Store the midlayer's command structure for the completion phase |
1367 | * and complete the command initialization. | 2353 | * and complete the command initialization. |
@@ -1373,7 +2359,65 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | |||
1373 | cmnd->host_scribble = (unsigned char *)lpfc_cmd; | 2359 | cmnd->host_scribble = (unsigned char *)lpfc_cmd; |
1374 | cmnd->scsi_done = done; | 2360 | cmnd->scsi_done = done; |
1375 | 2361 | ||
1376 | err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); | 2362 | if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { |
2363 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, | ||
2364 | "9033 BLKGRD: rcvd protected cmd:%02x op:%02x " | ||
2365 | "str=%s\n", | ||
2366 | cmnd->cmnd[0], scsi_get_prot_op(cmnd), | ||
2367 | dif_op_str[scsi_get_prot_op(cmnd)]); | ||
2368 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, | ||
2369 | "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x " | ||
2370 | "%02x %02x %02x %02x %02x \n", | ||
2371 | cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2], | ||
2372 | cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5], | ||
2373 | cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8], | ||
2374 | cmnd->cmnd[9]); | ||
2375 | if (cmnd->cmnd[0] == READ_10) | ||
2376 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, | ||
2377 | "9035 BLKGRD: READ @ sector %llu, " | ||
2378 | "count %lu\n", | ||
2379 | (unsigned long long)scsi_get_lba(cmnd), | ||
2380 | cmnd->request->nr_sectors); | ||
2381 | else if (cmnd->cmnd[0] == WRITE_10) | ||
2382 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, | ||
2383 | "9036 BLKGRD: WRITE @ sector %llu, " | ||
2384 | "count %lu cmd=%p\n", | ||
2385 | (unsigned long long)scsi_get_lba(cmnd), | ||
2386 | cmnd->request->nr_sectors, | ||
2387 | cmnd); | ||
2388 | |||
2389 | err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); | ||
2390 | } else { | ||
2391 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, | ||
2392 | "9038 BLKGRD: rcvd unprotected cmd:%02x op:%02x" | ||
2393 | " str=%s\n", | ||
2394 | cmnd->cmnd[0], scsi_get_prot_op(cmnd), | ||
2395 | dif_op_str[scsi_get_prot_op(cmnd)]); | ||
2396 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, | ||
2397 | "9039 BLKGRD: CDB: %02x %02x %02x %02x %02x " | ||
2398 | "%02x %02x %02x %02x %02x \n", | ||
2399 | cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2], | ||
2400 | cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5], | ||
2401 | cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8], | ||
2402 | cmnd->cmnd[9]); | ||
2403 | if (cmnd->cmnd[0] == READ_10) | ||
2404 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, | ||
2405 | "9040 dbg: READ @ sector %llu, " | ||
2406 | "count %lu\n", | ||
2407 | (unsigned long long)scsi_get_lba(cmnd), | ||
2408 | cmnd->request->nr_sectors); | ||
2409 | else if (cmnd->cmnd[0] == WRITE_10) | ||
2410 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, | ||
2411 | "9041 dbg: WRITE @ sector %llu, " | ||
2412 | "count %lu cmd=%p\n", | ||
2413 | (unsigned long long)scsi_get_lba(cmnd), | ||
2414 | cmnd->request->nr_sectors, cmnd); | ||
2415 | else | ||
2416 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, | ||
2417 | "9042 dbg: parser not implemented\n"); | ||
2418 | err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); | ||
2419 | } | ||
2420 | |||
1377 | if (err) | 2421 | if (err) |
1378 | goto out_host_busy_free_buf; | 2422 | goto out_host_busy_free_buf; |
1379 | 2423 | ||
@@ -1382,9 +2426,10 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | |||
1382 | atomic_inc(&ndlp->cmd_pending); | 2426 | atomic_inc(&ndlp->cmd_pending); |
1383 | err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring], | 2427 | err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring], |
1384 | &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); | 2428 | &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); |
1385 | if (err) | 2429 | if (err) { |
2430 | atomic_dec(&ndlp->cmd_pending); | ||
1386 | goto out_host_busy_free_buf; | 2431 | goto out_host_busy_free_buf; |
1387 | 2432 | } | |
1388 | if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { | 2433 | if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { |
1389 | lpfc_sli_poll_fcp_ring(phba); | 2434 | lpfc_sli_poll_fcp_ring(phba); |
1390 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) | 2435 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) |
@@ -1394,7 +2439,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | |||
1394 | return 0; | 2439 | return 0; |
1395 | 2440 | ||
1396 | out_host_busy_free_buf: | 2441 | out_host_busy_free_buf: |
1397 | atomic_dec(&ndlp->cmd_pending); | ||
1398 | lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); | 2442 | lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); |
1399 | lpfc_release_scsi_buf(phba, lpfc_cmd); | 2443 | lpfc_release_scsi_buf(phba, lpfc_cmd); |
1400 | out_host_busy: | 2444 | out_host_busy: |
@@ -1405,6 +2449,12 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | |||
1405 | return 0; | 2449 | return 0; |
1406 | } | 2450 | } |
1407 | 2451 | ||
2452 | /** | ||
2453 | * lpfc_block_error_handler: Routine to block error handler. | ||
2454 | * @cmnd: Pointer to scsi_cmnd data structure. | ||
2455 | * | ||
2456 | * This routine blocks execution until the fc_rport state is not FC_PORTSTATE_BLOCKED. | ||
2457 | **/ | ||
1408 | static void | 2458 | static void |
1409 | lpfc_block_error_handler(struct scsi_cmnd *cmnd) | 2459 | lpfc_block_error_handler(struct scsi_cmnd *cmnd) |
1410 | { | 2460 | { |
@@ -1421,6 +2471,17 @@ lpfc_block_error_handler(struct scsi_cmnd *cmnd) | |||
1421 | return; | 2471 | return; |
1422 | } | 2472 | } |
1423 | 2473 | ||
2474 | /** | ||
2475 | * lpfc_abort_handler: eh_abort_handler entry point of Scsi Host Template data | ||
2476 | * structure. | ||
2477 | * @cmnd: Pointer to scsi_cmnd data structure. | ||
2478 | * | ||
2479 | * This routine aborts @cmnd pending in base driver. | ||
2480 | * | ||
2481 | * Return code : | ||
2482 | * 0x2003 - Error | ||
2483 | * 0x2002 - Success | ||
2484 | **/ | ||
1424 | static int | 2485 | static int |
1425 | lpfc_abort_handler(struct scsi_cmnd *cmnd) | 2486 | lpfc_abort_handler(struct scsi_cmnd *cmnd) |
1426 | { | 2487 | { |
@@ -1516,6 +2577,18 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) | |||
1516 | return ret; | 2577 | return ret; |
1517 | } | 2578 | } |
1518 | 2579 | ||
2580 | /** | ||
2581 | * lpfc_device_reset_handler: eh_device_reset entry point of Scsi Host Template | ||
2582 | * data structure. | ||
2583 | * @cmnd: Pointer to scsi_cmnd data structure. | ||
2584 | * | ||
2585 | * This routine does a device reset by sending a TARGET_RESET task management | ||
2586 | * command. | ||
2587 | * | ||
2588 | * Return code : | ||
2589 | * 0x2003 - Error | ||
2590 | * 0x2002 - Success | ||
2591 | **/ | ||
1519 | static int | 2592 | static int |
1520 | lpfc_device_reset_handler(struct scsi_cmnd *cmnd) | 2593 | lpfc_device_reset_handler(struct scsi_cmnd *cmnd) |
1521 | { | 2594 | { |
@@ -1560,7 +2633,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) | |||
1560 | fc_get_event_number(), | 2633 | fc_get_event_number(), |
1561 | sizeof(scsi_event), | 2634 | sizeof(scsi_event), |
1562 | (char *)&scsi_event, | 2635 | (char *)&scsi_event, |
1563 | SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); | 2636 | LPFC_NL_VENDOR_ID); |
1564 | 2637 | ||
1565 | if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) { | 2638 | if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) { |
1566 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, | 2639 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, |
@@ -1633,6 +2706,17 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) | |||
1633 | return ret; | 2706 | return ret; |
1634 | } | 2707 | } |
1635 | 2708 | ||
2709 | /** | ||
2710 | * lpfc_bus_reset_handler: eh_bus_reset_handler entry point of Scsi Host | ||
2711 | * Template data structure. | ||
2712 | * @cmnd: Pointer to scsi_cmnd data structure. | ||
2713 | * | ||
2714 | * This routine does a target reset on every target on @cmnd->device->host. | ||
2715 | * | ||
2716 | * Return Code: | ||
2717 | * 0x2003 - Error | ||
2718 | * 0x2002 - Success | ||
2719 | **/ | ||
1636 | static int | 2720 | static int |
1637 | lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) | 2721 | lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) |
1638 | { | 2722 | { |
@@ -1657,7 +2741,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) | |||
1657 | fc_get_event_number(), | 2741 | fc_get_event_number(), |
1658 | sizeof(scsi_event), | 2742 | sizeof(scsi_event), |
1659 | (char *)&scsi_event, | 2743 | (char *)&scsi_event, |
1660 | SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); | 2744 | LPFC_NL_VENDOR_ID); |
1661 | 2745 | ||
1662 | lpfc_block_error_handler(cmnd); | 2746 | lpfc_block_error_handler(cmnd); |
1663 | /* | 2747 | /* |
@@ -1723,6 +2807,20 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) | |||
1723 | return ret; | 2807 | return ret; |
1724 | } | 2808 | } |
1725 | 2809 | ||
2810 | /** | ||
2811 | * lpfc_slave_alloc: slave_alloc entry point of Scsi Host Template data | ||
2812 | * structure. | ||
2813 | * @sdev: Pointer to scsi_device. | ||
2814 | * | ||
2815 | * This routine adds cmds_per_lun + 2 scsi_bufs to this host's globally | ||
2816 | * available list of scsi buffers. It also makes sure no more scsi buffers | ||
2817 | * are allocated than the HBA limit conveyed to the midlayer. This list of | ||
2818 | * scsi buffers exists for the lifetime of the driver. | ||
2819 | * | ||
2820 | * Return codes: | ||
2821 | * non-0 - Error | ||
2822 | * 0 - Success | ||
2823 | **/ | ||
1726 | static int | 2824 | static int |
1727 | lpfc_slave_alloc(struct scsi_device *sdev) | 2825 | lpfc_slave_alloc(struct scsi_device *sdev) |
1728 | { | 2826 | { |
@@ -1784,6 +2882,19 @@ lpfc_slave_alloc(struct scsi_device *sdev) | |||
1784 | return 0; | 2882 | return 0; |
1785 | } | 2883 | } |
1786 | 2884 | ||
2885 | /** | ||
2886 | * lpfc_slave_configure: slave_configure entry point of Scsi Host Template data | ||
2887 | * structure. | ||
2888 | * @sdev: Pointer to scsi_device. | ||
2889 | * | ||
2890 | * This routine configures the following items: | ||
2891 | * - Tag command queuing support for @sdev if supported. | ||
2892 | * - Dev loss time out value of fc_rport. | ||
2893 | * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set. | ||
2894 | * | ||
2895 | * Return codes: | ||
2896 | * 0 - Success | ||
2897 | **/ | ||
1787 | static int | 2898 | static int |
1788 | lpfc_slave_configure(struct scsi_device *sdev) | 2899 | lpfc_slave_configure(struct scsi_device *sdev) |
1789 | { | 2900 | { |
@@ -1813,6 +2924,12 @@ lpfc_slave_configure(struct scsi_device *sdev) | |||
1813 | return 0; | 2924 | return 0; |
1814 | } | 2925 | } |
1815 | 2926 | ||
2927 | /** | ||
2928 | * lpfc_slave_destroy: slave_destroy entry point of SHT data structure. | ||
2929 | * @sdev: Pointer to scsi_device. | ||
2930 | * | ||
2931 | * This routine sets the @sdev hostdata field to null. | ||
2932 | **/ | ||
1816 | static void | 2933 | static void |
1817 | lpfc_slave_destroy(struct scsi_device *sdev) | 2934 | lpfc_slave_destroy(struct scsi_device *sdev) |
1818 | { | 2935 | { |
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h index 437f182e2322..c7c440d5fa29 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.h +++ b/drivers/scsi/lpfc/lpfc_scsi.h | |||
@@ -124,6 +124,8 @@ struct lpfc_scsi_buf { | |||
124 | uint32_t seg_cnt; /* Number of scatter-gather segments returned by | 124 | uint32_t seg_cnt; /* Number of scatter-gather segments returned by |
125 | * dma_map_sg. The driver needs this for calls | 125 | * dma_map_sg. The driver needs this for calls |
126 | * to dma_unmap_sg. */ | 126 | * to dma_unmap_sg. */ |
127 | uint32_t prot_seg_cnt; /* seg_cnt's counterpart for protection data */ | ||
128 | |||
127 | dma_addr_t nonsg_phys; /* Non scatter-gather physical address. */ | 129 | dma_addr_t nonsg_phys; /* Non scatter-gather physical address. */ |
128 | 130 | ||
129 | /* | 131 | /* |
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 8ab5babdeebc..01dfdc8696f8 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
@@ -542,6 +542,7 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
542 | */ | 542 | */ |
543 | nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0; | 543 | nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0; |
544 | 544 | ||
545 | |||
545 | if (pring->ringno == LPFC_ELS_RING) { | 546 | if (pring->ringno == LPFC_ELS_RING) { |
546 | lpfc_debugfs_slow_ring_trc(phba, | 547 | lpfc_debugfs_slow_ring_trc(phba, |
547 | "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x", | 548 | "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x", |
@@ -1259,68 +1260,6 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba) | |||
1259 | } | 1260 | } |
1260 | 1261 | ||
1261 | /** | 1262 | /** |
1262 | * lpfc_sli_replace_hbqbuff: Replace the HBQ buffer with a new buffer. | ||
1263 | * @phba: Pointer to HBA context object. | ||
1264 | * @tag: Tag for the HBQ buffer. | ||
1265 | * | ||
1266 | * This function is called from unsolicited event handler code path to get the | ||
1267 | * HBQ buffer associated with an unsolicited iocb. This function is called with | ||
1268 | * no lock held. It returns the buffer associated with the given tag and posts | ||
1269 | * another buffer to the firmware. Note that the new buffer must be allocated | ||
1270 | * before taking the hbalock and that the hba lock must be held until it is | ||
1271 | * finished with the hbq entry swap. | ||
1272 | **/ | ||
1273 | static struct lpfc_dmabuf * | ||
1274 | lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag) | ||
1275 | { | ||
1276 | struct hbq_dmabuf *hbq_entry, *new_hbq_entry; | ||
1277 | uint32_t hbqno; | ||
1278 | void *virt; /* virtual address ptr */ | ||
1279 | dma_addr_t phys; /* mapped address */ | ||
1280 | unsigned long flags; | ||
1281 | |||
1282 | hbqno = tag >> 16; | ||
1283 | new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); | ||
1284 | /* Check whether HBQ is still in use */ | ||
1285 | spin_lock_irqsave(&phba->hbalock, flags); | ||
1286 | if (!phba->hbq_in_use) { | ||
1287 | if (new_hbq_entry) | ||
1288 | (phba->hbqs[hbqno].hbq_free_buffer)(phba, | ||
1289 | new_hbq_entry); | ||
1290 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
1291 | return NULL; | ||
1292 | } | ||
1293 | |||
1294 | hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); | ||
1295 | if (hbq_entry == NULL) { | ||
1296 | if (new_hbq_entry) | ||
1297 | (phba->hbqs[hbqno].hbq_free_buffer)(phba, | ||
1298 | new_hbq_entry); | ||
1299 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
1300 | return NULL; | ||
1301 | } | ||
1302 | list_del(&hbq_entry->dbuf.list); | ||
1303 | |||
1304 | if (new_hbq_entry == NULL) { | ||
1305 | list_add_tail(&hbq_entry->dbuf.list, &phba->hbqbuf_in_list); | ||
1306 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
1307 | return &hbq_entry->dbuf; | ||
1308 | } | ||
1309 | new_hbq_entry->tag = -1; | ||
1310 | phys = new_hbq_entry->dbuf.phys; | ||
1311 | virt = new_hbq_entry->dbuf.virt; | ||
1312 | new_hbq_entry->dbuf.phys = hbq_entry->dbuf.phys; | ||
1313 | new_hbq_entry->dbuf.virt = hbq_entry->dbuf.virt; | ||
1314 | hbq_entry->dbuf.phys = phys; | ||
1315 | hbq_entry->dbuf.virt = virt; | ||
1316 | lpfc_sli_free_hbq(phba, hbq_entry); | ||
1317 | list_add_tail(&new_hbq_entry->dbuf.list, &phba->hbqbuf_in_list); | ||
1318 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
1319 | |||
1320 | return &new_hbq_entry->dbuf; | ||
1321 | } | ||
1322 | |||
1323 | /** | ||
1324 | * lpfc_sli_get_buff: Get the buffer associated with the buffer tag. | 1263 | * lpfc_sli_get_buff: Get the buffer associated with the buffer tag. |
1325 | * @phba: Pointer to HBA context object. | 1264 | * @phba: Pointer to HBA context object. |
1326 | * @pring: Pointer to driver SLI ring object. | 1265 | * @pring: Pointer to driver SLI ring object. |
@@ -1334,13 +1273,17 @@ lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag) | |||
1334 | **/ | 1273 | **/ |
1335 | static struct lpfc_dmabuf * | 1274 | static struct lpfc_dmabuf * |
1336 | lpfc_sli_get_buff(struct lpfc_hba *phba, | 1275 | lpfc_sli_get_buff(struct lpfc_hba *phba, |
1337 | struct lpfc_sli_ring *pring, | 1276 | struct lpfc_sli_ring *pring, |
1338 | uint32_t tag) | 1277 | uint32_t tag) |
1339 | { | 1278 | { |
1279 | struct hbq_dmabuf *hbq_entry; | ||
1280 | |||
1340 | if (tag & QUE_BUFTAG_BIT) | 1281 | if (tag & QUE_BUFTAG_BIT) |
1341 | return lpfc_sli_ring_taggedbuf_get(phba, pring, tag); | 1282 | return lpfc_sli_ring_taggedbuf_get(phba, pring, tag); |
1342 | else | 1283 | hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); |
1343 | return lpfc_sli_replace_hbqbuff(phba, tag); | 1284 | if (!hbq_entry) |
1285 | return NULL; | ||
1286 | return &hbq_entry->dbuf; | ||
1344 | } | 1287 | } |
1345 | 1288 | ||
1346 | 1289 | ||
@@ -1372,8 +1315,6 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
1372 | match = 0; | 1315 | match = 0; |
1373 | irsp = &(saveq->iocb); | 1316 | irsp = &(saveq->iocb); |
1374 | 1317 | ||
1375 | if (irsp->ulpStatus == IOSTAT_NEED_BUFFER) | ||
1376 | return 1; | ||
1377 | if (irsp->ulpCommand == CMD_ASYNC_STATUS) { | 1318 | if (irsp->ulpCommand == CMD_ASYNC_STATUS) { |
1378 | if (pring->lpfc_sli_rcv_async_status) | 1319 | if (pring->lpfc_sli_rcv_async_status) |
1379 | pring->lpfc_sli_rcv_async_status(phba, pring, saveq); | 1320 | pring->lpfc_sli_rcv_async_status(phba, pring, saveq); |
@@ -1982,7 +1923,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, | |||
1982 | if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && | 1923 | if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && |
1983 | (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { | 1924 | (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { |
1984 | spin_unlock_irqrestore(&phba->hbalock, iflag); | 1925 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
1985 | lpfc_adjust_queue_depth(phba); | 1926 | lpfc_rampdown_queue_depth(phba); |
1986 | spin_lock_irqsave(&phba->hbalock, iflag); | 1927 | spin_lock_irqsave(&phba->hbalock, iflag); |
1987 | } | 1928 | } |
1988 | 1929 | ||
@@ -2225,7 +2166,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, | |||
2225 | if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && | 2166 | if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && |
2226 | (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { | 2167 | (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { |
2227 | spin_unlock_irqrestore(&phba->hbalock, iflag); | 2168 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
2228 | lpfc_adjust_queue_depth(phba); | 2169 | lpfc_rampdown_queue_depth(phba); |
2229 | spin_lock_irqsave(&phba->hbalock, iflag); | 2170 | spin_lock_irqsave(&phba->hbalock, iflag); |
2230 | } | 2171 | } |
2231 | 2172 | ||
@@ -2790,7 +2731,6 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba) | |||
2790 | { | 2731 | { |
2791 | MAILBOX_t *mb; | 2732 | MAILBOX_t *mb; |
2792 | struct lpfc_sli *psli; | 2733 | struct lpfc_sli *psli; |
2793 | uint16_t skip_post; | ||
2794 | volatile uint32_t word0; | 2734 | volatile uint32_t word0; |
2795 | void __iomem *to_slim; | 2735 | void __iomem *to_slim; |
2796 | 2736 | ||
@@ -2815,13 +2755,10 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba) | |||
2815 | readl(to_slim); /* flush */ | 2755 | readl(to_slim); /* flush */ |
2816 | 2756 | ||
2817 | /* Only skip post after fc_ffinit is completed */ | 2757 | /* Only skip post after fc_ffinit is completed */ |
2818 | if (phba->pport->port_state) { | 2758 | if (phba->pport->port_state) |
2819 | skip_post = 1; | ||
2820 | word0 = 1; /* This is really setting up word1 */ | 2759 | word0 = 1; /* This is really setting up word1 */ |
2821 | } else { | 2760 | else |
2822 | skip_post = 0; | ||
2823 | word0 = 0; /* This is really setting up word1 */ | 2761 | word0 = 0; /* This is really setting up word1 */ |
2824 | } | ||
2825 | to_slim = phba->MBslimaddr + sizeof (uint32_t); | 2762 | to_slim = phba->MBslimaddr + sizeof (uint32_t); |
2826 | writel(*(uint32_t *) mb, to_slim); | 2763 | writel(*(uint32_t *) mb, to_slim); |
2827 | readl(to_slim); /* flush */ | 2764 | readl(to_slim); /* flush */ |
@@ -2835,10 +2772,8 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba) | |||
2835 | memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); | 2772 | memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); |
2836 | psli->stats_start = get_seconds(); | 2773 | psli->stats_start = get_seconds(); |
2837 | 2774 | ||
2838 | if (skip_post) | 2775 | /* Give the INITFF and Post time to settle. */ |
2839 | mdelay(100); | 2776 | mdelay(100); |
2840 | else | ||
2841 | mdelay(2000); | ||
2842 | 2777 | ||
2843 | lpfc_hba_down_post(phba); | 2778 | lpfc_hba_down_post(phba); |
2844 | 2779 | ||
@@ -3084,7 +3019,6 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) | |||
3084 | spin_unlock_irq(&phba->hbalock); | 3019 | spin_unlock_irq(&phba->hbalock); |
3085 | phba->pport->port_state = LPFC_VPORT_UNKNOWN; | 3020 | phba->pport->port_state = LPFC_VPORT_UNKNOWN; |
3086 | lpfc_sli_brdrestart(phba); | 3021 | lpfc_sli_brdrestart(phba); |
3087 | msleep(2500); | ||
3088 | rc = lpfc_sli_chipset_init(phba); | 3022 | rc = lpfc_sli_chipset_init(phba); |
3089 | if (rc) | 3023 | if (rc) |
3090 | break; | 3024 | break; |
@@ -3111,7 +3045,8 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) | |||
3111 | phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | | 3045 | phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | |
3112 | LPFC_SLI3_HBQ_ENABLED | | 3046 | LPFC_SLI3_HBQ_ENABLED | |
3113 | LPFC_SLI3_CRP_ENABLED | | 3047 | LPFC_SLI3_CRP_ENABLED | |
3114 | LPFC_SLI3_INB_ENABLED); | 3048 | LPFC_SLI3_INB_ENABLED | |
3049 | LPFC_SLI3_BG_ENABLED); | ||
3115 | if (rc != MBX_SUCCESS) { | 3050 | if (rc != MBX_SUCCESS) { |
3116 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 3051 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
3117 | "0442 Adapter failed to init, mbxCmd x%x " | 3052 | "0442 Adapter failed to init, mbxCmd x%x " |
@@ -3144,17 +3079,29 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) | |||
3144 | phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; | 3079 | phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; |
3145 | if (pmb->mb.un.varCfgPort.ginb) { | 3080 | if (pmb->mb.un.varCfgPort.ginb) { |
3146 | phba->sli3_options |= LPFC_SLI3_INB_ENABLED; | 3081 | phba->sli3_options |= LPFC_SLI3_INB_ENABLED; |
3082 | phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get; | ||
3147 | phba->port_gp = phba->mbox->us.s3_inb_pgp.port; | 3083 | phba->port_gp = phba->mbox->us.s3_inb_pgp.port; |
3148 | phba->inb_ha_copy = &phba->mbox->us.s3_inb_pgp.ha_copy; | 3084 | phba->inb_ha_copy = &phba->mbox->us.s3_inb_pgp.ha_copy; |
3149 | phba->inb_counter = &phba->mbox->us.s3_inb_pgp.counter; | 3085 | phba->inb_counter = &phba->mbox->us.s3_inb_pgp.counter; |
3150 | phba->inb_last_counter = | 3086 | phba->inb_last_counter = |
3151 | phba->mbox->us.s3_inb_pgp.counter; | 3087 | phba->mbox->us.s3_inb_pgp.counter; |
3152 | } else { | 3088 | } else { |
3089 | phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get; | ||
3153 | phba->port_gp = phba->mbox->us.s3_pgp.port; | 3090 | phba->port_gp = phba->mbox->us.s3_pgp.port; |
3154 | phba->inb_ha_copy = NULL; | 3091 | phba->inb_ha_copy = NULL; |
3155 | phba->inb_counter = NULL; | 3092 | phba->inb_counter = NULL; |
3156 | } | 3093 | } |
3094 | |||
3095 | if (phba->cfg_enable_bg) { | ||
3096 | if (pmb->mb.un.varCfgPort.gbg) | ||
3097 | phba->sli3_options |= LPFC_SLI3_BG_ENABLED; | ||
3098 | else | ||
3099 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
3100 | "0443 Adapter did not grant " | ||
3101 | "BlockGuard\n"); | ||
3102 | } | ||
3157 | } else { | 3103 | } else { |
3104 | phba->hbq_get = NULL; | ||
3158 | phba->port_gp = phba->mbox->us.s2.port; | 3105 | phba->port_gp = phba->mbox->us.s2.port; |
3159 | phba->inb_ha_copy = NULL; | 3106 | phba->inb_ha_copy = NULL; |
3160 | phba->inb_counter = NULL; | 3107 | phba->inb_counter = NULL; |
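The gbg check above only records that the firmware granted BlockGuard; the queuecommand hunk earlier rejects protected commands arriving "without registering for BlockGuard", and that registration with the SCSI midlayer happens in the lpfc_init.c part of this series (see the diffstat), which is not shown here. As a hedged illustration of what such a registration typically looks like in a SCSI LLD (shost is assumed to be the adapter's Scsi_Host; the exact masks lpfc uses may differ):

    /* Illustration only, not lpfc's actual code: advertising DIF/DIX Type 1
     * support to the midlayer (declarations come from <scsi/scsi_host.h>).
     */
    if (phba->cfg_enable_bg) {
            scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
                                      SHOST_DIX_TYPE1_PROTECTION);
            scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
    }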
@@ -3305,10 +3252,6 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba) | |||
3305 | struct lpfc_sli *psli = &phba->sli; | 3252 | struct lpfc_sli *psli = &phba->sli; |
3306 | struct lpfc_sli_ring *pring; | 3253 | struct lpfc_sli_ring *pring; |
3307 | 3254 | ||
3308 | if (!(phba->pport->work_port_events & WORKER_MBOX_TMO)) { | ||
3309 | return; | ||
3310 | } | ||
3311 | |||
3312 | /* Mbox cmd <mbxCommand> timeout */ | 3255 | /* Mbox cmd <mbxCommand> timeout */ |
3313 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, | 3256 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
3314 | "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", | 3257 | "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", |
@@ -4005,7 +3948,7 @@ lpfc_sli_async_event_handler(struct lpfc_hba * phba, | |||
4005 | shost = lpfc_shost_from_vport(phba->pport); | 3948 | shost = lpfc_shost_from_vport(phba->pport); |
4006 | fc_host_post_vendor_event(shost, fc_get_event_number(), | 3949 | fc_host_post_vendor_event(shost, fc_get_event_number(), |
4007 | sizeof(temp_event_data), (char *) &temp_event_data, | 3950 | sizeof(temp_event_data), (char *) &temp_event_data, |
4008 | SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); | 3951 | LPFC_NL_VENDOR_ID); |
4009 | 3952 | ||
4010 | } | 3953 | } |
4011 | 3954 | ||
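For context on the one-line change above: the temperature event is still delivered through the FC transport's vendor-event hook, only the vendor id now comes from the LPFC_NL_VENDOR_ID macro (assumed to expand to SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX, matching the code it replaces). A hedged sketch of the call, with temp_event_data standing in for the driver's temperature payload:

#include <scsi/scsi_transport_fc.h>	/* fc_host_post_vendor_event(), fc_get_event_number() */

static void example_post_temp_event(struct lpfc_hba *phba,
				    struct temp_event *temp_event_data)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);

	/* data_len/data_buf carry the driver-defined payload verbatim */
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(*temp_event_data),
				  (char *)temp_event_data,
				  LPFC_NL_VENDOR_ID);
}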
@@ -5184,6 +5127,10 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba) | |||
5184 | { | 5127 | { |
5185 | uint32_t ha_copy; | 5128 | uint32_t ha_copy; |
5186 | 5129 | ||
5130 | /* If PCI channel is offline, don't process it */ | ||
5131 | if (unlikely(pci_channel_offline(phba->pcidev))) | ||
5132 | return 0; | ||
5133 | |||
5187 | /* If somebody is waiting to handle an eratt, don't process it | 5134 | /* If somebody is waiting to handle an eratt, don't process it |
5188 | * here. The brdkill function will do this. | 5135 | * here. The brdkill function will do this. |
5189 | */ | 5136 | */ |
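The guard added above short-circuits error-attention polling while the PCI channel is offline (for example during EEH/AER recovery), since MMIO reads of the HA register cannot be trusted then. A minimal sketch of the same guard; example_check_eratt is a hypothetical stand-in for the real routine:

#include <linux/pci.h>		/* pci_channel_offline() */

static int example_check_eratt(struct lpfc_hba *phba)
{
	/* Report "no error attention" rather than touching dead MMIO space */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return 0;

	/* ... otherwise read phba->HAregaddr and test HA_ERATT as before ... */
	return 0;
}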
@@ -5242,6 +5189,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id) | |||
5242 | uint32_t ha_copy; | 5189 | uint32_t ha_copy; |
5243 | uint32_t work_ha_copy; | 5190 | uint32_t work_ha_copy; |
5244 | unsigned long status; | 5191 | unsigned long status; |
5192 | unsigned long iflag; | ||
5245 | uint32_t control; | 5193 | uint32_t control; |
5246 | 5194 | ||
5247 | MAILBOX_t *mbox, *pmbox; | 5195 | MAILBOX_t *mbox, *pmbox; |
@@ -5274,7 +5222,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id) | |||
5274 | if (unlikely(phba->link_state < LPFC_LINK_DOWN)) | 5222 | if (unlikely(phba->link_state < LPFC_LINK_DOWN)) |
5275 | return IRQ_NONE; | 5223 | return IRQ_NONE; |
5276 | /* Need to read HA REG for slow-path events */ | 5224 | /* Need to read HA REG for slow-path events */ |
5277 | spin_lock(&phba->hbalock); | 5225 | spin_lock_irqsave(&phba->hbalock, iflag); |
5278 | ha_copy = readl(phba->HAregaddr); | 5226 | ha_copy = readl(phba->HAregaddr); |
5279 | /* If somebody is waiting to handle an eratt don't process it | 5227 | /* If somebody is waiting to handle an eratt don't process it |
5280 | * here. The brdkill function will do this. | 5228 | * here. The brdkill function will do this. |
@@ -5294,7 +5242,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id) | |||
5294 | writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), | 5242 | writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), |
5295 | phba->HAregaddr); | 5243 | phba->HAregaddr); |
5296 | readl(phba->HAregaddr); /* flush */ | 5244 | readl(phba->HAregaddr); /* flush */ |
5297 | spin_unlock(&phba->hbalock); | 5245 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
5298 | } else | 5246 | } else |
5299 | ha_copy = phba->ha_copy; | 5247 | ha_copy = phba->ha_copy; |
5300 | 5248 | ||
@@ -5307,13 +5255,13 @@ lpfc_sp_intr_handler(int irq, void *dev_id) | |||
5307 | * Turn off Link Attention interrupts | 5255 | * Turn off Link Attention interrupts |
5308 | * until CLEAR_LA done | 5256 | * until CLEAR_LA done |
5309 | */ | 5257 | */ |
5310 | spin_lock(&phba->hbalock); | 5258 | spin_lock_irqsave(&phba->hbalock, iflag); |
5311 | phba->sli.sli_flag &= ~LPFC_PROCESS_LA; | 5259 | phba->sli.sli_flag &= ~LPFC_PROCESS_LA; |
5312 | control = readl(phba->HCregaddr); | 5260 | control = readl(phba->HCregaddr); |
5313 | control &= ~HC_LAINT_ENA; | 5261 | control &= ~HC_LAINT_ENA; |
5314 | writel(control, phba->HCregaddr); | 5262 | writel(control, phba->HCregaddr); |
5315 | readl(phba->HCregaddr); /* flush */ | 5263 | readl(phba->HCregaddr); /* flush */ |
5316 | spin_unlock(&phba->hbalock); | 5264 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
5317 | } | 5265 | } |
5318 | else | 5266 | else |
5319 | work_ha_copy &= ~HA_LATT; | 5267 | work_ha_copy &= ~HA_LATT; |
@@ -5328,7 +5276,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id) | |||
5328 | (HA_RXMASK << (4*LPFC_ELS_RING))); | 5276 | (HA_RXMASK << (4*LPFC_ELS_RING))); |
5329 | status >>= (4*LPFC_ELS_RING); | 5277 | status >>= (4*LPFC_ELS_RING); |
5330 | if (status & HA_RXMASK) { | 5278 | if (status & HA_RXMASK) { |
5331 | spin_lock(&phba->hbalock); | 5279 | spin_lock_irqsave(&phba->hbalock, iflag); |
5332 | control = readl(phba->HCregaddr); | 5280 | control = readl(phba->HCregaddr); |
5333 | 5281 | ||
5334 | lpfc_debugfs_slow_ring_trc(phba, | 5282 | lpfc_debugfs_slow_ring_trc(phba, |
@@ -5357,10 +5305,10 @@ lpfc_sp_intr_handler(int irq, void *dev_id) | |||
5357 | (uint32_t)((unsigned long) | 5305 | (uint32_t)((unsigned long) |
5358 | &phba->work_waitq)); | 5306 | &phba->work_waitq)); |
5359 | } | 5307 | } |
5360 | spin_unlock(&phba->hbalock); | 5308 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
5361 | } | 5309 | } |
5362 | } | 5310 | } |
5363 | spin_lock(&phba->hbalock); | 5311 | spin_lock_irqsave(&phba->hbalock, iflag); |
5364 | if (work_ha_copy & HA_ERATT) | 5312 | if (work_ha_copy & HA_ERATT) |
5365 | lpfc_sli_read_hs(phba); | 5313 | lpfc_sli_read_hs(phba); |
5366 | if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { | 5314 | if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { |
@@ -5372,7 +5320,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id) | |||
5372 | /* First check out the status word */ | 5320 | /* First check out the status word */ |
5373 | lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t)); | 5321 | lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t)); |
5374 | if (pmbox->mbxOwner != OWN_HOST) { | 5322 | if (pmbox->mbxOwner != OWN_HOST) { |
5375 | spin_unlock(&phba->hbalock); | 5323 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
5376 | /* | 5324 | /* |
5377 | * Stray Mailbox Interrupt, mbxCommand <cmd> | 5325 | * Stray Mailbox Interrupt, mbxCommand <cmd> |
5378 | * mbxStatus <status> | 5326 | * mbxStatus <status> |
@@ -5389,7 +5337,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id) | |||
5389 | work_ha_copy &= ~HA_MBATT; | 5337 | work_ha_copy &= ~HA_MBATT; |
5390 | } else { | 5338 | } else { |
5391 | phba->sli.mbox_active = NULL; | 5339 | phba->sli.mbox_active = NULL; |
5392 | spin_unlock(&phba->hbalock); | 5340 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
5393 | phba->last_completion_time = jiffies; | 5341 | phba->last_completion_time = jiffies; |
5394 | del_timer(&phba->sli.mbox_tmo); | 5342 | del_timer(&phba->sli.mbox_tmo); |
5395 | if (pmb->mbox_cmpl) { | 5343 | if (pmb->mbox_cmpl) { |
@@ -5438,14 +5386,18 @@ lpfc_sp_intr_handler(int irq, void *dev_id) | |||
5438 | goto send_current_mbox; | 5386 | goto send_current_mbox; |
5439 | } | 5387 | } |
5440 | } | 5388 | } |
5441 | spin_lock(&phba->pport->work_port_lock); | 5389 | spin_lock_irqsave( |
5390 | &phba->pport->work_port_lock, | ||
5391 | iflag); | ||
5442 | phba->pport->work_port_events &= | 5392 | phba->pport->work_port_events &= |
5443 | ~WORKER_MBOX_TMO; | 5393 | ~WORKER_MBOX_TMO; |
5444 | spin_unlock(&phba->pport->work_port_lock); | 5394 | spin_unlock_irqrestore( |
5395 | &phba->pport->work_port_lock, | ||
5396 | iflag); | ||
5445 | lpfc_mbox_cmpl_put(phba, pmb); | 5397 | lpfc_mbox_cmpl_put(phba, pmb); |
5446 | } | 5398 | } |
5447 | } else | 5399 | } else |
5448 | spin_unlock(&phba->hbalock); | 5400 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
5449 | 5401 | ||
5450 | if ((work_ha_copy & HA_MBATT) && | 5402 | if ((work_ha_copy & HA_MBATT) && |
5451 | (phba->sli.mbox_active == NULL)) { | 5403 | (phba->sli.mbox_active == NULL)) { |
@@ -5461,9 +5413,9 @@ send_current_mbox: | |||
5461 | "MBX_SUCCESS"); | 5413 | "MBX_SUCCESS"); |
5462 | } | 5414 | } |
5463 | 5415 | ||
5464 | spin_lock(&phba->hbalock); | 5416 | spin_lock_irqsave(&phba->hbalock, iflag); |
5465 | phba->work_ha |= work_ha_copy; | 5417 | phba->work_ha |= work_ha_copy; |
5466 | spin_unlock(&phba->hbalock); | 5418 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
5467 | lpfc_worker_wake_up(phba); | 5419 | lpfc_worker_wake_up(phba); |
5468 | } | 5420 | } |
5469 | return IRQ_HANDLED; | 5421 | return IRQ_HANDLED; |
@@ -5495,6 +5447,7 @@ lpfc_fp_intr_handler(int irq, void *dev_id) | |||
5495 | struct lpfc_hba *phba; | 5447 | struct lpfc_hba *phba; |
5496 | uint32_t ha_copy; | 5448 | uint32_t ha_copy; |
5497 | unsigned long status; | 5449 | unsigned long status; |
5450 | unsigned long iflag; | ||
5498 | 5451 | ||
5499 | /* Get the driver's phba structure from the dev_id and | 5452 | /* Get the driver's phba structure from the dev_id and |
5500 | * assume the HBA is not interrupting. | 5453 | * assume the HBA is not interrupting. |
@@ -5520,11 +5473,11 @@ lpfc_fp_intr_handler(int irq, void *dev_id) | |||
5520 | /* Need to read HA REG for FCP ring and other ring events */ | 5473 | /* Need to read HA REG for FCP ring and other ring events */ |
5521 | ha_copy = readl(phba->HAregaddr); | 5474 | ha_copy = readl(phba->HAregaddr); |
5522 | /* Clear up only attention source related to fast-path */ | 5475 | /* Clear up only attention source related to fast-path */ |
5523 | spin_lock(&phba->hbalock); | 5476 | spin_lock_irqsave(&phba->hbalock, iflag); |
5524 | writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), | 5477 | writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), |
5525 | phba->HAregaddr); | 5478 | phba->HAregaddr); |
5526 | readl(phba->HAregaddr); /* flush */ | 5479 | readl(phba->HAregaddr); /* flush */ |
5527 | spin_unlock(&phba->hbalock); | 5480 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
5528 | } else | 5481 | } else |
5529 | ha_copy = phba->ha_copy; | 5482 | ha_copy = phba->ha_copy; |
5530 | 5483 | ||
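The interrupt-handler hunks above apply one mechanical conversion: every spin_lock(&phba->hbalock) and spin_lock(&phba->pport->work_port_lock) becomes the irqsave/irqrestore form with a local iflag, so the code stays correct even when the handler is entered with interrupts enabled. A minimal sketch of the pattern, modeled on the work_ha update in the slow-path handler:

#include <linux/spinlock.h>

static void example_publish_work_ha(struct lpfc_hba *phba, uint32_t work_ha_copy)
{
	unsigned long iflag;

	/* Save and disable local interrupts while holding the HBA lock */
	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->work_ha |= work_ha_copy;	/* hand pending events to the worker */
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}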
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index cc43e9de22cc..7e32e95c5392 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@ | |||
18 | * included with this package. * | 18 | * included with this package. * |
19 | *******************************************************************/ | 19 | *******************************************************************/ |
20 | 20 | ||
21 | #define LPFC_DRIVER_VERSION "8.2.8" | 21 | #define LPFC_DRIVER_VERSION "8.3.0" |
22 | 22 | ||
23 | #define LPFC_DRIVER_NAME "lpfc" | 23 | #define LPFC_DRIVER_NAME "lpfc" |
24 | #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" | 24 | #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" |
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index a7de1cc02b40..63b54c66756c 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -288,10 +288,8 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) | |||
288 | int vpi; | 288 | int vpi; |
289 | int rc = VPORT_ERROR; | 289 | int rc = VPORT_ERROR; |
290 | int status; | 290 | int status; |
291 | int size; | ||
292 | 291 | ||
293 | if ((phba->sli_rev < 3) || | 292 | if ((phba->sli_rev < 3) || !(phba->cfg_enable_npiv)) { |
294 | !(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) { | ||
295 | lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, | 293 | lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, |
296 | "1808 Create VPORT failed: " | 294 | "1808 Create VPORT failed: " |
297 | "NPIV is not enabled: SLImode:%d\n", | 295 | "NPIV is not enabled: SLImode:%d\n", |
@@ -351,20 +349,6 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) | |||
351 | 349 | ||
352 | memcpy(vport->fc_portname.u.wwn, vport->fc_sparam.portName.u.wwn, 8); | 350 | memcpy(vport->fc_portname.u.wwn, vport->fc_sparam.portName.u.wwn, 8); |
353 | memcpy(vport->fc_nodename.u.wwn, vport->fc_sparam.nodeName.u.wwn, 8); | 351 | memcpy(vport->fc_nodename.u.wwn, vport->fc_sparam.nodeName.u.wwn, 8); |
354 | size = strnlen(fc_vport->symbolic_name, LPFC_VNAME_LEN); | ||
355 | if (size) { | ||
356 | vport->vname = kzalloc(size+1, GFP_KERNEL); | ||
357 | if (!vport->vname) { | ||
358 | lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, | ||
359 | "1814 Create VPORT failed. " | ||
360 | "vname allocation failed.\n"); | ||
361 | rc = VPORT_ERROR; | ||
362 | lpfc_free_vpi(phba, vpi); | ||
363 | destroy_port(vport); | ||
364 | goto error_out; | ||
365 | } | ||
366 | memcpy(vport->vname, fc_vport->symbolic_name, size+1); | ||
367 | } | ||
368 | if (fc_vport->node_name != 0) | 352 | if (fc_vport->node_name != 0) |
369 | u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn); | 353 | u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn); |
370 | if (fc_vport->port_name != 0) | 354 | if (fc_vport->port_name != 0) |
@@ -394,6 +378,9 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) | |||
394 | goto error_out; | 378 | goto error_out; |
395 | } | 379 | } |
396 | 380 | ||
381 | /* Create binary sysfs attribute for vport */ | ||
382 | lpfc_alloc_sysfs_attr(vport); | ||
383 | |||
397 | *(struct lpfc_vport **)fc_vport->dd_data = vport; | 384 | *(struct lpfc_vport **)fc_vport->dd_data = vport; |
398 | vport->fc_vport = fc_vport; | 385 | vport->fc_vport = fc_vport; |
399 | 386 | ||
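The "binary sysfs attribute" created above is set up by lpfc_alloc_sysfs_attr() and torn down by lpfc_free_sysfs_attr() in the delete path further below. The sketch here only illustrates what such a helper is assumed to do, namely register a struct bin_attribute on the vport's Scsi_Host kobject; the attribute name, size and the use of shost_dev are illustrative assumptions, not the driver's actual attribute set.

#include <linux/sysfs.h>
#include <scsi/scsi_host.h>

static struct bin_attribute example_vport_attr = {
	.attr = { .name = "example_attr", .mode = S_IRUSR | S_IWUSR },
	.size = 256,
	/* .read / .write callbacks omitted in this sketch */
};

static int example_alloc_vport_attr(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	return sysfs_create_bin_file(&shost->shost_dev.kobj,
				     &example_vport_attr);
}

static void example_free_vport_attr(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	sysfs_remove_bin_file(&shost->shost_dev.kobj, &example_vport_attr);
}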
@@ -405,6 +392,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) | |||
405 | } | 392 | } |
406 | 393 | ||
407 | if (disable) { | 394 | if (disable) { |
395 | lpfc_vport_set_state(vport, FC_VPORT_DISABLED); | ||
408 | rc = VPORT_OK; | 396 | rc = VPORT_OK; |
409 | goto out; | 397 | goto out; |
410 | } | 398 | } |
@@ -587,8 +575,12 @@ lpfc_vport_delete(struct fc_vport *fc_vport) | |||
587 | spin_lock_irq(&phba->hbalock); | 575 | spin_lock_irq(&phba->hbalock); |
588 | vport->load_flag |= FC_UNLOADING; | 576 | vport->load_flag |= FC_UNLOADING; |
589 | spin_unlock_irq(&phba->hbalock); | 577 | spin_unlock_irq(&phba->hbalock); |
590 | kfree(vport->vname); | 578 | |
579 | lpfc_free_sysfs_attr(vport); | ||
580 | |||
591 | lpfc_debugfs_terminate(vport); | 581 | lpfc_debugfs_terminate(vport); |
582 | |||
583 | /* Remove FC host and then SCSI host with the vport */ | ||
592 | fc_remove_host(lpfc_shost_from_vport(vport)); | 584 | fc_remove_host(lpfc_shost_from_vport(vport)); |
593 | scsi_remove_host(lpfc_shost_from_vport(vport)); | 585 | scsi_remove_host(lpfc_shost_from_vport(vport)); |
594 | 586 | ||