Diffstat (limited to 'drivers/scsi/lpfc/lpfc_hbadisc.c')
 drivers/scsi/lpfc/lpfc_hbadisc.c | 241 ++++++++++++++++++++++++++++------
 1 file changed, 216 insertions(+), 25 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index a98d11bf3576..a1a70d9ffc2a 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -30,6 +30,7 @@
 #include <scsi/scsi_transport_fc.h>
 
 #include "lpfc_hw.h"
+#include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_sli.h"
 #include "lpfc_scsi.h"
@@ -88,14 +89,6 @@ lpfc_terminate_rport_io(struct fc_rport *rport)
 				    &phba->sli.ring[phba->sli.fcp_ring],
 				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
 	}
-
-	/*
-	 * A device is normally blocked for rediscovery and unblocked when
-	 * devloss timeout happens. In case a vport is removed or driver
-	 * unloaded before devloss timeout happens, we need to unblock here.
-	 */
-	scsi_target_unblock(&rport->dev);
-	return;
 }
 
 /*
@@ -215,8 +208,16 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
 		return;
 	}
 
-	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
+	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "0284 Devloss timeout Ignored on "
+				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
+				 "NPort x%x\n",
+				 *name, *(name+1), *(name+2), *(name+3),
+				 *(name+4), *(name+5), *(name+6), *(name+7),
+				 ndlp->nlp_DID);
 		return;
+	}
 
 	if (ndlp->nlp_type & NLP_FABRIC) {
 		/* We will clean up these Nodes in linkup */
@@ -237,8 +238,6 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
 		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
 				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
 	}
-	if (vport->load_flag & FC_UNLOADING)
-		warn_on = 0;
 
 	if (warn_on) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
@@ -276,6 +275,124 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
 	lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
 }
 
+/**
+ * lpfc_alloc_fast_evt: Allocates data structure for posting event.
+ * @phba: Pointer to hba context object.
+ *
+ * This function is called from functions that need to post
+ * events from interrupt context. It allocates the data
+ * structure required for posting an event and keeps track of
+ * the number of pending events to prevent an event storm when
+ * there are too many of them.
+ **/
+struct lpfc_fast_path_event *
+lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
+	struct lpfc_fast_path_event *ret;
+
+	/* If there are a lot of fast events, do not exhaust memory */
+	if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
+		return NULL;
+
+	ret = kzalloc(sizeof(struct lpfc_fast_path_event), GFP_ATOMIC);
+	if (!ret)
+		return NULL;
+	atomic_inc(&phba->fast_event_count);
+	INIT_LIST_HEAD(&ret->work_evt.evt_listp);
+	ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
+	return ret;
+}
+
+/**
+ * lpfc_free_fast_evt: Frees event data structure.
+ * @phba: Pointer to hba context object.
+ * @evt: Event object which needs to be freed.
+ *
+ * This function frees the data structure required for posting
+ * events.
+ **/
+void
+lpfc_free_fast_evt(struct lpfc_hba *phba,
+		struct lpfc_fast_path_event *evt) {
+
+	atomic_dec(&phba->fast_event_count);
+	kfree(evt);
+}
+
+/**
+ * lpfc_send_fastpath_evt: Posts events generated from fast path.
+ * @phba: Pointer to hba context object.
+ * @evtp: Event data structure.
+ *
+ * This function is called from the worker thread when the
+ * interrupt context needs to post an event. It posts the event
+ * to the fc transport netlink interface.
+ **/
+static void
+lpfc_send_fastpath_evt(struct lpfc_hba *phba,
+		struct lpfc_work_evt *evtp)
+{
+	unsigned long evt_category, evt_sub_category;
+	struct lpfc_fast_path_event *fast_evt_data;
+	char *evt_data;
+	uint32_t evt_data_size;
+	struct Scsi_Host *shost;
+
+	fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
+		work_evt);
+
+	evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
+	evt_sub_category = (unsigned long) fast_evt_data->un.
+		fabric_evt.subcategory;
+	shost = lpfc_shost_from_vport(fast_evt_data->vport);
+	if (evt_category == FC_REG_FABRIC_EVENT) {
+		if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
+			evt_data = (char *) &fast_evt_data->un.read_check_error;
+			evt_data_size = sizeof(fast_evt_data->un.
+				read_check_error);
+		} else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
+			(evt_sub_category == IOSTAT_NPORT_BSY)) {
+			evt_data = (char *) &fast_evt_data->un.fabric_evt;
+			evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
+		} else {
+			lpfc_free_fast_evt(phba, fast_evt_data);
+			return;
+		}
+	} else if (evt_category == FC_REG_SCSI_EVENT) {
+		switch (evt_sub_category) {
+		case LPFC_EVENT_QFULL:
+		case LPFC_EVENT_DEVBSY:
+			evt_data = (char *) &fast_evt_data->un.scsi_evt;
+			evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
+			break;
+		case LPFC_EVENT_CHECK_COND:
+			evt_data = (char *) &fast_evt_data->un.check_cond_evt;
+			evt_data_size = sizeof(fast_evt_data->un.
+				check_cond_evt);
+			break;
+		case LPFC_EVENT_VARQUEDEPTH:
+			evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
+			evt_data_size = sizeof(fast_evt_data->un.
+				queue_depth_evt);
+			break;
+		default:
+			lpfc_free_fast_evt(phba, fast_evt_data);
+			return;
+		}
+	} else {
+		lpfc_free_fast_evt(phba, fast_evt_data);
+		return;
+	}
+
+	fc_host_post_vendor_event(shost,
+		fc_get_event_number(),
+		evt_data_size,
+		evt_data,
+		SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
+
+	lpfc_free_fast_evt(phba, fast_evt_data);
+	return;
+}
+
 static void
 lpfc_work_list_done(struct lpfc_hba *phba)
 {
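
The three functions above form the fast-path event lifecycle: allocate under GFP_ATOMIC behind a storm guard, queue to the worker thread, and post via the FC transport netlink interface. In the dispatcher hunk below, free_evt is cleared for LPFC_EVT_FASTPATH_MGMT_EVT because lpfc_send_fastpath_evt() releases the event itself through lpfc_free_fast_evt(). A minimal sketch of how an interrupt-context producer would use this API; the work_list/hbalock queueing follows the pattern lpfc uses elsewhere, but example_post_qfull_evt() itself is hypothetical and not part of this patch:

	/* Illustrative sketch only: post a QFULL event from interrupt
	 * context, assuming lpfc's usual work_list handoff convention.
	 */
	static void
	example_post_qfull_evt(struct lpfc_vport *vport,
			       struct lpfc_nodelist *ndlp)
	{
		struct lpfc_hba *phba = vport->phba;
		struct lpfc_fast_path_event *fast_path_evt;
		unsigned long flags;

		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;	/* throttled or out of memory: drop event */

		fast_path_evt->un.scsi_evt.event_type = FC_REG_SCSI_EVENT;
		fast_path_evt->un.scsi_evt.subcategory = LPFC_EVENT_QFULL;
		memcpy(&fast_path_evt->un.scsi_evt.wwpn,
		       &ndlp->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.scsi_evt.wwnn,
		       &ndlp->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->vport = vport;

		/* Hand off to the worker thread, which calls
		 * lpfc_send_fastpath_evt() and frees the event. */
		spin_lock_irqsave(&phba->hbalock, flags);
		list_add_tail(&fast_path_evt->work_evt.evt_listp,
			      &phba->work_list);
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_worker_wake_up(phba);
	}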
@@ -347,6 +464,10 @@ lpfc_work_list_done(struct lpfc_hba *phba)
 		lpfc_unblock_mgmt_io(phba);
 		complete((struct completion *)(evtp->evt_arg2));
 		break;
+	case LPFC_EVT_FASTPATH_MGMT_EVT:
+		lpfc_send_fastpath_evt(phba, evtp);
+		free_evt = 0;
+		break;
 	}
 	if (free_evt)
 		kfree(evtp);
@@ -371,6 +492,7 @@ lpfc_work_done(struct lpfc_hba *phba)
 	spin_unlock_irq(&phba->hbalock);
 
 	if (ha_copy & HA_ERATT)
+		/* Handle the error attention event */
 		lpfc_handle_eratt(phba);
 
 	if (ha_copy & HA_MBATT)
@@ -378,6 +500,7 @@ lpfc_work_done(struct lpfc_hba *phba)
 
 	if (ha_copy & HA_LATT)
 		lpfc_handle_latt(phba);
+
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL)
 		for(i = 0; i <= phba->max_vpi; i++) {
@@ -1013,14 +1136,10 @@ out:
 }
 
 static void
-lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
+lpfc_enable_la(struct lpfc_hba *phba)
 {
 	uint32_t control;
 	struct lpfc_sli *psli = &phba->sli;
-
-	lpfc_linkdown(phba);
-
-	/* turn on Link Attention interrupts - no CLEAR_LA needed */
 	spin_lock_irq(&phba->hbalock);
 	psli->sli_flag |= LPFC_PROCESS_LA;
 	control = readl(phba->HCregaddr);
@@ -1030,6 +1149,15 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
 	spin_unlock_irq(&phba->hbalock);
 }
 
+static void
+lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
+{
+	lpfc_linkdown(phba);
+	/* turn on Link Attention interrupts - no CLEAR_LA needed */
+	lpfc_enable_la(phba);
+}
+
+
 /*
  * This routine handles processing a READ_LA mailbox
  * command upon completion. It is setup in the LPFC_MBOXQ
@@ -1077,8 +1205,12 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	}
 
 	phba->fc_eventTag = la->eventTag;
+	if (la->mm)
+		phba->sli.sli_flag |= LPFC_MENLO_MAINT;
+	else
+		phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
 
-	if (la->attType == AT_LINK_UP) {
+	if (la->attType == AT_LINK_UP && (!la->mm)) {
 		phba->fc_stat.LinkUp++;
 		if (phba->link_flag & LS_LOOPBACK_MODE) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -1090,13 +1222,15 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 		} else {
 			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
 					"1303 Link Up Event x%x received "
-					"Data: x%x x%x x%x x%x\n",
+					"Data: x%x x%x x%x x%x x%x x%x %d\n",
 					la->eventTag, phba->fc_eventTag,
 					la->granted_AL_PA, la->UlnkSpeed,
-					phba->alpa_map[0]);
+					phba->alpa_map[0],
+					la->mm, la->fa,
+					phba->wait_4_mlo_maint_flg);
 		}
 		lpfc_mbx_process_link_up(phba, la);
-	} else {
+	} else if (la->attType == AT_LINK_DOWN) {
 		phba->fc_stat.LinkDown++;
 		if (phba->link_flag & LS_LOOPBACK_MODE) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -1109,11 +1243,46 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 		else {
 			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
 					"1305 Link Down Event x%x received "
+					"Data: x%x x%x x%x x%x x%x\n",
+					la->eventTag, phba->fc_eventTag,
+					phba->pport->port_state, vport->fc_flag,
+					la->mm, la->fa);
+		}
+		lpfc_mbx_issue_link_down(phba);
+	}
+	if (la->mm && la->attType == AT_LINK_UP) {
+		if (phba->link_state != LPFC_LINK_DOWN) {
+			phba->fc_stat.LinkDown++;
+			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+				"1312 Link Down Event x%x received "
+				"Data: x%x x%x x%x\n",
+				la->eventTag, phba->fc_eventTag,
+				phba->pport->port_state, vport->fc_flag);
+			lpfc_mbx_issue_link_down(phba);
+		} else
+			lpfc_enable_la(phba);
+
+		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+			"1310 Menlo Maint Mode Link up Event x%x rcvd "
 					"Data: x%x x%x x%x\n",
 					la->eventTag, phba->fc_eventTag,
 					phba->pport->port_state, vport->fc_flag);
+		/*
+		 * The cmnd that triggered this will be waiting for this
+		 * signal.
+		 */
+		/* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
+		if (phba->wait_4_mlo_maint_flg) {
+			phba->wait_4_mlo_maint_flg = 0;
+			wake_up_interruptible(&phba->wait_4_mlo_m_q);
 		}
-		lpfc_mbx_issue_link_down(phba);
+	}
+
+	if (la->fa) {
+		if (la->mm)
+			lpfc_issue_clear_la(phba, vport);
+		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
+				"1311 fa %d\n", la->fa);
 	}
 
 lpfc_mbx_cmpl_read_la_free_mbuf:
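
The wake_up_interruptible() above completes a handshake: the management command that requested Menlo maintenance mode sleeps on wait_4_mlo_m_q until this link-attention event arrives. A sketch of the waiting side, with a hypothetical 30-second timeout (the real wait lives in the driver's management-command path, outside this diff):

	/* Illustrative sketch only: issuer side of the Menlo handshake.
	 * wait_4_mlo_maint_flg is set before the MENLO_SET_MODE or
	 * MENLO_RESET command is sent and cleared by the READ_LA
	 * completion handler above.
	 */
	phba->wait_4_mlo_maint_flg = 1;
	/* ... issue the Menlo command here ... */
	if (wait_event_interruptible_timeout(phba->wait_4_mlo_m_q,
				phba->wait_4_mlo_maint_flg == 0,
				msecs_to_jiffies(30000)) <= 0)
		phba->wait_4_mlo_maint_flg = 0; /* timed out or signaled */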
@@ -1177,7 +1346,7 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	scsi_host_put(shost);
 }
 
-void
+int
 lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
 {
 	struct lpfc_hba *phba = vport->phba;
@@ -1186,7 +1355,7 @@ lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
 
 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!mbox)
-		return;
+		return 1;
 
 	lpfc_unreg_vpi(phba, vport->vpi, mbox);
 	mbox->vport = vport;
@@ -1197,7 +1366,9 @@ lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
1197 "1800 Could not issue unreg_vpi\n"); 1366 "1800 Could not issue unreg_vpi\n");
1198 mempool_free(mbox, phba->mbox_mem_pool); 1367 mempool_free(mbox, phba->mbox_mem_pool);
1199 vport->unreg_vpi_cmpl = VPORT_ERROR; 1368 vport->unreg_vpi_cmpl = VPORT_ERROR;
1369 return rc;
1200 } 1370 }
1371 return 0;
1201} 1372}
1202 1373
1203static void 1374static void
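
With the return type change, lpfc_mbx_unreg_vpi() reports failure instead of failing silently: 1 if the mailbox cannot be allocated, the mailbox rc if it cannot be issued, and 0 on success. A hedged sketch of the caller-side check this enables (the real callers are in the vport deletion path, not shown in this diff):

	/* Illustrative sketch only: don't wait for an unreg_vpi
	 * completion that was never issued. */
	if (lpfc_mbx_unreg_vpi(vport))
		vport->unreg_vpi_cmpl = VPORT_ERROR;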
@@ -1553,6 +1724,22 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		 */
 		lpfc_register_remote_port(vport, ndlp);
 	}
+	if ((new_state == NLP_STE_MAPPED_NODE) &&
+		(vport->stat_data_enabled)) {
+		/*
+		 * A new target is discovered; if there is no buffer for
+		 * statistical data collection, allocate one.
+		 */
+		ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
+					 sizeof(struct lpfc_scsicmd_bkt),
+					 GFP_KERNEL);
+
+		if (!ndlp->lat_data)
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+				"0286 lpfc_nlp_state_cleanup failed to "
+				"allocate statistical data buffer DID "
+				"0x%x\n", ndlp->nlp_DID);
+	}
 	/*
 	 * if we added to Mapped list, but the remote port
 	 * registration failed or assigned a target id outside
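
lat_data is an array of LPFC_MAX_BUCKET_COUNT histogram buckets for per-target I/O statistics. A sketch of how a completion path might bin one response time, assuming the linear bucket layout (lpfc also supports a power-of-2 layout, and the driver's own update helper is elsewhere in the source; the function below is purely illustrative):

	/* Illustrative sketch only: bin one latency sample, assuming
	 * linear buckets of phba->bucket_step starting at
	 * phba->bucket_base, with bucket_step nonzero. */
	static void
	example_record_latency(struct lpfc_hba *phba,
			       struct lpfc_nodelist *ndlp,
			       unsigned long latency)
	{
		uint32_t i;

		if (!ndlp->lat_data || !phba->bucket_step)
			return;
		if (latency <= phba->bucket_base)
			i = 0;
		else {
			i = (latency - phba->bucket_base) /
				phba->bucket_step;
			if (i >= LPFC_MAX_BUCKET_COUNT)
				i = LPFC_MAX_BUCKET_COUNT - 1;
		}
		ndlp->lat_data[i].cmd_count++;
	}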
@@ -2786,7 +2973,7 @@ restart_disc:
 
 	default:
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
-				 "0229 Unexpected discovery timeout, "
+				 "0273 Unexpected discovery timeout, "
 				 "vport State x%x\n", vport->port_state);
 		break;
 	}
@@ -2940,6 +3127,8 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	INIT_LIST_HEAD(&ndlp->nlp_listp);
 	kref_init(&ndlp->kref);
 	NLP_INT_NODE_ACT(ndlp);
+	atomic_set(&ndlp->cmd_pending, 0);
+	ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
 
 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
 			      "node init: did:x%x",
@@ -2979,8 +3168,10 @@ lpfc_nlp_release(struct kref *kref)
 	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
 
 	/* free ndlp memory for final ndlp release */
-	if (NLP_CHK_FREE_REQ(ndlp))
+	if (NLP_CHK_FREE_REQ(ndlp)) {
+		kfree(ndlp->lat_data);
 		mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
+	}
 }
 
 /* This routine bumps the reference count for a ndlp structure to ensure