Diffstat (limited to 'drivers/scsi/lpfc/lpfc_hbadisc.c')
 drivers/scsi/lpfc/lpfc_hbadisc.c | 120
 1 file changed, 35 insertions(+), 85 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 7cb68feb04fd..a98d11bf3576 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -153,11 +153,11 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
 	 * count until this queued work is done
 	 */
 	evtp->evt_arg1  = lpfc_nlp_get(ndlp);
-	evtp->evt       = LPFC_EVT_DEV_LOSS;
-	list_add_tail(&evtp->evt_listp, &phba->work_list);
-	if (phba->work_wait)
-		wake_up(phba->work_wait);
-
+	if (evtp->evt_arg1) {
+		evtp->evt = LPFC_EVT_DEV_LOSS;
+		list_add_tail(&evtp->evt_listp, &phba->work_list);
+		lpfc_worker_wake_up(phba);
+	}
 	spin_unlock_irq(&phba->hbalock);
 
 	return;
@@ -276,14 +276,6 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
 		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
 }
 
-
-void
-lpfc_worker_wake_up(struct lpfc_hba *phba)
-{
-	wake_up(phba->work_wait);
-	return;
-}
-
 static void
 lpfc_work_list_done(struct lpfc_hba *phba)
 {
@@ -429,6 +421,8 @@ lpfc_work_done(struct lpfc_hba *phba)
 	    || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
 		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
 			pring->flag |= LPFC_DEFERRED_RING_EVENT;
+			/* Set the lpfc data pending flag */
+			set_bit(LPFC_DATA_READY, &phba->data_flags);
 		} else {
 			pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
 			lpfc_sli_handle_slow_ring_event(phba, pring,
@@ -459,69 +453,29 @@ lpfc_work_done(struct lpfc_hba *phba)
 		lpfc_work_list_done(phba);
 }
 
-static int
-check_work_wait_done(struct lpfc_hba *phba)
-{
-	struct lpfc_vport *vport;
-	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
-	int rc = 0;
-
-	spin_lock_irq(&phba->hbalock);
-	list_for_each_entry(vport, &phba->port_list, listentry) {
-		if (vport->work_port_events) {
-			rc = 1;
-			break;
-		}
-	}
-	if (rc || phba->work_ha || (!list_empty(&phba->work_list)) ||
-	    kthread_should_stop() || pring->flag & LPFC_DEFERRED_RING_EVENT) {
-		rc = 1;
-		phba->work_found++;
-	} else
-		phba->work_found = 0;
-	spin_unlock_irq(&phba->hbalock);
-	return rc;
-}
-
-
 int
 lpfc_do_work(void *p)
 {
 	struct lpfc_hba *phba = p;
 	int rc;
-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(work_waitq);
 
 	set_user_nice(current, -20);
-	phba->work_wait = &work_waitq;
-	phba->work_found = 0;
+	phba->data_flags = 0;
 
 	while (1) {
-
-		rc = wait_event_interruptible(work_waitq,
-					      check_work_wait_done(phba));
-
+		/* wait and check worker queue activities */
+		rc = wait_event_interruptible(phba->work_waitq,
+					(test_and_clear_bit(LPFC_DATA_READY,
+							    &phba->data_flags)
+					 || kthread_should_stop()));
 		BUG_ON(rc);
 
 		if (kthread_should_stop())
 			break;
 
+		/* Attend pending lpfc data processing */
 		lpfc_work_done(phba);
-
-		/* If there is alot of slow ring work, like during link up
-		 * check_work_wait_done() may cause this thread to not give
-		 * up the CPU for very long periods of time. This may cause
-		 * soft lockups or other problems. To avoid these situations
-		 * give up the CPU here after LPFC_MAX_WORKER_ITERATION
-		 * consecutive iterations.
-		 */
-		if (phba->work_found >= LPFC_MAX_WORKER_ITERATION) {
-			phba->work_found = 0;
-			schedule();
-		}
 	}
-	spin_lock_irq(&phba->hbalock);
-	phba->work_wait = NULL;
-	spin_unlock_irq(&phba->hbalock);
 	return 0;
 }
 
@@ -551,10 +505,10 @@ lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
 
 	spin_lock_irqsave(&phba->hbalock, flags);
 	list_add_tail(&evtp->evt_listp, &phba->work_list);
-	if (phba->work_wait)
-		lpfc_worker_wake_up(phba);
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 
+	lpfc_worker_wake_up(phba);
+
 	return 1;
 }
 
@@ -963,6 +917,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
 	if (phba->fc_topology == TOPOLOGY_LOOP) {
 		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
 
+		if (phba->cfg_enable_npiv)
+			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+				"1309 Link Up Event npiv not supported in loop "
+				"topology\n");
 		/* Get Loop Map information */
 		if (la->il)
 			vport->fc_flag |= FC_LBIT;
@@ -1087,6 +1045,8 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	MAILBOX_t *mb = &pmb->mb;
 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
 
+	/* Unblock ELS traffic */
+	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
 	/* Check for error */
 	if (mb->mbxStatus) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
@@ -1650,7 +1610,6 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		ndlp->nlp_DID, old_state, state);
 
 	if (old_state == NLP_STE_NPR_NODE &&
-	    (ndlp->nlp_flag & NLP_DELAY_TMO) != 0 &&
 	    state != NLP_STE_NPR_NODE)
 		lpfc_cancel_retry_delay_tmo(vport, ndlp);
 	if (old_state == NLP_STE_UNMAPPED_NODE) {
@@ -1687,8 +1646,7 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 
-	if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
-		lpfc_cancel_retry_delay_tmo(vport, ndlp);
+	lpfc_cancel_retry_delay_tmo(vport, ndlp);
 	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
 		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
 	spin_lock_irq(shost->host_lock);
@@ -1701,8 +1659,7 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 static void
 lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
-	if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
-		lpfc_cancel_retry_delay_tmo(vport, ndlp);
+	lpfc_cancel_retry_delay_tmo(vport, ndlp);
 	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
 		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
 	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
@@ -2121,10 +2078,8 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	ndlp->nlp_last_elscmd = 0;
 	del_timer_sync(&ndlp->nlp_delayfunc);
 
-	if (!list_empty(&ndlp->els_retry_evt.evt_listp))
-		list_del_init(&ndlp->els_retry_evt.evt_listp);
-	if (!list_empty(&ndlp->dev_loss_evt.evt_listp))
-		list_del_init(&ndlp->dev_loss_evt.evt_listp);
+	list_del_init(&ndlp->els_retry_evt.evt_listp);
+	list_del_init(&ndlp->dev_loss_evt.evt_listp);
 
 	lpfc_unreg_rpi(vport, ndlp);
 
@@ -2144,10 +2099,7 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	LPFC_MBOXQ_t *mbox;
 	int rc;
 
-	if (ndlp->nlp_flag & NLP_DELAY_TMO) {
-		lpfc_cancel_retry_delay_tmo(vport, ndlp);
-	}
-
+	lpfc_cancel_retry_delay_tmo(vport, ndlp);
 	if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) {
 		/* For this case we need to cleanup the default rpi
 		 * allocated by the firmware.
@@ -2317,8 +2269,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
 			/* Since this node is marked for discovery,
 			 * delay timeout is not needed.
 			 */
-			if (ndlp->nlp_flag & NLP_DELAY_TMO)
-				lpfc_cancel_retry_delay_tmo(vport, ndlp);
+			lpfc_cancel_retry_delay_tmo(vport, ndlp);
 		} else
 			ndlp = NULL;
 	} else {
@@ -2643,21 +2594,20 @@ lpfc_disc_timeout(unsigned long ptr)
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
 	struct lpfc_hba *phba = vport->phba;
+	uint32_t tmo_posted;
 	unsigned long flags = 0;
 
 	if (unlikely(!phba))
 		return;
 
-	if ((vport->work_port_events & WORKER_DISC_TMO) == 0) {
-		spin_lock_irqsave(&vport->work_port_lock, flags);
+	spin_lock_irqsave(&vport->work_port_lock, flags);
+	tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
+	if (!tmo_posted)
 		vport->work_port_events |= WORKER_DISC_TMO;
 	spin_unlock_irqrestore(&vport->work_port_lock, flags);
 
-		spin_lock_irqsave(&phba->hbalock, flags);
-		if (phba->work_wait)
-			lpfc_worker_wake_up(phba);
-		spin_unlock_irqrestore(&phba->hbalock, flags);
-	}
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
 	return;
 }
 
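The thrust of the patch: the worker thread's private on-stack wait queue and the check_work_wait_done() helper, which rescanned every vport and ring flag under hbalock on each wakeup, are replaced by a single LPFC_DATA_READY bit in phba->data_flags. Producers set the bit and call lpfc_worker_wake_up(); the worker sleeps in wait_event_interruptible() until test_and_clear_bit() harvests the bit or kthread_should_stop() fires. The sketch below is a minimal user-space analog of that flag-and-wake pattern built on pthreads; every name in it is invented for illustration, and the condition variable stands in for, rather than reproduces, the kernel primitives (set_bit()/wake_up() on the producer side, wait_event_interruptible()/test_and_clear_bit() in the worker).

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock  = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  waitq = PTHREAD_COND_INITIALIZER;
    static bool data_ready;      /* plays the role of LPFC_DATA_READY      */
    static bool should_stop;     /* plays the role of kthread_should_stop() */

    /* Producer side: analogous to lpfc_worker_wake_up() */
    static void worker_wake_up(void)
    {
            pthread_mutex_lock(&lock);
            data_ready = true;            /* set_bit(LPFC_DATA_READY, ...) */
            pthread_cond_signal(&waitq);  /* wake_up(&phba->work_waitq)    */
            pthread_mutex_unlock(&lock);
    }

    /* Worker side: analogous to lpfc_do_work() */
    static void *do_work(void *arg)
    {
            (void)arg;
            for (;;) {
                    pthread_mutex_lock(&lock);
                    /* wait_event_interruptible(waitq,
                     *         test_and_clear_bit(...) || kthread_should_stop()) */
                    while (!data_ready && !should_stop)
                            pthread_cond_wait(&waitq, &lock);
                    data_ready = false;   /* the "clear" half of test_and_clear_bit() */
                    bool stop = should_stop;
                    pthread_mutex_unlock(&lock);

                    if (stop)
                            break;
                    printf("draining work list\n");  /* lpfc_work_done() */
            }
            return NULL;
    }

    int main(void)
    {
            pthread_t worker;
            pthread_create(&worker, NULL, do_work, NULL);

            worker_wake_up();             /* post an event */

            pthread_mutex_lock(&lock);    /* ask the worker to exit */
            should_stop = true;
            pthread_cond_signal(&waitq);
            pthread_mutex_unlock(&lock);
            pthread_join(worker, NULL);
            return 0;
    }

The property the patch relies on holds in the analog as well: any number of wakeups posted before the worker runs collapse into a single pass over the work list, and a spurious wakeup with no flagged work is harmless because the worker simply goes back to sleep.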