Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--	drivers/scsi/lpfc/lpfc.h	20
-rw-r--r--	drivers/scsi/lpfc/lpfc_ct.c	16
-rw-r--r--	drivers/scsi/lpfc/lpfc_els.c	33
-rw-r--r--	drivers/scsi/lpfc/lpfc_hbadisc.c	93
-rw-r--r--	drivers/scsi/lpfc/lpfc_init.c	13
-rw-r--r--	drivers/scsi/lpfc/lpfc_scsi.c	26
-rw-r--r--	drivers/scsi/lpfc/lpfc_sli.c	36
7 files changed, 97 insertions, 140 deletions
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index ec0b0f6e5e1a..e3e5b540e36c 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -59,6 +59,9 @@ struct lpfc_sli2_slim;
 
 #define MAX_HBAEVT	32
 
+/* lpfc wait event data ready flag */
+#define LPFC_DATA_READY		(1<<0)
+
 enum lpfc_polling_flags {
 	ENABLE_FCP_RING_POLLING = 0x1,
 	DISABLE_FCP_RING_INT    = 0x2
@@ -425,9 +428,6 @@ struct lpfc_hba {
 
 	uint16_t pci_cfg_value;
 
-	uint8_t work_found;
-#define LPFC_MAX_WORKER_ITERATION  4
-
 	uint8_t fc_linkspeed;	/* Link speed after last READ_LA */
 
 	uint32_t fc_eventTag;	/* event tag for link attention */
@@ -489,8 +489,9 @@ struct lpfc_hba {
 	uint32_t work_hs;		/* HS stored in case of ERRAT */
 	uint32_t work_status[2];	/* Extra status from SLIM */
 
-	wait_queue_head_t *work_wait;
+	wait_queue_head_t work_waitq;
 	struct task_struct *worker_thread;
+	long data_flags;
 
 	uint32_t hbq_in_use;		/* HBQs in use flag */
 	struct list_head hbqbuf_in_list;  /* in-fly hbq buffer list */
@@ -637,6 +638,17 @@ lpfc_is_link_up(struct lpfc_hba *phba)
 	       phba->link_state == LPFC_HBA_READY;
 }
 
+static inline void
+lpfc_worker_wake_up(struct lpfc_hba *phba)
+{
+	/* Set the lpfc data pending flag */
+	set_bit(LPFC_DATA_READY, &phba->data_flags);
+
+	/* Wake up worker thread */
+	wake_up(&phba->work_waitq);
+	return;
+}
+
 #define FC_REG_DUMP_EVENT		0x10 /* Register for Dump events */
 #define FC_REG_TEMPERATURE_EVENT	0x20 /* Register for temperature
 						 event */
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 153afae567b5..5442ce33615a 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1679,20 +1679,18 @@ lpfc_fdmi_tmo(unsigned long ptr)
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *)ptr;
 	struct lpfc_hba   *phba = vport->phba;
+	uint32_t tmo_posted;
 	unsigned long iflag;
 
 	spin_lock_irqsave(&vport->work_port_lock, iflag);
-	if (!(vport->work_port_events & WORKER_FDMI_TMO)) {
+	tmo_posted = vport->work_port_events & WORKER_FDMI_TMO;
+	if (!tmo_posted)
 		vport->work_port_events |= WORKER_FDMI_TMO;
-		spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+	spin_unlock_irqrestore(&vport->work_port_lock, iflag);
 
-		spin_lock_irqsave(&phba->hbalock, iflag);
-		if (phba->work_wait)
-			lpfc_worker_wake_up(phba);
-		spin_unlock_irqrestore(&phba->hbalock, iflag);
-	}
-	else
-		spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
+	return;
 }
 
 void
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index d418c7c1251e..5d69dee85a8d 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1813,11 +1813,11 @@ lpfc_els_retry_delay(unsigned long ptr)
 	 * count until the queued work is done
 	 */
 	evtp->evt_arg1  = lpfc_nlp_get(ndlp);
-	evtp->evt       = LPFC_EVT_ELS_RETRY;
-	list_add_tail(&evtp->evt_listp, &phba->work_list);
-	if (phba->work_wait)
-		lpfc_worker_wake_up(phba);
-
+	if (evtp->evt_arg1) {
+		evtp->evt       = LPFC_EVT_ELS_RETRY;
+		list_add_tail(&evtp->evt_listp, &phba->work_list);
+		lpfc_worker_wake_up(phba);
+	}
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 	return;
 }
@@ -3802,20 +3802,17 @@ lpfc_els_timeout(unsigned long ptr)
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
 	struct lpfc_hba   *phba = vport->phba;
+	uint32_t tmo_posted;
 	unsigned long iflag;
 
 	spin_lock_irqsave(&vport->work_port_lock, iflag);
-	if ((vport->work_port_events & WORKER_ELS_TMO) == 0) {
+	tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
+	if (!tmo_posted)
 		vport->work_port_events |= WORKER_ELS_TMO;
-		spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+	spin_unlock_irqrestore(&vport->work_port_lock, iflag);
 
-		spin_lock_irqsave(&phba->hbalock, iflag);
-		if (phba->work_wait)
-			lpfc_worker_wake_up(phba);
-		spin_unlock_irqrestore(&phba->hbalock, iflag);
-	}
-	else
-		spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
 	return;
 }
 
@@ -4769,18 +4766,16 @@ lpfc_fabric_block_timeout(unsigned long ptr)
 	struct lpfc_hba  *phba = (struct lpfc_hba *) ptr;
 	unsigned long iflags;
 	uint32_t tmo_posted;
+
 	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
 	tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
 	if (!tmo_posted)
 		phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
 
-	if (!tmo_posted) {
-		spin_lock_irqsave(&phba->hbalock, iflags);
-		if (phba->work_wait)
-			lpfc_worker_wake_up(phba);
-		spin_unlock_irqrestore(&phba->hbalock, iflags);
-	}
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
+	return;
 }
 
 static void
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index f3dc19dfac5b..ba4873c9e2c3 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -153,11 +153,11 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
 	 * count until this queued work is done
 	 */
 	evtp->evt_arg1  = lpfc_nlp_get(ndlp);
-	evtp->evt       = LPFC_EVT_DEV_LOSS;
-	list_add_tail(&evtp->evt_listp, &phba->work_list);
-	if (phba->work_wait)
-		wake_up(phba->work_wait);
-
+	if (evtp->evt_arg1) {
+		evtp->evt       = LPFC_EVT_DEV_LOSS;
+		list_add_tail(&evtp->evt_listp, &phba->work_list);
+		lpfc_worker_wake_up(phba);
+	}
 	spin_unlock_irq(&phba->hbalock);
 
 	return;
@@ -276,14 +276,6 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
 	lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
 }
 
-
-void
-lpfc_worker_wake_up(struct lpfc_hba *phba)
-{
-	wake_up(phba->work_wait);
-	return;
-}
-
 static void
 lpfc_work_list_done(struct lpfc_hba *phba)
 {
@@ -429,6 +421,8 @@ lpfc_work_done(struct lpfc_hba *phba)
 	    || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
 		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
 			pring->flag |= LPFC_DEFERRED_RING_EVENT;
+			/* Set the lpfc data pending flag */
+			set_bit(LPFC_DATA_READY, &phba->data_flags);
 		} else {
 			pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
 			lpfc_sli_handle_slow_ring_event(phba, pring,
@@ -459,69 +453,29 @@ lpfc_work_done(struct lpfc_hba *phba)
 	lpfc_work_list_done(phba);
 }
 
-static int
-check_work_wait_done(struct lpfc_hba *phba)
-{
-	struct lpfc_vport *vport;
-	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
-	int rc = 0;
-
-	spin_lock_irq(&phba->hbalock);
-	list_for_each_entry(vport, &phba->port_list, listentry) {
-		if (vport->work_port_events) {
-			rc = 1;
-			break;
-		}
-	}
-	if (rc || phba->work_ha || (!list_empty(&phba->work_list)) ||
-	    kthread_should_stop() || pring->flag & LPFC_DEFERRED_RING_EVENT) {
-		rc = 1;
-		phba->work_found++;
-	} else
-		phba->work_found = 0;
-	spin_unlock_irq(&phba->hbalock);
-	return rc;
-}
-
-
 int
 lpfc_do_work(void *p)
 {
 	struct lpfc_hba *phba = p;
 	int rc;
-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(work_waitq);
 
 	set_user_nice(current, -20);
-	phba->work_wait = &work_waitq;
-	phba->work_found = 0;
+	phba->data_flags = 0;
 
 	while (1) {
-
-		rc = wait_event_interruptible(work_waitq,
-					      check_work_wait_done(phba));
-
+		/* wait and check worker queue activities */
+		rc = wait_event_interruptible(phba->work_waitq,
+					(test_and_clear_bit(LPFC_DATA_READY,
+							    &phba->data_flags)
+					 || kthread_should_stop()));
 		BUG_ON(rc);
 
 		if (kthread_should_stop())
 			break;
 
+		/* Attend pending lpfc data processing */
 		lpfc_work_done(phba);
-
-		/* If there is alot of slow ring work, like during link up
-		 * check_work_wait_done() may cause this thread to not give
-		 * up the CPU for very long periods of time. This may cause
-		 * soft lockups or other problems. To avoid these situations
-		 * give up the CPU here after LPFC_MAX_WORKER_ITERATION
-		 * consecutive iterations.
-		 */
-		if (phba->work_found >= LPFC_MAX_WORKER_ITERATION) {
-			phba->work_found = 0;
-			schedule();
-		}
 	}
-	spin_lock_irq(&phba->hbalock);
-	phba->work_wait = NULL;
-	spin_unlock_irq(&phba->hbalock);
 	return 0;
 }
 
@@ -551,10 +505,10 @@ lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
 
 	spin_lock_irqsave(&phba->hbalock, flags);
 	list_add_tail(&evtp->evt_listp, &phba->work_list);
-	if (phba->work_wait)
-		lpfc_worker_wake_up(phba);
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 
+	lpfc_worker_wake_up(phba);
+
 	return 1;
 }
 
@@ -2636,21 +2590,20 @@ lpfc_disc_timeout(unsigned long ptr)
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
 	struct lpfc_hba   *phba = vport->phba;
+	uint32_t tmo_posted;
 	unsigned long flags = 0;
 
 	if (unlikely(!phba))
 		return;
 
-	if ((vport->work_port_events & WORKER_DISC_TMO) == 0) {
-		spin_lock_irqsave(&vport->work_port_lock, flags);
+	spin_lock_irqsave(&vport->work_port_lock, flags);
+	tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
+	if (!tmo_posted)
 		vport->work_port_events |= WORKER_DISC_TMO;
-		spin_unlock_irqrestore(&vport->work_port_lock, flags);
+	spin_unlock_irqrestore(&vport->work_port_lock, flags);
 
-		spin_lock_irqsave(&phba->hbalock, flags);
-		if (phba->work_wait)
-			lpfc_worker_wake_up(phba);
-		spin_unlock_irqrestore(&phba->hbalock, flags);
-	}
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
 	return;
 }
 
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 6fcddda58512..53cedbafffba 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -551,18 +551,18 @@ static void
 lpfc_hb_timeout(unsigned long ptr)
 {
 	struct lpfc_hba *phba;
+	uint32_t tmo_posted;
 	unsigned long iflag;
 
 	phba = (struct lpfc_hba *)ptr;
 	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
-	if (!(phba->pport->work_port_events & WORKER_HB_TMO))
+	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
+	if (!tmo_posted)
 		phba->pport->work_port_events |= WORKER_HB_TMO;
 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
 
-	spin_lock_irqsave(&phba->hbalock, iflag);
-	if (phba->work_wait)
-		wake_up(phba->work_wait);
-	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
 	return;
 }
 
@@ -2104,6 +2104,9 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 	phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT);
 	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
 
+	/* Initialize the wait queue head for the kernel thread */
+	init_waitqueue_head(&phba->work_waitq);
+
 	/* Startup the kernel thread for this host adapter. */
 	phba->worker_thread = kthread_run(lpfc_do_work, phba,
 				       "lpfc_worker_%d", phba->brd_no);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 3926affaf727..1e88b7a8a451 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -50,6 +50,7 @@ void
 lpfc_adjust_queue_depth(struct lpfc_hba *phba)
 {
 	unsigned long flags;
+	uint32_t evt_posted;
 
 	spin_lock_irqsave(&phba->hbalock, flags);
 	atomic_inc(&phba->num_rsrc_err);
@@ -65,17 +66,13 @@ lpfc_adjust_queue_depth(struct lpfc_hba *phba)
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 
 	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
-	if ((phba->pport->work_port_events &
-		WORKER_RAMP_DOWN_QUEUE) == 0) {
-		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
-	}
+	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
+	if (!evt_posted)
+		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
 	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
 
-	spin_lock_irqsave(&phba->hbalock, flags);
-	if (phba->work_wait)
-		wake_up(phba->work_wait);
-	spin_unlock_irqrestore(&phba->hbalock, flags);
-
+	if (!evt_posted)
+		lpfc_worker_wake_up(phba);
 	return;
 }
 
@@ -89,6 +86,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
 {
 	unsigned long flags;
 	struct lpfc_hba *phba = vport->phba;
+	uint32_t evt_posted;
 	atomic_inc(&phba->num_cmd_success);
 
 	if (vport->cfg_lun_queue_depth <= sdev->queue_depth)
@@ -103,16 +101,14 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 
 	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
-	if ((phba->pport->work_port_events &
-		WORKER_RAMP_UP_QUEUE) == 0) {
-		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
-	}
+	evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
+	if (!evt_posted)
+		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
 	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
 
-	spin_lock_irqsave(&phba->hbalock, flags);
-	if (phba->work_wait)
-		wake_up(phba->work_wait);
-	spin_unlock_irqrestore(&phba->hbalock, flags);
+	if (!evt_posted)
+		lpfc_worker_wake_up(phba);
+	return;
 }
 
 void
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 70a0a9eab211..3dba3a967ed1 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -324,9 +324,7 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 		phba->work_ha |= HA_ERATT;
 		phba->work_hs = HS_FFER3;
 
-		/* hbalock should already be held */
-		if (phba->work_wait)
-			lpfc_worker_wake_up(phba);
+		lpfc_worker_wake_up(phba);
 
 		return NULL;
 	}
@@ -1309,9 +1307,7 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 	phba->work_ha |= HA_ERATT;
 	phba->work_hs = HS_FFER3;
 
-	/* hbalock should already be held */
-	if (phba->work_wait)
-		lpfc_worker_wake_up(phba);
+	lpfc_worker_wake_up(phba);
 
 	return;
 }
@@ -2611,12 +2607,9 @@ lpfc_mbox_timeout(unsigned long ptr)
 		phba->pport->work_port_events |= WORKER_MBOX_TMO;
 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
 
-	if (!tmo_posted) {
-		spin_lock_irqsave(&phba->hbalock, iflag);
-		if (phba->work_wait)
-			lpfc_worker_wake_up(phba);
-		spin_unlock_irqrestore(&phba->hbalock, iflag);
-	}
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
+	return;
 }
 
 void
@@ -3374,8 +3367,12 @@ lpfc_sli_host_down(struct lpfc_vport *vport)
 	for (i = 0; i < psli->num_rings; i++) {
 		pring = &psli->ring[i];
 		prev_pring_flag = pring->flag;
-		if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */
+		/* Only slow rings */
+		if (pring->ringno == LPFC_ELS_RING) {
 			pring->flag |= LPFC_DEFERRED_RING_EVENT;
+			/* Set the lpfc data pending flag */
+			set_bit(LPFC_DATA_READY, &phba->data_flags);
+		}
 		/*
 		 * Error everything on the txq since these iocbs have not been
 		 * given to the FW yet.
@@ -3434,8 +3431,12 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
 	spin_lock_irqsave(&phba->hbalock, flags);
 	for (i = 0; i < psli->num_rings; i++) {
 		pring = &psli->ring[i];
-		if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */
+		/* Only slow rings */
+		if (pring->ringno == LPFC_ELS_RING) {
 			pring->flag |= LPFC_DEFERRED_RING_EVENT;
+			/* Set the lpfc data pending flag */
+			set_bit(LPFC_DATA_READY, &phba->data_flags);
+		}
 
 		/*
 		 * Error everything on the txq since these iocbs have not been
@@ -4159,7 +4160,7 @@ lpfc_intr_handler(int irq, void *dev_id)
4159 "pwork:x%x hawork:x%x wait:x%x", 4160 "pwork:x%x hawork:x%x wait:x%x",
4160 phba->work_ha, work_ha_copy, 4161 phba->work_ha, work_ha_copy,
4161 (uint32_t)((unsigned long) 4162 (uint32_t)((unsigned long)
4162 phba->work_wait)); 4163 &phba->work_waitq));
4163 4164
4164 control &= 4165 control &=
4165 ~(HC_R0INT_ENA << LPFC_ELS_RING); 4166 ~(HC_R0INT_ENA << LPFC_ELS_RING);
@@ -4172,7 +4173,7 @@ lpfc_intr_handler(int irq, void *dev_id)
4172 "x%x hawork:x%x wait:x%x", 4173 "x%x hawork:x%x wait:x%x",
4173 phba->work_ha, work_ha_copy, 4174 phba->work_ha, work_ha_copy,
4174 (uint32_t)((unsigned long) 4175 (uint32_t)((unsigned long)
4175 phba->work_wait)); 4176 &phba->work_waitq));
4176 } 4177 }
4177 spin_unlock(&phba->hbalock); 4178 spin_unlock(&phba->hbalock);
4178 } 4179 }
@@ -4297,9 +4298,8 @@ send_current_mbox:
 
 		spin_lock(&phba->hbalock);
 		phba->work_ha |= work_ha_copy;
-		if (phba->work_wait)
-			lpfc_worker_wake_up(phba);
 		spin_unlock(&phba->hbalock);
+		lpfc_worker_wake_up(phba);
 	}
 
 	ha_copy &= ~(phba->work_ha_mask);