author    James Smart <James.Smart@Emulex.Com>    2008-06-14 22:52:53 -0400
committer James Bottomley <James.Bottomley@HansenPartnership.com>    2008-07-12 09:22:28 -0400
commit    5e9d9b8276980fc5dfa88ce34f6ec88ce3026232 (patch)
tree      30b495edab629068f929a32f88a66ad705687f34 /drivers/scsi/lpfc/lpfc_hbadisc.c
parent    0d2b6b83030d6a88cbf7db57f84f2daf0e0b251b (diff)
[SCSI] lpfc 8.2.7 : Rework the worker thread
Rework of the worker thread to make it more efficient. Make a finer-grained
notification of pending work so less time is spent checking conditions. Also
make other general cleanups.

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_hbadisc.c')
-rw-r--r--    drivers/scsi/lpfc/lpfc_hbadisc.c    93
1 file changed, 23 insertions(+), 70 deletions(-)
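Before the diff itself, a note on the shape of the change: the old worker
polled check_work_wait_done(), which re-walked the vport list and ring flags
under the hba lock on every wakeup; the new scheme has producers set a single
LPFC_DATA_READY bit in phba->data_flags and wake phba->work_waitq, and the
worker atomically test-and-clears that bit before doing work. The following is
a minimal, runnable userspace sketch of that producer/worker handshake,
assuming a pthread mutex and condition variable in place of the kernel
waitqueue and bitops; every name in it (worker_wake_up, do_work, data_ready,
should_stop) is illustrative, not lpfc code.

/*
 * Userspace analogue of the new wakeup scheme: producers set one "data
 * ready" flag and signal; the worker atomically consumes the flag before
 * handling work.  Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  waitq = PTHREAD_COND_INITIALIZER;
static bool data_ready;        /* stands in for the LPFC_DATA_READY bit */
static bool should_stop;       /* stands in for kthread_should_stop() */

/* What lpfc_worker_wake_up() presumably does: set the flag, then wake. */
static void worker_wake_up(void)
{
	pthread_mutex_lock(&lock);
	data_ready = true;
	pthread_cond_signal(&waitq);
	pthread_mutex_unlock(&lock);
}

/* Mirrors the reworked lpfc_do_work() loop. */
static void *do_work(void *unused)
{
	(void)unused;
	for (;;) {
		pthread_mutex_lock(&lock);
		/* Sleep until there is pending work or we are told to stop. */
		while (!data_ready && !should_stop)
			pthread_cond_wait(&waitq, &lock);
		data_ready = false;            /* test_and_clear_bit() analogue */
		bool stop = should_stop;
		pthread_mutex_unlock(&lock);

		if (stop)
			break;
		printf("worker: handling pending work\n");
	}
	return NULL;
}

int main(void)
{
	pthread_t worker;

	pthread_create(&worker, NULL, do_work, NULL);
	worker_wake_up();                      /* post some work */
	sleep(1);
	pthread_mutex_lock(&lock);             /* ask the worker to exit */
	should_stop = true;
	pthread_cond_signal(&waitq);
	pthread_mutex_unlock(&lock);
	pthread_join(worker, NULL);
	return 0;
}

The essential property, in both the sketch and the patch, is that the flag is
consumed atomically with respect to the wakeup, so no wakeup is lost and the
worker never has to re-scan shared state just to decide whether work is
pending.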
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index f3dc19dfac5b..ba4873c9e2c3 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -153,11 +153,11 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
 	 * count until this queued work is done
 	 */
 	evtp->evt_arg1 = lpfc_nlp_get(ndlp);
-	evtp->evt = LPFC_EVT_DEV_LOSS;
-	list_add_tail(&evtp->evt_listp, &phba->work_list);
-	if (phba->work_wait)
-		wake_up(phba->work_wait);
-
+	if (evtp->evt_arg1) {
+		evtp->evt = LPFC_EVT_DEV_LOSS;
+		list_add_tail(&evtp->evt_listp, &phba->work_list);
+		lpfc_worker_wake_up(phba);
+	}
 	spin_unlock_irq(&phba->hbalock);
 
 	return;
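This first hunk also hardens the event posting: lpfc_nlp_get() can return NULL
once the node is being torn down, and the old code queued the event regardless;
the new code only queues it when the reference was actually taken. A toy,
single-threaded illustration of that take-a-reference-before-queueing rule (all
names hypothetical, refcounting deliberately simplified):

#include <stdio.h>

/* Toy node with a refcount; stands in for struct lpfc_nodelist. */
struct node {
	int refs;                /* 0 means the node is being freed */
};

/* Like lpfc_nlp_get(): take a reference, or fail if teardown started. */
static struct node *node_get(struct node *n)
{
	if (n->refs == 0)
		return NULL;     /* too late: node is going away */
	n->refs++;
	return n;
}

/* Queue deferred work only if we managed to pin the node. */
static int queue_dev_loss_event(struct node *n)
{
	struct node *pinned = node_get(n);

	if (!pinned)
		return 0;        /* drop the event rather than queue a stale node */
	printf("queued dev-loss work, refs=%d\n", pinned->refs);
	return 1;
}

int main(void)
{
	struct node live = { .refs = 1 }, dying = { .refs = 0 };

	queue_dev_loss_event(&live);     /* queued */
	queue_dev_loss_event(&dying);    /* skipped */
	return 0;
}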
@@ -276,14 +276,6 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
 		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
 }
 
-
-void
-lpfc_worker_wake_up(struct lpfc_hba *phba)
-{
-	wake_up(phba->work_wait);
-	return;
-}
-
 static void
 lpfc_work_list_done(struct lpfc_hba *phba)
 {
@@ -429,6 +421,8 @@ lpfc_work_done(struct lpfc_hba *phba)
 	    || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
 		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
 			pring->flag |= LPFC_DEFERRED_RING_EVENT;
+			/* Set the lpfc data pending flag */
+			set_bit(LPFC_DATA_READY, &phba->data_flags);
 		} else {
 			pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
 			lpfc_sli_handle_slow_ring_event(phba, pring,
@@ -459,69 +453,29 @@ lpfc_work_done(struct lpfc_hba *phba)
 	lpfc_work_list_done(phba);
 }
 
-static int
-check_work_wait_done(struct lpfc_hba *phba)
-{
-	struct lpfc_vport *vport;
-	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
-	int rc = 0;
-
-	spin_lock_irq(&phba->hbalock);
-	list_for_each_entry(vport, &phba->port_list, listentry) {
-		if (vport->work_port_events) {
-			rc = 1;
-			break;
-		}
-	}
-	if (rc || phba->work_ha || (!list_empty(&phba->work_list)) ||
-	    kthread_should_stop() || pring->flag & LPFC_DEFERRED_RING_EVENT) {
-		rc = 1;
-		phba->work_found++;
-	} else
-		phba->work_found = 0;
-	spin_unlock_irq(&phba->hbalock);
-	return rc;
-}
-
-
 int
 lpfc_do_work(void *p)
 {
 	struct lpfc_hba *phba = p;
 	int rc;
-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(work_waitq);
 
 	set_user_nice(current, -20);
-	phba->work_wait = &work_waitq;
-	phba->work_found = 0;
+	phba->data_flags = 0;
 
 	while (1) {
-
-		rc = wait_event_interruptible(work_waitq,
-					      check_work_wait_done(phba));
-
+		/* wait and check worker queue activities */
+		rc = wait_event_interruptible(phba->work_waitq,
+					(test_and_clear_bit(LPFC_DATA_READY,
+							    &phba->data_flags)
+					 || kthread_should_stop()));
 		BUG_ON(rc);
 
 		if (kthread_should_stop())
 			break;
 
+		/* Attend pending lpfc data processing */
 		lpfc_work_done(phba);
-
-		/* If there is alot of slow ring work, like during link up
-		 * check_work_wait_done() may cause this thread to not give
-		 * up the CPU for very long periods of time. This may cause
-		 * soft lockups or other problems. To avoid these situations
-		 * give up the CPU here after LPFC_MAX_WORKER_ITERATION
-		 * consecutive iterations.
-		 */
-		if (phba->work_found >= LPFC_MAX_WORKER_ITERATION) {
-			phba->work_found = 0;
-			schedule();
-		}
 	}
-	spin_lock_irq(&phba->hbalock);
-	phba->work_wait = NULL;
-	spin_unlock_irq(&phba->hbalock);
 	return 0;
 }
 
@@ -551,10 +505,10 @@ lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
 
 	spin_lock_irqsave(&phba->hbalock, flags);
 	list_add_tail(&evtp->evt_listp, &phba->work_list);
-	if (phba->work_wait)
-		lpfc_worker_wake_up(phba);
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 
+	lpfc_worker_wake_up(phba);
+
 	return 1;
 }
 
@@ -2636,21 +2590,20 @@ lpfc_disc_timeout(unsigned long ptr)
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
 	struct lpfc_hba   *phba = vport->phba;
+	uint32_t tmo_posted;
 	unsigned long flags = 0;
 
 	if (unlikely(!phba))
 		return;
 
-	if ((vport->work_port_events & WORKER_DISC_TMO) == 0) {
-		spin_lock_irqsave(&vport->work_port_lock, flags);
+	spin_lock_irqsave(&vport->work_port_lock, flags);
+	tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
+	if (!tmo_posted)
 		vport->work_port_events |= WORKER_DISC_TMO;
-		spin_unlock_irqrestore(&vport->work_port_lock, flags);
+	spin_unlock_irqrestore(&vport->work_port_lock, flags);
 
-		spin_lock_irqsave(&phba->hbalock, flags);
-		if (phba->work_wait)
-			lpfc_worker_wake_up(phba);
-		spin_unlock_irqrestore(&phba->hbalock, flags);
-	}
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
 	return;
 }
 
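The lpfc_disc_timeout() hunk above uses a test-then-set under
vport->work_port_lock so that only the caller that first posts WORKER_DISC_TMO
issues a wakeup; a rearming timer cannot flood the worker with redundant
wakeups. Here is a minimal sketch of that "post once" idiom, using a C11
atomic fetch-or in place of the driver's spinlocked read-modify-write; apart
from the WORKER_DISC_TMO bit name, everything here is hypothetical:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint work_port_events;   /* stands in for vport->work_port_events */
#define WORKER_DISC_TMO 0x1            /* same bit name as the driver */

static void worker_wake_up(void)
{
	puts("wake worker");               /* placeholder for the real wakeup */
}

static void disc_timeout(void)
{
	/* Atomically set the bit and learn whether it was already posted. */
	unsigned int prev = atomic_fetch_or(&work_port_events, WORKER_DISC_TMO);

	if (!(prev & WORKER_DISC_TMO))     /* first poster does the wakeup */
		worker_wake_up();
}

int main(void)
{
	disc_timeout();                    /* wakes the worker */
	disc_timeout();                    /* already posted: no second wakeup */
	return 0;
}

The driver gets the same effect with its existing spinlock; the atomic
fetch-or above merely compresses the lock/test/set/unlock sequence into one
operation for the sake of a short, runnable example.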