author		James Smart <James.Smart@Emulex.Com>	2008-06-14 22:52:53 -0400
committer	James Bottomley <James.Bottomley@HansenPartnership.com>	2008-07-12 09:22:28 -0400
commit		5e9d9b8276980fc5dfa88ce34f6ec88ce3026232 (patch)
tree		30b495edab629068f929a32f88a66ad705687f34 /drivers/scsi/lpfc/lpfc_scsi.c
parent		0d2b6b83030d6a88cbf7db57f84f2daf0e0b251b (diff)
[SCSI] lpfc 8.2.7 : Rework the worker thread
Rework of the worker thread to make it more efficient. Make a finer-grain notification of pending work so less time is spent checking conditions. Also made other general cleanups.

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
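To make the "finer-grain notification" concrete before the diff below, here is a self-contained sketch of the pattern the patch applies at its two call sites: snapshot the pending-work bit under the lock, set it if it was clear, and wake the worker thread only on that first posting so repeated events do not trigger redundant wake-ups. All names here (worker_ctx, post_ramp_down_event) are invented for the illustration, and a pthread condition variable stands in for the driver's worker wait queue; this is not lpfc code.

/*
 * Illustration only: post a work event and wake the worker thread
 * only when the event was not already pending.
 */
#include <pthread.h>
#include <stdint.h>

#define WORKER_RAMP_DOWN_QUEUE 0x1

struct worker_ctx {
	pthread_mutex_t lock;
	pthread_cond_t  wake;
	uint32_t        work_events;	/* pending work bits */
};

static void post_ramp_down_event(struct worker_ctx *ctx)
{
	uint32_t evt_posted;

	pthread_mutex_lock(&ctx->lock);
	/* Snapshot the bit before setting it, as the patch does. */
	evt_posted = ctx->work_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		ctx->work_events |= WORKER_RAMP_DOWN_QUEUE;
	pthread_mutex_unlock(&ctx->lock);

	/* Wake the worker only if the event was newly posted. */
	if (!evt_posted)
		pthread_cond_signal(&ctx->wake);
}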
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_scsi.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c  |  26
1 file changed, 11 insertions(+), 15 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 3926affaf727..1e88b7a8a451 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -50,6 +50,7 @@ void
 lpfc_adjust_queue_depth(struct lpfc_hba *phba)
 {
 	unsigned long flags;
+	uint32_t evt_posted;
 
 	spin_lock_irqsave(&phba->hbalock, flags);
 	atomic_inc(&phba->num_rsrc_err);
@@ -65,17 +66,13 @@ lpfc_adjust_queue_depth(struct lpfc_hba *phba)
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 
 	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
-	if ((phba->pport->work_port_events &
-		WORKER_RAMP_DOWN_QUEUE) == 0) {
+	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
+	if (!evt_posted)
 		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
-	}
 	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
 
-	spin_lock_irqsave(&phba->hbalock, flags);
-	if (phba->work_wait)
-		wake_up(phba->work_wait);
-	spin_unlock_irqrestore(&phba->hbalock, flags);
-
+	if (!evt_posted)
+		lpfc_worker_wake_up(phba);
 	return;
 }
 
@@ -89,6 +86,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
 {
 	unsigned long flags;
 	struct lpfc_hba *phba = vport->phba;
+	uint32_t evt_posted;
 	atomic_inc(&phba->num_cmd_success);
 
 	if (vport->cfg_lun_queue_depth <= sdev->queue_depth)
@@ -103,16 +101,14 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 
 	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
-	if ((phba->pport->work_port_events &
-		WORKER_RAMP_UP_QUEUE) == 0) {
+	evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
+	if (!evt_posted)
 		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
-	}
 	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
 
-	spin_lock_irqsave(&phba->hbalock, flags);
-	if (phba->work_wait)
-		wake_up(phba->work_wait);
-	spin_unlock_irqrestore(&phba->hbalock, flags);
+	if (!evt_posted)
+		lpfc_worker_wake_up(phba);
+	return;
 }
 
 void
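Both call sites above now defer to lpfc_worker_wake_up() instead of open-coding the wake-up under hbalock. That helper is not part of this file's diff; the sketch below is a hypothetical reconstruction based only on the sequence the hunks remove (check phba->work_wait under hbalock and wake it), and the real helper added elsewhere by this patch may differ, for instance by dropping the lock acquisition as part of the cleanup.

/*
 * Hypothetical reconstruction -- not the actual helper from the patch.
 * It mirrors the open-coded wake-up removed from both call sites:
 * under hbalock, wake the worker thread if one has registered a
 * wait queue in phba->work_wait.
 */
static void
lpfc_worker_wake_up_sketch(struct lpfc_hba *phba)
{
	unsigned long flags;

	spin_lock_irqsave(&phba->hbalock, flags);
	if (phba->work_wait)
		wake_up(phba->work_wait);
	spin_unlock_irqrestore(&phba->hbalock, flags);
}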