path: root/drivers/scsi/lpfc/lpfc_scsi.c
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_scsi.c')
-rw-r--r--	drivers/scsi/lpfc/lpfc_scsi.c | 557
1 file changed, 342 insertions(+), 215 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 9a12d05e99e4..8f45bbc42126 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -37,10 +37,158 @@
 #include "lpfc.h"
 #include "lpfc_logmsg.h"
 #include "lpfc_crtn.h"
+#include "lpfc_vport.h"
 
 #define LPFC_RESET_WAIT  2
 #define LPFC_ABORT_WAIT  2
 
+/*
+ * This function is called with no lock held when there is a resource
+ * error in driver or in firmware.
+ */
+void
+lpfc_adjust_queue_depth(struct lpfc_hba *phba)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	atomic_inc(&phba->num_rsrc_err);
+	phba->last_rsrc_error_time = jiffies;
+
+	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		return;
+	}
+
+	phba->last_ramp_down_time = jiffies;
+
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
+	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
+	if ((phba->pport->work_port_events &
+		WORKER_RAMP_DOWN_QUEUE) == 0) {
+		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
+	}
+	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	if (phba->work_wait)
+		wake_up(phba->work_wait);
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
+	return;
+}
+
+/*
+ * This function is called with no lock held when there is a successful
+ * SCSI command completion.
+ */
+static inline void
+lpfc_rampup_queue_depth(struct lpfc_hba *phba,
+			struct scsi_device *sdev)
+{
+	unsigned long flags;
+	atomic_inc(&phba->num_cmd_success);
+
+	if (phba->cfg_lun_queue_depth <= sdev->queue_depth)
+		return;
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
+	    ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL ) > jiffies)) {
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		return;
+	}
+
+	phba->last_ramp_up_time = jiffies;
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
+	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
+	if ((phba->pport->work_port_events &
+		WORKER_RAMP_UP_QUEUE) == 0) {
+		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
+	}
+	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	if (phba->work_wait)
+		wake_up(phba->work_wait);
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+}
+
+void
+lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport;
+	struct Scsi_Host  *host;
+	struct scsi_device *sdev;
+	unsigned long new_queue_depth;
+	unsigned long num_rsrc_err, num_cmd_success;
+
+	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
+	num_cmd_success = atomic_read(&phba->num_cmd_success);
+
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry(vport, &phba->port_list, listentry) {
+		host = lpfc_shost_from_vport(vport);
+		if (!scsi_host_get(host))
+			continue;
+
+		spin_unlock_irq(&phba->hbalock);
+
+		shost_for_each_device(sdev, host) {
+			new_queue_depth = sdev->queue_depth * num_rsrc_err /
+				(num_rsrc_err + num_cmd_success);
+			if (!new_queue_depth)
+				new_queue_depth = sdev->queue_depth - 1;
+			else
+				new_queue_depth =
+					sdev->queue_depth - new_queue_depth;
+
+			if (sdev->ordered_tags)
+				scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
+							new_queue_depth);
+			else
+				scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
+							new_queue_depth);
+		}
+		spin_lock_irq(&phba->hbalock);
+		scsi_host_put(host);
+	}
+	spin_unlock_irq(&phba->hbalock);
+	atomic_set(&phba->num_rsrc_err, 0);
+	atomic_set(&phba->num_cmd_success, 0);
+}
+
+void
+lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport;
+	struct Scsi_Host  *host;
+	struct scsi_device *sdev;
+
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry(vport, &phba->port_list, listentry) {
+		host = lpfc_shost_from_vport(vport);
+		if (!scsi_host_get(host))
+			continue;
+
+		spin_unlock_irq(&phba->hbalock);
+		shost_for_each_device(sdev, host) {
+			if (sdev->ordered_tags)
+				scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
+							sdev->queue_depth+1);
+			else
+				scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
+							sdev->queue_depth+1);
+		}
+		spin_lock_irq(&phba->hbalock);
+		scsi_host_put(host);
+	}
+	spin_unlock_irq(&phba->hbalock);
+	atomic_set(&phba->num_rsrc_err, 0);
+	atomic_set(&phba->num_cmd_success, 0);
+}
 
 /*
  * This routine allocates a scsi buffer, which contains all the necessary
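The ramp-down handler above shrinks every LUN queue depth in proportion to the share of completions that recently failed for lack of resources, while the ramp-up event is only raised for devices still sitting below cfg_lun_queue_depth. A minimal standalone sketch of the back-off arithmetic (plain C, no lpfc or SCSI midlayer types; the figures in the trailing comment are made up for illustration):

/*
 * Sketch of the proportional back-off in lpfc_ramp_down_queue_handler().
 * As in the driver, at least one resource error has been counted by the
 * time this runs, so the divisor cannot be zero.
 */
static unsigned long ramp_down_depth(unsigned long depth,
				     unsigned long num_rsrc_err,
				     unsigned long num_cmd_success)
{
	unsigned long cut = depth * num_rsrc_err /
				(num_rsrc_err + num_cmd_success);

	/* Integer division can round the cut to zero; still back off by one. */
	return cut ? depth - cut : depth - 1;
}

/*
 * Example: depth 32 with 8 resource errors against 24 successes cuts
 * 32 * 8 / 32 = 8 slots, leaving 24; with 1 error against 999 successes
 * the cut rounds to zero and the depth only steps down to 31.
 */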
@@ -51,8 +199,9 @@
  * and the BPL BDE is setup in the IOCB.
  */
 static struct lpfc_scsi_buf *
-lpfc_new_scsi_buf(struct lpfc_hba * phba)
+lpfc_new_scsi_buf(struct lpfc_vport *vport)
 {
+	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_scsi_buf *psb;
 	struct ulp_bde64 *bpl;
 	IOCB_t *iocb;
@@ -63,7 +212,6 @@ lpfc_new_scsi_buf(struct lpfc_hba * phba)
 	if (!psb)
 		return NULL;
 	memset(psb, 0, sizeof (struct lpfc_scsi_buf));
-	psb->scsi_hba = phba;
 
 	/*
 	 * Get memory from the pci pool to map the virt space to pci bus space
@@ -155,7 +303,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
 }
 
 static void
-lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
+lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
 {
 	unsigned long iflag = 0;
 
@@ -166,7 +314,7 @@ lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
 }
 
 static int
-lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
+lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 {
 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
 	struct scatterlist *sgel = NULL;
@@ -175,8 +323,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
 	dma_addr_t physaddr;
 	uint32_t i, num_bde = 0;
-	int datadir = scsi_cmnd->sc_data_direction;
-	int dma_error;
+	int nseg, datadir = scsi_cmnd->sc_data_direction;
 
 	/*
 	 * There are three possibilities here - use scatter-gather segment, use
@@ -185,26 +332,26 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
 	 * data bde entry.
 	 */
 	bpl += 2;
-	if (scsi_cmnd->use_sg) {
+	if (scsi_sg_count(scsi_cmnd)) {
 		/*
 		 * The driver stores the segment count returned from pci_map_sg
 		 * because this a count of dma-mappings used to map the use_sg
 		 * pages.  They are not guaranteed to be the same for those
 		 * architectures that implement an IOMMU.
 		 */
-		sgel = (struct scatterlist *)scsi_cmnd->request_buffer;
-		lpfc_cmd->seg_cnt = dma_map_sg(&phba->pcidev->dev, sgel,
-						scsi_cmnd->use_sg, datadir);
-		if (lpfc_cmd->seg_cnt == 0)
+
+		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
+				  scsi_sg_count(scsi_cmnd), datadir);
+		if (unlikely(!nseg))
 			return 1;
 
+		lpfc_cmd->seg_cnt = nseg;
 		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
 			printk(KERN_ERR "%s: Too many sg segments from "
 			       "dma_map_sg.  Config %d, seg_cnt %d",
 			       __FUNCTION__, phba->cfg_sg_seg_cnt,
 			       lpfc_cmd->seg_cnt);
-			dma_unmap_sg(&phba->pcidev->dev, sgel,
-				     lpfc_cmd->seg_cnt, datadir);
+			scsi_dma_unmap(scsi_cmnd);
 			return 1;
 		}
 
@@ -214,7 +361,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
 		 * single scsi command.  Just run through the seg_cnt and format
 		 * the bde's.
 		 */
-		for (i = 0; i < lpfc_cmd->seg_cnt; i++) {
+		scsi_for_each_sg(scsi_cmnd, sgel, nseg, i) {
 			physaddr = sg_dma_address(sgel);
 			bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
 			bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
@@ -225,34 +372,8 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
 				bpl->tus.f.bdeFlags = BUFF_USE_RCV;
 			bpl->tus.w = le32_to_cpu(bpl->tus.w);
 			bpl++;
-			sgel++;
 			num_bde++;
 		}
-	} else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
-		physaddr = dma_map_single(&phba->pcidev->dev,
-					  scsi_cmnd->request_buffer,
-					  scsi_cmnd->request_bufflen,
-					  datadir);
-		dma_error = dma_mapping_error(physaddr);
-		if (dma_error) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-					"%d:0718 Unable to dma_map_single "
-					"request_buffer: x%x\n",
-					phba->brd_no, dma_error);
-			return 1;
-		}
-
-		lpfc_cmd->nonsg_phys = physaddr;
-		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
-		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
-		bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen;
-		if (datadir == DMA_TO_DEVICE)
-			bpl->tus.f.bdeFlags = 0;
-		else
-			bpl->tus.f.bdeFlags = BUFF_USE_RCV;
-		bpl->tus.w = le32_to_cpu(bpl->tus.w);
-		num_bde = 1;
-		bpl++;
 	}
 
 	/*
@@ -266,7 +387,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
 		       (num_bde * sizeof (struct ulp_bde64));
 	iocb_cmd->ulpBdeCount = 1;
 	iocb_cmd->ulpLe = 1;
-	fcp_cmnd->fcpDl = be32_to_cpu(scsi_cmnd->request_bufflen);
+	fcp_cmnd->fcpDl = be32_to_cpu(scsi_bufflen(scsi_cmnd));
 	return 0;
 }
 
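The hunks above move lpfc_scsi_prep_dma_buf() off the old scsi_cmnd request_buffer/use_sg fields and onto the midlayer data accessors; because the midlayer now hands every data transfer to low-level drivers as a scatterlist, the whole dma_map_single() branch becomes dead code and is deleted. Stripped of the BPL bookkeeping, the accessor pattern being adopted looks roughly like this (a sketch, not the patch itself; build_bde() is a hypothetical stand-in for the ulp_bde64 setup shown above):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

/* Hypothetical per-segment descriptor setup; lpfc really fills
 * ulp_bde64 entries as in the hunk above. */
static void build_bde(dma_addr_t addr, unsigned int len)
{
}

static int prep_dma(struct device *dev, struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int nseg, i;

	if (!scsi_sg_count(cmd))
		return 0;	/* no data phase */

	/* One mapping call covers what used to be both branches. */
	nseg = dma_map_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd),
			  cmd->sc_data_direction);
	if (unlikely(!nseg))
		return 1;

	scsi_for_each_sg(cmd, sg, nseg, i)
		build_bde(sg_dma_address(sg), sg_dma_len(sg));

	return 0;
}

Teardown shrinks to the single call the patch also switches to: scsi_dma_unmap(cmd) releases the mapping however many segments were used.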
@@ -279,26 +400,20 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
 	 * a request buffer, but did not request use_sg.  There is a third
 	 * case, but it does not require resource deallocation.
 	 */
-	if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
-		dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
-				psb->seg_cnt, psb->pCmd->sc_data_direction);
-	} else {
-		if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
-			dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
-				psb->pCmd->request_bufflen,
-				psb->pCmd->sc_data_direction);
-		}
-	}
+	if (psb->seg_cnt > 0)
+		scsi_dma_unmap(psb->pCmd);
 }
 
 static void
-lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
+lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
+		    struct lpfc_iocbq *rsp_iocb)
 {
 	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
 	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
 	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
-	struct lpfc_hba *phba = lpfc_cmd->scsi_hba;
+	struct lpfc_hba *phba = vport->phba;
 	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
+	uint32_t vpi = vport->vpi;
 	uint32_t resp_info = fcprsp->rspStatus2;
 	uint32_t scsi_status = fcprsp->rspStatus3;
 	uint32_t *lp;
@@ -331,9 +446,9 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
 		logit = LOG_FCP;
 
 	lpfc_printf_log(phba, KERN_WARNING, logit,
-			"%d:0730 FCP command x%x failed: x%x SNS x%x x%x "
+			"%d (%d):0730 FCP command x%x failed: x%x SNS x%x x%x "
 			"Data: x%x x%x x%x x%x x%x\n",
-			phba->brd_no, cmnd->cmnd[0], scsi_status,
+			phba->brd_no, vpi, cmnd->cmnd[0], scsi_status,
 			be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
 			be32_to_cpu(fcprsp->rspResId),
 			be32_to_cpu(fcprsp->rspSnsLen),
@@ -349,15 +464,16 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
 		}
 	}
 
-	cmnd->resid = 0;
+	scsi_set_resid(cmnd, 0);
 	if (resp_info & RESID_UNDER) {
-		cmnd->resid = be32_to_cpu(fcprsp->rspResId);
+		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
 
 		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
-				"%d:0716 FCP Read Underrun, expected %d, "
-				"residual %d Data: x%x x%x x%x\n", phba->brd_no,
-				be32_to_cpu(fcpcmd->fcpDl), cmnd->resid,
-				fcpi_parm, cmnd->cmnd[0], cmnd->underflow);
+				"%d (%d):0716 FCP Read Underrun, expected %d, "
+				"residual %d Data: x%x x%x x%x\n",
+				phba->brd_no, vpi, be32_to_cpu(fcpcmd->fcpDl),
+				scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
+				cmnd->underflow);
 
 		/*
 		 * If there is an under run check if under run reported by
@@ -366,15 +482,16 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
 		 */
 		if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
 		    fcpi_parm &&
-		    (cmnd->resid != fcpi_parm)) {
+		    (scsi_get_resid(cmnd) != fcpi_parm)) {
 			lpfc_printf_log(phba, KERN_WARNING,
 					LOG_FCP | LOG_FCP_ERROR,
-					"%d:0735 FCP Read Check Error and Underrun "
-					"Data: x%x x%x x%x x%x\n", phba->brd_no,
-					be32_to_cpu(fcpcmd->fcpDl),
-					cmnd->resid,
-					fcpi_parm, cmnd->cmnd[0]);
-			cmnd->resid = cmnd->request_bufflen;
+					"%d (%d):0735 FCP Read Check Error "
+					"and Underrun Data: x%x x%x x%x x%x\n",
+					phba->brd_no, vpi,
+					be32_to_cpu(fcpcmd->fcpDl),
+					scsi_get_resid(cmnd), fcpi_parm,
+					cmnd->cmnd[0]);
+			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
 			host_status = DID_ERROR;
 		}
 		/*
@@ -385,22 +502,23 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
 		 */
 		if (!(resp_info & SNS_LEN_VALID) &&
 		    (scsi_status == SAM_STAT_GOOD) &&
-		    (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) {
+		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
+		     < cmnd->underflow)) {
 			lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
-					"%d:0717 FCP command x%x residual "
+					"%d (%d):0717 FCP command x%x residual "
 					"underrun converted to error "
-					"Data: x%x x%x x%x\n", phba->brd_no,
-					cmnd->cmnd[0], cmnd->request_bufflen,
-					cmnd->resid, cmnd->underflow);
-
+					"Data: x%x x%x x%x\n",
+					phba->brd_no, vpi, cmnd->cmnd[0],
+					scsi_bufflen(cmnd),
+					scsi_get_resid(cmnd), cmnd->underflow);
 			host_status = DID_ERROR;
 		}
 	} else if (resp_info & RESID_OVER) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
-				"%d:0720 FCP command x%x residual "
+				"%d (%d):0720 FCP command x%x residual "
 				"overrun error. Data: x%x x%x \n",
-				phba->brd_no, cmnd->cmnd[0],
-				cmnd->request_bufflen, cmnd->resid);
+				phba->brd_no, vpi, cmnd->cmnd[0],
+				scsi_bufflen(cmnd), scsi_get_resid(cmnd));
 		host_status = DID_ERROR;
 
 		/*
@@ -410,13 +528,14 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
 	} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
 		   (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
-				"%d:0734 FCP Read Check Error Data: "
-				"x%x x%x x%x x%x\n", phba->brd_no,
-				be32_to_cpu(fcpcmd->fcpDl),
-				be32_to_cpu(fcprsp->rspResId),
-				fcpi_parm, cmnd->cmnd[0]);
+				"%d (%d):0734 FCP Read Check Error Data: "
+				"x%x x%x x%x x%x\n",
+				phba->brd_no, vpi,
+				be32_to_cpu(fcpcmd->fcpDl),
+				be32_to_cpu(fcprsp->rspResId),
+				fcpi_parm, cmnd->cmnd[0]);
 		host_status = DID_ERROR;
-		cmnd->resid = cmnd->request_bufflen;
+		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
 	}
 
  out:
@@ -429,9 +548,13 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 {
 	struct lpfc_scsi_buf *lpfc_cmd =
 		(struct lpfc_scsi_buf *) pIocbIn->context1;
+	struct lpfc_vport *vport = pIocbIn->vport;
 	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
 	struct lpfc_nodelist *pnode = rdata->pnode;
 	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
+	uint32_t vpi = (lpfc_cmd->cur_iocbq.vport
+			? lpfc_cmd->cur_iocbq.vport->vpi
+			: 0);
 	int result;
 	struct scsi_device *sdev, *tmp_sdev;
 	int depth = 0;
@@ -447,22 +570,31 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 		lpfc_cmd->status = IOSTAT_DEFAULT;
 
 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
-				"%d:0729 FCP cmd x%x failed <%d/%d> status: "
-				"x%x result: x%x Data: x%x x%x\n",
-				phba->brd_no, cmd->cmnd[0], cmd->device->id,
-				cmd->device->lun, lpfc_cmd->status,
-				lpfc_cmd->result, pIocbOut->iocb.ulpContext,
+				"%d (%d):0729 FCP cmd x%x failed <%d/%d> "
+				"status: x%x result: x%x Data: x%x x%x\n",
+				phba->brd_no, vpi, cmd->cmnd[0],
+				cmd->device ? cmd->device->id : 0xffff,
+				cmd->device ? cmd->device->lun : 0xffff,
+				lpfc_cmd->status, lpfc_cmd->result,
+				pIocbOut->iocb.ulpContext,
 				lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
 
 		switch (lpfc_cmd->status) {
 		case IOSTAT_FCP_RSP_ERROR:
 			/* Call FCP RSP handler to determine result */
-			lpfc_handle_fcp_err(lpfc_cmd,pIocbOut);
+			lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
 			break;
 		case IOSTAT_NPORT_BSY:
 		case IOSTAT_FABRIC_BSY:
 			cmd->result = ScsiResult(DID_BUS_BUSY, 0);
 			break;
+		case IOSTAT_LOCAL_REJECT:
+			if (lpfc_cmd->result == RJT_UNAVAIL_PERM ||
+			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
+			    lpfc_cmd->result == RJT_LOGIN_REQUIRED) {
+				cmd->result = ScsiResult(DID_REQUEUE, 0);
+				break;
+			} /* else: fall through */
 		default:
 			cmd->result = ScsiResult(DID_ERROR, 0);
 			break;
@@ -479,11 +611,12 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 		uint32_t *lp = (uint32_t *)cmd->sense_buffer;
 
 		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
-				"%d:0710 Iodone <%d/%d> cmd %p, error x%x "
-				"SNS x%x x%x Data: x%x x%x\n",
-				phba->brd_no, cmd->device->id,
+				"%d (%d):0710 Iodone <%d/%d> cmd %p, error "
+				"x%x SNS x%x x%x Data: x%x x%x\n",
+				phba->brd_no, vpi, cmd->device->id,
 				cmd->device->lun, cmd, cmd->result,
-				*lp, *(lp + 3), cmd->retries, cmd->resid);
+				*lp, *(lp + 3), cmd->retries,
+				scsi_get_resid(cmd));
 	}
 
 	result = cmd->result;
@@ -496,6 +629,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 		return;
 	}
 
+
+	if (!result)
+		lpfc_rampup_queue_depth(phba, sdev);
+
 	if (!result && pnode != NULL &&
 	    ((jiffies - pnode->last_ramp_up_time) >
 		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
@@ -534,7 +671,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 					tmp_sdev->queue_depth - 1);
 		}
 		/*
-		 * The queue depth cannot be lowered any more. 
+		 * The queue depth cannot be lowered any more.
 		 * Modify the returned error code to store
 		 * the final depth value set by
 		 * scsi_track_queue_full.
@@ -544,8 +681,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 
 		if (depth) {
 			lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
-				"%d:0711 detected queue full - lun queue depth "
-				" adjusted to %d.\n", phba->brd_no, depth);
+					"%d (%d):0711 detected queue full - "
+					"lun queue depth adjusted to %d.\n",
+					phba->brd_no, vpi, depth);
 		}
 	}
 
@@ -553,9 +691,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 }
 
 static void
-lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
+lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 		    struct lpfc_nodelist *pnode)
 {
+	struct lpfc_hba *phba = vport->phba;
 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
@@ -592,7 +731,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
 	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
 	 * data bde entry.
 	 */
-	if (scsi_cmnd->use_sg) {
+	if (scsi_sg_count(scsi_cmnd)) {
 		if (datadir == DMA_TO_DEVICE) {
 			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
 			iocb_cmd->un.fcpi.fcpi_parm = 0;
@@ -602,23 +741,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
 		} else {
 			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
 			iocb_cmd->ulpPU = PARM_READ_CHECK;
-			iocb_cmd->un.fcpi.fcpi_parm =
-				scsi_cmnd->request_bufflen;
-			fcp_cmnd->fcpCntl3 = READ_DATA;
-			phba->fc4InputRequests++;
-		}
-	} else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
-		if (datadir == DMA_TO_DEVICE) {
-			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
-			iocb_cmd->un.fcpi.fcpi_parm = 0;
-			iocb_cmd->ulpPU = 0;
-			fcp_cmnd->fcpCntl3 = WRITE_DATA;
-			phba->fc4OutputRequests++;
-		} else {
-			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
-			iocb_cmd->ulpPU = PARM_READ_CHECK;
-			iocb_cmd->un.fcpi.fcpi_parm =
-				scsi_cmnd->request_bufflen;
+			iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
 			fcp_cmnd->fcpCntl3 = READ_DATA;
 			phba->fc4InputRequests++;
 		}
@@ -642,15 +765,15 @@ lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
 	piocbq->context1  = lpfc_cmd;
 	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
 	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
+	piocbq->vport = vport;
 }
 
 static int
-lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
+lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
 			     struct lpfc_scsi_buf *lpfc_cmd,
 			     unsigned int lun,
 			     uint8_t task_mgmt_cmd)
 {
-	struct lpfc_sli *psli;
 	struct lpfc_iocbq *piocbq;
 	IOCB_t *piocb;
 	struct fcp_cmnd *fcp_cmnd;
@@ -661,8 +784,9 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
 		return 0;
 	}
 
-	psli = &phba->sli;
 	piocbq = &(lpfc_cmd->cur_iocbq);
+	piocbq->vport = vport;
+
 	piocb = &piocbq->iocb;
 
 	fcp_cmnd = lpfc_cmd->fcp_cmnd;
@@ -688,7 +812,7 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
 		piocb->ulpTimeout = lpfc_cmd->timeout;
 	}
 
-	return (1);
+	return 1;
 }
 
 static void
@@ -704,10 +828,11 @@ lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
 }
 
 static int
-lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
+lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
 		    unsigned  tgt_id, unsigned int lun,
 		    struct lpfc_rport_data *rdata)
 {
+	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_iocbq *iocbq;
 	struct lpfc_iocbq *iocbqrsp;
 	int ret;
@@ -716,12 +841,11 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
 		return FAILED;
 
 	lpfc_cmd->rdata = rdata;
-	ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, lun,
+	ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
 					   FCP_TARGET_RESET);
 	if (!ret)
 		return FAILED;
 
-	lpfc_cmd->scsi_hba = phba;
 	iocbq = &lpfc_cmd->cur_iocbq;
 	iocbqrsp = lpfc_sli_get_iocbq(phba);
 
@@ -730,10 +854,10 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
 
 	/* Issue Target Reset to TGT <num> */
 	lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
-			"%d:0702 Issue Target Reset to TGT %d "
+			"%d (%d):0702 Issue Target Reset to TGT %d "
 			"Data: x%x x%x\n",
-			phba->brd_no, tgt_id, rdata->pnode->nlp_rpi,
-			rdata->pnode->nlp_flag);
+			phba->brd_no, vport->vpi, tgt_id,
+			rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
 
 	ret = lpfc_sli_issue_iocb_wait(phba,
 				       &phba->sli.ring[phba->sli.fcp_ring],
@@ -758,7 +882,8 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
 const char *
 lpfc_info(struct Scsi_Host *host)
 {
-	struct lpfc_hba    *phba = (struct lpfc_hba *) host->hostdata;
+	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
 	int len;
 	static char  lpfcinfobuf[384];
 
@@ -800,26 +925,22 @@ void lpfc_poll_start_timer(struct lpfc_hba * phba)
 
 void lpfc_poll_timeout(unsigned long ptr)
 {
-	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
-	unsigned long iflag;
-
-	spin_lock_irqsave(phba->host->host_lock, iflag);
+	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
 
 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
 		lpfc_sli_poll_fcp_ring (phba);
 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
 			lpfc_poll_rearm_timer(phba);
 	}
-
-	spin_unlock_irqrestore(phba->host->host_lock, iflag);
 }
 
 static int
 lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 {
-	struct lpfc_hba *phba =
-		(struct lpfc_hba *) cmnd->device->host->hostdata;
-	struct lpfc_sli *psli = &phba->sli;
+	struct Scsi_Host *shost = cmnd->device->host;
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_sli *psli = &phba->sli;
 	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
 	struct lpfc_nodelist *ndlp = rdata->pnode;
 	struct lpfc_scsi_buf *lpfc_cmd;
@@ -840,11 +961,14 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 		cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
 		goto out_fail_command;
 	}
-	lpfc_cmd = lpfc_get_scsi_buf (phba);
+	lpfc_cmd = lpfc_get_scsi_buf(phba);
 	if (lpfc_cmd == NULL) {
+		lpfc_adjust_queue_depth(phba);
+
 		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
-				"%d:0707 driver's buffer pool is empty, "
-				"IO busied\n", phba->brd_no);
+				"%d (%d):0707 driver's buffer pool is empty, "
+				"IO busied\n",
+				phba->brd_no, vport->vpi);
 		goto out_host_busy;
 	}
 
@@ -862,10 +986,10 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 	if (err)
 		goto out_host_busy_free_buf;
 
-	lpfc_scsi_prep_cmnd(phba, lpfc_cmd, ndlp);
+	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
 
 	err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
 				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
 	if (err)
 		goto out_host_busy_free_buf;
 
@@ -907,8 +1031,9 @@ lpfc_block_error_handler(struct scsi_cmnd *cmnd)
 static int
 lpfc_abort_handler(struct scsi_cmnd *cmnd)
 {
 	struct Scsi_Host *shost = cmnd->device->host;
-	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
 	struct lpfc_iocbq *iocb;
 	struct lpfc_iocbq *abtsiocb;
@@ -918,8 +1043,6 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 	int ret = SUCCESS;
 
 	lpfc_block_error_handler(cmnd);
-	spin_lock_irq(shost->host_lock);
-
 	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
 	BUG_ON(!lpfc_cmd);
 
@@ -956,12 +1079,13 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 
 	icmd->ulpLe = 1;
 	icmd->ulpClass = cmd->ulpClass;
-	if (phba->hba_state >= LPFC_LINK_UP)
+	if (lpfc_is_link_up(phba))
 		icmd->ulpCommand = CMD_ABORT_XRI_CN;
 	else
 		icmd->ulpCommand = CMD_CLOSE_XRI_CN;
 
 	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
+	abtsiocb->vport = vport;
 	if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
 		lpfc_sli_release_iocbq(phba, abtsiocb);
 		ret = FAILED;
@@ -977,9 +1101,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
 			lpfc_sli_poll_fcp_ring (phba);
 
-		spin_unlock_irq(phba->host->host_lock);
-		schedule_timeout_uninterruptible(LPFC_ABORT_WAIT*HZ);
-		spin_lock_irq(phba->host->host_lock);
+		schedule_timeout_uninterruptible(LPFC_ABORT_WAIT * HZ);
 		if (++loop_count
 		    > (2 * phba->cfg_devloss_tmo)/LPFC_ABORT_WAIT)
 			break;
@@ -988,30 +1110,30 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 	if (lpfc_cmd->pCmd == cmnd) {
 		ret = FAILED;
 		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-				"%d:0748 abort handler timed out waiting for "
-				"abort to complete: ret %#x, ID %d, LUN %d, "
-				"snum %#lx\n",
-				phba->brd_no, ret, cmnd->device->id,
-				cmnd->device->lun, cmnd->serial_number);
+				"%d (%d):0748 abort handler timed out waiting "
+				"for abort to complete: ret %#x, ID %d, "
+				"LUN %d, snum %#lx\n",
+				phba->brd_no, vport->vpi, ret,
+				cmnd->device->id, cmnd->device->lun,
+				cmnd->serial_number);
 	}
 
  out:
 	lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
-			"%d:0749 SCSI Layer I/O Abort Request "
+			"%d (%d):0749 SCSI Layer I/O Abort Request "
 			"Status x%x ID %d LUN %d snum %#lx\n",
-			phba->brd_no, ret, cmnd->device->id,
+			phba->brd_no, vport->vpi, ret, cmnd->device->id,
 			cmnd->device->lun, cmnd->serial_number);
 
-	spin_unlock_irq(shost->host_lock);
-
 	return ret;
 }
 
 static int
 lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
 {
 	struct Scsi_Host *shost = cmnd->device->host;
-	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_scsi_buf *lpfc_cmd;
 	struct lpfc_iocbq *iocbq, *iocbqrsp;
 	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
@@ -1022,28 +1144,26 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
 	int cnt, loopcnt;
 
 	lpfc_block_error_handler(cmnd);
-	spin_lock_irq(shost->host_lock);
 	loopcnt = 0;
 	/*
 	 * If target is not in a MAPPED state, delay the reset until
 	 * target is rediscovered or devloss timeout expires.
 	 */
-	while ( 1 ) {
+	while (1) {
 		if (!pnode)
 			goto out;
 
 		if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
-			spin_unlock_irq(phba->host->host_lock);
 			schedule_timeout_uninterruptible(msecs_to_jiffies(500));
-			spin_lock_irq(phba->host->host_lock);
 			loopcnt++;
 			rdata = cmnd->device->hostdata;
 			if (!rdata ||
 			    (loopcnt > ((phba->cfg_devloss_tmo * 2) + 1))) {
 				lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-					"%d:0721 LUN Reset rport failure:"
-					" cnt x%x rdata x%p\n",
-					phba->brd_no, loopcnt, rdata);
+						"%d (%d):0721 LUN Reset rport "
+						"failure: cnt x%x rdata x%p\n",
+						phba->brd_no, vport->vpi,
+						loopcnt, rdata);
 				goto out;
 			}
 			pnode = rdata->pnode;
@@ -1054,15 +1174,14 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
 			break;
 	}
 
-	lpfc_cmd = lpfc_get_scsi_buf (phba);
+	lpfc_cmd = lpfc_get_scsi_buf(phba);
 	if (lpfc_cmd == NULL)
 		goto out;
 
 	lpfc_cmd->timeout = 60;
-	lpfc_cmd->scsi_hba = phba;
 	lpfc_cmd->rdata = rdata;
 
-	ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, cmnd->device->lun,
+	ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, cmnd->device->lun,
 					   FCP_TARGET_RESET);
 	if (!ret)
 		goto out_free_scsi_buf;
@@ -1075,8 +1194,9 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
 		goto out_free_scsi_buf;
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
-			"%d:0703 Issue target reset to TGT %d LUN %d rpi x%x "
-			"nlp_flag x%x\n", phba->brd_no, cmnd->device->id,
+			"%d (%d):0703 Issue target reset to TGT %d LUN %d "
+			"rpi x%x nlp_flag x%x\n",
+			phba->brd_no, vport->vpi, cmnd->device->id,
 			cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
 
 	iocb_status = lpfc_sli_issue_iocb_wait(phba,
@@ -1111,9 +1231,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
 				    0, LPFC_CTX_LUN);
 	loopcnt = 0;
 	while(cnt) {
-		spin_unlock_irq(phba->host->host_lock);
 		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
-		spin_lock_irq(phba->host->host_lock);
 
 		if (++loopcnt
 		    > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
@@ -1127,8 +1245,9 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
 
 	if (cnt) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-			"%d:0719 device reset I/O flush failure: cnt x%x\n",
-			phba->brd_no, cnt);
+				"%d (%d):0719 device reset I/O flush failure: "
+				"cnt x%x\n",
+				phba->brd_no, vport->vpi, cnt);
 		ret = FAILED;
 	}
 
@@ -1137,21 +1256,21 @@ out_free_scsi_buf:
 		lpfc_release_scsi_buf(phba, lpfc_cmd);
 	}
 	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-			"%d:0713 SCSI layer issued device reset (%d, %d) "
+			"%d (%d):0713 SCSI layer issued device reset (%d, %d) "
 			"return x%x status x%x result x%x\n",
-			phba->brd_no, cmnd->device->id, cmnd->device->lun,
-			ret, cmd_status, cmd_result);
+			phba->brd_no, vport->vpi, cmnd->device->id,
+			cmnd->device->lun, ret, cmd_status, cmd_result);
 
 out:
-	spin_unlock_irq(shost->host_lock);
 	return ret;
 }
 
 static int
 lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
 {
 	struct Scsi_Host *shost = cmnd->device->host;
-	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_nodelist *ndlp = NULL;
 	int match;
 	int ret = FAILED, i, err_count = 0;
@@ -1159,7 +1278,6 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
 	struct lpfc_scsi_buf * lpfc_cmd;
 
 	lpfc_block_error_handler(cmnd);
-	spin_lock_irq(shost->host_lock);
 
 	lpfc_cmd = lpfc_get_scsi_buf(phba);
 	if (lpfc_cmd == NULL)
@@ -1167,7 +1285,6 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
 
 	/* The lpfc_cmd storage is reused.  Set all loop invariants. */
 	lpfc_cmd->timeout = 60;
-	lpfc_cmd->scsi_hba = phba;
 
 	/*
 	 * Since the driver manages a single bus device, reset all
@@ -1177,7 +1294,8 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
 	for (i = 0; i < LPFC_MAX_TARGET; i++) {
 		/* Search for mapped node by target ID */
 		match = 0;
-		list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) {
+		spin_lock_irq(shost->host_lock);
+		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
 			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
 			    i == ndlp->nlp_sid &&
 			    ndlp->rport) {
@@ -1185,15 +1303,18 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
 				break;
 			}
 		}
+		spin_unlock_irq(shost->host_lock);
 		if (!match)
 			continue;
 
-		ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba, i, cmnd->device->lun,
+		ret = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
+					  cmnd->device->lun,
 					  ndlp->rport->dd_data);
 		if (ret != SUCCESS) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-				"%d:0700 Bus Reset on target %d failed\n",
-				phba->brd_no, i);
+					"%d (%d):0700 Bus Reset on target %d "
+					"failed\n",
+					phba->brd_no, vport->vpi, i);
 			err_count++;
 			break;
 		}
@@ -1219,9 +1340,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
 			    0, 0, 0, LPFC_CTX_HOST);
 	loopcnt = 0;
 	while(cnt) {
-		spin_unlock_irq(phba->host->host_lock);
 		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
-		spin_lock_irq(phba->host->host_lock);
 
 		if (++loopcnt
 		    > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
@@ -1234,25 +1353,24 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
 
 	if (cnt) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-		   "%d:0715 Bus Reset I/O flush failure: cnt x%x left x%x\n",
-		   phba->brd_no, cnt, i);
+				"%d (%d):0715 Bus Reset I/O flush failure: "
+				"cnt x%x left x%x\n",
+				phba->brd_no, vport->vpi, cnt, i);
 		ret = FAILED;
 	}
 
-	lpfc_printf_log(phba,
-			KERN_ERR,
-			LOG_FCP,
-			"%d:0714 SCSI layer issued Bus Reset Data: x%x\n",
-			phba->brd_no, ret);
+	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+			"%d (%d):0714 SCSI layer issued Bus Reset Data: x%x\n",
+			phba->brd_no, vport->vpi, ret);
out:
-	spin_unlock_irq(shost->host_lock);
 	return ret;
 }
 
 static int
 lpfc_slave_alloc(struct scsi_device *sdev)
 {
-	struct lpfc_hba *phba = (struct lpfc_hba *)sdev->host->hostdata;
+	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
+	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_scsi_buf *scsi_buf = NULL;
 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
 	uint32_t total = 0, i;
@@ -1273,27 +1391,35 @@ lpfc_slave_alloc(struct scsi_device *sdev)
 	 */
 	total = phba->total_scsi_bufs;
 	num_to_alloc = phba->cfg_lun_queue_depth + 2;
-	if (total >= phba->cfg_hba_queue_depth) {
+
+	/* Allow some exchanges to be available always to complete discovery */
+	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
-				"%d:0704 At limitation of %d preallocated "
-				"command buffers\n", phba->brd_no, total);
+				"%d (%d):0704 At limitation of %d "
+				"preallocated command buffers\n",
+				phba->brd_no, vport->vpi, total);
 		return 0;
-	} else if (total + num_to_alloc > phba->cfg_hba_queue_depth) {
+
+	/* Allow some exchanges to be available always to complete discovery */
+	} else if (total + num_to_alloc >
+		   phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
-				"%d:0705 Allocation request of %d command "
-				"buffers will exceed max of %d. Reducing "
-				"allocation request to %d.\n", phba->brd_no,
-				num_to_alloc, phba->cfg_hba_queue_depth,
+				"%d (%d):0705 Allocation request of %d "
+				"command buffers will exceed max of %d. "
+				"Reducing allocation request to %d.\n",
+				phba->brd_no, vport->vpi, num_to_alloc,
+				phba->cfg_hba_queue_depth,
 				(phba->cfg_hba_queue_depth - total));
 		num_to_alloc = phba->cfg_hba_queue_depth - total;
 	}
 
 	for (i = 0; i < num_to_alloc; i++) {
-		scsi_buf = lpfc_new_scsi_buf(phba);
+		scsi_buf = lpfc_new_scsi_buf(vport);
 		if (!scsi_buf) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-					"%d:0706 Failed to allocate command "
-					"buffer\n", phba->brd_no);
+					"%d (%d):0706 Failed to allocate "
+					"command buffer\n",
+					phba->brd_no, vport->vpi);
 			break;
 		}
 
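The reworked limits in lpfc_slave_alloc() hold LPFC_DISC_IOCB_BUFF_COUNT exchanges back from SCSI buffer preallocation so discovery I/O can still make progress near the HBA queue depth. Reduced to plain arithmetic, the clamp behaves as below (a sketch; DISC_RESERVE and its value 20 are invented for illustration, and, as in the hunk, the partial top-up is computed against the full HBA depth rather than the reserve-adjusted cap):

#define DISC_RESERVE 20	/* stand-in for LPFC_DISC_IOCB_BUFF_COUNT */

static int bufs_to_alloc(int total, int hba_depth, int lun_depth)
{
	int want = lun_depth + 2;		/* per-LUN depth plus slack */

	if (total >= hba_depth - DISC_RESERVE)
		return 0;			/* already at the cap */
	if (total + want > hba_depth - DISC_RESERVE)
		want = hba_depth - total;	/* partial top-up */
	return want;
}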
@@ -1308,8 +1434,9 @@ lpfc_slave_alloc(struct scsi_device *sdev)
 static int
 lpfc_slave_configure(struct scsi_device *sdev)
 {
-	struct lpfc_hba *phba = (struct lpfc_hba *) sdev->host->hostdata;
-	struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
+	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
 
 	if (sdev->tagged_supported)
 		scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth);
@@ -1340,6 +1467,7 @@ lpfc_slave_destroy(struct scsi_device *sdev)
 	return;
 }
 
+
 struct scsi_host_template lpfc_template = {
 	.module			= THIS_MODULE,
 	.name			= LPFC_DRIVER_NAME,
@@ -1352,11 +1480,10 @@ struct scsi_host_template lpfc_template = {
 	.slave_configure	= lpfc_slave_configure,
 	.slave_destroy		= lpfc_slave_destroy,
 	.scan_finished		= lpfc_scan_finished,
-	.scan_start		= lpfc_scan_start,
 	.this_id		= -1,
 	.sg_tablesize		= LPFC_SG_SEG_CNT,
 	.cmd_per_lun		= LPFC_CMD_PER_LUN,
 	.use_clustering		= ENABLE_CLUSTERING,
-	.shost_attrs		= lpfc_host_attrs,
+	.shost_attrs		= lpfc_hba_attrs,
 	.max_sectors		= 0xFFFF,
 };