Diffstat (limited to 'drivers/scsi/lpfc/lpfc_scsi.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 135
1 file changed, 53 insertions(+), 82 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index f5ab5dd9bbb..bf80cdefb50 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -246,6 +246,36 @@ lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
 }
 
 /**
+ * lpfc_change_queue_depth - Alter scsi device queue depth
+ * @sdev: Pointer to the scsi device on which to change the queue depth.
+ * @qdepth: New queue depth to set the sdev to.
+ * @reason: The reason for the queue depth change.
+ *
+ * This function is called by the midlayer and the LLD to alter the queue
+ * depth for a scsi device. This function sets the queue depth to the new
+ * value and sends an event out to log the queue depth change.
+ **/
+int
+lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_rport_data *rdata;
+	unsigned long new_queue_depth, old_queue_depth;
+
+	old_queue_depth = sdev->queue_depth;
+	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+	new_queue_depth = sdev->queue_depth;
+	rdata = sdev->hostdata;
+	if (rdata)
+		lpfc_send_sdev_queuedepth_change_event(phba, vport,
+						       rdata->pnode, sdev->lun,
+						       old_queue_depth,
+						       new_queue_depth);
+	return sdev->queue_depth;
+}
+
+/**
  * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
  * @phba: The Hba for which this call is being executed.
  *
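
Note: the new function above is wired up as the host template's change_queue_depth handler at the bottom of this diff. In this era's midlayer contract, the third argument tells the LLD why the depth is changing: SCSI_QDEPTH_DEFAULT for a plain request such as a sysfs write, SCSI_QDEPTH_QFULL when the midlayer is reacting to QUEUE FULL, and SCSI_QDEPTH_RAMP_UP when it ramps back up. lpfc applies every reason the same way; a minimal reason-aware handler, sketched here as a hypothetical driver (only the scsi_* helpers and SCSI_QDEPTH_* values are real midlayer API), might look like:

	static int example_change_queue_depth(struct scsi_device *sdev,
					      int qdepth, int reason)
	{
		switch (reason) {
		case SCSI_QDEPTH_DEFAULT:
		case SCSI_QDEPTH_RAMP_UP:
			/* set the new depth, keeping the current tag type */
			scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev),
						qdepth);
			break;
		case SCSI_QDEPTH_QFULL:
			/* let the midlayer debounce repeated QUEUE FULLs */
			scsi_track_queue_full(sdev, qdepth);
			break;
		default:
			return -EOPNOTSUPP;
		}
		return sdev->queue_depth;
	}
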
@@ -309,8 +339,10 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
 	if (vport->cfg_lun_queue_depth <= queue_depth)
 		return;
 	spin_lock_irqsave(&phba->hbalock, flags);
-	if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
-	    ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL ) > jiffies)) {
+	if (time_before(jiffies,
+			phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) ||
+	    time_before(jiffies,
+			phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) {
 		spin_unlock_irqrestore(&phba->hbalock, flags);
 		return;
 	}
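
The conversion above from open-coded jiffies comparisons to time_before() is the wraparound-safe idiom: a raw "stamp + interval > jiffies" inverts its meaning when the jiffies counter overflows. Stripped of its typecheck() guards, the <linux/jiffies.h> macro pair reduces to a signed subtraction, roughly:

	/* simplified from <linux/jiffies.h>; the real macros also
	 * typecheck both arguments as unsigned long */
	#define time_after(a, b)	((long)((b) - (a)) < 0)
	#define time_before(a, b)	time_after(b, a)

Because the subtraction wraps modulo 2^BITS_PER_LONG and is then read as signed, the comparison stays correct across a counter wrap as long as the two timestamps are less than half the counter range apart.
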
@@ -342,10 +374,9 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
 	struct lpfc_vport **vports;
 	struct Scsi_Host *shost;
 	struct scsi_device *sdev;
-	unsigned long new_queue_depth, old_queue_depth;
+	unsigned long new_queue_depth;
 	unsigned long num_rsrc_err, num_cmd_success;
 	int i;
-	struct lpfc_rport_data *rdata;
 
 	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
 	num_cmd_success = atomic_read(&phba->num_cmd_success);
@@ -363,22 +394,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
 				else
 					new_queue_depth = sdev->queue_depth -
 							  new_queue_depth;
-				old_queue_depth = sdev->queue_depth;
-				if (sdev->ordered_tags)
-					scsi_adjust_queue_depth(sdev,
-							MSG_ORDERED_TAG,
-							new_queue_depth);
-				else
-					scsi_adjust_queue_depth(sdev,
-							MSG_SIMPLE_TAG,
-							new_queue_depth);
-				rdata = sdev->hostdata;
-				if (rdata)
-					lpfc_send_sdev_queuedepth_change_event(
-						phba, vports[i],
-						rdata->pnode,
-						sdev->lun, old_queue_depth,
-						new_queue_depth);
+				lpfc_change_queue_depth(sdev, new_queue_depth,
+							SCSI_QDEPTH_DEFAULT);
 			}
 		}
 	lpfc_destroy_vport_work_array(phba, vports);
@@ -402,7 +419,6 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
 	struct Scsi_Host *shost;
 	struct scsi_device *sdev;
 	int i;
-	struct lpfc_rport_data *rdata;
 
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL)
@@ -412,22 +428,9 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
 				if (vports[i]->cfg_lun_queue_depth <=
 				    sdev->queue_depth)
 					continue;
-				if (sdev->ordered_tags)
-					scsi_adjust_queue_depth(sdev,
-							MSG_ORDERED_TAG,
-							sdev->queue_depth+1);
-				else
-					scsi_adjust_queue_depth(sdev,
-							MSG_SIMPLE_TAG,
-							sdev->queue_depth+1);
-				rdata = sdev->hostdata;
-				if (rdata)
-					lpfc_send_sdev_queuedepth_change_event(
-						phba, vports[i],
-						rdata->pnode,
-						sdev->lun,
-						sdev->queue_depth - 1,
-						sdev->queue_depth);
+				lpfc_change_queue_depth(sdev,
+							sdev->queue_depth+1,
+							SCSI_QDEPTH_RAMP_UP);
 			}
 		}
 	lpfc_destroy_vport_work_array(phba, vports);
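
Both worker-thread handlers above shrink to a single call because lpfc_change_queue_depth() now owns the tag-type selection and the event logging. The deleted pattern appeared at several call sites in this file; scsi_get_tag_type() already returns MSG_ORDERED_TAG, MSG_SIMPLE_TAG, or 0 (untagged) based on the sdev flags, so the hand-rolled branch was redundant. In sketch form:

	/* before: pick the tag message by hand at every call site */
	if (sdev->ordered_tags)
		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
	else
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);

	/* after: the helper queries the device itself */
	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
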
@@ -2208,7 +2211,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
 	int result;
 	struct scsi_device *tmp_sdev;
-	int depth = 0;
+	int depth;
 	unsigned long flags;
 	struct lpfc_fast_path_event *fast_path_evt;
 	struct Scsi_Host *shost = cmd->device->host;
@@ -2375,67 +2378,29 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 		return;
 	}
 
-
 	if (!result)
 		lpfc_rampup_queue_depth(vport, queue_depth);
 
-	if (!result && pnode && NLP_CHK_NODE_ACT(pnode) &&
-	   ((jiffies - pnode->last_ramp_up_time) >
-		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
-	   ((jiffies - pnode->last_q_full_time) >
-		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
-	   (vport->cfg_lun_queue_depth > queue_depth)) {
-		shost_for_each_device(tmp_sdev, shost) {
-			if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth){
-				if (tmp_sdev->id != scsi_id)
-					continue;
-				if (tmp_sdev->ordered_tags)
-					scsi_adjust_queue_depth(tmp_sdev,
-						MSG_ORDERED_TAG,
-						tmp_sdev->queue_depth+1);
-				else
-					scsi_adjust_queue_depth(tmp_sdev,
-						MSG_SIMPLE_TAG,
-						tmp_sdev->queue_depth+1);
-
-				pnode->last_ramp_up_time = jiffies;
-			}
-		}
-		lpfc_send_sdev_queuedepth_change_event(phba, vport, pnode,
-			0xFFFFFFFF,
-			queue_depth , queue_depth + 1);
-	}
-
 	/*
 	 * Check for queue full. If the lun is reporting queue full, then
 	 * back off the lun queue depth to prevent target overloads.
 	 */
 	if (result == SAM_STAT_TASK_SET_FULL && pnode &&
 	    NLP_CHK_NODE_ACT(pnode)) {
-		pnode->last_q_full_time = jiffies;
-
 		shost_for_each_device(tmp_sdev, shost) {
 			if (tmp_sdev->id != scsi_id)
 				continue;
 			depth = scsi_track_queue_full(tmp_sdev,
-						tmp_sdev->queue_depth - 1);
-		}
-		/*
-		 * The queue depth cannot be lowered any more.
-		 * Modify the returned error code to store
-		 * the final depth value set by
-		 * scsi_track_queue_full.
-		 */
-		if (depth == -1)
-			depth = shost->cmd_per_lun;
-
-		if (depth) {
+						      tmp_sdev->queue_depth-1);
+			if (depth <= 0)
+				continue;
 			lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
 					 "0711 detected queue full - lun queue "
 					 "depth adjusted to %d.\n", depth);
 			lpfc_send_sdev_queuedepth_change_event(phba, vport,
-							       pnode, 0xFFFFFFFF,
-							       depth+1, depth);
+							       pnode,
+							       tmp_sdev->lun,
+							       depth+1, depth);
 		}
 	}
 
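
The queue-full rewrite above leans on scsi_track_queue_full()'s return convention: it returns 0 when it decides not to change the depth yet (it debounces repeated QUEUE FULL conditions), -1 when the device has been dropped back to untagged operation, and otherwise the new depth it set. The old code translated -1 into shost->cmd_per_lun before logging; the new code folds both non-positive cases into one "nothing to report" path and logs the real LUN (tmp_sdev->lun) instead of the 0xFFFFFFFF placeholder. A condensed view of the new control flow, under that return-value convention:

	shost_for_each_device(tmp_sdev, shost) {
		if (tmp_sdev->id != scsi_id)
			continue;
		depth = scsi_track_queue_full(tmp_sdev,
					      tmp_sdev->queue_depth - 1);
		if (depth <= 0)
			continue;	/* unchanged, or gone untagged */
		/* depth > 0: a real new depth was set; log it per LUN */
	}
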
@@ -3019,6 +2984,10 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 
 	icmd->ulpLe = 1;
 	icmd->ulpClass = cmd->ulpClass;
+
+	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
+	abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
+
 	if (lpfc_is_link_up(phba))
 		icmd->ulpCommand = CMD_ABORT_XRI_CN;
 	else
@@ -3596,6 +3565,7 @@ struct scsi_host_template lpfc_template = {
 	.shost_attrs		= lpfc_hba_attrs,
 	.max_sectors		= 0xFFFF,
 	.vendor_id		= LPFC_NL_VENDOR_ID,
+	.change_queue_depth	= lpfc_change_queue_depth,
 };
 
 struct scsi_host_template lpfc_vport_template = {
@@ -3617,4 +3587,5 @@ struct scsi_host_template lpfc_vport_template = {
 	.use_clustering		= ENABLE_CLUSTERING,
 	.shost_attrs		= lpfc_vport_attrs,
 	.max_sectors		= 0xFFFF,
+	.change_queue_depth	= lpfc_change_queue_depth,
 };
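
With .change_queue_depth set in both lpfc_template and lpfc_vport_template, the midlayer reaches the driver through one entry point for user-initiated changes as well as for its own ramp logic. For reference, a sysfs write to a device's queue_depth attribute travels roughly this path in this era's midlayer; the sketch below is a paraphrase of drivers/scsi/scsi_sysfs.c from memory, not a verbatim copy:

	static ssize_t
	sdev_store_queue_depth(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
	{
		int depth, retval;
		struct scsi_device *sdev = to_scsi_device(dev);
		struct scsi_host_template *sht = sdev->host->hostt;

		/* no hook registered: depth is not changeable */
		if (!sht->change_queue_depth)
			return -EINVAL;

		depth = simple_strtoul(buf, NULL, 0);
		if (depth < 1)
			return -EINVAL;

		/* user-initiated change: reason is SCSI_QDEPTH_DEFAULT */
		retval = sht->change_queue_depth(sdev, depth,
						 SCSI_QDEPTH_DEFAULT);
		if (retval < 0)
			return retval;
		return count;
	}

So the value lpfc_change_queue_depth() returns (the resulting sdev->queue_depth) is what the midlayer treats as the outcome of the request.
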