author     James Smart <james.smart@emulex.com>         2014-02-20 09:57:08 -0500
committer  James Bottomley <JBottomley@Parallels.com>   2014-03-15 13:18:56 -0400
commit     6ff8556d5f86681c164fc9d05e617e160f79f264 (patch)
tree       bd6bd99af5e09898ae8e8123afdfba03f7417b86 /drivers/scsi/lpfc/lpfc_scsi.c
parent     1ba981fd3ad1f91b8bb205ce6aac6aad45f2fa7a (diff)
[SCSI] lpfc 8.3.45: Incorporate changes to use reason in change_queue_depth function.
Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_scsi.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 126
1 file changed, 19 insertions(+), 107 deletions(-)
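
For context on the interface this patch adopts: the third `reason` argument comes from the SCSI mid-layer of this era (roughly v2.6.33 through v3.18), which tells the LLD why a queue-depth change is being requested. The sketch below paraphrases the relevant declarations from include/scsi/scsi_host.h; the identifiers are the real ones, but the comments and layout are reconstructed rather than quoted.

```c
/* Paraphrased from include/scsi/scsi_host.h (3.x era); a sketch, not a verbatim copy. */
enum {
        SCSI_QDEPTH_DEFAULT,    /* default change request, e.g. a sysfs write */
        SCSI_QDEPTH_QFULL,      /* mid-layer reacting to QUEUE FULL / TASK SET FULL */
        SCSI_QDEPTH_RAMP_UP,    /* mid-layer growing the depth again */
};

struct scsi_host_template {
        /* ... */
        /*
         * LLD hook: adjust the device queue depth for the given reason and
         * return the resulting depth, or a negative errno on failure.
         */
        int (*change_queue_depth)(struct scsi_device *sdev, int qdepth,
                                  int reason);
        /* ... */
};
```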
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 4015fcc6d9a1..92f90df9f991 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -315,7 +315,25 @@ lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
         unsigned long new_queue_depth, old_queue_depth;
 
         old_queue_depth = sdev->queue_depth;
-        scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+
+        switch (reason) {
+        case SCSI_QDEPTH_DEFAULT:
+                /* change request from sysfs, fall through */
+        case SCSI_QDEPTH_RAMP_UP:
+                scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+                break;
+        case SCSI_QDEPTH_QFULL:
+                if (scsi_track_queue_full(sdev, qdepth) == 0)
+                        return sdev->queue_depth;
+
+                lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+                                 "0711 detected queue full - lun queue "
+                                 "depth adjusted to %d.\n", sdev->queue_depth);
+                break;
+        default:
+                return -EOPNOTSUPP;
+        }
+
         new_queue_depth = sdev->queue_depth;
         rdata = lpfc_rport_data_from_scsi_device(sdev);
         if (rdata)
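
The new default arm that returns -EOPNOTSUPP matters mainly on the SCSI_QDEPTH_DEFAULT path, where the error code travels back to whoever wrote the sysfs queue_depth attribute. The following is a rough, reconstructed approximation of that caller (the store helper in drivers/scsi/scsi_sysfs.c of the same era); the helper name and details may differ slightly from the real source:

```c
/*
 * Approximation of the sysfs "queue_depth" store path (scsi_sysfs.c, 3.x era),
 * reconstructed from memory rather than copied.
 */
static ssize_t
sdev_store_queue_depth_rw(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count)
{
        struct scsi_device *sdev = to_scsi_device(dev);
        struct scsi_host_template *sht = sdev->host->hostt;
        int depth, retval;

        if (!sht->change_queue_depth)
                return -EINVAL;

        depth = simple_strtoul(buf, NULL, 0);
        if (depth < 1)
                return -EINVAL;

        /* Hand the request to the LLD with the DEFAULT reason code. */
        retval = sht->change_queue_depth(sdev, depth, SCSI_QDEPTH_DEFAULT);
        if (retval < 0)
                return retval;          /* e.g. -EOPNOTSUPP from the LLD */

        sdev->max_queue_depth = sdev->queue_depth;
        return count;
}
```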
@@ -388,50 +406,6 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
 }
 
 /**
- * lpfc_rampup_queue_depth - Post RAMP_UP_QUEUE event for worker thread
- * @phba: The Hba for which this call is being executed.
- *
- * This routine post WORKER_RAMP_UP_QUEUE event for @phba vport. This routine
- * post at most 1 event every 5 minute after last_ramp_up_time or
- * last_rsrc_error_time. This routine wakes up worker thread of @phba
- * to process WORKER_RAM_DOWN_EVENT event.
- *
- * This routine should be called with no lock held.
- **/
-static inline void
-lpfc_rampup_queue_depth(struct lpfc_vport *vport,
-                        uint32_t queue_depth)
-{
-        unsigned long flags;
-        struct lpfc_hba *phba = vport->phba;
-        uint32_t evt_posted;
-        atomic_inc(&phba->num_cmd_success);
-
-        if (vport->cfg_lun_queue_depth <= queue_depth)
-                return;
-        spin_lock_irqsave(&phba->hbalock, flags);
-        if (time_before(jiffies,
-                        phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) ||
-            time_before(jiffies,
-                        phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) {
-                spin_unlock_irqrestore(&phba->hbalock, flags);
-                return;
-        }
-        phba->last_ramp_up_time = jiffies;
-        spin_unlock_irqrestore(&phba->hbalock, flags);
-
-        spin_lock_irqsave(&phba->pport->work_port_lock, flags);
-        evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
-        if (!evt_posted)
-                phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
-        spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
-
-        if (!evt_posted)
-                lpfc_worker_wake_up(phba);
-        return;
-}
-
-/**
  * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
  * @phba: The Hba for which this call is being executed.
  *
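
The driver-private ramp-up event removed above is redundant because the mid-layer already paces and initiates ramp-up itself: after successful completions it may call the LLD's change_queue_depth() hook with SCSI_QDEPTH_RAMP_UP, from scsi_handle_queue_ramp_up() in drivers/scsi/scsi_error.c. The sketch below is a reconstructed paraphrase of the throttling half of that helper (the scsi_device field names are real, the body is from memory, not quoted); the device walk that follows it is sketched after the next hunk:

```c
/*
 * Reconstructed paraphrase of the ramp-up throttle in scsi_handle_queue_ramp_up()
 * (drivers/scsi/scsi_error.c, 3.x era); a sketch, not the literal source.
 */
static void scsi_handle_queue_ramp_up(struct scsi_device *sdev)
{
        struct scsi_host_template *sht = sdev->host->hostt;

        if (!sht->change_queue_depth ||
            sdev->queue_depth >= sdev->max_queue_depth)
                return;

        /*
         * Roughly the pacing lpfc did with QUEUE_RAMP_UP_INTERVAL, now driven
         * by the per-device queue_ramp_up_period.
         */
        if (time_before(jiffies,
                        sdev->last_queue_ramp_up + sdev->queue_ramp_up_period))
                return;
        if (time_before(jiffies,
                        sdev->last_queue_full_time + sdev->queue_ramp_up_period))
                return;

        /*
         * ...followed by a per-target walk that bumps each LUN by one via
         * sht->change_queue_depth(..., SCSI_QDEPTH_RAMP_UP); see the sketch
         * after the next hunk.
         */
}
```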
@@ -483,41 +457,6 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
 }
 
 /**
- * lpfc_ramp_up_queue_handler - WORKER_RAMP_UP_QUEUE event handler
- * @phba: The Hba for which this call is being executed.
- *
- * This routine is called to process WORKER_RAMP_UP_QUEUE event for worker
- * thread.This routine increases queue depth for all scsi device on each vport
- * associated with @phba by 1. This routine also sets @phba num_rsrc_err and
- * num_cmd_success to zero.
- **/
-void
-lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
-{
-        struct lpfc_vport **vports;
-        struct Scsi_Host *shost;
-        struct scsi_device *sdev;
-        int i;
-
-        vports = lpfc_create_vport_work_array(phba);
-        if (vports != NULL)
-                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
-                        shost = lpfc_shost_from_vport(vports[i]);
-                        shost_for_each_device(sdev, shost) {
-                                if (vports[i]->cfg_lun_queue_depth <=
-                                    sdev->queue_depth)
-                                        continue;
-                                lpfc_change_queue_depth(sdev,
-                                                        sdev->queue_depth+1,
-                                                        SCSI_QDEPTH_RAMP_UP);
-                        }
-                }
-        lpfc_destroy_vport_work_array(phba, vports);
-        atomic_set(&phba->num_rsrc_err, 0);
-        atomic_set(&phba->num_cmd_success, 0);
-}
-
-/**
  * lpfc_scsi_dev_block - set all scsi hosts to block state
  * @phba: Pointer to HBA context object.
  *
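
The per-vport, per-LUN walk that this worker handler performed is likewise taken over by the mid-layer, which walks only the devices of the affected target rather than every device on the HBA. Continuing the reconstructed paraphrase started after the previous hunk (again a sketch, not the literal kernel source), the second half of scsi_handle_queue_ramp_up() looks roughly like this and lands in the SCSI_QDEPTH_RAMP_UP arm of the new switch in lpfc_change_queue_depth():

```c
/*
 * Second half of the reconstructed scsi_handle_queue_ramp_up() paraphrase;
 * sdev, sht and tmp_sdev are the locals of that helper as sketched above.
 */
        /* Per-target walk: same channel and id, every LUN bumped by one. */
        shost_for_each_device(tmp_sdev, sdev->host) {
                if (tmp_sdev->channel != sdev->channel ||
                    tmp_sdev->id != sdev->id ||
                    tmp_sdev->queue_depth == sdev->max_queue_depth)
                        continue;
                /* Calls back into the LLD, i.e. lpfc_change_queue_depth(). */
                sht->change_queue_depth(tmp_sdev, tmp_sdev->queue_depth + 1,
                                        SCSI_QDEPTH_RAMP_UP);
                sdev->last_queue_ramp_up = jiffies;
        }
```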
@@ -4040,7 +3979,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
         struct lpfc_nodelist *pnode = rdata->pnode;
         struct scsi_cmnd *cmd;
         int result;
-        struct scsi_device *tmp_sdev;
         int depth;
         unsigned long flags;
         struct lpfc_fast_path_event *fast_path_evt;
@@ -4285,32 +4223,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
                 return;
         }
 
-        if (!result)
-                lpfc_rampup_queue_depth(vport, queue_depth);
-
-        /*
-         * Check for queue full. If the lun is reporting queue full, then
-         * back off the lun queue depth to prevent target overloads.
-         */
-        if (result == SAM_STAT_TASK_SET_FULL && pnode &&
-            NLP_CHK_NODE_ACT(pnode)) {
-                shost_for_each_device(tmp_sdev, shost) {
-                        if (tmp_sdev->id != scsi_id)
-                                continue;
-                        depth = scsi_track_queue_full(tmp_sdev,
-                                                      tmp_sdev->queue_depth-1);
-                        if (depth <= 0)
-                                continue;
-                        lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
-                                         "0711 detected queue full - lun queue "
-                                         "depth adjusted to %d.\n", depth);
-                        lpfc_send_sdev_queuedepth_change_event(phba, vport,
-                                                               pnode,
-                                                               tmp_sdev->lun,
-                                                               depth+1, depth);
-                }
-        }
-
         spin_lock_irqsave(&phba->hbalock, flags);
         lpfc_cmd->pCmd = NULL;
         spin_unlock_irqrestore(&phba->hbalock, flags);
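
The queue-full backoff dropped here follows the same pattern: on a TASK SET FULL completion the mid-layer's scsi_handle_queue_full() (drivers/scsi/scsi_error.c) asks the LLD to back off, and in lpfc that request now lands in the SCSI_QDEPTH_QFULL arm added in the first hunk, where scsi_track_queue_full() and the 0711 message take over. One visible difference is that lpfc_send_sdev_queuedepth_change_event() is no longer raised on this path. Below is a reconstructed sketch of the mid-layer side, not a verbatim copy:

```c
/*
 * Reconstructed paraphrase of the mid-layer queue-full handling
 * (scsi_error.c, 3.x era); a sketch, not the literal source.
 */
static void scsi_handle_queue_full(struct scsi_device *sdev)
{
        struct scsi_host_template *sht = sdev->host->hostt;
        struct scsi_device *tmp_sdev;

        if (!sht->change_queue_depth)
                return;

        shost_for_each_device(tmp_sdev, sdev->host) {
                if (tmp_sdev->channel != sdev->channel ||
                    tmp_sdev->id != sdev->id)
                        continue;
                /*
                 * Ask the LLD to back off by one; in lpfc this reaches the
                 * SCSI_QDEPTH_QFULL arm, where scsi_track_queue_full()
                 * decides whether the depth actually changes.
                 */
                sht->change_queue_depth(tmp_sdev, tmp_sdev->queue_depth - 1,
                                        SCSI_QDEPTH_QFULL);
        }
}
```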