Diffstat (limited to 'drivers/scsi/lpfc/lpfc_scsi.c')
-rw-r--r-- | drivers/scsi/lpfc/lpfc_scsi.c | 930 |
1 file changed, 785 insertions, 145 deletions
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 8032c5adb6a9..e9fa6762044a 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2008 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2009 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -31,8 +31,10 @@ | |||
31 | #include <scsi/scsi_transport_fc.h> | 31 | #include <scsi/scsi_transport_fc.h> |
32 | 32 | ||
33 | #include "lpfc_version.h" | 33 | #include "lpfc_version.h" |
34 | #include "lpfc_hw4.h" | ||
34 | #include "lpfc_hw.h" | 35 | #include "lpfc_hw.h" |
35 | #include "lpfc_sli.h" | 36 | #include "lpfc_sli.h" |
37 | #include "lpfc_sli4.h" | ||
36 | #include "lpfc_nl.h" | 38 | #include "lpfc_nl.h" |
37 | #include "lpfc_disc.h" | 39 | #include "lpfc_disc.h" |
38 | #include "lpfc_scsi.h" | 40 | #include "lpfc_scsi.h" |
@@ -57,6 +59,8 @@ static char *dif_op_str[] = { | |||
57 | "SCSI_PROT_READ_CONVERT", | 59 | "SCSI_PROT_READ_CONVERT", |
58 | "SCSI_PROT_WRITE_CONVERT" | 60 | "SCSI_PROT_WRITE_CONVERT" |
59 | }; | 61 | }; |
62 | static void | ||
63 | lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); | ||
60 | 64 | ||
61 | static void | 65 | static void |
62 | lpfc_debug_save_data(struct scsi_cmnd *cmnd) | 66 | lpfc_debug_save_data(struct scsi_cmnd *cmnd) |
@@ -325,7 +329,7 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) | |||
325 | 329 | ||
326 | vports = lpfc_create_vport_work_array(phba); | 330 | vports = lpfc_create_vport_work_array(phba); |
327 | if (vports != NULL) | 331 | if (vports != NULL) |
328 | for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { | 332 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { |
329 | shost = lpfc_shost_from_vport(vports[i]); | 333 | shost = lpfc_shost_from_vport(vports[i]); |
330 | shost_for_each_device(sdev, shost) { | 334 | shost_for_each_device(sdev, shost) { |
331 | new_queue_depth = | 335 | new_queue_depth = |
@@ -379,7 +383,7 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba) | |||
379 | 383 | ||
380 | vports = lpfc_create_vport_work_array(phba); | 384 | vports = lpfc_create_vport_work_array(phba); |
381 | if (vports != NULL) | 385 | if (vports != NULL) |
382 | for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { | 386 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { |
383 | shost = lpfc_shost_from_vport(vports[i]); | 387 | shost = lpfc_shost_from_vport(vports[i]); |
384 | shost_for_each_device(sdev, shost) { | 388 | shost_for_each_device(sdev, shost) { |
385 | if (vports[i]->cfg_lun_queue_depth <= | 389 | if (vports[i]->cfg_lun_queue_depth <= |
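Note: the two hunks above and the lpfc_scsi_dev_block hunks that follow make the same change, bounding the walk of the vport work array by phba->max_vports instead of phba->max_vpi. A minimal sketch of that iteration pattern, with the per-vport body elided (lpfc_destroy_vport_work_array() is the usual pairing in this driver but is not part of these hunks):

    struct lpfc_vport **vports;
    int i;

    vports = lpfc_create_vport_work_array(phba);
    if (vports != NULL)
        for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
            /* per-vport work, e.g. walking each shost's devices */
        }
    lpfc_destroy_vport_work_array(phba, vports);
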
@@ -427,7 +431,7 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba) | |||
427 | 431 | ||
428 | vports = lpfc_create_vport_work_array(phba); | 432 | vports = lpfc_create_vport_work_array(phba); |
429 | if (vports != NULL) | 433 | if (vports != NULL) |
430 | for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { | 434 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { |
431 | shost = lpfc_shost_from_vport(vports[i]); | 435 | shost = lpfc_shost_from_vport(vports[i]); |
432 | shost_for_each_device(sdev, shost) { | 436 | shost_for_each_device(sdev, shost) { |
433 | rport = starget_to_rport(scsi_target(sdev)); | 437 | rport = starget_to_rport(scsi_target(sdev)); |
@@ -438,22 +442,23 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba) | |||
438 | } | 442 | } |
439 | 443 | ||
440 | /** | 444 | /** |
441 | * lpfc_new_scsi_buf - Scsi buffer allocator | 445 | * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec |
442 | * @vport: The virtual port for which this call being executed. | 446 | * @vport: The virtual port for which this call being executed. |
447 | * @num_to_allocate: The requested number of buffers to allocate. | ||
443 | * | 448 | * |
444 | * This routine allocates a scsi buffer, which contains all the necessary | 449 | * This routine allocates a scsi buffer for device with SLI-3 interface spec, |
445 | * information needed to initiate a SCSI I/O. The non-DMAable buffer region | 450 | * the scsi buffer contains all the necessary information needed to initiate |
446 | * contains information to build the IOCB. The DMAable region contains | 451 | * a SCSI I/O. The non-DMAable buffer region contains information to build |
447 | * memory for the FCP CMND, FCP RSP, and the initial BPL. In addition to | 452 | * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP, |
448 | * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL | 453 | * and the initial BPL. In addition to allocating memory, the FCP CMND and |
449 | * and the BPL BDE is setup in the IOCB. | 454 | * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB. |
450 | * | 455 | * |
451 | * Return codes: | 456 | * Return codes: |
452 | * NULL - Error | 457 | * int - number of scsi buffers that were allocated. |
453 | * Pointer to lpfc_scsi_buf data structure - Success | 458 | * 0 = failure, less than num_to_alloc is a partial failure. |
454 | **/ | 459 | **/ |
455 | static struct lpfc_scsi_buf * | 460 | static int |
456 | lpfc_new_scsi_buf(struct lpfc_vport *vport) | 461 | lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) |
457 | { | 462 | { |
458 | struct lpfc_hba *phba = vport->phba; | 463 | struct lpfc_hba *phba = vport->phba; |
459 | struct lpfc_scsi_buf *psb; | 464 | struct lpfc_scsi_buf *psb; |
@@ -463,107 +468,401 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport) | |||
463 | dma_addr_t pdma_phys_fcp_rsp; | 468 | dma_addr_t pdma_phys_fcp_rsp; |
464 | dma_addr_t pdma_phys_bpl; | 469 | dma_addr_t pdma_phys_bpl; |
465 | uint16_t iotag; | 470 | uint16_t iotag; |
471 | int bcnt; | ||
466 | 472 | ||
467 | psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); | 473 | for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { |
468 | if (!psb) | 474 | psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); |
469 | return NULL; | 475 | if (!psb) |
476 | break; | ||
477 | |||
478 | /* | ||
479 | * Get memory from the pci pool to map the virt space to pci | ||
480 | * bus space for an I/O. The DMA buffer includes space for the | ||
481 | * struct fcp_cmnd, struct fcp_rsp and the number of bde's | ||
482 | * necessary to support the sg_tablesize. | ||
483 | */ | ||
484 | psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, | ||
485 | GFP_KERNEL, &psb->dma_handle); | ||
486 | if (!psb->data) { | ||
487 | kfree(psb); | ||
488 | break; | ||
489 | } | ||
490 | |||
491 | /* Initialize virtual ptrs to dma_buf region. */ | ||
492 | memset(psb->data, 0, phba->cfg_sg_dma_buf_size); | ||
493 | |||
494 | /* Allocate iotag for psb->cur_iocbq. */ | ||
495 | iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); | ||
496 | if (iotag == 0) { | ||
497 | pci_pool_free(phba->lpfc_scsi_dma_buf_pool, | ||
498 | psb->data, psb->dma_handle); | ||
499 | kfree(psb); | ||
500 | break; | ||
501 | } | ||
502 | psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; | ||
503 | |||
504 | psb->fcp_cmnd = psb->data; | ||
505 | psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd); | ||
506 | psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) + | ||
507 | sizeof(struct fcp_rsp); | ||
508 | |||
509 | /* Initialize local short-hand pointers. */ | ||
510 | bpl = psb->fcp_bpl; | ||
511 | pdma_phys_fcp_cmd = psb->dma_handle; | ||
512 | pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd); | ||
513 | pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) + | ||
514 | sizeof(struct fcp_rsp); | ||
515 | |||
516 | /* | ||
517 | * The first two bdes are the FCP_CMD and FCP_RSP. The balance | ||
518 | * are sg list bdes. Initialize the first two and leave the | ||
519 | * rest for queuecommand. | ||
520 | */ | ||
521 | bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd)); | ||
522 | bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd)); | ||
523 | bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd); | ||
524 | bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64; | ||
525 | bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w); | ||
526 | |||
527 | /* Setup the physical region for the FCP RSP */ | ||
528 | bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp)); | ||
529 | bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp)); | ||
530 | bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp); | ||
531 | bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64; | ||
532 | bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w); | ||
533 | |||
534 | /* | ||
535 | * Since the IOCB for the FCP I/O is built into this | ||
536 | * lpfc_scsi_buf, initialize it with all known data now. | ||
537 | */ | ||
538 | iocb = &psb->cur_iocbq.iocb; | ||
539 | iocb->un.fcpi64.bdl.ulpIoTag32 = 0; | ||
540 | if ((phba->sli_rev == 3) && | ||
541 | !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { | ||
542 | /* fill in immediate fcp command BDE */ | ||
543 | iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED; | ||
544 | iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); | ||
545 | iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t, | ||
546 | unsli3.fcp_ext.icd); | ||
547 | iocb->un.fcpi64.bdl.addrHigh = 0; | ||
548 | iocb->ulpBdeCount = 0; | ||
549 | iocb->ulpLe = 0; | ||
550 | /* fill in responce BDE */ | ||
551 | iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = | ||
552 | BUFF_TYPE_BDE_64; | ||
553 | iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize = | ||
554 | sizeof(struct fcp_rsp); | ||
555 | iocb->unsli3.fcp_ext.rbde.addrLow = | ||
556 | putPaddrLow(pdma_phys_fcp_rsp); | ||
557 | iocb->unsli3.fcp_ext.rbde.addrHigh = | ||
558 | putPaddrHigh(pdma_phys_fcp_rsp); | ||
559 | } else { | ||
560 | iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64; | ||
561 | iocb->un.fcpi64.bdl.bdeSize = | ||
562 | (2 * sizeof(struct ulp_bde64)); | ||
563 | iocb->un.fcpi64.bdl.addrLow = | ||
564 | putPaddrLow(pdma_phys_bpl); | ||
565 | iocb->un.fcpi64.bdl.addrHigh = | ||
566 | putPaddrHigh(pdma_phys_bpl); | ||
567 | iocb->ulpBdeCount = 1; | ||
568 | iocb->ulpLe = 1; | ||
569 | } | ||
570 | iocb->ulpClass = CLASS3; | ||
571 | psb->status = IOSTAT_SUCCESS; | ||
572 | /* Put it back into the SCSI buffer list */ | ||
573 | lpfc_release_scsi_buf_s4(phba, psb); | ||
470 | 574 | ||
471 | /* | ||
472 | * Get memory from the pci pool to map the virt space to pci bus space | ||
473 | * for an I/O. The DMA buffer includes space for the struct fcp_cmnd, | ||
474 | * struct fcp_rsp and the number of bde's necessary to support the | ||
475 | * sg_tablesize. | ||
476 | */ | ||
477 | psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL, | ||
478 | &psb->dma_handle); | ||
479 | if (!psb->data) { | ||
480 | kfree(psb); | ||
481 | return NULL; | ||
482 | } | 575 | } |
483 | 576 | ||
484 | /* Initialize virtual ptrs to dma_buf region. */ | 577 | return bcnt; |
485 | memset(psb->data, 0, phba->cfg_sg_dma_buf_size); | 578 | } |
486 | 579 | ||
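In the new lpfc_new_scsi_buf_s3() each pci_pool allocation is carved into three back-to-back regions, and the matching bus addresses are derived from psb->dma_handle with the same offsets. A condensed restatement of that layout, taken from the hunk above (illustrative only):

    /* One SLI-3 scsi DMA buffer: FCP_CMND, then FCP_RSP, then the BPL
     * that will hold the data BDEs built at queuecommand time.
     */
    psb->fcp_cmnd = psb->data;
    psb->fcp_rsp  = psb->data + sizeof(struct fcp_cmnd);
    psb->fcp_bpl  = psb->data + sizeof(struct fcp_cmnd) +
                    sizeof(struct fcp_rsp);

    pdma_phys_fcp_cmd = psb->dma_handle;
    pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
    pdma_phys_bpl     = psb->dma_handle + sizeof(struct fcp_cmnd) +
                        sizeof(struct fcp_rsp);
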
487 | /* Allocate iotag for psb->cur_iocbq. */ | 580 | /** |
488 | iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); | 581 | * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort |
489 | if (iotag == 0) { | 582 | * @phba: pointer to lpfc hba data structure. |
490 | pci_pool_free(phba->lpfc_scsi_dma_buf_pool, | 583 | * @axri: pointer to the fcp xri abort wcqe structure. |
491 | psb->data, psb->dma_handle); | 584 | * |
492 | kfree (psb); | 585 | * This routine is invoked by the worker thread to process a SLI4 fast-path |
493 | return NULL; | 586 | * FCP aborted xri. |
587 | **/ | ||
588 | void | ||
589 | lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, | ||
590 | struct sli4_wcqe_xri_aborted *axri) | ||
591 | { | ||
592 | uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); | ||
593 | struct lpfc_scsi_buf *psb, *next_psb; | ||
594 | unsigned long iflag = 0; | ||
595 | |||
596 | spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag); | ||
597 | list_for_each_entry_safe(psb, next_psb, | ||
598 | &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) { | ||
599 | if (psb->cur_iocbq.sli4_xritag == xri) { | ||
600 | list_del(&psb->list); | ||
601 | psb->status = IOSTAT_SUCCESS; | ||
602 | spin_unlock_irqrestore( | ||
603 | &phba->sli4_hba.abts_scsi_buf_list_lock, | ||
604 | iflag); | ||
605 | lpfc_release_scsi_buf_s4(phba, psb); | ||
606 | return; | ||
607 | } | ||
608 | } | ||
609 | spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock, | ||
610 | iflag); | ||
611 | } | ||
612 | |||
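lpfc_sli4_fcp_xri_aborted() is the other half of the abort handling added later in this patch by lpfc_release_scsi_buf_s4(): buffers whose exchange was aborted are parked on lpfc_abts_scsi_buf_list, and this handler returns one to the free pool once the port reports its XRI as aborted. A condensed view of the lookup pattern from the hunk above:

    spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag);
    list_for_each_entry_safe(psb, next_psb,
            &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
        if (psb->cur_iocbq.sli4_xritag == xri) {
            /* XRI is usable again: unpark the buffer and free it */
            list_del(&psb->list);
            psb->status = IOSTAT_SUCCESS;
            spin_unlock_irqrestore(
                &phba->sli4_hba.abts_scsi_buf_list_lock, iflag);
            lpfc_release_scsi_buf_s4(phba, psb);
            return;
        }
    }
    spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag);
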
613 | /** | ||
614 | * lpfc_sli4_repost_scsi_sgl_list - Repsot the Scsi buffers sgl pages as block | ||
615 | * @phba: pointer to lpfc hba data structure. | ||
616 | * | ||
617 | * This routine walks the list of scsi buffers that have been allocated and | ||
618 | * repost them to the HBA by using SGL block post. This is needed after a | ||
619 | * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine | ||
620 | * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list | ||
621 | * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers. | ||
622 | * | ||
623 | * Returns: 0 = success, non-zero failure. | ||
624 | **/ | ||
625 | int | ||
626 | lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba) | ||
627 | { | ||
628 | struct lpfc_scsi_buf *psb; | ||
629 | int index, status, bcnt = 0, rcnt = 0, rc = 0; | ||
630 | LIST_HEAD(sblist); | ||
631 | |||
632 | for (index = 0; index < phba->sli4_hba.scsi_xri_cnt; index++) { | ||
633 | psb = phba->sli4_hba.lpfc_scsi_psb_array[index]; | ||
634 | if (psb) { | ||
635 | /* Remove from SCSI buffer list */ | ||
636 | list_del(&psb->list); | ||
637 | /* Add it to a local SCSI buffer list */ | ||
638 | list_add_tail(&psb->list, &sblist); | ||
639 | if (++rcnt == LPFC_NEMBED_MBOX_SGL_CNT) { | ||
640 | bcnt = rcnt; | ||
641 | rcnt = 0; | ||
642 | } | ||
643 | } else | ||
644 | /* A hole present in the XRI array, need to skip */ | ||
645 | bcnt = rcnt; | ||
646 | |||
647 | if (index == phba->sli4_hba.scsi_xri_cnt - 1) | ||
648 | /* End of XRI array for SCSI buffer, complete */ | ||
649 | bcnt = rcnt; | ||
650 | |||
651 | /* Continue until collect up to a nembed page worth of sgls */ | ||
652 | if (bcnt == 0) | ||
653 | continue; | ||
654 | /* Now, post the SCSI buffer list sgls as a block */ | ||
655 | status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt); | ||
656 | /* Reset SCSI buffer count for next round of posting */ | ||
657 | bcnt = 0; | ||
658 | while (!list_empty(&sblist)) { | ||
659 | list_remove_head(&sblist, psb, struct lpfc_scsi_buf, | ||
660 | list); | ||
661 | if (status) { | ||
662 | /* Put this back on the abort scsi list */ | ||
663 | psb->status = IOSTAT_LOCAL_REJECT; | ||
664 | psb->result = IOERR_ABORT_REQUESTED; | ||
665 | rc++; | ||
666 | } else | ||
667 | psb->status = IOSTAT_SUCCESS; | ||
668 | /* Put it back into the SCSI buffer list */ | ||
669 | lpfc_release_scsi_buf_s4(phba, psb); | ||
670 | } | ||
494 | } | 671 | } |
495 | psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; | 672 | return rc; |
673 | } | ||
496 | 674 | ||
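The tail of lpfc_sli4_repost_scsi_sgl_list() uses the same drain-and-classify loop the SLI-4 allocator below uses after posting a block of SGLs: buffers from a failed post are flagged so that lpfc_release_scsi_buf_s4() parks them on the abort list rather than the free list. Condensed from the hunk above:

    while (!list_empty(&sblist)) {
        list_remove_head(&sblist, psb, struct lpfc_scsi_buf, list);
        if (status) {
            /* failed block post: route via the abort scsi list */
            psb->status = IOSTAT_LOCAL_REJECT;
            psb->result = IOERR_ABORT_REQUESTED;
        } else
            psb->status = IOSTAT_SUCCESS;
        lpfc_release_scsi_buf_s4(phba, psb);
    }
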
497 | psb->fcp_cmnd = psb->data; | 675 | /** |
498 | psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd); | 676 | * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec |
499 | psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) + | 677 | * @vport: The virtual port for which this call being executed. |
500 | sizeof(struct fcp_rsp); | 678 | * @num_to_allocate: The requested number of buffers to allocate. |
679 | * | ||
680 | * This routine allocates a scsi buffer for device with SLI-4 interface spec, | ||
681 | * the scsi buffer contains all the necessary information needed to initiate | ||
682 | * a SCSI I/O. | ||
683 | * | ||
684 | * Return codes: | ||
685 | * int - number of scsi buffers that were allocated. | ||
686 | * 0 = failure, less than num_to_alloc is a partial failure. | ||
687 | **/ | ||
688 | static int | ||
689 | lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) | ||
690 | { | ||
691 | struct lpfc_hba *phba = vport->phba; | ||
692 | struct lpfc_scsi_buf *psb; | ||
693 | struct sli4_sge *sgl; | ||
694 | IOCB_t *iocb; | ||
695 | dma_addr_t pdma_phys_fcp_cmd; | ||
696 | dma_addr_t pdma_phys_fcp_rsp; | ||
697 | dma_addr_t pdma_phys_bpl, pdma_phys_bpl1; | ||
698 | uint16_t iotag, last_xritag = NO_XRI; | ||
699 | int status = 0, index; | ||
700 | int bcnt; | ||
701 | int non_sequential_xri = 0; | ||
702 | int rc = 0; | ||
703 | LIST_HEAD(sblist); | ||
704 | |||
705 | for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { | ||
706 | psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); | ||
707 | if (!psb) | ||
708 | break; | ||
501 | 709 | ||
502 | /* Initialize local short-hand pointers. */ | 710 | /* |
503 | bpl = psb->fcp_bpl; | 711 | * Get memory from the pci pool to map the virt space to pci bus |
504 | pdma_phys_fcp_cmd = psb->dma_handle; | 712 | * space for an I/O. The DMA buffer includes space for the |
505 | pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd); | 713 | * struct fcp_cmnd, struct fcp_rsp and the number of bde's |
506 | pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) + | 714 | * necessary to support the sg_tablesize. |
507 | sizeof(struct fcp_rsp); | 715 | */ |
716 | psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, | ||
717 | GFP_KERNEL, &psb->dma_handle); | ||
718 | if (!psb->data) { | ||
719 | kfree(psb); | ||
720 | break; | ||
721 | } | ||
508 | 722 | ||
509 | /* | 723 | /* Initialize virtual ptrs to dma_buf region. */ |
510 | * The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg | 724 | memset(psb->data, 0, phba->cfg_sg_dma_buf_size); |
511 | * list bdes. Initialize the first two and leave the rest for | ||
512 | * queuecommand. | ||
513 | */ | ||
514 | bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd)); | ||
515 | bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd)); | ||
516 | bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd); | ||
517 | bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64; | ||
518 | bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w); | ||
519 | |||
520 | /* Setup the physical region for the FCP RSP */ | ||
521 | bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp)); | ||
522 | bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp)); | ||
523 | bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp); | ||
524 | bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64; | ||
525 | bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w); | ||
526 | 725 | ||
527 | /* | 726 | /* Allocate iotag for psb->cur_iocbq. */ |
528 | * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf, | 727 | iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); |
529 | * initialize it with all known data now. | 728 | if (iotag == 0) { |
530 | */ | 729 | kfree(psb); |
531 | iocb = &psb->cur_iocbq.iocb; | 730 | break; |
532 | iocb->un.fcpi64.bdl.ulpIoTag32 = 0; | 731 | } |
533 | if ((phba->sli_rev == 3) && | 732 | |
534 | !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { | 733 | psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba); |
535 | /* fill in immediate fcp command BDE */ | 734 | if (psb->cur_iocbq.sli4_xritag == NO_XRI) { |
536 | iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED; | 735 | pci_pool_free(phba->lpfc_scsi_dma_buf_pool, |
736 | psb->data, psb->dma_handle); | ||
737 | kfree(psb); | ||
738 | break; | ||
739 | } | ||
740 | if (last_xritag != NO_XRI | ||
741 | && psb->cur_iocbq.sli4_xritag != (last_xritag+1)) { | ||
742 | non_sequential_xri = 1; | ||
743 | } else | ||
744 | list_add_tail(&psb->list, &sblist); | ||
745 | last_xritag = psb->cur_iocbq.sli4_xritag; | ||
746 | |||
747 | index = phba->sli4_hba.scsi_xri_cnt++; | ||
748 | psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; | ||
749 | |||
750 | psb->fcp_bpl = psb->data; | ||
751 | psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size) | ||
752 | - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); | ||
753 | psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd + | ||
754 | sizeof(struct fcp_cmnd)); | ||
755 | |||
756 | /* Initialize local short-hand pointers. */ | ||
757 | sgl = (struct sli4_sge *)psb->fcp_bpl; | ||
758 | pdma_phys_bpl = psb->dma_handle; | ||
759 | pdma_phys_fcp_cmd = | ||
760 | (psb->dma_handle + phba->cfg_sg_dma_buf_size) | ||
761 | - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); | ||
762 | pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd); | ||
763 | |||
764 | /* | ||
765 | * The first two bdes are the FCP_CMD and FCP_RSP. The balance | ||
766 | * are sg list bdes. Initialize the first two and leave the | ||
767 | * rest for queuecommand. | ||
768 | */ | ||
769 | sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd)); | ||
770 | sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd)); | ||
771 | bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_cmnd)); | ||
772 | bf_set(lpfc_sli4_sge_last, sgl, 0); | ||
773 | sgl->word2 = cpu_to_le32(sgl->word2); | ||
774 | sgl->word3 = cpu_to_le32(sgl->word3); | ||
775 | sgl++; | ||
776 | |||
777 | /* Setup the physical region for the FCP RSP */ | ||
778 | sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp)); | ||
779 | sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp)); | ||
780 | bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_rsp)); | ||
781 | bf_set(lpfc_sli4_sge_last, sgl, 1); | ||
782 | sgl->word2 = cpu_to_le32(sgl->word2); | ||
783 | sgl->word3 = cpu_to_le32(sgl->word3); | ||
784 | |||
785 | /* | ||
786 | * Since the IOCB for the FCP I/O is built into this | ||
787 | * lpfc_scsi_buf, initialize it with all known data now. | ||
788 | */ | ||
789 | iocb = &psb->cur_iocbq.iocb; | ||
790 | iocb->un.fcpi64.bdl.ulpIoTag32 = 0; | ||
791 | iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64; | ||
792 | /* setting the BLP size to 2 * sizeof BDE may not be correct. | ||
793 | * We are setting the bpl to point to out sgl. An sgl's | ||
794 | * entries are 16 bytes, a bpl entries are 12 bytes. | ||
795 | */ | ||
537 | iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); | 796 | iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); |
538 | iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t, | 797 | iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd); |
539 | unsli3.fcp_ext.icd); | 798 | iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd); |
540 | iocb->un.fcpi64.bdl.addrHigh = 0; | ||
541 | iocb->ulpBdeCount = 0; | ||
542 | iocb->ulpLe = 0; | ||
543 | /* fill in responce BDE */ | ||
544 | iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; | ||
545 | iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize = | ||
546 | sizeof(struct fcp_rsp); | ||
547 | iocb->unsli3.fcp_ext.rbde.addrLow = | ||
548 | putPaddrLow(pdma_phys_fcp_rsp); | ||
549 | iocb->unsli3.fcp_ext.rbde.addrHigh = | ||
550 | putPaddrHigh(pdma_phys_fcp_rsp); | ||
551 | } else { | ||
552 | iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64; | ||
553 | iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64)); | ||
554 | iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl); | ||
555 | iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl); | ||
556 | iocb->ulpBdeCount = 1; | 799 | iocb->ulpBdeCount = 1; |
557 | iocb->ulpLe = 1; | 800 | iocb->ulpLe = 1; |
801 | iocb->ulpClass = CLASS3; | ||
802 | if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) | ||
803 | pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE; | ||
804 | else | ||
805 | pdma_phys_bpl1 = 0; | ||
806 | psb->dma_phys_bpl = pdma_phys_bpl; | ||
807 | phba->sli4_hba.lpfc_scsi_psb_array[index] = psb; | ||
808 | if (non_sequential_xri) { | ||
809 | status = lpfc_sli4_post_sgl(phba, pdma_phys_bpl, | ||
810 | pdma_phys_bpl1, | ||
811 | psb->cur_iocbq.sli4_xritag); | ||
812 | if (status) { | ||
813 | /* Put this back on the abort scsi list */ | ||
814 | psb->status = IOSTAT_LOCAL_REJECT; | ||
815 | psb->result = IOERR_ABORT_REQUESTED; | ||
816 | rc++; | ||
817 | } else | ||
818 | psb->status = IOSTAT_SUCCESS; | ||
819 | /* Put it back into the SCSI buffer list */ | ||
820 | lpfc_release_scsi_buf_s4(phba, psb); | ||
821 | break; | ||
822 | } | ||
823 | } | ||
824 | if (bcnt) { | ||
825 | status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt); | ||
826 | /* Reset SCSI buffer count for next round of posting */ | ||
827 | while (!list_empty(&sblist)) { | ||
828 | list_remove_head(&sblist, psb, struct lpfc_scsi_buf, | ||
829 | list); | ||
830 | if (status) { | ||
831 | /* Put this back on the abort scsi list */ | ||
832 | psb->status = IOSTAT_LOCAL_REJECT; | ||
833 | psb->result = IOERR_ABORT_REQUESTED; | ||
834 | rc++; | ||
835 | } else | ||
836 | psb->status = IOSTAT_SUCCESS; | ||
837 | /* Put it back into the SCSI buffer list */ | ||
838 | lpfc_release_scsi_buf_s4(phba, psb); | ||
839 | } | ||
558 | } | 840 | } |
559 | iocb->ulpClass = CLASS3; | ||
560 | 841 | ||
561 | return psb; | 842 | return bcnt + non_sequential_xri - rc; |
562 | } | 843 | } |
563 | 844 | ||
564 | /** | 845 | /** |
565 | * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list list of Hba | 846 | * lpfc_new_scsi_buf - Wrapper funciton for scsi buffer allocator |
566 | * @phba: The Hba for which this call is being executed. | 847 | * @vport: The virtual port for which this call being executed. |
848 | * @num_to_allocate: The requested number of buffers to allocate. | ||
849 | * | ||
850 | * This routine wraps the actual SCSI buffer allocator function pointer from | ||
851 | * the lpfc_hba struct. | ||
852 | * | ||
853 | * Return codes: | ||
854 | * int - number of scsi buffers that were allocated. | ||
855 | * 0 = failure, less than num_to_alloc is a partial failure. | ||
856 | **/ | ||
857 | static inline int | ||
858 | lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc) | ||
859 | { | ||
860 | return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc); | ||
861 | } | ||
862 | |||
863 | /** | ||
864 | * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA | ||
865 | * @phba: The HBA for which this call is being executed. | ||
567 | * | 866 | * |
568 | * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list | 867 | * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list |
569 | * and returns to caller. | 868 | * and returns to caller. |
@@ -591,7 +890,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba) | |||
591 | } | 890 | } |
592 | 891 | ||
593 | /** | 892 | /** |
594 | * lpfc_release_scsi_buf - Return a scsi buffer back to hba's lpfc_scsi_buf_list | 893 | * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list |
595 | * @phba: The Hba for which this call is being executed. | 894 | * @phba: The Hba for which this call is being executed. |
596 | * @psb: The scsi buffer which is being released. | 895 | * @psb: The scsi buffer which is being released. |
597 | * | 896 | * |
@@ -599,7 +898,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba) | |||
599 | * lpfc_scsi_buf_list list. | 898 | * lpfc_scsi_buf_list list. |
600 | **/ | 899 | **/ |
601 | static void | 900 | static void |
602 | lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) | 901 | lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) |
603 | { | 902 | { |
604 | unsigned long iflag = 0; | 903 | unsigned long iflag = 0; |
605 | 904 | ||
@@ -610,21 +909,69 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) | |||
610 | } | 909 | } |
611 | 910 | ||
612 | /** | 911 | /** |
613 | * lpfc_scsi_prep_dma_buf - Routine to do DMA mapping for scsi buffer | 912 | * lpfc_release_scsi_buf_s4: Return a scsi buffer back to hba scsi buf list. |
913 | * @phba: The Hba for which this call is being executed. | ||
914 | * @psb: The scsi buffer which is being released. | ||
915 | * | ||
916 | * This routine releases @psb scsi buffer by adding it to tail of @phba | ||
917 | * lpfc_scsi_buf_list list. For SLI4 XRI's are tied to the scsi buffer | ||
918 | * and cannot be reused for at least RA_TOV amount of time if it was | ||
919 | * aborted. | ||
920 | **/ | ||
921 | static void | ||
922 | lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) | ||
923 | { | ||
924 | unsigned long iflag = 0; | ||
925 | |||
926 | if (psb->status == IOSTAT_LOCAL_REJECT | ||
927 | && psb->result == IOERR_ABORT_REQUESTED) { | ||
928 | spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, | ||
929 | iflag); | ||
930 | psb->pCmd = NULL; | ||
931 | list_add_tail(&psb->list, | ||
932 | &phba->sli4_hba.lpfc_abts_scsi_buf_list); | ||
933 | spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock, | ||
934 | iflag); | ||
935 | } else { | ||
936 | |||
937 | spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); | ||
938 | psb->pCmd = NULL; | ||
939 | list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list); | ||
940 | spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); | ||
941 | } | ||
942 | } | ||
943 | |||
944 | /** | ||
945 | * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list. | ||
946 | * @phba: The Hba for which this call is being executed. | ||
947 | * @psb: The scsi buffer which is being released. | ||
948 | * | ||
949 | * This routine releases @psb scsi buffer by adding it to tail of @phba | ||
950 | * lpfc_scsi_buf_list list. | ||
951 | **/ | ||
952 | static void | ||
953 | lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) | ||
954 | { | ||
955 | |||
956 | phba->lpfc_release_scsi_buf(phba, psb); | ||
957 | } | ||
958 | |||
959 | /** | ||
960 | * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec | ||
614 | * @phba: The Hba for which this call is being executed. | 961 | * @phba: The Hba for which this call is being executed. |
615 | * @lpfc_cmd: The scsi buffer which is going to be mapped. | 962 | * @lpfc_cmd: The scsi buffer which is going to be mapped. |
616 | * | 963 | * |
617 | * This routine does the pci dma mapping for scatter-gather list of scsi cmnd | 964 | * This routine does the pci dma mapping for scatter-gather list of scsi cmnd |
618 | * field of @lpfc_cmd. This routine scans through sg elements and format the | 965 | * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans |
619 | * bdea. This routine also initializes all IOCB fields which are dependent on | 966 | * through sg elements and format the bdea. This routine also initializes all |
620 | * scsi command request buffer. | 967 | * IOCB fields which are dependent on scsi command request buffer. |
621 | * | 968 | * |
622 | * Return codes: | 969 | * Return codes: |
623 | * 1 - Error | 970 | * 1 - Error |
624 | * 0 - Success | 971 | * 0 - Success |
625 | **/ | 972 | **/ |
626 | static int | 973 | static int |
627 | lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) | 974 | lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) |
628 | { | 975 | { |
629 | struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; | 976 | struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; |
630 | struct scatterlist *sgel = NULL; | 977 | struct scatterlist *sgel = NULL; |
@@ -1412,6 +1759,133 @@ out: | |||
1412 | } | 1759 | } |
1413 | 1760 | ||
1414 | /** | 1761 | /** |
1762 | * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec | ||
1763 | * @phba: The Hba for which this call is being executed. | ||
1764 | * @lpfc_cmd: The scsi buffer which is going to be mapped. | ||
1765 | * | ||
1766 | * This routine does the pci dma mapping for scatter-gather list of scsi cmnd | ||
1767 | * field of @lpfc_cmd for device with SLI-4 interface spec. | ||
1768 | * | ||
1769 | * Return codes: | ||
1770 | * 1 - Error | ||
1771 | * 0 - Success | ||
1772 | **/ | ||
1773 | static int | ||
1774 | lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) | ||
1775 | { | ||
1776 | struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; | ||
1777 | struct scatterlist *sgel = NULL; | ||
1778 | struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; | ||
1779 | struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; | ||
1780 | IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; | ||
1781 | dma_addr_t physaddr; | ||
1782 | uint32_t num_bde = 0; | ||
1783 | uint32_t dma_len; | ||
1784 | uint32_t dma_offset = 0; | ||
1785 | int nseg; | ||
1786 | |||
1787 | /* | ||
1788 | * There are three possibilities here - use scatter-gather segment, use | ||
1789 | * the single mapping, or neither. Start the lpfc command prep by | ||
1790 | * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first | ||
1791 | * data bde entry. | ||
1792 | */ | ||
1793 | if (scsi_sg_count(scsi_cmnd)) { | ||
1794 | /* | ||
1795 | * The driver stores the segment count returned from pci_map_sg | ||
1796 | * because this a count of dma-mappings used to map the use_sg | ||
1797 | * pages. They are not guaranteed to be the same for those | ||
1798 | * architectures that implement an IOMMU. | ||
1799 | */ | ||
1800 | |||
1801 | nseg = scsi_dma_map(scsi_cmnd); | ||
1802 | if (unlikely(!nseg)) | ||
1803 | return 1; | ||
1804 | sgl += 1; | ||
1805 | /* clear the last flag in the fcp_rsp map entry */ | ||
1806 | sgl->word2 = le32_to_cpu(sgl->word2); | ||
1807 | bf_set(lpfc_sli4_sge_last, sgl, 0); | ||
1808 | sgl->word2 = cpu_to_le32(sgl->word2); | ||
1809 | sgl += 1; | ||
1810 | |||
1811 | lpfc_cmd->seg_cnt = nseg; | ||
1812 | if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { | ||
1813 | printk(KERN_ERR "%s: Too many sg segments from " | ||
1814 | "dma_map_sg. Config %d, seg_cnt %d\n", | ||
1815 | __func__, phba->cfg_sg_seg_cnt, | ||
1816 | lpfc_cmd->seg_cnt); | ||
1817 | scsi_dma_unmap(scsi_cmnd); | ||
1818 | return 1; | ||
1819 | } | ||
1820 | |||
1821 | /* | ||
1822 | * The driver established a maximum scatter-gather segment count | ||
1823 | * during probe that limits the number of sg elements in any | ||
1824 | * single scsi command. Just run through the seg_cnt and format | ||
1825 | * the sge's. | ||
1826 | * When using SLI-3 the driver will try to fit all the BDEs into | ||
1827 | * the IOCB. If it can't then the BDEs get added to a BPL as it | ||
1828 | * does for SLI-2 mode. | ||
1829 | */ | ||
1830 | scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) { | ||
1831 | physaddr = sg_dma_address(sgel); | ||
1832 | dma_len = sg_dma_len(sgel); | ||
1833 | bf_set(lpfc_sli4_sge_len, sgl, sg_dma_len(sgel)); | ||
1834 | sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); | ||
1835 | sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); | ||
1836 | if ((num_bde + 1) == nseg) | ||
1837 | bf_set(lpfc_sli4_sge_last, sgl, 1); | ||
1838 | else | ||
1839 | bf_set(lpfc_sli4_sge_last, sgl, 0); | ||
1840 | bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); | ||
1841 | sgl->word2 = cpu_to_le32(sgl->word2); | ||
1842 | sgl->word3 = cpu_to_le32(sgl->word3); | ||
1843 | dma_offset += dma_len; | ||
1844 | sgl++; | ||
1845 | } | ||
1846 | } else { | ||
1847 | sgl += 1; | ||
1848 | /* clear the last flag in the fcp_rsp map entry */ | ||
1849 | sgl->word2 = le32_to_cpu(sgl->word2); | ||
1850 | bf_set(lpfc_sli4_sge_last, sgl, 1); | ||
1851 | sgl->word2 = cpu_to_le32(sgl->word2); | ||
1852 | } | ||
1853 | |||
1854 | /* | ||
1855 | * Finish initializing those IOCB fields that are dependent on the | ||
1856 | * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is | ||
1857 | * explicitly reinitialized. | ||
1858 | * all iocb memory resources are reused. | ||
1859 | */ | ||
1860 | fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); | ||
1861 | |||
1862 | /* | ||
1863 | * Due to difference in data length between DIF/non-DIF paths, | ||
1864 | * we need to set word 4 of IOCB here | ||
1865 | */ | ||
1866 | iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd); | ||
1867 | return 0; | ||
1868 | } | ||
1869 | |||
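The heart of lpfc_scsi_prep_dma_buf_s4() is the per-segment walk: one SGE is written for each dma-mapped segment, and the "last" flag, cleared on the fcp_rsp SGE just before the loop, is set only on the final data SGE. Condensed from the hunk above:

    scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
        physaddr = sg_dma_address(sgel);
        dma_len = sg_dma_len(sgel);
        sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
        sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
        bf_set(lpfc_sli4_sge_len, sgl, dma_len);
        bf_set(lpfc_sli4_sge_last, sgl, ((num_bde + 1) == nseg));
        bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
        sgl->word2 = cpu_to_le32(sgl->word2);
        sgl->word3 = cpu_to_le32(sgl->word3);
        dma_offset += dma_len;
        sgl++;
    }
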
1870 | /** | ||
1871 | * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer | ||
1872 | * @phba: The Hba for which this call is being executed. | ||
1873 | * @lpfc_cmd: The scsi buffer which is going to be mapped. | ||
1874 | * | ||
1875 | * This routine wraps the actual DMA mapping function pointer from the | ||
1876 | * lpfc_hba struct. | ||
1877 | * | ||
1878 | * Return codes: | ||
1879 | * 1 - Error | ||
1880 | * 0 - Success | ||
1881 | **/ | ||
1882 | static inline int | ||
1883 | lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) | ||
1884 | { | ||
1885 | return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); | ||
1886 | } | ||
1887 | |||
1888 | /** | ||
1415 | * lpfc_send_scsi_error_event - Posts an event when there is SCSI error | 1889 | * lpfc_send_scsi_error_event - Posts an event when there is SCSI error |
1416 | * @phba: Pointer to hba context object. | 1890 | * @phba: Pointer to hba context object. |
1417 | * @vport: Pointer to vport object. | 1891 | * @vport: Pointer to vport object. |
@@ -1504,15 +1978,15 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport, | |||
1504 | } | 1978 | } |
1505 | 1979 | ||
1506 | /** | 1980 | /** |
1507 | * lpfc_scsi_unprep_dma_buf - Routine to un-map DMA mapping of scatter gather | 1981 | * lpfc_scsi_unprep_dma_buf_s3 - Un-map DMA mapping of SG-list for SLI3 dev |
1508 | * @phba: The Hba for which this call is being executed. | 1982 | * @phba: The HBA for which this call is being executed. |
1509 | * @psb: The scsi buffer which is going to be un-mapped. | 1983 | * @psb: The scsi buffer which is going to be un-mapped. |
1510 | * | 1984 | * |
1511 | * This routine does DMA un-mapping of scatter gather list of scsi command | 1985 | * This routine does DMA un-mapping of scatter gather list of scsi command |
1512 | * field of @lpfc_cmd. | 1986 | * field of @lpfc_cmd for device with SLI-3 interface spec. |
1513 | **/ | 1987 | **/ |
1514 | static void | 1988 | static void |
1515 | lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) | 1989 | lpfc_scsi_unprep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) |
1516 | { | 1990 | { |
1517 | /* | 1991 | /* |
1518 | * There are only two special cases to consider. (1) the scsi command | 1992 | * There are only two special cases to consider. (1) the scsi command |
@@ -1529,6 +2003,36 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) | |||
1529 | } | 2003 | } |
1530 | 2004 | ||
1531 | /** | 2005 | /** |
2006 | * lpfc_scsi_unprep_dma_buf_s4 - Un-map DMA mapping of SG-list for SLI4 dev | ||
2007 | * @phba: The Hba for which this call is being executed. | ||
2008 | * @psb: The scsi buffer which is going to be un-mapped. | ||
2009 | * | ||
2010 | * This routine does DMA un-mapping of scatter gather list of scsi command | ||
2011 | * field of @lpfc_cmd for device with SLI-4 interface spec. If we have to | ||
2012 | * remove the sgl for this scsi buffer then we will do it here. For now | ||
2013 | * we should be able to just call the sli3 unprep routine. | ||
2014 | **/ | ||
2015 | static void | ||
2016 | lpfc_scsi_unprep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) | ||
2017 | { | ||
2018 | lpfc_scsi_unprep_dma_buf_s3(phba, psb); | ||
2019 | } | ||
2020 | |||
2021 | /** | ||
2022 | * lpfc_scsi_unprep_dma_buf - Wrapper function for unmap DMA mapping of SG-list | ||
2023 | * @phba: The Hba for which this call is being executed. | ||
2024 | * @psb: The scsi buffer which is going to be un-mapped. | ||
2025 | * | ||
2026 | * This routine does DMA un-mapping of scatter gather list of scsi command | ||
2027 | * field of @lpfc_cmd for device with SLI-4 interface spec. | ||
2028 | **/ | ||
2029 | static void | ||
2030 | lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) | ||
2031 | { | ||
2032 | phba->lpfc_scsi_unprep_dma_buf(phba, psb); | ||
2033 | } | ||
2034 | |||
2035 | /** | ||
1532 | * lpfc_handler_fcp_err - FCP response handler | 2036 | * lpfc_handler_fcp_err - FCP response handler |
1533 | * @vport: The virtual port for which this call is being executed. | 2037 | * @vport: The virtual port for which this call is being executed. |
1534 | * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. | 2038 | * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. |
@@ -1676,7 +2180,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | |||
1676 | * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine | 2180 | * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine |
1677 | * @phba: The Hba for which this call is being executed. | 2181 | * @phba: The Hba for which this call is being executed. |
1678 | * @pIocbIn: The command IOCBQ for the scsi cmnd. | 2182 | * @pIocbIn: The command IOCBQ for the scsi cmnd. |
1679 | * @pIocbOut: The response IOCBQ for the scsi cmnd . | 2183 | * @pIocbOut: The response IOCBQ for the scsi cmnd. |
1680 | * | 2184 | * |
1681 | * This routine assigns scsi command result by looking into response IOCB | 2185 | * This routine assigns scsi command result by looking into response IOCB |
1682 | * status field appropriately. This routine handles QUEUE FULL condition as | 2186 | * status field appropriately. This routine handles QUEUE FULL condition as |
@@ -1957,16 +2461,16 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd) | |||
1957 | } | 2461 | } |
1958 | 2462 | ||
1959 | /** | 2463 | /** |
1960 | * lpfc_scsi_prep_cmnd - Routine to convert scsi cmnd to FCP information unit | 2464 | * lpfc_scsi_prep_cmnd_s3 - Convert scsi cmnd to FCP infor unit for SLI3 dev |
1961 | * @vport: The virtual port for which this call is being executed. | 2465 | * @vport: The virtual port for which this call is being executed. |
1962 | * @lpfc_cmd: The scsi command which needs to send. | 2466 | * @lpfc_cmd: The scsi command which needs to send. |
1963 | * @pnode: Pointer to lpfc_nodelist. | 2467 | * @pnode: Pointer to lpfc_nodelist. |
1964 | * | 2468 | * |
1965 | * This routine initializes fcp_cmnd and iocb data structure from scsi command | 2469 | * This routine initializes fcp_cmnd and iocb data structure from scsi command |
1966 | * to transfer. | 2470 | * to transfer for device with SLI3 interface spec. |
1967 | **/ | 2471 | **/ |
1968 | static void | 2472 | static void |
1969 | lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | 2473 | lpfc_scsi_prep_cmnd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, |
1970 | struct lpfc_nodelist *pnode) | 2474 | struct lpfc_nodelist *pnode) |
1971 | { | 2475 | { |
1972 | struct lpfc_hba *phba = vport->phba; | 2476 | struct lpfc_hba *phba = vport->phba; |
@@ -2013,8 +2517,11 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | |||
2013 | if (scsi_sg_count(scsi_cmnd)) { | 2517 | if (scsi_sg_count(scsi_cmnd)) { |
2014 | if (datadir == DMA_TO_DEVICE) { | 2518 | if (datadir == DMA_TO_DEVICE) { |
2015 | iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; | 2519 | iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; |
2016 | iocb_cmd->un.fcpi.fcpi_parm = 0; | 2520 | if (phba->sli_rev < LPFC_SLI_REV4) { |
2017 | iocb_cmd->ulpPU = 0; | 2521 | iocb_cmd->un.fcpi.fcpi_parm = 0; |
2522 | iocb_cmd->ulpPU = 0; | ||
2523 | } else | ||
2524 | iocb_cmd->ulpPU = PARM_READ_CHECK; | ||
2018 | fcp_cmnd->fcpCntl3 = WRITE_DATA; | 2525 | fcp_cmnd->fcpCntl3 = WRITE_DATA; |
2019 | phba->fc4OutputRequests++; | 2526 | phba->fc4OutputRequests++; |
2020 | } else { | 2527 | } else { |
@@ -2051,20 +2558,60 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | |||
2051 | } | 2558 | } |
2052 | 2559 | ||
2053 | /** | 2560 | /** |
2054 | * lpfc_scsi_prep_task_mgmt_cmnd - Convert scsi TM cmnd to FCP information unit | 2561 | * lpfc_scsi_prep_cmnd_s4 - Convert scsi cmnd to FCP infor unit for SLI4 dev |
2562 | * @vport: The virtual port for which this call is being executed. | ||
2563 | * @lpfc_cmd: The scsi command which needs to send. | ||
2564 | * @pnode: Pointer to lpfc_nodelist. | ||
2565 | * | ||
2566 | * This routine initializes fcp_cmnd and iocb data structure from scsi command | ||
2567 | * to transfer for device with SLI4 interface spec. | ||
2568 | **/ | ||
2569 | static void | ||
2570 | lpfc_scsi_prep_cmnd_s4(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | ||
2571 | struct lpfc_nodelist *pnode) | ||
2572 | { | ||
2573 | /* | ||
2574 | * The prep cmnd routines do not touch the sgl or its | ||
2575 | * entries. We may not have to do anything different. | ||
2576 | * I will leave this function in place until we can | ||
2577 | * run some IO through the driver and determine if changes | ||
2578 | * are needed. | ||
2579 | */ | ||
2580 | return lpfc_scsi_prep_cmnd_s3(vport, lpfc_cmd, pnode); | ||
2581 | } | ||
2582 | |||
2583 | /** | ||
2584 | * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit | ||
2585 | * @vport: The virtual port for which this call is being executed. | ||
2586 | * @lpfc_cmd: The scsi command which needs to send. | ||
2587 | * @pnode: Pointer to lpfc_nodelist. | ||
2588 | * | ||
2589 | * This routine wraps the actual convert SCSI cmnd function pointer from | ||
2590 | * the lpfc_hba struct. | ||
2591 | **/ | ||
2592 | static inline void | ||
2593 | lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | ||
2594 | struct lpfc_nodelist *pnode) | ||
2595 | { | ||
2596 | vport->phba->lpfc_scsi_prep_cmnd(vport, lpfc_cmd, pnode); | ||
2597 | } | ||
2598 | |||
2599 | /** | ||
2600 | * lpfc_scsi_prep_task_mgmt_cmnd_s3 - Convert SLI3 scsi TM cmd to FCP info unit | ||
2055 | * @vport: The virtual port for which this call is being executed. | 2601 | * @vport: The virtual port for which this call is being executed. |
2056 | * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. | 2602 | * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. |
2057 | * @lun: Logical unit number. | 2603 | * @lun: Logical unit number. |
2058 | * @task_mgmt_cmd: SCSI task management command. | 2604 | * @task_mgmt_cmd: SCSI task management command. |
2059 | * | 2605 | * |
2060 | * This routine creates FCP information unit corresponding to @task_mgmt_cmd. | 2606 | * This routine creates FCP information unit corresponding to @task_mgmt_cmd |
2607 | * for device with SLI-3 interface spec. | ||
2061 | * | 2608 | * |
2062 | * Return codes: | 2609 | * Return codes: |
2063 | * 0 - Error | 2610 | * 0 - Error |
2064 | * 1 - Success | 2611 | * 1 - Success |
2065 | **/ | 2612 | **/ |
2066 | static int | 2613 | static int |
2067 | lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, | 2614 | lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport, |
2068 | struct lpfc_scsi_buf *lpfc_cmd, | 2615 | struct lpfc_scsi_buf *lpfc_cmd, |
2069 | unsigned int lun, | 2616 | unsigned int lun, |
2070 | uint8_t task_mgmt_cmd) | 2617 | uint8_t task_mgmt_cmd) |
@@ -2114,6 +2661,107 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, | |||
2114 | } | 2661 | } |
2115 | 2662 | ||
2116 | /** | 2663 | /** |
2664 | * lpfc_scsi_prep_task_mgmt_cmnd_s4 - Convert SLI4 scsi TM cmd to FCP info unit | ||
2665 | * @vport: The virtual port for which this call is being executed. | ||
2666 | * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. | ||
2667 | * @lun: Logical unit number. | ||
2668 | * @task_mgmt_cmd: SCSI task management command. | ||
2669 | * | ||
2670 | * This routine creates FCP information unit corresponding to @task_mgmt_cmd | ||
2671 | * for device with SLI-4 interface spec. | ||
2672 | * | ||
2673 | * Return codes: | ||
2674 | * 0 - Error | ||
2675 | * 1 - Success | ||
2676 | **/ | ||
2677 | static int | ||
2678 | lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport, | ||
2679 | struct lpfc_scsi_buf *lpfc_cmd, | ||
2680 | unsigned int lun, | ||
2681 | uint8_t task_mgmt_cmd) | ||
2682 | { | ||
2683 | /* | ||
2684 | * The prep cmnd routines do not touch the sgl or its | ||
2685 | * entries. We may not have to do anything different. | ||
2686 | * I will leave this function in place until we can | ||
2687 | * run some IO through the driver and determine if changes | ||
2688 | * are needed. | ||
2689 | */ | ||
2690 | return lpfc_scsi_prep_task_mgmt_cmd_s3(vport, lpfc_cmd, lun, | ||
2691 | task_mgmt_cmd); | ||
2692 | } | ||
2693 | |||
2694 | /** | ||
2695 | * lpfc_scsi_prep_task_mgmt_cmnd - Wrapper func convert scsi TM cmd to FCP info | ||
2696 | * @vport: The virtual port for which this call is being executed. | ||
2697 | * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. | ||
2698 | * @lun: Logical unit number. | ||
2699 | * @task_mgmt_cmd: SCSI task management command. | ||
2700 | * | ||
2701 | * This routine wraps the actual convert SCSI TM to FCP information unit | ||
2702 | * function pointer from the lpfc_hba struct. | ||
2703 | * | ||
2704 | * Return codes: | ||
2705 | * 0 - Error | ||
2706 | * 1 - Success | ||
2707 | **/ | ||
2708 | static inline int | ||
2709 | lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, | ||
2710 | struct lpfc_scsi_buf *lpfc_cmd, | ||
2711 | unsigned int lun, | ||
2712 | uint8_t task_mgmt_cmd) | ||
2713 | { | ||
2714 | struct lpfc_hba *phba = vport->phba; | ||
2715 | |||
2716 | return phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun, | ||
2717 | task_mgmt_cmd); | ||
2718 | } | ||
2719 | |||
2720 | /** | ||
2721 | * lpfc_scsi_api_table_setup - Set up scsi api fucntion jump table | ||
2722 | * @phba: The hba struct for which this call is being executed. | ||
2723 | * @dev_grp: The HBA PCI-Device group number. | ||
2724 | * | ||
2725 | * This routine sets up the SCSI interface API function jump table in @phba | ||
2726 | * struct. | ||
2727 | * Returns: 0 - success, -ENODEV - failure. | ||
2728 | **/ | ||
2729 | int | ||
2730 | lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) | ||
2731 | { | ||
2732 | |||
2733 | switch (dev_grp) { | ||
2734 | case LPFC_PCI_DEV_LP: | ||
2735 | phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3; | ||
2736 | phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3; | ||
2737 | phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s3; | ||
2738 | phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s3; | ||
2739 | phba->lpfc_scsi_prep_task_mgmt_cmd = | ||
2740 | lpfc_scsi_prep_task_mgmt_cmd_s3; | ||
2741 | phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3; | ||
2742 | break; | ||
2743 | case LPFC_PCI_DEV_OC: | ||
2744 | phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4; | ||
2745 | phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4; | ||
2746 | phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s4; | ||
2747 | phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s4; | ||
2748 | phba->lpfc_scsi_prep_task_mgmt_cmd = | ||
2749 | lpfc_scsi_prep_task_mgmt_cmd_s4; | ||
2750 | phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4; | ||
2751 | break; | ||
2752 | default: | ||
2753 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
2754 | "1418 Invalid HBA PCI-device group: 0x%x\n", | ||
2755 | dev_grp); | ||
2756 | return -ENODEV; | ||
2757 | break; | ||
2758 | } | ||
2759 | phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf; | ||
2760 | phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth; | ||
2761 | return 0; | ||
2762 | } | ||
2763 | |||
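lpfc_scsi_api_table_setup() is the piece that ties the _s3/_s4 variants above together: the function pointers live in struct lpfc_hba (declared in lpfc.h, not in this file), are filled in once per HBA at probe time, and the thin wrappers dispatch through them so the I/O paths never re-test the SLI revision. A minimal sketch of the pattern, using only names from the hunks above:

    /* Hot-path wrapper: dispatch through the per-HBA table. */
    static inline int
    lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
    {
        return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);  /* _s3 or _s4 */
    }

    /* Probe-time selection, keyed on the PCI device group. */
    switch (dev_grp) {
    case LPFC_PCI_DEV_LP:   /* SLI-3 routines */
        phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
        break;
    case LPFC_PCI_DEV_OC:   /* SLI-4 routines */
        phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
        break;
    }
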
2764 | /** | ||
2117 | * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command | 2765 | * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command |
2118 | * @phba: The Hba for which this call is being executed. | 2766 | * @phba: The Hba for which this call is being executed. |
2119 | * @cmdiocbq: Pointer to lpfc_iocbq data structure. | 2767 | * @cmdiocbq: Pointer to lpfc_iocbq data structure. |
@@ -2178,9 +2826,8 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport, | |||
2178 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, | 2826 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, |
2179 | "0702 Issue Target Reset to TGT %d Data: x%x x%x\n", | 2827 | "0702 Issue Target Reset to TGT %d Data: x%x x%x\n", |
2180 | tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag); | 2828 | tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag); |
2181 | status = lpfc_sli_issue_iocb_wait(phba, | 2829 | status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, |
2182 | &phba->sli.ring[phba->sli.fcp_ring], | 2830 | iocbq, iocbqrsp, lpfc_cmd->timeout); |
2183 | iocbq, iocbqrsp, lpfc_cmd->timeout); | ||
2184 | if (status != IOCB_SUCCESS) { | 2831 | if (status != IOCB_SUCCESS) { |
2185 | if (status == IOCB_TIMEDOUT) { | 2832 | if (status == IOCB_TIMEDOUT) { |
2186 | iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; | 2833 | iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; |
@@ -2305,7 +2952,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | |||
2305 | struct Scsi_Host *shost = cmnd->device->host; | 2952 | struct Scsi_Host *shost = cmnd->device->host; |
2306 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | 2953 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; |
2307 | struct lpfc_hba *phba = vport->phba; | 2954 | struct lpfc_hba *phba = vport->phba; |
2308 | struct lpfc_sli *psli = &phba->sli; | ||
2309 | struct lpfc_rport_data *rdata = cmnd->device->hostdata; | 2955 | struct lpfc_rport_data *rdata = cmnd->device->hostdata; |
2310 | struct lpfc_nodelist *ndlp = rdata->pnode; | 2956 | struct lpfc_nodelist *ndlp = rdata->pnode; |
2311 | struct lpfc_scsi_buf *lpfc_cmd; | 2957 | struct lpfc_scsi_buf *lpfc_cmd; |
@@ -2427,7 +3073,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | |||
2427 | lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); | 3073 | lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); |
2428 | 3074 | ||
2429 | atomic_inc(&ndlp->cmd_pending); | 3075 | atomic_inc(&ndlp->cmd_pending); |
2430 | err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring], | 3076 | err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, |
2431 | &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); | 3077 | &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); |
2432 | if (err) { | 3078 | if (err) { |
2433 | atomic_dec(&ndlp->cmd_pending); | 3079 | atomic_dec(&ndlp->cmd_pending); |
@@ -2490,7 +3136,6 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) | |||
2490 | struct Scsi_Host *shost = cmnd->device->host; | 3136 | struct Scsi_Host *shost = cmnd->device->host; |
2491 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | 3137 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; |
2492 | struct lpfc_hba *phba = vport->phba; | 3138 | struct lpfc_hba *phba = vport->phba; |
2493 | struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring]; | ||
2494 | struct lpfc_iocbq *iocb; | 3139 | struct lpfc_iocbq *iocb; |
2495 | struct lpfc_iocbq *abtsiocb; | 3140 | struct lpfc_iocbq *abtsiocb; |
2496 | struct lpfc_scsi_buf *lpfc_cmd; | 3141 | struct lpfc_scsi_buf *lpfc_cmd; |
@@ -2531,7 +3176,10 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) | |||
2531 | icmd = &abtsiocb->iocb; | 3176 | icmd = &abtsiocb->iocb; |
2532 | icmd->un.acxri.abortType = ABORT_TYPE_ABTS; | 3177 | icmd->un.acxri.abortType = ABORT_TYPE_ABTS; |
2533 | icmd->un.acxri.abortContextTag = cmd->ulpContext; | 3178 | icmd->un.acxri.abortContextTag = cmd->ulpContext; |
2534 | icmd->un.acxri.abortIoTag = cmd->ulpIoTag; | 3179 | if (phba->sli_rev == LPFC_SLI_REV4) |
3180 | icmd->un.acxri.abortIoTag = iocb->sli4_xritag; | ||
3181 | else | ||
3182 | icmd->un.acxri.abortIoTag = cmd->ulpIoTag; | ||
2535 | 3183 | ||
2536 | icmd->ulpLe = 1; | 3184 | icmd->ulpLe = 1; |
2537 | icmd->ulpClass = cmd->ulpClass; | 3185 | icmd->ulpClass = cmd->ulpClass; |
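In lpfc_abort_handler() the ABTS is now keyed per interface spec: SLI-4 identifies the exchange by its XRI, earlier revisions by the command's IOCB iotag. The selection, as in the hunk above:

    if (phba->sli_rev == LPFC_SLI_REV4)
        icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
    else
        icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
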
@@ -2542,7 +3190,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) | |||
2542 | 3190 | ||
2543 | abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; | 3191 | abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; |
2544 | abtsiocb->vport = vport; | 3192 | abtsiocb->vport = vport; |
2545 | if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) { | 3193 | if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) == |
3194 | IOCB_ERROR) { | ||
2546 | lpfc_sli_release_iocbq(phba, abtsiocb); | 3195 | lpfc_sli_release_iocbq(phba, abtsiocb); |
2547 | ret = FAILED; | 3196 | ret = FAILED; |
2548 | goto out; | 3197 | goto out; |
@@ -2668,8 +3317,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) | |||
2668 | "0703 Issue target reset to TGT %d LUN %d " | 3317 | "0703 Issue target reset to TGT %d LUN %d " |
2669 | "rpi x%x nlp_flag x%x\n", cmnd->device->id, | 3318 | "rpi x%x nlp_flag x%x\n", cmnd->device->id, |
2670 | cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag); | 3319 | cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag); |
2671 | status = lpfc_sli_issue_iocb_wait(phba, | 3320 | status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, |
2672 | &phba->sli.ring[phba->sli.fcp_ring], | ||
2673 | iocbq, iocbqrsp, lpfc_cmd->timeout); | 3321 | iocbq, iocbqrsp, lpfc_cmd->timeout); |
2674 | if (status == IOCB_TIMEDOUT) { | 3322 | if (status == IOCB_TIMEDOUT) { |
2675 | iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; | 3323 | iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; |
@@ -2825,11 +3473,10 @@ lpfc_slave_alloc(struct scsi_device *sdev) | |||
2825 | { | 3473 | { |
2826 | struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; | 3474 | struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; |
2827 | struct lpfc_hba *phba = vport->phba; | 3475 | struct lpfc_hba *phba = vport->phba; |
2828 | struct lpfc_scsi_buf *scsi_buf = NULL; | ||
2829 | struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); | 3476 | struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); |
2830 | uint32_t total = 0, i; | 3477 | uint32_t total = 0; |
2831 | uint32_t num_to_alloc = 0; | 3478 | uint32_t num_to_alloc = 0; |
2832 | unsigned long flags; | 3479 | int num_allocated = 0; |
2833 | 3480 | ||
2834 | if (!rport || fc_remote_port_chkready(rport)) | 3481 | if (!rport || fc_remote_port_chkready(rport)) |
2835 | return -ENXIO; | 3482 | return -ENXIO; |
@@ -2863,20 +3510,13 @@ lpfc_slave_alloc(struct scsi_device *sdev) | |||
2863 | (phba->cfg_hba_queue_depth - total)); | 3510 | (phba->cfg_hba_queue_depth - total)); |
2864 | num_to_alloc = phba->cfg_hba_queue_depth - total; | 3511 | num_to_alloc = phba->cfg_hba_queue_depth - total; |
2865 | } | 3512 | } |
2866 | 3513 | num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc); | |
2867 | for (i = 0; i < num_to_alloc; i++) { | 3514 | if (num_to_alloc != num_allocated) { |
2868 | scsi_buf = lpfc_new_scsi_buf(vport); | 3515 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, |
2869 | if (!scsi_buf) { | 3516 | "0708 Allocation request of %d " |
2870 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, | 3517 | "command buffers did not succeed. " |
2871 | "0706 Failed to allocate " | 3518 | "Allocated %d buffers.\n", |
2872 | "command buffer\n"); | 3519 | num_to_alloc, num_allocated); |
2873 | break; | ||
2874 | } | ||
2875 | |||
2876 | spin_lock_irqsave(&phba->scsi_buf_list_lock, flags); | ||
2877 | phba->total_scsi_bufs++; | ||
2878 | list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list); | ||
2879 | spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags); | ||
2880 | } | 3520 | } |
2881 | return 0; | 3521 | return 0; |
2882 | } | 3522 | } |
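
With the allocators reworked, lpfc_slave_alloc() no longer loops per buffer or touches lpfc_scsi_buf_list itself; it requests a batch through the lpfc_new_scsi_buf() wrapper and only warns when the allocation comes up short (the _s3/_s4 allocators release each new buffer onto the driver's buffer lists via lpfc_release_scsi_buf_*). Caller-side pattern, condensed from the final hunk:

    num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
    if (num_to_alloc != num_allocated)
        lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
                 "0708 Allocation request of %d "
                 "command buffers did not succeed.  "
                 "Allocated %d buffers.\n",
                 num_to_alloc, num_allocated);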