author	Rusty Russell <rusty@rustcorp.com.au>	2008-12-31 07:35:57 -0500
committer	Rusty Russell <rusty@rustcorp.com.au>	2008-12-31 07:35:57 -0500
commit	2ca1a615835d9f4990f42102ab1f2ef434e7e89c (patch)
tree	726cf3d5f29a6c66c44e4bd68e7ebed2fd83d059	/drivers/scsi/lpfc/lpfc_scsi.c
parent	e12f0102ac81d660c9f801d0a0e10ccf4537a9de (diff)
parent	6a94cb73064c952255336cc57731904174b2c58f (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts: arch/x86/kernel/io_apic.c
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_scsi.c')
-rw-r--r--	drivers/scsi/lpfc/lpfc_scsi.c	1235
1 file changed, 1176 insertions(+), 59 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index bd1867411821..b103b6ed4970 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -18,13 +18,14 @@
  * more details, a copy of which can be found in the file COPYING  *
  * included with this package.                                     *
  *******************************************************************/
-
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
+#include <asm/unaligned.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi_transport_fc.h>
@@ -43,6 +44,73 @@
 #define LPFC_RESET_WAIT  2
 #define LPFC_ABORT_WAIT  2
 
+int _dump_buf_done;
+
+static char *dif_op_str[] = {
+	"SCSI_PROT_NORMAL",
+	"SCSI_PROT_READ_INSERT",
+	"SCSI_PROT_WRITE_STRIP",
+	"SCSI_PROT_READ_STRIP",
+	"SCSI_PROT_WRITE_INSERT",
+	"SCSI_PROT_READ_PASS",
+	"SCSI_PROT_WRITE_PASS",
+	"SCSI_PROT_READ_CONVERT",
+	"SCSI_PROT_WRITE_CONVERT"
+};
+
+static void
+lpfc_debug_save_data(struct scsi_cmnd *cmnd)
+{
+	void *src, *dst;
+	struct scatterlist *sgde = scsi_sglist(cmnd);
+
+	if (!_dump_buf_data) {
+		printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_data is NULL\n",
+				__func__);
+		return;
+	}
+
+	if (!sgde) {
+		printk(KERN_ERR "BLKGRD ERROR: data scatterlist is null\n");
+		return;
+	}
+
+	dst = (void *) _dump_buf_data;
+	while (sgde) {
+		src = sg_virt(sgde);
+		memcpy(dst, src, sgde->length);
+		dst += sgde->length;
+		sgde = sg_next(sgde);
+	}
+}
+
+static void
+lpfc_debug_save_dif(struct scsi_cmnd *cmnd)
+{
+	void *src, *dst;
+	struct scatterlist *sgde = scsi_prot_sglist(cmnd);
+
+	if (!_dump_buf_dif) {
+		printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_dif is NULL\n",
+				__func__);
+		return;
+	}
+
+	if (!sgde) {
+		printk(KERN_ERR "BLKGRD ERROR: prot scatterlist is null\n");
+		return;
+	}
+
+	dst = _dump_buf_dif;
+	while (sgde) {
+		src = sg_virt(sgde);
+		memcpy(dst, src, sgde->length);
+		dst += sgde->length;
+		sgde = sg_next(sgde);
+	}
+}
+
 /**
  * lpfc_update_stats: Update statistical data for the command completion.
  * @phba: Pointer to HBA object.
@@ -66,6 +134,8 @@ lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 	if (cmd->result)
 		return;
 
+	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
+
 	spin_lock_irqsave(shost->host_lock, flags);
 	if (!vport->stat_data_enabled ||
 		vport->stat_data_blocked ||
@@ -74,13 +144,15 @@ lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 		spin_unlock_irqrestore(shost->host_lock, flags);
 		return;
 	}
-	latency = jiffies_to_msecs(jiffies - lpfc_cmd->start_time);
 
 	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
 		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
 			phba->bucket_step;
-		if (i >= LPFC_MAX_BUCKET_COUNT)
-			i = LPFC_MAX_BUCKET_COUNT;
+		/* check array subscript bounds */
+		if (i < 0)
+			i = 0;
+		else if (i >= LPFC_MAX_BUCKET_COUNT)
+			i = LPFC_MAX_BUCKET_COUNT - 1;
 	} else {
 		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
 			if (latency <= (phba->bucket_base +
@@ -92,7 +164,6 @@ lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 	spin_unlock_irqrestore(shost->host_lock, flags);
 }
 
-
 /**
  * lpfc_send_sdev_queuedepth_change_event: Posts a queuedepth change
  * event.
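
The bounds clamp added in the hunk above is easiest to see in isolation. A minimal standalone sketch of the same arithmetic (the constant and helper names here are illustrative, not the driver's):

#include <stdio.h>

#define MAX_BUCKET_COUNT 16

/* Map a latency (ms) to a histogram bucket, clamping both ends the
 * way the patched lpfc_update_stats() does; base/step stand in for
 * phba->bucket_base and phba->bucket_step. */
static int latency_to_bucket(long latency, long base, long step)
{
	long i = (latency + step - 1 - base) / step;

	if (i < 0)			/* latency below bucket_base */
		i = 0;
	else if (i >= MAX_BUCKET_COUNT)	/* clamp to last valid index */
		i = MAX_BUCKET_COUNT - 1;
	return (int)i;
}

int main(void)
{
	/* Before the fix, a latency below base indexed negatively and a
	 * large latency indexed one past the end of the array. */
	printf("%d %d %d\n",
	       latency_to_bucket(0, 100, 50),     /* -> 0  */
	       latency_to_bucket(120, 100, 50),   /* -> 1  */
	       latency_to_bucket(9999, 100, 50)); /* -> 15 */
	return 0;
}
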
@@ -148,12 +219,19 @@ lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
 	return;
 }
 
-/*
- * This function is called with no lock held when there is a resource
- * error in driver or in firmware.
- */
+/**
+ * lpfc_rampdown_queue_depth: Post RAMP_DOWN_QUEUE event to worker thread.
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine is called when there is a resource error in the driver or
+ * firmware.  It posts a WORKER_RAMP_DOWN_QUEUE event for @phba, at most one
+ * event each second, and wakes up the worker thread of @phba to process it.
+ *
+ * This routine should be called with no lock held.
+ **/
 void
-lpfc_adjust_queue_depth(struct lpfc_hba *phba)
+lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
 {
 	unsigned long flags;
 	uint32_t evt_posted;
@@ -182,10 +260,17 @@ lpfc_adjust_queue_depth(struct lpfc_hba *phba)
 	return;
 }
 
-/*
- * This function is called with no lock held when there is a successful
- * SCSI command completion.
- */
+/**
+ * lpfc_rampup_queue_depth: Post RAMP_UP_QUEUE event for worker thread.
+ * @vport: The virtual port for which this call is being executed.
+ * @sdev: The scsi device on which the command completed successfully.
+ *
+ * This routine posts a WORKER_RAMP_UP_QUEUE event, at most one event every
+ * 5 minutes after last_ramp_up_time or last_rsrc_error_time, and wakes up
+ * the worker thread of the hba to process it.
+ *
+ * This routine should be called with no lock held.
+ **/
 static inline void
 lpfc_rampup_queue_depth(struct lpfc_vport *vport,
 			struct scsi_device *sdev)
@@ -217,6 +302,14 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
 	return;
 }
 
+/**
+ * lpfc_ramp_down_queue_handler: WORKER_RAMP_DOWN_QUEUE event handler.
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
+ * worker thread.  It reduces the queue depth for all scsi devices on each
+ * vport associated with @phba.
+ **/
 void
 lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
 {
@@ -267,6 +360,15 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
 	atomic_set(&phba->num_cmd_success, 0);
 }
 
+/**
+ * lpfc_ramp_up_queue_handler: WORKER_RAMP_UP_QUEUE event handler.
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine is called to process the WORKER_RAMP_UP_QUEUE event for the
+ * worker thread.  It increases the queue depth for all scsi devices on each
+ * vport associated with @phba by 1, and resets @phba num_rsrc_err and
+ * num_cmd_success to zero.
+ **/
 void
 lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
 {
@@ -336,14 +438,21 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba)
 	lpfc_destroy_vport_work_array(phba, vports);
 }
 
-/*
+/**
+ * lpfc_new_scsi_buf: Scsi buffer allocator.
+ * @vport: The virtual port for which this call is being executed.
+ *
  * This routine allocates a scsi buffer, which contains all the necessary
  * information needed to initiate a SCSI I/O. The non-DMAable buffer region
  * contains information to build the IOCB.  The DMAable region contains
- * memory for the FCP CMND, FCP RSP, and the inital BPL.  In addition to
- * allocating memeory, the FCP CMND and FCP RSP BDEs are setup in the BPL
+ * memory for the FCP CMND, FCP RSP, and the initial BPL.  In addition to
+ * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
  * and the BPL BDE is setup in the IOCB.
- */
+ *
+ * Return codes:
+ *   NULL - Error
+ *   Pointer to lpfc_scsi_buf data structure - Success
+ **/
 static struct lpfc_scsi_buf *
 lpfc_new_scsi_buf(struct lpfc_vport *vport)
 {
@@ -407,14 +516,14 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
 	bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
 	bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
 	bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
-	bpl[0].tus.w = le32_to_cpu(bpl->tus.w);
+	bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
 
 	/* Setup the physical region for the FCP RSP */
 	bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
 	bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
 	bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
 	bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
-	bpl[1].tus.w = le32_to_cpu(bpl->tus.w);
+	bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
 
 	/*
 	 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
@@ -422,7 +531,8 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
 	 */
 	iocb = &psb->cur_iocbq.iocb;
 	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
-	if (phba->sli_rev == 3) {
+	if ((phba->sli_rev == 3) &&
+			!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
 		/* fill in immediate fcp command BDE */
 		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
 		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
@@ -452,6 +562,17 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
 	return psb;
 }
 
+/**
+ * lpfc_get_scsi_buf: Get a scsi buffer from the lpfc_scsi_buf_list of the Hba.
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine removes a scsi buffer from the head of the @phba
+ * lpfc_scsi_buf_list and returns it to the caller.
+ *
+ * Return codes:
+ *   NULL - Error
+ *   Pointer to lpfc_scsi_buf - Success
+ **/
 static struct lpfc_scsi_buf*
 lpfc_get_scsi_buf(struct lpfc_hba * phba)
 {
@@ -464,11 +585,20 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
 	if (lpfc_cmd) {
 		lpfc_cmd->seg_cnt = 0;
 		lpfc_cmd->nonsg_phys = 0;
+		lpfc_cmd->prot_seg_cnt = 0;
 	}
 	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
 	return lpfc_cmd;
 }
 
+/**
+ * lpfc_release_scsi_buf: Return a scsi buffer back to the hba's
+ * lpfc_scsi_buf_list.
+ * @phba: The Hba for which this call is being executed.
+ * @psb: The scsi buffer which is being released.
+ *
+ * This routine releases @psb scsi buffer by adding it to the tail of the
+ * @phba lpfc_scsi_buf_list.
+ **/
 static void
 lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
 {
@@ -480,6 +610,20 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
 	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
 }
 
+/**
+ * lpfc_scsi_prep_dma_buf: Routine to do DMA mapping for scsi buffer.
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ *
+ * This routine does the pci dma mapping for the scatter-gather list of the
+ * scsi cmnd field of @lpfc_cmd, scans through the sg elements and formats
+ * the bdes, and initializes all IOCB fields which are dependent on the scsi
+ * command request buffer.
+ *
+ * Return codes:
+ *   1 - Error
+ *   0 - Success
+ **/
 static int
 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 {
@@ -516,7 +660,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 	lpfc_cmd->seg_cnt = nseg;
 	if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
 		printk(KERN_ERR "%s: Too many sg segments from "
-		       "dma_map_sg.  Config %d, seg_cnt %d",
+		       "dma_map_sg.  Config %d, seg_cnt %d\n",
 		       __func__, phba->cfg_sg_seg_cnt,
 		       lpfc_cmd->seg_cnt);
 		scsi_dma_unmap(scsi_cmnd);
@@ -535,6 +679,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 	scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
 		physaddr = sg_dma_address(sgel);
 		if (phba->sli_rev == 3 &&
+		    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
 		    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
 			data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
 			data_bde->tus.f.bdeSize = sg_dma_len(sgel);
@@ -560,7 +705,8 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 	 * explicitly reinitialized and for SLI-3 the extended bde count is
 	 * explicitly reinitialized since all iocb memory resources are reused.
 	 */
-	if (phba->sli_rev == 3) {
+	if (phba->sli_rev == 3 &&
+	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
 		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
 			/*
 			 * The extended IOCB format can only fit 3 BDE or a BPL.
@@ -587,7 +733,683 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 		       ((num_bde + 2) * sizeof(struct ulp_bde64));
 	}
 	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
+
+	/*
+	 * Due to difference in data length between DIF/non-DIF paths,
+	 * we need to set word 4 of IOCB here
+	 */
+	iocb_cmd->un.fcpi.fcpi_parm = le32_to_cpu(scsi_bufflen(scsi_cmnd));
+	return 0;
+}
+
+/*
+ * Given a scsi cmnd, determine the BlockGuard profile to be used
+ * with the cmd
+ */
+static int
+lpfc_sc_to_sli_prof(struct scsi_cmnd *sc)
+{
+	uint8_t guard_type = scsi_host_get_guard(sc->device->host);
+	uint8_t ret_prof = LPFC_PROF_INVALID;
+
+	if (guard_type == SHOST_DIX_GUARD_IP) {
+		switch (scsi_get_prot_op(sc)) {
+		case SCSI_PROT_READ_INSERT:
+		case SCSI_PROT_WRITE_STRIP:
+			ret_prof = LPFC_PROF_AST2;
+			break;
+
+		case SCSI_PROT_READ_STRIP:
+		case SCSI_PROT_WRITE_INSERT:
+			ret_prof = LPFC_PROF_A1;
+			break;
+
+		case SCSI_PROT_READ_CONVERT:
+		case SCSI_PROT_WRITE_CONVERT:
+			ret_prof = LPFC_PROF_AST1;
+			break;
+
+		case SCSI_PROT_READ_PASS:
+		case SCSI_PROT_WRITE_PASS:
+		case SCSI_PROT_NORMAL:
+		default:
+			printk(KERN_ERR "Bad op/guard:%d/%d combination\n",
+					scsi_get_prot_op(sc), guard_type);
+			break;
+
+		}
+	} else if (guard_type == SHOST_DIX_GUARD_CRC) {
+		switch (scsi_get_prot_op(sc)) {
+		case SCSI_PROT_READ_STRIP:
+		case SCSI_PROT_WRITE_INSERT:
+			ret_prof = LPFC_PROF_A1;
+			break;
+
+		case SCSI_PROT_READ_PASS:
+		case SCSI_PROT_WRITE_PASS:
+			ret_prof = LPFC_PROF_C1;
+			break;
+
+		case SCSI_PROT_READ_CONVERT:
+		case SCSI_PROT_WRITE_CONVERT:
+		case SCSI_PROT_READ_INSERT:
+		case SCSI_PROT_WRITE_STRIP:
+		case SCSI_PROT_NORMAL:
+		default:
+			printk(KERN_ERR "Bad op/guard:%d/%d combination\n",
+					scsi_get_prot_op(sc), guard_type);
+			break;
+		}
+	} else {
+		/* unsupported format */
+		BUG();
+	}
+
+	return ret_prof;
+}
+
+struct scsi_dif_tuple {
+	__be16 guard_tag;	/* Checksum */
+	__be16 app_tag;		/* Opaque storage */
+	__be32 ref_tag;		/* Target LBA or indirect LBA */
+};
+
+static inline unsigned
+lpfc_cmd_blksize(struct scsi_cmnd *sc)
+{
+	return sc->device->sector_size;
+}
+
+/**
+ * lpfc_get_cmd_dif_parms - Extract DIF parameters from SCSI command
+ * @sc: in: SCSI command
+ * @apptagmask: out: app tag mask
+ * @apptagval: out: app tag value
+ * @reftag: out: ref tag (reference tag)
+ *
+ * Description:
+ *   Extract DIF parameters from the command if possible.  Otherwise,
+ *   use default parameters.
+ **/
+static inline void
+lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask,
+		uint16_t *apptagval, uint32_t *reftag)
+{
+	struct scsi_dif_tuple *spt;
+	unsigned char op = scsi_get_prot_op(sc);
+	unsigned int protcnt = scsi_prot_sg_count(sc);
+	static int cnt;
+
+	if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
+				op == SCSI_PROT_WRITE_PASS ||
+				op == SCSI_PROT_WRITE_CONVERT)) {
+
+		cnt++;
+		spt = page_address(sg_page(scsi_prot_sglist(sc))) +
+			scsi_prot_sglist(sc)[0].offset;
+		*apptagmask = 0;
+		*apptagval = 0;
+		*reftag = cpu_to_be32(spt->ref_tag);
+
+	} else {
+		/* SBC defines ref tag to be lower 32bits of LBA */
+		*reftag = (uint32_t) (0xffffffff & scsi_get_lba(sc));
+		*apptagmask = 0;
+		*apptagval = 0;
+	}
+}
+
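As a sketch of the data this helper reads: each protected data block carries one 8-byte tuple with the layout of struct scsi_dif_tuple above, and for Type 1 the ref tag is seeded from the low 32 bits of the LBA and advances per block. A minimal standalone illustration (field names mirror the struct; the incrementing loop is illustrative — in the driver the per-block increment is done by the hardware, not this helper):

#include <stdint.h>
#include <stdio.h>

/* Mirrors struct scsi_dif_tuple: 8 bytes per protected block. */
struct dif_tuple {
	uint16_t guard_tag;	/* CRC or IP checksum of the data block */
	uint16_t app_tag;	/* opaque to the transport */
	uint32_t ref_tag;	/* Type 1: low 32 bits of the block's LBA */
};

int main(void)
{
	uint64_t lba = 0x123456789ULL;
	/* SBC defines the Type 1 ref tag as the lower 32 bits of the
	 * LBA - the same default the driver falls back to when no
	 * protection buffer is attached to the command. */
	uint32_t reftag = (uint32_t)(0xffffffff & lba);

	for (int blk = 0; blk < 4; blk++) {
		struct dif_tuple t = { 0, 0, reftag + blk };
		printf("block %d ref_tag 0x%08x\n", blk, t.ref_tag);
	}
	return 0;
}
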
+/*
+ * This function sets up buffer list for protection groups of
+ * type LPFC_PG_TYPE_NO_DIF
+ *
+ * This is usually used when the HBA is instructed to generate
+ * DIFs and insert them into data stream (or strip DIF from
+ * incoming data stream)
+ *
+ * The buffer list consists of just one protection group described
+ * below:
+ *                                +-------------------------+
+ *   start of prot group  -->     |          PDE_1          |
+ *                                +-------------------------+
+ *                                |         Data BDE        |
+ *                                +-------------------------+
+ *                                |more Data BDE's ... (opt)|
+ *                                +-------------------------+
+ *
+ * @sc: pointer to scsi command we're working on
+ * @bpl: pointer to buffer list for protection groups
+ * @datasegcnt: number of segments of data that have been dma mapped
+ *
+ * Note: Data s/g buffers have been dma mapped
+ */
+static int
+lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+		struct ulp_bde64 *bpl, int datasegcnt)
+{
+	struct scatterlist *sgde = NULL; /* s/g data entry */
+	struct lpfc_pde *pde1 = NULL;
+	dma_addr_t physaddr;
+	int i = 0, num_bde = 0;
+	int datadir = sc->sc_data_direction;
+	int prof = LPFC_PROF_INVALID;
+	unsigned blksize;
+	uint32_t reftag;
+	uint16_t apptagmask, apptagval;
+
+	pde1 = (struct lpfc_pde *) bpl;
+	prof = lpfc_sc_to_sli_prof(sc);
+
+	if (prof == LPFC_PROF_INVALID)
+		goto out;
+
+	/* extract some info from the scsi command for PDE1 */
+	blksize = lpfc_cmd_blksize(sc);
+	lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
+
+	/* setup PDE1 with what we have */
+	lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize,
+			BG_EC_STOP_ERR);
+	lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag);
+
+	num_bde++;
+	bpl++;
+
+	/* assumption: caller has already run dma_map_sg on command data */
+	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
+		physaddr = sg_dma_address(sgde);
+		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
+		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
+		bpl->tus.f.bdeSize = sg_dma_len(sgde);
+		if (datadir == DMA_TO_DEVICE)
+			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+		else
+			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+		bpl->tus.w = le32_to_cpu(bpl->tus.w);
+		bpl++;
+		num_bde++;
+	}
+
+out:
+	return num_bde;
+}
+
+/*
+ * This function sets up buffer list for protection groups of
+ * type LPFC_PG_TYPE_DIF_BUF
+ *
+ * This is usually used when DIFs are in their own buffers,
+ * separate from the data.  The HBA can then be instructed
+ * to place the DIFs in the outgoing stream.  For read operations,
+ * the HBA could extract the DIFs and place them in DIF buffers.
+ *
+ * The buffer list for this type consists of one or more of the
+ * protection groups described below:
+ *                                    +-------------------------+
+ *   start of first prot group  -->   |          PDE_1          |
+ *                                    +-------------------------+
+ *                                    |    PDE_3 (Prot BDE)     |
+ *                                    +-------------------------+
+ *                                    |        Data BDE         |
+ *                                    +-------------------------+
+ *                                    |more Data BDE's ... (opt)|
+ *                                    +-------------------------+
+ *   start of new prot group  -->     |          PDE_1          |
+ *                                    +-------------------------+
+ *                                    |          ...            |
+ *                                    +-------------------------+
+ *
+ * @sc: pointer to scsi command we're working on
+ * @bpl: pointer to buffer list for protection groups
+ * @datacnt: number of segments of data that have been dma mapped
+ * @protcnt: number of segments of protection data that have been dma mapped
+ *
+ * Note: It is assumed that both data and protection s/g buffers have been
+ *       mapped for DMA
+ */
+static int
+lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+		struct ulp_bde64 *bpl, int datacnt, int protcnt)
+{
+	struct scatterlist *sgde = NULL; /* s/g data entry */
+	struct scatterlist *sgpe = NULL; /* s/g prot entry */
+	struct lpfc_pde *pde1 = NULL;
+	struct ulp_bde64 *prot_bde = NULL;
+	dma_addr_t dataphysaddr, protphysaddr;
+	unsigned short curr_data = 0, curr_prot = 0;
+	unsigned int split_offset, protgroup_len;
+	unsigned int protgrp_blks, protgrp_bytes;
+	unsigned int remainder, subtotal;
+	int prof = LPFC_PROF_INVALID;
+	int datadir = sc->sc_data_direction;
+	unsigned char pgdone = 0, alldone = 0;
+	unsigned blksize;
+	uint32_t reftag;
+	uint16_t apptagmask, apptagval;
+	int num_bde = 0;
+
+	sgpe = scsi_prot_sglist(sc);
+	sgde = scsi_sglist(sc);
+
+	if (!sgpe || !sgde) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+				"9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
+				sgpe, sgde);
+		return 0;
+	}
+
+	prof = lpfc_sc_to_sli_prof(sc);
+	if (prof == LPFC_PROF_INVALID)
+		goto out;
+
+	/* extract some info from the scsi command for PDE1 */
+	blksize = lpfc_cmd_blksize(sc);
+	lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
+
+	split_offset = 0;
+	do {
+		/* setup the first PDE_1 */
+		pde1 = (struct lpfc_pde *) bpl;
+
+		lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize,
+				BG_EC_STOP_ERR);
+		lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag);
+
+		num_bde++;
+		bpl++;
+
+		/* setup the first BDE that points to protection buffer */
+		prot_bde = (struct ulp_bde64 *) bpl;
+		protphysaddr = sg_dma_address(sgpe);
+		prot_bde->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
+		prot_bde->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
+		protgroup_len = sg_dma_len(sgpe);
+
+		/* must be integer multiple of the DIF block length */
+		BUG_ON(protgroup_len % 8);
+
+		protgrp_blks = protgroup_len / 8;
+		protgrp_bytes = protgrp_blks * blksize;
+
+		prot_bde->tus.f.bdeSize = protgroup_len;
+		if (datadir == DMA_TO_DEVICE)
+			prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+		else
+			prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+		prot_bde->tus.w = le32_to_cpu(bpl->tus.w);
+
+		curr_prot++;
+		num_bde++;
+
+		/* setup BDE's for data blocks associated with DIF data */
+		pgdone = 0;
+		subtotal = 0; /* total bytes processed for current prot grp */
+		while (!pgdone) {
+			if (!sgde) {
+				printk(KERN_ERR "%s Invalid data segment\n",
+						__func__);
+				return 0;
+			}
+			bpl++;
+			dataphysaddr = sg_dma_address(sgde) + split_offset;
+			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
+			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
+
+			remainder = sg_dma_len(sgde) - split_offset;
+
+			if ((subtotal + remainder) <= protgrp_bytes) {
+				/* we can use this whole buffer */
+				bpl->tus.f.bdeSize = remainder;
+				split_offset = 0;
+
+				if ((subtotal + remainder) == protgrp_bytes)
+					pgdone = 1;
+			} else {
+				/* must split this buffer with next prot grp */
+				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
+				split_offset += bpl->tus.f.bdeSize;
+			}
+
+			subtotal += bpl->tus.f.bdeSize;
+
+			if (datadir == DMA_TO_DEVICE)
+				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+			else
+				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+			bpl->tus.w = le32_to_cpu(bpl->tus.w);
+
+			num_bde++;
+			curr_data++;
+
+			if (split_offset)
+				break;
+
+			/* Move to the next s/g segment if possible */
+			sgde = sg_next(sgde);
+		}
+
+		/* are we done ? */
+		if (curr_prot == protcnt) {
+			alldone = 1;
+		} else if (curr_prot < protcnt) {
+			/* advance to next prot buffer */
+			sgpe = sg_next(sgpe);
+			bpl++;
+
+			/* update the reference tag */
+			reftag += protgrp_blks;
+		} else {
+			/* if we're here, we have a bug */
+			printk(KERN_ERR "BLKGRD: bug in %s\n", __func__);
+		}
+
+	} while (!alldone);
+
+out:
+	return num_bde;
+}
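
A worked sizing example for the loop above, under the same assumptions the code makes (8-byte tuples, one tuple per data block; the values are illustrative):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned blksize = 512;        /* lpfc_cmd_blksize() result      */
	unsigned protgroup_len = 256;  /* sg_dma_len() of one prot entry */

	/* Same arithmetic as lpfc_bg_setup_bpl_prot(): a protection
	 * buffer must hold whole 8-byte tuples, and each tuple covers
	 * exactly one data block. */
	assert(protgroup_len % 8 == 0);
	unsigned protgrp_blks  = protgroup_len / 8;      /* 32 tuples */
	unsigned protgrp_bytes = protgrp_blks * blksize; /* 16384 B   */

	printf("one prot group covers %u blocks = %u data bytes\n",
	       protgrp_blks, protgrp_bytes);
	return 0;
}
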
+
+/*
+ * Given a SCSI command that supports DIF, determine the composition of
+ * protection groups involved in setting up buffer lists
+ *
+ * Returns the protection group type (for both read and write):
+ * LPFC_PG_TYPE_NO_DIF, LPFC_PG_TYPE_DIF_BUF or LPFC_PG_TYPE_INVALID
+ */
+static int
+lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
+{
+	int ret = LPFC_PG_TYPE_INVALID;
+	unsigned char op = scsi_get_prot_op(sc);
+
+	switch (op) {
+	case SCSI_PROT_READ_STRIP:
+	case SCSI_PROT_WRITE_INSERT:
+		ret = LPFC_PG_TYPE_NO_DIF;
+		break;
+	case SCSI_PROT_READ_INSERT:
+	case SCSI_PROT_WRITE_STRIP:
+	case SCSI_PROT_READ_PASS:
+	case SCSI_PROT_WRITE_PASS:
+	case SCSI_PROT_WRITE_CONVERT:
+	case SCSI_PROT_READ_CONVERT:
+		ret = LPFC_PG_TYPE_DIF_BUF;
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+				"9021 Unsupported protection op:%d\n", op);
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * This is the protection/DIF aware version of
+ * lpfc_scsi_prep_dma_buf().  It may be a good idea to combine the
+ * two functions eventually, but for now, it's here
+ */
+static int
+lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
+		struct lpfc_scsi_buf *lpfc_cmd)
+{
+	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
+	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+	uint32_t num_bde = 0;
+	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
+	int prot_group_type = 0;
+	int diflen, fcpdl;
+	unsigned blksize;
+
+	/*
+	 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
+	 * fcp_rsp regions to the first data bde entry
+	 */
+	bpl += 2;
+	if (scsi_sg_count(scsi_cmnd)) {
+		/*
+		 * The driver stores the segment count returned from pci_map_sg
+		 * because this is a count of dma-mappings used to map the
+		 * use_sg pages.  They are not guaranteed to be the same for
+		 * those architectures that implement an IOMMU.
+		 */
+		datasegcnt = dma_map_sg(&phba->pcidev->dev,
+				scsi_sglist(scsi_cmnd),
+				scsi_sg_count(scsi_cmnd), datadir);
+		if (unlikely(!datasegcnt))
+			return 1;
+
+		lpfc_cmd->seg_cnt = datasegcnt;
+		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
+			printk(KERN_ERR "%s: Too many sg segments from "
+					"dma_map_sg.  Config %d, seg_cnt %d\n",
+					__func__, phba->cfg_sg_seg_cnt,
+					lpfc_cmd->seg_cnt);
+			scsi_dma_unmap(scsi_cmnd);
+			return 1;
+		}
+
+		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
+
+		switch (prot_group_type) {
+		case LPFC_PG_TYPE_NO_DIF:
+			num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
+					datasegcnt);
+			/* we should have 2 or more entries in buffer list */
+			if (num_bde < 2)
+				goto err;
+			break;
+		case LPFC_PG_TYPE_DIF_BUF:{
+			/*
+			 * This type indicates that protection buffers are
+			 * passed to the driver, so that needs to be prepared
+			 * for DMA
+			 */
+			protsegcnt = dma_map_sg(&phba->pcidev->dev,
+					scsi_prot_sglist(scsi_cmnd),
+					scsi_prot_sg_count(scsi_cmnd), datadir);
+			if (unlikely(!protsegcnt)) {
+				scsi_dma_unmap(scsi_cmnd);
+				return 1;
+			}
+
+			lpfc_cmd->prot_seg_cnt = protsegcnt;
+			if (lpfc_cmd->prot_seg_cnt
+			    > phba->cfg_prot_sg_seg_cnt) {
+				printk(KERN_ERR "%s: Too many prot sg segments "
+						"from dma_map_sg.  Config %d, "
+						"prot_seg_cnt %d\n", __func__,
+						phba->cfg_prot_sg_seg_cnt,
+						lpfc_cmd->prot_seg_cnt);
+				dma_unmap_sg(&phba->pcidev->dev,
+						scsi_prot_sglist(scsi_cmnd),
+						scsi_prot_sg_count(scsi_cmnd),
+						datadir);
+				scsi_dma_unmap(scsi_cmnd);
+				return 1;
+			}
+
+			num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
+					datasegcnt, protsegcnt);
+			/* we should have 3 or more entries in buffer list */
+			if (num_bde < 3)
+				goto err;
+			break;
+		}
+		case LPFC_PG_TYPE_INVALID:
+		default:
+			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+					"9022 Unexpected protection group %i\n",
+					prot_group_type);
+			return 1;
+		}
+	}
+
+	/*
+	 * Finish initializing those IOCB fields that are dependent on the
+	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
+	 * reinitialized since all iocb memory resources are used many times
+	 * for transmit, receive, and continuation bpl's.
+	 */
+	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
+	iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
+	iocb_cmd->ulpBdeCount = 1;
+	iocb_cmd->ulpLe = 1;
+
+	fcpdl = scsi_bufflen(scsi_cmnd);
+
+	if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) {
+		/*
+		 * We are in DIF Type 1 mode
+		 * Every data block has an 8 byte DIF (trailer)
+		 * attached to it.  Must adjust FCP data length
+		 */
+		blksize = lpfc_cmd_blksize(scsi_cmnd);
+		diflen = (fcpdl / blksize) * 8;
+		fcpdl += diflen;
+	}
+	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
+
+	/*
+	 * Due to difference in data length between DIF/non-DIF paths,
+	 * we need to set word 4 of IOCB here
+	 */
+	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
+
 	return 0;
+err:
+	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+			"9023 Could not setup all needed BDE's "
+			"prot_group_type=%d, num_bde=%d\n",
+			prot_group_type, num_bde);
+	return 1;
+}
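
The fcpDl adjustment above can be sanity-checked with a small worked example (Type 1 DIF; the concrete values are illustrative):

#include <stdio.h>

int main(void)
{
	unsigned fcpdl = 8192;   /* scsi_bufflen(): 8 KiB of user data */
	unsigned blksize = 512;  /* lpfc_cmd_blksize()                 */

	/* DIF Type 1: every data block gains an 8-byte trailer on the
	 * wire, so the FCP data length the target sees must grow too. */
	unsigned diflen = (fcpdl / blksize) * 8;  /* 16 blocks -> 128 B */
	fcpdl += diflen;

	printf("fcpDl on the wire: %u\n", fcpdl);  /* 8320 */
	return 0;
}
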
+
+/*
+ * This function checks for BlockGuard errors detected by
+ * the HBA.  In case of errors, the ASC/ASCQ fields in the
+ * sense buffer will be set accordingly, paired with
+ * ILLEGAL_REQUEST to signal to the kernel that the HBA
+ * detected corruption.
+ *
+ * Returns:
+ *  0 - No error found
+ *  1 - BlockGuard error found
+ * -1 - Internal error (bad profile, ...etc)
+ */
+static int
+lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
+			struct lpfc_iocbq *pIocbOut)
+{
+	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
+	struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
+	int ret = 0;
+	uint32_t bghm = bgf->bghm;
+	uint32_t bgstat = bgf->bgstat;
+	uint64_t failing_sector = 0;
+
+	printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%lx "
+			"bgstat=0x%x bghm=0x%x\n",
+			cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
+			cmd->request->nr_sectors, bgstat, bghm);
+
+	spin_lock(&_dump_buf_lock);
+	if (!_dump_buf_done) {
+		printk(KERN_ERR "Saving Data for %u blocks to debugfs\n",
+				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
+		lpfc_debug_save_data(cmd);
+
+		/* If we have a prot sgl, save the DIF buffer */
+		if (lpfc_prot_group_type(phba, cmd) == LPFC_PG_TYPE_DIF_BUF) {
+			printk(KERN_ERR "Saving DIF for %u blocks to debugfs\n",
+					(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
+			lpfc_debug_save_dif(cmd);
+		}
+
+		_dump_buf_done = 1;
+	}
+	spin_unlock(&_dump_buf_lock);
+
+	if (lpfc_bgs_get_invalid_prof(bgstat)) {
+		cmd->result = ScsiResult(DID_ERROR, 0);
+		printk(KERN_ERR "Invalid BlockGuard profile. bgstat:0x%x\n",
+				bgstat);
+		ret = (-1);
+		goto out;
+	}
+
+	if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
+		cmd->result = ScsiResult(DID_ERROR, 0);
+		printk(KERN_ERR "Invalid BlockGuard DIF Block. bgstat:0x%x\n",
+				bgstat);
+		ret = (-1);
+		goto out;
+	}
+
+	if (lpfc_bgs_get_guard_err(bgstat)) {
+		ret = 1;
+
+		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+				0x10, 0x1);
+		cmd->result = (DRIVER_SENSE|SUGGEST_DIE) << 24
+			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+		phba->bg_guard_err_cnt++;
+		printk(KERN_ERR "BLKGRD: guard_tag error\n");
+	}
+
+	if (lpfc_bgs_get_reftag_err(bgstat)) {
+		ret = 1;
+
+		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+				0x10, 0x3);
+		cmd->result = (DRIVER_SENSE|SUGGEST_DIE) << 24
+			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+
+		phba->bg_reftag_err_cnt++;
+		printk(KERN_ERR "BLKGRD: ref_tag error\n");
+	}
+
+	if (lpfc_bgs_get_apptag_err(bgstat)) {
+		ret = 1;
+
+		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+				0x10, 0x2);
+		cmd->result = (DRIVER_SENSE|SUGGEST_DIE) << 24
+			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+
+		phba->bg_apptag_err_cnt++;
+		printk(KERN_ERR "BLKGRD: app_tag error\n");
+	}
+
+	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
+		/*
+		 * setup sense data descriptor 0 per SPC-4 as an information
+		 * field, and put the failing LBA in it
+		 */
+		cmd->sense_buffer[8] = 0;     /* Information */
+		cmd->sense_buffer[9] = 0xa;   /* Add. length */
+		do_div(bghm, cmd->device->sector_size);
+
+		failing_sector = scsi_get_lba(cmd);
+		failing_sector += bghm;
+
+		put_unaligned_be64(failing_sector, &cmd->sense_buffer[10]);
+	}
+
+	if (!ret) {
+		/* No error was reported - problem in FW? */
+		cmd->result = ScsiResult(DID_ERROR, 0);
+		printk(KERN_ERR "BLKGRD: no errors reported!\n");
+	}
+
+out:
+	return ret;
 }
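
The failing-LBA computation at the end of lpfc_parse_bg_err() reduces to the following standalone sketch; bghm is treated as a byte-granular high-water mark, as the code above does, and the values are illustrative:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t lba = 1000;          /* scsi_get_lba(cmd)          */
	uint32_t bghm = 2048;         /* BlockGuard high-water mark */
	uint32_t sector_size = 512;

	/* As in lpfc_parse_bg_err(): convert the byte mark to whole
	 * sectors, then offset from the command's starting LBA. */
	uint64_t failing_sector = lba + bghm / sector_size;

	printf("failing sector: %llu\n",
	       (unsigned long long)failing_sector);  /* 1004 */
	return 0;
}
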
 
 /**
@@ -681,6 +1503,15 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
 	lpfc_worker_wake_up(phba);
 	return;
 }
+
+/**
+ * lpfc_scsi_unprep_dma_buf: Routine to un-map DMA mapping of scatter gather.
+ * @phba: The Hba for which this call is being executed.
+ * @psb: The scsi buffer which is going to be un-mapped.
+ *
+ * This routine does DMA un-mapping of the scatter gather list of the scsi
+ * command field of @psb.
+ **/
 static void
 lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
 {
@@ -692,8 +1523,22 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
 	 */
 	if (psb->seg_cnt > 0)
 		scsi_dma_unmap(psb->pCmd);
+	if (psb->prot_seg_cnt > 0)
+		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
+				scsi_prot_sg_count(psb->pCmd),
+				psb->pCmd->sc_data_direction);
 }
 
+/**
+ * lpfc_handle_fcp_err: FCP response handler.
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
+ * @rsp_iocb: The response IOCB which contains the FCP error.
+ *
+ * This routine is called to process a response IOCB with status field
+ * IOSTAT_FCP_RSP_ERROR.  It sets the result field of the scsi command
+ * based upon the SCSI and FCP error.
+ **/
 static void
 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 		    struct lpfc_iocbq *rsp_iocb)
@@ -735,7 +1580,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 		logit = LOG_FCP;
 
 	lpfc_printf_vlog(vport, KERN_WARNING, logit,
-			 "0730 FCP command x%x failed: x%x SNS x%x x%x "
+			 "9024 FCP command x%x failed: x%x SNS x%x x%x "
 			 "Data: x%x x%x x%x x%x x%x\n",
 			 cmnd->cmnd[0], scsi_status,
 			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
@@ -758,7 +1603,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
 
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
-				 "0716 FCP Read Underrun, expected %d, "
+				 "9025 FCP Read Underrun, expected %d, "
 				 "residual %d Data: x%x x%x x%x\n",
 				 be32_to_cpu(fcpcmd->fcpDl),
 				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
@@ -774,7 +1619,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 		    (scsi_get_resid(cmnd) != fcpi_parm)) {
 			lpfc_printf_vlog(vport, KERN_WARNING,
 					 LOG_FCP | LOG_FCP_ERROR,
-					 "0735 FCP Read Check Error "
+					 "9026 FCP Read Check Error "
 					 "and Underrun Data: x%x x%x x%x x%x\n",
 					 be32_to_cpu(fcpcmd->fcpDl),
 					 scsi_get_resid(cmnd), fcpi_parm,
@@ -793,7 +1638,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
 		     < cmnd->underflow)) {
 			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
-					 "0717 FCP command x%x residual "
+					 "9027 FCP command x%x residual "
 					 "underrun converted to error "
 					 "Data: x%x x%x x%x\n",
 					 cmnd->cmnd[0], scsi_bufflen(cmnd),
@@ -802,7 +1647,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 		}
 	} else if (resp_info & RESID_OVER) {
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
-				 "0720 FCP command x%x residual overrun error. "
+				 "9028 FCP command x%x residual overrun error. "
 				 "Data: x%x x%x \n", cmnd->cmnd[0],
 				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
 		host_status = DID_ERROR;
@@ -814,7 +1659,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 	} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
 		   (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
-				 "0734 FCP Read Check Error Data: "
+				 "9029 FCP Read Check Error Data: "
 				 "x%x x%x x%x x%x\n",
 				 be32_to_cpu(fcpcmd->fcpDl),
 				 be32_to_cpu(fcprsp->rspResId),
@@ -828,6 +1673,16 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
 }
 
+/**
+ * lpfc_scsi_cmd_iocb_cmpl: Scsi cmnd IOCB completion routine.
+ * @phba: The Hba for which this call is being executed.
+ * @pIocbIn: The command IOCBQ for the scsi cmnd.
+ * @pIocbOut: The response IOCBQ for the scsi cmnd.
+ *
+ * This routine assigns the scsi command result by looking into the response
+ * IOCB status field appropriately.  It also handles the QUEUE FULL condition
+ * by ramping down the device queue depth.
+ **/
 static void
 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 			struct lpfc_iocbq *pIocbOut)
@@ -846,7 +1701,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 
 	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
 	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
-	atomic_dec(&pnode->cmd_pending);
+	if (pnode && NLP_CHK_NODE_ACT(pnode))
+		atomic_dec(&pnode->cmd_pending);
 
 	if (lpfc_cmd->status) {
 		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
@@ -856,7 +1712,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 			lpfc_cmd->status = IOSTAT_DEFAULT;
 
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
-				 "0729 FCP cmd x%x failed <%d/%d> "
+				 "9030 FCP cmd x%x failed <%d/%d> "
 				 "status: x%x result: x%x Data: x%x x%x\n",
 				 cmd->cmnd[0],
 				 cmd->device ? cmd->device->id : 0xffff,
@@ -904,7 +1760,28 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 		    lpfc_cmd->result == IOERR_ABORT_REQUESTED) {
 			cmd->result = ScsiResult(DID_REQUEUE, 0);
 			break;
-		} /* else: fall through */
+		}
+
+		if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
+		     lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
+		    pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
+			if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
+				/*
+				 * This is a response for a BG enabled
+				 * cmd. Parse BG error
+				 */
+				lpfc_parse_bg_err(phba, lpfc_cmd,
+						pIocbOut);
+				break;
+			} else {
+				lpfc_printf_vlog(vport, KERN_WARNING,
+						LOG_BG,
+						"9031 non-zero BGSTAT "
+						"on unprotected cmd\n");
+			}
+		}
+
+		/* else: fall through */
 	default:
 		cmd->result = ScsiResult(DID_ERROR, 0);
 		break;
@@ -936,23 +1813,31 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	    time_after(jiffies, lpfc_cmd->start_time +
 		msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
 		spin_lock_irqsave(sdev->host->host_lock, flags);
-		if ((pnode->cmd_qdepth > atomic_read(&pnode->cmd_pending) &&
-		    (atomic_read(&pnode->cmd_pending) > LPFC_MIN_TGT_QDEPTH) &&
-		    ((cmd->cmnd[0] == READ_10) || (cmd->cmnd[0] == WRITE_10))))
-			pnode->cmd_qdepth = atomic_read(&pnode->cmd_pending);
-
-		pnode->last_change_time = jiffies;
+		if (pnode && NLP_CHK_NODE_ACT(pnode)) {
+			if (pnode->cmd_qdepth >
+				atomic_read(&pnode->cmd_pending) &&
+				(atomic_read(&pnode->cmd_pending) >
+				LPFC_MIN_TGT_QDEPTH) &&
+				((cmd->cmnd[0] == READ_10) ||
+				(cmd->cmnd[0] == WRITE_10)))
+				pnode->cmd_qdepth =
+					atomic_read(&pnode->cmd_pending);
+
+			pnode->last_change_time = jiffies;
+		}
 		spin_unlock_irqrestore(sdev->host->host_lock, flags);
-	} else if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
+	} else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
+		if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
 		   time_after(jiffies, pnode->last_change_time +
 			      msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
 		spin_lock_irqsave(sdev->host->host_lock, flags);
 		pnode->cmd_qdepth += pnode->cmd_qdepth *
 			LPFC_TGTQ_RAMPUP_PCENT / 100;
 		if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
 			pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
 		pnode->last_change_time = jiffies;
 		spin_unlock_irqrestore(sdev->host->host_lock, flags);
+		}
 	}
 
 	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
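
The ramp-up branch in the hunk above grows the target queue depth by a fixed percentage using integer math, clamped at the maximum. A quick sketch of that step (the two constants are assumed here, not taken from the lpfc headers):

#include <stdio.h>

#define TGTQ_RAMPUP_PCENT 5    /* assumed percentage step */
#define MAX_TGT_QDEPTH    100  /* assumed upper clamp     */

int main(void)
{
	unsigned qdepth = 30;

	/* Integer percentage growth, clamped - mirrors the patched
	 * completion path.  Note 30 * 5 / 100 = 1, so small depths
	 * still make progress one slot at a time. */
	qdepth += qdepth * TGTQ_RAMPUP_PCENT / 100;
	if (qdepth > MAX_TGT_QDEPTH)
		qdepth = MAX_TGT_QDEPTH;

	printf("new qdepth: %u\n", qdepth);  /* 31 */
	return 0;
}
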
@@ -1067,6 +1952,15 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
 	}
 }
 
+/**
+ * lpfc_scsi_prep_cmnd: Routine to convert scsi cmnd to FCP information unit.
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: The scsi command which needs to be sent.
+ * @pnode: Pointer to lpfc_nodelist.
+ *
+ * This routine initializes the fcp_cmnd and iocb data structures from the
+ * scsi command to be transferred.
+ **/
 static void
 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 		    struct lpfc_nodelist *pnode)
@@ -1122,7 +2016,6 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 	} else {
 		iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
 		iocb_cmd->ulpPU = PARM_READ_CHECK;
-		iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
 		fcp_cmnd->fcpCntl3 = READ_DATA;
 		phba->fc4InputRequests++;
 	}
@@ -1133,7 +2026,8 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 		fcp_cmnd->fcpCntl3 = 0;
 		phba->fc4ControlRequests++;
 	}
-	if (phba->sli_rev == 3)
+	if (phba->sli_rev == 3 &&
+	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
 		lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
 	/*
 	 * Finish initializing those IOCB fields that are independent
@@ -1152,6 +2046,19 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 	piocbq->vport = vport;
 }
 
+/**
+ * lpfc_scsi_prep_task_mgmt_cmd: Convert scsi TM cmnd to FCP information unit.
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
+ * @lun: Logical unit number.
+ * @task_mgmt_cmd: SCSI task management command.
+ *
+ * This routine creates the FCP information unit corresponding to
+ * @task_mgmt_cmd.
+ *
+ * Return codes:
+ *   0 - Error
+ *   1 - Success
+ **/
 static int
 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
 			     struct lpfc_scsi_buf *lpfc_cmd,
@@ -1178,7 +2085,8 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
 	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
 	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
 	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
-	if (vport->phba->sli_rev == 3)
+	if (vport->phba->sli_rev == 3 &&
+	    !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
 		lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
 	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
 	piocb->ulpContext = ndlp->nlp_rpi;
@@ -1201,6 +2109,15 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
 	return 1;
 }
 
+/**
+ * lpfc_tskmgmt_def_cmpl: IOCB completion routine for task management command.
+ * @phba: The Hba for which this call is being executed.
+ * @cmdiocbq: Pointer to lpfc_iocbq data structure.
+ * @rspiocbq: Pointer to lpfc_iocbq data structure.
+ *
+ * This routine is the IOCB completion routine for device reset and target
+ * reset.  It releases the scsi buffer associated with lpfc_cmd.
+ **/
 static void
 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
 			struct lpfc_iocbq *cmdiocbq,
@@ -1213,6 +2130,20 @@ lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
 	return;
 }
 
+/**
+ * lpfc_scsi_tgt_reset: Target reset handler.
+ * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
+ * @vport: The virtual port for which this call is being executed.
+ * @tgt_id: Target ID.
+ * @lun: Lun number.
+ * @rdata: Pointer to lpfc_rport_data.
+ *
+ * This routine issues a TARGET RESET iocb to reset a target with @tgt_id ID.
+ *
+ * Return codes:
+ *   0x2003 - Error
+ *   0x2002 - Success
+ **/
 static int
 lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
 		    unsigned  tgt_id, unsigned int lun,
@@ -1266,6 +2197,15 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
 	return ret;
 }
 
+/**
+ * lpfc_info: Info entry point of the scsi_host_template data structure.
+ * @host: The scsi host for which this call is being executed.
+ *
+ * This routine provides module information about the hba.
+ *
+ * Return code:
+ *   Pointer to char - Success
+ **/
 const char *
 lpfc_info(struct Scsi_Host *host)
 {
@@ -1295,6 +2235,13 @@ lpfc_info(struct Scsi_Host *host)
 	return lpfcinfobuf;
 }
 
+/**
+ * lpfc_poll_rearm_timer: Routine to modify fcp_poll timer of hba.
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine modifies the fcp_poll_timer field of @phba by cfg_poll_tmo.
+ * The default value of cfg_poll_tmo is 10 milliseconds.
+ **/
 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
 {
 	unsigned long poll_tmo_expires =
@@ -1305,11 +2252,25 @@ static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
 			poll_tmo_expires);
 }
 
+/**
+ * lpfc_poll_start_timer: Routine to start fcp_poll_timer of HBA.
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine starts the fcp_poll_timer of @phba.
+ **/
 void lpfc_poll_start_timer(struct lpfc_hba * phba)
 {
 	lpfc_poll_rearm_timer(phba);
 }
 
+/**
+ * lpfc_poll_timeout: Restart polling timer.
+ * @ptr: Map to lpfc_hba data structure pointer.
+ *
+ * This routine restarts the fcp_poll timer when FCP ring polling is enabled
+ * and the FCP Ring interrupt is disabled.
+ **/
 void lpfc_poll_timeout(unsigned long ptr)
 {
 	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
@@ -1321,6 +2282,20 @@ void lpfc_poll_timeout(unsigned long ptr)
 	}
 }
 
+/**
+ * lpfc_queuecommand: Queuecommand entry point of the scsi_host_template data
+ * structure.
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ * @done: Pointer to done routine.
+ *
+ * The driver registers this routine with the scsi midlayer to submit a @cmnd
+ * for processing.  It prepares an IOCB from the scsi command and provides it
+ * to the firmware.  The @done callback is invoked after the driver finishes
+ * processing the command.
+ *
+ * Return value:
+ *   0 - Success
+ *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host
+ *   temporarily.
+ **/
 static int
 lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 {
@@ -1340,6 +2315,17 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
1340 goto out_fail_command; 2315 goto out_fail_command;
1341 } 2316 }
1342 2317
2318 if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
2319 scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
2320
2321 printk(KERN_ERR "BLKGRD ERROR: rcvd protected cmd:%02x op:%02x "
2322 "str=%s without registering for BlockGuard - "
2323 "Rejecting command\n",
2324 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2325 dif_op_str[scsi_get_prot_op(cmnd)]);
2326 goto out_fail_command;
2327 }
2328
1343 /* 2329 /*
1344 * Catch race where our node has transitioned, but the 2330 * Catch race where our node has transitioned, but the
1345 * transport is still transitioning. 2331 * transport is still transitioning.
@@ -1348,12 +2334,13 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
1348 cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0); 2334 cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
1349 goto out_fail_command; 2335 goto out_fail_command;
1350 } 2336 }
1351 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) 2337 if (vport->cfg_max_scsicmpl_time &&
2338 (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth))
1352 goto out_host_busy; 2339 goto out_host_busy;
1353 2340
1354 lpfc_cmd = lpfc_get_scsi_buf(phba); 2341 lpfc_cmd = lpfc_get_scsi_buf(phba);
1355 if (lpfc_cmd == NULL) { 2342 if (lpfc_cmd == NULL) {
1356 lpfc_adjust_queue_depth(phba); 2343 lpfc_rampdown_queue_depth(phba);
1357 2344
1358 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 2345 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
1359 "0707 driver's buffer pool is empty, " 2346 "0707 driver's buffer pool is empty, "
@@ -1361,7 +2348,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
1361 goto out_host_busy; 2348 goto out_host_busy;
1362 } 2349 }
1363 2350
1364 lpfc_cmd->start_time = jiffies;
1365 /* 2351 /*
1366 * Store the midlayer's command structure for the completion phase 2352 * Store the midlayer's command structure for the completion phase
1367 * and complete the command initialization. 2353 * and complete the command initialization.
@@ -1373,7 +2359,65 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
1373 cmnd->host_scribble = (unsigned char *)lpfc_cmd; 2359 cmnd->host_scribble = (unsigned char *)lpfc_cmd;
1374 cmnd->scsi_done = done; 2360 cmnd->scsi_done = done;
1375 2361
1376 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); 2362 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
2363 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2364 "9033 BLKGRD: rcvd protected cmd:%02x op:%02x "
2365 "str=%s\n",
2366 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2367 dif_op_str[scsi_get_prot_op(cmnd)]);
2368 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2369 "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x "
2370 "%02x %02x %02x %02x %02x \n",
2371 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
2372 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
2373 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
2374 cmnd->cmnd[9]);
2375 if (cmnd->cmnd[0] == READ_10)
2376 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2377 "9035 BLKGRD: READ @ sector %llu, "
2378 "count %lu\n",
2379 (unsigned long long)scsi_get_lba(cmnd),
2380 cmnd->request->nr_sectors);
2381 else if (cmnd->cmnd[0] == WRITE_10)
2382 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2383 "9036 BLKGRD: WRITE @ sector %llu, "
2384 "count %lu cmd=%p\n",
2385 (unsigned long long)scsi_get_lba(cmnd),
2386 cmnd->request->nr_sectors,
2387 cmnd);
2388
2389 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
2390 } else {
2391 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2392 "9038 BLKGRD: rcvd unprotected cmd:%02x op:%02x"
2393 " str=%s\n",
2394 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2395 dif_op_str[scsi_get_prot_op(cmnd)]);
2396 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2397 "9039 BLKGRD: CDB: %02x %02x %02x %02x %02x "
2398 "%02x %02x %02x %02x %02x \n",
2399 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
2400 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
2401 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
2402 cmnd->cmnd[9]);
2403 if (cmnd->cmnd[0] == READ_10)
2404 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2405 "9040 dbg: READ @ sector %llu, "
2406 "count %lu\n",
2407 (unsigned long long)scsi_get_lba(cmnd),
2408 cmnd->request->nr_sectors);
2409 else if (cmnd->cmnd[0] == WRITE_10)
2410 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2411 "9041 dbg: WRITE @ sector %llu, "
2412 "count %lu cmd=%p\n",
2413 (unsigned long long)scsi_get_lba(cmnd),
2414 cmnd->request->nr_sectors, cmnd);
2415 else
2416 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2417 "9042 dbg: parser not implemented\n");
2418 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
2419 }
2420
1377 if (err) 2421 if (err)
1378 goto out_host_busy_free_buf; 2422 goto out_host_busy_free_buf;
1379 2423
@@ -1382,9 +2426,10 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
1382 atomic_inc(&ndlp->cmd_pending); 2426 atomic_inc(&ndlp->cmd_pending);
1383 err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring], 2427 err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
1384 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); 2428 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
1385 if (err) 2429 if (err) {
2430 atomic_dec(&ndlp->cmd_pending);
1386 goto out_host_busy_free_buf; 2431 goto out_host_busy_free_buf;
1387 2432 }
1388 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 2433 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
1389 lpfc_sli_poll_fcp_ring(phba); 2434 lpfc_sli_poll_fcp_ring(phba);
1390 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 2435 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
@@ -1394,7 +2439,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
1394 return 0; 2439 return 0;
1395 2440
1396 out_host_busy_free_buf: 2441 out_host_busy_free_buf:
1397 atomic_dec(&ndlp->cmd_pending);
1398 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 2442 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
1399 lpfc_release_scsi_buf(phba, lpfc_cmd); 2443 lpfc_release_scsi_buf(phba, lpfc_cmd);
1400 out_host_busy: 2444 out_host_busy:
@@ -1405,6 +2449,12 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
1405 return 0; 2449 return 0;
1406} 2450}
1407 2451
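Taken together, the lpfc_queuecommand() hunks above do three things: reject protection-op commands when the HBA never registered for BlockGuard, route protected commands through the new lpfc_bg_scsi_prep_dma_buf() (which maps the DIF scatterlist as well as the data one) while plain commands keep lpfc_scsi_prep_dma_buf(), and move the ndlp->cmd_pending decrement from the shared out_host_busy_free_buf path to the lpfc_sli_issue_iocb() failure site so a DMA-prep failure no longer underflows the counter. The dispatch condenses to:

    /* Condensed from lpfc_queuecommand() above, with the logging stripped. */
    if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
        scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL)
            goto out_fail_command;          /* protected I/O, no BlockGuard */

    if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL)
            err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); /* data + DIF SGLs */
    else
            err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);    /* data SGL only */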
2452/**
2453 * lpfc_block_error_handler: Routine to block error handler.
2454 * @cmnd: Pointer to scsi_cmnd data structure.
2455 *
 2456 * This routine blocks execution until the fc_rport state is no longer FC_PORTSTATE_BLOCKED.
2457 **/
1408static void 2458static void
1409lpfc_block_error_handler(struct scsi_cmnd *cmnd) 2459lpfc_block_error_handler(struct scsi_cmnd *cmnd)
1410{ 2460{
@@ -1421,6 +2471,17 @@ lpfc_block_error_handler(struct scsi_cmnd *cmnd)
1421 return; 2471 return;
1422} 2472}
1423 2473
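The blocking is a simple poll: hold the host lock, and while the rport is still FC_PORTSTATE_BLOCKED, drop the lock, sleep, and re-check. A standalone sketch matching that shape (hypothetical copy; the rport comes from starget_to_rport() as in the other eh paths):

    #include <linux/delay.h>
    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_device.h>
    #include <scsi/scsi_host.h>
    #include <scsi/scsi_transport_fc.h>

    static void block_error_handler_sketch(struct scsi_cmnd *cmnd)
    {
            struct Scsi_Host *shost = cmnd->device->host;
            struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));

            spin_lock_irq(shost->host_lock);
            while (rport->port_state == FC_PORTSTATE_BLOCKED) {
                    spin_unlock_irq(shost->host_lock);
                    msleep(1000);   /* let the transport settle or time out */
                    spin_lock_irq(shost->host_lock);
            }
            spin_unlock_irq(shost->host_lock);
    }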
2474/**
 2475 * lpfc_abort_handler: eh_abort_handler entry point of Scsi Host Template data
 2476 * structure.
2477 * @cmnd: Pointer to scsi_cmnd data structure.
2478 *
 2479 * This routine aborts a @cmnd that is pending in the base driver.
2480 *
 2481 * Return code:
2482 * 0x2003 - Error
2483 * 0x2002 - Success
2484 **/
1424static int 2485static int
1425lpfc_abort_handler(struct scsi_cmnd *cmnd) 2486lpfc_abort_handler(struct scsi_cmnd *cmnd)
1426{ 2487{
@@ -1516,6 +2577,18 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
1516 return ret; 2577 return ret;
1517} 2578}
1518 2579
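Like .info earlier, the eh_* routines only run once they are wired into the scsi_host_template; midlayer error recovery escalates through them in order (abort, then device reset, then bus reset). Sketch of the relevant fields, template name again hypothetical:

    #include <scsi/scsi_host.h>

    static struct scsi_host_template lpfc_eh_hooks_sketch = {
            .eh_abort_handler        = lpfc_abort_handler,
            .eh_device_reset_handler = lpfc_device_reset_handler,
            .eh_bus_reset_handler    = lpfc_bus_reset_handler,
    };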
2580/**
2581 * lpfc_device_reset_handler: eh_device_reset entry point of Scsi Host Template
 2582 * data structure.
2583 * @cmnd: Pointer to scsi_cmnd data structure.
2584 *
2585 * This routine does a device reset by sending a TARGET_RESET task management
2586 * command.
2587 *
 2588 * Return code:
 2589 * 0x2003 - Error
 2590 * 0x2002 - Success
2591 **/
1519static int 2592static int
1520lpfc_device_reset_handler(struct scsi_cmnd *cmnd) 2593lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
1521{ 2594{
@@ -1560,7 +2633,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
1560 fc_get_event_number(), 2633 fc_get_event_number(),
1561 sizeof(scsi_event), 2634 sizeof(scsi_event),
1562 (char *)&scsi_event, 2635 (char *)&scsi_event,
1563 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 2636 LPFC_NL_VENDOR_ID);
1564 2637
1565 if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) { 2638 if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
1566 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 2639 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
@@ -1633,6 +2706,17 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
1633 return ret; 2706 return ret;
1634} 2707}
1635 2708
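Both reset handlers now pass LPFC_NL_VENDOR_ID to fc_host_post_vendor_event() instead of OR-ing the netlink vendor type and PCI vendor ID at every call site. The macro (from lpfc_nl.h) is assumed to fold exactly those two values:

    /* Assumed definition, per lpfc_nl.h. */
    #define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX)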
2709/**
2710 * lpfc_bus_reset_handler: eh_bus_reset_handler entry point of Scsi Host
2711 * Template data structure.
2712 * @cmnd: Pointer to scsi_cmnd data structure.
2713 *
 2714 * This routine issues a target reset to every target on @cmnd->device->host.
2715 *
2716 * Return Code:
2717 * 0x2003 - Error
2718 * 0x2002 - Success
2719 **/
1636static int 2720static int
1637lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) 2721lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1638{ 2722{
@@ -1657,7 +2741,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1657 fc_get_event_number(), 2741 fc_get_event_number(),
1658 sizeof(scsi_event), 2742 sizeof(scsi_event),
1659 (char *)&scsi_event, 2743 (char *)&scsi_event,
1660 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 2744 LPFC_NL_VENDOR_ID);
1661 2745
1662 lpfc_block_error_handler(cmnd); 2746 lpfc_block_error_handler(cmnd);
1663 /* 2747 /*
@@ -1723,6 +2807,20 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1723 return ret; 2807 return ret;
1724} 2808}
1725 2809
2810/**
2811 * lpfc_slave_alloc: slave_alloc entry point of Scsi Host Template data
2812 * structure.
2813 * @sdev: Pointer to scsi_device.
2814 *
 2815 * This routine populates cmds_per_lun + 2 scsi_bufs into this host's globally
 2816 * available list of scsi buffers. It also ensures that no more scsi buffers
 2817 * are allocated than the HBA limit conveyed to the midlayer. This list of
 2818 * scsi buffers exists for the lifetime of the driver.
2819 *
2820 * Return codes:
2821 * non-0 - Error
2822 * 0 - Success
2823 **/
1726static int 2824static int
1727lpfc_slave_alloc(struct scsi_device *sdev) 2825lpfc_slave_alloc(struct scsi_device *sdev)
1728{ 2826{
@@ -1784,6 +2882,19 @@ lpfc_slave_alloc(struct scsi_device *sdev)
1784 return 0; 2882 return 0;
1785} 2883}
1786 2884
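The doc comment's sizing rule reduces to: attempt to add lun_queue_depth + 2 buffers for each newly discovered LUN, clamped so the global pool never exceeds the HBA queue depth. A sketch of that arithmetic, assuming the cfg_/total_ field names used elsewhere in the driver:

    /* Sketch: scsi_bufs to add for a new LUN, clamped to the HBA limit. */
    num_to_alloc = vport->cfg_lun_queue_depth + 2;
    if (phba->total_scsi_bufs >= phba->cfg_hba_queue_depth)
            num_to_alloc = 0;       /* pool already at the limit */
    else if (phba->total_scsi_bufs + num_to_alloc > phba->cfg_hba_queue_depth)
            num_to_alloc = phba->cfg_hba_queue_depth - phba->total_scsi_bufs;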
2885/**
 2886 * lpfc_slave_configure: slave_configure entry point of Scsi Host Template data
 2887 * structure.
2888 * @sdev: Pointer to scsi_device.
2889 *
 2890 * This routine configures the following items:
2891 * - Tag command queuing support for @sdev if supported.
 2892 * - Dev loss timeout value of fc_rport.
2893 * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
2894 *
2895 * Return codes:
2896 * 0 - Success
2897 **/
1787static int 2898static int
1788lpfc_slave_configure(struct scsi_device *sdev) 2899lpfc_slave_configure(struct scsi_device *sdev)
1789{ 2900{
@@ -1813,6 +2924,12 @@ lpfc_slave_configure(struct scsi_device *sdev)
1813 return 0; 2924 return 0;
1814} 2925}
1815 2926
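Of the three items the comment lists, the first two reduce to a tagged-queuing call and a transport field write; the third mirrors the ENABLE_FCP_RING_POLLING handling in lpfc_queuecommand(). Sketch of the first two, assuming the cfg_ names used above and rport obtained via starget_to_rport():

    #include <scsi/scsi_tcq.h>

    /* Sketch: per-device setup in lpfc_slave_configure(). */
    if (sdev->tagged_supported)
            scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
    else
            scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);

    rport->dev_loss_tmo = vport->cfg_devloss_tmo; /* fc_rport dev-loss window */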
2927/**
2928 * lpfc_slave_destroy: slave_destroy entry point of SHT data structure.
2929 * @sdev: Pointer to scsi_device.
2930 *
 2931 * This routine sets the @sdev hostdata field to null.
2932 **/
1816static void 2933static void
1817lpfc_slave_destroy(struct scsi_device *sdev) 2934lpfc_slave_destroy(struct scsi_device *sdev)
1818{ 2935{