Diffstat (limited to 'drivers')
-rw-r--r--  drivers/scsi/lpfc/lpfc.h          |   72
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c       |    4
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c  |   17
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c      |   84
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c  |   69
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c     | 1907
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c     |  446
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c      | 1006
8 files changed, 2517 insertions, 1088 deletions
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 1105f9a111ba..6c24c9aabe7b 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -23,6 +23,13 @@
 
 struct lpfc_sli2_slim;
 
+#define LPFC_PCI_DEV_LP     0x1
+#define LPFC_PCI_DEV_OC     0x2
+
+#define LPFC_SLI_REV2       2
+#define LPFC_SLI_REV3       3
+#define LPFC_SLI_REV4       4
+
 #define LPFC_MAX_TARGET     4096    /* max number of targets supported */
 #define LPFC_MAX_DISC_THREADS   64  /* max outstanding discovery els
                                        requests */
@@ -264,8 +271,8 @@ enum hba_state {
 };
 
 struct lpfc_vport {
-    struct list_head listentry;
     struct lpfc_hba *phba;
+    struct list_head listentry;
     uint8_t port_type;
 #define LPFC_PHYSICAL_PORT 1
 #define LPFC_NPIV_PORT 2
@@ -420,8 +427,66 @@ enum intr_type_t {
 };
 
 struct lpfc_hba {
+    /* SCSI interface function jump table entries */
+    int (*lpfc_new_scsi_buf)
+        (struct lpfc_vport *, int);
+    struct lpfc_scsi_buf * (*lpfc_get_scsi_buf)
+        (struct lpfc_hba *);
+    int (*lpfc_scsi_prep_dma_buf)
+        (struct lpfc_hba *, struct lpfc_scsi_buf *);
+    void (*lpfc_scsi_unprep_dma_buf)
+        (struct lpfc_hba *, struct lpfc_scsi_buf *);
+    void (*lpfc_release_scsi_buf)
+        (struct lpfc_hba *, struct lpfc_scsi_buf *);
+    void (*lpfc_rampdown_queue_depth)
+        (struct lpfc_hba *);
+    void (*lpfc_scsi_prep_cmnd)
+        (struct lpfc_vport *, struct lpfc_scsi_buf *,
+         struct lpfc_nodelist *);
+    int (*lpfc_scsi_prep_task_mgmt_cmd)
+        (struct lpfc_vport *, struct lpfc_scsi_buf *,
+         unsigned int, uint8_t);
+
+    /* IOCB interface function jump table entries */
+    int (*__lpfc_sli_issue_iocb)
+        (struct lpfc_hba *, uint32_t,
+         struct lpfc_iocbq *, uint32_t);
+    void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *,
+         struct lpfc_iocbq *);
+    int (*lpfc_hba_down_post)(struct lpfc_hba *phba);
+
+
+    IOCB_t * (*lpfc_get_iocb_from_iocbq)
+        (struct lpfc_iocbq *);
+    void (*lpfc_scsi_cmd_iocb_cmpl)
+        (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *);
+
+    /* MBOX interface function jump table entries */
+    int (*lpfc_sli_issue_mbox)
+        (struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
+    /* Slow-path IOCB process function jump table entries */
+    void (*lpfc_sli_handle_slow_ring_event)
+        (struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+         uint32_t mask);
+    /* INIT device interface function jump table entries */
+    int (*lpfc_sli_hbq_to_firmware)
+        (struct lpfc_hba *, uint32_t, struct hbq_dmabuf *);
+    int (*lpfc_sli_brdrestart)
+        (struct lpfc_hba *);
+    int (*lpfc_sli_brdready)
+        (struct lpfc_hba *, uint32_t);
+    void (*lpfc_handle_eratt)
+        (struct lpfc_hba *);
+    void (*lpfc_stop_port)
+        (struct lpfc_hba *);
+
+
+    /* SLI4 specific HBA data structure */
+    struct lpfc_sli4_hba sli4_hba;
+
     struct lpfc_sli sli;
-    uint32_t sli_rev;       /* SLI2 or SLI3 */
+    uint8_t pci_dev_grp;    /* lpfc PCI dev group: 0x0, 0x1, 0x2,... */
+    uint32_t sli_rev;       /* SLI2, SLI3, or SLI4 */
     uint32_t sli3_options;  /* Mask of enabled SLI3 options */
 #define LPFC_SLI3_HBQ_ENABLED       0x01
 #define LPFC_SLI3_NPIV_ENABLED      0x02
@@ -526,11 +591,12 @@ struct lpfc_hba {
     unsigned long data_flags;
 
     uint32_t hbq_in_use;        /* HBQs in use flag */
-    struct list_head hbqbuf_in_list;  /* in-fly hbq buffer list */
+    struct list_head rb_pend_list;  /* Received buffers to be processed */
     uint32_t hbq_count;         /* Count of configured HBQs */
     struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */
 
     unsigned long pci_bar0_map;     /* Physical address for PCI BAR0 */
+    unsigned long pci_bar1_map;     /* Physical address for PCI BAR1 */
     unsigned long pci_bar2_map;     /* Physical address for PCI BAR2 */
     void __iomem *slim_memmap_p;    /* Kernel memory mapped address for
                                        PCI BAR0 */
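[Editorial note] The lpfc.h hunk above adds per-SLI-revision function pointers (a jump table) to struct lpfc_hba, so the rest of the driver calls one name and gets either the SLI-3 or SLI-4 implementation. The sketch below only illustrates that dispatch pattern in general terms; it is not code from the patch, and the names hba, hba_ops, setup_api and the *_s3/*_s4 helpers are invented for the example.

    #include <stdio.h>

    /* Illustrative stand-ins for the two hardware generations. */
    #define SLI_REV3 3
    #define SLI_REV4 4

    struct hba;

    /* Jump table: one entry per operation that differs by SLI revision. */
    struct hba_ops {
        int  (*hba_down_post)(struct hba *);
        void (*stop_port)(struct hba *);
    };

    struct hba {
        int sli_rev;
        struct hba_ops ops;     /* filled in once at probe time */
    };

    /* Hypothetical SLI-3 flavored implementations. */
    static int  hba_down_post_s3(struct hba *h) { (void)h; printf("s3 down_post\n"); return 0; }
    static void stop_port_s3(struct hba *h)     { (void)h; printf("s3 stop_port\n"); }

    /* Hypothetical SLI-4 flavored implementations. */
    static int  hba_down_post_s4(struct hba *h) { (void)h; printf("s4 down_post\n"); return 0; }
    static void stop_port_s4(struct hba *h)     { (void)h; printf("s4 stop_port\n"); }

    /* One-time API setup, analogous in spirit to an *_api_table_setup() call. */
    static int setup_api(struct hba *h)
    {
        switch (h->sli_rev) {
        case SLI_REV3:
            h->ops.hba_down_post = hba_down_post_s3;
            h->ops.stop_port     = stop_port_s3;
            return 0;
        case SLI_REV4:
            h->ops.hba_down_post = hba_down_post_s4;
            h->ops.stop_port     = stop_port_s4;
            return 0;
        default:
            return -1;      /* unknown device group/revision */
        }
    }

    int main(void)
    {
        struct hba h = { .sli_rev = SLI_REV4 };

        if (setup_api(&h))
            return 1;
        /* Callers never test sli_rev again; they just go through the table. */
        h.ops.hba_down_post(&h);
        h.ops.stop_port(&h);
        return 0;
    }

The benefit is visible in the rest of the patch: common paths stay branch-free, and only the probe/setup code needs to know which device family is present.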
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 896c7b0351e5..4164b935ea9f 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -267,8 +267,6 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
          uint32_t tmo, uint8_t retry)
 {
     struct lpfc_hba *phba = vport->phba;
-    struct lpfc_sli *psli = &phba->sli;
-    struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
     IOCB_t *icmd;
     struct lpfc_iocbq *geniocb;
     int rc;
@@ -331,7 +329,7 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
     geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
     geniocb->vport = vport;
     geniocb->retry = retry;
-    rc = lpfc_sli_issue_iocb(phba, pring, geniocb, 0);
+    rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
 
     if (rc == IOCB_ERROR) {
         lpfc_sli_release_iocbq(phba, geniocb);
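[Editorial note] The lpfc_ct.c hunk shows the mechanical change repeated throughout this patch: callers of lpfc_sli_issue_iocb() now pass a ring number (LPFC_ELS_RING) instead of a struct lpfc_sli_ring pointer, so the issue path can resolve the ring internally for whichever SLI revision is active. The small sketch below only illustrates why an index is friendlier to that kind of indirection; the adapter/ring types and issue_s3/issue_s4 routines are made up, not the driver's API.

    #include <stdio.h>

    #define ELS_RING  0
    #define FCP_RING  2
    #define MAX_RINGS 4

    struct ring { const char *name; };

    struct adapter {
        struct ring rings[MAX_RINGS];
        /* Per-revision issue routine picked once at init time. */
        int (*issue)(struct adapter *, unsigned int ring_no, const char *cmd);
    };

    /* Hypothetical SLI-3 style path: looks the ring up by index. */
    static int issue_s3(struct adapter *a, unsigned int ring_no, const char *cmd)
    {
        if (ring_no >= MAX_RINGS)
            return -1;
        printf("s3: queue %s on %s ring\n", cmd, a->rings[ring_no].name);
        return 0;
    }

    /* Hypothetical SLI-4 style path: maps the index to a different queue model. */
    static int issue_s4(struct adapter *a, unsigned int ring_no, const char *cmd)
    {
        (void)a;
        printf("s4: queue %s via work queue for ring %u\n", cmd, ring_no);
        return 0;
    }

    int main(void)
    {
        struct adapter a = {
            .rings = { [ELS_RING] = { "ELS" }, [FCP_RING] = { "FCP" } },
            .issue = issue_s3,      /* or issue_s4, chosen from the device type */
        };

        /* Callers only name the ring; they never touch a ring structure. */
        return a.issue(&a, ELS_RING, "PLOGI");
    }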
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 52be5644e07a..5dd66925f4ca 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -280,6 +280,8 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
     struct lpfc_dmabuf *d_buf;
     struct hbq_dmabuf *hbq_buf;
 
+    if (phba->sli_rev != 3)
+        return 0;
     cnt = LPFC_HBQINFO_SIZE;
     spin_lock_irq(&phba->hbalock);
 
@@ -489,12 +491,15 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
             pring->next_cmdidx, pring->local_getidx,
             pring->flag, pgpp->rspPutInx, pring->numRiocb);
     }
-    word0 = readl(phba->HAregaddr);
-    word1 = readl(phba->CAregaddr);
-    word2 = readl(phba->HSregaddr);
-    word3 = readl(phba->HCregaddr);
-    len += snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x HC:%08x\n",
-            word0, word1, word2, word3);
+
+    if (phba->sli_rev <= LPFC_SLI_REV3) {
+        word0 = readl(phba->HAregaddr);
+        word1 = readl(phba->CAregaddr);
+        word2 = readl(phba->HSregaddr);
+        word3 = readl(phba->HCregaddr);
+        len += snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x "
+                "HC:%08x\n", word0, word1, word2, word3);
+    }
     spin_unlock_irq(&phba->hbalock);
     return len;
 }
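[Editorial note] This debugfs hunk, and similar hunks later in lpfc_hbadisc.c and lpfc_init.c, wrap direct reads of the legacy HA/CA/HS/HC registers in a check on phba->sli_rev, since those SLI-3 register accesses are skipped on the newer interface. Below is a minimal sketch of that guard pattern, assuming an invented struct and helper; on real hardware the fields would be __iomem pointers accessed with readl().

    #include <stdint.h>
    #include <stdio.h>

    #define SLI_REV3 3
    #define SLI_REV4 4

    struct hba {
        int sli_rev;
        /* Pretend register values standing in for memory-mapped registers. */
        uint32_t ha, ca, hs, hc;
    };

    /* Dump legacy attention/control registers only where they exist. */
    static int dump_legacy_regs(const struct hba *h, char *buf, size_t len)
    {
        if (h->sli_rev > SLI_REV3)
            return 0;       /* newer ports: nothing meaningful at these offsets */

        return snprintf(buf, len, "HA:%08x CA:%08x HS:%08x HC:%08x\n",
                h->ha, h->ca, h->hs, h->hc);
    }

    int main(void)
    {
        char buf[80];
        struct hba sli3 = { .sli_rev = SLI_REV3, .ha = 0x10, .hc = 0x4 };
        struct hba sli4 = { .sli_rev = SLI_REV4 };

        if (dump_legacy_regs(&sli3, buf, sizeof(buf)))
            fputs(buf, stdout);
        if (!dump_legacy_regs(&sli4, buf, sizeof(buf)))
            puts("SLI-4 style port: skipped legacy register dump");
        return 0;
    }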
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b8b34cf5c3d2..8c5c3aea4a19 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -84,7 +84,8 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
     uint32_t ha_copy;
 
     if (vport->port_state >= LPFC_VPORT_READY ||
-        phba->link_state == LPFC_LINK_DOWN)
+        phba->link_state == LPFC_LINK_DOWN ||
+        phba->sli_rev > LPFC_SLI_REV3)
         return 0;
 
     /* Read the HBA Host Attention Register */
@@ -305,7 +306,7 @@ els_iocb_free_pcmb_exit:
  * 0 - successfully issued fabric registration login for @vport
  * -ENXIO -- failed to issue fabric registration login for @vport
  **/
-static int
+int
 lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
 {
     struct lpfc_hba *phba = vport->phba;
@@ -345,8 +346,7 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
         err = 4;
         goto fail;
     }
-    rc = lpfc_reg_login(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
-             0);
+    rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 0);
     if (rc) {
         err = 5;
         goto fail_free_mbox;
@@ -1350,14 +1350,12 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
     IOCB_t *icmd;
     struct lpfc_nodelist *ndlp;
     struct lpfc_iocbq *elsiocb;
-    struct lpfc_sli_ring *pring;
     struct lpfc_sli *psli;
     uint8_t *pcmd;
     uint16_t cmdsize;
     int ret;
 
     psli = &phba->sli;
-    pring = &psli->ring[LPFC_ELS_RING];  /* ELS ring */
 
     ndlp = lpfc_findnode_did(vport, did);
     if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
@@ -1391,7 +1389,7 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
 
     phba->fc_stat.elsXmitPLOGI++;
     elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
-    ret = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+    ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 
     if (ret == IOCB_ERROR) {
         lpfc_els_free_iocb(phba, elsiocb);
@@ -1501,14 +1499,9 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
     PRLI *npr;
     IOCB_t *icmd;
     struct lpfc_iocbq *elsiocb;
-    struct lpfc_sli_ring *pring;
-    struct lpfc_sli *psli;
     uint8_t *pcmd;
     uint16_t cmdsize;
 
-    psli = &phba->sli;
-    pring = &psli->ring[LPFC_ELS_RING];  /* ELS ring */
-
     cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
     elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
                      ndlp->nlp_DID, ELS_CMD_PRLI);
@@ -1550,7 +1543,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
     spin_lock_irq(shost->host_lock);
     ndlp->nlp_flag |= NLP_PRLI_SND;
     spin_unlock_irq(shost->host_lock);
-    if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+    if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+        IOCB_ERROR) {
         spin_lock_irq(shost->host_lock);
         ndlp->nlp_flag &= ~NLP_PRLI_SND;
         spin_unlock_irq(shost->host_lock);
@@ -1788,8 +1782,6 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
     ADISC *ap;
     IOCB_t *icmd;
     struct lpfc_iocbq *elsiocb;
-    struct lpfc_sli *psli = &phba->sli;
-    struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
     uint8_t *pcmd;
     uint16_t cmdsize;
 
@@ -1822,7 +1814,8 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
     spin_lock_irq(shost->host_lock);
     ndlp->nlp_flag |= NLP_ADISC_SND;
     spin_unlock_irq(shost->host_lock);
-    if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+    if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+        IOCB_ERROR) {
         spin_lock_irq(shost->host_lock);
         ndlp->nlp_flag &= ~NLP_ADISC_SND;
         spin_unlock_irq(shost->host_lock);
@@ -1937,15 +1930,10 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
     struct lpfc_hba *phba = vport->phba;
     IOCB_t *icmd;
     struct lpfc_iocbq *elsiocb;
-    struct lpfc_sli_ring *pring;
-    struct lpfc_sli *psli;
     uint8_t *pcmd;
     uint16_t cmdsize;
     int rc;
 
-    psli = &phba->sli;
-    pring = &psli->ring[LPFC_ELS_RING];
-
     spin_lock_irq(shost->host_lock);
     if (ndlp->nlp_flag & NLP_LOGO_SND) {
         spin_unlock_irq(shost->host_lock);
@@ -1978,7 +1966,7 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
     spin_lock_irq(shost->host_lock);
     ndlp->nlp_flag |= NLP_LOGO_SND;
     spin_unlock_irq(shost->host_lock);
-    rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+    rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 
     if (rc == IOCB_ERROR) {
         spin_lock_irq(shost->host_lock);
@@ -2058,14 +2046,12 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
     struct lpfc_hba *phba = vport->phba;
     IOCB_t *icmd;
     struct lpfc_iocbq *elsiocb;
-    struct lpfc_sli_ring *pring;
     struct lpfc_sli *psli;
     uint8_t *pcmd;
     uint16_t cmdsize;
     struct lpfc_nodelist *ndlp;
 
     psli = &phba->sli;
-    pring = &psli->ring[LPFC_ELS_RING];  /* ELS ring */
     cmdsize = (sizeof(uint32_t) + sizeof(SCR));
 
     ndlp = lpfc_findnode_did(vport, nportid);
@@ -2108,7 +2094,8 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
 
     phba->fc_stat.elsXmitSCR++;
     elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
-    if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+    if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+        IOCB_ERROR) {
         /* The additional lpfc_nlp_put will cause the following
          * lpfc_els_free_iocb routine to trigger the rlease of
          * the node.
@@ -2152,7 +2139,6 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
     struct lpfc_hba *phba = vport->phba;
     IOCB_t *icmd;
     struct lpfc_iocbq *elsiocb;
-    struct lpfc_sli_ring *pring;
     struct lpfc_sli *psli;
     FARP *fp;
     uint8_t *pcmd;
@@ -2162,7 +2148,6 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
     struct lpfc_nodelist *ndlp;
 
     psli = &phba->sli;
-    pring = &psli->ring[LPFC_ELS_RING];  /* ELS ring */
     cmdsize = (sizeof(uint32_t) + sizeof(FARP));
 
     ndlp = lpfc_findnode_did(vport, nportid);
@@ -2219,7 +2204,8 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
 
     phba->fc_stat.elsXmitFARPR++;
     elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
-    if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+    if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+        IOCB_ERROR) {
         /* The additional lpfc_nlp_put will cause the following
          * lpfc_els_free_iocb routine to trigger the release of
          * the node.
@@ -2961,6 +2947,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
          */
         lpfc_nlp_not_used(ndlp);
     }
+
     return;
 }
 
@@ -3170,7 +3157,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
     IOCB_t *icmd;
     IOCB_t *oldcmd;
     struct lpfc_iocbq *elsiocb;
-    struct lpfc_sli_ring *pring;
     struct lpfc_sli *psli;
     uint8_t *pcmd;
     uint16_t cmdsize;
@@ -3178,7 +3164,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
     ELS_PKT *els_pkt_ptr;
 
     psli = &phba->sli;
-    pring = &psli->ring[LPFC_ELS_RING];  /* ELS ring */
     oldcmd = &oldiocb->iocb;
 
     switch (flag) {
@@ -3266,7 +3251,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
     }
 
     phba->fc_stat.elsXmitACC++;
-    rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+    rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
     if (rc == IOCB_ERROR) {
         lpfc_els_free_iocb(phba, elsiocb);
         return 1;
@@ -3305,15 +3290,12 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
     IOCB_t *icmd;
     IOCB_t *oldcmd;
     struct lpfc_iocbq *elsiocb;
-    struct lpfc_sli_ring *pring;
     struct lpfc_sli *psli;
     uint8_t *pcmd;
     uint16_t cmdsize;
     int rc;
 
     psli = &phba->sli;
-    pring = &psli->ring[LPFC_ELS_RING];  /* ELS ring */
-
     cmdsize = 2 * sizeof(uint32_t);
     elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
                      ndlp->nlp_DID, ELS_CMD_LS_RJT);
@@ -3346,7 +3328,7 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
 
     phba->fc_stat.elsXmitLSRJT++;
     elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
-    rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+    rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 
     if (rc == IOCB_ERROR) {
         lpfc_els_free_iocb(phba, elsiocb);
@@ -3379,8 +3361,6 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
               struct lpfc_nodelist *ndlp)
 {
     struct lpfc_hba *phba = vport->phba;
-    struct lpfc_sli *psli = &phba->sli;
-    struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
     ADISC *ap;
     IOCB_t *icmd, *oldcmd;
     struct lpfc_iocbq *elsiocb;
@@ -3422,7 +3402,7 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
 
     phba->fc_stat.elsXmitACC++;
     elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
-    rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+    rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
     if (rc == IOCB_ERROR) {
         lpfc_els_free_iocb(phba, elsiocb);
         return 1;
@@ -3459,14 +3439,12 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
     IOCB_t *icmd;
     IOCB_t *oldcmd;
     struct lpfc_iocbq *elsiocb;
-    struct lpfc_sli_ring *pring;
     struct lpfc_sli *psli;
     uint8_t *pcmd;
     uint16_t cmdsize;
     int rc;
 
     psli = &phba->sli;
-    pring = &psli->ring[LPFC_ELS_RING];  /* ELS ring */
 
     cmdsize = sizeof(uint32_t) + sizeof(PRLI);
     elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
@@ -3520,7 +3498,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
     phba->fc_stat.elsXmitACC++;
     elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
 
-    rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+    rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
     if (rc == IOCB_ERROR) {
         lpfc_els_free_iocb(phba, elsiocb);
         return 1;
@@ -3562,15 +3540,12 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
     RNID *rn;
     IOCB_t *icmd, *oldcmd;
     struct lpfc_iocbq *elsiocb;
-    struct lpfc_sli_ring *pring;
     struct lpfc_sli *psli;
     uint8_t *pcmd;
     uint16_t cmdsize;
     int rc;
 
     psli = &phba->sli;
-    pring = &psli->ring[LPFC_ELS_RING];
-
     cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
         + (2 * sizeof(struct lpfc_name));
     if (format)
@@ -3626,7 +3601,7 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
     elsiocb->context1 = NULL;  /* Don't need ndlp for cmpl,
                                 * it could be freed */
 
-    rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+    rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
     if (rc == IOCB_ERROR) {
         lpfc_els_free_iocb(phba, elsiocb);
         return 1;
@@ -4440,8 +4415,6 @@ lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 static void
 lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
-    struct lpfc_sli *psli = &phba->sli;
-    struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
     MAILBOX_t *mb;
     IOCB_t *icmd;
     RPS_RSP *rps_rsp;
@@ -4507,7 +4480,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
             ndlp->nlp_rpi);
     elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
     phba->fc_stat.elsXmitACC++;
-    if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR)
+    if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
         lpfc_els_free_iocb(phba, elsiocb);
     return;
 }
@@ -4616,8 +4589,6 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
     IOCB_t *icmd, *oldcmd;
     RPL_RSP rpl_rsp;
     struct lpfc_iocbq *elsiocb;
-    struct lpfc_sli *psli = &phba->sli;
-    struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
     uint8_t *pcmd;
 
     elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
@@ -4654,7 +4625,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
             ndlp->nlp_rpi);
     elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
     phba->fc_stat.elsXmitACC++;
-    if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+    if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+        IOCB_ERROR) {
         lpfc_els_free_iocb(phba, elsiocb);
         return 1;
     }
@@ -6139,7 +6111,6 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
     struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
     struct lpfc_hba *phba = vport->phba;
-    struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
     IOCB_t *icmd;
     struct lpfc_iocbq *elsiocb;
     uint8_t *pcmd;
@@ -6169,7 +6140,8 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
     spin_lock_irq(shost->host_lock);
     ndlp->nlp_flag |= NLP_LOGO_SND;
     spin_unlock_irq(shost->host_lock);
-    if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+    if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+        IOCB_ERROR) {
         spin_lock_irq(shost->host_lock);
         ndlp->nlp_flag &= ~NLP_LOGO_SND;
         spin_unlock_irq(shost->host_lock);
@@ -6224,7 +6196,6 @@ lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
     struct lpfc_iocbq *iocb;
     unsigned long iflags;
     int ret;
-    struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
     IOCB_t *cmd;
 
 repeat:
@@ -6248,7 +6219,7 @@ repeat:
             "Fabric sched1: ste:x%x",
             iocb->vport->port_state, 0, 0);
 
-        ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
+        ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
 
         if (ret == IOCB_ERROR) {
             iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
@@ -6394,7 +6365,6 @@ static int
 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
 {
     unsigned long iflags;
-    struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
     int ready;
     int ret;
 
@@ -6418,7 +6388,7 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
         "Fabric sched2: ste:x%x",
         iocb->vport->port_state, 0, 0);
 
-    ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
+    ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
 
     if (ret == IOCB_ERROR) {
         iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index e764ce0bf704..25fc96c9081f 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -555,23 +555,24 @@ lpfc_work_done(struct lpfc_hba *phba)
         /*
          * Turn on Ring interrupts
          */
-        spin_lock_irq(&phba->hbalock);
-        control = readl(phba->HCregaddr);
-        if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
-            lpfc_debugfs_slow_ring_trc(phba,
-                "WRK Enable ring: cntl:x%x hacopy:x%x",
-                control, ha_copy, 0);
-
-            control |= (HC_R0INT_ENA << LPFC_ELS_RING);
-            writel(control, phba->HCregaddr);
-            readl(phba->HCregaddr); /* flush */
-        }
-        else {
-            lpfc_debugfs_slow_ring_trc(phba,
-                "WRK Ring ok: cntl:x%x hacopy:x%x",
-                control, ha_copy, 0);
+        if (phba->sli_rev <= LPFC_SLI_REV3) {
+            spin_lock_irq(&phba->hbalock);
+            control = readl(phba->HCregaddr);
+            if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
+                lpfc_debugfs_slow_ring_trc(phba,
+                    "WRK Enable ring: cntl:x%x hacopy:x%x",
+                    control, ha_copy, 0);
+
+                control |= (HC_R0INT_ENA << LPFC_ELS_RING);
+                writel(control, phba->HCregaddr);
+                readl(phba->HCregaddr); /* flush */
+            } else {
+                lpfc_debugfs_slow_ring_trc(phba,
+                    "WRK Ring ok: cntl:x%x hacopy:x%x",
+                    control, ha_copy, 0);
+            }
+            spin_unlock_irq(&phba->hbalock);
         }
-        spin_unlock_irq(&phba->hbalock);
     }
     lpfc_work_list_done(phba);
 }
@@ -689,7 +690,7 @@ lpfc_port_link_failure(struct lpfc_vport *vport)
     lpfc_can_disctmo(vport);
 }
 
-static void
+void
 lpfc_linkdown_port(struct lpfc_vport *vport)
 {
     struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
@@ -1147,10 +1148,12 @@ lpfc_enable_la(struct lpfc_hba *phba)
     struct lpfc_sli *psli = &phba->sli;
     spin_lock_irq(&phba->hbalock);
     psli->sli_flag |= LPFC_PROCESS_LA;
-    control = readl(phba->HCregaddr);
-    control |= HC_LAINT_ENA;
-    writel(control, phba->HCregaddr);
-    readl(phba->HCregaddr); /* flush */
+    if (phba->sli_rev <= LPFC_SLI_REV3) {
+        control = readl(phba->HCregaddr);
+        control |= HC_LAINT_ENA;
+        writel(control, phba->HCregaddr);
+        readl(phba->HCregaddr); /* flush */
+    }
     spin_unlock_irq(&phba->hbalock);
 }
 
@@ -2919,11 +2922,13 @@ restart_disc:
          * set port_state to PORT_READY if SLI2.
          * cmpl_reg_vpi will set port_state to READY for SLI3.
          */
-        if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
-            lpfc_issue_reg_vpi(phba, vport);
-        else {  /* NPIV Not enabled */
-            lpfc_issue_clear_la(phba, vport);
-            vport->port_state = LPFC_VPORT_READY;
+        if (phba->sli_rev < LPFC_SLI_REV4) {
+            if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
+                lpfc_issue_reg_vpi(phba, vport);
+            else {  /* NPIV Not enabled */
+                lpfc_issue_clear_la(phba, vport);
+                vport->port_state = LPFC_VPORT_READY;
+            }
         }
 
         /* Setup and issue mailbox INITIALIZE LINK command */
@@ -2959,11 +2964,13 @@ restart_disc:
          * set port_state to PORT_READY if SLI2.
          * cmpl_reg_vpi will set port_state to READY for SLI3.
          */
-        if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
-            lpfc_issue_reg_vpi(phba, vport);
-        else {  /* NPIV Not enabled */
-            lpfc_issue_clear_la(phba, vport);
-            vport->port_state = LPFC_VPORT_READY;
+        if (phba->sli_rev < LPFC_SLI_REV4) {
+            if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
+                lpfc_issue_reg_vpi(phba, vport);
+            else {  /* NPIV Not enabled */
+                lpfc_issue_clear_la(phba, vport);
+                vport->port_state = LPFC_VPORT_READY;
+            }
         }
         break;
 
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 86d1bdcbf2d8..3f06ce2becf5 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c | |||
@@ -571,16 +571,20 @@ lpfc_hba_down_prep(struct lpfc_hba *phba) | |||
571 | { | 571 | { |
572 | struct lpfc_vport **vports; | 572 | struct lpfc_vport **vports; |
573 | int i; | 573 | int i; |
574 | /* Disable interrupts */ | 574 | |
575 | writel(0, phba->HCregaddr); | 575 | if (phba->sli_rev <= LPFC_SLI_REV3) { |
576 | readl(phba->HCregaddr); /* flush */ | 576 | /* Disable interrupts */ |
577 | writel(0, phba->HCregaddr); | ||
578 | readl(phba->HCregaddr); /* flush */ | ||
579 | } | ||
577 | 580 | ||
578 | if (phba->pport->load_flag & FC_UNLOADING) | 581 | if (phba->pport->load_flag & FC_UNLOADING) |
579 | lpfc_cleanup_discovery_resources(phba->pport); | 582 | lpfc_cleanup_discovery_resources(phba->pport); |
580 | else { | 583 | else { |
581 | vports = lpfc_create_vport_work_array(phba); | 584 | vports = lpfc_create_vport_work_array(phba); |
582 | if (vports != NULL) | 585 | if (vports != NULL) |
583 | for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) | 586 | for (i = 0; i <= phba->max_vports && |
587 | vports[i] != NULL; i++) | ||
584 | lpfc_cleanup_discovery_resources(vports[i]); | 588 | lpfc_cleanup_discovery_resources(vports[i]); |
585 | lpfc_destroy_vport_work_array(phba, vports); | 589 | lpfc_destroy_vport_work_array(phba, vports); |
586 | } | 590 | } |
@@ -588,7 +592,7 @@ lpfc_hba_down_prep(struct lpfc_hba *phba) | |||
588 | } | 592 | } |
589 | 593 | ||
590 | /** | 594 | /** |
591 | * lpfc_hba_down_post - Perform lpfc uninitialization after HBA reset | 595 | * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset |
592 | * @phba: pointer to lpfc HBA data structure. | 596 | * @phba: pointer to lpfc HBA data structure. |
593 | * | 597 | * |
594 | * This routine will do uninitialization after the HBA is reset when bring | 598 | * This routine will do uninitialization after the HBA is reset when bring |
@@ -598,8 +602,8 @@ lpfc_hba_down_prep(struct lpfc_hba *phba) | |||
598 | * 0 - sucess. | 602 | * 0 - sucess. |
599 | * Any other value - error. | 603 | * Any other value - error. |
600 | **/ | 604 | **/ |
601 | int | 605 | static int |
602 | lpfc_hba_down_post(struct lpfc_hba *phba) | 606 | lpfc_hba_down_post_s3(struct lpfc_hba *phba) |
603 | { | 607 | { |
604 | struct lpfc_sli *psli = &phba->sli; | 608 | struct lpfc_sli *psli = &phba->sli; |
605 | struct lpfc_sli_ring *pring; | 609 | struct lpfc_sli_ring *pring; |
@@ -909,13 +913,30 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba) | |||
909 | if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING))) | 913 | if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING))) |
910 | phba->work_hs = old_host_status & ~HS_FFER1; | 914 | phba->work_hs = old_host_status & ~HS_FFER1; |
911 | 915 | ||
916 | spin_lock_irq(&phba->hbalock); | ||
912 | phba->hba_flag &= ~DEFER_ERATT; | 917 | phba->hba_flag &= ~DEFER_ERATT; |
918 | spin_unlock_irq(&phba->hbalock); | ||
913 | phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); | 919 | phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); |
914 | phba->work_status[1] = readl(phba->MBslimaddr + 0xac); | 920 | phba->work_status[1] = readl(phba->MBslimaddr + 0xac); |
915 | } | 921 | } |
916 | 922 | ||
923 | static void | ||
924 | lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba) | ||
925 | { | ||
926 | struct lpfc_board_event_header board_event; | ||
927 | struct Scsi_Host *shost; | ||
928 | |||
929 | board_event.event_type = FC_REG_BOARD_EVENT; | ||
930 | board_event.subcategory = LPFC_EVENT_PORTINTERR; | ||
931 | shost = lpfc_shost_from_vport(phba->pport); | ||
932 | fc_host_post_vendor_event(shost, fc_get_event_number(), | ||
933 | sizeof(board_event), | ||
934 | (char *) &board_event, | ||
935 | LPFC_NL_VENDOR_ID); | ||
936 | } | ||
937 | |||
917 | /** | 938 | /** |
918 | * lpfc_handle_eratt - The HBA hardware error handler | 939 | * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler |
919 | * @phba: pointer to lpfc hba data structure. | 940 | * @phba: pointer to lpfc hba data structure. |
920 | * | 941 | * |
921 | * This routine is invoked to handle the following HBA hardware error | 942 | * This routine is invoked to handle the following HBA hardware error |
@@ -924,8 +945,8 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba) | |||
924 | * 2 - DMA ring index out of range | 945 | * 2 - DMA ring index out of range |
925 | * 3 - Mailbox command came back as unknown | 946 | * 3 - Mailbox command came back as unknown |
926 | **/ | 947 | **/ |
927 | void | 948 | static void |
928 | lpfc_handle_eratt(struct lpfc_hba *phba) | 949 | lpfc_handle_eratt_s3(struct lpfc_hba *phba) |
929 | { | 950 | { |
930 | struct lpfc_vport *vport = phba->pport; | 951 | struct lpfc_vport *vport = phba->pport; |
931 | struct lpfc_sli *psli = &phba->sli; | 952 | struct lpfc_sli *psli = &phba->sli; |
@@ -934,24 +955,23 @@ lpfc_handle_eratt(struct lpfc_hba *phba) | |||
934 | unsigned long temperature; | 955 | unsigned long temperature; |
935 | struct temp_event temp_event_data; | 956 | struct temp_event temp_event_data; |
936 | struct Scsi_Host *shost; | 957 | struct Scsi_Host *shost; |
937 | struct lpfc_board_event_header board_event; | ||
938 | 958 | ||
939 | /* If the pci channel is offline, ignore possible errors, | 959 | /* If the pci channel is offline, ignore possible errors, |
940 | * since we cannot communicate with the pci card anyway. */ | 960 | * since we cannot communicate with the pci card anyway. |
941 | if (pci_channel_offline(phba->pcidev)) | 961 | */ |
962 | if (pci_channel_offline(phba->pcidev)) { | ||
963 | spin_lock_irq(&phba->hbalock); | ||
964 | phba->hba_flag &= ~DEFER_ERATT; | ||
965 | spin_unlock_irq(&phba->hbalock); | ||
942 | return; | 966 | return; |
967 | } | ||
968 | |||
943 | /* If resets are disabled then leave the HBA alone and return */ | 969 | /* If resets are disabled then leave the HBA alone and return */ |
944 | if (!phba->cfg_enable_hba_reset) | 970 | if (!phba->cfg_enable_hba_reset) |
945 | return; | 971 | return; |
946 | 972 | ||
947 | /* Send an internal error event to mgmt application */ | 973 | /* Send an internal error event to mgmt application */ |
948 | board_event.event_type = FC_REG_BOARD_EVENT; | 974 | lpfc_board_errevt_to_mgmt(phba); |
949 | board_event.subcategory = LPFC_EVENT_PORTINTERR; | ||
950 | shost = lpfc_shost_from_vport(phba->pport); | ||
951 | fc_host_post_vendor_event(shost, fc_get_event_number(), | ||
952 | sizeof(board_event), | ||
953 | (char *) &board_event, | ||
954 | LPFC_NL_VENDOR_ID); | ||
955 | 975 | ||
956 | if (phba->hba_flag & DEFER_ERATT) | 976 | if (phba->hba_flag & DEFER_ERATT) |
957 | lpfc_handle_deferred_eratt(phba); | 977 | lpfc_handle_deferred_eratt(phba); |
@@ -1137,7 +1157,7 @@ lpfc_handle_latt_err_exit: | |||
1137 | * 0 - pointer to the VPD passed in is NULL | 1157 | * 0 - pointer to the VPD passed in is NULL |
1138 | * 1 - success | 1158 | * 1 - success |
1139 | **/ | 1159 | **/ |
1140 | static int | 1160 | int |
1141 | lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) | 1161 | lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) |
1142 | { | 1162 | { |
1143 | uint8_t lenlo, lenhi; | 1163 | uint8_t lenlo, lenhi; |
@@ -1533,7 +1553,8 @@ lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) | |||
1533 | icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; | 1553 | icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; |
1534 | icmd->ulpLe = 1; | 1554 | icmd->ulpLe = 1; |
1535 | 1555 | ||
1536 | if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) { | 1556 | if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == |
1557 | IOCB_ERROR) { | ||
1537 | lpfc_mbuf_free(phba, mp1->virt, mp1->phys); | 1558 | lpfc_mbuf_free(phba, mp1->virt, mp1->phys); |
1538 | kfree(mp1); | 1559 | kfree(mp1); |
1539 | cnt++; | 1560 | cnt++; |
@@ -1761,7 +1782,6 @@ lpfc_cleanup(struct lpfc_vport *vport) | |||
1761 | * Lets wait for this to happen, if needed. | 1782 | * Lets wait for this to happen, if needed. |
1762 | */ | 1783 | */ |
1763 | while (!list_empty(&vport->fc_nodes)) { | 1784 | while (!list_empty(&vport->fc_nodes)) { |
1764 | |||
1765 | if (i++ > 3000) { | 1785 | if (i++ > 3000) { |
1766 | lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, | 1786 | lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, |
1767 | "0233 Nodelist not empty\n"); | 1787 | "0233 Nodelist not empty\n"); |
@@ -1782,7 +1802,6 @@ lpfc_cleanup(struct lpfc_vport *vport) | |||
1782 | /* Wait for any activity on ndlps to settle */ | 1802 | /* Wait for any activity on ndlps to settle */ |
1783 | msleep(10); | 1803 | msleep(10); |
1784 | } | 1804 | } |
1785 | return; | ||
1786 | } | 1805 | } |
1787 | 1806 | ||
1788 | /** | 1807 | /** |
@@ -1803,22 +1822,36 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport) | |||
1803 | } | 1822 | } |
1804 | 1823 | ||
1805 | /** | 1824 | /** |
1806 | * lpfc_stop_phba_timers - Stop all the timers associated with an HBA | 1825 | * lpfc_stop_hba_timers - Stop all the timers associated with an HBA |
1807 | * @phba: pointer to lpfc hba data structure. | 1826 | * @phba: pointer to lpfc hba data structure. |
1808 | * | 1827 | * |
1809 | * This routine stops all the timers associated with a HBA. This function is | 1828 | * This routine stops all the timers associated with a HBA. This function is |
1810 | * invoked before either putting a HBA offline or unloading the driver. | 1829 | * invoked before either putting a HBA offline or unloading the driver. |
1811 | **/ | 1830 | **/ |
1812 | static void | 1831 | void |
1813 | lpfc_stop_phba_timers(struct lpfc_hba *phba) | 1832 | lpfc_stop_hba_timers(struct lpfc_hba *phba) |
1814 | { | 1833 | { |
1815 | del_timer_sync(&phba->fcp_poll_timer); | ||
1816 | lpfc_stop_vport_timers(phba->pport); | 1834 | lpfc_stop_vport_timers(phba->pport); |
1817 | del_timer_sync(&phba->sli.mbox_tmo); | 1835 | del_timer_sync(&phba->sli.mbox_tmo); |
1818 | del_timer_sync(&phba->fabric_block_timer); | 1836 | del_timer_sync(&phba->fabric_block_timer); |
1819 | phba->hb_outstanding = 0; | ||
1820 | del_timer_sync(&phba->hb_tmofunc); | ||
1821 | del_timer_sync(&phba->eratt_poll); | 1837 | del_timer_sync(&phba->eratt_poll); |
1838 | del_timer_sync(&phba->hb_tmofunc); | ||
1839 | phba->hb_outstanding = 0; | ||
1840 | |||
1841 | switch (phba->pci_dev_grp) { | ||
1842 | case LPFC_PCI_DEV_LP: | ||
1843 | /* Stop any LightPulse device specific driver timers */ | ||
1844 | del_timer_sync(&phba->fcp_poll_timer); | ||
1845 | break; | ||
1846 | case LPFC_PCI_DEV_OC: | ||
1847 | /* Stop any OneConnect device sepcific driver timers */ | ||
1848 | break; | ||
1849 | default: | ||
1850 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
1851 | "0297 Invalid device group (x%x)\n", | ||
1852 | phba->pci_dev_grp); | ||
1853 | break; | ||
1854 | } | ||
1822 | return; | 1855 | return; |
1823 | } | 1856 | } |
1824 | 1857 | ||
@@ -2509,9 +2542,8 @@ lpfc_disable_msi(struct lpfc_hba *phba) | |||
2509 | * | 2542 | * |
2510 | * This routine it invoked to log the currently used active interrupt mode | 2543 | * This routine it invoked to log the currently used active interrupt mode |
2511 | * to the device. | 2544 | * to the device. |
2512 | */ | 2545 | **/ |
2513 | static void | 2546 | static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) |
2514 | lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) | ||
2515 | { | 2547 | { |
2516 | switch (intr_mode) { | 2548 | switch (intr_mode) { |
2517 | case 0: | 2549 | case 0: |
@@ -2534,228 +2566,671 @@ lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) | |||
2534 | return; | 2566 | return; |
2535 | } | 2567 | } |
2536 | 2568 | ||
2569 | /** | ||
2570 | * lpfc_enable_pci_dev - Enable a generic PCI device. | ||
2571 | * @phba: pointer to lpfc hba data structure. | ||
2572 | * | ||
2573 | * This routine is invoked to enable the PCI device that is common to all | ||
2574 | * PCI devices. | ||
2575 | * | ||
2576 | * Return codes | ||
2577 | * 0 - sucessful | ||
2578 | * other values - error | ||
2579 | **/ | ||
2580 | static int | ||
2581 | lpfc_enable_pci_dev(struct lpfc_hba *phba) | ||
2582 | { | ||
2583 | struct pci_dev *pdev; | ||
2584 | int bars; | ||
2585 | |||
2586 | /* Obtain PCI device reference */ | ||
2587 | if (!phba->pcidev) | ||
2588 | goto out_error; | ||
2589 | else | ||
2590 | pdev = phba->pcidev; | ||
2591 | /* Select PCI BARs */ | ||
2592 | bars = pci_select_bars(pdev, IORESOURCE_MEM); | ||
2593 | /* Enable PCI device */ | ||
2594 | if (pci_enable_device_mem(pdev)) | ||
2595 | goto out_error; | ||
2596 | /* Request PCI resource for the device */ | ||
2597 | if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME)) | ||
2598 | goto out_disable_device; | ||
2599 | /* Set up device as PCI master and save state for EEH */ | ||
2600 | pci_set_master(pdev); | ||
2601 | pci_try_set_mwi(pdev); | ||
2602 | pci_save_state(pdev); | ||
2603 | |||
2604 | return 0; | ||
2605 | |||
2606 | out_disable_device: | ||
2607 | pci_disable_device(pdev); | ||
2608 | out_error: | ||
2609 | return -ENODEV; | ||
2610 | } | ||
2611 | |||
2612 | /** | ||
2613 | * lpfc_disable_pci_dev - Disable a generic PCI device. | ||
2614 | * @phba: pointer to lpfc hba data structure. | ||
2615 | * | ||
2616 | * This routine is invoked to disable the PCI device that is common to all | ||
2617 | * PCI devices. | ||
2618 | **/ | ||
2537 | static void | 2619 | static void |
2538 | lpfc_stop_port(struct lpfc_hba *phba) | 2620 | lpfc_disable_pci_dev(struct lpfc_hba *phba) |
2539 | { | 2621 | { |
2540 | /* Clear all interrupt enable conditions */ | 2622 | struct pci_dev *pdev; |
2541 | writel(0, phba->HCregaddr); | 2623 | int bars; |
2542 | readl(phba->HCregaddr); /* flush */ | ||
2543 | /* Clear all pending interrupts */ | ||
2544 | writel(0xffffffff, phba->HAregaddr); | ||
2545 | readl(phba->HAregaddr); /* flush */ | ||
2546 | 2624 | ||
2547 | /* Reset some HBA SLI setup states */ | 2625 | /* Obtain PCI device reference */ |
2548 | lpfc_stop_phba_timers(phba); | 2626 | if (!phba->pcidev) |
2549 | phba->pport->work_port_events = 0; | 2627 | return; |
2628 | else | ||
2629 | pdev = phba->pcidev; | ||
2630 | /* Select PCI BARs */ | ||
2631 | bars = pci_select_bars(pdev, IORESOURCE_MEM); | ||
2632 | /* Release PCI resource and disable PCI device */ | ||
2633 | pci_release_selected_regions(pdev, bars); | ||
2634 | pci_disable_device(pdev); | ||
2635 | /* Null out PCI private reference to driver */ | ||
2636 | pci_set_drvdata(pdev, NULL); | ||
2550 | 2637 | ||
2551 | return; | 2638 | return; |
2552 | } | 2639 | } |
2553 | 2640 | ||
2554 | /** | 2641 | /** |
2555 | * lpfc_enable_intr - Enable device interrupt | 2642 | * lpfc_reset_hba - Reset a hba |
2556 | * @phba: pointer to lpfc hba data structure. | 2643 | * @phba: pointer to lpfc hba data structure. |
2557 | * | 2644 | * |
2558 | * This routine is invoked to enable device interrupt and associate driver's | 2645 | * This routine is invoked to reset a hba device. It brings the HBA |
2559 | * interrupt handler(s) to interrupt vector(s). Depends on the interrupt | 2646 | * offline, performs a board restart, and then brings the board back |
2560 | * mode configured to the driver, the driver will try to fallback from the | 2647 | * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up |
2561 | * configured interrupt mode to an interrupt mode which is supported by the | 2648 | * on outstanding mailbox commands. |
2562 | * platform, kernel, and device in the order of: MSI-X -> MSI -> IRQ. | 2649 | **/ |
2650 | void | ||
2651 | lpfc_reset_hba(struct lpfc_hba *phba) | ||
2652 | { | ||
2653 | /* If resets are disabled then set error state and return. */ | ||
2654 | if (!phba->cfg_enable_hba_reset) { | ||
2655 | phba->link_state = LPFC_HBA_ERROR; | ||
2656 | return; | ||
2657 | } | ||
2658 | lpfc_offline_prep(phba); | ||
2659 | lpfc_offline(phba); | ||
2660 | lpfc_sli_brdrestart(phba); | ||
2661 | lpfc_online(phba); | ||
2662 | lpfc_unblock_mgmt_io(phba); | ||
2663 | } | ||
2664 | |||
2665 | /** | ||
2666 | * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev. | ||
2667 | * @phba: pointer to lpfc hba data structure. | ||
2668 | * | ||
2669 | * This routine is invoked to set up the driver internal resources specific to | ||
2670 | * support the SLI-3 HBA device it attached to. | ||
2563 | * | 2671 | * |
2564 | * Return codes | 2672 | * Return codes |
2565 | * 0 - sucessful | 2673 | * 0 - sucessful |
2566 | * other values - error | 2674 | * other values - error |
2567 | **/ | 2675 | **/ |
2568 | static uint32_t | 2676 | static int |
2569 | lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) | 2677 | lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) |
2570 | { | 2678 | { |
2571 | uint32_t intr_mode = LPFC_INTR_ERROR; | 2679 | struct lpfc_sli *psli; |
2572 | int retval; | ||
2573 | 2680 | ||
2574 | if (cfg_mode == 2) { | 2681 | /* |
2575 | /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ | 2682 | * Initialize timers used by driver |
2576 | retval = lpfc_sli_config_port(phba, 3); | 2683 | */ |
2577 | if (!retval) { | 2684 | |
2578 | /* Now, try to enable MSI-X interrupt mode */ | 2685 | /* Heartbeat timer */ |
2579 | retval = lpfc_enable_msix(phba); | 2686 | init_timer(&phba->hb_tmofunc); |
2580 | if (!retval) { | 2687 | phba->hb_tmofunc.function = lpfc_hb_timeout; |
2581 | /* Indicate initialization to MSI-X mode */ | 2688 | phba->hb_tmofunc.data = (unsigned long)phba; |
2582 | phba->intr_type = MSIX; | 2689 | |
2583 | intr_mode = 2; | 2690 | psli = &phba->sli; |
2584 | } | 2691 | /* MBOX heartbeat timer */ |
2585 | } | 2692 | init_timer(&psli->mbox_tmo); |
2693 | psli->mbox_tmo.function = lpfc_mbox_timeout; | ||
2694 | psli->mbox_tmo.data = (unsigned long) phba; | ||
2695 | /* FCP polling mode timer */ | ||
2696 | init_timer(&phba->fcp_poll_timer); | ||
2697 | phba->fcp_poll_timer.function = lpfc_poll_timeout; | ||
2698 | phba->fcp_poll_timer.data = (unsigned long) phba; | ||
2699 | /* Fabric block timer */ | ||
2700 | init_timer(&phba->fabric_block_timer); | ||
2701 | phba->fabric_block_timer.function = lpfc_fabric_block_timeout; | ||
2702 | phba->fabric_block_timer.data = (unsigned long) phba; | ||
2703 | /* EA polling mode timer */ | ||
2704 | init_timer(&phba->eratt_poll); | ||
2705 | phba->eratt_poll.function = lpfc_poll_eratt; | ||
2706 | phba->eratt_poll.data = (unsigned long) phba; | ||
2707 | |||
2708 | /* Host attention work mask setup */ | ||
2709 | phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); | ||
2710 | phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); | ||
2711 | |||
2712 | /* Get all the module params for configuring this host */ | ||
2713 | lpfc_get_cfgparam(phba); | ||
2714 | /* | ||
2715 | * Since the sg_tablesize is module parameter, the sg_dma_buf_size | ||
2716 | * used to create the sg_dma_buf_pool must be dynamically calculated. | ||
2717 | * 2 segments are added since the IOCB needs a command and response bde. | ||
2718 | */ | ||
2719 | phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + | ||
2720 | sizeof(struct fcp_rsp) + | ||
2721 | ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); | ||
2722 | |||
2723 | if (phba->cfg_enable_bg) { | ||
2724 | phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT; | ||
2725 | phba->cfg_sg_dma_buf_size += | ||
2726 | phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64); | ||
2586 | } | 2727 | } |
2587 | 2728 | ||
2588 | /* Fallback to MSI if MSI-X initialization failed */ | 2729 | /* Also reinitialize the host templates with new values. */ |
2589 | if (cfg_mode >= 1 && phba->intr_type == NONE) { | 2730 | lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; |
2590 | retval = lpfc_enable_msi(phba); | 2731 | lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; |
2591 | if (!retval) { | 2732 | |
2592 | /* Indicate initialization to MSI mode */ | 2733 | phba->max_vpi = LPFC_MAX_VPI; |
2593 | phba->intr_type = MSI; | 2734 | /* This will be set to correct value after config_port mbox */ |
2594 | intr_mode = 1; | 2735 | phba->max_vports = 0; |
2595 | } | 2736 | |
2737 | /* | ||
2738 | * Initialize the SLI Layer to run with lpfc HBAs. | ||
2739 | */ | ||
2740 | lpfc_sli_setup(phba); | ||
2741 | lpfc_sli_queue_setup(phba); | ||
2742 | |||
2743 | /* Allocate device driver memory */ | ||
2744 | if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) | ||
2745 | return -ENOMEM; | ||
2746 | |||
2747 | return 0; | ||
2748 | } | ||
2749 | |||
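The sg_dma_buf_size computation above is where the sg_tablesize module parameter feeds back into DMA pool sizing: the per-I/O buffer must hold the FCP command, the FCP response, one BDE per data segment, two extra BDEs for the command and response payloads, and the protection BDEs when BlockGuard is enabled. A minimal sketch of that rule (hypothetical helper, not part of this patch):

static size_t example_sg_dma_buf_size(int sg_seg_cnt, int prot_sg_seg_cnt)
{
        /* command + response payloads always travel in the same buffer */
        size_t len = sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
                     (sg_seg_cnt + 2) * sizeof(struct ulp_bde64);

        /* BlockGuard adds one BDE per protection-data segment */
        if (prot_sg_seg_cnt)
                len += prot_sg_seg_cnt * sizeof(struct ulp_bde64);
        return len;
}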
2750 | /** | ||
2751 | * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev | ||
2752 | * @phba: pointer to lpfc hba data structure. | ||
2753 | * | ||
2754 | * This routine is invoked to unset the driver internal resources set up | ||
2755 | * specific for supporting the SLI-3 HBA device it is attached to. | ||
2756 | **/ | ||
2757 | static void | ||
2758 | lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) | ||
2759 | { | ||
2760 | /* Free device driver memory allocated */ | ||
2761 | lpfc_mem_free_all(phba); | ||
2762 | |||
2763 | return; | ||
2764 | } | ||
2765 | |||
2766 | /** | ||
2767 | * lpfc_init_api_table_setup - Set up init api function jump table | ||
2768 | * @phba: The hba struct for which this call is being executed. | ||
2769 | * @dev_grp: The HBA PCI-Device group number. | ||
2770 | * | ||
2771 | * This routine sets up the device INIT interface API function jump table | ||
2772 | * in @phba struct. | ||
2773 | * | ||
2774 | * Returns: 0 - success, -ENODEV - failure. | ||
2775 | **/ | ||
2776 | int | ||
2777 | lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) | ||
2778 | { | ||
2779 | switch (dev_grp) { | ||
2780 | case LPFC_PCI_DEV_LP: | ||
2781 | phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; | ||
2782 | phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; | ||
2783 | phba->lpfc_stop_port = lpfc_stop_port_s3; | ||
2784 | break; | ||
2785 | default: | ||
2786 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
2787 | "1431 Invalid HBA PCI-device group: 0x%x\n", | ||
2788 | dev_grp); | ||
2789 | return -ENODEV; | ||
2790 | break; | ||
2596 | } | 2791 | } |
2792 | return 0; | ||
2793 | } | ||
2597 | 2794 | ||
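The value of filling in this jump table is that common code never has to test the SLI revision again; it simply calls through the @phba entries installed here. An assumed call site, illustrative only and not taken from this hunk:

        /* SLI-revision agnostic teardown path */
        phba->lpfc_hba_down_post(phba); /* resolves to lpfc_hba_down_post_s3 for LPFC_PCI_DEV_LP */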
2598 | /* Fallback to INTx if both MSI-X/MSI initialization failed */ | 2795 | /** |
2599 | if (phba->intr_type == NONE) { | 2796 | * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources. |
2600 | retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, | 2797 | * @phba: pointer to lpfc hba data structure. |
2601 | IRQF_SHARED, LPFC_DRIVER_NAME, phba); | 2798 | * |
2602 | if (!retval) { | 2799 | * This routine is invoked to set up the driver internal resources before the |
2603 | /* Indicate initialization to INTx mode */ | 2800 | * device specific resource setup to support the HBA device it is attached to. |
2604 | phba->intr_type = INTx; | 2801 | * |
2605 | intr_mode = 0; | 2802 | * Return codes |
2606 | } | 2803 | * 0 - successful |
2804 | * other values - error | ||
2805 | **/ | ||
2806 | static int | ||
2807 | lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) | ||
2808 | { | ||
2809 | /* | ||
2810 | * Driver resources common to all SLI revisions | ||
2811 | */ | ||
2812 | atomic_set(&phba->fast_event_count, 0); | ||
2813 | spin_lock_init(&phba->hbalock); | ||
2814 | |||
2815 | /* Initialize ndlp management spinlock */ | ||
2816 | spin_lock_init(&phba->ndlp_lock); | ||
2817 | |||
2818 | INIT_LIST_HEAD(&phba->port_list); | ||
2819 | INIT_LIST_HEAD(&phba->work_list); | ||
2820 | init_waitqueue_head(&phba->wait_4_mlo_m_q); | ||
2821 | |||
2822 | /* Initialize the wait queue head for the kernel thread */ | ||
2823 | init_waitqueue_head(&phba->work_waitq); | ||
2824 | |||
2825 | /* Initialize the scsi buffer list used by driver for scsi IO */ | ||
2826 | spin_lock_init(&phba->scsi_buf_list_lock); | ||
2827 | INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); | ||
2828 | |||
2829 | /* Initialize the fabric iocb list */ | ||
2830 | INIT_LIST_HEAD(&phba->fabric_iocb_list); | ||
2831 | |||
2832 | /* Initialize list to save ELS buffers */ | ||
2833 | INIT_LIST_HEAD(&phba->elsbuf); | ||
2834 | |||
2835 | /* Initialize FCF connection rec list */ | ||
2836 | INIT_LIST_HEAD(&phba->fcf_conn_rec_list); | ||
2837 | |||
2838 | return 0; | ||
2839 | } | ||
2840 | |||
2841 | /** | ||
2842 | * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. | ||
2843 | * @phba: pointer to lpfc hba data structure. | ||
2844 | * | ||
2845 | * This routine is invoked to set up the driver internal resources after the | ||
2846 | * device specific resource setup to support the HBA device it is attached to. | ||
2847 | * | ||
2848 | * Return codes | ||
2849 | * 0 - successful | ||
2850 | * other values - error | ||
2851 | **/ | ||
2852 | static int | ||
2853 | lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) | ||
2854 | { | ||
2855 | int error; | ||
2856 | |||
2857 | /* Startup the kernel thread for this host adapter. */ | ||
2858 | phba->worker_thread = kthread_run(lpfc_do_work, phba, | ||
2859 | "lpfc_worker_%d", phba->brd_no); | ||
2860 | if (IS_ERR(phba->worker_thread)) { | ||
2861 | error = PTR_ERR(phba->worker_thread); | ||
2862 | return error; | ||
2607 | } | 2863 | } |
2608 | return intr_mode; | 2864 | |
2865 | return 0; | ||
2609 | } | 2866 | } |
2610 | 2867 | ||
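The worker started here is stopped by kthread_stop() in the phase-2 unset routine below, which only works if the thread function honors the usual kthread contract and polls kthread_should_stop(). A generic skeleton of that contract, assuming a made-up lpfc_work_pending() helper (lpfc_do_work itself is defined elsewhere in the driver):

static int example_worker(void *arg)
{
        struct lpfc_hba *phba = arg;

        while (!kthread_should_stop()) {
                wait_event_interruptible(phba->work_waitq,
                                         kthread_should_stop() ||
                                         lpfc_work_pending(phba));
                /* ... service deferred HBA work here ... */
        }
        return 0;       /* returned to the caller of kthread_stop() */
}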
2611 | /** | 2868 | /** |
2612 | * lpfc_disable_intr - Disable device interrupt | 2869 | * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources. |
2613 | * @phba: pointer to lpfc hba data structure. | 2870 | * @phba: pointer to lpfc hba data structure. |
2614 | * | 2871 | * |
2615 | * This routine is invoked to disable device interrupt and disassociate the | 2872 | * This routine is invoked to unset the driver internal resources set up after |
2616 | * driver's interrupt handler(s) from interrupt vector(s). Depending on the | 2873 | * the device specific resource setup for supporting the HBA device it is |
2617 | * interrupt mode, the driver will release the interrupt vector(s) for the | 2874 | * attached to. |
2618 | * message signaled interrupt. | ||
2619 | **/ | 2875 | **/ |
2620 | static void | 2876 | static void |
2621 | lpfc_disable_intr(struct lpfc_hba *phba) | 2877 | lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) |
2622 | { | 2878 | { |
2623 | /* Disable the currently initialized interrupt mode */ | 2879 | /* Stop kernel worker thread */ |
2624 | if (phba->intr_type == MSIX) | 2880 | kthread_stop(phba->worker_thread); |
2625 | lpfc_disable_msix(phba); | 2881 | } |
2626 | else if (phba->intr_type == MSI) | ||
2627 | lpfc_disable_msi(phba); | ||
2628 | else if (phba->intr_type == INTx) | ||
2629 | free_irq(phba->pcidev->irq, phba); | ||
2630 | 2882 | ||
2631 | /* Reset interrupt management states */ | 2883 | /** |
2632 | phba->intr_type = NONE; | 2884 | * lpfc_free_iocb_list - Free iocb list. |
2633 | phba->sli.slistat.sli_intr = 0; | 2885 | * @phba: pointer to lpfc hba data structure. |
2886 | * | ||
2887 | * This routine is invoked to free the driver's IOCB list and memory. | ||
2888 | **/ | ||
2889 | static void | ||
2890 | lpfc_free_iocb_list(struct lpfc_hba *phba) | ||
2891 | { | ||
2892 | struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; | ||
2893 | |||
2894 | spin_lock_irq(&phba->hbalock); | ||
2895 | list_for_each_entry_safe(iocbq_entry, iocbq_next, | ||
2896 | &phba->lpfc_iocb_list, list) { | ||
2897 | list_del(&iocbq_entry->list); | ||
2898 | kfree(iocbq_entry); | ||
2899 | phba->total_iocbq_bufs--; | ||
2900 | } | ||
2901 | spin_unlock_irq(&phba->hbalock); | ||
2634 | 2902 | ||
2635 | return; | 2903 | return; |
2636 | } | 2904 | } |
2637 | 2905 | ||
2638 | /** | 2906 | /** |
2639 | * lpfc_pci_probe_one - lpfc PCI probe func to register device to PCI subsystem | 2907 | * lpfc_init_iocb_list - Allocate and initialize iocb list. |
2640 | * @pdev: pointer to PCI device | 2908 | * @phba: pointer to lpfc hba data structure. |
2641 | * @pid: pointer to PCI device identifier | ||
2642 | * | 2909 | * |
2643 | * This routine is to be registered to the kernel's PCI subsystem. When an | 2910 | * This routine is invoked to allocate and initialize the driver's IOCB |
2644 | * Emulex HBA is presented in PCI bus, the kernel PCI subsystem looks at | 2911 | * list and set up the IOCB tag array accordingly. |
2645 | * PCI device-specific information of the device and driver to see if the | ||
2646 | * driver state that it can support this kind of device. If the match is | ||
2647 | * successful, the driver core invokes this routine. If this routine | ||
2648 | * determines it can claim the HBA, it does all the initialization that it | ||
2649 | * needs to do to handle the HBA properly. | ||
2650 | * | 2912 | * |
2651 | * Return code | 2913 | * Return codes |
2652 | * 0 - driver can claim the device | 2914 | * 0 - successful |
2653 | * negative value - driver can not claim the device | 2915 | * other values - error |
2654 | **/ | 2916 | **/ |
2655 | static int __devinit | 2917 | static int |
2656 | lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | 2918 | lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) |
2657 | { | 2919 | { |
2658 | struct lpfc_vport *vport = NULL; | 2920 | struct lpfc_iocbq *iocbq_entry = NULL; |
2659 | struct lpfc_hba *phba; | ||
2660 | struct lpfc_sli *psli; | ||
2661 | struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; | ||
2662 | struct Scsi_Host *shost = NULL; | ||
2663 | void *ptr; | ||
2664 | unsigned long bar0map_len, bar2map_len; | ||
2665 | int error = -ENODEV, retval; | ||
2666 | int i, hbq_count; | ||
2667 | uint16_t iotag; | 2921 | uint16_t iotag; |
2668 | uint32_t cfg_mode, intr_mode; | 2922 | int i; |
2669 | int bars = pci_select_bars(pdev, IORESOURCE_MEM); | ||
2670 | struct lpfc_adapter_event_header adapter_event; | ||
2671 | 2923 | ||
2672 | if (pci_enable_device_mem(pdev)) | 2924 | /* Initialize and populate the iocb list per host. */ |
2673 | goto out; | 2925 | INIT_LIST_HEAD(&phba->lpfc_iocb_list); |
2674 | if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME)) | 2926 | for (i = 0; i < iocb_count; i++) { |
2675 | goto out_disable_device; | 2927 | iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); |
2928 | if (iocbq_entry == NULL) { | ||
2929 | printk(KERN_ERR "%s: only allocated %d iocbs of " | ||
2930 | "expected %d count. Unloading driver.\n", | ||
2931 | __func__, i, LPFC_IOCB_LIST_CNT); | ||
2932 | goto out_free_iocbq; | ||
2933 | } | ||
2676 | 2934 | ||
2677 | phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL); | 2935 | iotag = lpfc_sli_next_iotag(phba, iocbq_entry); |
2678 | if (!phba) | 2936 | if (iotag == 0) { |
2679 | goto out_release_regions; | 2937 | kfree(iocbq_entry); |
2938 | printk(KERN_ERR "%s: failed to allocate IOTAG. " | ||
2939 | "Unloading driver.\n", __func__); | ||
2940 | goto out_free_iocbq; | ||
2941 | } | ||
2942 | iocbq_entry->sli4_xritag = NO_XRI; | ||
2680 | 2943 | ||
2681 | atomic_set(&phba->fast_event_count, 0); | 2944 | spin_lock_irq(&phba->hbalock); |
2682 | spin_lock_init(&phba->hbalock); | 2945 | list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); |
2946 | phba->total_iocbq_bufs++; | ||
2947 | spin_unlock_irq(&phba->hbalock); | ||
2948 | } | ||
2683 | 2949 | ||
2684 | /* Initialize ndlp management spinlock */ | 2950 | return 0; |
2685 | spin_lock_init(&phba->ndlp_lock); | 2951 | |
2952 | out_free_iocbq: | ||
2953 | lpfc_free_iocb_list(phba); | ||
2686 | 2954 | ||
2955 | return -ENOMEM; | ||
2956 | } | ||
2957 | |||
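Allocation and release of the IOCB pool are intended to be symmetric; the SLI-3 probe and error paths later in this patch pair them roughly as follows (sketch of the call sites, not new code):

        if (lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT))
                goto out_unset_driver_resource_s3;      /* pool already emptied on failure */
        /* ... */
        lpfc_free_iocb_list(phba);                      /* error and remove paths */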
2958 | /** | ||
2959 | * lpfc_hba_alloc - Allocate driver hba data structure for a device. | ||
2960 | * @pdev: pointer to pci device data structure. | ||
2961 | * | ||
2962 | * This routine is invoked to allocate the driver hba data structure for an | ||
2963 | * HBA device. If the allocation is successful, the phba reference to the | ||
2964 | * PCI device data structure is set. | ||
2965 | * | ||
2966 | * Return codes | ||
2967 | * pointer to @phba - successful | ||
2968 | * NULL - error | ||
2969 | **/ | ||
2970 | static struct lpfc_hba * | ||
2971 | lpfc_hba_alloc(struct pci_dev *pdev) | ||
2972 | { | ||
2973 | struct lpfc_hba *phba; | ||
2974 | |||
2975 | /* Allocate memory for HBA structure */ | ||
2976 | phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); | ||
2977 | if (!phba) { | ||
2978 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
2979 | "1417 Failed to allocate hba struct.\n"); | ||
2980 | return NULL; | ||
2981 | } | ||
2982 | |||
2983 | /* Set reference to PCI device in HBA structure */ | ||
2687 | phba->pcidev = pdev; | 2984 | phba->pcidev = pdev; |
2688 | 2985 | ||
2689 | /* Assign an unused board number */ | 2986 | /* Assign an unused board number */ |
2690 | if ((phba->brd_no = lpfc_get_instance()) < 0) | 2987 | phba->brd_no = lpfc_get_instance(); |
2691 | goto out_free_phba; | 2988 | if (phba->brd_no < 0) { |
2989 | kfree(phba); | ||
2990 | return NULL; | ||
2991 | } | ||
2992 | |||
2993 | return phba; | ||
2994 | } | ||
2995 | |||
2996 | /** | ||
2997 | * lpfc_hba_free - Free driver hba data structure with a device. | ||
2998 | * @phba: pointer to lpfc hba data structure. | ||
2999 | * | ||
3000 | * This routine is invoked to free the driver hba data structure with an | ||
3001 | * HBA device. | ||
3002 | **/ | ||
3003 | static void | ||
3004 | lpfc_hba_free(struct lpfc_hba *phba) | ||
3005 | { | ||
3006 | /* Release the driver assigned board number */ | ||
3007 | idr_remove(&lpfc_hba_index, phba->brd_no); | ||
3008 | |||
3009 | kfree(phba); | ||
3010 | return; | ||
3011 | } | ||
3012 | |||
3013 | /** | ||
3014 | * lpfc_create_shost - Create hba physical port with associated scsi host. | ||
3015 | * @phba: pointer to lpfc hba data structure. | ||
3016 | * | ||
3017 | * This routine is invoked to create HBA physical port and associate a SCSI | ||
3018 | * host with it. | ||
3019 | * | ||
3020 | * Return codes | ||
3021 | * 0 - successful | ||
3022 | * other values - error | ||
3023 | **/ | ||
3024 | static int | ||
3025 | lpfc_create_shost(struct lpfc_hba *phba) | ||
3026 | { | ||
3027 | struct lpfc_vport *vport; | ||
3028 | struct Scsi_Host *shost; | ||
3029 | |||
3030 | /* Initialize HBA FC structure */ | ||
3031 | phba->fc_edtov = FF_DEF_EDTOV; | ||
3032 | phba->fc_ratov = FF_DEF_RATOV; | ||
3033 | phba->fc_altov = FF_DEF_ALTOV; | ||
3034 | phba->fc_arbtov = FF_DEF_ARBTOV; | ||
3035 | |||
3036 | vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); | ||
3037 | if (!vport) | ||
3038 | return -ENODEV; | ||
3039 | |||
3040 | shost = lpfc_shost_from_vport(vport); | ||
3041 | phba->pport = vport; | ||
3042 | lpfc_debugfs_initialize(vport); | ||
3043 | /* Put reference to SCSI host to driver's device private data */ | ||
3044 | pci_set_drvdata(phba->pcidev, shost); | ||
3045 | |||
3046 | return 0; | ||
3047 | } | ||
3048 | |||
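Stashing the Scsi_Host pointer in the PCI driver data is what lets every later PCI callback rebuild the driver context from nothing but the pci_dev; the remove path at the end of this patch does exactly this. Sketch of the retrieval (assumed callback context):

        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
        struct lpfc_hba *phba = vport->phba;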
3049 | /** | ||
3050 | * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. | ||
3051 | * @phba: pointer to lpfc hba data structure. | ||
3052 | * | ||
3053 | * This routine is invoked to destroy HBA physical port and the associated | ||
3054 | * SCSI host. | ||
3055 | **/ | ||
3056 | static void | ||
3057 | lpfc_destroy_shost(struct lpfc_hba *phba) | ||
3058 | { | ||
3059 | struct lpfc_vport *vport = phba->pport; | ||
3060 | |||
3061 | /* Destroy physical port that associated with the SCSI host */ | ||
3062 | destroy_port(vport); | ||
3063 | |||
3064 | return; | ||
3065 | } | ||
3066 | |||
3067 | /** | ||
3068 | * lpfc_setup_bg - Setup Block guard structures and debug areas. | ||
3069 | * @phba: pointer to lpfc hba data structure. | ||
3070 | * @shost: the shost to be used to detect Block guard settings. | ||
3071 | * | ||
3072 | * This routine sets up the local Block guard protocol settings for @shost. | ||
3073 | * This routine also allocates memory for debugging bg buffers. | ||
3074 | **/ | ||
3075 | static void | ||
3076 | lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) | ||
3077 | { | ||
3078 | int pagecnt = 10; | ||
3079 | if (lpfc_prot_mask && lpfc_prot_guard) { | ||
3080 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
3081 | "1478 Registering BlockGuard with the " | ||
3082 | "SCSI layer\n"); | ||
3083 | scsi_host_set_prot(shost, lpfc_prot_mask); | ||
3084 | scsi_host_set_guard(shost, lpfc_prot_guard); | ||
3085 | } | ||
3086 | if (!_dump_buf_data) { | ||
3087 | while (pagecnt) { | ||
3088 | spin_lock_init(&_dump_buf_lock); | ||
3089 | _dump_buf_data = | ||
3090 | (char *) __get_free_pages(GFP_KERNEL, pagecnt); | ||
3091 | if (_dump_buf_data) { | ||
3092 | printk(KERN_ERR "BLKGRD allocated %d pages for " | ||
3093 | "_dump_buf_data at 0x%p\n", | ||
3094 | (1 << pagecnt), _dump_buf_data); | ||
3095 | _dump_buf_data_order = pagecnt; | ||
3096 | memset(_dump_buf_data, 0, | ||
3097 | ((1 << PAGE_SHIFT) << pagecnt)); | ||
3098 | break; | ||
3099 | } else | ||
3100 | --pagecnt; | ||
3101 | } | ||
3102 | if (!_dump_buf_data_order) | ||
3103 | printk(KERN_ERR "BLKGRD ERROR unable to allocate " | ||
3104 | "memory for hexdump\n"); | ||
3105 | } else | ||
3106 | printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p" | ||
3107 | "\n", _dump_buf_data); | ||
3108 | if (!_dump_buf_dif) { | ||
3109 | while (pagecnt) { | ||
3110 | _dump_buf_dif = | ||
3111 | (char *) __get_free_pages(GFP_KERNEL, pagecnt); | ||
3112 | if (_dump_buf_dif) { | ||
3113 | printk(KERN_ERR "BLKGRD allocated %d pages for " | ||
3114 | "_dump_buf_dif at 0x%p\n", | ||
3115 | (1 << pagecnt), _dump_buf_dif); | ||
3116 | _dump_buf_dif_order = pagecnt; | ||
3117 | memset(_dump_buf_dif, 0, | ||
3118 | ((1 << PAGE_SHIFT) << pagecnt)); | ||
3119 | break; | ||
3120 | } else | ||
3121 | --pagecnt; | ||
3122 | } | ||
3123 | if (!_dump_buf_dif_order) | ||
3124 | printk(KERN_ERR "BLKGRD ERROR unable to allocate " | ||
3125 | "memory for hexdump\n"); | ||
3126 | } else | ||
3127 | printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n", | ||
3128 | _dump_buf_dif); | ||
3129 | } | ||
3130 | |||
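The dump-buffer setup above uses a common fallback pattern for large physically contiguous allocations: request a high page order first and step the order down until __get_free_pages() succeeds. A stripped-down version of the idea (generic illustration, not lpfc code):

static char *example_alloc_dump_buf(unsigned int *order_out)
{
        unsigned int order = 10;        /* start by asking for 2^10 pages */
        char *buf = NULL;

        while (order &&
               !(buf = (char *)__get_free_pages(GFP_KERNEL, order)))
                --order;
        *order_out = buf ? order : 0;
        return buf;
}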
3131 | /** | ||
3132 | * lpfc_post_init_setup - Perform necessary device post initialization setup. | ||
3133 | * @phba: pointer to lpfc hba data structure. | ||
3134 | * | ||
3135 | * This routine is invoked to perform all the necessary post initialization | ||
3136 | * setup for the device. | ||
3137 | **/ | ||
3138 | static void | ||
3139 | lpfc_post_init_setup(struct lpfc_hba *phba) | ||
3140 | { | ||
3141 | struct Scsi_Host *shost; | ||
3142 | struct lpfc_adapter_event_header adapter_event; | ||
3143 | |||
3144 | /* Get the default values for Model Name and Description */ | ||
3145 | lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); | ||
2692 | 3146 | ||
2693 | INIT_LIST_HEAD(&phba->port_list); | ||
2694 | init_waitqueue_head(&phba->wait_4_mlo_m_q); | ||
2695 | /* | 3147 | /* |
2696 | * Get all the module params for configuring this host and then | 3148 | * hba setup may have changed the hba_queue_depth so we need to |
2697 | * establish the host. | 3149 | * adjust the value of can_queue. |
2698 | */ | 3150 | */ |
2699 | lpfc_get_cfgparam(phba); | 3151 | shost = pci_get_drvdata(phba->pcidev); |
2700 | phba->max_vpi = LPFC_MAX_VPI; | 3152 | shost->can_queue = phba->cfg_hba_queue_depth - 10; |
3153 | if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) | ||
3154 | lpfc_setup_bg(phba, shost); | ||
2701 | 3155 | ||
2702 | /* Initialize timers used by driver */ | 3156 | lpfc_host_attrib_init(shost); |
2703 | init_timer(&phba->hb_tmofunc); | ||
2704 | phba->hb_tmofunc.function = lpfc_hb_timeout; | ||
2705 | phba->hb_tmofunc.data = (unsigned long)phba; | ||
2706 | 3157 | ||
2707 | psli = &phba->sli; | 3158 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { |
2708 | init_timer(&psli->mbox_tmo); | 3159 | spin_lock_irq(shost->host_lock); |
2709 | psli->mbox_tmo.function = lpfc_mbox_timeout; | 3160 | lpfc_poll_start_timer(phba); |
2710 | psli->mbox_tmo.data = (unsigned long) phba; | 3161 | spin_unlock_irq(shost->host_lock); |
2711 | init_timer(&phba->fcp_poll_timer); | 3162 | } |
2712 | phba->fcp_poll_timer.function = lpfc_poll_timeout; | ||
2713 | phba->fcp_poll_timer.data = (unsigned long) phba; | ||
2714 | init_timer(&phba->fabric_block_timer); | ||
2715 | phba->fabric_block_timer.function = lpfc_fabric_block_timeout; | ||
2716 | phba->fabric_block_timer.data = (unsigned long) phba; | ||
2717 | init_timer(&phba->eratt_poll); | ||
2718 | phba->eratt_poll.function = lpfc_poll_eratt; | ||
2719 | phba->eratt_poll.data = (unsigned long) phba; | ||
2720 | 3163 | ||
2721 | pci_set_master(pdev); | 3164 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
2722 | pci_save_state(pdev); | 3165 | "0428 Perform SCSI scan\n"); |
2723 | pci_try_set_mwi(pdev); | 3166 | /* Send board arrival event to upper layer */ |
3167 | adapter_event.event_type = FC_REG_ADAPTER_EVENT; | ||
3168 | adapter_event.subcategory = LPFC_EVENT_ARRIVAL; | ||
3169 | fc_host_post_vendor_event(shost, fc_get_event_number(), | ||
3170 | sizeof(adapter_event), | ||
3171 | (char *) &adapter_event, | ||
3172 | LPFC_NL_VENDOR_ID); | ||
3173 | return; | ||
3174 | } | ||
3175 | |||
3176 | /** | ||
3177 | * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. | ||
3178 | * @phba: pointer to lpfc hba data structure. | ||
3179 | * | ||
3180 | * This routine is invoked to set up the PCI device memory space for device | ||
3181 | * with SLI-3 interface spec. | ||
3182 | * | ||
3183 | * Return codes | ||
3184 | * 0 - successful | ||
3185 | * other values - error | ||
3186 | **/ | ||
3187 | static int | ||
3188 | lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) | ||
3189 | { | ||
3190 | struct pci_dev *pdev; | ||
3191 | unsigned long bar0map_len, bar2map_len; | ||
3192 | int i, hbq_count; | ||
3193 | void *ptr; | ||
3194 | int error = -ENODEV; | ||
2724 | 3195 | ||
2725 | if (pci_set_dma_mask(phba->pcidev, DMA_BIT_MASK(64)) != 0) | 3196 | /* Obtain PCI device reference */ |
2726 | if (pci_set_dma_mask(phba->pcidev, DMA_BIT_MASK(32)) != 0) | 3197 | if (!phba->pcidev) |
2727 | goto out_idr_remove; | 3198 | return error; |
3199 | else | ||
3200 | pdev = phba->pcidev; | ||
2728 | 3201 | ||
2729 | /* | 3202 | /* Set the device DMA mask size */ |
2730 | * Get the bus address of Bar0 and Bar2 and the number of bytes | 3203 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) |
3204 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) | ||
3205 | return error; | ||
3206 | |||
3207 | /* Get the bus address of Bar0 and Bar2 and the number of bytes | ||
2731 | * required by each mapping. | 3208 | * required by each mapping. |
2732 | */ | 3209 | */ |
2733 | phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0); | 3210 | phba->pci_bar0_map = pci_resource_start(pdev, 0); |
2734 | bar0map_len = pci_resource_len(phba->pcidev, 0); | 3211 | bar0map_len = pci_resource_len(pdev, 0); |
2735 | 3212 | ||
2736 | phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2); | 3213 | phba->pci_bar2_map = pci_resource_start(pdev, 2); |
2737 | bar2map_len = pci_resource_len(phba->pcidev, 2); | 3214 | bar2map_len = pci_resource_len(pdev, 2); |
2738 | 3215 | ||
2739 | /* Map HBA SLIM to a kernel virtual address. */ | 3216 | /* Map HBA SLIM to a kernel virtual address. */ |
2740 | phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); | 3217 | phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); |
2741 | if (!phba->slim_memmap_p) { | 3218 | if (!phba->slim_memmap_p) { |
2742 | error = -ENODEV; | ||
2743 | dev_printk(KERN_ERR, &pdev->dev, | 3219 | dev_printk(KERN_ERR, &pdev->dev, |
2744 | "ioremap failed for SLIM memory.\n"); | 3220 | "ioremap failed for SLIM memory.\n"); |
2745 | goto out_idr_remove; | 3221 | goto out; |
2746 | } | 3222 | } |
2747 | 3223 | ||
2748 | /* Map HBA Control Registers to a kernel virtual address. */ | 3224 | /* Map HBA Control Registers to a kernel virtual address. */ |
2749 | phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); | 3225 | phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); |
2750 | if (!phba->ctrl_regs_memmap_p) { | 3226 | if (!phba->ctrl_regs_memmap_p) { |
2751 | error = -ENODEV; | ||
2752 | dev_printk(KERN_ERR, &pdev->dev, | 3227 | dev_printk(KERN_ERR, &pdev->dev, |
2753 | "ioremap failed for HBA control registers.\n"); | 3228 | "ioremap failed for HBA control registers.\n"); |
2754 | goto out_iounmap_slim; | 3229 | goto out_iounmap_slim; |
2755 | } | 3230 | } |
2756 | 3231 | ||
2757 | /* Allocate memory for SLI-2 structures */ | 3232 | /* Allocate memory for SLI-2 structures */ |
2758 | phba->slim2p.virt = dma_alloc_coherent(&phba->pcidev->dev, | 3233 | phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, |
2759 | SLI2_SLIM_SIZE, | 3234 | SLI2_SLIM_SIZE, |
2760 | &phba->slim2p.phys, | 3235 | &phba->slim2p.phys, |
2761 | GFP_KERNEL); | 3236 | GFP_KERNEL); |
@@ -2768,7 +3243,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
2768 | phba->IOCBs = (phba->slim2p.virt + | 3243 | phba->IOCBs = (phba->slim2p.virt + |
2769 | offsetof(struct lpfc_sli2_slim, IOCBs)); | 3244 | offsetof(struct lpfc_sli2_slim, IOCBs)); |
2770 | 3245 | ||
2771 | phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev, | 3246 | phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, |
2772 | lpfc_sli_hbq_size(), | 3247 | lpfc_sli_hbq_size(), |
2773 | &phba->hbqslimp.phys, | 3248 | &phba->hbqslimp.phys, |
2774 | GFP_KERNEL); | 3249 | GFP_KERNEL); |
@@ -2784,115 +3259,487 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
2784 | sizeof(struct lpfc_hbq_entry)); | 3259 | sizeof(struct lpfc_hbq_entry)); |
2785 | } | 3260 | } |
2786 | phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; | 3261 | phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; |
2787 | phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; | 3262 | phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; |
2788 | 3263 | ||
2789 | memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); | 3264 | memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); |
2790 | 3265 | ||
2791 | INIT_LIST_HEAD(&phba->hbqbuf_in_list); | 3266 | INIT_LIST_HEAD(&phba->rb_pend_list); |
2792 | 3267 | ||
2793 | /* Initialize the SLI Layer to run with lpfc HBAs. */ | 3268 | phba->MBslimaddr = phba->slim_memmap_p; |
2794 | lpfc_sli_setup(phba); | 3269 | phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; |
2795 | lpfc_sli_queue_setup(phba); | 3270 | phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; |
3271 | phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; | ||
3272 | phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; | ||
3273 | |||
3274 | return 0; | ||
3275 | |||
3276 | out_free_slim: | ||
3277 | dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, | ||
3278 | phba->slim2p.virt, phba->slim2p.phys); | ||
3279 | out_iounmap: | ||
3280 | iounmap(phba->ctrl_regs_memmap_p); | ||
3281 | out_iounmap_slim: | ||
3282 | iounmap(phba->slim_memmap_p); | ||
3283 | out: | ||
3284 | return error; | ||
3285 | } | ||
3286 | |||
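The register pointers derived at the end of lpfc_sli_pci_mem_setup() are what the interrupt and polling paths dereference from then on; for instance, checking for an error attention looks roughly like this (illustrative usage, not part of this hunk):

        uint32_t ha_copy = readl(phba->HAregaddr);      /* Host Attention register */

        if (ha_copy & HA_ERATT) {
                /* error attention is set: hand off to the driver's eratt handling */
        }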
3287 | /** | ||
3288 | * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. | ||
3289 | * @phba: pointer to lpfc hba data structure. | ||
3290 | * | ||
3291 | * This routine is invoked to unset the PCI device memory space for device | ||
3292 | * with SLI-3 interface spec. | ||
3293 | **/ | ||
3294 | static void | ||
3295 | lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) | ||
3296 | { | ||
3297 | struct pci_dev *pdev; | ||
3298 | |||
3299 | /* Obtain PCI device reference */ | ||
3300 | if (!phba->pcidev) | ||
3301 | return; | ||
3302 | else | ||
3303 | pdev = phba->pcidev; | ||
3304 | |||
3305 | /* Free coherent DMA memory allocated */ | ||
3306 | dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), | ||
3307 | phba->hbqslimp.virt, phba->hbqslimp.phys); | ||
3308 | dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, | ||
3309 | phba->slim2p.virt, phba->slim2p.phys); | ||
3310 | |||
3311 | /* I/O memory unmap */ | ||
3312 | iounmap(phba->ctrl_regs_memmap_p); | ||
3313 | iounmap(phba->slim_memmap_p); | ||
3314 | |||
3315 | return; | ||
3316 | } | ||
3317 | |||
3318 | /** | ||
3319 | * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device | ||
3320 | * @phba: pointer to lpfc hba data structure. | ||
3321 | * | ||
3322 | * This routine is invoked to enable the MSI-X interrupt vectors to device | ||
3323 | * with SLI-3 interface specs. The kernel function pci_enable_msix() is | ||
3324 | * called to enable the MSI-X vectors. Note that pci_enable_msix(), once | ||
3325 | * invoked, enables either all or nothing, depending on the current | ||
3326 | * availability of PCI vector resources. The device driver is responsible | ||
3327 | * for calling the individual request_irq() to register each MSI-X vector | ||
3328 | * with an interrupt handler, which is done in this function. Note that | ||
3329 | * later when device is unloading, the driver should always call free_irq() | ||
3330 | * on all MSI-X vectors it has done request_irq() on before calling | ||
3331 | * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device | ||
3332 | * will be left with MSI-X enabled and will leak its vectors. | ||
3333 | * | ||
3334 | * Return codes | ||
3335 | * 0 - successful | ||
3336 | * other values - error | ||
3337 | **/ | ||
3338 | static int | ||
3339 | lpfc_sli_enable_msix(struct lpfc_hba *phba) | ||
3340 | { | ||
3341 | int rc, i; | ||
3342 | LPFC_MBOXQ_t *pmb; | ||
3343 | |||
3344 | /* Set up MSI-X multi-message vectors */ | ||
3345 | for (i = 0; i < LPFC_MSIX_VECTORS; i++) | ||
3346 | phba->msix_entries[i].entry = i; | ||
2796 | 3347 | ||
2797 | retval = lpfc_mem_alloc(phba); | 3348 | /* Configure MSI-X capability structure */ |
2798 | if (retval) { | 3349 | rc = pci_enable_msix(phba->pcidev, phba->msix_entries, |
2799 | error = retval; | 3350 | ARRAY_SIZE(phba->msix_entries)); |
2800 | goto out_free_hbqslimp; | 3351 | if (rc) { |
3352 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
3353 | "0420 PCI enable MSI-X failed (%d)\n", rc); | ||
3354 | goto msi_fail_out; | ||
2801 | } | 3355 | } |
3356 | for (i = 0; i < LPFC_MSIX_VECTORS; i++) | ||
3357 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
3358 | "0477 MSI-X entry[%d]: vector=x%x " | ||
3359 | "message=%d\n", i, | ||
3360 | phba->msix_entries[i].vector, | ||
3361 | phba->msix_entries[i].entry); | ||
3362 | /* | ||
3363 | * Assign MSI-X vectors to interrupt handlers | ||
3364 | */ | ||
2802 | 3365 | ||
2803 | /* Initialize and populate the iocb list per host. */ | 3366 | /* vector-0 is associated to slow-path handler */ |
2804 | INIT_LIST_HEAD(&phba->lpfc_iocb_list); | 3367 | rc = request_irq(phba->msix_entries[0].vector, |
2805 | for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) { | 3368 | &lpfc_sli_sp_intr_handler, IRQF_SHARED, |
2806 | iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); | 3369 | LPFC_SP_DRIVER_HANDLER_NAME, phba); |
2807 | if (iocbq_entry == NULL) { | 3370 | if (rc) { |
2808 | printk(KERN_ERR "%s: only allocated %d iocbs of " | 3371 | lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, |
2809 | "expected %d count. Unloading driver.\n", | 3372 | "0421 MSI-X slow-path request_irq failed " |
2810 | __func__, i, LPFC_IOCB_LIST_CNT); | 3373 | "(%d)\n", rc); |
2811 | error = -ENOMEM; | 3374 | goto msi_fail_out; |
2812 | goto out_free_iocbq; | 3375 | } |
3376 | |||
3377 | /* vector-1 is associated to fast-path handler */ | ||
3378 | rc = request_irq(phba->msix_entries[1].vector, | ||
3379 | &lpfc_sli_fp_intr_handler, IRQF_SHARED, | ||
3380 | LPFC_FP_DRIVER_HANDLER_NAME, phba); | ||
3381 | |||
3382 | if (rc) { | ||
3383 | lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, | ||
3384 | "0429 MSI-X fast-path request_irq failed " | ||
3385 | "(%d)\n", rc); | ||
3386 | goto irq_fail_out; | ||
3387 | } | ||
3388 | |||
3389 | /* | ||
3390 | * Configure HBA MSI-X attention conditions to messages | ||
3391 | */ | ||
3392 | pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
3393 | |||
3394 | if (!pmb) { | ||
3395 | rc = -ENOMEM; | ||
3396 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
3397 | "0474 Unable to allocate memory for issuing " | ||
3398 | "MBOX_CONFIG_MSI command\n"); | ||
3399 | goto mem_fail_out; | ||
3400 | } | ||
3401 | rc = lpfc_config_msi(phba, pmb); | ||
3402 | if (rc) | ||
3403 | goto mbx_fail_out; | ||
3404 | rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); | ||
3405 | if (rc != MBX_SUCCESS) { | ||
3406 | lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, | ||
3407 | "0351 Config MSI mailbox command failed, " | ||
3408 | "mbxCmd x%x, mbxStatus x%x\n", | ||
3409 | pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); | ||
3410 | goto mbx_fail_out; | ||
3411 | } | ||
3412 | |||
3413 | /* Free memory allocated for mailbox command */ | ||
3414 | mempool_free(pmb, phba->mbox_mem_pool); | ||
3415 | return rc; | ||
3416 | |||
3417 | mbx_fail_out: | ||
3418 | /* Free memory allocated for mailbox command */ | ||
3419 | mempool_free(pmb, phba->mbox_mem_pool); | ||
3420 | |||
3421 | mem_fail_out: | ||
3422 | /* free the irq already requested */ | ||
3423 | free_irq(phba->msix_entries[1].vector, phba); | ||
3424 | |||
3425 | irq_fail_out: | ||
3426 | /* free the irq already requested */ | ||
3427 | free_irq(phba->msix_entries[0].vector, phba); | ||
3428 | |||
3429 | msi_fail_out: | ||
3430 | /* Unconfigure MSI-X capability structure */ | ||
3431 | pci_disable_msix(phba->pcidev); | ||
3432 | return rc; | ||
3433 | } | ||
3434 | |||
3435 | /** | ||
3436 | * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device. | ||
3437 | * @phba: pointer to lpfc hba data structure. | ||
3438 | * | ||
3439 | * This routine is invoked to release the MSI-X vectors and then disable the | ||
3440 | * MSI-X interrupt mode to device with SLI-3 interface spec. | ||
3441 | **/ | ||
3442 | static void | ||
3443 | lpfc_sli_disable_msix(struct lpfc_hba *phba) | ||
3444 | { | ||
3445 | int i; | ||
3446 | |||
3447 | /* Free up MSI-X multi-message vectors */ | ||
3448 | for (i = 0; i < LPFC_MSIX_VECTORS; i++) | ||
3449 | free_irq(phba->msix_entries[i].vector, phba); | ||
3450 | /* Disable MSI-X */ | ||
3451 | pci_disable_msix(phba->pcidev); | ||
3452 | |||
3453 | return; | ||
3454 | } | ||
3455 | |||
3456 | /** | ||
3457 | * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device. | ||
3458 | * @phba: pointer to lpfc hba data structure. | ||
3459 | * | ||
3460 | * This routine is invoked to enable the MSI interrupt mode to device with | ||
3461 | * SLI-3 interface spec. The kernel function pci_enable_msi() is called to | ||
3462 | * enable the MSI vector. The device driver is responsible for calling | ||
3463 | * request_irq() to register the MSI vector with an interrupt handler, which | ||
3464 | * is done in this function. | ||
3465 | * | ||
3466 | * Return codes | ||
3467 | * 0 - successful | ||
3468 | * other values - error | ||
3469 | */ | ||
3470 | static int | ||
3471 | lpfc_sli_enable_msi(struct lpfc_hba *phba) | ||
3472 | { | ||
3473 | int rc; | ||
3474 | |||
3475 | rc = pci_enable_msi(phba->pcidev); | ||
3476 | if (!rc) | ||
3477 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
3478 | "0462 PCI enable MSI mode success.\n"); | ||
3479 | else { | ||
3480 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
3481 | "0471 PCI enable MSI mode failed (%d)\n", rc); | ||
3482 | return rc; | ||
3483 | } | ||
3484 | |||
3485 | rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, | ||
3486 | IRQF_SHARED, LPFC_DRIVER_NAME, phba); | ||
3487 | if (rc) { | ||
3488 | pci_disable_msi(phba->pcidev); | ||
3489 | lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, | ||
3490 | "0478 MSI request_irq failed (%d)\n", rc); | ||
3491 | } | ||
3492 | return rc; | ||
3493 | } | ||
3494 | |||
3495 | /** | ||
3496 | * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device. | ||
3497 | * @phba: pointer to lpfc hba data structure. | ||
3498 | * | ||
3499 | * This routine is invoked to disable the MSI interrupt mode to device with | ||
3500 | * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has | ||
3501 | * done request_irq() on before calling pci_disable_msi(). Failure to do so | ||
3502 | * results in a BUG_ON() and a device will be left with MSI enabled and will leak | ||
3503 | * its vector. | ||
3504 | */ | ||
3505 | static void | ||
3506 | lpfc_sli_disable_msi(struct lpfc_hba *phba) | ||
3507 | { | ||
3508 | free_irq(phba->pcidev->irq, phba); | ||
3509 | pci_disable_msi(phba->pcidev); | ||
3510 | return; | ||
3511 | } | ||
3512 | |||
3513 | /** | ||
3514 | * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device. | ||
3515 | * @phba: pointer to lpfc hba data structure. | ||
3516 | * | ||
3517 | * This routine is invoked to enable device interrupt and associate driver's | ||
3518 | * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface | ||
3519 | * spec. Depending on the interrupt mode configured in the driver, the driver | ||
3520 | * will try to fall back from the configured interrupt mode to an interrupt | ||
3521 | * mode which is supported by the platform, kernel, and device in the order | ||
3522 | * of: | ||
3523 | * MSI-X -> MSI -> IRQ. | ||
3524 | * | ||
3525 | * Return codes | ||
3526 | * 0 - successful | ||
3527 | * other values - error | ||
3528 | **/ | ||
3529 | static uint32_t | ||
3530 | lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) | ||
3531 | { | ||
3532 | uint32_t intr_mode = LPFC_INTR_ERROR; | ||
3533 | int retval; | ||
3534 | |||
3535 | if (cfg_mode == 2) { | ||
3536 | /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ | ||
3537 | retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); | ||
3538 | if (!retval) { | ||
3539 | /* Now, try to enable MSI-X interrupt mode */ | ||
3540 | retval = lpfc_sli_enable_msix(phba); | ||
3541 | if (!retval) { | ||
3542 | /* Indicate initialization to MSI-X mode */ | ||
3543 | phba->intr_type = MSIX; | ||
3544 | intr_mode = 2; | ||
3545 | } | ||
2813 | } | 3546 | } |
3547 | } | ||
2814 | 3548 | ||
2815 | iotag = lpfc_sli_next_iotag(phba, iocbq_entry); | 3549 | /* Fallback to MSI if MSI-X initialization failed */ |
2816 | if (iotag == 0) { | 3550 | if (cfg_mode >= 1 && phba->intr_type == NONE) { |
2817 | kfree (iocbq_entry); | 3551 | retval = lpfc_sli_enable_msi(phba); |
2818 | printk(KERN_ERR "%s: failed to allocate IOTAG. " | 3552 | if (!retval) { |
2819 | "Unloading driver.\n", | 3553 | /* Indicate initialization to MSI mode */ |
2820 | __func__); | 3554 | phba->intr_type = MSI; |
2821 | error = -ENOMEM; | 3555 | intr_mode = 1; |
2822 | goto out_free_iocbq; | ||
2823 | } | 3556 | } |
3557 | } | ||
2824 | 3558 | ||
2825 | spin_lock_irq(&phba->hbalock); | 3559 | /* Fallback to INTx if both MSI-X/MSI initialization failed */ |
2826 | list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); | 3560 | if (phba->intr_type == NONE) { |
2827 | phba->total_iocbq_bufs++; | 3561 | retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, |
2828 | spin_unlock_irq(&phba->hbalock); | 3562 | IRQF_SHARED, LPFC_DRIVER_NAME, phba); |
3563 | if (!retval) { | ||
3564 | /* Indicate initialization to INTx mode */ | ||
3565 | phba->intr_type = INTx; | ||
3566 | intr_mode = 0; | ||
3567 | } | ||
2829 | } | 3568 | } |
3569 | return intr_mode; | ||
3570 | } | ||
2830 | 3571 | ||
2831 | /* Initialize HBA structure */ | 3572 | /** |
2832 | phba->fc_edtov = FF_DEF_EDTOV; | 3573 | * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. |
2833 | phba->fc_ratov = FF_DEF_RATOV; | 3574 | * @phba: pointer to lpfc hba data structure. |
2834 | phba->fc_altov = FF_DEF_ALTOV; | 3575 | * |
2835 | phba->fc_arbtov = FF_DEF_ARBTOV; | 3576 | * This routine is invoked to disable device interrupt and disassociate the |
3577 | * driver's interrupt handler(s) from interrupt vector(s) to device with | ||
3578 | * SLI-3 interface spec. Depending on the interrupt mode, the driver will | ||
3579 | * release the interrupt vector(s) for the message signaled interrupt. | ||
3580 | **/ | ||
3581 | static void | ||
3582 | lpfc_sli_disable_intr(struct lpfc_hba *phba) | ||
3583 | { | ||
3584 | /* Disable the currently initialized interrupt mode */ | ||
3585 | if (phba->intr_type == MSIX) | ||
3586 | lpfc_sli_disable_msix(phba); | ||
3587 | else if (phba->intr_type == MSI) | ||
3588 | lpfc_sli_disable_msi(phba); | ||
3589 | else if (phba->intr_type == INTx) | ||
3590 | free_irq(phba->pcidev->irq, phba); | ||
2836 | 3591 | ||
2837 | INIT_LIST_HEAD(&phba->work_list); | 3592 | /* Reset interrupt management states */ |
2838 | phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); | 3593 | phba->intr_type = NONE; |
2839 | phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); | 3594 | phba->sli.slistat.sli_intr = 0; |
2840 | 3595 | ||
2841 | /* Initialize the wait queue head for the kernel thread */ | 3596 | return; |
2842 | init_waitqueue_head(&phba->work_waitq); | 3597 | } |
2843 | 3598 | ||
2844 | /* Startup the kernel thread for this host adapter. */ | 3599 | /** |
2845 | phba->worker_thread = kthread_run(lpfc_do_work, phba, | 3600 | * lpfc_unset_hba - Unset SLI3 hba device initialization |
2846 | "lpfc_worker_%d", phba->brd_no); | 3601 | * @phba: pointer to lpfc hba data structure. |
2847 | if (IS_ERR(phba->worker_thread)) { | 3602 | * |
2848 | error = PTR_ERR(phba->worker_thread); | 3603 | * This routine is invoked to unset the HBA device initialization steps to |
2849 | goto out_free_iocbq; | 3604 | * a device with SLI-3 interface spec. |
3605 | **/ | ||
3606 | static void | ||
3607 | lpfc_unset_hba(struct lpfc_hba *phba) | ||
3608 | { | ||
3609 | struct lpfc_vport *vport = phba->pport; | ||
3610 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
3611 | |||
3612 | spin_lock_irq(shost->host_lock); | ||
3613 | vport->load_flag |= FC_UNLOADING; | ||
3614 | spin_unlock_irq(shost->host_lock); | ||
3615 | |||
3616 | lpfc_stop_hba_timers(phba); | ||
3617 | |||
3618 | phba->pport->work_port_events = 0; | ||
3619 | |||
3620 | lpfc_sli_hba_down(phba); | ||
3621 | |||
3622 | lpfc_sli_brdrestart(phba); | ||
3623 | |||
3624 | lpfc_sli_disable_intr(phba); | ||
3625 | |||
3626 | return; | ||
3627 | } | ||
3628 | |||
3629 | /** | ||
3630 | * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. | ||
3631 | * @pdev: pointer to PCI device | ||
3632 | * @pid: pointer to PCI device identifier | ||
3633 | * | ||
3634 | * This routine is to be called to attach a device with SLI-3 interface spec | ||
3635 | * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is | ||
3636 | * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific | ||
3637 | * information of the device and driver to see if the driver states that it can | ||
3638 | * support this kind of device. If the match is successful, the driver core | ||
3639 | * invokes this routine. If this routine determines it can claim the HBA, it | ||
3640 | * does all the initialization that it needs to do to handle the HBA properly. | ||
3641 | * | ||
3642 | * Return code | ||
3643 | * 0 - driver can claim the device | ||
3644 | * negative value - driver can not claim the device | ||
3645 | **/ | ||
3646 | static int __devinit | ||
3647 | lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) | ||
3648 | { | ||
3649 | struct lpfc_hba *phba; | ||
3650 | struct lpfc_vport *vport = NULL; | ||
3651 | int error; | ||
3652 | uint32_t cfg_mode, intr_mode; | ||
3653 | |||
3654 | /* Allocate memory for HBA structure */ | ||
3655 | phba = lpfc_hba_alloc(pdev); | ||
3656 | if (!phba) | ||
3657 | return -ENOMEM; | ||
3658 | |||
3659 | /* Perform generic PCI device enabling operation */ | ||
3660 | error = lpfc_enable_pci_dev(phba); | ||
3661 | if (error) { | ||
3662 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
3663 | "1401 Failed to enable pci device.\n"); | ||
3664 | goto out_free_phba; | ||
2850 | } | 3665 | } |
2851 | 3666 | ||
2852 | /* Initialize the list of scsi buffers used by driver for scsi IO. */ | 3667 | /* Set up SLI API function jump table for PCI-device group-0 HBAs */ |
2853 | spin_lock_init(&phba->scsi_buf_list_lock); | 3668 | error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); |
2854 | INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); | 3669 | if (error) |
3670 | goto out_disable_pci_dev; | ||
2855 | 3671 | ||
2856 | /* Initialize list of fabric iocbs */ | 3672 | /* Set up SLI-3 specific device PCI memory space */ |
2857 | INIT_LIST_HEAD(&phba->fabric_iocb_list); | 3673 | error = lpfc_sli_pci_mem_setup(phba); |
3674 | if (error) { | ||
3675 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
3676 | "1402 Failed to set up pci memory space.\n"); | ||
3677 | goto out_disable_pci_dev; | ||
3678 | } | ||
2858 | 3679 | ||
2859 | /* Initialize list to save ELS buffers */ | 3680 | /* Set up phase-1 common device driver resources */ |
2860 | INIT_LIST_HEAD(&phba->elsbuf); | 3681 | error = lpfc_setup_driver_resource_phase1(phba); |
3682 | if (error) { | ||
3683 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
3684 | "1403 Failed to set up driver resource.\n"); | ||
3685 | goto out_unset_pci_mem_s3; | ||
3686 | } | ||
2861 | 3687 | ||
2862 | vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); | 3688 | /* Set up SLI-3 specific device driver resources */ |
2863 | if (!vport) | 3689 | error = lpfc_sli_driver_resource_setup(phba); |
2864 | goto out_kthread_stop; | 3690 | if (error) { |
3691 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
3692 | "1404 Failed to set up driver resource.\n"); | ||
3693 | goto out_unset_pci_mem_s3; | ||
3694 | } | ||
2865 | 3695 | ||
2866 | shost = lpfc_shost_from_vport(vport); | 3696 | /* Initialize and populate the iocb list per host */ |
2867 | phba->pport = vport; | 3697 | error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); |
2868 | lpfc_debugfs_initialize(vport); | 3698 | if (error) { |
3699 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
3700 | "1405 Failed to initialize iocb list.\n"); | ||
3701 | goto out_unset_driver_resource_s3; | ||
3702 | } | ||
2869 | 3703 | ||
2870 | pci_set_drvdata(pdev, shost); | 3704 | /* Set up common device driver resources */ |
3705 | error = lpfc_setup_driver_resource_phase2(phba); | ||
3706 | if (error) { | ||
3707 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
3708 | "1406 Failed to set up driver resource.\n"); | ||
3709 | goto out_free_iocb_list; | ||
3710 | } | ||
2871 | 3711 | ||
2872 | phba->MBslimaddr = phba->slim_memmap_p; | 3712 | /* Create SCSI host to the physical port */ |
2873 | phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; | 3713 | error = lpfc_create_shost(phba); |
2874 | phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; | 3714 | if (error) { |
2875 | phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; | 3715 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
2876 | phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; | 3716 | "1407 Failed to create scsi host.\n"); |
3717 | goto out_unset_driver_resource; | ||
3718 | } | ||
2877 | 3719 | ||
2878 | /* Configure sysfs attributes */ | 3720 | /* Configure sysfs attributes */ |
2879 | if (lpfc_alloc_sysfs_attr(vport)) { | 3721 | vport = phba->pport; |
3722 | error = lpfc_alloc_sysfs_attr(vport); | ||
3723 | if (error) { | ||
2880 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 3724 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
2881 | "1476 Failed to allocate sysfs attr\n"); | 3725 | "1476 Failed to allocate sysfs attr\n"); |
2882 | error = -ENOMEM; | 3726 | goto out_destroy_shost; |
2883 | goto out_destroy_port; | ||
2884 | } | 3727 | } |
2885 | 3728 | ||
3729 | /* Now, trying to enable interrupt and bring up the device */ | ||
2886 | cfg_mode = phba->cfg_use_msi; | 3730 | cfg_mode = phba->cfg_use_msi; |
2887 | while (true) { | 3731 | while (true) { |
3732 | /* Put device to a known state before enabling interrupt */ | ||
3733 | lpfc_stop_port(phba); | ||
2888 | /* Configure and enable interrupt */ | 3734 | /* Configure and enable interrupt */ |
2889 | intr_mode = lpfc_enable_intr(phba, cfg_mode); | 3735 | intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); |
2890 | if (intr_mode == LPFC_INTR_ERROR) { | 3736 | if (intr_mode == LPFC_INTR_ERROR) { |
2891 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 3737 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
2892 | "0426 Failed to enable interrupt.\n"); | 3738 | "0431 Failed to enable interrupt.\n"); |
3739 | error = -ENODEV; | ||
2893 | goto out_free_sysfs_attr; | 3740 | goto out_free_sysfs_attr; |
2894 | } | 3741 | } |
2895 | /* HBA SLI setup */ | 3742 | /* SLI-3 HBA setup */ |
2896 | if (lpfc_sli_hba_setup(phba)) { | 3743 | if (lpfc_sli_hba_setup(phba)) { |
2897 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 3744 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
2898 | "1477 Failed to set up hba\n"); | 3745 | "1477 Failed to set up hba\n"); |
@@ -2902,185 +3749,65 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
2902 | 3749 | ||
2903 | /* Wait 50ms for the interrupts of previous mailbox commands */ | 3750 | /* Wait 50ms for the interrupts of previous mailbox commands */ |
2904 | msleep(50); | 3751 | msleep(50); |
2905 | /* Check active interrupts received */ | 3752 | /* Check active interrupts on message signaled interrupts */ |
2906 | if (phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { | 3753 | if (intr_mode == 0 || |
3754 | phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { | ||
2907 | /* Log the current active interrupt mode */ | 3755 | /* Log the current active interrupt mode */ |
2908 | phba->intr_mode = intr_mode; | 3756 | phba->intr_mode = intr_mode; |
2909 | lpfc_log_intr_mode(phba, intr_mode); | 3757 | lpfc_log_intr_mode(phba, intr_mode); |
2910 | break; | 3758 | break; |
2911 | } else { | 3759 | } else { |
2912 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | 3760 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
2913 | "0451 Configure interrupt mode (%d) " | 3761 | "0447 Configure interrupt mode (%d) " |
2914 | "failed active interrupt test.\n", | 3762 | "failed active interrupt test.\n", |
2915 | intr_mode); | 3763 | intr_mode); |
2916 | if (intr_mode == 0) { | ||
2917 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
2918 | "0479 Failed to enable " | ||
2919 | "interrupt.\n"); | ||
2920 | error = -ENODEV; | ||
2921 | goto out_remove_device; | ||
2922 | } | ||
2923 | /* Stop HBA SLI setups */ | ||
2924 | lpfc_stop_port(phba); | ||
2925 | /* Disable the current interrupt mode */ | 3764 | /* Disable the current interrupt mode */ |
2926 | lpfc_disable_intr(phba); | 3765 | lpfc_sli_disable_intr(phba); |
2927 | /* Try next level of interrupt mode */ | 3766 | /* Try next level of interrupt mode */ |
2928 | cfg_mode = --intr_mode; | 3767 | cfg_mode = --intr_mode; |
2929 | } | 3768 | } |
2930 | } | 3769 | } |
2931 | 3770 | ||
2932 | /* | 3771 | /* Perform post initialization setup */ |
2933 | * hba setup may have changed the hba_queue_depth so we need to adjust | 3772 | lpfc_post_init_setup(phba); |
2934 | * the value of can_queue. | ||
2935 | */ | ||
2936 | shost->can_queue = phba->cfg_hba_queue_depth - 10; | ||
2937 | if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { | ||
2938 | |||
2939 | if (lpfc_prot_mask && lpfc_prot_guard) { | ||
2940 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
2941 | "1478 Registering BlockGuard with the " | ||
2942 | "SCSI layer\n"); | ||
2943 | 3773 | ||
2944 | scsi_host_set_prot(shost, lpfc_prot_mask); | 3774 | /* Check if there are static vports to be created. */ |
2945 | scsi_host_set_guard(shost, lpfc_prot_guard); | 3775 | lpfc_create_static_vport(phba); |
2946 | } | ||
2947 | } | ||
2948 | |||
2949 | if (!_dump_buf_data) { | ||
2950 | int pagecnt = 10; | ||
2951 | while (pagecnt) { | ||
2952 | spin_lock_init(&_dump_buf_lock); | ||
2953 | _dump_buf_data = | ||
2954 | (char *) __get_free_pages(GFP_KERNEL, pagecnt); | ||
2955 | if (_dump_buf_data) { | ||
2956 | printk(KERN_ERR "BLKGRD allocated %d pages for " | ||
2957 | "_dump_buf_data at 0x%p\n", | ||
2958 | (1 << pagecnt), _dump_buf_data); | ||
2959 | _dump_buf_data_order = pagecnt; | ||
2960 | memset(_dump_buf_data, 0, ((1 << PAGE_SHIFT) | ||
2961 | << pagecnt)); | ||
2962 | break; | ||
2963 | } else { | ||
2964 | --pagecnt; | ||
2965 | } | ||
2966 | |||
2967 | } | ||
2968 | |||
2969 | if (!_dump_buf_data_order) | ||
2970 | printk(KERN_ERR "BLKGRD ERROR unable to allocate " | ||
2971 | "memory for hexdump\n"); | ||
2972 | |||
2973 | } else { | ||
2974 | printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p" | ||
2975 | "\n", _dump_buf_data); | ||
2976 | } | ||
2977 | |||
2978 | |||
2979 | if (!_dump_buf_dif) { | ||
2980 | int pagecnt = 10; | ||
2981 | while (pagecnt) { | ||
2982 | _dump_buf_dif = | ||
2983 | (char *) __get_free_pages(GFP_KERNEL, pagecnt); | ||
2984 | if (_dump_buf_dif) { | ||
2985 | printk(KERN_ERR "BLKGRD allocated %d pages for " | ||
2986 | "_dump_buf_dif at 0x%p\n", | ||
2987 | (1 << pagecnt), _dump_buf_dif); | ||
2988 | _dump_buf_dif_order = pagecnt; | ||
2989 | memset(_dump_buf_dif, 0, ((1 << PAGE_SHIFT) | ||
2990 | << pagecnt)); | ||
2991 | break; | ||
2992 | } else { | ||
2993 | --pagecnt; | ||
2994 | } | ||
2995 | |||
2996 | } | ||
2997 | |||
2998 | if (!_dump_buf_dif_order) | ||
2999 | printk(KERN_ERR "BLKGRD ERROR unable to allocate " | ||
3000 | "memory for hexdump\n"); | ||
3001 | |||
3002 | } else { | ||
3003 | printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n", | ||
3004 | _dump_buf_dif); | ||
3005 | } | ||
3006 | |||
3007 | lpfc_host_attrib_init(shost); | ||
3008 | |||
3009 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { | ||
3010 | spin_lock_irq(shost->host_lock); | ||
3011 | lpfc_poll_start_timer(phba); | ||
3012 | spin_unlock_irq(shost->host_lock); | ||
3013 | } | ||
3014 | |||
3015 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
3016 | "0428 Perform SCSI scan\n"); | ||
3017 | /* Send board arrival event to upper layer */ | ||
3018 | adapter_event.event_type = FC_REG_ADAPTER_EVENT; | ||
3019 | adapter_event.subcategory = LPFC_EVENT_ARRIVAL; | ||
3020 | fc_host_post_vendor_event(shost, fc_get_event_number(), | ||
3021 | sizeof(adapter_event), | ||
3022 | (char *) &adapter_event, | ||
3023 | LPFC_NL_VENDOR_ID); | ||
3024 | 3776 | ||
3025 | return 0; | 3777 | return 0; |
3026 | 3778 | ||
3027 | out_remove_device: | 3779 | out_remove_device: |
3028 | spin_lock_irq(shost->host_lock); | 3780 | lpfc_unset_hba(phba); |
3029 | vport->load_flag |= FC_UNLOADING; | ||
3030 | spin_unlock_irq(shost->host_lock); | ||
3031 | lpfc_stop_phba_timers(phba); | ||
3032 | phba->pport->work_port_events = 0; | ||
3033 | lpfc_disable_intr(phba); | ||
3034 | lpfc_sli_hba_down(phba); | ||
3035 | lpfc_sli_brdrestart(phba); | ||
3036 | out_free_sysfs_attr: | 3781 | out_free_sysfs_attr: |
3037 | lpfc_free_sysfs_attr(vport); | 3782 | lpfc_free_sysfs_attr(vport); |
3038 | out_destroy_port: | 3783 | out_destroy_shost: |
3039 | destroy_port(vport); | 3784 | lpfc_destroy_shost(phba); |
3040 | out_kthread_stop: | 3785 | out_unset_driver_resource: |
3041 | kthread_stop(phba->worker_thread); | 3786 | lpfc_unset_driver_resource_phase2(phba); |
3042 | out_free_iocbq: | 3787 | out_free_iocb_list: |
3043 | list_for_each_entry_safe(iocbq_entry, iocbq_next, | 3788 | lpfc_free_iocb_list(phba); |
3044 | &phba->lpfc_iocb_list, list) { | 3789 | out_unset_driver_resource_s3: |
3045 | kfree(iocbq_entry); | 3790 | lpfc_sli_driver_resource_unset(phba); |
3046 | phba->total_iocbq_bufs--; | 3791 | out_unset_pci_mem_s3: |
3047 | } | 3792 | lpfc_sli_pci_mem_unset(phba); |
3048 | lpfc_mem_free(phba); | 3793 | out_disable_pci_dev: |
3049 | out_free_hbqslimp: | 3794 | lpfc_disable_pci_dev(phba); |
3050 | dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), | ||
3051 | phba->hbqslimp.virt, phba->hbqslimp.phys); | ||
3052 | out_free_slim: | ||
3053 | dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, | ||
3054 | phba->slim2p.virt, phba->slim2p.phys); | ||
3055 | out_iounmap: | ||
3056 | iounmap(phba->ctrl_regs_memmap_p); | ||
3057 | out_iounmap_slim: | ||
3058 | iounmap(phba->slim_memmap_p); | ||
3059 | out_idr_remove: | ||
3060 | idr_remove(&lpfc_hba_index, phba->brd_no); | ||
3061 | out_free_phba: | 3795 | out_free_phba: |
3062 | kfree(phba); | 3796 | lpfc_hba_free(phba); |
3063 | out_release_regions: | ||
3064 | pci_release_selected_regions(pdev, bars); | ||
3065 | out_disable_device: | ||
3066 | pci_disable_device(pdev); | ||
3067 | out: | ||
3068 | pci_set_drvdata(pdev, NULL); | ||
3069 | if (shost) | ||
3070 | scsi_host_put(shost); | ||
3071 | return error; | 3797 | return error; |
3072 | } | 3798 | } |
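The error path above is reworked: instead of open-coding the teardown (freeing IOCBs, unmapping SLIM, releasing PCI regions), each failure now jumps to a label that calls a phase-specific undo helper, in reverse order of setup. A compact sketch of that goto-unwind convention, with init_phase*/undo_phase* as hypothetical stand-ins for the lpfc helpers:

#include <errno.h>

/* Hypothetical setup/teardown pairs standing in for the lpfc set/unset helpers. */
static int init_phase1(void) { return 0; }
static int init_phase2(void) { return 0; }
static int init_phase3(void) { return -ENOMEM; }	/* pretend this step fails */
static void undo_phase2(void) { }
static void undo_phase1(void) { }

/*
 * Classic probe-style unwind: every later failure jumps to the label that
 * undoes everything already set up, in reverse order, so each error exit
 * releases exactly what was acquired.
 */
static int probe_like(void)
{
	int error;

	error = init_phase1();
	if (error)
		goto out;
	error = init_phase2();
	if (error)
		goto out_undo_phase1;
	error = init_phase3();
	if (error)
		goto out_undo_phase2;
	return 0;

out_undo_phase2:
	undo_phase2();
out_undo_phase1:
	undo_phase1();
out:
	return error;
}

int main(void)
{
	return probe_like() ? 1 : 0;
}

The benefit is that every exit releases exactly what the earlier phases acquired, which is why the new labels mirror the setup sequence one-for-one.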
3073 | 3799 | ||
3074 | /** | 3800 | /** |
3075 | * lpfc_pci_remove_one - lpfc PCI func to unregister device from PCI subsystem | 3801 | * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem. |
3076 | * @pdev: pointer to PCI device | 3802 | * @pdev: pointer to PCI device |
3077 | * | 3803 | * |
3078 | * This routine is to be registered to the kernel's PCI subsystem. When an | 3804 | * This routine is to be called to detach a device with SLI-3 interface |
3079 | * Emulex HBA is removed from PCI bus, it performs all the necessary cleanup | 3805 | * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is |
3080 | * for the HBA device to be removed from the PCI subsystem properly. | 3806 | * removed from PCI bus, it performs all the necessary cleanup for the HBA |
3807 | * device to be removed from the PCI subsystem properly. | ||

3081 | **/ | 3808 | **/ |
3082 | static void __devexit | 3809 | static void __devexit |
3083 | lpfc_pci_remove_one(struct pci_dev *pdev) | 3810 | lpfc_pci_remove_one_s3(struct pci_dev *pdev) |
3084 | { | 3811 | { |
3085 | struct Scsi_Host *shost = pci_get_drvdata(pdev); | 3812 | struct Scsi_Host *shost = pci_get_drvdata(pdev); |
3086 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | 3813 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; |
@@ -3098,7 +3825,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev) | |||
3098 | /* Release all the vports against this physical port */ | 3825 | /* Release all the vports against this physical port */ |
3099 | vports = lpfc_create_vport_work_array(phba); | 3826 | vports = lpfc_create_vport_work_array(phba); |
3100 | if (vports != NULL) | 3827 | if (vports != NULL) |
3101 | for (i = 1; i <= phba->max_vpi && vports[i] != NULL; i++) | 3828 | for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++) |
3102 | fc_vport_terminate(vports[i]->fc_vport); | 3829 | fc_vport_terminate(vports[i]->fc_vport); |
3103 | lpfc_destroy_vport_work_array(phba, vports); | 3830 | lpfc_destroy_vport_work_array(phba, vports); |
3104 | 3831 | ||
@@ -3120,7 +3847,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev) | |||
3120 | /* Final cleanup of txcmplq and reset the HBA */ | 3847 | /* Final cleanup of txcmplq and reset the HBA */ |
3121 | lpfc_sli_brdrestart(phba); | 3848 | lpfc_sli_brdrestart(phba); |
3122 | 3849 | ||
3123 | lpfc_stop_phba_timers(phba); | 3850 | lpfc_stop_hba_timers(phba); |
3124 | spin_lock_irq(&phba->hbalock); | 3851 | spin_lock_irq(&phba->hbalock); |
3125 | list_del_init(&vport->listentry); | 3852 | list_del_init(&vport->listentry); |
3126 | spin_unlock_irq(&phba->hbalock); | 3853 | spin_unlock_irq(&phba->hbalock); |
@@ -3128,7 +3855,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev) | |||
3128 | lpfc_debugfs_terminate(vport); | 3855 | lpfc_debugfs_terminate(vport); |
3129 | 3856 | ||
3130 | /* Disable interrupt */ | 3857 | /* Disable interrupt */ |
3131 | lpfc_disable_intr(phba); | 3858 | lpfc_sli_disable_intr(phba); |
3132 | 3859 | ||
3133 | pci_set_drvdata(pdev, NULL); | 3860 | pci_set_drvdata(pdev, NULL); |
3134 | scsi_host_put(shost); | 3861 | scsi_host_put(shost); |
@@ -3138,7 +3865,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev) | |||
3138 | * corresponding pools here. | 3865 | * corresponding pools here. |
3139 | */ | 3866 | */ |
3140 | lpfc_scsi_free(phba); | 3867 | lpfc_scsi_free(phba); |
3141 | lpfc_mem_free(phba); | 3868 | lpfc_mem_free_all(phba); |
3142 | 3869 | ||
3143 | dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), | 3870 | dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), |
3144 | phba->hbqslimp.virt, phba->hbqslimp.phys); | 3871 | phba->hbqslimp.virt, phba->hbqslimp.phys); |
@@ -3151,36 +3878,35 @@ lpfc_pci_remove_one(struct pci_dev *pdev) | |||
3151 | iounmap(phba->ctrl_regs_memmap_p); | 3878 | iounmap(phba->ctrl_regs_memmap_p); |
3152 | iounmap(phba->slim_memmap_p); | 3879 | iounmap(phba->slim_memmap_p); |
3153 | 3880 | ||
3154 | idr_remove(&lpfc_hba_index, phba->brd_no); | 3881 | lpfc_hba_free(phba); |
3155 | |||
3156 | kfree(phba); | ||
3157 | 3882 | ||
3158 | pci_release_selected_regions(pdev, bars); | 3883 | pci_release_selected_regions(pdev, bars); |
3159 | pci_disable_device(pdev); | 3884 | pci_disable_device(pdev); |
3160 | } | 3885 | } |
3161 | 3886 | ||
3162 | /** | 3887 | /** |
3163 | * lpfc_pci_suspend_one - lpfc PCI func to suspend device for power management | 3888 | * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt |
3164 | * @pdev: pointer to PCI device | 3889 | * @pdev: pointer to PCI device |
3165 | * @msg: power management message | 3890 | * @msg: power management message |
3166 | * | 3891 | * |
3167 | * This routine is to be registered to the kernel's PCI subsystem to support | 3892 | * This routine is to be called from the kernel's PCI subsystem to support |
3168 | * system Power Management (PM). When PM invokes this method, it quiesces the | 3893 | * system Power Management (PM) to device with SLI-3 interface spec. When |
3169 | * device by stopping the driver's worker thread for the device, turning off | 3894 | * PM invokes this method, it quiesces the device by stopping the driver's |
3170 | * device's interrupt and DMA, and bring the device offline. Note that as the | 3895 | * worker thread for the device, turning off device's interrupt and DMA, |
3171 | * driver implements the minimum PM requirements to a power-aware driver's PM | 3896 | * and bringing the device offline. Note that as the driver implements the |
3172 | * support for suspend/resume -- all the possible PM messages (SUSPEND, | 3897 | * minimum PM requirements to a power-aware driver's PM support for the |
3173 | * HIBERNATE, FREEZE) to the suspend() method call will be treated as SUSPEND | 3898 | * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) |
3174 | * and the driver will fully reinitialize its device during resume() method | 3899 | * to the suspend() method call will be treated as SUSPEND and the driver will |
3175 | * call, the driver will set device to PCI_D3hot state in PCI config space | 3900 | * fully reinitialize its device during resume() method call, the driver will |
3176 | * instead of setting it according to the @msg provided by the PM. | 3901 | * set device to PCI_D3hot state in PCI config space instead of setting it |
3902 | * according to the @msg provided by the PM. | ||
3177 | * | 3903 | * |
3178 | * Return code | 3904 | * Return code |
3179 | * 0 - driver suspended the device | 3905 | * 0 - driver suspended the device |
3180 | * Error otherwise | 3906 | * Error otherwise |
3181 | **/ | 3907 | **/ |
3182 | static int | 3908 | static int |
3183 | lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) | 3909 | lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg) |
3184 | { | 3910 | { |
3185 | struct Scsi_Host *shost = pci_get_drvdata(pdev); | 3911 | struct Scsi_Host *shost = pci_get_drvdata(pdev); |
3186 | struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; | 3912 | struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; |
@@ -3194,7 +3920,7 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) | |||
3194 | kthread_stop(phba->worker_thread); | 3920 | kthread_stop(phba->worker_thread); |
3195 | 3921 | ||
3196 | /* Disable interrupt from device */ | 3922 | /* Disable interrupt from device */ |
3197 | lpfc_disable_intr(phba); | 3923 | lpfc_sli_disable_intr(phba); |
3198 | 3924 | ||
3199 | /* Save device state to PCI config space */ | 3925 | /* Save device state to PCI config space */ |
3200 | pci_save_state(pdev); | 3926 | pci_save_state(pdev); |
@@ -3204,25 +3930,26 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) | |||
3204 | } | 3930 | } |
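As the comment block above explains, suspend treats every PM message as SUSPEND, quiesces the HBA and parks the function in PCI_D3hot; resume reverses that before reinitializing. A bare-bones sketch of that save/restore pairing using only the generic PCI PM helpers (the real lpfc routines additionally stop the worker thread, disable the HBA interrupt and take the port offline/online) might look like:

#include <linux/errno.h>
#include <linux/pci.h>

/* Sketch only: the lpfc callbacks wrap this with HBA-specific quiesce work. */
static int example_suspend(struct pci_dev *pdev, pm_message_t msg)
{
	/* All PM messages are treated as SUSPEND, so @msg is ignored here. */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int example_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	if (pci_enable_device(pdev))
		return -EIO;
	pci_set_master(pdev);
	return 0;
}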
3205 | 3931 | ||
3206 | /** | 3932 | /** |
3207 | * lpfc_pci_resume_one - lpfc PCI func to resume device for power management | 3933 | * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt |
3208 | * @pdev: pointer to PCI device | 3934 | * @pdev: pointer to PCI device |
3209 | * | 3935 | * |
3210 | * This routine is to be registered to the kernel's PCI subsystem to support | 3936 | * This routine is to be called from the kernel's PCI subsystem to support |
3211 | * system Power Management (PM). When PM invokes this method, it restores | 3937 | * system Power Management (PM) to device with SLI-3 interface spec. When PM |
3212 | * the device's PCI config space state and fully reinitializes the device | 3938 | * invokes this method, it restores the device's PCI config space state and |
3213 | * and brings it online. Note that as the driver implements the minimum PM | 3939 | * fully reinitializes the device and brings it online. Note that as the |
3214 | * requirements to a power-aware driver's PM for suspend/resume -- all | 3940 | * driver implements the minimum PM requirements to a power-aware driver's |
3215 | * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() | 3941 | * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, |
3216 | * method call will be treated as SUSPEND and the driver will fully | 3942 | * FREEZE) to the suspend() method call will be treated as SUSPEND and the |
3217 | * reinitialize its device during resume() method call, the device will be | 3943 | * driver will fully reinitialize its device during resume() method call, |
3218 | * set to PCI_D0 directly in PCI config space before restoring the state. | 3944 | * the device will be set to PCI_D0 directly in PCI config space before |
3945 | * restoring the state. | ||
3219 | * | 3946 | * |
3220 | * Return code | 3947 | * Return code |
3221 | * 0 - driver suspended the device | 3948 | * 0 - driver suspended the device |
3222 | * Error otherwise | 3949 | * Error otherwise |
3223 | **/ | 3950 | **/ |
3224 | static int | 3951 | static int |
3225 | lpfc_pci_resume_one(struct pci_dev *pdev) | 3952 | lpfc_pci_resume_one_s3(struct pci_dev *pdev) |
3226 | { | 3953 | { |
3227 | struct Scsi_Host *shost = pci_get_drvdata(pdev); | 3954 | struct Scsi_Host *shost = pci_get_drvdata(pdev); |
3228 | struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; | 3955 | struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; |
@@ -3250,7 +3977,7 @@ lpfc_pci_resume_one(struct pci_dev *pdev) | |||
3250 | } | 3977 | } |
3251 | 3978 | ||
3252 | /* Configure and enable interrupt */ | 3979 | /* Configure and enable interrupt */ |
3253 | intr_mode = lpfc_enable_intr(phba, phba->intr_mode); | 3980 | intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); |
3254 | if (intr_mode == LPFC_INTR_ERROR) { | 3981 | if (intr_mode == LPFC_INTR_ERROR) { |
3255 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 3982 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
3256 | "0430 PM resume Failed to enable interrupt\n"); | 3983 | "0430 PM resume Failed to enable interrupt\n"); |
@@ -3269,23 +3996,24 @@ lpfc_pci_resume_one(struct pci_dev *pdev) | |||
3269 | } | 3996 | } |
3270 | 3997 | ||
3271 | /** | 3998 | /** |
3272 | * lpfc_io_error_detected - Driver method for handling PCI I/O error detected | 3999 | * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error |
3273 | * @pdev: pointer to PCI device. | 4000 | * @pdev: pointer to PCI device. |
3274 | * @state: the current PCI connection state. | 4001 | * @state: the current PCI connection state. |
3275 | * | 4002 | * |
3276 | * This routine is registered to the PCI subsystem for error handling. This | 4003 | * This routine is called from the PCI subsystem for I/O error handling to |
3277 | * function is called by the PCI subsystem after a PCI bus error affecting | 4004 | * device with SLI-3 interface spec. This function is called by the PCI |
3278 | * this device has been detected. When this function is invoked, it will | 4005 | * subsystem after a PCI bus error affecting this device has been detected. |
3279 | * need to stop all the I/Os and interrupt(s) to the device. Once that is | 4006 | * When this function is invoked, it will need to stop all the I/Os and |
3280 | * done, it will return PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to | 4007 | * interrupt(s) to the device. Once that is done, it will return |
3281 | * perform proper recovery as desired. | 4008 | * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery |
4009 | * as desired. | ||
3282 | * | 4010 | * |
3283 | * Return codes | 4011 | * Return codes |
3284 | * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery | 4012 | * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery |
3285 | * PCI_ERS_RESULT_DISCONNECT - device could not be recovered | 4013 | * PCI_ERS_RESULT_DISCONNECT - device could not be recovered |
3286 | **/ | 4014 | **/ |
3287 | static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev, | 4015 | static pci_ers_result_t |
3288 | pci_channel_state_t state) | 4016 | lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) |
3289 | { | 4017 | { |
3290 | struct Scsi_Host *shost = pci_get_drvdata(pdev); | 4018 | struct Scsi_Host *shost = pci_get_drvdata(pdev); |
3291 | struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; | 4019 | struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; |
@@ -3312,30 +4040,32 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev, | |||
3312 | lpfc_sli_abort_iocb_ring(phba, pring); | 4040 | lpfc_sli_abort_iocb_ring(phba, pring); |
3313 | 4041 | ||
3314 | /* Disable interrupt */ | 4042 | /* Disable interrupt */ |
3315 | lpfc_disable_intr(phba); | 4043 | lpfc_sli_disable_intr(phba); |
3316 | 4044 | ||
3317 | /* Request a slot reset. */ | 4045 | /* Request a slot reset. */ |
3318 | return PCI_ERS_RESULT_NEED_RESET; | 4046 | return PCI_ERS_RESULT_NEED_RESET; |
3319 | } | 4047 | } |
3320 | 4048 | ||
3321 | /** | 4049 | /** |
3322 | * lpfc_io_slot_reset - Restart a PCI device from scratch | 4050 | * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. |
3323 | * @pdev: pointer to PCI device. | 4051 | * @pdev: pointer to PCI device. |
3324 | * | 4052 | * |
3325 | * This routine is registered to the PCI subsystem for error handling. This is | 4053 | * This routine is called from the PCI subsystem for error handling to |
3326 | * called after PCI bus has been reset to restart the PCI card from scratch, | 4054 | * device with SLI-3 interface spec. This is called after PCI bus has been |
3327 | * as if from a cold-boot. During the PCI subsystem error recovery, after the | 4055 | * reset to restart the PCI card from scratch, as if from a cold-boot. |
3328 | * driver returns PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform | 4056 | * During the PCI subsystem error recovery, after driver returns |
3329 | * proper error recovery and then call this routine before calling the .resume | 4057 | * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error |
3330 | * method to recover the device. This function will initialize the HBA device, | 4058 | * recovery and then call this routine before calling the .resume method |
3331 | * enable the interrupt, but it will just put the HBA to offline state without | 4059 | * to recover the device. This function will initialize the HBA device, |
3332 | * passing any I/O traffic. | 4060 | * enable the interrupt, but it will just put the HBA to offline state |
4061 | * without passing any I/O traffic. | ||
3333 | * | 4062 | * |
3334 | * Return codes | 4063 | * Return codes |
3335 | * PCI_ERS_RESULT_RECOVERED - the device has been recovered | 4064 | * PCI_ERS_RESULT_RECOVERED - the device has been recovered |
3336 | * PCI_ERS_RESULT_DISCONNECT - device could not be recovered | 4065 | * PCI_ERS_RESULT_DISCONNECT - device could not be recovered |
3337 | */ | 4066 | */ |
3338 | static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) | 4067 | static pci_ers_result_t |
4068 | lpfc_io_slot_reset_s3(struct pci_dev *pdev) | ||
3339 | { | 4069 | { |
3340 | struct Scsi_Host *shost = pci_get_drvdata(pdev); | 4070 | struct Scsi_Host *shost = pci_get_drvdata(pdev); |
3341 | struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; | 4071 | struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; |
@@ -3354,11 +4084,11 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) | |||
3354 | pci_set_master(pdev); | 4084 | pci_set_master(pdev); |
3355 | 4085 | ||
3356 | spin_lock_irq(&phba->hbalock); | 4086 | spin_lock_irq(&phba->hbalock); |
3357 | psli->sli_flag &= ~LPFC_SLI2_ACTIVE; | 4087 | psli->sli_flag &= ~LPFC_SLI_ACTIVE; |
3358 | spin_unlock_irq(&phba->hbalock); | 4088 | spin_unlock_irq(&phba->hbalock); |
3359 | 4089 | ||
3360 | /* Configure and enable interrupt */ | 4090 | /* Configure and enable interrupt */ |
3361 | intr_mode = lpfc_enable_intr(phba, phba->intr_mode); | 4091 | intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); |
3362 | if (intr_mode == LPFC_INTR_ERROR) { | 4092 | if (intr_mode == LPFC_INTR_ERROR) { |
3363 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 4093 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
3364 | "0427 Cannot re-enable interrupt after " | 4094 | "0427 Cannot re-enable interrupt after " |
@@ -3378,15 +4108,17 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) | |||
3378 | } | 4108 | } |
3379 | 4109 | ||
3380 | /** | 4110 | /** |
3381 | * lpfc_io_resume - Resume PCI I/O operation | 4111 | * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. |
3382 | * @pdev: pointer to PCI device | 4112 | * @pdev: pointer to PCI device |
3383 | * | 4113 | * |
3384 | * This routine is registered to the PCI subsystem for error handling. It is | 4114 | * This routine is called from the PCI subsystem for error handling to device |
3385 | * called when kernel error recovery tells the lpfc driver that it is ok to | 4115 | * with SLI-3 interface spec. It is called when kernel error recovery tells |
3386 | * resume normal PCI operation after PCI bus error recovery. After this call, | 4116 | * the lpfc driver that it is ok to resume normal PCI operation after PCI bus |
3387 | * traffic can start to flow from this device again. | 4117 | * error recovery. After this call, traffic can start to flow from this device |
4118 | * again. | ||
3388 | */ | 4119 | */ |
3389 | static void lpfc_io_resume(struct pci_dev *pdev) | 4120 | static void |
4121 | lpfc_io_resume_s3(struct pci_dev *pdev) | ||
3390 | { | 4122 | { |
3391 | struct Scsi_Host *shost = pci_get_drvdata(pdev); | 4123 | struct Scsi_Host *shost = pci_get_drvdata(pdev); |
3392 | struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; | 4124 | struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; |
@@ -3394,6 +4126,235 @@ static void lpfc_io_resume(struct pci_dev *pdev) | |||
3394 | lpfc_online(phba); | 4126 | lpfc_online(phba); |
3395 | } | 4127 | } |
3396 | 4128 | ||
4129 | /** | ||
4130 | * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem | ||
4131 | * @pdev: pointer to PCI device | ||
4132 | * @pid: pointer to PCI device identifier | ||
4133 | * | ||
4134 | * This routine is to be registered to the kernel's PCI subsystem. When an | ||
4135 | * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks | ||
4136 | * at PCI device-specific information of the device and driver to see if the | ||
4137 | * driver state that it can support this kind of device. If the match is | ||
4138 | * successful, the driver core invokes this routine. This routine dispatches | ||
4139 | * the action to the proper SLI-3 or SLI-4 device probing routine, which will | ||
4140 | * do all the initialization that it needs to do to handle the HBA device | ||
4141 | * properly. | ||
4142 | * | ||
4143 | * Return code | ||
4144 | * 0 - driver can claim the device | ||
4145 | * negative value - driver can not claim the device | ||
4146 | **/ | ||
4147 | static int __devinit | ||
4148 | lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | ||
4149 | { | ||
4150 | int rc; | ||
4151 | uint16_t dev_id; | ||
4152 | |||
4153 | if (pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id)) | ||
4154 | return -ENODEV; | ||
4155 | |||
4156 | switch (dev_id) { | ||
4157 | default: | ||
4158 | rc = lpfc_pci_probe_one_s3(pdev, pid); | ||
4159 | break; | ||
4160 | } | ||
4161 | return rc; | ||
4162 | } | ||
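lpfc_pci_probe_one above classifies the freshly probed function by reading PCI_DEVICE_ID out of config space and, in this patch, routes everything to the SLI-3 probe via the default arm. The shape of that classification step is sketched below; EXAMPLE_SLI4_DEV_ID is a hypothetical ID used only to give the switch a second arm.

#include <linux/errno.h>
#include <linux/pci.h>

#define EXAMPLE_SLI4_DEV_ID 0x0714	/* hypothetical device ID, illustration only */

/* Classify the newly probed function by its PCI device ID and branch accordingly. */
static int example_probe_dispatch(struct pci_dev *pdev,
				  const struct pci_device_id *pid)
{
	u16 dev_id;
	int rc;

	if (pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id))
		return -ENODEV;

	switch (dev_id) {
	case EXAMPLE_SLI4_DEV_ID:
		rc = -ENODEV;	/* stand-in for an SLI-4 specific probe */
		break;
	default:
		rc = 0;		/* stand-in for the SLI-3 probe path */
		break;
	}
	return rc;
}

The remove, suspend/resume and error-handler entry points that follow avoid re-reading config space by switching on the pci_dev_grp value recorded on the lpfc_hba at probe time.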
4163 | |||
4164 | /** | ||
4165 | * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem | ||
4166 | * @pdev: pointer to PCI device | ||
4167 | * | ||
4168 | * This routine is to be registered to the kernel's PCI subsystem. When an | ||
4169 | * Emulex HBA is removed from PCI bus, the driver core invokes this routine. | ||
4170 | * This routine dispatches the action to the proper SLI-3 or SLI-4 device | ||
4171 | * remove routine, which will perform all the necessary cleanup for the | ||
4172 | * device to be removed from the PCI subsystem properly. | ||
4173 | **/ | ||
4174 | static void __devexit | ||
4175 | lpfc_pci_remove_one(struct pci_dev *pdev) | ||
4176 | { | ||
4177 | struct Scsi_Host *shost = pci_get_drvdata(pdev); | ||
4178 | struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; | ||
4179 | |||
4180 | switch (phba->pci_dev_grp) { | ||
4181 | case LPFC_PCI_DEV_LP: | ||
4182 | lpfc_pci_remove_one_s3(pdev); | ||
4183 | break; | ||
4184 | default: | ||
4185 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
4186 | "1424 Invalid PCI device group: 0x%x\n", | ||
4187 | phba->pci_dev_grp); | ||
4188 | break; | ||
4189 | } | ||
4190 | return; | ||
4191 | } | ||
4192 | |||
4193 | /** | ||
4194 | * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management | ||
4195 | * @pdev: pointer to PCI device | ||
4196 | * @msg: power management message | ||
4197 | * | ||
4198 | * This routine is to be registered to the kernel's PCI subsystem to support | ||
4199 | * system Power Management (PM). When PM invokes this method, it dispatches | ||
4200 | * the action to the proper SLI-3 or SLI-4 device suspend routine, which will | ||
4201 | * suspend the device. | ||
4202 | * | ||
4203 | * Return code | ||
4204 | * 0 - driver suspended the device | ||
4205 | * Error otherwise | ||
4206 | **/ | ||
4207 | static int | ||
4208 | lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) | ||
4209 | { | ||
4210 | struct Scsi_Host *shost = pci_get_drvdata(pdev); | ||
4211 | struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; | ||
4212 | int rc = -ENODEV; | ||
4213 | |||
4214 | switch (phba->pci_dev_grp) { | ||
4215 | case LPFC_PCI_DEV_LP: | ||
4216 | rc = lpfc_pci_suspend_one_s3(pdev, msg); | ||
4217 | break; | ||
4218 | default: | ||
4219 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
4220 | "1425 Invalid PCI device group: 0x%x\n", | ||
4221 | phba->pci_dev_grp); | ||
4222 | break; | ||
4223 | } | ||
4224 | return rc; | ||
4225 | } | ||
4226 | |||
4227 | /** | ||
4228 | * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management | ||
4229 | * @pdev: pointer to PCI device | ||
4230 | * | ||
4231 | * This routine is to be registered to the kernel's PCI subsystem to support | ||
4232 | * system Power Management (PM). When PM invokes this method, it dispatches | ||
4233 | * the action to the proper SLI-3 or SLI-4 device resume routine, which will | ||
4234 | * resume the device. | ||
4235 | * | ||
4236 | * Return code | ||
4237 | * 0 - driver resumed the device | ||
4238 | * Error otherwise | ||
4239 | **/ | ||
4240 | static int | ||
4241 | lpfc_pci_resume_one(struct pci_dev *pdev) | ||
4242 | { | ||
4243 | struct Scsi_Host *shost = pci_get_drvdata(pdev); | ||
4244 | struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; | ||
4245 | int rc = -ENODEV; | ||
4246 | |||
4247 | switch (phba->pci_dev_grp) { | ||
4248 | case LPFC_PCI_DEV_LP: | ||
4249 | rc = lpfc_pci_resume_one_s3(pdev); | ||
4250 | break; | ||
4251 | default: | ||
4252 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
4253 | "1426 Invalid PCI device group: 0x%x\n", | ||
4254 | phba->pci_dev_grp); | ||
4255 | break; | ||
4256 | } | ||
4257 | return rc; | ||
4258 | } | ||
4259 | |||
4260 | /** | ||
4261 | * lpfc_io_error_detected - lpfc method for handling PCI I/O error | ||
4262 | * @pdev: pointer to PCI device. | ||
4263 | * @state: the current PCI connection state. | ||
4264 | * | ||
4265 | * This routine is registered to the PCI subsystem for error handling. This | ||
4266 | * function is called by the PCI subsystem after a PCI bus error affecting | ||
4267 | * this device has been detected. When this routine is invoked, it dispatches | ||
4268 | * the action to the proper SLI-3 or SLI-4 device error detected handling | ||
4269 | * routine, which will perform the proper error detected operation. | ||
4270 | * | ||
4271 | * Return codes | ||
4272 | * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery | ||
4273 | * PCI_ERS_RESULT_DISCONNECT - device could not be recovered | ||
4274 | **/ | ||
4275 | static pci_ers_result_t | ||
4276 | lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) | ||
4277 | { | ||
4278 | struct Scsi_Host *shost = pci_get_drvdata(pdev); | ||
4279 | struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; | ||
4280 | pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; | ||
4281 | |||
4282 | switch (phba->pci_dev_grp) { | ||
4283 | case LPFC_PCI_DEV_LP: | ||
4284 | rc = lpfc_io_error_detected_s3(pdev, state); | ||
4285 | break; | ||
4286 | default: | ||
4287 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
4288 | "1427 Invalid PCI device group: 0x%x\n", | ||
4289 | phba->pci_dev_grp); | ||
4290 | break; | ||
4291 | } | ||
4292 | return rc; | ||
4293 | } | ||
4294 | |||
4295 | /** | ||
4296 | * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch | ||
4297 | * @pdev: pointer to PCI device. | ||
4298 | * | ||
4299 | * This routine is registered to the PCI subsystem for error handling. This | ||
4300 | * function is called after PCI bus has been reset to restart the PCI card | ||
4301 | * from scratch, as if from a cold-boot. When this routine is invoked, it | ||
4302 | * dispatches the action to the proper SLI-3 or SLI-4 device reset handling | ||
4303 | * routine, which will perform the proper device reset. | ||
4304 | * | ||
4305 | * Return codes | ||
4306 | * PCI_ERS_RESULT_RECOVERED - the device has been recovered | ||
4307 | * PCI_ERS_RESULT_DISCONNECT - device could not be recovered | ||
4308 | **/ | ||
4309 | static pci_ers_result_t | ||
4310 | lpfc_io_slot_reset(struct pci_dev *pdev) | ||
4311 | { | ||
4312 | struct Scsi_Host *shost = pci_get_drvdata(pdev); | ||
4313 | struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; | ||
4314 | pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; | ||
4315 | |||
4316 | switch (phba->pci_dev_grp) { | ||
4317 | case LPFC_PCI_DEV_LP: | ||
4318 | rc = lpfc_io_slot_reset_s3(pdev); | ||
4319 | break; | ||
4320 | default: | ||
4321 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
4322 | "1428 Invalid PCI device group: 0x%x\n", | ||
4323 | phba->pci_dev_grp); | ||
4324 | break; | ||
4325 | } | ||
4326 | return rc; | ||
4327 | } | ||
4328 | |||
4329 | /** | ||
4330 | * lpfc_io_resume - lpfc method for resuming PCI I/O operation | ||
4331 | * @pdev: pointer to PCI device | ||
4332 | * | ||
4333 | * This routine is registered to the PCI subsystem for error handling. It | ||
4334 | * is called when kernel error recovery tells the lpfc driver that it is | ||
4335 | * OK to resume normal PCI operation after PCI bus error recovery. When | ||
4336 | * this routine is invoked, it dispatches the action to the proper SLI-3 | ||
4337 | * or SLI-4 device io_resume routine, which will resume the device operation. | ||
4338 | **/ | ||
4339 | static void | ||
4340 | lpfc_io_resume(struct pci_dev *pdev) | ||
4341 | { | ||
4342 | struct Scsi_Host *shost = pci_get_drvdata(pdev); | ||
4343 | struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; | ||
4344 | |||
4345 | switch (phba->pci_dev_grp) { | ||
4346 | case LPFC_PCI_DEV_LP: | ||
4347 | lpfc_io_resume_s3(pdev); | ||
4348 | break; | ||
4349 | default: | ||
4350 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
4351 | "1429 Invalid PCI device group: 0x%x\n", | ||
4352 | phba->pci_dev_grp); | ||
4353 | break; | ||
4354 | } | ||
4355 | return; | ||
4356 | } | ||
4357 | |||
3397 | static struct pci_device_id lpfc_id_table[] = { | 4358 | static struct pci_device_id lpfc_id_table[] = { |
3398 | {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER, | 4359 | {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER, |
3399 | PCI_ANY_ID, PCI_ANY_ID, }, | 4360 | PCI_ANY_ID, PCI_ANY_ID, }, |
@@ -3469,6 +4430,10 @@ static struct pci_device_id lpfc_id_table[] = { | |||
3469 | PCI_ANY_ID, PCI_ANY_ID, }, | 4430 | PCI_ANY_ID, PCI_ANY_ID, }, |
3470 | {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S, | 4431 | {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S, |
3471 | PCI_ANY_ID, PCI_ANY_ID, }, | 4432 | PCI_ANY_ID, PCI_ANY_ID, }, |
4433 | {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK, | ||
4434 | PCI_ANY_ID, PCI_ANY_ID, }, | ||
4435 | {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK_S, | ||
4436 | PCI_ANY_ID, PCI_ANY_ID, }, | ||
3472 | { 0 } | 4437 | { 0 } |
3473 | }; | 4438 | }; |
3474 | 4439 | ||
@@ -3486,7 +4451,7 @@ static struct pci_driver lpfc_driver = { | |||
3486 | .probe = lpfc_pci_probe_one, | 4451 | .probe = lpfc_pci_probe_one, |
3487 | .remove = __devexit_p(lpfc_pci_remove_one), | 4452 | .remove = __devexit_p(lpfc_pci_remove_one), |
3488 | .suspend = lpfc_pci_suspend_one, | 4453 | .suspend = lpfc_pci_suspend_one, |
3489 | .resume = lpfc_pci_resume_one, | 4454 | .resume = lpfc_pci_resume_one, |
3490 | .err_handler = &lpfc_err_handler, | 4455 | .err_handler = &lpfc_err_handler, |
3491 | }; | 4456 | }; |
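The ID table and pci_driver definition above are what bind all of these entry points to the PCI core: any function matching a listed vendor/device pair is handed to lpfc_pci_probe_one. A generic sketch of that registration boilerplate, with invented names and placeholder IDs rather than the lpfc ones:

#include <linux/module.h>
#include <linux/pci.h>

/* Placeholder IDs, invented for the sketch; the real table lists Emulex parts. */
#define EXAMPLE_VENDOR_ID 0x1234
#define EXAMPLE_DEVICE_ID 0x5678

static const struct pci_device_id example_id_table[] = {
	{ PCI_DEVICE(EXAMPLE_VENDOR_ID, EXAMPLE_DEVICE_ID) },
	{ }				/* terminating all-zero entry */
};
MODULE_DEVICE_TABLE(pci, example_id_table);

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return pci_enable_device(pdev);	/* a real probe does far more, as above */
}

static void example_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static struct pci_driver example_driver = {
	.name		= "example_hba",
	.id_table	= example_id_table,
	.probe		= example_probe,
	.remove		= example_remove,
	/* .suspend, .resume and .err_handler hook up PM and error recovery */
};

static int __init example_init(void)
{
	return pci_register_driver(&example_driver);
}

static void __exit example_exit(void)
{
	pci_unregister_driver(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");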
3492 | 4457 | ||
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 167b66dd34c7..a226c053c0f4 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c | |||
@@ -438,22 +438,23 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba) | |||
438 | } | 438 | } |
439 | 439 | ||
440 | /** | 440 | /** |
441 | * lpfc_new_scsi_buf - Scsi buffer allocator | 441 | * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec |
442 | * @vport: The virtual port for which this call being executed. | 442 | * @vport: The virtual port for which this call being executed. |
443 | * @num_to_alloc: The requested number of buffers to allocate. | ||
443 | * | 444 | * |
444 | * This routine allocates a scsi buffer, which contains all the necessary | 445 | * This routine allocates a scsi buffer for device with SLI-3 interface spec, |
445 | * information needed to initiate a SCSI I/O. The non-DMAable buffer region | 446 | * the scsi buffer contains all the necessary information needed to initiate |
446 | * contains information to build the IOCB. The DMAable region contains | 447 | * a SCSI I/O. The non-DMAable buffer region contains information to build |
447 | * memory for the FCP CMND, FCP RSP, and the initial BPL. In addition to | 448 | * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP, |
448 | * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL | 449 | * and the initial BPL. In addition to allocating memory, the FCP CMND and |
449 | * and the BPL BDE is setup in the IOCB. | 450 | * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB. |
450 | * | 451 | * |
451 | * Return codes: | 452 | * Return codes: |
452 | * NULL - Error | 453 | * int - number of scsi buffers that were allocated. |
453 | * Pointer to lpfc_scsi_buf data structure - Success | 454 | * 0 = failure, less than num_to_alloc is a partial failure. |
454 | **/ | 455 | **/ |
455 | static struct lpfc_scsi_buf * | 456 | static int |
456 | lpfc_new_scsi_buf(struct lpfc_vport *vport) | 457 | lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) |
457 | { | 458 | { |
458 | struct lpfc_hba *phba = vport->phba; | 459 | struct lpfc_hba *phba = vport->phba; |
459 | struct lpfc_scsi_buf *psb; | 460 | struct lpfc_scsi_buf *psb; |
@@ -463,107 +464,134 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport) | |||
463 | dma_addr_t pdma_phys_fcp_rsp; | 464 | dma_addr_t pdma_phys_fcp_rsp; |
464 | dma_addr_t pdma_phys_bpl; | 465 | dma_addr_t pdma_phys_bpl; |
465 | uint16_t iotag; | 466 | uint16_t iotag; |
467 | int bcnt; | ||
466 | 468 | ||
467 | psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); | 469 | for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { |
468 | if (!psb) | 470 | psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); |
469 | return NULL; | 471 | if (!psb) |
472 | break; | ||
470 | 473 | ||
471 | /* | 474 | /* |
472 | * Get memory from the pci pool to map the virt space to pci bus space | 475 | * Get memory from the pci pool to map the virt space to pci |
473 | * for an I/O. The DMA buffer includes space for the struct fcp_cmnd, | 476 | * bus space for an I/O. The DMA buffer includes space for the |
474 | * struct fcp_rsp and the number of bde's necessary to support the | 477 | * struct fcp_cmnd, struct fcp_rsp and the number of bde's |
475 | * sg_tablesize. | 478 | * necessary to support the sg_tablesize. |
476 | */ | 479 | */ |
477 | psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL, | 480 | psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, |
478 | &psb->dma_handle); | 481 | GFP_KERNEL, &psb->dma_handle); |
479 | if (!psb->data) { | 482 | if (!psb->data) { |
480 | kfree(psb); | 483 | kfree(psb); |
481 | return NULL; | 484 | break; |
482 | } | 485 | } |
483 | 486 | ||
484 | /* Initialize virtual ptrs to dma_buf region. */ | 487 | /* Initialize virtual ptrs to dma_buf region. */ |
485 | memset(psb->data, 0, phba->cfg_sg_dma_buf_size); | 488 | memset(psb->data, 0, phba->cfg_sg_dma_buf_size); |
486 | 489 | ||
487 | /* Allocate iotag for psb->cur_iocbq. */ | 490 | /* Allocate iotag for psb->cur_iocbq. */ |
488 | iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); | 491 | iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); |
489 | if (iotag == 0) { | 492 | if (iotag == 0) { |
490 | pci_pool_free(phba->lpfc_scsi_dma_buf_pool, | 493 | pci_pool_free(phba->lpfc_scsi_dma_buf_pool, |
491 | psb->data, psb->dma_handle); | 494 | psb->data, psb->dma_handle); |
492 | kfree (psb); | 495 | kfree(psb); |
493 | return NULL; | 496 | break; |
494 | } | 497 | } |
495 | psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; | 498 | psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; |
496 | 499 | ||
497 | psb->fcp_cmnd = psb->data; | 500 | psb->fcp_cmnd = psb->data; |
498 | psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd); | 501 | psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd); |
499 | psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) + | 502 | psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) + |
500 | sizeof(struct fcp_rsp); | ||
501 | |||
502 | /* Initialize local short-hand pointers. */ | ||
503 | bpl = psb->fcp_bpl; | ||
504 | pdma_phys_fcp_cmd = psb->dma_handle; | ||
505 | pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd); | ||
506 | pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) + | ||
507 | sizeof(struct fcp_rsp); | 503 | sizeof(struct fcp_rsp); |
508 | 504 | ||
509 | /* | 505 | /* Initialize local short-hand pointers. */ |
510 | * The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg | 506 | bpl = psb->fcp_bpl; |
511 | * list bdes. Initialize the first two and leave the rest for | 507 | pdma_phys_fcp_cmd = psb->dma_handle; |
512 | * queuecommand. | 508 | pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd); |
513 | */ | 509 | pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) + |
514 | bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd)); | 510 | sizeof(struct fcp_rsp); |
515 | bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd)); | 511 | |
516 | bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd); | 512 | /* |
517 | bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64; | 513 | * The first two bdes are the FCP_CMD and FCP_RSP. The balance |
518 | bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w); | 514 | * are sg list bdes. Initialize the first two and leave the |
519 | 515 | * rest for queuecommand. | |
520 | /* Setup the physical region for the FCP RSP */ | 516 | */ |
521 | bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp)); | 517 | bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd)); |
522 | bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp)); | 518 | bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd)); |
523 | bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp); | 519 | bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd); |
524 | bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64; | 520 | bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64; |
525 | bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w); | 521 | bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w); |
522 | |||
523 | /* Setup the physical region for the FCP RSP */ | ||
524 | bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp)); | ||
525 | bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp)); | ||
526 | bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp); | ||
527 | bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64; | ||
528 | bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w); | ||
529 | |||
530 | /* | ||
531 | * Since the IOCB for the FCP I/O is built into this | ||
532 | * lpfc_scsi_buf, initialize it with all known data now. | ||
533 | */ | ||
534 | iocb = &psb->cur_iocbq.iocb; | ||
535 | iocb->un.fcpi64.bdl.ulpIoTag32 = 0; | ||
536 | if ((phba->sli_rev == 3) && | ||
537 | !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { | ||
538 | /* fill in immediate fcp command BDE */ | ||
539 | iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED; | ||
540 | iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); | ||
541 | iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t, | ||
542 | unsli3.fcp_ext.icd); | ||
543 | iocb->un.fcpi64.bdl.addrHigh = 0; | ||
544 | iocb->ulpBdeCount = 0; | ||
545 | iocb->ulpLe = 0; | ||
546 | /* fill in response BDE */ | ||
547 | iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = | ||
548 | BUFF_TYPE_BDE_64; | ||
549 | iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize = | ||
550 | sizeof(struct fcp_rsp); | ||
551 | iocb->unsli3.fcp_ext.rbde.addrLow = | ||
552 | putPaddrLow(pdma_phys_fcp_rsp); | ||
553 | iocb->unsli3.fcp_ext.rbde.addrHigh = | ||
554 | putPaddrHigh(pdma_phys_fcp_rsp); | ||
555 | } else { | ||
556 | iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64; | ||
557 | iocb->un.fcpi64.bdl.bdeSize = | ||
558 | (2 * sizeof(struct ulp_bde64)); | ||
559 | iocb->un.fcpi64.bdl.addrLow = | ||
560 | putPaddrLow(pdma_phys_bpl); | ||
561 | iocb->un.fcpi64.bdl.addrHigh = | ||
562 | putPaddrHigh(pdma_phys_bpl); | ||
563 | iocb->ulpBdeCount = 1; | ||
564 | iocb->ulpLe = 1; | ||
565 | } | ||
566 | iocb->ulpClass = CLASS3; | ||
567 | psb->status = IOSTAT_SUCCESS; | ||
526 | 568 | ||
527 | /* | ||
528 | * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf, | ||
529 | * initialize it with all known data now. | ||
530 | */ | ||
531 | iocb = &psb->cur_iocbq.iocb; | ||
532 | iocb->un.fcpi64.bdl.ulpIoTag32 = 0; | ||
533 | if ((phba->sli_rev == 3) && | ||
534 | !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { | ||
535 | /* fill in immediate fcp command BDE */ | ||
536 | iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED; | ||
537 | iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); | ||
538 | iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t, | ||
539 | unsli3.fcp_ext.icd); | ||
540 | iocb->un.fcpi64.bdl.addrHigh = 0; | ||
541 | iocb->ulpBdeCount = 0; | ||
542 | iocb->ulpLe = 0; | ||
543 | /* fill in response BDE */ | ||
544 | iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; | ||
545 | iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize = | ||
546 | sizeof(struct fcp_rsp); | ||
547 | iocb->unsli3.fcp_ext.rbde.addrLow = | ||
548 | putPaddrLow(pdma_phys_fcp_rsp); | ||
549 | iocb->unsli3.fcp_ext.rbde.addrHigh = | ||
550 | putPaddrHigh(pdma_phys_fcp_rsp); | ||
551 | } else { | ||
552 | iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64; | ||
553 | iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64)); | ||
554 | iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl); | ||
555 | iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl); | ||
556 | iocb->ulpBdeCount = 1; | ||
557 | iocb->ulpLe = 1; | ||
558 | } | 569 | } |
559 | iocb->ulpClass = CLASS3; | ||
560 | 570 | ||
561 | return psb; | 571 | return bcnt; |
562 | } | 572 | } |
563 | 573 | ||
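The reworked allocator above now takes num_to_alloc and returns how many buffers it actually managed to set up, so a partial allocation simply yields a smaller pool rather than an error. A user-space sketch of that allocate-N-report-count contract (the types and the calloc() calls are stand-ins for the lpfc DMA-pool allocations):

#include <stdlib.h>

struct example_buf {
	void *data;
	struct example_buf *next;
};

/*
 * Allocate up to num_to_alloc buffers onto *list and return how many were
 * actually allocated; 0 means total failure, anything below num_to_alloc
 * is a partial failure the caller can live with.
 */
static int example_new_bufs(struct example_buf **list, int num_to_alloc,
			    size_t bufsize)
{
	struct example_buf *b;
	int bcnt;

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		b = calloc(1, sizeof(*b));
		if (!b)
			break;
		b->data = calloc(1, bufsize);	/* stand-in for pci_pool_alloc() */
		if (!b->data) {
			free(b);
			break;
		}
		b->next = *list;		/* push onto the caller's free list */
		*list = b;
	}
	return bcnt;
}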
564 | /** | 574 | /** |
565 | * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list list of Hba | 575 | * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator |
566 | * @phba: The Hba for which this call is being executed. | 576 | * @vport: The virtual port for which this call being executed. |
577 | * @num_to_alloc: The requested number of buffers to allocate. | ||
578 | * | ||
579 | * This routine wraps the actual SCSI buffer allocator function pointer from | ||
580 | * the lpfc_hba struct. | ||
581 | * | ||
582 | * Return codes: | ||
583 | * int - number of scsi buffers that were allocated. | ||
584 | * 0 = failure, less than num_to_alloc is a partial failure. | ||
585 | **/ | ||
586 | static inline int | ||
587 | lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc) | ||
588 | { | ||
589 | return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc); | ||
590 | } | ||
591 | |||
592 | /** | ||
593 | * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA | ||
594 | * @phba: The HBA for which this call is being executed. | ||
567 | * | 595 | * |
568 | * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list | 596 | * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list |
569 | * and returns to caller. | 597 | * and returns to caller. |
@@ -591,7 +619,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba) | |||
591 | } | 619 | } |
592 | 620 | ||
593 | /** | 621 | /** |
594 | * lpfc_release_scsi_buf - Return a scsi buffer back to hba's lpfc_scsi_buf_list | 622 | * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list |
595 | * @phba: The Hba for which this call is being executed. | 623 | * @phba: The Hba for which this call is being executed. |
596 | * @psb: The scsi buffer which is being released. | 624 | * @psb: The scsi buffer which is being released. |
597 | * | 625 | * |
@@ -599,7 +627,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba) | |||
599 | * lpfc_scsi_buf_list list. | 627 | * lpfc_scsi_buf_list list. |
600 | **/ | 628 | **/ |
601 | static void | 629 | static void |
602 | lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) | 630 | lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) |
603 | { | 631 | { |
604 | unsigned long iflag = 0; | 632 | unsigned long iflag = 0; |
605 | 633 | ||
@@ -610,21 +638,36 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) | |||
610 | } | 638 | } |
611 | 639 | ||
612 | /** | 640 | /** |
613 | * lpfc_scsi_prep_dma_buf - Routine to do DMA mapping for scsi buffer | 641 | * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list. |
642 | * @phba: The Hba for which this call is being executed. | ||
643 | * @psb: The scsi buffer which is being released. | ||
644 | * | ||
645 | * This routine releases @psb scsi buffer by adding it to tail of @phba | ||
646 | * lpfc_scsi_buf_list list. | ||
647 | **/ | ||
648 | static void | ||
649 | lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) | ||
650 | { | ||
651 | |||
652 | phba->lpfc_release_scsi_buf(phba, psb); | ||
653 | } | ||
654 | |||
655 | /** | ||
656 | * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec | ||
614 | * @phba: The Hba for which this call is being executed. | 657 | * @phba: The Hba for which this call is being executed. |
615 | * @lpfc_cmd: The scsi buffer which is going to be mapped. | 658 | * @lpfc_cmd: The scsi buffer which is going to be mapped. |
616 | * | 659 | * |
617 | * This routine does the pci dma mapping for scatter-gather list of scsi cmnd | 660 | * This routine does the pci dma mapping for scatter-gather list of scsi cmnd |
618 | * field of @lpfc_cmd. This routine scans through sg elements and format the | 661 | * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans |
619 | * bdea. This routine also initializes all IOCB fields which are dependent on | 662 | * through sg elements and format the bdea. This routine also initializes all |
620 | * scsi command request buffer. | 663 | * IOCB fields which are dependent on scsi command request buffer. |
621 | * | 664 | * |
622 | * Return codes: | 665 | * Return codes: |
623 | * 1 - Error | 666 | * 1 - Error |
624 | * 0 - Success | 667 | * 0 - Success |
625 | **/ | 668 | **/ |
626 | static int | 669 | static int |
627 | lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) | 670 | lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) |
628 | { | 671 | { |
629 | struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; | 672 | struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; |
630 | struct scatterlist *sgel = NULL; | 673 | struct scatterlist *sgel = NULL; |
@@ -1412,6 +1455,24 @@ out: | |||
1412 | } | 1455 | } |
1413 | 1456 | ||
1414 | /** | 1457 | /** |
1458 | * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer | ||
1459 | * @phba: The Hba for which this call is being executed. | ||
1460 | * @lpfc_cmd: The scsi buffer which is going to be mapped. | ||
1461 | * | ||
1462 | * This routine wraps the actual DMA mapping function pointer from the | ||
1463 | * lpfc_hba struct. | ||
1464 | * | ||
1465 | * Return codes: | ||
1466 | * 1 - Error | ||
1467 | * 0 - Success | ||
1468 | **/ | ||
1469 | static inline int | ||
1470 | lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) | ||
1471 | { | ||
1472 | return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); | ||
1473 | } | ||
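The DMA-mapping routine documented above walks the command's scatter-gather list and describes each mapped segment with a BDE. A reduced sketch of that walk using the generic SCSI midlayer helpers is below; struct example_bde and the 1/0 return convention stand in for the driver's ulp_bde64 handling, and error handling is trimmed.

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

/* Hypothetical 64-bit buffer descriptor standing in for struct ulp_bde64. */
struct example_bde {
	u32 addr_low;
	u32 addr_high;
	u32 len;
};

/* Map the command's sg list and describe each segment in a BDE array. */
static int example_prep_dma(struct scsi_cmnd *cmd, struct example_bde *bde,
			    int max_bde)
{
	struct scatterlist *sg;
	int nseg, i;

	nseg = scsi_dma_map(cmd);	/* mapped segment count, negative on error */
	if (nseg < 0)
		return 1;
	if (nseg > max_bde) {
		scsi_dma_unmap(cmd);
		return 1;
	}

	scsi_for_each_sg(cmd, sg, nseg, i) {
		dma_addr_t physaddr = sg_dma_address(sg);

		bde[i].addr_low  = lower_32_bits(physaddr);
		bde[i].addr_high = upper_32_bits(physaddr);
		bde[i].len       = sg_dma_len(sg);
	}
	return 0;
}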
1474 | |||
1475 | /** | ||
1415 | * lpfc_send_scsi_error_event - Posts an event when there is SCSI error | 1476 | * lpfc_send_scsi_error_event - Posts an event when there is SCSI error |
1416 | * @phba: Pointer to hba context object. | 1477 | * @phba: Pointer to hba context object. |
1417 | * @vport: Pointer to vport object. | 1478 | * @vport: Pointer to vport object. |
@@ -1504,15 +1565,15 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport, | |||
1504 | } | 1565 | } |
1505 | 1566 | ||
1506 | /** | 1567 | /** |
1507 | * lpfc_scsi_unprep_dma_buf - Routine to un-map DMA mapping of scatter gather | 1568 | * lpfc_scsi_unprep_dma_buf_s3 - Un-map DMA mapping of SG-list for SLI3 dev |
1508 | * @phba: The Hba for which this call is being executed. | 1569 | * @phba: The HBA for which this call is being executed. |
1509 | * @psb: The scsi buffer which is going to be un-mapped. | 1570 | * @psb: The scsi buffer which is going to be un-mapped. |
1510 | * | 1571 | * |
1511 | * This routine does DMA un-mapping of scatter gather list of scsi command | 1572 | * This routine does DMA un-mapping of scatter gather list of scsi command |
1512 | * field of @lpfc_cmd. | 1573 | * field of @lpfc_cmd for device with SLI-3 interface spec. |
1513 | **/ | 1574 | **/ |
1514 | static void | 1575 | static void |
1515 | lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) | 1576 | lpfc_scsi_unprep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) |
1516 | { | 1577 | { |
1517 | /* | 1578 | /* |
1518 | * There are only two special cases to consider. (1) the scsi command | 1579 | * There are only two special cases to consider. (1) the scsi command |
@@ -1529,6 +1590,20 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) | |||
1529 | } | 1590 | } |
1530 | 1591 | ||
1531 | /** | 1592 | /** |
1593 | * lpfc_scsi_unprep_dma_buf - Wrapper function for unmap DMA mapping of SG-list | ||
1594 | * @phba: The Hba for which this call is being executed. | ||
1595 | * @psb: The scsi buffer which is going to be un-mapped. | ||
1596 | * | ||
1597 | * This routine does DMA un-mapping of scatter gather list of scsi command | ||
1598 | * field of @psb through the un-mapping function pointer from the lpfc_hba struct. | ||
1599 | **/ | ||
1600 | static void | ||
1601 | lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) | ||
1602 | { | ||
1603 | phba->lpfc_scsi_unprep_dma_buf(phba, psb); | ||
1604 | } | ||
1605 | |||
1606 | /** | ||
1532 | * lpfc_handler_fcp_err - FCP response handler | 1607 | * lpfc_handler_fcp_err - FCP response handler |
1533 | * @vport: The virtual port for which this call is being executed. | 1608 | * @vport: The virtual port for which this call is being executed. |
1534 | * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. | 1609 | * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. |
@@ -1676,7 +1751,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | |||
1676 | * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine | 1751 | * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine |
1677 | * @phba: The Hba for which this call is being executed. | 1752 | * @phba: The Hba for which this call is being executed. |
1678 | * @pIocbIn: The command IOCBQ for the scsi cmnd. | 1753 | * @pIocbIn: The command IOCBQ for the scsi cmnd. |
1679 | * @pIocbOut: The response IOCBQ for the scsi cmnd . | 1754 | * @pIocbOut: The response IOCBQ for the scsi cmnd. |
1680 | * | 1755 | * |
1681 | * This routine assigns scsi command result by looking into response IOCB | 1756 | * This routine assigns scsi command result by looking into response IOCB |
1682 | * status field appropriately. This routine handles QUEUE FULL condition as | 1757 | * status field appropriately. This routine handles QUEUE FULL condition as |
@@ -1957,16 +2032,16 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd) | |||
1957 | } | 2032 | } |
1958 | 2033 | ||
1959 | /** | 2034 | /** |
1960 | * lpfc_scsi_prep_cmnd - Routine to convert scsi cmnd to FCP information unit | 2035 | * lpfc_scsi_prep_cmnd_s3 - Convert scsi cmnd to FCP info unit for SLI3 dev |
1961 | * @vport: The virtual port for which this call is being executed. | 2036 | * @vport: The virtual port for which this call is being executed. |
1962 | * @lpfc_cmd: The scsi command which needs to send. | 2037 | * @lpfc_cmd: The scsi command which needs to send. |
1963 | * @pnode: Pointer to lpfc_nodelist. | 2038 | * @pnode: Pointer to lpfc_nodelist. |
1964 | * | 2039 | * |
1965 | * This routine initializes fcp_cmnd and iocb data structure from scsi command | 2040 | * This routine initializes fcp_cmnd and iocb data structure from scsi command |
1966 | * to transfer. | 2041 | * to transfer for device with SLI3 interface spec. |
1967 | **/ | 2042 | **/ |
1968 | static void | 2043 | static void |
1969 | lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | 2044 | lpfc_scsi_prep_cmnd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, |
1970 | struct lpfc_nodelist *pnode) | 2045 | struct lpfc_nodelist *pnode) |
1971 | { | 2046 | { |
1972 | struct lpfc_hba *phba = vport->phba; | 2047 | struct lpfc_hba *phba = vport->phba; |
@@ -2013,8 +2088,11 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | |||
2013 | if (scsi_sg_count(scsi_cmnd)) { | 2088 | if (scsi_sg_count(scsi_cmnd)) { |
2014 | if (datadir == DMA_TO_DEVICE) { | 2089 | if (datadir == DMA_TO_DEVICE) { |
2015 | iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; | 2090 | iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; |
2016 | iocb_cmd->un.fcpi.fcpi_parm = 0; | 2091 | if (phba->sli_rev < LPFC_SLI_REV4) { |
2017 | iocb_cmd->ulpPU = 0; | 2092 | iocb_cmd->un.fcpi.fcpi_parm = 0; |
2093 | iocb_cmd->ulpPU = 0; | ||
2094 | } else | ||
2095 | iocb_cmd->ulpPU = PARM_READ_CHECK; | ||
2018 | fcp_cmnd->fcpCntl3 = WRITE_DATA; | 2096 | fcp_cmnd->fcpCntl3 = WRITE_DATA; |
2019 | phba->fc4OutputRequests++; | 2097 | phba->fc4OutputRequests++; |
2020 | } else { | 2098 | } else { |
@@ -2051,20 +2129,37 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | |||
2051 | } | 2129 | } |
2052 | 2130 | ||
2053 | /** | 2131 | /** |
2054 | * lpfc_scsi_prep_task_mgmt_cmnd - Convert scsi TM cmnd to FCP information unit | 2132 | * lpfc_scsi_prep_cmnd - Wrapper func to convert scsi cmnd to FCP info unit |
2133 | * @vport: The virtual port for which this call is being executed. | ||
2134 | * @lpfc_cmd: The scsi command which needs to send. | ||
2135 | * @pnode: Pointer to lpfc_nodelist. | ||
2136 | * | ||
2137 | * This routine wraps the actual convert SCSI cmnd function pointer from | ||
2138 | * the lpfc_hba struct. | ||
2139 | **/ | ||
2140 | static inline void | ||
2141 | lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | ||
2142 | struct lpfc_nodelist *pnode) | ||
2143 | { | ||
2144 | vport->phba->lpfc_scsi_prep_cmnd(vport, lpfc_cmd, pnode); | ||
2145 | } | ||
2146 | |||
2147 | /** | ||
2148 | * lpfc_scsi_prep_task_mgmt_cmd_s3 - Convert SLI3 scsi TM cmd to FCP info unit | ||
2055 | * @vport: The virtual port for which this call is being executed. | 2149 | * @vport: The virtual port for which this call is being executed. |
2056 | * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. | 2150 | * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. |
2057 | * @lun: Logical unit number. | 2151 | * @lun: Logical unit number. |
2058 | * @task_mgmt_cmd: SCSI task management command. | 2152 | * @task_mgmt_cmd: SCSI task management command. |
2059 | * | 2153 | * |
2060 | * This routine creates FCP information unit corresponding to @task_mgmt_cmd. | 2154 | * This routine creates FCP information unit corresponding to @task_mgmt_cmd |
2155 | * for device with SLI-3 interface spec. | ||
2061 | * | 2156 | * |
2062 | * Return codes: | 2157 | * Return codes: |
2063 | * 0 - Error | 2158 | * 0 - Error |
2064 | * 1 - Success | 2159 | * 1 - Success |
2065 | **/ | 2160 | **/ |
2066 | static int | 2161 | static int |
2067 | lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, | 2162 | lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport, |
2068 | struct lpfc_scsi_buf *lpfc_cmd, | 2163 | struct lpfc_scsi_buf *lpfc_cmd, |
2069 | unsigned int lun, | 2164 | unsigned int lun, |
2070 | uint8_t task_mgmt_cmd) | 2165 | uint8_t task_mgmt_cmd) |
@@ -2114,6 +2209,67 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, | |||
2114 | } | 2209 | } |
2115 | 2210 | ||
2116 | /** | 2211 | /** |
2212 | * lpfc_scsi_prep_task_mgmt_cmd - Wrapper func to convert scsi TM cmd to FCP info | ||
2213 | * @vport: The virtual port for which this call is being executed. | ||
2214 | * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. | ||
2215 | * @lun: Logical unit number. | ||
2216 | * @task_mgmt_cmd: SCSI task management command. | ||
2217 | * | ||
2218 | * This routine wraps the actual convert SCSI TM to FCP information unit | ||
2219 | * function pointer from the lpfc_hba struct. | ||
2220 | * | ||
2221 | * Return codes: | ||
2222 | * 0 - Error | ||
2223 | * 1 - Success | ||
2224 | **/ | ||
2225 | static inline int | ||
2226 | lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, | ||
2227 | struct lpfc_scsi_buf *lpfc_cmd, | ||
2228 | unsigned int lun, | ||
2229 | uint8_t task_mgmt_cmd) | ||
2230 | { | ||
2231 | struct lpfc_hba *phba = vport->phba; | ||
2232 | |||
2233 | return phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun, | ||
2234 | task_mgmt_cmd); | ||
2235 | } | ||
2236 | |||
2237 | /** | ||
2238 | * lpfc_scsi_api_table_setup - Set up scsi api function jump table | ||
2239 | * @phba: The hba struct for which this call is being executed. | ||
2240 | * @dev_grp: The HBA PCI-Device group number. | ||
2241 | * | ||
2242 | * This routine sets up the SCSI interface API function jump table in @phba | ||
2243 | * struct. | ||
2244 | * Returns: 0 - success, -ENODEV - failure. | ||
2245 | **/ | ||
2246 | int | ||
2247 | lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) | ||
2248 | { | ||
2249 | |||
2250 | switch (dev_grp) { | ||
2251 | case LPFC_PCI_DEV_LP: | ||
2252 | phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3; | ||
2253 | phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3; | ||
2254 | phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s3; | ||
2255 | phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s3; | ||
2256 | phba->lpfc_scsi_prep_task_mgmt_cmd = | ||
2257 | lpfc_scsi_prep_task_mgmt_cmd_s3; | ||
2258 | phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3; | ||
2259 | break; | ||
2260 | default: | ||
2261 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
2262 | "1418 Invalid HBA PCI-device group: 0x%x\n", | ||
2263 | dev_grp); | ||
2264 | return -ENODEV; | ||
2265 | break; | ||
2266 | } | ||
2267 | phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf; | ||
2268 | phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth; | ||
2269 | return 0; | ||
2270 | } | ||
2271 | |||
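The lpfc_scsi_api_table_setup() routine above, together with the inline wrappers that precede it, is an instance of a per-device-group function-pointer jump table: the table is filled in once at setup time, and every call site dispatches through the pointers instead of testing the SLI revision. A minimal standalone sketch of the same pattern follows; the names (struct hba, scsi_api_table_setup, prep_cmnd_s3, DEV_GRP_LP) are illustrative placeholders, not lpfc symbols.

#include <stdio.h>
#include <errno.h>

/* Hypothetical device groups, loosely mirroring the PCI-device split. */
enum dev_grp { DEV_GRP_LP = 1, DEV_GRP_OC = 2 };

/* Per-adapter jump table: one slot per SCSI-path operation. */
struct hba {
        int (*prep_cmnd)(struct hba *hba, int lun);
        int (*prep_task_mgmt_cmd)(struct hba *hba, int lun, int tm_cmd);
};

/* SLI-3 style backends; the bodies are illustrative only. */
static int prep_cmnd_s3(struct hba *hba, int lun)
{
        printf("prep cmnd (s3) for lun %d\n", lun);
        return 0;
}

static int prep_task_mgmt_cmd_s3(struct hba *hba, int lun, int tm_cmd)
{
        printf("prep TM cmd 0x%x (s3) for lun %d\n", tm_cmd, lun);
        return 1;               /* 1 == success, matching the driver's TM convention */
}

/* Fill the jump table once, based on the device group. */
static int scsi_api_table_setup(struct hba *hba, enum dev_grp grp)
{
        switch (grp) {
        case DEV_GRP_LP:
                hba->prep_cmnd = prep_cmnd_s3;
                hba->prep_task_mgmt_cmd = prep_task_mgmt_cmd_s3;
                return 0;
        default:
                fprintf(stderr, "invalid device group %d\n", grp);
                return -ENODEV;
        }
}

/* Thin wrapper: call sites never test the SLI revision themselves. */
static inline int prep_cmnd(struct hba *hba, int lun)
{
        return hba->prep_cmnd(hba, lun);
}

int main(void)
{
        struct hba hba;

        if (scsi_api_table_setup(&hba, DEV_GRP_LP))
                return 1;
        return prep_cmnd(&hba, 0);
}

Extending such a table to another device group later only adds a case to the switch; the call sites stay unchanged.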
2272 | /** | ||
2117 | * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command | 2273 | * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command |
2118 | * @phba: The Hba for which this call is being executed. | 2274 | * @phba: The Hba for which this call is being executed. |
2119 | * @cmdiocbq: Pointer to lpfc_iocbq data structure. | 2275 | * @cmdiocbq: Pointer to lpfc_iocbq data structure. |
@@ -2178,9 +2334,8 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport, | |||
2178 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, | 2334 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, |
2179 | "0702 Issue Target Reset to TGT %d Data: x%x x%x\n", | 2335 | "0702 Issue Target Reset to TGT %d Data: x%x x%x\n", |
2180 | tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag); | 2336 | tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag); |
2181 | status = lpfc_sli_issue_iocb_wait(phba, | 2337 | status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, |
2182 | &phba->sli.ring[phba->sli.fcp_ring], | 2338 | iocbq, iocbqrsp, lpfc_cmd->timeout); |
2183 | iocbq, iocbqrsp, lpfc_cmd->timeout); | ||
2184 | if (status != IOCB_SUCCESS) { | 2339 | if (status != IOCB_SUCCESS) { |
2185 | if (status == IOCB_TIMEDOUT) { | 2340 | if (status == IOCB_TIMEDOUT) { |
2186 | iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; | 2341 | iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; |
@@ -2305,7 +2460,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | |||
2305 | struct Scsi_Host *shost = cmnd->device->host; | 2460 | struct Scsi_Host *shost = cmnd->device->host; |
2306 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | 2461 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; |
2307 | struct lpfc_hba *phba = vport->phba; | 2462 | struct lpfc_hba *phba = vport->phba; |
2308 | struct lpfc_sli *psli = &phba->sli; | ||
2309 | struct lpfc_rport_data *rdata = cmnd->device->hostdata; | 2463 | struct lpfc_rport_data *rdata = cmnd->device->hostdata; |
2310 | struct lpfc_nodelist *ndlp = rdata->pnode; | 2464 | struct lpfc_nodelist *ndlp = rdata->pnode; |
2311 | struct lpfc_scsi_buf *lpfc_cmd; | 2465 | struct lpfc_scsi_buf *lpfc_cmd; |
@@ -2427,7 +2581,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | |||
2427 | lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); | 2581 | lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); |
2428 | 2582 | ||
2429 | atomic_inc(&ndlp->cmd_pending); | 2583 | atomic_inc(&ndlp->cmd_pending); |
2430 | err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring], | 2584 | err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, |
2431 | &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); | 2585 | &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); |
2432 | if (err) { | 2586 | if (err) { |
2433 | atomic_dec(&ndlp->cmd_pending); | 2587 | atomic_dec(&ndlp->cmd_pending); |
@@ -2490,7 +2644,6 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) | |||
2490 | struct Scsi_Host *shost = cmnd->device->host; | 2644 | struct Scsi_Host *shost = cmnd->device->host; |
2491 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | 2645 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; |
2492 | struct lpfc_hba *phba = vport->phba; | 2646 | struct lpfc_hba *phba = vport->phba; |
2493 | struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring]; | ||
2494 | struct lpfc_iocbq *iocb; | 2647 | struct lpfc_iocbq *iocb; |
2495 | struct lpfc_iocbq *abtsiocb; | 2648 | struct lpfc_iocbq *abtsiocb; |
2496 | struct lpfc_scsi_buf *lpfc_cmd; | 2649 | struct lpfc_scsi_buf *lpfc_cmd; |
@@ -2531,7 +2684,10 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) | |||
2531 | icmd = &abtsiocb->iocb; | 2684 | icmd = &abtsiocb->iocb; |
2532 | icmd->un.acxri.abortType = ABORT_TYPE_ABTS; | 2685 | icmd->un.acxri.abortType = ABORT_TYPE_ABTS; |
2533 | icmd->un.acxri.abortContextTag = cmd->ulpContext; | 2686 | icmd->un.acxri.abortContextTag = cmd->ulpContext; |
2534 | icmd->un.acxri.abortIoTag = cmd->ulpIoTag; | 2687 | if (phba->sli_rev == LPFC_SLI_REV4) |
2688 | icmd->un.acxri.abortIoTag = iocb->sli4_xritag; | ||
2689 | else | ||
2690 | icmd->un.acxri.abortIoTag = cmd->ulpIoTag; | ||
2535 | 2691 | ||
2536 | icmd->ulpLe = 1; | 2692 | icmd->ulpLe = 1; |
2537 | icmd->ulpClass = cmd->ulpClass; | 2693 | icmd->ulpClass = cmd->ulpClass; |
@@ -2542,7 +2698,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) | |||
2542 | 2698 | ||
2543 | abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; | 2699 | abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; |
2544 | abtsiocb->vport = vport; | 2700 | abtsiocb->vport = vport; |
2545 | if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) { | 2701 | if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) == |
2702 | IOCB_ERROR) { | ||
2546 | lpfc_sli_release_iocbq(phba, abtsiocb); | 2703 | lpfc_sli_release_iocbq(phba, abtsiocb); |
2547 | ret = FAILED; | 2704 | ret = FAILED; |
2548 | goto out; | 2705 | goto out; |
@@ -2668,8 +2825,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) | |||
2668 | "0703 Issue target reset to TGT %d LUN %d " | 2825 | "0703 Issue target reset to TGT %d LUN %d " |
2669 | "rpi x%x nlp_flag x%x\n", cmnd->device->id, | 2826 | "rpi x%x nlp_flag x%x\n", cmnd->device->id, |
2670 | cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag); | 2827 | cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag); |
2671 | status = lpfc_sli_issue_iocb_wait(phba, | 2828 | status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, |
2672 | &phba->sli.ring[phba->sli.fcp_ring], | ||
2673 | iocbq, iocbqrsp, lpfc_cmd->timeout); | 2829 | iocbq, iocbqrsp, lpfc_cmd->timeout); |
2674 | if (status == IOCB_TIMEDOUT) { | 2830 | if (status == IOCB_TIMEDOUT) { |
2675 | iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; | 2831 | iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; |
@@ -2825,11 +2981,10 @@ lpfc_slave_alloc(struct scsi_device *sdev) | |||
2825 | { | 2981 | { |
2826 | struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; | 2982 | struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; |
2827 | struct lpfc_hba *phba = vport->phba; | 2983 | struct lpfc_hba *phba = vport->phba; |
2828 | struct lpfc_scsi_buf *scsi_buf = NULL; | ||
2829 | struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); | 2984 | struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); |
2830 | uint32_t total = 0, i; | 2985 | uint32_t total = 0; |
2831 | uint32_t num_to_alloc = 0; | 2986 | uint32_t num_to_alloc = 0; |
2832 | unsigned long flags; | 2987 | int num_allocated = 0; |
2833 | 2988 | ||
2834 | if (!rport || fc_remote_port_chkready(rport)) | 2989 | if (!rport || fc_remote_port_chkready(rport)) |
2835 | return -ENXIO; | 2990 | return -ENXIO; |
@@ -2863,20 +3018,13 @@ lpfc_slave_alloc(struct scsi_device *sdev) | |||
2863 | (phba->cfg_hba_queue_depth - total)); | 3018 | (phba->cfg_hba_queue_depth - total)); |
2864 | num_to_alloc = phba->cfg_hba_queue_depth - total; | 3019 | num_to_alloc = phba->cfg_hba_queue_depth - total; |
2865 | } | 3020 | } |
2866 | 3021 | num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc); | |
2867 | for (i = 0; i < num_to_alloc; i++) { | 3022 | if (num_to_alloc != num_allocated) { |
2868 | scsi_buf = lpfc_new_scsi_buf(vport); | 3023 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, |
2869 | if (!scsi_buf) { | 3024 | "0708 Allocation request of %d " |
2870 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, | 3025 | "command buffers did not succeed. " |
2871 | "0706 Failed to allocate " | 3026 | "Allocated %d buffers.\n", |
2872 | "command buffer\n"); | 3027 | num_to_alloc, num_allocated); |
2873 | break; | ||
2874 | } | ||
2875 | |||
2876 | spin_lock_irqsave(&phba->scsi_buf_list_lock, flags); | ||
2877 | phba->total_scsi_bufs++; | ||
2878 | list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list); | ||
2879 | spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags); | ||
2880 | } | 3028 | } |
2881 | return 0; | 3029 | return 0; |
2882 | } | 3030 | } |
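The reworked lpfc_slave_alloc() hunk above batches the buffer allocation and merely warns when fewer buffers come back than were requested, rather than failing. A small sketch of that request-versus-allocated convention, with a hypothetical helper (alloc_bufs) that is not part of the driver:

#include <stdio.h>
#include <stdlib.h>

/* Try to allocate up to 'want' buffers; return how many actually succeeded. */
static int alloc_bufs(void **bufs, int want, size_t size)
{
        int n;

        for (n = 0; n < want; n++) {
                bufs[n] = malloc(size);
                if (!bufs[n])
                        break;          /* stop at the first failure */
        }
        return n;
}

int main(void)
{
        void *bufs[8];
        int want = 8;
        int got = alloc_bufs(bufs, want, 512);

        if (got != want)
                fprintf(stderr, "requested %d buffers, allocated %d\n", want, got);

        while (got--)
                free(bufs[got]);
        return 0;
}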
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index eb5c75c45ba4..e2d07d97fa8b 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
@@ -142,7 +142,7 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba) | |||
142 | } | 142 | } |
143 | 143 | ||
144 | /** | 144 | /** |
145 | * __lpfc_sli_release_iocbq - Release iocb to the iocb pool | 145 | * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool |
146 | * @phba: Pointer to HBA context object. | 146 | * @phba: Pointer to HBA context object. |
147 | * @iocbq: Pointer to driver iocb object. | 147 | * @iocbq: Pointer to driver iocb object. |
148 | * | 148 | * |
@@ -152,7 +152,7 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba) | |||
152 | * clears all other fields of the iocb object when it is freed. | 152 | * clears all other fields of the iocb object when it is freed. |
153 | **/ | 153 | **/ |
154 | static void | 154 | static void |
155 | __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) | 155 | __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) |
156 | { | 156 | { |
157 | size_t start_clean = offsetof(struct lpfc_iocbq, iocb); | 157 | size_t start_clean = offsetof(struct lpfc_iocbq, iocb); |
158 | 158 | ||
@@ -160,10 +160,27 @@ __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) | |||
160 | * Clean all volatile data fields, preserve iotag and node struct. | 160 | * Clean all volatile data fields, preserve iotag and node struct. |
161 | */ | 161 | */ |
162 | memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); | 162 | memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); |
163 | iocbq->sli4_xritag = NO_XRI; | ||
163 | list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); | 164 | list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); |
164 | } | 165 | } |
165 | 166 | ||
166 | /** | 167 | /** |
168 | * __lpfc_sli_release_iocbq - Release iocb to the iocb pool | ||
169 | * @phba: Pointer to HBA context object. | ||
170 | * @iocbq: Pointer to driver iocb object. | ||
171 | * | ||
172 | * This function is called with hbalock held to release driver | ||
173 | * iocb object to the iocb pool. The iotag in the iocb object | ||
174 | * does not change for each use of the iocb object. This function | ||
175 | * clears all other fields of the iocb object when it is freed. | ||
176 | **/ | ||
177 | static void | ||
178 | __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) | ||
179 | { | ||
180 | phba->__lpfc_sli_release_iocbq(phba, iocbq); | ||
181 | } | ||
182 | |||
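__lpfc_sli_release_iocbq_s3() above reuses pooled iocb objects by clearing only the bytes past an offsetof()-derived 'start_clean' boundary, so the iotag (and anything else declared before that boundary) survives from one use to the next. A self-contained sketch of that partial-reset idiom, using a hypothetical struct io_obj:

#include <stdio.h>
#include <string.h>
#include <stddef.h>

/* Fields before 'payload' survive a release; everything after is volatile. */
struct io_obj {
        int iotag;                      /* preserved for the object's lifetime */
        void *node;                     /* preserved */
        char payload[32];               /* cleared on every release */
        int status;                     /* cleared on every release */
};

/* Clear only the volatile tail, as the driver does before pooling the iocb. */
static void release_obj(struct io_obj *obj)
{
        size_t start_clean = offsetof(struct io_obj, payload);

        memset((char *)obj + start_clean, 0, sizeof(*obj) - start_clean);
        /* a real pool would now place obj back on a free list */
}

int main(void)
{
        struct io_obj obj = { .iotag = 42, .status = 7 };

        snprintf(obj.payload, sizeof(obj.payload), "in flight");
        release_obj(&obj);
        printf("iotag %d kept, status %d cleared\n", obj.iotag, obj.status);
        return 0;
}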
183 | /** | ||
167 | * lpfc_sli_release_iocbq - Release iocb to the iocb pool | 184 | * lpfc_sli_release_iocbq - Release iocb to the iocb pool |
168 | * @phba: Pointer to HBA context object. | 185 | * @phba: Pointer to HBA context object. |
169 | * @iocbq: Pointer to driver iocb object. | 186 | * @iocbq: Pointer to driver iocb object. |
@@ -779,8 +796,8 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) | |||
779 | phba->hbqs[i].buffer_count = 0; | 796 | phba->hbqs[i].buffer_count = 0; |
780 | } | 797 | } |
781 | /* Return all HBQ buffer that are in-fly */ | 798 | /* Return all HBQ buffer that are in-fly */ |
782 | list_for_each_entry_safe(dmabuf, next_dmabuf, | 799 | list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list, |
783 | &phba->hbqbuf_in_list, list) { | 800 | list) { |
784 | hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); | 801 | hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); |
785 | list_del(&hbq_buf->dbuf.list); | 802 | list_del(&hbq_buf->dbuf.list); |
786 | if (hbq_buf->tag == -1) { | 803 | if (hbq_buf->tag == -1) { |
@@ -814,10 +831,28 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) | |||
814 | * pointer to the hbq entry if it successfully post the buffer | 831 | * pointer to the hbq entry if it successfully post the buffer |
815 | * else it will return NULL. | 832 | * else it will return NULL. |
816 | **/ | 833 | **/ |
817 | static struct lpfc_hbq_entry * | 834 | static int |
818 | lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, | 835 | lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, |
819 | struct hbq_dmabuf *hbq_buf) | 836 | struct hbq_dmabuf *hbq_buf) |
820 | { | 837 | { |
838 | return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf); | ||
839 | } | ||
840 | |||
841 | /** | ||
842 | * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware | ||
843 | * @phba: Pointer to HBA context object. | ||
844 | * @hbqno: HBQ number. | ||
845 | * @hbq_buf: Pointer to HBQ buffer. | ||
846 | * | ||
847 | * This function is called with the hbalock held to post a hbq buffer to the | ||
848 | * firmware. If the function finds an empty slot in the HBQ, it will post the | ||
849 | * buffer and place it on the hbq_buffer_list. The function will return zero if | ||
850 | * it successfully posts the buffer, else it will return an error. | ||
851 | **/ | ||
852 | static int | ||
853 | lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno, | ||
854 | struct hbq_dmabuf *hbq_buf) | ||
855 | { | ||
821 | struct lpfc_hbq_entry *hbqe; | 856 | struct lpfc_hbq_entry *hbqe; |
822 | dma_addr_t physaddr = hbq_buf->dbuf.phys; | 857 | dma_addr_t physaddr = hbq_buf->dbuf.phys; |
823 | 858 | ||
@@ -838,8 +873,9 @@ lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, | |||
838 | /* flush */ | 873 | /* flush */ |
839 | readl(phba->hbq_put + hbqno); | 874 | readl(phba->hbq_put + hbqno); |
840 | list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list); | 875 | list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list); |
841 | } | 876 | return 0; |
842 | return hbqe; | 877 | } else |
878 | return -ENOMEM; | ||
843 | } | 879 | } |
844 | 880 | ||
845 | /* HBQ for ELS and CT traffic. */ | 881 | /* HBQ for ELS and CT traffic. */ |
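The hunk above also changes the posting routine's contract from "return the HBQ entry or NULL" to "return 0 or -ENOMEM", which is why the call sites in the following hunks invert their checks. A standalone sketch of posting into a fixed-size ring with that zero-on-success convention; the ring layout and names (ring_post, RING_SLOTS) are illustrative only:

#include <stdio.h>
#include <errno.h>

#define RING_SLOTS 4

struct ring {
        unsigned int put;               /* next slot to write */
        unsigned int get;               /* next slot the consumer reads */
        int slot[RING_SLOTS];
};

/* Post one entry; 0 on success, -ENOMEM when the ring is full. */
static int ring_post(struct ring *r, int val)
{
        unsigned int next = (r->put + 1) % RING_SLOTS;

        if (next == r->get)             /* full: advancing put would overrun get */
                return -ENOMEM;
        r->slot[r->put] = val;
        r->put = next;
        return 0;
}

int main(void)
{
        struct ring r = { 0 };
        int i, posted = 0;

        for (i = 0; i < 6; i++)
                if (!ring_post(&r, i))  /* note the inverted check: 0 == success */
                        posted++;
        printf("posted %d of 6 entries\n", posted);
        return 0;
}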
@@ -914,7 +950,7 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) | |||
914 | dbuf.list); | 950 | dbuf.list); |
915 | hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | | 951 | hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | |
916 | (hbqno << 16)); | 952 | (hbqno << 16)); |
917 | if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { | 953 | if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { |
918 | phba->hbqs[hbqno].buffer_count++; | 954 | phba->hbqs[hbqno].buffer_count++; |
919 | posted++; | 955 | posted++; |
920 | } else | 956 | } else |
@@ -965,6 +1001,25 @@ lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) | |||
965 | } | 1001 | } |
966 | 1002 | ||
967 | /** | 1003 | /** |
1004 | * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list | ||
1005 | * @rb_list: Pointer to the hbq buffer list from which to remove the | ||
1006 | * first buffer. | ||
1007 | * | ||
1008 | * This function removes the first hbq buffer on an hbq list and returns a | ||
1009 | * pointer to that buffer. If it finds no buffers on the list it returns NULL. | ||
1010 | **/ | ||
1011 | static struct hbq_dmabuf * | ||
1012 | lpfc_sli_hbqbuf_get(struct list_head *rb_list) | ||
1013 | { | ||
1014 | struct lpfc_dmabuf *d_buf; | ||
1015 | |||
1016 | list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list); | ||
1017 | if (!d_buf) | ||
1018 | return NULL; | ||
1019 | return container_of(d_buf, struct hbq_dmabuf, dbuf); | ||
1020 | } | ||
1021 | |||
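lpfc_sli_hbqbuf_get() above hands back the enclosing hbq_dmabuf by mapping the embedded lpfc_dmabuf member to its container. A self-contained sketch of the container_of() idiom it relies on, with hypothetical types (struct dmabuf, struct hbq_buf):

#include <stdio.h>
#include <stddef.h>

/* container_of: map a pointer to a member back to its enclosing structure. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct dmabuf {
        void *virt;
};

struct hbq_buf {
        int tag;
        struct dmabuf dbuf;             /* embedded member, as in hbq_dmabuf */
};

int main(void)
{
        struct hbq_buf buf = { .tag = 5 };
        struct dmabuf *d = &buf.dbuf;   /* what a generic list would hand back */
        struct hbq_buf *owner = container_of(d, struct hbq_buf, dbuf);

        printf("recovered tag %d\n", owner->tag);
        return 0;
}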
1022 | /** | ||
968 | * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag | 1023 | * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag |
969 | * @phba: Pointer to HBA context object. | 1024 | * @phba: Pointer to HBA context object. |
970 | * @tag: Tag of the hbq buffer. | 1025 | * @tag: Tag of the hbq buffer. |
@@ -985,12 +1040,15 @@ lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) | |||
985 | if (hbqno >= LPFC_MAX_HBQS) | 1040 | if (hbqno >= LPFC_MAX_HBQS) |
986 | return NULL; | 1041 | return NULL; |
987 | 1042 | ||
1043 | spin_lock_irq(&phba->hbalock); | ||
988 | list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) { | 1044 | list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) { |
989 | hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); | 1045 | hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); |
990 | if (hbq_buf->tag == tag) { | 1046 | if (hbq_buf->tag == tag) { |
1047 | spin_unlock_irq(&phba->hbalock); | ||
991 | return hbq_buf; | 1048 | return hbq_buf; |
992 | } | 1049 | } |
993 | } | 1050 | } |
1051 | spin_unlock_irq(&phba->hbalock); | ||
994 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, | 1052 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, |
995 | "1803 Bad hbq tag. Data: x%x x%x\n", | 1053 | "1803 Bad hbq tag. Data: x%x x%x\n", |
996 | tag, phba->hbqs[tag >> 16].buffer_count); | 1054 | tag, phba->hbqs[tag >> 16].buffer_count); |
@@ -1013,9 +1071,8 @@ lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer) | |||
1013 | 1071 | ||
1014 | if (hbq_buffer) { | 1072 | if (hbq_buffer) { |
1015 | hbqno = hbq_buffer->tag >> 16; | 1073 | hbqno = hbq_buffer->tag >> 16; |
1016 | if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { | 1074 | if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) |
1017 | (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); | 1075 | (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); |
1018 | } | ||
1019 | } | 1076 | } |
1020 | } | 1077 | } |
1021 | 1078 | ||
@@ -1317,6 +1374,45 @@ lpfc_sli_get_buff(struct lpfc_hba *phba, | |||
1317 | return &hbq_entry->dbuf; | 1374 | return &hbq_entry->dbuf; |
1318 | } | 1375 | } |
1319 | 1376 | ||
1377 | /** | ||
1378 | * lpfc_complete_unsol_iocb - Complete an unsolicited sequence | ||
1379 | * @phba: Pointer to HBA context object. | ||
1380 | * @pring: Pointer to driver SLI ring object. | ||
1381 | * @saveq: Pointer to the iocbq struct representing the sequence starting frame. | ||
1382 | * @fch_r_ctl: the r_ctl for the first frame of the sequence. | ||
1383 | * @fch_type: the type for the first frame of the sequence. | ||
1384 | * | ||
1385 | * This function is called with no lock held. This function uses the r_ctl and | ||
1386 | * type of the received sequence to find the correct callback function to call | ||
1387 | * to process the sequence. | ||
1388 | **/ | ||
1389 | static int | ||
1390 | lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | ||
1391 | struct lpfc_iocbq *saveq, uint32_t fch_r_ctl, | ||
1392 | uint32_t fch_type) | ||
1393 | { | ||
1394 | int i; | ||
1395 | |||
1396 | /* unSolicited Responses */ | ||
1397 | if (pring->prt[0].profile) { | ||
1398 | if (pring->prt[0].lpfc_sli_rcv_unsol_event) | ||
1399 | (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, | ||
1400 | saveq); | ||
1401 | return 1; | ||
1402 | } | ||
1403 | /* We must search, based on rctl / type | ||
1404 | for the right routine */ | ||
1405 | for (i = 0; i < pring->num_mask; i++) { | ||
1406 | if ((pring->prt[i].rctl == fch_r_ctl) && | ||
1407 | (pring->prt[i].type == fch_type)) { | ||
1408 | if (pring->prt[i].lpfc_sli_rcv_unsol_event) | ||
1409 | (pring->prt[i].lpfc_sli_rcv_unsol_event) | ||
1410 | (phba, pring, saveq); | ||
1411 | return 1; | ||
1412 | } | ||
1413 | } | ||
1414 | return 0; | ||
1415 | } | ||
1320 | 1416 | ||
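lpfc_complete_unsol_iocb() above dispatches a received sequence either to a catch-all "profile" handler or to the handler whose (rctl, type) mask matches the first frame, returning 1 when something handled it. A minimal table-dispatch sketch follows; the rctl/type constants and handler names are placeholders, not values taken from the driver:

#include <stdio.h>
#include <stddef.h>

struct seq;                             /* opaque received sequence */

struct ring_mask {
        unsigned int rctl;
        unsigned int type;
        void (*rcv_unsol)(struct seq *);
};

static void rcv_els(struct seq *s) { (void)s; printf("ELS handler\n"); }
static void rcv_ct(struct seq *s)  { (void)s; printf("CT handler\n"); }

static const struct ring_mask masks[] = {
        { 0x22, 0x01, rcv_els },
        { 0x02, 0x20, rcv_ct  },
};

/* Return 1 when a handler matched the frame header, 0 otherwise. */
static int dispatch_unsol(struct seq *s, unsigned int rctl, unsigned int type)
{
        size_t i;

        for (i = 0; i < sizeof(masks) / sizeof(masks[0]); i++) {
                if (masks[i].rctl == rctl && masks[i].type == type) {
                        masks[i].rcv_unsol(s);
                        return 1;
                }
        }
        return 0;                       /* caller logs "unexpected Rctl/Type" */
}

int main(void)
{
        if (!dispatch_unsol(NULL, 0x99, 0x99))
                printf("unexpected Rctl x99 Type x99\n");
        return dispatch_unsol(NULL, 0x22, 0x01) ? 0 : 1;
}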
1321 | /** | 1417 | /** |
1322 | * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler | 1418 | * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler |
@@ -1339,7 +1435,7 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
1339 | IOCB_t * irsp; | 1435 | IOCB_t * irsp; |
1340 | WORD5 * w5p; | 1436 | WORD5 * w5p; |
1341 | uint32_t Rctl, Type; | 1437 | uint32_t Rctl, Type; |
1342 | uint32_t match, i; | 1438 | uint32_t match; |
1343 | struct lpfc_iocbq *iocbq; | 1439 | struct lpfc_iocbq *iocbq; |
1344 | struct lpfc_dmabuf *dmzbuf; | 1440 | struct lpfc_dmabuf *dmzbuf; |
1345 | 1441 | ||
@@ -1482,35 +1578,12 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
1482 | } | 1578 | } |
1483 | } | 1579 | } |
1484 | 1580 | ||
1485 | /* unSolicited Responses */ | 1581 | if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type)) |
1486 | if (pring->prt[0].profile) { | ||
1487 | if (pring->prt[0].lpfc_sli_rcv_unsol_event) | ||
1488 | (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, | ||
1489 | saveq); | ||
1490 | match = 1; | ||
1491 | } else { | ||
1492 | /* We must search, based on rctl / type | ||
1493 | for the right routine */ | ||
1494 | for (i = 0; i < pring->num_mask; i++) { | ||
1495 | if ((pring->prt[i].rctl == Rctl) | ||
1496 | && (pring->prt[i].type == Type)) { | ||
1497 | if (pring->prt[i].lpfc_sli_rcv_unsol_event) | ||
1498 | (pring->prt[i].lpfc_sli_rcv_unsol_event) | ||
1499 | (phba, pring, saveq); | ||
1500 | match = 1; | ||
1501 | break; | ||
1502 | } | ||
1503 | } | ||
1504 | } | ||
1505 | if (match == 0) { | ||
1506 | /* Unexpected Rctl / Type received */ | ||
1507 | /* Ring <ringno> handler: unexpected | ||
1508 | Rctl <Rctl> Type <Type> received */ | ||
1509 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, | 1582 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, |
1510 | "0313 Ring %d handler: unexpected Rctl x%x " | 1583 | "0313 Ring %d handler: unexpected Rctl x%x " |
1511 | "Type x%x received\n", | 1584 | "Type x%x received\n", |
1512 | pring->ringno, Rctl, Type); | 1585 | pring->ringno, Rctl, Type); |
1513 | } | 1586 | |
1514 | return 1; | 1587 | return 1; |
1515 | } | 1588 | } |
1516 | 1589 | ||
@@ -1552,6 +1625,37 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, | |||
1552 | } | 1625 | } |
1553 | 1626 | ||
1554 | /** | 1627 | /** |
1628 | * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag | ||
1629 | * @phba: Pointer to HBA context object. | ||
1630 | * @pring: Pointer to driver SLI ring object. | ||
1631 | * @iotag: IOCB tag. | ||
1632 | * | ||
1633 | * This function looks up the iocb_lookup table to get the command iocb | ||
1634 | * corresponding to the given iotag. This function is called with the | ||
1635 | * hbalock held. | ||
1636 | * This function returns the command iocb object if it finds the command | ||
1637 | * iocb else returns NULL. | ||
1638 | **/ | ||
1639 | static struct lpfc_iocbq * | ||
1640 | lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, | ||
1641 | struct lpfc_sli_ring *pring, uint16_t iotag) | ||
1642 | { | ||
1643 | struct lpfc_iocbq *cmd_iocb; | ||
1644 | |||
1645 | if (iotag != 0 && iotag <= phba->sli.last_iotag) { | ||
1646 | cmd_iocb = phba->sli.iocbq_lookup[iotag]; | ||
1647 | list_del_init(&cmd_iocb->list); | ||
1648 | pring->txcmplq_cnt--; | ||
1649 | return cmd_iocb; | ||
1650 | } | ||
1651 | |||
1652 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
1653 | "0372 iotag x%x is out off range: max iotag (x%x)\n", | ||
1654 | iotag, phba->sli.last_iotag); | ||
1655 | return NULL; | ||
1656 | } | ||
1657 | |||
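lpfc_sli_iocbq_lookup_by_tag() above is a bounds-checked direct array lookup: the iotag indexes iocbq_lookup, and anything outside 1..last_iotag is logged and rejected (the real routine also unlinks the iocb from the txcmplq, which the sketch omits). A standalone sketch under those assumptions, with hypothetical names (lookup_by_tag, MAX_IOTAG):

#include <stdio.h>

#define MAX_IOTAG 8

struct cmd { int tag; };

static struct cmd *lookup[MAX_IOTAG + 1];       /* index 0 unused: tag 0 is invalid */
static int last_iotag = MAX_IOTAG;

/* Return the command for 'iotag', or NULL when the tag is out of range. */
static struct cmd *lookup_by_tag(unsigned int iotag)
{
        if (iotag != 0 && iotag <= (unsigned int)last_iotag)
                return lookup[iotag];
        fprintf(stderr, "iotag %u is out of range (max %d)\n", iotag, last_iotag);
        return NULL;
}

int main(void)
{
        struct cmd c = { .tag = 3 };
        struct cmd *found;

        lookup[3] = &c;
        found = lookup_by_tag(3);
        if (found)
                printf("found tag %d\n", found->tag);
        lookup_by_tag(99);              /* out of range: logged, returns NULL */
        return 0;
}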
1658 | /** | ||
1555 | * lpfc_sli_process_sol_iocb - process solicited iocb completion | 1659 | * lpfc_sli_process_sol_iocb - process solicited iocb completion |
1556 | * @phba: Pointer to HBA context object. | 1660 | * @phba: Pointer to HBA context object. |
1557 | * @pring: Pointer to driver SLI ring object. | 1661 | * @pring: Pointer to driver SLI ring object. |
@@ -1954,7 +2058,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, | |||
1954 | if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && | 2058 | if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && |
1955 | (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { | 2059 | (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { |
1956 | spin_unlock_irqrestore(&phba->hbalock, iflag); | 2060 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
1957 | lpfc_rampdown_queue_depth(phba); | 2061 | phba->lpfc_rampdown_queue_depth(phba); |
1958 | spin_lock_irqsave(&phba->hbalock, iflag); | 2062 | spin_lock_irqsave(&phba->hbalock, iflag); |
1959 | } | 2063 | } |
1960 | 2064 | ||
@@ -2068,39 +2172,215 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, | |||
2068 | } | 2172 | } |
2069 | 2173 | ||
2070 | /** | 2174 | /** |
2071 | * lpfc_sli_handle_slow_ring_event - Handle ring events for non-FCP rings | 2175 | * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb |
2176 | * @phba: Pointer to HBA context object. | ||
2177 | * @pring: Pointer to driver SLI ring object. | ||
2178 | * @rspiocbp: Pointer to driver response IOCB object. | ||
2179 | * | ||
2180 | * This function is called from the worker thread when there is a slow-path | ||
2181 | * response IOCB to process. This function chains all the response iocbs until | ||
2182 | * seeing the iocb with the LE bit set. The function will call | ||
2183 | * lpfc_sli_process_sol_iocb function if the response iocb indicates a | ||
2184 | * completion of a command iocb. The function will call the | ||
2185 | * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb. | ||
2186 | * The function frees the resources or calls the completion handler if this | ||
2187 | * iocb is an abort completion. The function returns NULL when the response | ||
2188 | * iocb has the LE bit set and all the chained iocbs are processed, otherwise | ||
2189 | * this function shall chain the iocb on to the iocb_continueq and return the | ||
2190 | * response iocb passed in. | ||
2191 | **/ | ||
2192 | static struct lpfc_iocbq * | ||
2193 | lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | ||
2194 | struct lpfc_iocbq *rspiocbp) | ||
2195 | { | ||
2196 | struct lpfc_iocbq *saveq; | ||
2197 | struct lpfc_iocbq *cmdiocbp; | ||
2198 | struct lpfc_iocbq *next_iocb; | ||
2199 | IOCB_t *irsp = NULL; | ||
2200 | uint32_t free_saveq; | ||
2201 | uint8_t iocb_cmd_type; | ||
2202 | lpfc_iocb_type type; | ||
2203 | unsigned long iflag; | ||
2204 | int rc; | ||
2205 | |||
2206 | spin_lock_irqsave(&phba->hbalock, iflag); | ||
2207 | /* First add the response iocb to the continueq list */ | ||
2208 | list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); | ||
2209 | pring->iocb_continueq_cnt++; | ||
2210 | |||
2211 | /* Now, determine whether the list is completed for processing */ | ||
2212 | irsp = &rspiocbp->iocb; | ||
2213 | if (irsp->ulpLe) { | ||
2214 | /* | ||
2215 | * By default, the driver expects to free all resources | ||
2216 | * associated with this iocb completion. | ||
2217 | */ | ||
2218 | free_saveq = 1; | ||
2219 | saveq = list_get_first(&pring->iocb_continueq, | ||
2220 | struct lpfc_iocbq, list); | ||
2221 | irsp = &(saveq->iocb); | ||
2222 | list_del_init(&pring->iocb_continueq); | ||
2223 | pring->iocb_continueq_cnt = 0; | ||
2224 | |||
2225 | pring->stats.iocb_rsp++; | ||
2226 | |||
2227 | /* | ||
2228 | * If resource errors reported from HBA, reduce | ||
2229 | * queuedepths of the SCSI device. | ||
2230 | */ | ||
2231 | if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && | ||
2232 | (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { | ||
2233 | spin_unlock_irqrestore(&phba->hbalock, iflag); | ||
2234 | phba->lpfc_rampdown_queue_depth(phba); | ||
2235 | spin_lock_irqsave(&phba->hbalock, iflag); | ||
2236 | } | ||
2237 | |||
2238 | if (irsp->ulpStatus) { | ||
2239 | /* Rsp ring <ringno> error: IOCB */ | ||
2240 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, | ||
2241 | "0328 Rsp Ring %d error: " | ||
2242 | "IOCB Data: " | ||
2243 | "x%x x%x x%x x%x " | ||
2244 | "x%x x%x x%x x%x " | ||
2245 | "x%x x%x x%x x%x " | ||
2246 | "x%x x%x x%x x%x\n", | ||
2247 | pring->ringno, | ||
2248 | irsp->un.ulpWord[0], | ||
2249 | irsp->un.ulpWord[1], | ||
2250 | irsp->un.ulpWord[2], | ||
2251 | irsp->un.ulpWord[3], | ||
2252 | irsp->un.ulpWord[4], | ||
2253 | irsp->un.ulpWord[5], | ||
2254 | *(((uint32_t *) irsp) + 6), | ||
2255 | *(((uint32_t *) irsp) + 7), | ||
2256 | *(((uint32_t *) irsp) + 8), | ||
2257 | *(((uint32_t *) irsp) + 9), | ||
2258 | *(((uint32_t *) irsp) + 10), | ||
2259 | *(((uint32_t *) irsp) + 11), | ||
2260 | *(((uint32_t *) irsp) + 12), | ||
2261 | *(((uint32_t *) irsp) + 13), | ||
2262 | *(((uint32_t *) irsp) + 14), | ||
2263 | *(((uint32_t *) irsp) + 15)); | ||
2264 | } | ||
2265 | |||
2266 | /* | ||
2267 | * Fetch the IOCB command type and call the correct completion | ||
2268 | * routine. Solicited and Unsolicited IOCBs on the ELS ring | ||
2269 | * get freed back to the lpfc_iocb_list by the discovery | ||
2270 | * kernel thread. | ||
2271 | */ | ||
2272 | iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; | ||
2273 | type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); | ||
2274 | switch (type) { | ||
2275 | case LPFC_SOL_IOCB: | ||
2276 | spin_unlock_irqrestore(&phba->hbalock, iflag); | ||
2277 | rc = lpfc_sli_process_sol_iocb(phba, pring, saveq); | ||
2278 | spin_lock_irqsave(&phba->hbalock, iflag); | ||
2279 | break; | ||
2280 | |||
2281 | case LPFC_UNSOL_IOCB: | ||
2282 | spin_unlock_irqrestore(&phba->hbalock, iflag); | ||
2283 | rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq); | ||
2284 | spin_lock_irqsave(&phba->hbalock, iflag); | ||
2285 | if (!rc) | ||
2286 | free_saveq = 0; | ||
2287 | break; | ||
2288 | |||
2289 | case LPFC_ABORT_IOCB: | ||
2290 | cmdiocbp = NULL; | ||
2291 | if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) | ||
2292 | cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, | ||
2293 | saveq); | ||
2294 | if (cmdiocbp) { | ||
2295 | /* Call the specified completion routine */ | ||
2296 | if (cmdiocbp->iocb_cmpl) { | ||
2297 | spin_unlock_irqrestore(&phba->hbalock, | ||
2298 | iflag); | ||
2299 | (cmdiocbp->iocb_cmpl)(phba, cmdiocbp, | ||
2300 | saveq); | ||
2301 | spin_lock_irqsave(&phba->hbalock, | ||
2302 | iflag); | ||
2303 | } else | ||
2304 | __lpfc_sli_release_iocbq(phba, | ||
2305 | cmdiocbp); | ||
2306 | } | ||
2307 | break; | ||
2308 | |||
2309 | case LPFC_UNKNOWN_IOCB: | ||
2310 | if (irsp->ulpCommand == CMD_ADAPTER_MSG) { | ||
2311 | char adaptermsg[LPFC_MAX_ADPTMSG]; | ||
2312 | memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); | ||
2313 | memcpy(&adaptermsg[0], (uint8_t *)irsp, | ||
2314 | MAX_MSG_DATA); | ||
2315 | dev_warn(&((phba->pcidev)->dev), | ||
2316 | "lpfc%d: %s\n", | ||
2317 | phba->brd_no, adaptermsg); | ||
2318 | } else { | ||
2319 | /* Unknown IOCB command */ | ||
2320 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
2321 | "0335 Unknown IOCB " | ||
2322 | "command Data: x%x " | ||
2323 | "x%x x%x x%x\n", | ||
2324 | irsp->ulpCommand, | ||
2325 | irsp->ulpStatus, | ||
2326 | irsp->ulpIoTag, | ||
2327 | irsp->ulpContext); | ||
2328 | } | ||
2329 | break; | ||
2330 | } | ||
2331 | |||
2332 | if (free_saveq) { | ||
2333 | list_for_each_entry_safe(rspiocbp, next_iocb, | ||
2334 | &saveq->list, list) { | ||
2335 | list_del(&rspiocbp->list); | ||
2336 | __lpfc_sli_release_iocbq(phba, rspiocbp); | ||
2337 | } | ||
2338 | __lpfc_sli_release_iocbq(phba, saveq); | ||
2339 | } | ||
2340 | rspiocbp = NULL; | ||
2341 | } | ||
2342 | spin_unlock_irqrestore(&phba->hbalock, iflag); | ||
2343 | return rspiocbp; | ||
2344 | } | ||
2345 | |||
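lpfc_sli_sp_handle_rspiocb() above accumulates response iocbs on iocb_continueq and only processes the chain once an entry arrives with the LE (last element) bit set, returning NULL in that case and the passed-in iocb otherwise. A simplified, single-threaded sketch of that accumulate-until-last pattern, with hypothetical names (struct frag, handle_frag):

#include <stdio.h>

struct frag {
        int last;                       /* stands in for the LE bit */
        int data;
        struct frag *next;
};

static struct frag *chain_head, *chain_tail;

/*
 * Queue 'f'; when it carries the last-element flag, consume the whole chain
 * and return NULL, otherwise return 'f' to signal "still accumulating".
 */
static struct frag *handle_frag(struct frag *f)
{
        f->next = NULL;
        if (chain_tail)
                chain_tail->next = f;
        else
                chain_head = f;
        chain_tail = f;

        if (!f->last)
                return f;

        for (f = chain_head; f; f = f->next)    /* process the completed sequence */
                printf("fragment %d\n", f->data);
        chain_head = chain_tail = NULL;
        return NULL;
}

int main(void)
{
        struct frag a = { 0, 1 }, b = { 0, 2 }, c = { 1, 3 };

        handle_frag(&a);
        handle_frag(&b);
        return handle_frag(&c) ? 1 : 0;         /* NULL here: the chain was consumed */
}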
2346 | /** | ||
2347 | * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs | ||
2072 | * @phba: Pointer to HBA context object. | 2348 | * @phba: Pointer to HBA context object. |
2073 | * @pring: Pointer to driver SLI ring object. | 2349 | * @pring: Pointer to driver SLI ring object. |
2074 | * @mask: Host attention register mask for this ring. | 2350 | * @mask: Host attention register mask for this ring. |
2075 | * | 2351 | * |
2076 | * This function is called from the worker thread when there is a ring | 2352 | * This routine wraps the actual slow_ring event process routine through |
2077 | * event for non-fcp rings. The caller does not hold any lock . | 2353 | * the API jump table function pointer in the lpfc_hba struct. |
2078 | * The function processes each response iocb in the response ring until it | ||
2079 | * finds an iocb with LE bit set and chains all the iocbs upto the iocb with | ||
2080 | * LE bit set. The function will call lpfc_sli_process_sol_iocb function if the | ||
2081 | * response iocb indicates a completion of a command iocb. The function | ||
2082 | * will call lpfc_sli_process_unsol_iocb function if this is an unsolicited | ||
2083 | * iocb. The function frees the resources or calls the completion handler if | ||
2084 | * this iocb is an abort completion. The function returns 0 when the allocated | ||
2085 | * iocbs are not freed, otherwise returns 1. | ||
2086 | **/ | 2354 | **/ |
2087 | int | 2355 | void |
2088 | lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, | 2356 | lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, |
2089 | struct lpfc_sli_ring *pring, uint32_t mask) | 2357 | struct lpfc_sli_ring *pring, uint32_t mask) |
2090 | { | 2358 | { |
2359 | phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask); | ||
2360 | } | ||
2361 | |||
2362 | /** | ||
2363 | * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings | ||
2364 | * @phba: Pointer to HBA context object. | ||
2365 | * @pring: Pointer to driver SLI ring object. | ||
2366 | * @mask: Host attention register mask for this ring. | ||
2367 | * | ||
2368 | * This function is called from the worker thread when there is a ring event | ||
2369 | * for non-fcp rings. The caller does not hold any lock. The function | ||
2370 | * removes each response iocb from the response ring and calls the handle | ||
2371 | * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. | ||
2372 | **/ | ||
2373 | static void | ||
2374 | lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, | ||
2375 | struct lpfc_sli_ring *pring, uint32_t mask) | ||
2376 | { | ||
2091 | struct lpfc_pgp *pgp; | 2377 | struct lpfc_pgp *pgp; |
2092 | IOCB_t *entry; | 2378 | IOCB_t *entry; |
2093 | IOCB_t *irsp = NULL; | 2379 | IOCB_t *irsp = NULL; |
2094 | struct lpfc_iocbq *rspiocbp = NULL; | 2380 | struct lpfc_iocbq *rspiocbp = NULL; |
2095 | struct lpfc_iocbq *next_iocb; | ||
2096 | struct lpfc_iocbq *cmdiocbp; | ||
2097 | struct lpfc_iocbq *saveq; | ||
2098 | uint8_t iocb_cmd_type; | ||
2099 | lpfc_iocb_type type; | ||
2100 | uint32_t status, free_saveq; | ||
2101 | uint32_t portRspPut, portRspMax; | 2381 | uint32_t portRspPut, portRspMax; |
2102 | int rc = 1; | ||
2103 | unsigned long iflag; | 2382 | unsigned long iflag; |
2383 | uint32_t status; | ||
2104 | 2384 | ||
2105 | pgp = &phba->port_gp[pring->ringno]; | 2385 | pgp = &phba->port_gp[pring->ringno]; |
2106 | spin_lock_irqsave(&phba->hbalock, iflag); | 2386 | spin_lock_irqsave(&phba->hbalock, iflag); |
@@ -2128,7 +2408,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, | |||
2128 | phba->work_hs = HS_FFER3; | 2408 | phba->work_hs = HS_FFER3; |
2129 | lpfc_handle_eratt(phba); | 2409 | lpfc_handle_eratt(phba); |
2130 | 2410 | ||
2131 | return 1; | 2411 | return; |
2132 | } | 2412 | } |
2133 | 2413 | ||
2134 | rmb(); | 2414 | rmb(); |
@@ -2173,138 +2453,10 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, | |||
2173 | 2453 | ||
2174 | writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); | 2454 | writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); |
2175 | 2455 | ||
2176 | list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); | 2456 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
2177 | 2457 | /* Handle the response IOCB */ | |
2178 | pring->iocb_continueq_cnt++; | 2458 | rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp); |
2179 | if (irsp->ulpLe) { | 2459 | spin_lock_irqsave(&phba->hbalock, iflag); |
2180 | /* | ||
2181 | * By default, the driver expects to free all resources | ||
2182 | * associated with this iocb completion. | ||
2183 | */ | ||
2184 | free_saveq = 1; | ||
2185 | saveq = list_get_first(&pring->iocb_continueq, | ||
2186 | struct lpfc_iocbq, list); | ||
2187 | irsp = &(saveq->iocb); | ||
2188 | list_del_init(&pring->iocb_continueq); | ||
2189 | pring->iocb_continueq_cnt = 0; | ||
2190 | |||
2191 | pring->stats.iocb_rsp++; | ||
2192 | |||
2193 | /* | ||
2194 | * If resource errors reported from HBA, reduce | ||
2195 | * queuedepths of the SCSI device. | ||
2196 | */ | ||
2197 | if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && | ||
2198 | (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { | ||
2199 | spin_unlock_irqrestore(&phba->hbalock, iflag); | ||
2200 | lpfc_rampdown_queue_depth(phba); | ||
2201 | spin_lock_irqsave(&phba->hbalock, iflag); | ||
2202 | } | ||
2203 | |||
2204 | if (irsp->ulpStatus) { | ||
2205 | /* Rsp ring <ringno> error: IOCB */ | ||
2206 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, | ||
2207 | "0328 Rsp Ring %d error: " | ||
2208 | "IOCB Data: " | ||
2209 | "x%x x%x x%x x%x " | ||
2210 | "x%x x%x x%x x%x " | ||
2211 | "x%x x%x x%x x%x " | ||
2212 | "x%x x%x x%x x%x\n", | ||
2213 | pring->ringno, | ||
2214 | irsp->un.ulpWord[0], | ||
2215 | irsp->un.ulpWord[1], | ||
2216 | irsp->un.ulpWord[2], | ||
2217 | irsp->un.ulpWord[3], | ||
2218 | irsp->un.ulpWord[4], | ||
2219 | irsp->un.ulpWord[5], | ||
2220 | *(((uint32_t *) irsp) + 6), | ||
2221 | *(((uint32_t *) irsp) + 7), | ||
2222 | *(((uint32_t *) irsp) + 8), | ||
2223 | *(((uint32_t *) irsp) + 9), | ||
2224 | *(((uint32_t *) irsp) + 10), | ||
2225 | *(((uint32_t *) irsp) + 11), | ||
2226 | *(((uint32_t *) irsp) + 12), | ||
2227 | *(((uint32_t *) irsp) + 13), | ||
2228 | *(((uint32_t *) irsp) + 14), | ||
2229 | *(((uint32_t *) irsp) + 15)); | ||
2230 | } | ||
2231 | |||
2232 | /* | ||
2233 | * Fetch the IOCB command type and call the correct | ||
2234 | * completion routine. Solicited and Unsolicited | ||
2235 | * IOCBs on the ELS ring get freed back to the | ||
2236 | * lpfc_iocb_list by the discovery kernel thread. | ||
2237 | */ | ||
2238 | iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; | ||
2239 | type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); | ||
2240 | if (type == LPFC_SOL_IOCB) { | ||
2241 | spin_unlock_irqrestore(&phba->hbalock, iflag); | ||
2242 | rc = lpfc_sli_process_sol_iocb(phba, pring, | ||
2243 | saveq); | ||
2244 | spin_lock_irqsave(&phba->hbalock, iflag); | ||
2245 | } else if (type == LPFC_UNSOL_IOCB) { | ||
2246 | spin_unlock_irqrestore(&phba->hbalock, iflag); | ||
2247 | rc = lpfc_sli_process_unsol_iocb(phba, pring, | ||
2248 | saveq); | ||
2249 | spin_lock_irqsave(&phba->hbalock, iflag); | ||
2250 | if (!rc) | ||
2251 | free_saveq = 0; | ||
2252 | } else if (type == LPFC_ABORT_IOCB) { | ||
2253 | if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) && | ||
2254 | ((cmdiocbp = | ||
2255 | lpfc_sli_iocbq_lookup(phba, pring, | ||
2256 | saveq)))) { | ||
2257 | /* Call the specified completion | ||
2258 | routine */ | ||
2259 | if (cmdiocbp->iocb_cmpl) { | ||
2260 | spin_unlock_irqrestore( | ||
2261 | &phba->hbalock, | ||
2262 | iflag); | ||
2263 | (cmdiocbp->iocb_cmpl) (phba, | ||
2264 | cmdiocbp, saveq); | ||
2265 | spin_lock_irqsave( | ||
2266 | &phba->hbalock, | ||
2267 | iflag); | ||
2268 | } else | ||
2269 | __lpfc_sli_release_iocbq(phba, | ||
2270 | cmdiocbp); | ||
2271 | } | ||
2272 | } else if (type == LPFC_UNKNOWN_IOCB) { | ||
2273 | if (irsp->ulpCommand == CMD_ADAPTER_MSG) { | ||
2274 | |||
2275 | char adaptermsg[LPFC_MAX_ADPTMSG]; | ||
2276 | |||
2277 | memset(adaptermsg, 0, | ||
2278 | LPFC_MAX_ADPTMSG); | ||
2279 | memcpy(&adaptermsg[0], (uint8_t *) irsp, | ||
2280 | MAX_MSG_DATA); | ||
2281 | dev_warn(&((phba->pcidev)->dev), | ||
2282 | "lpfc%d: %s\n", | ||
2283 | phba->brd_no, adaptermsg); | ||
2284 | } else { | ||
2285 | /* Unknown IOCB command */ | ||
2286 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
2287 | "0335 Unknown IOCB " | ||
2288 | "command Data: x%x " | ||
2289 | "x%x x%x x%x\n", | ||
2290 | irsp->ulpCommand, | ||
2291 | irsp->ulpStatus, | ||
2292 | irsp->ulpIoTag, | ||
2293 | irsp->ulpContext); | ||
2294 | } | ||
2295 | } | ||
2296 | |||
2297 | if (free_saveq) { | ||
2298 | list_for_each_entry_safe(rspiocbp, next_iocb, | ||
2299 | &saveq->list, list) { | ||
2300 | list_del(&rspiocbp->list); | ||
2301 | __lpfc_sli_release_iocbq(phba, | ||
2302 | rspiocbp); | ||
2303 | } | ||
2304 | __lpfc_sli_release_iocbq(phba, saveq); | ||
2305 | } | ||
2306 | rspiocbp = NULL; | ||
2307 | } | ||
2308 | 2460 | ||
2309 | /* | 2461 | /* |
2310 | * If the port response put pointer has not been updated, sync | 2462 | * If the port response put pointer has not been updated, sync |
@@ -2338,7 +2490,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, | |||
2338 | } | 2490 | } |
2339 | 2491 | ||
2340 | spin_unlock_irqrestore(&phba->hbalock, iflag); | 2492 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
2341 | return rc; | 2493 | return; |
2342 | } | 2494 | } |
2343 | 2495 | ||
2344 | /** | 2496 | /** |
@@ -2420,7 +2572,7 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) | |||
2420 | } | 2572 | } |
2421 | 2573 | ||
2422 | /** | 2574 | /** |
2423 | * lpfc_sli_brdready - Check for host status bits | 2575 | * lpfc_sli_brdready_s3 - Check for sli3 host ready status |
2424 | * @phba: Pointer to HBA context object. | 2576 | * @phba: Pointer to HBA context object. |
2425 | * @mask: Bit mask to be checked. | 2577 | * @mask: Bit mask to be checked. |
2426 | * | 2578 | * |
@@ -2432,8 +2584,8 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) | |||
2432 | * function returns 1 when HBA fail to restart otherwise returns | 2584 | * function returns 1 when HBA fail to restart otherwise returns |
2433 | * zero. | 2585 | * zero. |
2434 | **/ | 2586 | **/ |
2435 | int | 2587 | static int |
2436 | lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) | 2588 | lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) |
2437 | { | 2589 | { |
2438 | uint32_t status; | 2590 | uint32_t status; |
2439 | int i = 0; | 2591 | int i = 0; |
@@ -2647,7 +2799,7 @@ lpfc_sli_brdkill(struct lpfc_hba *phba) | |||
2647 | } | 2799 | } |
2648 | 2800 | ||
2649 | /** | 2801 | /** |
2650 | * lpfc_sli_brdreset - Reset the HBA | 2802 | * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA |
2651 | * @phba: Pointer to HBA context object. | 2803 | * @phba: Pointer to HBA context object. |
2652 | * | 2804 | * |
2653 | * This function resets the HBA by writing HC_INITFF to the control | 2805 | * This function resets the HBA by writing HC_INITFF to the control |
@@ -2683,7 +2835,8 @@ lpfc_sli_brdreset(struct lpfc_hba *phba) | |||
2683 | (cfg_value & | 2835 | (cfg_value & |
2684 | ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); | 2836 | ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); |
2685 | 2837 | ||
2686 | psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA); | 2838 | psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA); |
2839 | |||
2687 | /* Now toggle INITFF bit in the Host Control Register */ | 2840 | /* Now toggle INITFF bit in the Host Control Register */ |
2688 | writel(HC_INITFF, phba->HCregaddr); | 2841 | writel(HC_INITFF, phba->HCregaddr); |
2689 | mdelay(1); | 2842 | mdelay(1); |
@@ -3289,32 +3442,20 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba) | |||
3289 | 3442 | ||
3290 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, | 3443 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
3291 | "0345 Resetting board due to mailbox timeout\n"); | 3444 | "0345 Resetting board due to mailbox timeout\n"); |
3292 | /* | 3445 | |
3293 | * lpfc_offline calls lpfc_sli_hba_down which will clean up | 3446 | /* Reset the HBA device */ |
3294 | * on oustanding mailbox commands. | 3447 | lpfc_reset_hba(phba); |
3295 | */ | ||
3296 | /* If resets are disabled then set error state and return. */ | ||
3297 | if (!phba->cfg_enable_hba_reset) { | ||
3298 | phba->link_state = LPFC_HBA_ERROR; | ||
3299 | return; | ||
3300 | } | ||
3301 | lpfc_offline_prep(phba); | ||
3302 | lpfc_offline(phba); | ||
3303 | lpfc_sli_brdrestart(phba); | ||
3304 | lpfc_online(phba); | ||
3305 | lpfc_unblock_mgmt_io(phba); | ||
3306 | return; | ||
3307 | } | 3448 | } |
3308 | 3449 | ||
3309 | /** | 3450 | /** |
3310 | * lpfc_sli_issue_mbox - Issue a mailbox command to firmware | 3451 | * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware |
3311 | * @phba: Pointer to HBA context object. | 3452 | * @phba: Pointer to HBA context object. |
3312 | * @pmbox: Pointer to mailbox object. | 3453 | * @pmbox: Pointer to mailbox object. |
3313 | * @flag: Flag indicating how the mailbox need to be processed. | 3454 | * @flag: Flag indicating how the mailbox need to be processed. |
3314 | * | 3455 | * |
3315 | * This function is called by discovery code and HBA management code | 3456 | * This function is called by discovery code and HBA management code |
3316 | * to submit a mailbox command to firmware. This function gets the | 3457 | * to submit a mailbox command to firmware with SLI-3 interface spec. This |
3317 | * hbalock to protect the data structures. | 3458 | * function gets the hbalock to protect the data structures. |
3318 | * The mailbox command can be submitted in polling mode, in which case | 3459 | * The mailbox command can be submitted in polling mode, in which case |
3319 | * this function will wait in a polling loop for the completion of the | 3460 | * this function will wait in a polling loop for the completion of the |
3320 | * mailbox. | 3461 | * mailbox. |
@@ -3332,8 +3473,9 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba) | |||
3332 | * return codes the caller owns the mailbox command after the return of | 3473 | * return codes the caller owns the mailbox command after the return of |
3333 | * the function. | 3474 | * the function. |
3334 | **/ | 3475 | **/ |
3335 | int | 3476 | static int |
3336 | lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) | 3477 | lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, |
3478 | uint32_t flag) | ||
3337 | { | 3479 | { |
3338 | MAILBOX_t *mb; | 3480 | MAILBOX_t *mb; |
3339 | struct lpfc_sli *psli = &phba->sli; | 3481 | struct lpfc_sli *psli = &phba->sli; |
@@ -3349,6 +3491,10 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) | |||
3349 | spin_lock_irqsave(&phba->hbalock, drvr_flag); | 3491 | spin_lock_irqsave(&phba->hbalock, drvr_flag); |
3350 | if (!pmbox) { | 3492 | if (!pmbox) { |
3351 | /* processing mbox queue from intr_handler */ | 3493 | /* processing mbox queue from intr_handler */ |
3494 | if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { | ||
3495 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); | ||
3496 | return MBX_SUCCESS; | ||
3497 | } | ||
3352 | processing_queue = 1; | 3498 | processing_queue = 1; |
3353 | phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; | 3499 | phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; |
3354 | pmbox = lpfc_mbox_get(phba); | 3500 | pmbox = lpfc_mbox_get(phba); |
@@ -3365,7 +3511,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) | |||
3365 | lpfc_printf_log(phba, KERN_ERR, | 3511 | lpfc_printf_log(phba, KERN_ERR, |
3366 | LOG_MBOX | LOG_VPORT, | 3512 | LOG_MBOX | LOG_VPORT, |
3367 | "1806 Mbox x%x failed. No vport\n", | 3513 | "1806 Mbox x%x failed. No vport\n", |
3368 | pmbox->mb.mbxCommand); | 3514 | pmbox->u.mb.mbxCommand); |
3369 | dump_stack(); | 3515 | dump_stack(); |
3370 | goto out_not_finished; | 3516 | goto out_not_finished; |
3371 | } | 3517 | } |
@@ -3385,21 +3531,29 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) | |||
3385 | 3531 | ||
3386 | psli = &phba->sli; | 3532 | psli = &phba->sli; |
3387 | 3533 | ||
3388 | mb = &pmbox->mb; | 3534 | mb = &pmbox->u.mb; |
3389 | status = MBX_SUCCESS; | 3535 | status = MBX_SUCCESS; |
3390 | 3536 | ||
3391 | if (phba->link_state == LPFC_HBA_ERROR) { | 3537 | if (phba->link_state == LPFC_HBA_ERROR) { |
3392 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); | 3538 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); |
3393 | 3539 | ||
3394 | /* Mbox command <mbxCommand> cannot issue */ | 3540 | /* Mbox command <mbxCommand> cannot issue */ |
3395 | LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); | 3541 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
3542 | "(%d):0311 Mailbox command x%x cannot " | ||
3543 | "issue Data: x%x x%x\n", | ||
3544 | pmbox->vport ? pmbox->vport->vpi : 0, | ||
3545 | pmbox->u.mb.mbxCommand, psli->sli_flag, flag); | ||
3396 | goto out_not_finished; | 3546 | goto out_not_finished; |
3397 | } | 3547 | } |
3398 | 3548 | ||
3399 | if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && | 3549 | if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && |
3400 | !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { | 3550 | !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { |
3401 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); | 3551 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); |
3402 | LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); | 3552 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
3553 | "(%d):2528 Mailbox command x%x cannot " | ||
3554 | "issue Data: x%x x%x\n", | ||
3555 | pmbox->vport ? pmbox->vport->vpi : 0, | ||
3556 | pmbox->u.mb.mbxCommand, psli->sli_flag, flag); | ||
3403 | goto out_not_finished; | 3557 | goto out_not_finished; |
3404 | } | 3558 | } |
3405 | 3559 | ||
@@ -3413,14 +3567,24 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) | |||
3413 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); | 3567 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); |
3414 | 3568 | ||
3415 | /* Mbox command <mbxCommand> cannot issue */ | 3569 | /* Mbox command <mbxCommand> cannot issue */ |
3416 | LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); | 3570 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
3571 | "(%d):2529 Mailbox command x%x " | ||
3572 | "cannot issue Data: x%x x%x\n", | ||
3573 | pmbox->vport ? pmbox->vport->vpi : 0, | ||
3574 | pmbox->u.mb.mbxCommand, | ||
3575 | psli->sli_flag, flag); | ||
3417 | goto out_not_finished; | 3576 | goto out_not_finished; |
3418 | } | 3577 | } |
3419 | 3578 | ||
3420 | if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) { | 3579 | if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) { |
3421 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); | 3580 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); |
3422 | /* Mbox command <mbxCommand> cannot issue */ | 3581 | /* Mbox command <mbxCommand> cannot issue */ |
3423 | LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); | 3582 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
3583 | "(%d):2530 Mailbox command x%x " | ||
3584 | "cannot issue Data: x%x x%x\n", | ||
3585 | pmbox->vport ? pmbox->vport->vpi : 0, | ||
3586 | pmbox->u.mb.mbxCommand, | ||
3587 | psli->sli_flag, flag); | ||
3424 | goto out_not_finished; | 3588 | goto out_not_finished; |
3425 | } | 3589 | } |
3426 | 3590 | ||
@@ -3462,12 +3626,17 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) | |||
3462 | 3626 | ||
3463 | /* If we are not polling, we MUST be in SLI2 mode */ | 3627 | /* If we are not polling, we MUST be in SLI2 mode */ |
3464 | if (flag != MBX_POLL) { | 3628 | if (flag != MBX_POLL) { |
3465 | if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) && | 3629 | if (!(psli->sli_flag & LPFC_SLI_ACTIVE) && |
3466 | (mb->mbxCommand != MBX_KILL_BOARD)) { | 3630 | (mb->mbxCommand != MBX_KILL_BOARD)) { |
3467 | psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; | 3631 | psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; |
3468 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); | 3632 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); |
3469 | /* Mbox command <mbxCommand> cannot issue */ | 3633 | /* Mbox command <mbxCommand> cannot issue */ |
3470 | LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); | 3634 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
3635 | "(%d):2531 Mailbox command x%x " | ||
3636 | "cannot issue Data: x%x x%x\n", | ||
3637 | pmbox->vport ? pmbox->vport->vpi : 0, | ||
3638 | pmbox->u.mb.mbxCommand, | ||
3639 | psli->sli_flag, flag); | ||
3471 | goto out_not_finished; | 3640 | goto out_not_finished; |
3472 | } | 3641 | } |
3473 | /* timeout active mbox command */ | 3642 | /* timeout active mbox command */ |
@@ -3506,7 +3675,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) | |||
3506 | /* next set own bit for the adapter and copy over command word */ | 3675 | /* next set own bit for the adapter and copy over command word */ |
3507 | mb->mbxOwner = OWN_CHIP; | 3676 | mb->mbxOwner = OWN_CHIP; |
3508 | 3677 | ||
3509 | if (psli->sli_flag & LPFC_SLI2_ACTIVE) { | 3678 | if (psli->sli_flag & LPFC_SLI_ACTIVE) { |
3510 | /* First copy command data to host SLIM area */ | 3679 | /* First copy command data to host SLIM area */ |
3511 | lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); | 3680 | lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); |
3512 | } else { | 3681 | } else { |
@@ -3529,7 +3698,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) | |||
3529 | 3698 | ||
3530 | if (mb->mbxCommand == MBX_CONFIG_PORT) { | 3699 | if (mb->mbxCommand == MBX_CONFIG_PORT) { |
3531 | /* switch over to host mailbox */ | 3700 | /* switch over to host mailbox */ |
3532 | psli->sli_flag |= LPFC_SLI2_ACTIVE; | 3701 | psli->sli_flag |= LPFC_SLI_ACTIVE; |
3533 | } | 3702 | } |
3534 | } | 3703 | } |
3535 | 3704 | ||
@@ -3552,7 +3721,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) | |||
3552 | writel(CA_MBATT, phba->CAregaddr); | 3721 | writel(CA_MBATT, phba->CAregaddr); |
3553 | readl(phba->CAregaddr); /* flush */ | 3722 | readl(phba->CAregaddr); /* flush */ |
3554 | 3723 | ||
3555 | if (psli->sli_flag & LPFC_SLI2_ACTIVE) { | 3724 | if (psli->sli_flag & LPFC_SLI_ACTIVE) { |
3556 | /* First read mbox status word */ | 3725 | /* First read mbox status word */ |
3557 | word0 = *((uint32_t *)phba->mbox); | 3726 | word0 = *((uint32_t *)phba->mbox); |
3558 | word0 = le32_to_cpu(word0); | 3727 | word0 = le32_to_cpu(word0); |
@@ -3591,7 +3760,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) | |||
3591 | spin_lock_irqsave(&phba->hbalock, drvr_flag); | 3760 | spin_lock_irqsave(&phba->hbalock, drvr_flag); |
3592 | } | 3761 | } |
3593 | 3762 | ||
3594 | if (psli->sli_flag & LPFC_SLI2_ACTIVE) { | 3763 | if (psli->sli_flag & LPFC_SLI_ACTIVE) { |
3595 | /* First copy command data */ | 3764 | /* First copy command data */ |
3596 | word0 = *((uint32_t *)phba->mbox); | 3765 | word0 = *((uint32_t *)phba->mbox); |
3597 | word0 = le32_to_cpu(word0); | 3766 | word0 = le32_to_cpu(word0); |
@@ -3604,7 +3773,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) | |||
3604 | if (((slimword0 & OWN_CHIP) != OWN_CHIP) | 3773 | if (((slimword0 & OWN_CHIP) != OWN_CHIP) |
3605 | && slimmb->mbxStatus) { | 3774 | && slimmb->mbxStatus) { |
3606 | psli->sli_flag &= | 3775 | psli->sli_flag &= |
3607 | ~LPFC_SLI2_ACTIVE; | 3776 | ~LPFC_SLI_ACTIVE; |
3608 | word0 = slimword0; | 3777 | word0 = slimword0; |
3609 | } | 3778 | } |
3610 | } | 3779 | } |
@@ -3616,7 +3785,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) | |||
3616 | ha_copy = readl(phba->HAregaddr); | 3785 | ha_copy = readl(phba->HAregaddr); |
3617 | } | 3786 | } |
3618 | 3787 | ||
3619 | if (psli->sli_flag & LPFC_SLI2_ACTIVE) { | 3788 | if (psli->sli_flag & LPFC_SLI_ACTIVE) { |
3620 | /* copy results back to user */ | 3789 | /* copy results back to user */ |
3621 | lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE); | 3790 | lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE); |
3622 | } else { | 3791 | } else { |
@@ -3701,35 +3870,34 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
3701 | } | 3870 | } |
3702 | 3871 | ||
3703 | /** | 3872 | /** |
3704 | * __lpfc_sli_issue_iocb - Lockless version of lpfc_sli_issue_iocb | 3873 | * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb |
3705 | * @phba: Pointer to HBA context object. | 3874 | * @phba: Pointer to HBA context object. |
3706 | * @pring: Pointer to driver SLI ring object. | 3875 | * @ring_number: SLI ring number to issue iocb on. |
3707 | * @piocb: Pointer to command iocb. | 3876 | * @piocb: Pointer to command iocb. |
3708 | * @flag: Flag indicating if this command can be put into txq. | 3877 | * @flag: Flag indicating if this command can be put into txq. |
3709 | * | 3878 | * |
3710 | * __lpfc_sli_issue_iocb is used by other functions in the driver | 3879 | * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue |
3711 | * to issue an iocb command to the HBA. If the PCI slot is recovering | 3880 | * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is |
3712 | * from error state or if HBA is resetting or if LPFC_STOP_IOCB_EVENT | 3881 | * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT |
3713 | * flag is turned on, the function returns IOCB_ERROR. | 3882 | * flag is turned on, the function returns IOCB_ERROR. When the link is down, |
3714 | * When the link is down, this function allows only iocbs for | 3883 | * this function allows only iocbs for posting buffers. This function finds |
3715 | * posting buffers. | 3884 | * next available slot in the command ring and posts the command to the |
3716 | * This function finds next available slot in the command ring and | 3885 | * available slot and writes the port attention register to request HBA start |
3717 | * posts the command to the available slot and writes the port | 3886 | * processing new iocb. If there is no slot available in the ring and |
3718 | * attention register to request HBA start processing new iocb. | 3887 | * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise |
3719 | * If there is no slot available in the ring and | 3888 | * the function returns IOCB_BUSY. |
3720 | * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the | ||
3721 | * txq, otherwise the function returns IOCB_BUSY. | ||
3722 | * | 3889 | * |
3723 | * This function is called with hbalock held. | 3890 | * This function is called with hbalock held. The function will return success |
3724 | * The function will return success after it successfully submit the | 3891 | * after it successfully submits the iocb to firmware or after adding it to the |
3725 | * iocb to firmware or after adding to the txq. | 3892 | * txq. |
3726 | **/ | 3893 | **/ |
3727 | static int | 3894 | static int |
3728 | __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | 3895 | __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, |
3729 | struct lpfc_iocbq *piocb, uint32_t flag) | 3896 | struct lpfc_iocbq *piocb, uint32_t flag) |
3730 | { | 3897 | { |
3731 | struct lpfc_iocbq *nextiocb; | 3898 | struct lpfc_iocbq *nextiocb; |
3732 | IOCB_t *iocb; | 3899 | IOCB_t *iocb; |
3900 | struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; | ||
3733 | 3901 | ||
3734 | if (piocb->iocb_cmpl && (!piocb->vport) && | 3902 | if (piocb->iocb_cmpl && (!piocb->vport) && |
3735 | (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && | 3903 | (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && |
@@ -3833,6 +4001,52 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
3833 | return IOCB_BUSY; | 4001 | return IOCB_BUSY; |
3834 | } | 4002 | } |
3835 | 4003 | ||
4004 | /** | ||
4005 | * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb | ||
4006 | * | ||
4007 | * This routine wraps the actual lockless version for issuing an IOCB, | ||
4008 | * dispatching through the function pointer stored in the lpfc_hba struct. | ||
4009 | * | ||
4010 | * Return codes: | ||
4011 | * IOCB_ERROR - Error | ||
4012 | * IOCB_SUCCESS - Success | ||
4013 | * IOCB_BUSY - Busy | ||
4014 | **/ | ||
4015 | static inline int | ||
4016 | __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, | ||
4017 | struct lpfc_iocbq *piocb, uint32_t flag) | ||
4018 | { | ||
4019 | return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); | ||
4020 | } | ||
4021 | |||
4022 | /** | ||
4023 | * lpfc_sli_api_table_setup - Set up SLI API function jump table | ||
4024 | * @phba: The hba struct for which this call is being executed. | ||
4025 | * @dev_grp: The HBA PCI-Device group number. | ||
4026 | * | ||
4027 | * This routine sets up the SLI interface API function jump table in @phba | ||
4028 | * struct. | ||
4029 | * Returns: 0 - success, -ENODEV - failure. | ||
4030 | **/ | ||
4031 | int | ||
4032 | lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) | ||
4033 | { | ||
4034 | |||
4035 | switch (dev_grp) { | ||
4036 | case LPFC_PCI_DEV_LP: | ||
4037 | phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3; | ||
4038 | phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3; | ||
4039 | break; | ||
4040 | default: | ||
4041 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
4042 | "1419 Invalid HBA PCI-device group: 0x%x\n", | ||
4043 | dev_grp); | ||
4044 | return -ENODEV; | ||
4045 | break; | ||
4046 | } | ||
4047 | phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq; | ||
4048 | return 0; | ||
4049 | } | ||
3836 | 4050 | ||
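To make the jump-table change above concrete, here is a minimal sketch (not part of the patch) of how a probe-time caller might populate and then exercise the table. The wrapper name example_setup_and_issue and its error mapping are illustrative assumptions; lpfc_sli_api_table_setup, LPFC_PCI_DEV_LP, lpfc_sli_issue_iocb, LPFC_ELS_RING and the IOCB_* return codes come from the hunks in this patch.

/* Sketch only: wiring and using the SLI API jump table (hypothetical caller) */
static int example_setup_and_issue(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
	int rc;

	/* Populate phba->__lpfc_sli_issue_iocb and friends for an SLI-3 HBA */
	rc = lpfc_sli_api_table_setup(phba, LPFC_PCI_DEV_LP);
	if (rc)
		return rc;	/* -ENODEV for an unknown PCI device group */

	/* Callers now pass a ring number instead of a ring pointer; the
	 * locked wrapper dispatches to __lpfc_sli_issue_iocb_s3 through
	 * the jump table.
	 */
	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb, 0);
	return (rc == IOCB_SUCCESS) ? 0 : -EIO;
}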
3837 | /** | 4051 | /** |
3838 | * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb | 4052 | * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb |
@@ -3848,14 +4062,14 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
3848 | * functions which do not hold hbalock. | 4062 | * functions which do not hold hbalock. |
3849 | **/ | 4063 | **/ |
3850 | int | 4064 | int |
3851 | lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | 4065 | lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, |
3852 | struct lpfc_iocbq *piocb, uint32_t flag) | 4066 | struct lpfc_iocbq *piocb, uint32_t flag) |
3853 | { | 4067 | { |
3854 | unsigned long iflags; | 4068 | unsigned long iflags; |
3855 | int rc; | 4069 | int rc; |
3856 | 4070 | ||
3857 | spin_lock_irqsave(&phba->hbalock, iflags); | 4071 | spin_lock_irqsave(&phba->hbalock, iflags); |
3858 | rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag); | 4072 | rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); |
3859 | spin_unlock_irqrestore(&phba->hbalock, iflags); | 4073 | spin_unlock_irqrestore(&phba->hbalock, iflags); |
3860 | 4074 | ||
3861 | return rc; | 4075 | return rc; |
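The wrapper pair above encodes a simple lock discipline: lpfc_sli_issue_iocb takes hbalock itself, while __lpfc_sli_issue_iocb assumes the caller already holds it. The fragment below is an illustrative sketch, not from the patch; the function name is hypothetical and would only compile inside lpfc_sli.c, where the lockless inline is visible.

/* Sketch: locked vs. lockless issue paths after this patch */
static void example_issue_paths(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
	unsigned long iflags;

	/* Ordinary callers do not hold hbalock; use the locked wrapper. */
	(void)lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb, 0);

	/* Callers already holding hbalock use the lockless form, which now
	 * dispatches through phba->__lpfc_sli_issue_iocb.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	(void)__lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb, 0);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}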
@@ -5077,53 +5291,104 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, | |||
5077 | } | 5291 | } |
5078 | 5292 | ||
5079 | /** | 5293 | /** |
5080 | * lpfc_sli_flush_mbox_queue - mailbox queue cleanup function | 5294 | * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system |
5081 | * @phba: Pointer to HBA context. | 5295 | * @phba: Pointer to HBA context. |
5082 | * | 5296 | * |
5083 | * This function is called to cleanup any pending mailbox | 5297 | * This function is called to shut down the driver's mailbox sub-system. |
5084 | * objects in the driver queue before bringing the HBA offline. | 5298 | * It first marks the mailbox sub-system as blocked to prevent any |
5085 | * This function is called while resetting the HBA. | 5299 | * asynchronous mailbox command from being issued off the pending mailbox |
5086 | * The function is called without any lock held. The function | 5300 | * command queue. If the mailbox command sub-system shutdown is due to |
5087 | * takes hbalock to update SLI data structure. | 5301 | * HBA error conditions such as EEH or ERATT, this routine shall invoke |
5088 | * This function returns 1 when there is an active mailbox | 5302 | * the mailbox sub-system flush routine to forcefully bring down the |
5089 | * command pending else returns 0. | 5303 | * mailbox sub-system. Otherwise, if it is due to a normal condition (such |
5304 | * as with offline or HBA function reset), this routine will wait for the | ||
5305 | * outstanding mailbox command to complete before invoking the mailbox | ||
5306 | * sub-system flush routine to gracefully bring down mailbox sub-system. | ||
5090 | **/ | 5307 | **/ |
5091 | int | 5308 | void |
5092 | lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba) | 5309 | lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba) |
5093 | { | 5310 | { |
5094 | struct lpfc_vport *vport = phba->pport; | 5311 | struct lpfc_sli *psli = &phba->sli; |
5095 | int i = 0; | 5312 | uint8_t actcmd = MBX_HEARTBEAT; |
5096 | uint32_t ha_copy; | 5313 | unsigned long timeout; |
5097 | 5314 | ||
5098 | while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) { | 5315 | spin_lock_irq(&phba->hbalock); |
5099 | if (i++ > LPFC_MBOX_TMO * 1000) | 5316 | psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; |
5100 | return 1; | 5317 | spin_unlock_irq(&phba->hbalock); |
5101 | 5318 | ||
5102 | /* | 5319 | if (psli->sli_flag & LPFC_SLI_ACTIVE) { |
5103 | * Call lpfc_sli_handle_mb_event only if a mailbox cmd | ||
5104 | * did finish. This way we won't get the misleading | ||
5105 | * "Stray Mailbox Interrupt" message. | ||
5106 | */ | ||
5107 | spin_lock_irq(&phba->hbalock); | 5320 | spin_lock_irq(&phba->hbalock); |
5108 | ha_copy = phba->work_ha; | 5321 | if (phba->sli.mbox_active) |
5109 | phba->work_ha &= ~HA_MBATT; | 5322 | actcmd = phba->sli.mbox_active->u.mb.mbxCommand; |
5110 | spin_unlock_irq(&phba->hbalock); | 5323 | spin_unlock_irq(&phba->hbalock); |
5324 | /* Determine how long we might wait for the active mailbox | ||
5325 | * command to be gracefully completed by firmware. | ||
5326 | */ | ||
5327 | timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * | ||
5328 | 1000) + jiffies; | ||
5329 | while (phba->sli.mbox_active) { | ||
5330 | /* Check active mailbox complete status every 2ms */ | ||
5331 | msleep(2); | ||
5332 | if (time_after(jiffies, timeout)) | ||
5333 | /* Timeout, let the mailbox flush routine | ||
5334 | * forcefully release the active mailbox command | ||
5335 | */ | ||
5336 | break; | ||
5337 | } | ||
5338 | } | ||
5339 | lpfc_sli_mbox_sys_flush(phba); | ||
5340 | } | ||
5111 | 5341 | ||
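Stripped of diff markup, the graceful-wait portion of lpfc_sli_mbox_sys_shutdown reduces to the deadline pattern below. This is a sketch restating the code above with a hypothetical helper name; lpfc_mbox_tmo_val() supplies the per-command timeout in seconds, exactly as in the function itself.

/* Sketch restating the graceful-wait loop used by lpfc_sli_mbox_sys_shutdown */
static void example_wait_for_active_mbox(struct lpfc_hba *phba, uint8_t actcmd)
{
	unsigned long timeout;

	/* Convert the command-specific timeout (seconds) to a jiffies deadline */
	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) + jiffies;

	while (phba->sli.mbox_active) {
		msleep(2);			/* re-check every 2 ms */
		if (time_after(jiffies, timeout))
			break;			/* give up; the flush will force it */
	}
}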
5112 | if (ha_copy & HA_MBATT) | 5342 | /** |
5113 | if (lpfc_sli_handle_mb_event(phba) == 0) | 5343 | * lpfc_sli_eratt_read - read sli-3 error attention events |
5114 | i = 0; | 5344 | * @phba: Pointer to HBA context. |
5345 | * | ||
5346 | * This function is called to read the SLI3 device error attention registers | ||
5347 | * for possible error attention events. The caller must hold the hbalock | ||
5348 | * with spin_lock_irq(). | ||
5349 | * | ||
5350 | * This function returns 1 when there is Error Attention in the Host Attention | ||
5351 | * Register and returns 0 otherwise. | ||
5352 | **/ | ||
5353 | static int | ||
5354 | lpfc_sli_eratt_read(struct lpfc_hba *phba) | ||
5355 | { | ||
5356 | uint32_t ha_copy; | ||
5115 | 5357 | ||
5116 | msleep(1); | 5358 | /* Read chip Host Attention (HA) register */ |
5117 | } | 5359 | ha_copy = readl(phba->HAregaddr); |
5360 | if (ha_copy & HA_ERATT) { | ||
5361 | /* Read host status register to retrieve error event */ | ||
5362 | lpfc_sli_read_hs(phba); | ||
5118 | 5363 | ||
5119 | return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0; | 5364 | /* Check if a deferred error condition is active */ |
5365 | if ((HS_FFER1 & phba->work_hs) && | ||
5366 | ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | | ||
5367 | HS_FFER6 | HS_FFER7) & phba->work_hs)) { | ||
5369 | phba->hba_flag |= DEFER_ERATT; | ||
5371 | /* Clear all interrupt enable conditions */ | ||
5372 | writel(0, phba->HCregaddr); | ||
5373 | readl(phba->HCregaddr); | ||
5374 | } | ||
5375 | |||
5376 | /* Set the driver HA work bitmap */ | ||
5378 | phba->work_ha |= HA_ERATT; | ||
5379 | /* Indicate polling handles this ERATT */ | ||
5380 | phba->hba_flag |= HBA_ERATT_HANDLED; | ||
5382 | return 1; | ||
5383 | } | ||
5384 | return 0; | ||
5120 | } | 5385 | } |
5121 | 5386 | ||
5122 | /** | 5387 | /** |
5123 | * lpfc_sli_check_eratt - check error attention events | 5388 | * lpfc_sli_check_eratt - check error attention events |
5124 | * @phba: Pointer to HBA context. | 5389 | * @phba: Pointer to HBA context. |
5125 | * | 5390 | * |
5126 | * This function is called form timer soft interrupt context to check HBA's | 5391 | * This function is called from timer soft interrupt context to check HBA's |
5127 | * error attention register bit for error attention events. | 5392 | * error attention register bit for error attention events. |
5128 | * | 5393 | * |
5129 | * This fucntion returns 1 when there is Error Attention in the Host Attention | 5394 | * This fucntion returns 1 when there is Error Attention in the Host Attention |
@@ -5134,10 +5399,6 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba) | |||
5134 | { | 5399 | { |
5135 | uint32_t ha_copy; | 5400 | uint32_t ha_copy; |
5136 | 5401 | ||
5137 | /* If PCI channel is offline, don't process it */ | ||
5138 | if (unlikely(pci_channel_offline(phba->pcidev))) | ||
5139 | return 0; | ||
5140 | |||
5141 | /* If somebody is waiting to handle an eratt, don't process it | 5402 | /* If somebody is waiting to handle an eratt, don't process it |
5142 | * here. The brdkill function will do this. | 5403 | * here. The brdkill function will do this. |
5143 | */ | 5404 | */ |
@@ -5161,56 +5422,80 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba) | |||
5161 | return 0; | 5422 | return 0; |
5162 | } | 5423 | } |
5163 | 5424 | ||
5164 | /* Read chip Host Attention (HA) register */ | 5425 | /* If PCI channel is offline, don't process it */ |
5165 | ha_copy = readl(phba->HAregaddr); | 5426 | if (unlikely(pci_channel_offline(phba->pcidev))) { |
5166 | if (ha_copy & HA_ERATT) { | ||
5167 | /* Read host status register to retrieve error event */ | ||
5168 | lpfc_sli_read_hs(phba); | ||
5169 | |||
5170 | /* Check if there is a deferred error condition is active */ | ||
5171 | if ((HS_FFER1 & phba->work_hs) && | ||
5172 | ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | | ||
5173 | HS_FFER6 | HS_FFER7) & phba->work_hs)) { | ||
5174 | phba->hba_flag |= DEFER_ERATT; | ||
5175 | /* Clear all interrupt enable conditions */ | ||
5176 | writel(0, phba->HCregaddr); | ||
5177 | readl(phba->HCregaddr); | ||
5178 | } | ||
5179 | |||
5180 | /* Set the driver HA work bitmap */ | ||
5181 | phba->work_ha |= HA_ERATT; | ||
5182 | /* Indicate polling handles this ERATT */ | ||
5183 | phba->hba_flag |= HBA_ERATT_HANDLED; | ||
5184 | spin_unlock_irq(&phba->hbalock); | 5427 | spin_unlock_irq(&phba->hbalock); |
5185 | return 1; | 5428 | return 0; |
5429 | } | ||
5430 | |||
5431 | switch (phba->sli_rev) { | ||
5432 | case LPFC_SLI_REV2: | ||
5433 | case LPFC_SLI_REV3: | ||
5434 | /* Read chip Host Attention (HA) register */ | ||
5435 | ha_copy = lpfc_sli_eratt_read(phba); | ||
5436 | break; | ||
5437 | default: | ||
5438 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5439 | "0299 Invalid SLI revision (%d)\n", | ||
5440 | phba->sli_rev); | ||
5441 | ha_copy = 0; | ||
5442 | break; | ||
5186 | } | 5443 | } |
5187 | spin_unlock_irq(&phba->hbalock); | 5444 | spin_unlock_irq(&phba->hbalock); |
5445 | |||
5446 | return ha_copy; | ||
5447 | } | ||
5448 | |||
5449 | /** | ||
5450 | * lpfc_intr_state_check - Check device state for interrupt handling | ||
5451 | * @phba: Pointer to HBA context. | ||
5452 | * | ||
5453 | * This inline routine checks whether a device or its PCI slot is in a | ||
5454 | * state in which the interrupt should be handled. | ||
5455 | * | ||
5456 | * This function returns 0 if the device or the PCI slot is in a state in | ||
5457 | * which the interrupt should be handled, otherwise -EIO. | ||
5458 | */ | ||
5459 | static inline int | ||
5460 | lpfc_intr_state_check(struct lpfc_hba *phba) | ||
5461 | { | ||
5462 | /* If the pci channel is offline, ignore all the interrupts */ | ||
5463 | if (unlikely(pci_channel_offline(phba->pcidev))) | ||
5464 | return -EIO; | ||
5465 | |||
5466 | /* Update device level interrupt statistics */ | ||
5467 | phba->sli.slistat.sli_intr++; | ||
5468 | |||
5469 | /* Ignore all interrupts during initialization. */ | ||
5470 | if (unlikely(phba->link_state < LPFC_LINK_DOWN)) | ||
5471 | return -EIO; | ||
5472 | |||
5188 | return 0; | 5473 | return 0; |
5189 | } | 5474 | } |
5190 | 5475 | ||
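Each of the three SLI-3 handlers that follow now opens with the same guard. A condensed sketch of that shared entry pattern (the handler name is hypothetical; the guard and return values are from the hunks below):

/* Sketch of the entry guard now shared by the SLI-3 interrupt handlers */
static irqreturn_t example_sli3_isr(int irq, void *dev_id)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/* Replaces the open-coded pci_channel_offline()/link_state checks */
	if (lpfc_intr_state_check(phba))
		return IRQ_NONE;

	/* ... read the HA register, clear attention bits, dispatch work ... */
	return IRQ_HANDLED;
}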
5191 | /** | 5476 | /** |
5192 | * lpfc_sp_intr_handler - The slow-path interrupt handler of lpfc driver | 5477 | * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device |
5193 | * @irq: Interrupt number. | 5478 | * @irq: Interrupt number. |
5194 | * @dev_id: The device context pointer. | 5479 | * @dev_id: The device context pointer. |
5195 | * | 5480 | * |
5196 | * This function is directly called from the PCI layer as an interrupt | 5481 | * This function is directly called from the PCI layer as an interrupt |
5197 | * service routine when the device is enabled with MSI-X multi-message | 5482 | * service routine when device with SLI-3 interface spec is enabled with |
5198 | * interrupt mode and there are slow-path events in the HBA. However, | 5483 | * MSI-X multi-message interrupt mode and there are slow-path events in |
5199 | * when the device is enabled with either MSI or Pin-IRQ interrupt mode, | 5484 | * the HBA. However, when the device is enabled with either MSI or Pin-IRQ |
5200 | * this function is called as part of the device-level interrupt handler. | 5485 | * interrupt mode, this function is called as part of the device-level |
5201 | * When the PCI slot is in error recovery or the HBA is undergoing | 5486 | * interrupt handler. When the PCI slot is in error recovery or the HBA |
5202 | * initialization, the interrupt handler will not process the interrupt. | 5487 | * is undergoing initialization, the interrupt handler will not process |
5203 | * The link attention and ELS ring attention events are handled by the | 5488 | * the interrupt. The link attention and ELS ring attention events are |
5204 | * worker thread. The interrupt handler signals the worker thread and | 5489 | * handled by the worker thread. The interrupt handler signals the worker |
5205 | * and returns for these events. This function is called without any | 5490 | * thread and returns for these events. This function is called without |
5206 | * lock held. It gets the hbalock to access and update SLI data | 5491 | * any lock held. It gets the hbalock to access and update SLI data |
5207 | * structures. | 5492 | * structures. |
5208 | * | 5493 | * |
5209 | * This function returns IRQ_HANDLED when interrupt is handled else it | 5494 | * This function returns IRQ_HANDLED when interrupt is handled else it |
5210 | * returns IRQ_NONE. | 5495 | * returns IRQ_NONE. |
5211 | **/ | 5496 | **/ |
5212 | irqreturn_t | 5497 | irqreturn_t |
5213 | lpfc_sp_intr_handler(int irq, void *dev_id) | 5498 | lpfc_sli_sp_intr_handler(int irq, void *dev_id) |
5214 | { | 5499 | { |
5215 | struct lpfc_hba *phba; | 5500 | struct lpfc_hba *phba; |
5216 | uint32_t ha_copy; | 5501 | uint32_t ha_copy; |
@@ -5240,13 +5525,8 @@ lpfc_sp_intr_handler(int irq, void *dev_id) | |||
5240 | * individual interrupt handler in MSI-X multi-message interrupt mode | 5525 | * individual interrupt handler in MSI-X multi-message interrupt mode |
5241 | */ | 5526 | */ |
5242 | if (phba->intr_type == MSIX) { | 5527 | if (phba->intr_type == MSIX) { |
5243 | /* If the pci channel is offline, ignore all the interrupts */ | 5528 | /* Check device state for handling interrupt */ |
5244 | if (unlikely(pci_channel_offline(phba->pcidev))) | 5529 | if (lpfc_intr_state_check(phba)) |
5245 | return IRQ_NONE; | ||
5246 | /* Update device-level interrupt statistics */ | ||
5247 | phba->sli.slistat.sli_intr++; | ||
5248 | /* Ignore all interrupts during initialization. */ | ||
5249 | if (unlikely(phba->link_state < LPFC_LINK_DOWN)) | ||
5250 | return IRQ_NONE; | 5530 | return IRQ_NONE; |
5251 | /* Need to read HA REG for slow-path events */ | 5531 | /* Need to read HA REG for slow-path events */ |
5252 | spin_lock_irqsave(&phba->hbalock, iflag); | 5532 | spin_lock_irqsave(&phba->hbalock, iflag); |
@@ -5271,7 +5551,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id) | |||
5271 | * interrupt. | 5551 | * interrupt. |
5272 | */ | 5552 | */ |
5273 | if (unlikely(phba->hba_flag & DEFER_ERATT)) { | 5553 | if (unlikely(phba->hba_flag & DEFER_ERATT)) { |
5274 | spin_unlock_irq(&phba->hbalock); | 5554 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
5275 | return IRQ_NONE; | 5555 | return IRQ_NONE; |
5276 | } | 5556 | } |
5277 | 5557 | ||
@@ -5434,7 +5714,8 @@ lpfc_sp_intr_handler(int irq, void *dev_id) | |||
5434 | LOG_MBOX | LOG_SLI, | 5714 | LOG_MBOX | LOG_SLI, |
5435 | "0350 rc should have" | 5715 | "0350 rc should have" |
5436 | "been MBX_BUSY"); | 5716 | "been MBX_BUSY"); |
5437 | goto send_current_mbox; | 5717 | if (rc != MBX_NOT_FINISHED) |
5718 | goto send_current_mbox; | ||
5438 | } | 5719 | } |
5439 | } | 5720 | } |
5440 | spin_lock_irqsave( | 5721 | spin_lock_irqsave( |
@@ -5471,29 +5752,29 @@ send_current_mbox: | |||
5471 | } | 5752 | } |
5472 | return IRQ_HANDLED; | 5753 | return IRQ_HANDLED; |
5473 | 5754 | ||
5474 | } /* lpfc_sp_intr_handler */ | 5755 | } /* lpfc_sli_sp_intr_handler */ |
5475 | 5756 | ||
5476 | /** | 5757 | /** |
5477 | * lpfc_fp_intr_handler - The fast-path interrupt handler of lpfc driver | 5758 | * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device. |
5478 | * @irq: Interrupt number. | 5759 | * @irq: Interrupt number. |
5479 | * @dev_id: The device context pointer. | 5760 | * @dev_id: The device context pointer. |
5480 | * | 5761 | * |
5481 | * This function is directly called from the PCI layer as an interrupt | 5762 | * This function is directly called from the PCI layer as an interrupt |
5482 | * service routine when the device is enabled with MSI-X multi-message | 5763 | * service routine when device with SLI-3 interface spec is enabled with |
5483 | * interrupt mode and there is a fast-path FCP IOCB ring event in the | 5764 | * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB |
5484 | * HBA. However, when the device is enabled with either MSI or Pin-IRQ | 5765 | * ring event in the HBA. However, when the device is enabled with either |
5485 | * interrupt mode, this function is called as part of the device-level | 5766 | * MSI or Pin-IRQ interrupt mode, this function is called as part of the |
5486 | * interrupt handler. When the PCI slot is in error recovery or the HBA | 5767 | * device-level interrupt handler. When the PCI slot is in error recovery |
5487 | * is undergoing initialization, the interrupt handler will not process | 5768 | * or the HBA is undergoing initialization, the interrupt handler will not |
5488 | * the interrupt. The SCSI FCP fast-path ring event are handled in the | 5769 | * process the interrupt. The SCSI FCP fast-path ring events are handled in |
5489 | * intrrupt context. This function is called without any lock held. It | 5770 | * the interrupt context. This function is called without any lock held. |
5490 | * gets the hbalock to access and update SLI data structures. | 5771 | * It gets the hbalock to access and update SLI data structures. |
5491 | * | 5772 | * |
5492 | * This function returns IRQ_HANDLED when interrupt is handled else it | 5773 | * This function returns IRQ_HANDLED when interrupt is handled else it |
5493 | * returns IRQ_NONE. | 5774 | * returns IRQ_NONE. |
5494 | **/ | 5775 | **/ |
5495 | irqreturn_t | 5776 | irqreturn_t |
5496 | lpfc_fp_intr_handler(int irq, void *dev_id) | 5777 | lpfc_sli_fp_intr_handler(int irq, void *dev_id) |
5497 | { | 5778 | { |
5498 | struct lpfc_hba *phba; | 5779 | struct lpfc_hba *phba; |
5499 | uint32_t ha_copy; | 5780 | uint32_t ha_copy; |
@@ -5513,13 +5794,8 @@ lpfc_fp_intr_handler(int irq, void *dev_id) | |||
5513 | * individual interrupt handler in MSI-X multi-message interrupt mode | 5794 | * individual interrupt handler in MSI-X multi-message interrupt mode |
5514 | */ | 5795 | */ |
5515 | if (phba->intr_type == MSIX) { | 5796 | if (phba->intr_type == MSIX) { |
5516 | /* If pci channel is offline, ignore all the interrupts */ | 5797 | /* Check device state for handling interrupt */ |
5517 | if (unlikely(pci_channel_offline(phba->pcidev))) | 5798 | if (lpfc_intr_state_check(phba)) |
5518 | return IRQ_NONE; | ||
5519 | /* Update device-level interrupt statistics */ | ||
5520 | phba->sli.slistat.sli_intr++; | ||
5521 | /* Ignore all interrupts during initialization. */ | ||
5522 | if (unlikely(phba->link_state < LPFC_LINK_DOWN)) | ||
5523 | return IRQ_NONE; | 5799 | return IRQ_NONE; |
5524 | /* Need to read HA REG for FCP ring and other ring events */ | 5800 | /* Need to read HA REG for FCP ring and other ring events */ |
5525 | ha_copy = readl(phba->HAregaddr); | 5801 | ha_copy = readl(phba->HAregaddr); |
@@ -5530,7 +5806,7 @@ lpfc_fp_intr_handler(int irq, void *dev_id) | |||
5530 | * any interrupt. | 5806 | * any interrupt. |
5531 | */ | 5807 | */ |
5532 | if (unlikely(phba->hba_flag & DEFER_ERATT)) { | 5808 | if (unlikely(phba->hba_flag & DEFER_ERATT)) { |
5533 | spin_unlock_irq(&phba->hbalock); | 5809 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
5534 | return IRQ_NONE; | 5810 | return IRQ_NONE; |
5535 | } | 5811 | } |
5536 | writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), | 5812 | writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), |
@@ -5566,26 +5842,27 @@ lpfc_fp_intr_handler(int irq, void *dev_id) | |||
5566 | } | 5842 | } |
5567 | } | 5843 | } |
5568 | return IRQ_HANDLED; | 5844 | return IRQ_HANDLED; |
5569 | } /* lpfc_fp_intr_handler */ | 5845 | } /* lpfc_sli_fp_intr_handler */ |
5570 | 5846 | ||
5571 | /** | 5847 | /** |
5572 | * lpfc_intr_handler - The device-level interrupt handler of lpfc driver | 5848 | * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device |
5573 | * @irq: Interrupt number. | 5849 | * @irq: Interrupt number. |
5574 | * @dev_id: The device context pointer. | 5850 | * @dev_id: The device context pointer. |
5575 | * | 5851 | * |
5576 | * This function is the device-level interrupt handler called from the PCI | 5852 | * This function is the HBA device-level interrupt handler to device with |
5577 | * layer when either MSI or Pin-IRQ interrupt mode is enabled and there is | 5853 | * SLI-3 interface spec, called from the PCI layer when either MSI or |
5578 | * an event in the HBA which requires driver attention. This function | 5854 | * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which |
5579 | * invokes the slow-path interrupt attention handling function and fast-path | 5855 | * requires driver attention. This function invokes the slow-path interrupt |
5580 | * interrupt attention handling function in turn to process the relevant | 5856 | * attention handling function and fast-path interrupt attention handling |
5581 | * HBA attention events. This function is called without any lock held. It | 5857 | * function in turn to process the relevant HBA attention events. This |
5582 | * gets the hbalock to access and update SLI data structures. | 5858 | * function is called without any lock held. It gets the hbalock to access |
5859 | * and update SLI data structures. | ||
5583 | * | 5860 | * |
5584 | * This function returns IRQ_HANDLED when interrupt is handled, else it | 5861 | * This function returns IRQ_HANDLED when interrupt is handled, else it |
5585 | * returns IRQ_NONE. | 5862 | * returns IRQ_NONE. |
5586 | **/ | 5863 | **/ |
5587 | irqreturn_t | 5864 | irqreturn_t |
5588 | lpfc_intr_handler(int irq, void *dev_id) | 5865 | lpfc_sli_intr_handler(int irq, void *dev_id) |
5589 | { | 5866 | { |
5590 | struct lpfc_hba *phba; | 5867 | struct lpfc_hba *phba; |
5591 | irqreturn_t sp_irq_rc, fp_irq_rc; | 5868 | irqreturn_t sp_irq_rc, fp_irq_rc; |
@@ -5600,15 +5877,8 @@ lpfc_intr_handler(int irq, void *dev_id) | |||
5600 | if (unlikely(!phba)) | 5877 | if (unlikely(!phba)) |
5601 | return IRQ_NONE; | 5878 | return IRQ_NONE; |
5602 | 5879 | ||
5603 | /* If the pci channel is offline, ignore all the interrupts. */ | 5880 | /* Check device state for handling interrupt */ |
5604 | if (unlikely(pci_channel_offline(phba->pcidev))) | 5881 | if (lpfc_intr_state_check(phba)) |
5605 | return IRQ_NONE; | ||
5606 | |||
5607 | /* Update device level interrupt statistics */ | ||
5608 | phba->sli.slistat.sli_intr++; | ||
5609 | |||
5610 | /* Ignore all interrupts during initialization. */ | ||
5611 | if (unlikely(phba->link_state < LPFC_LINK_DOWN)) | ||
5612 | return IRQ_NONE; | 5882 | return IRQ_NONE; |
5613 | 5883 | ||
5614 | spin_lock(&phba->hbalock); | 5884 | spin_lock(&phba->hbalock); |
@@ -5650,7 +5920,7 @@ lpfc_intr_handler(int irq, void *dev_id) | |||
5650 | status2 >>= (4*LPFC_ELS_RING); | 5920 | status2 >>= (4*LPFC_ELS_RING); |
5651 | 5921 | ||
5652 | if (status1 || (status2 & HA_RXMASK)) | 5922 | if (status1 || (status2 & HA_RXMASK)) |
5653 | sp_irq_rc = lpfc_sp_intr_handler(irq, dev_id); | 5923 | sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id); |
5654 | else | 5924 | else |
5655 | sp_irq_rc = IRQ_NONE; | 5925 | sp_irq_rc = IRQ_NONE; |
5656 | 5926 | ||
@@ -5670,10 +5940,10 @@ lpfc_intr_handler(int irq, void *dev_id) | |||
5670 | status2 = 0; | 5940 | status2 = 0; |
5671 | 5941 | ||
5672 | if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) | 5942 | if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) |
5673 | fp_irq_rc = lpfc_fp_intr_handler(irq, dev_id); | 5943 | fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id); |
5674 | else | 5944 | else |
5675 | fp_irq_rc = IRQ_NONE; | 5945 | fp_irq_rc = IRQ_NONE; |
5676 | 5946 | ||
5677 | /* Return device-level interrupt handling status */ | 5947 | /* Return device-level interrupt handling status */ |
5678 | return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; | 5948 | return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; |
5679 | } /* lpfc_intr_handler */ | 5949 | } /* lpfc_sli_intr_handler */ |
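For context on where the renamed handlers plug in: the registration itself happens in lpfc_init.c and is outside this file's hunks, so the sketch below is an assumption-laden illustration only. The helper name, the msix_entries[] field, the vector-to-handler mapping and the IRQ name strings are assumptions, not taken from this patch; only the three handler symbols and the MSIX intr_type check are.

/* Hypothetical sketch: hooking up the renamed SLI-3 interrupt handlers */
static int example_request_sli3_irqs(struct lpfc_hba *phba)
{
	int rc;

	if (phba->intr_type == MSIX) {
		/* Assumed layout: vector 0 for slow-path, vector 1 for fast-path */
		rc = request_irq(phba->msix_entries[0].vector,
				 lpfc_sli_sp_intr_handler, 0, "lpfc:sp", phba);
		if (rc)
			return rc;
		rc = request_irq(phba->msix_entries[1].vector,
				 lpfc_sli_fp_intr_handler, 0, "lpfc:fp", phba);
		if (rc)
			free_irq(phba->msix_entries[0].vector, phba);
		return rc;
	}

	/* MSI or INTx: one device-level handler fans out to sp/fp internally */
	return request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
			   IRQF_SHARED, "lpfc", phba);
}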