aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi/lpfc/lpfc_sli.c
diff options
context:
space:
mode:
authorJames Smart <James.Smart@Emulex.Com>2009-05-22 14:50:54 -0400
committerJames Bottomley <James.Bottomley@HansenPartnership.com>2009-06-08 12:18:10 -0400
commit3772a99175f5378b5001e8da364341a8b8226a4a (patch)
treedd710f890c5bc097c874ad1783cd26ea56e88f57 /drivers/scsi/lpfc/lpfc_sli.c
parenta366695592ebc9151dd5a248681270f0925d8324 (diff)
[SCSI] lpfc 8.3.2 : Reorganization for SLI4
Preps the organization of the driver so that the bottom half, which interacts with the hardware, can share common code sequences for attachment, detachment, initialization, teardown, etc with new hardware. For very common code sections, which become specific to the interface type, the driver uses an indirect function call. The function is set at initialization. For less common sections, such as initialization, the driver looks at the interface type and calls the routines relative to the interface. Signed-off-by: James Smart <james.smart@emulex.com> Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_sli.c')
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c1006
1 files changed, 638 insertions, 368 deletions
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index eb5c75c45ba4..e2d07d97fa8b 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -142,7 +142,7 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
142} 142}
143 143
144/** 144/**
145 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool 145 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
146 * @phba: Pointer to HBA context object. 146 * @phba: Pointer to HBA context object.
147 * @iocbq: Pointer to driver iocb object. 147 * @iocbq: Pointer to driver iocb object.
148 * 148 *
@@ -152,7 +152,7 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
152 * clears all other fields of the iocb object when it is freed. 152 * clears all other fields of the iocb object when it is freed.
153 **/ 153 **/
154static void 154static void
155__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 155__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
156{ 156{
157 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 157 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
158 158
@@ -160,10 +160,27 @@ __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
160 * Clean all volatile data fields, preserve iotag and node struct. 160 * Clean all volatile data fields, preserve iotag and node struct.
161 */ 161 */
162 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 162 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
163 iocbq->sli4_xritag = NO_XRI;
163 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 164 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
164} 165}
165 166
166/** 167/**
168 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
169 * @phba: Pointer to HBA context object.
170 * @iocbq: Pointer to driver iocb object.
171 *
172 * This function is called with hbalock held to release driver
173 * iocb object to the iocb pool. The iotag in the iocb object
174 * does not change for each use of the iocb object. This function
175 * clears all other fields of the iocb object when it is freed.
176 **/
177static void
178__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
179{
180 phba->__lpfc_sli_release_iocbq(phba, iocbq);
181}
182
183/**
167 * lpfc_sli_release_iocbq - Release iocb to the iocb pool 184 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
168 * @phba: Pointer to HBA context object. 185 * @phba: Pointer to HBA context object.
169 * @iocbq: Pointer to driver iocb object. 186 * @iocbq: Pointer to driver iocb object.
@@ -779,8 +796,8 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
779 phba->hbqs[i].buffer_count = 0; 796 phba->hbqs[i].buffer_count = 0;
780 } 797 }
781 /* Return all HBQ buffer that are in-fly */ 798 /* Return all HBQ buffer that are in-fly */
782 list_for_each_entry_safe(dmabuf, next_dmabuf, 799 list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
783 &phba->hbqbuf_in_list, list) { 800 list) {
784 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 801 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
785 list_del(&hbq_buf->dbuf.list); 802 list_del(&hbq_buf->dbuf.list);
786 if (hbq_buf->tag == -1) { 803 if (hbq_buf->tag == -1) {
@@ -814,10 +831,28 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
814 * pointer to the hbq entry if it successfully post the buffer 831 * pointer to the hbq entry if it successfully post the buffer
815 * else it will return NULL. 832 * else it will return NULL.
816 **/ 833 **/
817static struct lpfc_hbq_entry * 834static int
818lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, 835lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
819 struct hbq_dmabuf *hbq_buf) 836 struct hbq_dmabuf *hbq_buf)
820{ 837{
838 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
839}
840
841/**
842 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
843 * @phba: Pointer to HBA context object.
844 * @hbqno: HBQ number.
845 * @hbq_buf: Pointer to HBQ buffer.
846 *
847 * This function is called with the hbalock held to post a hbq buffer to the
848 * firmware. If the function finds an empty slot in the HBQ, it will post the
849 * buffer and place it on the hbq_buffer_list. The function will return zero if
850 * it successfully post the buffer else it will return an error.
851 **/
852static int
853lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
854 struct hbq_dmabuf *hbq_buf)
855{
821 struct lpfc_hbq_entry *hbqe; 856 struct lpfc_hbq_entry *hbqe;
822 dma_addr_t physaddr = hbq_buf->dbuf.phys; 857 dma_addr_t physaddr = hbq_buf->dbuf.phys;
823 858
@@ -838,8 +873,9 @@ lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
838 /* flush */ 873 /* flush */
839 readl(phba->hbq_put + hbqno); 874 readl(phba->hbq_put + hbqno);
840 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list); 875 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
841 } 876 return 0;
842 return hbqe; 877 } else
878 return -ENOMEM;
843} 879}
844 880
845/* HBQ for ELS and CT traffic. */ 881/* HBQ for ELS and CT traffic. */
@@ -914,7 +950,7 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
914 dbuf.list); 950 dbuf.list);
915 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | 951 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
916 (hbqno << 16)); 952 (hbqno << 16));
917 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 953 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
918 phba->hbqs[hbqno].buffer_count++; 954 phba->hbqs[hbqno].buffer_count++;
919 posted++; 955 posted++;
920 } else 956 } else
@@ -965,6 +1001,25 @@ lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
965} 1001}
966 1002
967/** 1003/**
1004 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
1005 * @phba: Pointer to HBA context object.
1006 * @hbqno: HBQ number.
1007 *
1008 * This function removes the first hbq buffer on an hbq list and returns a
1009 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
1010 **/
1011static struct hbq_dmabuf *
1012lpfc_sli_hbqbuf_get(struct list_head *rb_list)
1013{
1014 struct lpfc_dmabuf *d_buf;
1015
1016 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
1017 if (!d_buf)
1018 return NULL;
1019 return container_of(d_buf, struct hbq_dmabuf, dbuf);
1020}
1021
1022/**
968 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag 1023 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
969 * @phba: Pointer to HBA context object. 1024 * @phba: Pointer to HBA context object.
970 * @tag: Tag of the hbq buffer. 1025 * @tag: Tag of the hbq buffer.
@@ -985,12 +1040,15 @@ lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
985 if (hbqno >= LPFC_MAX_HBQS) 1040 if (hbqno >= LPFC_MAX_HBQS)
986 return NULL; 1041 return NULL;
987 1042
1043 spin_lock_irq(&phba->hbalock);
988 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) { 1044 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
989 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 1045 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
990 if (hbq_buf->tag == tag) { 1046 if (hbq_buf->tag == tag) {
1047 spin_unlock_irq(&phba->hbalock);
991 return hbq_buf; 1048 return hbq_buf;
992 } 1049 }
993 } 1050 }
1051 spin_unlock_irq(&phba->hbalock);
994 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, 1052 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
995 "1803 Bad hbq tag. Data: x%x x%x\n", 1053 "1803 Bad hbq tag. Data: x%x x%x\n",
996 tag, phba->hbqs[tag >> 16].buffer_count); 1054 tag, phba->hbqs[tag >> 16].buffer_count);
@@ -1013,9 +1071,8 @@ lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
1013 1071
1014 if (hbq_buffer) { 1072 if (hbq_buffer) {
1015 hbqno = hbq_buffer->tag >> 16; 1073 hbqno = hbq_buffer->tag >> 16;
1016 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 1074 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
1017 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1075 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1018 }
1019 } 1076 }
1020} 1077}
1021 1078
@@ -1317,6 +1374,45 @@ lpfc_sli_get_buff(struct lpfc_hba *phba,
1317 return &hbq_entry->dbuf; 1374 return &hbq_entry->dbuf;
1318} 1375}
1319 1376
1377/**
1378 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
1379 * @phba: Pointer to HBA context object.
1380 * @pring: Pointer to driver SLI ring object.
1381 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
1382 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
1383 * @fch_type: the type for the first frame of the sequence.
1384 *
1385 * This function is called with no lock held. This function uses the r_ctl and
1386 * type of the received sequence to find the correct callback function to call
1387 * to process the sequence.
1388 **/
1389static int
1390lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1391 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
1392 uint32_t fch_type)
1393{
1394 int i;
1395
1396 /* unSolicited Responses */
1397 if (pring->prt[0].profile) {
1398 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
1399 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
1400 saveq);
1401 return 1;
1402 }
1403 /* We must search, based on rctl / type
1404 for the right routine */
1405 for (i = 0; i < pring->num_mask; i++) {
1406 if ((pring->prt[i].rctl == fch_r_ctl) &&
1407 (pring->prt[i].type == fch_type)) {
1408 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
1409 (pring->prt[i].lpfc_sli_rcv_unsol_event)
1410 (phba, pring, saveq);
1411 return 1;
1412 }
1413 }
1414 return 0;
1415}
1320 1416
1321/** 1417/**
1322 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler 1418 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
@@ -1339,7 +1435,7 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1339 IOCB_t * irsp; 1435 IOCB_t * irsp;
1340 WORD5 * w5p; 1436 WORD5 * w5p;
1341 uint32_t Rctl, Type; 1437 uint32_t Rctl, Type;
1342 uint32_t match, i; 1438 uint32_t match;
1343 struct lpfc_iocbq *iocbq; 1439 struct lpfc_iocbq *iocbq;
1344 struct lpfc_dmabuf *dmzbuf; 1440 struct lpfc_dmabuf *dmzbuf;
1345 1441
@@ -1482,35 +1578,12 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1482 } 1578 }
1483 } 1579 }
1484 1580
1485 /* unSolicited Responses */ 1581 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
1486 if (pring->prt[0].profile) {
1487 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
1488 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
1489 saveq);
1490 match = 1;
1491 } else {
1492 /* We must search, based on rctl / type
1493 for the right routine */
1494 for (i = 0; i < pring->num_mask; i++) {
1495 if ((pring->prt[i].rctl == Rctl)
1496 && (pring->prt[i].type == Type)) {
1497 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
1498 (pring->prt[i].lpfc_sli_rcv_unsol_event)
1499 (phba, pring, saveq);
1500 match = 1;
1501 break;
1502 }
1503 }
1504 }
1505 if (match == 0) {
1506 /* Unexpected Rctl / Type received */
1507 /* Ring <ringno> handler: unexpected
1508 Rctl <Rctl> Type <Type> received */
1509 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 1582 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1510 "0313 Ring %d handler: unexpected Rctl x%x " 1583 "0313 Ring %d handler: unexpected Rctl x%x "
1511 "Type x%x received\n", 1584 "Type x%x received\n",
1512 pring->ringno, Rctl, Type); 1585 pring->ringno, Rctl, Type);
1513 } 1586
1514 return 1; 1587 return 1;
1515} 1588}
1516 1589
@@ -1552,6 +1625,37 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
1552} 1625}
1553 1626
1554/** 1627/**
1628 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
1629 * @phba: Pointer to HBA context object.
1630 * @pring: Pointer to driver SLI ring object.
1631 * @iotag: IOCB tag.
1632 *
1633 * This function looks up the iocb_lookup table to get the command iocb
1634 * corresponding to the given iotag. This function is called with the
1635 * hbalock held.
1636 * This function returns the command iocb object if it finds the command
1637 * iocb else returns NULL.
1638 **/
1639static struct lpfc_iocbq *
1640lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
1641 struct lpfc_sli_ring *pring, uint16_t iotag)
1642{
1643 struct lpfc_iocbq *cmd_iocb;
1644
1645 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
1646 cmd_iocb = phba->sli.iocbq_lookup[iotag];
1647 list_del_init(&cmd_iocb->list);
1648 pring->txcmplq_cnt--;
1649 return cmd_iocb;
1650 }
1651
1652 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1653 "0372 iotag x%x is out off range: max iotag (x%x)\n",
1654 iotag, phba->sli.last_iotag);
1655 return NULL;
1656}
1657
1658/**
1555 * lpfc_sli_process_sol_iocb - process solicited iocb completion 1659 * lpfc_sli_process_sol_iocb - process solicited iocb completion
1556 * @phba: Pointer to HBA context object. 1660 * @phba: Pointer to HBA context object.
1557 * @pring: Pointer to driver SLI ring object. 1661 * @pring: Pointer to driver SLI ring object.
@@ -1954,7 +2058,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
1954 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 2058 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1955 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { 2059 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
1956 spin_unlock_irqrestore(&phba->hbalock, iflag); 2060 spin_unlock_irqrestore(&phba->hbalock, iflag);
1957 lpfc_rampdown_queue_depth(phba); 2061 phba->lpfc_rampdown_queue_depth(phba);
1958 spin_lock_irqsave(&phba->hbalock, iflag); 2062 spin_lock_irqsave(&phba->hbalock, iflag);
1959 } 2063 }
1960 2064
@@ -2068,39 +2172,215 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2068} 2172}
2069 2173
2070/** 2174/**
2071 * lpfc_sli_handle_slow_ring_event - Handle ring events for non-FCP rings 2175 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
2176 * @phba: Pointer to HBA context object.
2177 * @pring: Pointer to driver SLI ring object.
2178 * @rspiocbp: Pointer to driver response IOCB object.
2179 *
2180 * This function is called from the worker thread when there is a slow-path
2181 * response IOCB to process. This function chains all the response iocbs until
2182 * seeing the iocb with the LE bit set. The function will call
2183 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
2184 * completion of a command iocb. The function will call the
2185 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
2186 * The function frees the resources or calls the completion handler if this
2187 * iocb is an abort completion. The function returns NULL when the response
2188 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
2189 * this function shall chain the iocb on to the iocb_continueq and return the
2190 * response iocb passed in.
2191 **/
2192static struct lpfc_iocbq *
2193lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2194 struct lpfc_iocbq *rspiocbp)
2195{
2196 struct lpfc_iocbq *saveq;
2197 struct lpfc_iocbq *cmdiocbp;
2198 struct lpfc_iocbq *next_iocb;
2199 IOCB_t *irsp = NULL;
2200 uint32_t free_saveq;
2201 uint8_t iocb_cmd_type;
2202 lpfc_iocb_type type;
2203 unsigned long iflag;
2204 int rc;
2205
2206 spin_lock_irqsave(&phba->hbalock, iflag);
2207 /* First add the response iocb to the countinueq list */
2208 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
2209 pring->iocb_continueq_cnt++;
2210
2211 /* Now, determine whetehr the list is completed for processing */
2212 irsp = &rspiocbp->iocb;
2213 if (irsp->ulpLe) {
2214 /*
2215 * By default, the driver expects to free all resources
2216 * associated with this iocb completion.
2217 */
2218 free_saveq = 1;
2219 saveq = list_get_first(&pring->iocb_continueq,
2220 struct lpfc_iocbq, list);
2221 irsp = &(saveq->iocb);
2222 list_del_init(&pring->iocb_continueq);
2223 pring->iocb_continueq_cnt = 0;
2224
2225 pring->stats.iocb_rsp++;
2226
2227 /*
2228 * If resource errors reported from HBA, reduce
2229 * queuedepths of the SCSI device.
2230 */
2231 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
2232 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
2233 spin_unlock_irqrestore(&phba->hbalock, iflag);
2234 phba->lpfc_rampdown_queue_depth(phba);
2235 spin_lock_irqsave(&phba->hbalock, iflag);
2236 }
2237
2238 if (irsp->ulpStatus) {
2239 /* Rsp ring <ringno> error: IOCB */
2240 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2241 "0328 Rsp Ring %d error: "
2242 "IOCB Data: "
2243 "x%x x%x x%x x%x "
2244 "x%x x%x x%x x%x "
2245 "x%x x%x x%x x%x "
2246 "x%x x%x x%x x%x\n",
2247 pring->ringno,
2248 irsp->un.ulpWord[0],
2249 irsp->un.ulpWord[1],
2250 irsp->un.ulpWord[2],
2251 irsp->un.ulpWord[3],
2252 irsp->un.ulpWord[4],
2253 irsp->un.ulpWord[5],
2254 *(((uint32_t *) irsp) + 6),
2255 *(((uint32_t *) irsp) + 7),
2256 *(((uint32_t *) irsp) + 8),
2257 *(((uint32_t *) irsp) + 9),
2258 *(((uint32_t *) irsp) + 10),
2259 *(((uint32_t *) irsp) + 11),
2260 *(((uint32_t *) irsp) + 12),
2261 *(((uint32_t *) irsp) + 13),
2262 *(((uint32_t *) irsp) + 14),
2263 *(((uint32_t *) irsp) + 15));
2264 }
2265
2266 /*
2267 * Fetch the IOCB command type and call the correct completion
2268 * routine. Solicited and Unsolicited IOCBs on the ELS ring
2269 * get freed back to the lpfc_iocb_list by the discovery
2270 * kernel thread.
2271 */
2272 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
2273 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
2274 switch (type) {
2275 case LPFC_SOL_IOCB:
2276 spin_unlock_irqrestore(&phba->hbalock, iflag);
2277 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
2278 spin_lock_irqsave(&phba->hbalock, iflag);
2279 break;
2280
2281 case LPFC_UNSOL_IOCB:
2282 spin_unlock_irqrestore(&phba->hbalock, iflag);
2283 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
2284 spin_lock_irqsave(&phba->hbalock, iflag);
2285 if (!rc)
2286 free_saveq = 0;
2287 break;
2288
2289 case LPFC_ABORT_IOCB:
2290 cmdiocbp = NULL;
2291 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
2292 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
2293 saveq);
2294 if (cmdiocbp) {
2295 /* Call the specified completion routine */
2296 if (cmdiocbp->iocb_cmpl) {
2297 spin_unlock_irqrestore(&phba->hbalock,
2298 iflag);
2299 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
2300 saveq);
2301 spin_lock_irqsave(&phba->hbalock,
2302 iflag);
2303 } else
2304 __lpfc_sli_release_iocbq(phba,
2305 cmdiocbp);
2306 }
2307 break;
2308
2309 case LPFC_UNKNOWN_IOCB:
2310 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
2311 char adaptermsg[LPFC_MAX_ADPTMSG];
2312 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
2313 memcpy(&adaptermsg[0], (uint8_t *)irsp,
2314 MAX_MSG_DATA);
2315 dev_warn(&((phba->pcidev)->dev),
2316 "lpfc%d: %s\n",
2317 phba->brd_no, adaptermsg);
2318 } else {
2319 /* Unknown IOCB command */
2320 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2321 "0335 Unknown IOCB "
2322 "command Data: x%x "
2323 "x%x x%x x%x\n",
2324 irsp->ulpCommand,
2325 irsp->ulpStatus,
2326 irsp->ulpIoTag,
2327 irsp->ulpContext);
2328 }
2329 break;
2330 }
2331
2332 if (free_saveq) {
2333 list_for_each_entry_safe(rspiocbp, next_iocb,
2334 &saveq->list, list) {
2335 list_del(&rspiocbp->list);
2336 __lpfc_sli_release_iocbq(phba, rspiocbp);
2337 }
2338 __lpfc_sli_release_iocbq(phba, saveq);
2339 }
2340 rspiocbp = NULL;
2341 }
2342 spin_unlock_irqrestore(&phba->hbalock, iflag);
2343 return rspiocbp;
2344}
2345
2346/**
2347 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
2072 * @phba: Pointer to HBA context object. 2348 * @phba: Pointer to HBA context object.
2073 * @pring: Pointer to driver SLI ring object. 2349 * @pring: Pointer to driver SLI ring object.
2074 * @mask: Host attention register mask for this ring. 2350 * @mask: Host attention register mask for this ring.
2075 * 2351 *
2076 * This function is called from the worker thread when there is a ring 2352 * This routine wraps the actual slow_ring event process routine from the
2077 * event for non-fcp rings. The caller does not hold any lock . 2353 * API jump table function pointer from the lpfc_hba struct.
2078 * The function processes each response iocb in the response ring until it
2079 * finds an iocb with LE bit set and chains all the iocbs upto the iocb with
2080 * LE bit set. The function will call lpfc_sli_process_sol_iocb function if the
2081 * response iocb indicates a completion of a command iocb. The function
2082 * will call lpfc_sli_process_unsol_iocb function if this is an unsolicited
2083 * iocb. The function frees the resources or calls the completion handler if
2084 * this iocb is an abort completion. The function returns 0 when the allocated
2085 * iocbs are not freed, otherwise returns 1.
2086 **/ 2354 **/
2087int 2355void
2088lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, 2356lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2089 struct lpfc_sli_ring *pring, uint32_t mask) 2357 struct lpfc_sli_ring *pring, uint32_t mask)
2090{ 2358{
2359 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
2360}
2361
2362/**
2363 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
2364 * @phba: Pointer to HBA context object.
2365 * @pring: Pointer to driver SLI ring object.
2366 * @mask: Host attention register mask for this ring.
2367 *
2368 * This function is called from the worker thread when there is a ring event
2369 * for non-fcp rings. The caller does not hold any lock. The function will
2370 * remove each response iocb in the response ring and calls the handle
2371 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
2372 **/
2373static void
2374lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
2375 struct lpfc_sli_ring *pring, uint32_t mask)
2376{
2091 struct lpfc_pgp *pgp; 2377 struct lpfc_pgp *pgp;
2092 IOCB_t *entry; 2378 IOCB_t *entry;
2093 IOCB_t *irsp = NULL; 2379 IOCB_t *irsp = NULL;
2094 struct lpfc_iocbq *rspiocbp = NULL; 2380 struct lpfc_iocbq *rspiocbp = NULL;
2095 struct lpfc_iocbq *next_iocb;
2096 struct lpfc_iocbq *cmdiocbp;
2097 struct lpfc_iocbq *saveq;
2098 uint8_t iocb_cmd_type;
2099 lpfc_iocb_type type;
2100 uint32_t status, free_saveq;
2101 uint32_t portRspPut, portRspMax; 2381 uint32_t portRspPut, portRspMax;
2102 int rc = 1;
2103 unsigned long iflag; 2382 unsigned long iflag;
2383 uint32_t status;
2104 2384
2105 pgp = &phba->port_gp[pring->ringno]; 2385 pgp = &phba->port_gp[pring->ringno];
2106 spin_lock_irqsave(&phba->hbalock, iflag); 2386 spin_lock_irqsave(&phba->hbalock, iflag);
@@ -2128,7 +2408,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2128 phba->work_hs = HS_FFER3; 2408 phba->work_hs = HS_FFER3;
2129 lpfc_handle_eratt(phba); 2409 lpfc_handle_eratt(phba);
2130 2410
2131 return 1; 2411 return;
2132 } 2412 }
2133 2413
2134 rmb(); 2414 rmb();
@@ -2173,138 +2453,10 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2173 2453
2174 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); 2454 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
2175 2455
2176 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); 2456 spin_unlock_irqrestore(&phba->hbalock, iflag);
2177 2457 /* Handle the response IOCB */
2178 pring->iocb_continueq_cnt++; 2458 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
2179 if (irsp->ulpLe) { 2459 spin_lock_irqsave(&phba->hbalock, iflag);
2180 /*
2181 * By default, the driver expects to free all resources
2182 * associated with this iocb completion.
2183 */
2184 free_saveq = 1;
2185 saveq = list_get_first(&pring->iocb_continueq,
2186 struct lpfc_iocbq, list);
2187 irsp = &(saveq->iocb);
2188 list_del_init(&pring->iocb_continueq);
2189 pring->iocb_continueq_cnt = 0;
2190
2191 pring->stats.iocb_rsp++;
2192
2193 /*
2194 * If resource errors reported from HBA, reduce
2195 * queuedepths of the SCSI device.
2196 */
2197 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
2198 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
2199 spin_unlock_irqrestore(&phba->hbalock, iflag);
2200 lpfc_rampdown_queue_depth(phba);
2201 spin_lock_irqsave(&phba->hbalock, iflag);
2202 }
2203
2204 if (irsp->ulpStatus) {
2205 /* Rsp ring <ringno> error: IOCB */
2206 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2207 "0328 Rsp Ring %d error: "
2208 "IOCB Data: "
2209 "x%x x%x x%x x%x "
2210 "x%x x%x x%x x%x "
2211 "x%x x%x x%x x%x "
2212 "x%x x%x x%x x%x\n",
2213 pring->ringno,
2214 irsp->un.ulpWord[0],
2215 irsp->un.ulpWord[1],
2216 irsp->un.ulpWord[2],
2217 irsp->un.ulpWord[3],
2218 irsp->un.ulpWord[4],
2219 irsp->un.ulpWord[5],
2220 *(((uint32_t *) irsp) + 6),
2221 *(((uint32_t *) irsp) + 7),
2222 *(((uint32_t *) irsp) + 8),
2223 *(((uint32_t *) irsp) + 9),
2224 *(((uint32_t *) irsp) + 10),
2225 *(((uint32_t *) irsp) + 11),
2226 *(((uint32_t *) irsp) + 12),
2227 *(((uint32_t *) irsp) + 13),
2228 *(((uint32_t *) irsp) + 14),
2229 *(((uint32_t *) irsp) + 15));
2230 }
2231
2232 /*
2233 * Fetch the IOCB command type and call the correct
2234 * completion routine. Solicited and Unsolicited
2235 * IOCBs on the ELS ring get freed back to the
2236 * lpfc_iocb_list by the discovery kernel thread.
2237 */
2238 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
2239 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
2240 if (type == LPFC_SOL_IOCB) {
2241 spin_unlock_irqrestore(&phba->hbalock, iflag);
2242 rc = lpfc_sli_process_sol_iocb(phba, pring,
2243 saveq);
2244 spin_lock_irqsave(&phba->hbalock, iflag);
2245 } else if (type == LPFC_UNSOL_IOCB) {
2246 spin_unlock_irqrestore(&phba->hbalock, iflag);
2247 rc = lpfc_sli_process_unsol_iocb(phba, pring,
2248 saveq);
2249 spin_lock_irqsave(&phba->hbalock, iflag);
2250 if (!rc)
2251 free_saveq = 0;
2252 } else if (type == LPFC_ABORT_IOCB) {
2253 if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
2254 ((cmdiocbp =
2255 lpfc_sli_iocbq_lookup(phba, pring,
2256 saveq)))) {
2257 /* Call the specified completion
2258 routine */
2259 if (cmdiocbp->iocb_cmpl) {
2260 spin_unlock_irqrestore(
2261 &phba->hbalock,
2262 iflag);
2263 (cmdiocbp->iocb_cmpl) (phba,
2264 cmdiocbp, saveq);
2265 spin_lock_irqsave(
2266 &phba->hbalock,
2267 iflag);
2268 } else
2269 __lpfc_sli_release_iocbq(phba,
2270 cmdiocbp);
2271 }
2272 } else if (type == LPFC_UNKNOWN_IOCB) {
2273 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
2274
2275 char adaptermsg[LPFC_MAX_ADPTMSG];
2276
2277 memset(adaptermsg, 0,
2278 LPFC_MAX_ADPTMSG);
2279 memcpy(&adaptermsg[0], (uint8_t *) irsp,
2280 MAX_MSG_DATA);
2281 dev_warn(&((phba->pcidev)->dev),
2282 "lpfc%d: %s\n",
2283 phba->brd_no, adaptermsg);
2284 } else {
2285 /* Unknown IOCB command */
2286 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2287 "0335 Unknown IOCB "
2288 "command Data: x%x "
2289 "x%x x%x x%x\n",
2290 irsp->ulpCommand,
2291 irsp->ulpStatus,
2292 irsp->ulpIoTag,
2293 irsp->ulpContext);
2294 }
2295 }
2296
2297 if (free_saveq) {
2298 list_for_each_entry_safe(rspiocbp, next_iocb,
2299 &saveq->list, list) {
2300 list_del(&rspiocbp->list);
2301 __lpfc_sli_release_iocbq(phba,
2302 rspiocbp);
2303 }
2304 __lpfc_sli_release_iocbq(phba, saveq);
2305 }
2306 rspiocbp = NULL;
2307 }
2308 2460
2309 /* 2461 /*
2310 * If the port response put pointer has not been updated, sync 2462 * If the port response put pointer has not been updated, sync
@@ -2338,7 +2490,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2338 } 2490 }
2339 2491
2340 spin_unlock_irqrestore(&phba->hbalock, iflag); 2492 spin_unlock_irqrestore(&phba->hbalock, iflag);
2341 return rc; 2493 return;
2342} 2494}
2343 2495
2344/** 2496/**
@@ -2420,7 +2572,7 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
2420} 2572}
2421 2573
2422/** 2574/**
2423 * lpfc_sli_brdready - Check for host status bits 2575 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
2424 * @phba: Pointer to HBA context object. 2576 * @phba: Pointer to HBA context object.
2425 * @mask: Bit mask to be checked. 2577 * @mask: Bit mask to be checked.
2426 * 2578 *
@@ -2432,8 +2584,8 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
2432 * function returns 1 when HBA fail to restart otherwise returns 2584 * function returns 1 when HBA fail to restart otherwise returns
2433 * zero. 2585 * zero.
2434 **/ 2586 **/
2435int 2587static int
2436lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) 2588lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
2437{ 2589{
2438 uint32_t status; 2590 uint32_t status;
2439 int i = 0; 2591 int i = 0;
@@ -2647,7 +2799,7 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
2647} 2799}
2648 2800
2649/** 2801/**
2650 * lpfc_sli_brdreset - Reset the HBA 2802 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
2651 * @phba: Pointer to HBA context object. 2803 * @phba: Pointer to HBA context object.
2652 * 2804 *
2653 * This function resets the HBA by writing HC_INITFF to the control 2805 * This function resets the HBA by writing HC_INITFF to the control
@@ -2683,7 +2835,8 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
2683 (cfg_value & 2835 (cfg_value &
2684 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 2836 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
2685 2837
2686 psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA); 2838 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
2839
2687 /* Now toggle INITFF bit in the Host Control Register */ 2840 /* Now toggle INITFF bit in the Host Control Register */
2688 writel(HC_INITFF, phba->HCregaddr); 2841 writel(HC_INITFF, phba->HCregaddr);
2689 mdelay(1); 2842 mdelay(1);
@@ -3289,32 +3442,20 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3289 3442
3290 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 3443 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3291 "0345 Resetting board due to mailbox timeout\n"); 3444 "0345 Resetting board due to mailbox timeout\n");
3292 /* 3445
3293 * lpfc_offline calls lpfc_sli_hba_down which will clean up 3446 /* Reset the HBA device */
3294 * on oustanding mailbox commands. 3447 lpfc_reset_hba(phba);
3295 */
3296 /* If resets are disabled then set error state and return. */
3297 if (!phba->cfg_enable_hba_reset) {
3298 phba->link_state = LPFC_HBA_ERROR;
3299 return;
3300 }
3301 lpfc_offline_prep(phba);
3302 lpfc_offline(phba);
3303 lpfc_sli_brdrestart(phba);
3304 lpfc_online(phba);
3305 lpfc_unblock_mgmt_io(phba);
3306 return;
3307} 3448}
3308 3449
3309/** 3450/**
3310 * lpfc_sli_issue_mbox - Issue a mailbox command to firmware 3451 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
3311 * @phba: Pointer to HBA context object. 3452 * @phba: Pointer to HBA context object.
3312 * @pmbox: Pointer to mailbox object. 3453 * @pmbox: Pointer to mailbox object.
3313 * @flag: Flag indicating how the mailbox need to be processed. 3454 * @flag: Flag indicating how the mailbox need to be processed.
3314 * 3455 *
3315 * This function is called by discovery code and HBA management code 3456 * This function is called by discovery code and HBA management code
3316 * to submit a mailbox command to firmware. This function gets the 3457 * to submit a mailbox command to firmware with SLI-3 interface spec. This
3317 * hbalock to protect the data structures. 3458 * function gets the hbalock to protect the data structures.
3318 * The mailbox command can be submitted in polling mode, in which case 3459 * The mailbox command can be submitted in polling mode, in which case
3319 * this function will wait in a polling loop for the completion of the 3460 * this function will wait in a polling loop for the completion of the
3320 * mailbox. 3461 * mailbox.
@@ -3332,8 +3473,9 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3332 * return codes the caller owns the mailbox command after the return of 3473 * return codes the caller owns the mailbox command after the return of
3333 * the function. 3474 * the function.
3334 **/ 3475 **/
3335int 3476static int
3336lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) 3477lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
3478 uint32_t flag)
3337{ 3479{
3338 MAILBOX_t *mb; 3480 MAILBOX_t *mb;
3339 struct lpfc_sli *psli = &phba->sli; 3481 struct lpfc_sli *psli = &phba->sli;
@@ -3349,6 +3491,10 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3349 spin_lock_irqsave(&phba->hbalock, drvr_flag); 3491 spin_lock_irqsave(&phba->hbalock, drvr_flag);
3350 if (!pmbox) { 3492 if (!pmbox) {
3351 /* processing mbox queue from intr_handler */ 3493 /* processing mbox queue from intr_handler */
3494 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
3495 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3496 return MBX_SUCCESS;
3497 }
3352 processing_queue = 1; 3498 processing_queue = 1;
3353 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 3499 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3354 pmbox = lpfc_mbox_get(phba); 3500 pmbox = lpfc_mbox_get(phba);
@@ -3365,7 +3511,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3365 lpfc_printf_log(phba, KERN_ERR, 3511 lpfc_printf_log(phba, KERN_ERR,
3366 LOG_MBOX | LOG_VPORT, 3512 LOG_MBOX | LOG_VPORT,
3367 "1806 Mbox x%x failed. No vport\n", 3513 "1806 Mbox x%x failed. No vport\n",
3368 pmbox->mb.mbxCommand); 3514 pmbox->u.mb.mbxCommand);
3369 dump_stack(); 3515 dump_stack();
3370 goto out_not_finished; 3516 goto out_not_finished;
3371 } 3517 }
@@ -3385,21 +3531,29 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3385 3531
3386 psli = &phba->sli; 3532 psli = &phba->sli;
3387 3533
3388 mb = &pmbox->mb; 3534 mb = &pmbox->u.mb;
3389 status = MBX_SUCCESS; 3535 status = MBX_SUCCESS;
3390 3536
3391 if (phba->link_state == LPFC_HBA_ERROR) { 3537 if (phba->link_state == LPFC_HBA_ERROR) {
3392 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 3538 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3393 3539
3394 /* Mbox command <mbxCommand> cannot issue */ 3540 /* Mbox command <mbxCommand> cannot issue */
3395 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 3541 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3542 "(%d):0311 Mailbox command x%x cannot "
3543 "issue Data: x%x x%x\n",
3544 pmbox->vport ? pmbox->vport->vpi : 0,
3545 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
3396 goto out_not_finished; 3546 goto out_not_finished;
3397 } 3547 }
3398 3548
3399 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && 3549 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
3400 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { 3550 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
3401 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 3551 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3402 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 3552 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3553 "(%d):2528 Mailbox command x%x cannot "
3554 "issue Data: x%x x%x\n",
3555 pmbox->vport ? pmbox->vport->vpi : 0,
3556 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
3403 goto out_not_finished; 3557 goto out_not_finished;
3404 } 3558 }
3405 3559
@@ -3413,14 +3567,24 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3413 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 3567 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3414 3568
3415 /* Mbox command <mbxCommand> cannot issue */ 3569 /* Mbox command <mbxCommand> cannot issue */
3416 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 3570 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3571 "(%d):2529 Mailbox command x%x "
3572 "cannot issue Data: x%x x%x\n",
3573 pmbox->vport ? pmbox->vport->vpi : 0,
3574 pmbox->u.mb.mbxCommand,
3575 psli->sli_flag, flag);
3417 goto out_not_finished; 3576 goto out_not_finished;
3418 } 3577 }
3419 3578
3420 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) { 3579 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
3421 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 3580 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3422 /* Mbox command <mbxCommand> cannot issue */ 3581 /* Mbox command <mbxCommand> cannot issue */
3423 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 3582 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3583 "(%d):2530 Mailbox command x%x "
3584 "cannot issue Data: x%x x%x\n",
3585 pmbox->vport ? pmbox->vport->vpi : 0,
3586 pmbox->u.mb.mbxCommand,
3587 psli->sli_flag, flag);
3424 goto out_not_finished; 3588 goto out_not_finished;
3425 } 3589 }
3426 3590
@@ -3462,12 +3626,17 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3462 3626
3463 /* If we are not polling, we MUST be in SLI2 mode */ 3627 /* If we are not polling, we MUST be in SLI2 mode */
3464 if (flag != MBX_POLL) { 3628 if (flag != MBX_POLL) {
3465 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) && 3629 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
3466 (mb->mbxCommand != MBX_KILL_BOARD)) { 3630 (mb->mbxCommand != MBX_KILL_BOARD)) {
3467 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 3631 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3468 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 3632 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3469 /* Mbox command <mbxCommand> cannot issue */ 3633 /* Mbox command <mbxCommand> cannot issue */
3470 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 3634 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3635 "(%d):2531 Mailbox command x%x "
3636 "cannot issue Data: x%x x%x\n",
3637 pmbox->vport ? pmbox->vport->vpi : 0,
3638 pmbox->u.mb.mbxCommand,
3639 psli->sli_flag, flag);
3471 goto out_not_finished; 3640 goto out_not_finished;
3472 } 3641 }
3473 /* timeout active mbox command */ 3642 /* timeout active mbox command */
@@ -3506,7 +3675,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3506 /* next set own bit for the adapter and copy over command word */ 3675 /* next set own bit for the adapter and copy over command word */
3507 mb->mbxOwner = OWN_CHIP; 3676 mb->mbxOwner = OWN_CHIP;
3508 3677
3509 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 3678 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3510 /* First copy command data to host SLIM area */ 3679 /* First copy command data to host SLIM area */
3511 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); 3680 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
3512 } else { 3681 } else {
@@ -3529,7 +3698,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3529 3698
3530 if (mb->mbxCommand == MBX_CONFIG_PORT) { 3699 if (mb->mbxCommand == MBX_CONFIG_PORT) {
3531 /* switch over to host mailbox */ 3700 /* switch over to host mailbox */
3532 psli->sli_flag |= LPFC_SLI2_ACTIVE; 3701 psli->sli_flag |= LPFC_SLI_ACTIVE;
3533 } 3702 }
3534 } 3703 }
3535 3704
@@ -3552,7 +3721,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3552 writel(CA_MBATT, phba->CAregaddr); 3721 writel(CA_MBATT, phba->CAregaddr);
3553 readl(phba->CAregaddr); /* flush */ 3722 readl(phba->CAregaddr); /* flush */
3554 3723
3555 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 3724 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3556 /* First read mbox status word */ 3725 /* First read mbox status word */
3557 word0 = *((uint32_t *)phba->mbox); 3726 word0 = *((uint32_t *)phba->mbox);
3558 word0 = le32_to_cpu(word0); 3727 word0 = le32_to_cpu(word0);
@@ -3591,7 +3760,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3591 spin_lock_irqsave(&phba->hbalock, drvr_flag); 3760 spin_lock_irqsave(&phba->hbalock, drvr_flag);
3592 } 3761 }
3593 3762
3594 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 3763 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3595 /* First copy command data */ 3764 /* First copy command data */
3596 word0 = *((uint32_t *)phba->mbox); 3765 word0 = *((uint32_t *)phba->mbox);
3597 word0 = le32_to_cpu(word0); 3766 word0 = le32_to_cpu(word0);
@@ -3604,7 +3773,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3604 if (((slimword0 & OWN_CHIP) != OWN_CHIP) 3773 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
3605 && slimmb->mbxStatus) { 3774 && slimmb->mbxStatus) {
3606 psli->sli_flag &= 3775 psli->sli_flag &=
3607 ~LPFC_SLI2_ACTIVE; 3776 ~LPFC_SLI_ACTIVE;
3608 word0 = slimword0; 3777 word0 = slimword0;
3609 } 3778 }
3610 } 3779 }
@@ -3616,7 +3785,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3616 ha_copy = readl(phba->HAregaddr); 3785 ha_copy = readl(phba->HAregaddr);
3617 } 3786 }
3618 3787
3619 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 3788 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3620 /* copy results back to user */ 3789 /* copy results back to user */
3621 lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE); 3790 lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
3622 } else { 3791 } else {
@@ -3701,35 +3870,34 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3701} 3870}
3702 3871
3703/** 3872/**
3704 * __lpfc_sli_issue_iocb - Lockless version of lpfc_sli_issue_iocb 3873 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
3705 * @phba: Pointer to HBA context object. 3874 * @phba: Pointer to HBA context object.
3706 * @pring: Pointer to driver SLI ring object. 3875 * @ring_number: SLI ring number to issue iocb on.
3707 * @piocb: Pointer to command iocb. 3876 * @piocb: Pointer to command iocb.
3708 * @flag: Flag indicating if this command can be put into txq. 3877 * @flag: Flag indicating if this command can be put into txq.
3709 * 3878 *
3710 * __lpfc_sli_issue_iocb is used by other functions in the driver 3879 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
3711 * to issue an iocb command to the HBA. If the PCI slot is recovering 3880 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
3712 * from error state or if HBA is resetting or if LPFC_STOP_IOCB_EVENT 3881 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
3713 * flag is turned on, the function returns IOCB_ERROR. 3882 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
3714 * When the link is down, this function allows only iocbs for 3883 * this function allows only iocbs for posting buffers. This function finds
3715 * posting buffers. 3884 * next available slot in the command ring and posts the command to the
3716 * This function finds next available slot in the command ring and 3885 * available slot and writes the port attention register to request HBA start
3717 * posts the command to the available slot and writes the port 3886 * processing new iocb. If there is no slot available in the ring and
3718 * attention register to request HBA start processing new iocb. 3887 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
3719 * If there is no slot available in the ring and 3888 * the function returns IOCB_BUSY.
3720 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the
3721 * txq, otherwise the function returns IOCB_BUSY.
3722 * 3889 *
3723 * This function is called with hbalock held. 3890 * This function is called with hbalock held. The function will return success
3724 * The function will return success after it successfully submit the 3891 * after it successfully submit the iocb to firmware or after adding to the
3725 * iocb to firmware or after adding to the txq. 3892 * txq.
3726 **/ 3893 **/
3727static int 3894static int
3728__lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3895__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
3729 struct lpfc_iocbq *piocb, uint32_t flag) 3896 struct lpfc_iocbq *piocb, uint32_t flag)
3730{ 3897{
3731 struct lpfc_iocbq *nextiocb; 3898 struct lpfc_iocbq *nextiocb;
3732 IOCB_t *iocb; 3899 IOCB_t *iocb;
3900 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
3733 3901
3734 if (piocb->iocb_cmpl && (!piocb->vport) && 3902 if (piocb->iocb_cmpl && (!piocb->vport) &&
3735 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 3903 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
@@ -3833,6 +4001,52 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3833 return IOCB_BUSY; 4001 return IOCB_BUSY;
3834} 4002}
3835 4003
4004/**
4005 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
4006 *
4007 * This routine wraps the actual lockless version for issuing IOCB function
4008 * pointer from the lpfc_hba struct.
4009 *
4010 * Return codes:
4011 * IOCB_ERROR - Error
4012 * IOCB_SUCCESS - Success
4013 * IOCB_BUSY - Busy
4014 **/
4015static inline int
4016__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
4017 struct lpfc_iocbq *piocb, uint32_t flag)
4018{
4019 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
4020}
4021
4022/**
4023 * lpfc_sli_api_table_setup - Set up sli api function jump table
4024 * @phba: The hba struct for which this call is being executed.
4025 * @dev_grp: The HBA PCI-Device group number.
4026 *
4027 * This routine sets up the SLI interface API function jump table in @phba
4028 * struct.
4029 * Returns: 0 - success, -ENODEV - failure.
4030 **/
4031int
4032lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4033{
4034
4035 switch (dev_grp) {
4036 case LPFC_PCI_DEV_LP:
4037 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
4038 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
4039 break;
4040 default:
4041 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4042 "1419 Invalid HBA PCI-device group: 0x%x\n",
4043 dev_grp);
4044 return -ENODEV;
4045 break;
4046 }
4047 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
4048 return 0;
4049}
3836 4050
3837/** 4051/**
3838 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb 4052 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
@@ -3848,14 +4062,14 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3848 * functions which do not hold hbalock. 4062 * functions which do not hold hbalock.
3849 **/ 4063 **/
3850int 4064int
3851lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 4065lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
3852 struct lpfc_iocbq *piocb, uint32_t flag) 4066 struct lpfc_iocbq *piocb, uint32_t flag)
3853{ 4067{
3854 unsigned long iflags; 4068 unsigned long iflags;
3855 int rc; 4069 int rc;
3856 4070
3857 spin_lock_irqsave(&phba->hbalock, iflags); 4071 spin_lock_irqsave(&phba->hbalock, iflags);
3858 rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag); 4072 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
3859 spin_unlock_irqrestore(&phba->hbalock, iflags); 4073 spin_unlock_irqrestore(&phba->hbalock, iflags);
3860 4074
3861 return rc; 4075 return rc;
@@ -5077,53 +5291,104 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
5077} 5291}
5078 5292
5079/** 5293/**
5080 * lpfc_sli_flush_mbox_queue - mailbox queue cleanup function 5294 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
5081 * @phba: Pointer to HBA context. 5295 * @phba: Pointer to HBA context.
5082 * 5296 *
5083 * This function is called to cleanup any pending mailbox 5297 * This function is called to shutdown the driver's mailbox sub-system.
5084 * objects in the driver queue before bringing the HBA offline. 5298 * It first marks the mailbox sub-system is in a block state to prevent
5085 * This function is called while resetting the HBA. 5299 * the asynchronous mailbox command from issued off the pending mailbox
5086 * The function is called without any lock held. The function 5300 * command queue. If the mailbox command sub-system shutdown is due to
5087 * takes hbalock to update SLI data structure. 5301 * HBA error conditions such as EEH or ERATT, this routine shall invoke
5088 * This function returns 1 when there is an active mailbox 5302 * the mailbox sub-system flush routine to forcefully bring down the
5089 * command pending else returns 0. 5303 * mailbox sub-system. Otherwise, if it is due to normal condition (such
5304 * as with offline or HBA function reset), this routine will wait for the
5305 * outstanding mailbox command to complete before invoking the mailbox
5306 * sub-system flush routine to gracefully bring down mailbox sub-system.
5090 **/ 5307 **/
5091int 5308void
5092lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba) 5309lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
5093{ 5310{
5094 struct lpfc_vport *vport = phba->pport; 5311 struct lpfc_sli *psli = &phba->sli;
5095 int i = 0; 5312 uint8_t actcmd = MBX_HEARTBEAT;
5096 uint32_t ha_copy; 5313 unsigned long timeout;
5097 5314
5098 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) { 5315 spin_lock_irq(&phba->hbalock);
5099 if (i++ > LPFC_MBOX_TMO * 1000) 5316 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
5100 return 1; 5317 spin_unlock_irq(&phba->hbalock);
5101 5318
5102 /* 5319 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
5103 * Call lpfc_sli_handle_mb_event only if a mailbox cmd
5104 * did finish. This way we won't get the misleading
5105 * "Stray Mailbox Interrupt" message.
5106 */
5107 spin_lock_irq(&phba->hbalock); 5320 spin_lock_irq(&phba->hbalock);
5108 ha_copy = phba->work_ha; 5321 if (phba->sli.mbox_active)
5109 phba->work_ha &= ~HA_MBATT; 5322 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
5110 spin_unlock_irq(&phba->hbalock); 5323 spin_unlock_irq(&phba->hbalock);
5324 /* Determine how long we might wait for the active mailbox
5325 * command to be gracefully completed by firmware.
5326 */
5327 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) *
5328 1000) + jiffies;
5329 while (phba->sli.mbox_active) {
5330 /* Check active mailbox complete status every 2ms */
5331 msleep(2);
5332 if (time_after(jiffies, timeout))
5333 /* Timeout, let the mailbox flush routine to
5334 * forcefully release active mailbox command
5335 */
5336 break;
5337 }
5338 }
5339 lpfc_sli_mbox_sys_flush(phba);
5340}
5111 5341
5112 if (ha_copy & HA_MBATT) 5342/**
5113 if (lpfc_sli_handle_mb_event(phba) == 0) 5343 * lpfc_sli_eratt_read - read sli-3 error attention events
5114 i = 0; 5344 * @phba: Pointer to HBA context.
5345 *
5346 * This function is called to read the SLI3 device error attention registers
5347 * for possible error attention events. The caller must hold the hostlock
5348 * with spin_lock_irq().
5349 *
5350 * This function returns 1 when there is Error Attention in the Host Attention
5351 * Register and returns 0 otherwise.
5352 **/
5353static int
5354lpfc_sli_eratt_read(struct lpfc_hba *phba)
5355{
5356 uint32_t ha_copy;
5115 5357
5116 msleep(1); 5358 /* Read chip Host Attention (HA) register */
5117 } 5359 ha_copy = readl(phba->HAregaddr);
5360 if (ha_copy & HA_ERATT) {
5361 /* Read host status register to retrieve error event */
5362 lpfc_sli_read_hs(phba);
5118 5363
5119 return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0; 5364 /* Check if there is a deferred error condition is active */
5365 if ((HS_FFER1 & phba->work_hs) &&
5366 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
5367 HS_FFER6 | HS_FFER7) & phba->work_hs)) {
5368 spin_lock_irq(&phba->hbalock);
5369 phba->hba_flag |= DEFER_ERATT;
5370 spin_unlock_irq(&phba->hbalock);
5371 /* Clear all interrupt enable conditions */
5372 writel(0, phba->HCregaddr);
5373 readl(phba->HCregaddr);
5374 }
5375
5376 /* Set the driver HA work bitmap */
5377 spin_lock_irq(&phba->hbalock);
5378 phba->work_ha |= HA_ERATT;
5379 /* Indicate polling handles this ERATT */
5380 phba->hba_flag |= HBA_ERATT_HANDLED;
5381 spin_unlock_irq(&phba->hbalock);
5382 return 1;
5383 }
5384 return 0;
5120} 5385}
5121 5386
5122/** 5387/**
5123 * lpfc_sli_check_eratt - check error attention events 5388 * lpfc_sli_check_eratt - check error attention events
5124 * @phba: Pointer to HBA context. 5389 * @phba: Pointer to HBA context.
5125 * 5390 *
5126 * This function is called form timer soft interrupt context to check HBA's 5391 * This function is called from timer soft interrupt context to check HBA's
5127 * error attention register bit for error attention events. 5392 * error attention register bit for error attention events.
5128 * 5393 *
5129 * This function returns 1 when there is Error Attention in the Host Attention 5394
@@ -5134,10 +5399,6 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
5134{ 5399{
5135 uint32_t ha_copy; 5400 uint32_t ha_copy;
5136 5401
5137 /* If PCI channel is offline, don't process it */
5138 if (unlikely(pci_channel_offline(phba->pcidev)))
5139 return 0;
5140
5141 /* If somebody is waiting to handle an eratt, don't process it 5402 /* If somebody is waiting to handle an eratt, don't process it
5142 * here. The brdkill function will do this. 5403 * here. The brdkill function will do this.
5143 */ 5404 */
@@ -5161,56 +5422,80 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
5161 return 0; 5422 return 0;
5162 } 5423 }
5163 5424
5164 /* Read chip Host Attention (HA) register */ 5425 /* If PCI channel is offline, don't process it */
5165 ha_copy = readl(phba->HAregaddr); 5426 if (unlikely(pci_channel_offline(phba->pcidev))) {
5166 if (ha_copy & HA_ERATT) {
5167 /* Read host status register to retrieve error event */
5168 lpfc_sli_read_hs(phba);
5169
5170 /* Check if there is a deferred error condition is active */
5171 if ((HS_FFER1 & phba->work_hs) &&
5172 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
5173 HS_FFER6 | HS_FFER7) & phba->work_hs)) {
5174 phba->hba_flag |= DEFER_ERATT;
5175 /* Clear all interrupt enable conditions */
5176 writel(0, phba->HCregaddr);
5177 readl(phba->HCregaddr);
5178 }
5179
5180 /* Set the driver HA work bitmap */
5181 phba->work_ha |= HA_ERATT;
5182 /* Indicate polling handles this ERATT */
5183 phba->hba_flag |= HBA_ERATT_HANDLED;
5184 spin_unlock_irq(&phba->hbalock); 5427 spin_unlock_irq(&phba->hbalock);
5185 return 1; 5428 return 0;
5429 }
5430
5431 switch (phba->sli_rev) {
5432 case LPFC_SLI_REV2:
5433 case LPFC_SLI_REV3:
5434 /* Read chip Host Attention (HA) register */
5435 ha_copy = lpfc_sli_eratt_read(phba);
5436 break;
5437 default:
5438 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5439 "0299 Invalid SLI revision (%d)\n",
5440 phba->sli_rev);
5441 ha_copy = 0;
5442 break;
5186 } 5443 }
5187 spin_unlock_irq(&phba->hbalock); 5444 spin_unlock_irq(&phba->hbalock);
5445
5446 return ha_copy;
5447}
5448
5449/**
5450 * lpfc_intr_state_check - Check device state for interrupt handling
5451 * @phba: Pointer to HBA context.
5452 *
5453 * This inline routine checks whether a device or its PCI slot is in a state
5454 * that the interrupt should be handled.
5455 *
5456 * This function returns 0 if the device or the PCI slot is in a state that
5457 * interrupt should be handled, otherwise -EIO.
5458 */
5459static inline int
5460lpfc_intr_state_check(struct lpfc_hba *phba)
5461{
5462 /* If the pci channel is offline, ignore all the interrupts */
5463 if (unlikely(pci_channel_offline(phba->pcidev)))
5464 return -EIO;
5465
5466 /* Update device level interrupt statistics */
5467 phba->sli.slistat.sli_intr++;
5468
5469 /* Ignore all interrupts during initialization. */
5470 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
5471 return -EIO;
5472
5188 return 0; 5473 return 0;
5189} 5474}
5190 5475
5191/** 5476/**
5192 * lpfc_sp_intr_handler - The slow-path interrupt handler of lpfc driver 5477 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
5193 * @irq: Interrupt number. 5478 * @irq: Interrupt number.
5194 * @dev_id: The device context pointer. 5479 * @dev_id: The device context pointer.
5195 * 5480 *
5196 * This function is directly called from the PCI layer as an interrupt 5481 * This function is directly called from the PCI layer as an interrupt
5197 * service routine when the device is enabled with MSI-X multi-message 5482 * service routine when device with SLI-3 interface spec is enabled with
5198 * interrupt mode and there are slow-path events in the HBA. However, 5483 * MSI-X multi-message interrupt mode and there are slow-path events in
5199 * when the device is enabled with either MSI or Pin-IRQ interrupt mode, 5484 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
5200 * this function is called as part of the device-level interrupt handler. 5485 * interrupt mode, this function is called as part of the device-level
5201 * When the PCI slot is in error recovery or the HBA is undergoing 5486 * interrupt handler. When the PCI slot is in error recovery or the HBA
5202 * initialization, the interrupt handler will not process the interrupt. 5487 * is undergoing initialization, the interrupt handler will not process
5203 * The link attention and ELS ring attention events are handled by the 5488 * the interrupt. The link attention and ELS ring attention events are
5204 * worker thread. The interrupt handler signals the worker thread and 5489 * handled by the worker thread. The interrupt handler signals the worker
5205 * and returns for these events. This function is called without any 5490 * thread and returns for these events. This function is called without
5206 * lock held. It gets the hbalock to access and update SLI data 5491 * any lock held. It gets the hbalock to access and update SLI data
5207 * structures. 5492 * structures.
5208 * 5493 *
5209 * This function returns IRQ_HANDLED when interrupt is handled else it 5494 * This function returns IRQ_HANDLED when interrupt is handled else it
5210 * returns IRQ_NONE. 5495 * returns IRQ_NONE.
5211 **/ 5496 **/
5212irqreturn_t 5497irqreturn_t
5213lpfc_sp_intr_handler(int irq, void *dev_id) 5498lpfc_sli_sp_intr_handler(int irq, void *dev_id)
5214{ 5499{
5215 struct lpfc_hba *phba; 5500 struct lpfc_hba *phba;
5216 uint32_t ha_copy; 5501 uint32_t ha_copy;
@@ -5240,13 +5525,8 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5240 * individual interrupt handler in MSI-X multi-message interrupt mode 5525 * individual interrupt handler in MSI-X multi-message interrupt mode
5241 */ 5526 */
5242 if (phba->intr_type == MSIX) { 5527 if (phba->intr_type == MSIX) {
5243 /* If the pci channel is offline, ignore all the interrupts */ 5528 /* Check device state for handling interrupt */
5244 if (unlikely(pci_channel_offline(phba->pcidev))) 5529 if (lpfc_intr_state_check(phba))
5245 return IRQ_NONE;
5246 /* Update device-level interrupt statistics */
5247 phba->sli.slistat.sli_intr++;
5248 /* Ignore all interrupts during initialization. */
5249 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
5250 return IRQ_NONE; 5530 return IRQ_NONE;
5251 /* Need to read HA REG for slow-path events */ 5531 /* Need to read HA REG for slow-path events */
5252 spin_lock_irqsave(&phba->hbalock, iflag); 5532 spin_lock_irqsave(&phba->hbalock, iflag);
@@ -5271,7 +5551,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5271 * interrupt. 5551 * interrupt.
5272 */ 5552 */
5273 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 5553 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
5274 spin_unlock_irq(&phba->hbalock); 5554 spin_unlock_irqrestore(&phba->hbalock, iflag);
5275 return IRQ_NONE; 5555 return IRQ_NONE;
5276 } 5556 }
5277 5557
@@ -5434,7 +5714,8 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5434 LOG_MBOX | LOG_SLI, 5714 LOG_MBOX | LOG_SLI,
5435 "0350 rc should have" 5715 "0350 rc should have"
5436 "been MBX_BUSY"); 5716 "been MBX_BUSY");
5437 goto send_current_mbox; 5717 if (rc != MBX_NOT_FINISHED)
5718 goto send_current_mbox;
5438 } 5719 }
5439 } 5720 }
5440 spin_lock_irqsave( 5721 spin_lock_irqsave(
@@ -5471,29 +5752,29 @@ send_current_mbox:
5471 } 5752 }
5472 return IRQ_HANDLED; 5753 return IRQ_HANDLED;
5473 5754
5474} /* lpfc_sp_intr_handler */ 5755} /* lpfc_sli_sp_intr_handler */
5475 5756
5476/** 5757/**
5477 * lpfc_fp_intr_handler - The fast-path interrupt handler of lpfc driver 5758 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
5478 * @irq: Interrupt number. 5759 * @irq: Interrupt number.
5479 * @dev_id: The device context pointer. 5760 * @dev_id: The device context pointer.
5480 * 5761 *
5481 * This function is directly called from the PCI layer as an interrupt 5762 * This function is directly called from the PCI layer as an interrupt
5482 * service routine when the device is enabled with MSI-X multi-message 5763 * service routine when device with SLI-3 interface spec is enabled with
5483 * interrupt mode and there is a fast-path FCP IOCB ring event in the 5764 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
5484 * HBA. However, when the device is enabled with either MSI or Pin-IRQ 5765 * ring event in the HBA. However, when the device is enabled with either
5485 * interrupt mode, this function is called as part of the device-level 5766 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
5486 * interrupt handler. When the PCI slot is in error recovery or the HBA 5767 * device-level interrupt handler. When the PCI slot is in error recovery
5487 * is undergoing initialization, the interrupt handler will not process 5768 * or the HBA is undergoing initialization, the interrupt handler will not
5488 * the interrupt. The SCSI FCP fast-path ring event are handled in the 5769 * process the interrupt. The SCSI FCP fast-path ring event are handled in
5489 * interrupt context. This function is called without any lock held. It 5770 * the interrupt context. This function is called without any lock held.
5490 * gets the hbalock to access and update SLI data structures. 5771 * It gets the hbalock to access and update SLI data structures.
5491 * 5772 *
5492 * This function returns IRQ_HANDLED when interrupt is handled else it 5773 * This function returns IRQ_HANDLED when interrupt is handled else it
5493 * returns IRQ_NONE. 5774 * returns IRQ_NONE.
5494 **/ 5775 **/
5495irqreturn_t 5776irqreturn_t
5496lpfc_fp_intr_handler(int irq, void *dev_id) 5777lpfc_sli_fp_intr_handler(int irq, void *dev_id)
5497{ 5778{
5498 struct lpfc_hba *phba; 5779 struct lpfc_hba *phba;
5499 uint32_t ha_copy; 5780 uint32_t ha_copy;
@@ -5513,13 +5794,8 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
5513 * individual interrupt handler in MSI-X multi-message interrupt mode 5794 * individual interrupt handler in MSI-X multi-message interrupt mode
5514 */ 5795 */
5515 if (phba->intr_type == MSIX) { 5796 if (phba->intr_type == MSIX) {
5516 /* If pci channel is offline, ignore all the interrupts */ 5797 /* Check device state for handling interrupt */
5517 if (unlikely(pci_channel_offline(phba->pcidev))) 5798 if (lpfc_intr_state_check(phba))
5518 return IRQ_NONE;
5519 /* Update device-level interrupt statistics */
5520 phba->sli.slistat.sli_intr++;
5521 /* Ignore all interrupts during initialization. */
5522 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
5523 return IRQ_NONE; 5799 return IRQ_NONE;
5524 /* Need to read HA REG for FCP ring and other ring events */ 5800 /* Need to read HA REG for FCP ring and other ring events */
5525 ha_copy = readl(phba->HAregaddr); 5801 ha_copy = readl(phba->HAregaddr);
@@ -5530,7 +5806,7 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
5530 * any interrupt. 5806 * any interrupt.
5531 */ 5807 */
5532 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 5808 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
5533 spin_unlock_irq(&phba->hbalock); 5809 spin_unlock_irqrestore(&phba->hbalock, iflag);
5534 return IRQ_NONE; 5810 return IRQ_NONE;
5535 } 5811 }
5536 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), 5812 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
@@ -5566,26 +5842,27 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
5566 } 5842 }
5567 } 5843 }
5568 return IRQ_HANDLED; 5844 return IRQ_HANDLED;
5569} /* lpfc_fp_intr_handler */ 5845} /* lpfc_sli_fp_intr_handler */
5570 5846
5571/** 5847/**
5572 * lpfc_intr_handler - The device-level interrupt handler of lpfc driver 5848 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
5573 * @irq: Interrupt number. 5849 * @irq: Interrupt number.
5574 * @dev_id: The device context pointer. 5850 * @dev_id: The device context pointer.
5575 * 5851 *
5576 * This function is the device-level interrupt handler called from the PCI 5852 * This function is the HBA device-level interrupt handler to device with
5577 * layer when either MSI or Pin-IRQ interrupt mode is enabled and there is 5853 * SLI-3 interface spec, called from the PCI layer when either MSI or
5578 * an event in the HBA which requires driver attention. This function 5854 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
5579 * invokes the slow-path interrupt attention handling function and fast-path 5855 * requires driver attention. This function invokes the slow-path interrupt
5580 * interrupt attention handling function in turn to process the relevant 5856 * attention handling function and fast-path interrupt attention handling
5581 * HBA attention events. This function is called without any lock held. It 5857 * function in turn to process the relevant HBA attention events. This
5582 * gets the hbalock to access and update SLI data structures. 5858 * function is called without any lock held. It gets the hbalock to access
5859 * and update SLI data structures.
5583 * 5860 *
5584 * This function returns IRQ_HANDLED when interrupt is handled, else it 5861 * This function returns IRQ_HANDLED when interrupt is handled, else it
5585 * returns IRQ_NONE. 5862 * returns IRQ_NONE.
5586 **/ 5863 **/
5587irqreturn_t 5864irqreturn_t
5588lpfc_intr_handler(int irq, void *dev_id) 5865lpfc_sli_intr_handler(int irq, void *dev_id)
5589{ 5866{
5590 struct lpfc_hba *phba; 5867 struct lpfc_hba *phba;
5591 irqreturn_t sp_irq_rc, fp_irq_rc; 5868 irqreturn_t sp_irq_rc, fp_irq_rc;
@@ -5600,15 +5877,8 @@ lpfc_intr_handler(int irq, void *dev_id)
5600 if (unlikely(!phba)) 5877 if (unlikely(!phba))
5601 return IRQ_NONE; 5878 return IRQ_NONE;
5602 5879
5603 /* If the pci channel is offline, ignore all the interrupts. */ 5880 /* Check device state for handling interrupt */
5604 if (unlikely(pci_channel_offline(phba->pcidev))) 5881 if (lpfc_intr_state_check(phba))
5605 return IRQ_NONE;
5606
5607 /* Update device level interrupt statistics */
5608 phba->sli.slistat.sli_intr++;
5609
5610 /* Ignore all interrupts during initialization. */
5611 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
5612 return IRQ_NONE; 5882 return IRQ_NONE;
5613 5883
5614 spin_lock(&phba->hbalock); 5884 spin_lock(&phba->hbalock);
@@ -5650,7 +5920,7 @@ lpfc_intr_handler(int irq, void *dev_id)
5650 status2 >>= (4*LPFC_ELS_RING); 5920 status2 >>= (4*LPFC_ELS_RING);
5651 5921
5652 if (status1 || (status2 & HA_RXMASK)) 5922 if (status1 || (status2 & HA_RXMASK))
5653 sp_irq_rc = lpfc_sp_intr_handler(irq, dev_id); 5923 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
5654 else 5924 else
5655 sp_irq_rc = IRQ_NONE; 5925 sp_irq_rc = IRQ_NONE;
5656 5926
@@ -5670,10 +5940,10 @@ lpfc_intr_handler(int irq, void *dev_id)
5670 status2 = 0; 5940 status2 = 0;
5671 5941
5672 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) 5942 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
5673 fp_irq_rc = lpfc_fp_intr_handler(irq, dev_id); 5943 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
5674 else 5944 else
5675 fp_irq_rc = IRQ_NONE; 5945 fp_irq_rc = IRQ_NONE;
5676 5946
5677 /* Return device-level interrupt handling status */ 5947 /* Return device-level interrupt handling status */
5678 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; 5948 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
5679} /* lpfc_intr_handler */ 5949} /* lpfc_sli_intr_handler */