author		Linus Torvalds <torvalds@linux-foundation.org>	2008-10-17 12:00:23 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-10-17 12:00:23 -0400
commit		ed09441dacc2a2d6c170aa3b1f79a041291a813f
tree		95c35bdf4f0b679806984093dce627a66d0d7cf1 /drivers
parent		b225ee5bed70254a100896c473e6dd8c2be45c18
parent		4c393e6e457fb41169dd110c1b96a138394c2d7b
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (39 commits)
[SCSI] sd: fix compile failure with CONFIG_BLK_DEV_INTEGRITY=n
libiscsi: fix locking in iscsi_eh_device_reset
libiscsi: check reason why we are stopping iscsi session to determine error value
[SCSI] iscsi_tcp: return a descriptive error value during connection errors
[SCSI] libiscsi: rename host reset to target reset
[SCSI] iscsi class: fix endpoint id handling
[SCSI] libiscsi: Support drivers initiating session removal
[SCSI] libiscsi: fix data corruption when target has to resend data-in packets
[SCSI] sd: Switch kernel printing level for DIF messages
[SCSI] sd: Correctly handle all combinations of DIF and DIX
[SCSI] sd: Always print actual protection_type
[SCSI] sd: Issue correct protection operation
[SCSI] scsi_error: fix target reset handling
[SCSI] lpfc 8.2.8 v2 : Add statistical reporting control and additional fc vendor events
[SCSI] lpfc 8.2.8 v2 : Add sysfs control of target queue depth handling
[SCSI] lpfc 8.2.8 v2 : Revert target busy in favor of transport disrupted
[SCSI] scsi_dh_alua: remove REQ_NOMERGE
[SCSI] lpfc 8.2.8 : update driver version to 8.2.8
[SCSI] lpfc 8.2.8 : Add MSI-X support
[SCSI] lpfc 8.2.8 : Update driver to use new Host byte error code DID_TRANSPORT_DISRUPTED
...
Diffstat (limited to 'drivers')
51 files changed, 7924 insertions, 1092 deletions
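A change that recurs through the dm-mpath, md/multipath, s390 DASD and scsi_dh hunks below is the split of the old single fail-fast flag into per-cause variants. The following is a minimal sketch of the post-merge flag usage, assembled from the diffs on this page; the helper names mark_transport_failfast(), build_noretry_request() and request_skips_retries() are illustrative only, not kernel APIs.

/* Illustrative sketch only -- not part of this commit. */
#include <linux/bio.h>
#include <linux/blkdev.h>

static void mark_transport_failfast(struct bio *bio)
{
	/* multipath submitters now fail fast only on transport errors,
	 * so a failed path can be retried on another path */
	bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
}

static void build_noretry_request(struct request *rq)
{
	/* the scsi_dh handlers request all three failure classes explicitly */
	rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
			 REQ_FAILFAST_DRIVER;
}

static int request_skips_retries(struct request *rq)
{
	/* drivers that only need a yes/no answer (the DASD build_cp
	 * routines below) use blk_noretry_request(), which tests the
	 * REQ_FAILFAST_* bits */
	return blk_noretry_request(rq);
}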
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 5a1cf2580e16..1e5b6446231d 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -378,6 +378,7 @@ static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
 {
 	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
 
+	iscsi_session_teardown(cls_session);
 	iscsi_host_remove(shost);
 	iscsi_host_free(shost);
 }
@@ -597,7 +598,7 @@ static struct scsi_host_template iscsi_iser_sht = {
 	.cmd_per_lun            = ISCSI_MAX_CMD_PER_LUN,
 	.eh_abort_handler       = iscsi_eh_abort,
 	.eh_device_reset_handler= iscsi_eh_device_reset,
-	.eh_host_reset_handler	= iscsi_eh_host_reset,
+	.eh_target_reset_handler= iscsi_eh_target_reset,
 	.use_clustering         = DISABLE_CLUSTERING,
 	.proc_name              = "iscsi_iser",
 	.this_id                = -1,
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 103304c1e3b0..9bf3460c5540 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -849,7 +849,7 @@ static int multipath_map(struct dm_target *ti, struct bio *bio,
 	dm_bio_record(&mpio->details, bio);
 
 	map_context->ptr = mpio;
-	bio->bi_rw |= (1 << BIO_RW_FAILFAST);
+	bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
 	r = map_io(m, bio, mpio, 0);
 	if (r < 0 || r == DM_MAPIO_REQUEUE)
 		mempool_free(mpio, m->mpio_pool);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 8744014b9d80..d4ac47d11279 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -167,7 +167,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
 	mp_bh->bio = *bio;
 	mp_bh->bio.bi_sector += multipath->rdev->data_offset;
 	mp_bh->bio.bi_bdev = multipath->rdev->bdev;
-	mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST);
+	mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
 	mp_bh->bio.bi_end_io = multipath_end_request;
 	mp_bh->bio.bi_private = mp_bh;
 	generic_make_request(&mp_bh->bio);
@@ -393,7 +393,7 @@ static void multipathd (mddev_t *mddev)
 		*bio = *(mp_bh->master_bio);
 		bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset;
 		bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
-		bio->bi_rw |= (1 << BIO_RW_FAILFAST);
+		bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
 		bio->bi_end_io = multipath_end_request;
 		bio->bi_private = mp_bh;
 		generic_make_request(bio);
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 85fcb4371054..7844461a995b 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -544,7 +544,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
 	}
 	cqr->retries = DIAG_MAX_RETRIES;
 	cqr->buildclk = get_clock();
-	if (req->cmd_flags & REQ_FAILFAST)
+	if (blk_noretry_request(req))
 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
 	cqr->startdev = memdev;
 	cqr->memdev = memdev;
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 49f9d221e23d..2e60d5f968c8 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1700,7 +1700,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
 			recid++;
 		}
 	}
-	if (req->cmd_flags & REQ_FAILFAST)
+	if (blk_noretry_request(req))
 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
 	cqr->startdev = startdev;
 	cqr->memdev = startdev;
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 93d9b6452a94..7d442aeff3d1 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -355,7 +355,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
 			recid++;
 		}
 	}
-	if (req->cmd_flags & REQ_FAILFAST)
+	if (blk_noretry_request(req))
 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
 	cqr->startdev = memdev;
 	cqr->memdev = memdev;
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index 9785d7384199..4003deefb7d8 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -1364,7 +1364,8 @@ EXPORT_SYMBOL(scsi_print_sense);
 static const char * const hostbyte_table[]={
 "DID_OK", "DID_NO_CONNECT", "DID_BUS_BUSY", "DID_TIME_OUT", "DID_BAD_TARGET",
 "DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR",
-"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY", "DID_REQUEUE"};
+"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY", "DID_REQUEUE",
+"DID_TRANSPORT_DISRUPTED", "DID_TRANSPORT_FAILFAST" };
 #define NUM_HOSTBYTE_STRS ARRAY_SIZE(hostbyte_table)
 
 static const char * const driverbyte_table[]={
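The two strings appended to hostbyte_table above name new host byte codes that low-level drivers place in the upper 16 bits of scsi_cmnd->result; libiscsi below, for example, completes commands with DID_TRANSPORT_FAILFAST once session recovery has timed out. A hedged sketch of how a driver might report a recoverable transport outage follows; the helper name report_transport_disrupted() is hypothetical.

/* Illustrative sketch only; the completion helper name is hypothetical. */
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

static void report_transport_disrupted(struct scsi_cmnd *sc)
{
	/* transport is down but may come back: the midlayer retries or
	 * fast-fails the command depending on its REQ_FAILFAST_* bits */
	sc->result = DID_TRANSPORT_DISRUPTED << 16;
	sc->scsi_done(sc);
}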
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 708e475896b9..e356b43753ff 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -109,7 +109,8 @@ static struct request *get_alua_req(struct scsi_device *sdev,
 	}
 
 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
-	rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
+	rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+			 REQ_FAILFAST_DRIVER;
 	rq->retries = ALUA_FAILOVER_RETRIES;
 	rq->timeout = ALUA_FAILOVER_TIMEOUT;
 
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 8f45570a8a01..0e572d2c5b0a 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -303,7 +303,8 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
 
 	rq->cmd[4] = len;
 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
-	rq->cmd_flags |= REQ_FAILFAST;
+	rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+			 REQ_FAILFAST_DRIVER;
 	rq->timeout = CLARIION_TIMEOUT;
 	rq->retries = CLARIION_RETRIES;
 
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 5e93c88ad66b..9aec4ca64e56 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -112,7 +112,8 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
 		return SCSI_DH_RES_TEMP_UNAVAIL;
 
 	req->cmd_type = REQ_TYPE_BLOCK_PC;
-	req->cmd_flags |= REQ_FAILFAST;
+	req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+			  REQ_FAILFAST_DRIVER;
 	req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
 	req->cmd[0] = TEST_UNIT_READY;
 	req->timeout = HP_SW_TIMEOUT;
@@ -204,7 +205,8 @@ static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h)
 		return SCSI_DH_RES_TEMP_UNAVAIL;
 
 	req->cmd_type = REQ_TYPE_BLOCK_PC;
-	req->cmd_flags |= REQ_FAILFAST;
+	req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+			  REQ_FAILFAST_DRIVER;
 	req->cmd_len = COMMAND_SIZE(START_STOP);
 	req->cmd[0] = START_STOP;
 	req->cmd[4] = 1;	/* Start spin cycle */
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 50bf95f3b5c4..a43c3ed4df28 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -226,7 +226,8 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
 	}
 
 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
-	rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
+	rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+			 REQ_FAILFAST_DRIVER;
 	rq->retries = RDAC_RETRIES;
 	rq->timeout = RDAC_TIMEOUT;
 
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 4e0b7c8eb32e..7650707a40de 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2031,8 +2031,6 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
 		spin_unlock_irqrestore(shost->host_lock, flags);
 	} else
 		ibmvfc_issue_fc_host_lip(shost);
-
-	scsi_target_unblock(&rport->dev);
 	LEAVE;
 }
 
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 2a2f0094570f..ed6c54cae7b1 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -523,22 +523,20 @@ iscsi_tcp_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
 }
 
 /**
- * iscsi_data_rsp - SCSI Data-In Response processing
+ * iscsi_data_in - SCSI Data-In Response processing
  * @conn: iscsi connection
  * @task: scsi command task
  **/
 static int
-iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+iscsi_data_in(struct iscsi_conn *conn, struct iscsi_task *task)
 {
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
 	struct iscsi_tcp_task *tcp_task = task->dd_data;
 	struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
-	struct iscsi_session *session = conn->session;
-	struct scsi_cmnd *sc = task->sc;
 	int datasn = be32_to_cpu(rhdr->datasn);
-	unsigned total_in_length = scsi_in(sc)->length;
+	unsigned total_in_length = scsi_in(task->sc)->length;
 
-	iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+	iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr);
 	if (tcp_conn->in.datalen == 0)
 		return 0;
 
@@ -558,23 +556,6 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
 		return ISCSI_ERR_DATA_OFFSET;
 	}
 
-	if (rhdr->flags & ISCSI_FLAG_DATA_STATUS) {
-		sc->result = (DID_OK << 16) | rhdr->cmd_status;
-		conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
-		if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
-				   ISCSI_FLAG_DATA_OVERFLOW)) {
-			int res_count = be32_to_cpu(rhdr->residual_count);
-
-			if (res_count > 0 &&
-			    (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
-			     res_count <= total_in_length))
-				scsi_in(sc)->resid = res_count;
-			else
-				sc->result = (DID_BAD_TARGET << 16) |
-					     rhdr->cmd_status;
-		}
-	}
-
 	conn->datain_pdus_cnt++;
 	return 0;
 }
@@ -774,7 +755,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
 		if (!task)
 			rc = ISCSI_ERR_BAD_ITT;
 		else
-			rc = iscsi_data_rsp(conn, task);
+			rc = iscsi_data_in(conn, task);
 		if (rc) {
 			spin_unlock(&conn->session->lock);
 			break;
@@ -998,7 +979,7 @@ iscsi_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
 
 error:
 	debug_tcp("Error receiving PDU, errno=%d\n", rc);
-	iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+	iscsi_conn_failure(conn, rc);
 	return 0;
 }
 
@@ -1117,8 +1098,10 @@ iscsi_xmit(struct iscsi_conn *conn)
 
 	while (1) {
 		rc = iscsi_tcp_xmit_segment(tcp_conn, segment);
-		if (rc < 0)
+		if (rc < 0) {
+			rc = ISCSI_ERR_XMIT_FAILED;
 			goto error;
+		}
 		if (rc == 0)
 			break;
 
@@ -1127,7 +1110,7 @@ iscsi_xmit(struct iscsi_conn *conn)
 		if (segment->total_copied >= segment->total_size) {
 			if (segment->done != NULL) {
 				rc = segment->done(tcp_conn, segment);
-				if (rc < 0)
+				if (rc != 0)
 					goto error;
 			}
 		}
@@ -1142,8 +1125,8 @@ error:
 	/* Transmit error. We could initiate error recovery
 	 * here. */
 	debug_tcp("Error sending PDU, errno=%d\n", rc);
-	iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
-	return rc;
+	iscsi_conn_failure(conn, rc);
+	return -EIO;
 }
 
 /**
@@ -1904,6 +1887,7 @@ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
 	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
 
 	iscsi_r2tpool_free(cls_session->dd_data);
+	iscsi_session_teardown(cls_session);
 
 	iscsi_host_remove(shost);
 	iscsi_host_free(shost);
@@ -1927,7 +1911,7 @@ static struct scsi_host_template iscsi_sht = {
 	.cmd_per_lun		= ISCSI_DEF_CMD_PER_LUN,
 	.eh_abort_handler       = iscsi_eh_abort,
 	.eh_device_reset_handler= iscsi_eh_device_reset,
-	.eh_host_reset_handler	= iscsi_eh_host_reset,
+	.eh_target_reset_handler= iscsi_eh_target_reset,
 	.use_clustering         = DISABLE_CLUSTERING,
 	.slave_configure        = iscsi_tcp_slave_configure,
 	.proc_name		= "iscsi_tcp",
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index da7b67d30d9a..801c7cf54d2e 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -404,11 +404,6 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
 		conn->session->queued_cmdsn--;
 	else
 		conn->session->tt->cleanup_task(conn, task);
-	/*
-	 * Check if cleanup_task dropped the lock and the command completed,
-	 */
-	if (!task->sc)
-		return;
 
 	sc->result = err;
 	if (!scsi_bidi_cmnd(sc))
@@ -633,6 +628,40 @@ out:
 	__iscsi_put_task(task);
 }
 
+/**
+ * iscsi_data_in_rsp - SCSI Data-In Response processing
+ * @conn: iscsi connection
+ * @hdr: iscsi pdu
+ * @task: scsi command task
+ **/
+static void
+iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+		  struct iscsi_task *task)
+{
+	struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)hdr;
+	struct scsi_cmnd *sc = task->sc;
+
+	if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
+		return;
+
+	sc->result = (DID_OK << 16) | rhdr->cmd_status;
+	conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
+	if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
+			   ISCSI_FLAG_DATA_OVERFLOW)) {
+		int res_count = be32_to_cpu(rhdr->residual_count);
+
+		if (res_count > 0 &&
+		    (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
+		     res_count <= scsi_in(sc)->length))
+			scsi_in(sc)->resid = res_count;
+		else
+			sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
+	}
+
+	conn->scsirsp_pdus_cnt++;
+	__iscsi_put_task(task);
+}
+
 static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
 {
 	struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr;
@@ -818,12 +847,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 		iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen);
 		break;
 	case ISCSI_OP_SCSI_DATA_IN:
-		if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
-			conn->scsirsp_pdus_cnt++;
-			iscsi_update_cmdsn(session,
-					   (struct iscsi_nopin*) hdr);
-			__iscsi_put_task(task);
-		}
+		iscsi_data_in_rsp(conn, hdr, task);
 		break;
 	case ISCSI_OP_LOGOUT_RSP:
 		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
@@ -954,6 +978,38 @@ struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
 }
 EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask);
 
+void iscsi_session_failure(struct iscsi_cls_session *cls_session,
+			   enum iscsi_err err)
+{
+	struct iscsi_session *session = cls_session->dd_data;
+	struct iscsi_conn *conn;
+	struct device *dev;
+	unsigned long flags;
+
+	spin_lock_irqsave(&session->lock, flags);
+	conn = session->leadconn;
+	if (session->state == ISCSI_STATE_TERMINATE || !conn) {
+		spin_unlock_irqrestore(&session->lock, flags);
+		return;
+	}
+
+	dev = get_device(&conn->cls_conn->dev);
+	spin_unlock_irqrestore(&session->lock, flags);
+	if (!dev)
+		return;
+	/*
+	 * if the host is being removed bypass the connection
+	 * recovery initialization because we are going to kill
+	 * the session.
+	 */
+	if (err == ISCSI_ERR_INVALID_HOST)
+		iscsi_conn_error_event(conn->cls_conn, err);
+	else
+		iscsi_conn_failure(conn, err);
+	put_device(dev);
+}
+EXPORT_SYMBOL_GPL(iscsi_session_failure);
+
 void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
 {
 	struct iscsi_session *session = conn->session;
@@ -968,9 +1024,10 @@ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
 	if (conn->stop_stage == 0)
 		session->state = ISCSI_STATE_FAILED;
 	spin_unlock_irqrestore(&session->lock, flags);
+
 	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
 	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
-	iscsi_conn_error(conn->cls_conn, err);
+	iscsi_conn_error_event(conn->cls_conn, err);
 }
 EXPORT_SYMBOL_GPL(iscsi_conn_failure);
 
@@ -1194,15 +1251,13 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 	switch (session->state) {
 	case ISCSI_STATE_IN_RECOVERY:
 		reason = FAILURE_SESSION_IN_RECOVERY;
-		sc->result = DID_IMM_RETRY << 16;
-		break;
+		goto reject;
 	case ISCSI_STATE_LOGGING_OUT:
 		reason = FAILURE_SESSION_LOGGING_OUT;
-		sc->result = DID_IMM_RETRY << 16;
-		break;
+		goto reject;
 	case ISCSI_STATE_RECOVERY_FAILED:
 		reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
-		sc->result = DID_NO_CONNECT << 16;
+		sc->result = DID_TRANSPORT_FAILFAST << 16;
 		break;
 	case ISCSI_STATE_TERMINATE:
 		reason = FAILURE_SESSION_TERMINATE;
@@ -1267,7 +1322,7 @@ reject:
 	spin_unlock(&session->lock);
 	debug_scsi("cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason);
 	spin_lock(host->host_lock);
-	return SCSI_MLQUEUE_HOST_BUSY;
+	return SCSI_MLQUEUE_TARGET_BUSY;
 
 fault:
 	spin_unlock(&session->lock);
@@ -1307,7 +1362,7 @@ void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
 }
 EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
 
-int iscsi_eh_host_reset(struct scsi_cmnd *sc)
+int iscsi_eh_target_reset(struct scsi_cmnd *sc)
 {
 	struct iscsi_cls_session *cls_session;
 	struct iscsi_session *session;
@@ -1321,7 +1376,7 @@ int iscsi_eh_host_reset(struct scsi_cmnd *sc)
 	spin_lock_bh(&session->lock);
 	if (session->state == ISCSI_STATE_TERMINATE) {
 failed:
-		debug_scsi("failing host reset: session terminated "
+		debug_scsi("failing target reset: session terminated "
 			   "[CID %d age %d]\n", conn->id, session->age);
 		spin_unlock_bh(&session->lock);
 		mutex_unlock(&session->eh_mutex);
@@ -1336,7 +1391,7 @@ failed:
 	 */
 	iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
 
-	debug_scsi("iscsi_eh_host_reset wait for relogin\n");
+	debug_scsi("iscsi_eh_target_reset wait for relogin\n");
 	wait_event_interruptible(conn->ehwait,
 				 session->state == ISCSI_STATE_TERMINATE ||
 				 session->state == ISCSI_STATE_LOGGED_IN ||
@@ -1348,14 +1403,14 @@ failed:
 	spin_lock_bh(&session->lock);
 	if (session->state == ISCSI_STATE_LOGGED_IN)
 		iscsi_session_printk(KERN_INFO, session,
-				     "host reset succeeded\n");
+				     "target reset succeeded\n");
 	else
 		goto failed;
 	spin_unlock_bh(&session->lock);
 	mutex_unlock(&session->eh_mutex);
 	return SUCCESS;
 }
-EXPORT_SYMBOL_GPL(iscsi_eh_host_reset);
+EXPORT_SYMBOL_GPL(iscsi_eh_target_reset);
 
 static void iscsi_tmf_timedout(unsigned long data)
 {
@@ -1769,10 +1824,10 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
 
 	iscsi_suspend_tx(conn);
 
-	spin_lock(&session->lock);
+	spin_lock_bh(&session->lock);
 	fail_all_commands(conn, sc->device->lun, DID_ERROR);
 	conn->tmf_state = TMF_INITIAL;
-	spin_unlock(&session->lock);
+	spin_unlock_bh(&session->lock);
 
 	iscsi_start_tx(conn);
 	goto done;
@@ -1878,6 +1933,7 @@ struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
 				   int dd_data_size, uint16_t qdepth)
 {
 	struct Scsi_Host *shost;
+	struct iscsi_host *ihost;
 
 	shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size);
 	if (!shost)
@@ -1892,22 +1948,43 @@ struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
 		qdepth = ISCSI_DEF_CMD_PER_LUN;
 	}
 	shost->cmd_per_lun = qdepth;
+
+	ihost = shost_priv(shost);
+	spin_lock_init(&ihost->lock);
+	ihost->state = ISCSI_HOST_SETUP;
+	ihost->num_sessions = 0;
+	init_waitqueue_head(&ihost->session_removal_wq);
 	return shost;
 }
 EXPORT_SYMBOL_GPL(iscsi_host_alloc);
 
+static void iscsi_notify_host_removed(struct iscsi_cls_session *cls_session)
+{
+	iscsi_session_failure(cls_session, ISCSI_ERR_INVALID_HOST);
+}
+
 /**
  * iscsi_host_remove - remove host and sessions
  * @shost: scsi host
  *
- * This will also remove any sessions attached to the host, but if userspace
- * is managing the session at the same time this will break. TODO: add
- * refcounting to the netlink iscsi interface so a rmmod or host hot unplug
- * does not remove the memory from under us.
+ * If there are any sessions left, this will initiate the removal and wait
+ * for the completion.
 */
 void iscsi_host_remove(struct Scsi_Host *shost)
 {
-	iscsi_host_for_each_session(shost, iscsi_session_teardown);
+	struct iscsi_host *ihost = shost_priv(shost);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ihost->lock, flags);
+	ihost->state = ISCSI_HOST_REMOVED;
+	spin_unlock_irqrestore(&ihost->lock, flags);
+
+	iscsi_host_for_each_session(shost, iscsi_notify_host_removed);
+	wait_event_interruptible(ihost->session_removal_wq,
+				 ihost->num_sessions == 0);
+	if (signal_pending(current))
+		flush_signals(current);
+
 	scsi_remove_host(shost);
 }
 EXPORT_SYMBOL_GPL(iscsi_host_remove);
@@ -1923,6 +2000,27 @@ void iscsi_host_free(struct Scsi_Host *shost)
 }
 EXPORT_SYMBOL_GPL(iscsi_host_free);
 
+static void iscsi_host_dec_session_cnt(struct Scsi_Host *shost)
+{
+	struct iscsi_host *ihost = shost_priv(shost);
+	unsigned long flags;
+
+	shost = scsi_host_get(shost);
+	if (!shost) {
+		printk(KERN_ERR "Invalid state. Cannot notify host removal "
+		      "of session teardown event because host already "
+		      "removed.\n");
+		return;
+	}
+
+	spin_lock_irqsave(&ihost->lock, flags);
+	ihost->num_sessions--;
+	if (ihost->num_sessions == 0)
+		wake_up(&ihost->session_removal_wq);
+	spin_unlock_irqrestore(&ihost->lock, flags);
+	scsi_host_put(shost);
+}
+
 /**
  * iscsi_session_setup - create iscsi cls session and host and session
  * @iscsit: iscsi transport template
@@ -1943,9 +2041,19 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
 		    uint16_t cmds_max, int cmd_task_size,
 		    uint32_t initial_cmdsn, unsigned int id)
 {
+	struct iscsi_host *ihost = shost_priv(shost);
 	struct iscsi_session *session;
 	struct iscsi_cls_session *cls_session;
 	int cmd_i, scsi_cmds, total_cmds = cmds_max;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ihost->lock, flags);
+	if (ihost->state == ISCSI_HOST_REMOVED) {
+		spin_unlock_irqrestore(&ihost->lock, flags);
+		return NULL;
+	}
+	ihost->num_sessions++;
+	spin_unlock_irqrestore(&ihost->lock, flags);
 
 	if (!total_cmds)
 		total_cmds = ISCSI_DEF_XMIT_CMDS_MAX;
@@ -1958,7 +2066,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
 		printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
 		       "must be a power of two that is at least %d.\n",
 		       total_cmds, ISCSI_TOTAL_CMDS_MIN);
-		return NULL;
+		goto dec_session_count;
 	}
 
 	if (total_cmds > ISCSI_TOTAL_CMDS_MAX) {
@@ -1982,7 +2090,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
 	cls_session = iscsi_alloc_session(shost, iscsit,
 					  sizeof(struct iscsi_session));
 	if (!cls_session)
-		return NULL;
+		goto dec_session_count;
 	session = cls_session->dd_data;
 	session->cls_session = cls_session;
 	session->host = shost;
@@ -2021,6 +2129,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
 
 	if (iscsi_add_session(cls_session, id))
 		goto cls_session_fail;
+
 	return cls_session;
 
 cls_session_fail:
@@ -2029,6 +2138,8 @@ module_get_fail:
 	iscsi_pool_free(&session->cmdpool);
 cmdpool_alloc_fail:
 	iscsi_free_session(cls_session);
+dec_session_count:
+	iscsi_host_dec_session_cnt(shost);
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(iscsi_session_setup);
@@ -2044,6 +2155,7 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
 {
 	struct iscsi_session *session = cls_session->dd_data;
 	struct module *owner = cls_session->transport->owner;
+	struct Scsi_Host *shost = session->host;
 
 	iscsi_pool_free(&session->cmdpool);
 
@@ -2056,6 +2168,7 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
 	kfree(session->ifacename);
 
 	iscsi_destroy_session(cls_session);
+	iscsi_host_dec_session_cnt(shost);
 	module_put(owner);
 }
 EXPORT_SYMBOL_GPL(iscsi_session_teardown);
@@ -2335,8 +2448,10 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
 	 * flush queues.
 	 */
 	spin_lock_bh(&session->lock);
-	fail_all_commands(conn, -1,
-			STOP_CONN_RECOVER ? DID_BUS_BUSY : DID_ERROR);
+	if (flag == STOP_CONN_RECOVER)
+		fail_all_commands(conn, -1, DID_TRANSPORT_DISRUPTED);
+	else
+		fail_all_commands(conn, -1, DID_ERROR);
 	flush_control_queues(session, conn);
 	spin_unlock_bh(&session->lock);
 	mutex_unlock(&session->eh_mutex);
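Taken together with the iscsi_iser.c and iscsi_tcp.c hunks above, session teardown moves into the transport driver's session-destroy path, and iscsi_host_remove() now marks the host ISCSI_HOST_REMOVED, fails any remaining sessions with ISCSI_ERR_INVALID_HOST, and waits for the per-host session count to drop to zero. A minimal sketch of the resulting driver-side ordering, using the libiscsi calls shown in this diff (the surrounding example_* driver is hypothetical):

/* Sketch of the post-merge teardown order for a libiscsi-based driver. */
static void example_session_destroy(struct iscsi_cls_session *cls_session)
{
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);

	/* the driver tears down its own session first; this also drops
	 * the host's session count via iscsi_host_dec_session_cnt() */
	iscsi_session_teardown(cls_session);

	/* with no sessions left, iscsi_host_remove() returns promptly and
	 * the Scsi_Host can then be removed and freed */
	iscsi_host_remove(shost);
	iscsi_host_free(shost);
}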
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index e0e018d12653..60a9e6e9384b 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -34,7 +34,14 @@ struct lpfc_sli2_slim;
 #define LPFC_IOCB_LIST_CNT	2250	/* list of IOCBs for fast-path usage. */
 #define LPFC_Q_RAMP_UP_INTERVAL 120     /* lun q_depth ramp up interval */
 #define LPFC_VNAME_LEN		100	/* vport symbolic name length */
-
+#define LPFC_TGTQ_INTERVAL	40000	/* Min amount of time between tgt
+					   queue depth change in millisecs */
+#define LPFC_TGTQ_RAMPUP_PCENT	5	/* Target queue rampup in percentage */
+#define LPFC_MIN_TGT_QDEPTH	100
+#define LPFC_MAX_TGT_QDEPTH	0xFFFF
+
+#define LPFC_MAX_BUCKET_COUNT	20	/* Maximum no. of buckets for stat data
+					   collection. */
 /*
  * Following time intervals are used of adjusting SCSI device
  * queue depths when there are driver resource error or Firmware
@@ -49,6 +56,9 @@ struct lpfc_sli2_slim;
 #define LPFC_HB_MBOX_INTERVAL   5	/* Heart beat interval in seconds. */
 #define LPFC_HB_MBOX_TIMEOUT    30	/* Heart beat timeout in seconds. */
 
+/* Error Attention event polling interval */
+#define LPFC_ERATT_POLL_INTERVAL	5 /* EATT poll interval in seconds */
+
 /* Define macros for 64 bit support */
 #define putPaddrLow(addr)    ((uint32_t) (0xffffffff & (u64)(addr)))
 #define putPaddrHigh(addr)   ((uint32_t) (0xffffffff & (((u64)(addr))>>32)))
@@ -60,6 +70,9 @@ struct lpfc_sli2_slim;
 
 #define MAX_HBAEVT	32
 
+/* Number of MSI-X vectors the driver uses */
+#define LPFC_MSIX_VECTORS	2
+
 /* lpfc wait event data ready flag */
 #define LPFC_DATA_READY		(1<<0)
 
@@ -357,6 +370,7 @@ struct lpfc_vport {
 	uint32_t cfg_log_verbose;
 	uint32_t cfg_max_luns;
 	uint32_t cfg_enable_da_id;
+	uint32_t cfg_max_scsicmpl_time;
 
 	uint32_t dev_loss_tmo_changed;
 
@@ -369,6 +383,8 @@ struct lpfc_vport {
 	struct lpfc_debugfs_trc *disc_trc;
 	atomic_t disc_trc_cnt;
 #endif
+	uint8_t stat_data_enabled;
+	uint8_t stat_data_blocked;
 };
 
 struct hbq_s {
@@ -407,10 +423,11 @@ struct lpfc_hba {
 	struct lpfc_sli sli;
 	uint32_t sli_rev;		/* SLI2 or SLI3 */
 	uint32_t sli3_options;		/* Mask of enabled SLI3 options */
-#define LPFC_SLI3_ENABLED		0x01
-#define LPFC_SLI3_HBQ_ENABLED		0x02
-#define LPFC_SLI3_NPIV_ENABLED		0x04
-#define LPFC_SLI3_VPORT_TEARDOWN	0x08
+#define LPFC_SLI3_HBQ_ENABLED		0x01
+#define LPFC_SLI3_NPIV_ENABLED		0x02
+#define LPFC_SLI3_VPORT_TEARDOWN	0x04
+#define LPFC_SLI3_CRP_ENABLED		0x08
+#define LPFC_SLI3_INB_ENABLED		0x10
 	uint32_t iocb_cmd_size;
 	uint32_t iocb_rsp_size;
 
@@ -422,10 +439,20 @@ struct lpfc_hba {
 #define LS_NPIV_FAB_SUPPORTED 0x2	/* Fabric supports NPIV */
 #define LS_IGNORE_ERATT       0x4	/* intr handler should ignore ERATT */
 
-	struct lpfc_sli2_slim *slim2p;
-	struct lpfc_dmabuf hbqslimp;
+	uint32_t hba_flag;	/* hba generic flags */
+#define HBA_ERATT_HANDLED	0x1	/* This flag is set when eratt handled */
+
+	struct lpfc_dmabuf slim2p;
 
-	dma_addr_t slim2p_mapping;
+	MAILBOX_t *mbox;
+	uint32_t *inb_ha_copy;
+	uint32_t *inb_counter;
+	uint32_t inb_last_counter;
+	uint32_t ha_copy;
+	struct _PCB *pcb;
+	struct _IOCB *IOCBs;
+
+	struct lpfc_dmabuf hbqslimp;
 
 	uint16_t pci_cfg_value;
 
@@ -492,7 +519,7 @@ struct lpfc_hba {
 
 	wait_queue_head_t work_waitq;
 	struct task_struct *worker_thread;
-	long data_flags;
+	unsigned long data_flags;
 
 	uint32_t hbq_in_use;		/* HBQs in use flag */
 	struct list_head hbqbuf_in_list;  /* in-fly hbq buffer list */
@@ -514,6 +541,7 @@ struct lpfc_hba {
 	void __iomem *HCregaddr;	/* virtual address for host ctl reg */
 
 	struct lpfc_hgp __iomem *host_gp; /* Host side get/put pointers */
+	struct lpfc_pgp   *port_gp;
 	uint32_t __iomem  *hbq_put;     /* Address in SLIM to HBQ put ptrs */
 	uint32_t          *hbq_get;     /* Host mem address of HBQ get ptrs */
 
@@ -536,6 +564,7 @@ struct lpfc_hba {
 	uint8_t soft_wwn_enable;
 
 	struct timer_list fcp_poll_timer;
+	struct timer_list eratt_poll;
 
 	/*
 	 * stat counters
@@ -565,7 +594,7 @@ struct lpfc_hba {
 
 	struct fc_host_statistics link_stats;
 	enum intr_type_t intr_type;
-	struct msix_entry msix_entries[1];
+	struct msix_entry msix_entries[LPFC_MSIX_VECTORS];
 
 	struct list_head port_list;
 	struct lpfc_vport *pport;	/* physical lpfc_vport pointer */
@@ -605,6 +634,7 @@ struct lpfc_hba {
 	unsigned long last_completion_time;
 	struct timer_list hb_tmofunc;
 	uint8_t hb_outstanding;
+	enum hba_temp_state over_temp_state;
 	/* ndlp reference management */
 	spinlock_t ndlp_lock;
 	/*
@@ -613,7 +643,19 @@ struct lpfc_hba {
 	 */
 #define QUE_BUFTAG_BIT  (1<<31)
 	uint32_t buffer_tag_count;
-	enum hba_temp_state over_temp_state;
+	int wait_4_mlo_maint_flg;
+	wait_queue_head_t wait_4_mlo_m_q;
+	/* data structure used for latency data collection */
+#define LPFC_NO_BUCKET	   0
+#define LPFC_LINEAR_BUCKET 1
+#define LPFC_POWER2_BUCKET 2
+	uint8_t  bucket_type;
+	uint32_t bucket_base;
+	uint32_t bucket_step;
+
+	/* Maximum number of events that can be outstanding at any time*/
+#define LPFC_MAX_EVT_COUNT 512
+	atomic_t fast_event_count;
 };
 
 static inline struct Scsi_Host *
@@ -650,15 +692,25 @@ lpfc_worker_wake_up(struct lpfc_hba *phba)
 	return;
 }
 
-#define FC_REG_DUMP_EVENT		0x10	/* Register for Dump events */
-#define FC_REG_TEMPERATURE_EVENT	0x20	/* Register for temperature
-						   event */
+static inline void
+lpfc_sli_read_hs(struct lpfc_hba *phba)
+{
+	/*
+	 * There was a link/board error. Read the status register to retrieve
+	 * the error event and process it.
+	 */
+	phba->sli.slistat.err_attn_event++;
+
+	/* Save status info */
+	phba->work_hs = readl(phba->HSregaddr);
+	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
+	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
+
+	/* Clear chip Host Attention error bit */
+	writel(HA_ERATT, phba->HAregaddr);
+	readl(phba->HAregaddr); /* flush */
+	phba->pport->stopped = 1;
+
+	return;
+}
 
-struct temp_event {
-	uint32_t event_type;
-	uint32_t event_code;
-	uint32_t data;
-};
-#define LPFC_CRIT_TEMP		0x1
-#define LPFC_THRESHOLD_TEMP	0x2
-#define LPFC_NORMAL_TEMP	0x3
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 37bfa0bd1dae..aa3d6277581d 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c | |||
@@ -32,6 +32,7 @@ | |||
32 | 32 | ||
33 | #include "lpfc_hw.h" | 33 | #include "lpfc_hw.h" |
34 | #include "lpfc_sli.h" | 34 | #include "lpfc_sli.h" |
35 | #include "lpfc_nl.h" | ||
35 | #include "lpfc_disc.h" | 36 | #include "lpfc_disc.h" |
36 | #include "lpfc_scsi.h" | 37 | #include "lpfc_scsi.h" |
37 | #include "lpfc.h" | 38 | #include "lpfc.h" |
@@ -49,6 +50,21 @@ | |||
49 | #define LPFC_LINK_SPEED_BITMAP 0x00000117 | 50 | #define LPFC_LINK_SPEED_BITMAP 0x00000117 |
50 | #define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8" | 51 | #define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8" |
51 | 52 | ||
53 | /** | ||
54 | * lpfc_jedec_to_ascii: Hex to ascii convertor according to JEDEC rules. | ||
55 | * @incr: integer to convert. | ||
56 | * @hdw: ascii string holding converted integer plus a string terminator. | ||
57 | * | ||
58 | * Description: | ||
59 | * JEDEC Joint Electron Device Engineering Council. | ||
60 | * Convert a 32 bit integer composed of 8 nibbles into an 8 byte ascii | ||
61 | * character string. The string is then terminated with a NULL in byte 9. | ||
62 | * Hex 0-9 becomes ascii '0' to '9'. | ||
63 | * Hex a-f becomes ascii '=' to 'B' capital B. | ||
64 | * | ||
65 | * Notes: | ||
66 | * Coded for 32 bit integers only. | ||
67 | **/ | ||
52 | static void | 68 | static void |
53 | lpfc_jedec_to_ascii(int incr, char hdw[]) | 69 | lpfc_jedec_to_ascii(int incr, char hdw[]) |
54 | { | 70 | { |
@@ -65,6 +81,14 @@ lpfc_jedec_to_ascii(int incr, char hdw[]) | |||
65 | return; | 81 | return; |
66 | } | 82 | } |
67 | 83 | ||
84 | /** | ||
85 | * lpfc_drvr_version_show: Return the Emulex driver string with version number. | ||
86 | * @dev: class unused variable. | ||
87 | * @attr: device attribute, not used. | ||
88 | * @buf: on return contains the module description text. | ||
89 | * | ||
90 | * Returns: size of formatted string. | ||
91 | **/ | ||
68 | static ssize_t | 92 | static ssize_t |
69 | lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr, | 93 | lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr, |
70 | char *buf) | 94 | char *buf) |
@@ -72,6 +96,14 @@ lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr, | |||
72 | return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n"); | 96 | return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n"); |
73 | } | 97 | } |
74 | 98 | ||
99 | /** | ||
100 | * lpfc_info_show: Return some pci info about the host in ascii. | ||
101 | * @dev: class converted to a Scsi_host structure. | ||
102 | * @attr: device attribute, not used. | ||
103 | * @buf: on return contains the formatted text from lpfc_info(). | ||
104 | * | ||
105 | * Returns: size of formatted string. | ||
106 | **/ | ||
75 | static ssize_t | 107 | static ssize_t |
76 | lpfc_info_show(struct device *dev, struct device_attribute *attr, | 108 | lpfc_info_show(struct device *dev, struct device_attribute *attr, |
77 | char *buf) | 109 | char *buf) |
@@ -81,6 +113,14 @@ lpfc_info_show(struct device *dev, struct device_attribute *attr, | |||
81 | return snprintf(buf, PAGE_SIZE, "%s\n",lpfc_info(host)); | 113 | return snprintf(buf, PAGE_SIZE, "%s\n",lpfc_info(host)); |
82 | } | 114 | } |
83 | 115 | ||
116 | /** | ||
117 | * lpfc_serialnum_show: Return the hba serial number in ascii. | ||
118 | * @dev: class converted to a Scsi_host structure. | ||
119 | * @attr: device attribute, not used. | ||
120 | * @buf: on return contains the formatted text serial number. | ||
121 | * | ||
122 | * Returns: size of formatted string. | ||
123 | **/ | ||
84 | static ssize_t | 124 | static ssize_t |
85 | lpfc_serialnum_show(struct device *dev, struct device_attribute *attr, | 125 | lpfc_serialnum_show(struct device *dev, struct device_attribute *attr, |
86 | char *buf) | 126 | char *buf) |
@@ -92,6 +132,18 @@ lpfc_serialnum_show(struct device *dev, struct device_attribute *attr, | |||
92 | return snprintf(buf, PAGE_SIZE, "%s\n",phba->SerialNumber); | 132 | return snprintf(buf, PAGE_SIZE, "%s\n",phba->SerialNumber); |
93 | } | 133 | } |
94 | 134 | ||
135 | /** | ||
136 | * lpfc_temp_sensor_show: Return the temperature sensor level. | ||
137 | * @dev: class converted to a Scsi_host structure. | ||
138 | * @attr: device attribute, not used. | ||
139 | * @buf: on return contains the formatted support level. | ||
140 | * | ||
141 | * Description: | ||
142 | * Returns a number indicating the temperature sensor level currently | ||
143 | * supported, zero or one in ascii. | ||
144 | * | ||
145 | * Returns: size of formatted string. | ||
146 | **/ | ||
95 | static ssize_t | 147 | static ssize_t |
96 | lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr, | 148 | lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr, |
97 | char *buf) | 149 | char *buf) |
@@ -102,6 +154,14 @@ lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr, | |||
102 | return snprintf(buf, PAGE_SIZE, "%d\n",phba->temp_sensor_support); | 154 | return snprintf(buf, PAGE_SIZE, "%d\n",phba->temp_sensor_support); |
103 | } | 155 | } |
104 | 156 | ||
157 | /** | ||
158 | * lpfc_modeldesc_show: Return the model description of the hba. | ||
159 | * @dev: class converted to a Scsi_host structure. | ||
160 | * @attr: device attribute, not used. | ||
161 | * @buf: on return contains the scsi vpd model description. | ||
162 | * | ||
163 | * Returns: size of formatted string. | ||
164 | **/ | ||
105 | static ssize_t | 165 | static ssize_t |
106 | lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr, | 166 | lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr, |
107 | char *buf) | 167 | char *buf) |
@@ -113,6 +173,14 @@ lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr, | |||
113 | return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelDesc); | 173 | return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelDesc); |
114 | } | 174 | } |
115 | 175 | ||
176 | /** | ||
177 | * lpfc_modelname_show: Return the model name of the hba. | ||
178 | * @dev: class converted to a Scsi_host structure. | ||
179 | * @attr: device attribute, not used. | ||
180 | * @buf: on return contains the scsi vpd model name. | ||
181 | * | ||
182 | * Returns: size of formatted string. | ||
183 | **/ | ||
116 | static ssize_t | 184 | static ssize_t |
117 | lpfc_modelname_show(struct device *dev, struct device_attribute *attr, | 185 | lpfc_modelname_show(struct device *dev, struct device_attribute *attr, |
118 | char *buf) | 186 | char *buf) |
@@ -124,6 +192,14 @@ lpfc_modelname_show(struct device *dev, struct device_attribute *attr, | |||
124 | return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelName); | 192 | return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelName); |
125 | } | 193 | } |
126 | 194 | ||
195 | /** | ||
196 | * lpfc_programtype_show: Return the program type of the hba. | ||
197 | * @dev: class converted to a Scsi_host structure. | ||
198 | * @attr: device attribute, not used. | ||
199 | * @buf: on return contains the scsi vpd program type. | ||
200 | * | ||
201 | * Returns: size of formatted string. | ||
202 | **/ | ||
127 | static ssize_t | 203 | static ssize_t |
128 | lpfc_programtype_show(struct device *dev, struct device_attribute *attr, | 204 | lpfc_programtype_show(struct device *dev, struct device_attribute *attr, |
129 | char *buf) | 205 | char *buf) |
@@ -135,6 +211,33 @@ lpfc_programtype_show(struct device *dev, struct device_attribute *attr, | |||
135 | return snprintf(buf, PAGE_SIZE, "%s\n",phba->ProgramType); | 211 | return snprintf(buf, PAGE_SIZE, "%s\n",phba->ProgramType); |
136 | } | 212 | } |
137 | 213 | ||
214 | /** | ||
215 | * lpfc_mlomgmt_show: Return the Menlo Maintenance sli flag. | ||
216 | * @dev: class converted to a Scsi_host structure. | ||
217 | * @attr: device attribute, not used. | ||
218 | * @buf: on return contains the Menlo Maintenance sli flag. | ||
219 | * | ||
220 | * Returns: size of formatted string. | ||
221 | **/ | ||
222 | static ssize_t | ||
223 | lpfc_mlomgmt_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
224 | { | ||
225 | struct Scsi_Host *shost = class_to_shost(dev); | ||
226 | struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; | ||
227 | struct lpfc_hba *phba = vport->phba; | ||
228 | |||
229 | return snprintf(buf, PAGE_SIZE, "%d\n", | ||
230 | (phba->sli.sli_flag & LPFC_MENLO_MAINT)); | ||
231 | } | ||
232 | |||
233 | /** | ||
234 | * lpfc_vportnum_show: Return the port number in ascii of the hba. | ||
235 | * @dev: class converted to a Scsi_host structure. | ||
236 | * @attr: device attribute, not used. | ||
237 | * @buf: on return contains the port number in ascii. | ||
238 | * | ||
239 | * Returns: size of formatted string. | ||
240 | **/ | ||
138 | static ssize_t | 241 | static ssize_t |
139 | lpfc_vportnum_show(struct device *dev, struct device_attribute *attr, | 242 | lpfc_vportnum_show(struct device *dev, struct device_attribute *attr, |
140 | char *buf) | 243 | char *buf) |
@@ -146,6 +249,14 @@ lpfc_vportnum_show(struct device *dev, struct device_attribute *attr, | |||
146 | return snprintf(buf, PAGE_SIZE, "%s\n",phba->Port); | 249 | return snprintf(buf, PAGE_SIZE, "%s\n",phba->Port); |
147 | } | 250 | } |
148 | 251 | ||
252 | /** | ||
253 | * lpfc_fwrev_show: Return the firmware rev running in the hba. | ||
254 | * @dev: class converted to a Scsi_host structure. | ||
255 | * @attr: device attribute, not used. | ||
256 | * @buf: on return contains the firmware revision in ascii. | ||
257 | * | ||
258 | * Returns: size of formatted string. | ||
259 | **/ | ||
149 | static ssize_t | 260 | static ssize_t |
150 | lpfc_fwrev_show(struct device *dev, struct device_attribute *attr, | 261 | lpfc_fwrev_show(struct device *dev, struct device_attribute *attr, |
151 | char *buf) | 262 | char *buf) |
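All of the _show routines in this region follow the same shape: format one value into the PAGE_SIZE buffer that sysfs supplies and return the formatted length, so each attribute surfaces as a read-only file under the SCSI host's sysfs directory. A minimal user-space sketch of reading one such attribute follows; the /sys path and host number are assumptions for the example, and the attribute file names come from the DEVICE_ATTR() declarations later in this file.

#include <stdio.h>

int main(void)
{
	/* Assumed location; "host0" varies per system. */
	FILE *f = fopen("/sys/class/scsi_host/host0/nport_evt_cnt", "r");
	char line[256];

	if (!f) {
		perror("nport_evt_cnt");
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		printf("nport events: %s", line);	/* value already ends in '\n' */
	fclose(f);
	return 0;
}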
@@ -159,6 +270,14 @@ lpfc_fwrev_show(struct device *dev, struct device_attribute *attr, | |||
159 | return snprintf(buf, PAGE_SIZE, "%s, sli-%d\n", fwrev, phba->sli_rev); | 270 | return snprintf(buf, PAGE_SIZE, "%s, sli-%d\n", fwrev, phba->sli_rev); |
160 | } | 271 | } |
161 | 272 | ||
273 | /** | ||
274 | * lpfc_hdw_show: Return the jedec information about the hba. | ||
275 | * @dev: class converted to a Scsi_host structure. | ||
276 | * @attr: device attribute, not used. | ||
277 | * @buf: on return contains the jedec information in ascii. | ||
278 | * | ||
279 | * Returns: size of formatted string. | ||
280 | **/ | ||
162 | static ssize_t | 281 | static ssize_t |
163 | lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf) | 282 | lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf) |
164 | { | 283 | { |
@@ -171,6 +290,15 @@ lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf) | |||
171 | lpfc_jedec_to_ascii(vp->rev.biuRev, hdw); | 290 | lpfc_jedec_to_ascii(vp->rev.biuRev, hdw); |
172 | return snprintf(buf, PAGE_SIZE, "%s\n", hdw); | 291 | return snprintf(buf, PAGE_SIZE, "%s\n", hdw); |
173 | } | 292 | } |
293 | |||
294 | /** | ||
295 | * lpfc_option_rom_version_show: Return the adapter ROM FCode version. | ||
296 | * @dev: class converted to a Scsi_host structure. | ||
297 | * @attr: device attribute, not used. | ||
298 | * @buf: on return contains the ROM and FCode ascii strings. | ||
299 | * | ||
300 | * Returns: size of formatted string. | ||
301 | **/ | ||
174 | static ssize_t | 302 | static ssize_t |
175 | lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr, | 303 | lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr, |
176 | char *buf) | 304 | char *buf) |
@@ -181,6 +309,18 @@ lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr, | |||
181 | 309 | ||
182 | return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion); | 310 | return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion); |
183 | } | 311 | } |
312 | |||
313 | /** | ||
314 | * lpfc_link_state_show: Return the link state of the port. | ||
315 | * @dev: class converted to a Scsi_host structure. | ||
316 | * @attr: device attribute, not used. | ||
317 | * @buf: on return contains text describing the state of the link. | ||
318 | * | ||
319 | * Notes: | ||
320 | * The switch statement has no default so zero will be returned. | ||
321 | * | ||
322 | * Returns: size of formatted string. | ||
323 | **/ | ||
184 | static ssize_t | 324 | static ssize_t |
185 | lpfc_link_state_show(struct device *dev, struct device_attribute *attr, | 325 | lpfc_link_state_show(struct device *dev, struct device_attribute *attr, |
186 | char *buf) | 326 | char *buf) |
@@ -232,8 +372,10 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr, | |||
232 | "Unknown\n"); | 372 | "Unknown\n"); |
233 | break; | 373 | break; |
234 | } | 374 | } |
235 | 375 | if (phba->sli.sli_flag & LPFC_MENLO_MAINT) | |
236 | if (phba->fc_topology == TOPOLOGY_LOOP) { | 376 | len += snprintf(buf + len, PAGE_SIZE-len, |
377 | " Menlo Maint Mode\n"); | ||
378 | else if (phba->fc_topology == TOPOLOGY_LOOP) { | ||
237 | if (vport->fc_flag & FC_PUBLIC_LOOP) | 379 | if (vport->fc_flag & FC_PUBLIC_LOOP) |
238 | len += snprintf(buf + len, PAGE_SIZE-len, | 380 | len += snprintf(buf + len, PAGE_SIZE-len, |
239 | " Public Loop\n"); | 381 | " Public Loop\n"); |
@@ -253,6 +395,18 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr, | |||
253 | return len; | 395 | return len; |
254 | } | 396 | } |
255 | 397 | ||
398 | /** | ||
399 | * lpfc_num_discovered_ports_show: Return sum of mapped and unmapped ports. | ||
400 | * @dev: class device that is converted into a Scsi_host. | ||
401 | * @attr: device attribute, not used. | ||
402 | * @buf: on return contains the sum of fc mapped and unmapped. | ||
403 | * | ||
404 | * Description: | ||
405 | * Returns the ascii text number of the sum of the fc mapped and unmapped | ||
406 | * port counts. | ||
407 | * | ||
408 | * Returns: size of formatted string. | ||
409 | **/ | ||
256 | static ssize_t | 410 | static ssize_t |
257 | lpfc_num_discovered_ports_show(struct device *dev, | 411 | lpfc_num_discovered_ports_show(struct device *dev, |
258 | struct device_attribute *attr, char *buf) | 412 | struct device_attribute *attr, char *buf) |
@@ -264,7 +418,20 @@ lpfc_num_discovered_ports_show(struct device *dev, | |||
264 | vport->fc_map_cnt + vport->fc_unmap_cnt); | 418 | vport->fc_map_cnt + vport->fc_unmap_cnt); |
265 | } | 419 | } |
266 | 420 | ||
267 | 421 | /** | |
422 | * lpfc_issue_lip: Misnomer, name carried over from long ago. | ||
423 | * @shost: Scsi_Host pointer. | ||
424 | * | ||
425 | * Description: | ||
426 | * Bring the link down gracefully then re-init the link. The firmware will | ||
427 | * re-init the fiber channel interface as required. Does not issue a LIP. | ||
428 | * | ||
429 | * Returns: | ||
430 | * -EPERM port offline or management commands are being blocked | ||
431 | * -ENOMEM cannot allocate memory for the mailbox command | ||
432 | * -EIO error sending the mailbox command | ||
433 | * zero for success | ||
434 | **/ | ||
268 | static int | 435 | static int |
269 | lpfc_issue_lip(struct Scsi_Host *shost) | 436 | lpfc_issue_lip(struct Scsi_Host *shost) |
270 | { | 437 | { |
@@ -306,6 +473,21 @@ lpfc_issue_lip(struct Scsi_Host *shost) | |||
306 | return 0; | 473 | return 0; |
307 | } | 474 | } |
308 | 475 | ||
476 | /** | ||
477 | * lpfc_do_offline: Issues a mailbox command to bring the link down. | ||
478 | * @phba: lpfc_hba pointer. | ||
479 | * @type: LPFC_EVT_OFFLINE, LPFC_EVT_WARM_START, LPFC_EVT_KILL. | ||
480 | * | ||
481 | * Notes: | ||
482 | * Assumes any error from lpfc_do_offline() will be negative. | ||
483 | * Can wait up to 5 seconds for the port ring buffer count | ||
484 | * to reach zero; prints a warning and continues if it does not. | ||
485 | * lpfc_workq_post_event() returns a non-zero return code if the call fails. | ||
486 | * | ||
487 | * Returns: | ||
488 | * -EIO error posting the event | ||
489 | * zero for success | ||
490 | **/ | ||
309 | static int | 491 | static int |
310 | lpfc_do_offline(struct lpfc_hba *phba, uint32_t type) | 492 | lpfc_do_offline(struct lpfc_hba *phba, uint32_t type) |
311 | { | 493 | { |
@@ -353,6 +535,22 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type) | |||
353 | return 0; | 535 | return 0; |
354 | } | 536 | } |
355 | 537 | ||
538 | /** | ||
539 | * lpfc_selective_reset: Offlines then onlines the port. | ||
540 | * @phba: lpfc_hba pointer. | ||
541 | * | ||
542 | * Description: | ||
543 | * If the port is configured to allow a reset then the hba is brought | ||
544 | * offline then online. | ||
545 | * | ||
546 | * Notes: | ||
547 | * Assumes any error from lpfc_do_offline() will be negative. | ||
548 | * | ||
549 | * Returns: | ||
550 | * lpfc_do_offline() return code if not zero | ||
551 | * -EIO reset not configured or error posting the event | ||
552 | * zero for success | ||
553 | **/ | ||
356 | static int | 554 | static int |
357 | lpfc_selective_reset(struct lpfc_hba *phba) | 555 | lpfc_selective_reset(struct lpfc_hba *phba) |
358 | { | 556 | { |
@@ -378,6 +576,27 @@ lpfc_selective_reset(struct lpfc_hba *phba) | |||
378 | return 0; | 576 | return 0; |
379 | } | 577 | } |
380 | 578 | ||
579 | /** | ||
580 | * lpfc_issue_reset: Selectively resets an adapter. | ||
581 | * @dev: class device that is converted into a Scsi_host. | ||
582 | * @attr: device attribute, not used. | ||
583 | * @buf: containing the string "selective". | ||
584 | * @count: unused variable. | ||
585 | * | ||
586 | * Description: | ||
587 | * If the buf contains the string "selective" then lpfc_selective_reset() | ||
588 | * is called to perform the reset. | ||
589 | * | ||
590 | * Notes: | ||
591 | * Assumes any error from lpfc_selective_reset() will be negative. | ||
592 | * If lpfc_selective_reset() returns zero then the length of the buffer | ||
593 | * is returned, which indicates success. | ||
594 | * | ||
595 | * Returns: | ||
596 | * -EINVAL if the buffer does not contain the string "selective" | ||
597 | * length of buf if the lpfc_selective_reset() call succeeds | ||
598 | * return value of lpfc_selective_reset() if the call fails | ||
599 | **/ | ||
381 | static ssize_t | 600 | static ssize_t |
382 | lpfc_issue_reset(struct device *dev, struct device_attribute *attr, | 601 | lpfc_issue_reset(struct device *dev, struct device_attribute *attr, |
383 | const char *buf, size_t count) | 602 | const char *buf, size_t count) |
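For context, the store routine documented above only acts on the literal string "selective" written through sysfs. A minimal user-space sketch of that write follows, assuming a hypothetical /sys/class/scsi_host/host0/issue_reset path; the real attribute file name is set by a DEVICE_ATTR() declaration elsewhere in this file and the host number varies per system.

#include <stdio.h>

int main(void)
{
	/* Assumed path for illustration only. */
	const char *path = "/sys/class/scsi_host/host0/issue_reset";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	/* Any string other than "selective" makes the store return -EINVAL. */
	fputs("selective", f);
	return fclose(f) ? 1 : 0;
}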
@@ -397,6 +616,14 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr, | |||
397 | return status; | 616 | return status; |
398 | } | 617 | } |
399 | 618 | ||
619 | /** | ||
620 | * lpfc_nport_evt_cnt_show: Return the number of nport events. | ||
621 | * @dev: class device that is converted into a Scsi_host. | ||
622 | * @attr: device attribute, not used. | ||
623 | * @buf: on return contains the ascii number of nport events. | ||
624 | * | ||
625 | * Returns: size of formatted string. | ||
626 | **/ | ||
400 | static ssize_t | 627 | static ssize_t |
401 | lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr, | 628 | lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr, |
402 | char *buf) | 629 | char *buf) |
@@ -408,6 +635,14 @@ lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr, | |||
408 | return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt); | 635 | return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt); |
409 | } | 636 | } |
410 | 637 | ||
638 | /** | ||
639 | * lpfc_board_mode_show: Return the state of the board. | ||
640 | * @dev: class device that is converted into a Scsi_host. | ||
641 | * @attr: device attribute, not used. | ||
642 | * @buf: on return contains the state of the adapter. | ||
643 | * | ||
644 | * Returns: size of formatted string. | ||
645 | **/ | ||
411 | static ssize_t | 646 | static ssize_t |
412 | lpfc_board_mode_show(struct device *dev, struct device_attribute *attr, | 647 | lpfc_board_mode_show(struct device *dev, struct device_attribute *attr, |
413 | char *buf) | 648 | char *buf) |
@@ -429,6 +664,19 @@ lpfc_board_mode_show(struct device *dev, struct device_attribute *attr, | |||
429 | return snprintf(buf, PAGE_SIZE, "%s\n", state); | 664 | return snprintf(buf, PAGE_SIZE, "%s\n", state); |
430 | } | 665 | } |
431 | 666 | ||
667 | /** | ||
668 | * lpfc_board_mode_store: Puts the hba in online, offline, warm or error state. | ||
669 | * @dev: class device that is converted into a Scsi_host. | ||
670 | * @attr: device attribute, not used. | ||
671 | * @buf: containing one of the strings "online", "offline", "warm" or "error". | ||
672 | * @count: unused variable. | ||
673 | * | ||
674 | * Returns: | ||
675 | * -EACCES if hba reset is not enabled | ||
676 | * -EINVAL if the buffer does not contain a valid string (see above) | ||
677 | * -EIO if lpfc_workq_post_event() or lpfc_do_offline() fails | ||
678 | * buf length greater than zero indicates success | ||
679 | **/ | ||
432 | static ssize_t | 680 | static ssize_t |
433 | lpfc_board_mode_store(struct device *dev, struct device_attribute *attr, | 681 | lpfc_board_mode_store(struct device *dev, struct device_attribute *attr, |
434 | const char *buf, size_t count) | 682 | const char *buf, size_t count) |
@@ -462,6 +710,24 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr, | |||
462 | return -EIO; | 710 | return -EIO; |
463 | } | 711 | } |
464 | 712 | ||
713 | /** | ||
714 | * lpfc_get_hba_info: Return various bits of information about the adapter. | ||
715 | * @phba: pointer to the adapter structure. | ||
716 | * @mxri: max xri count. | ||
717 | * @axri: available xri count. | ||
718 | * @mrpi: max rpi count. | ||
719 | * @arpi: available rpi count. | ||
720 | * @mvpi: max vpi count. | ||
721 | * @avpi: available vpi count. | ||
722 | * | ||
723 | * Description: | ||
724 | * If an integer pointer for a count is not null then the value for the | ||
725 | * count is returned. | ||
726 | * | ||
727 | * Returns: | ||
728 | * zero on error | ||
729 | * one for success | ||
730 | **/ | ||
465 | static int | 731 | static int |
466 | lpfc_get_hba_info(struct lpfc_hba *phba, | 732 | lpfc_get_hba_info(struct lpfc_hba *phba, |
467 | uint32_t *mxri, uint32_t *axri, | 733 | uint32_t *mxri, uint32_t *axri, |
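The out-parameter convention documented above, where a NULL pointer means "this count is not requested", is worth seeing in isolation. A hedged, standalone C analogue follows; the function name and values are made up and only mirror the documented contract, they are not lpfc code.

#include <stdio.h>

/* Analogue of the documented contract: fill only the counts the caller
 * asked for, return one for success and zero on error. */
static int get_counts(unsigned int *max, unsigned int *avail)
{
	if (max)
		*max = 512;	/* illustrative value */
	if (avail)
		*avail = 480;	/* illustrative value */
	return 1;
}

int main(void)
{
	unsigned int mxri;

	/* Ask for only the maximum count, as lpfc_max_xri_show() does. */
	if (get_counts(&mxri, NULL))
		printf("max xri: %u\n", mxri);
	return 0;
}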
@@ -524,6 +790,20 @@ lpfc_get_hba_info(struct lpfc_hba *phba, | |||
524 | return 1; | 790 | return 1; |
525 | } | 791 | } |
526 | 792 | ||
793 | /** | ||
794 | * lpfc_max_rpi_show: Return maximum rpi. | ||
795 | * @dev: class device that is converted into a Scsi_host. | ||
796 | * @attr: device attribute, not used. | ||
797 | * @buf: on return contains the maximum rpi count in decimal or "Unknown". | ||
798 | * | ||
799 | * Description: | ||
800 | * Calls lpfc_get_hba_info() asking for just the mrpi count. | ||
801 | * If lpfc_get_hba_info() returns zero (failure) the buffer text is set | ||
802 | * to "Unknown" and the buffer length is returned, therefore the caller | ||
803 | * must check for "Unknown" in the buffer to detect a failure. | ||
804 | * | ||
805 | * Returns: size of formatted string. | ||
806 | **/ | ||
527 | static ssize_t | 807 | static ssize_t |
528 | lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr, | 808 | lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr, |
529 | char *buf) | 809 | char *buf) |
@@ -538,6 +818,20 @@ lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr, | |||
538 | return snprintf(buf, PAGE_SIZE, "Unknown\n"); | 818 | return snprintf(buf, PAGE_SIZE, "Unknown\n"); |
539 | } | 819 | } |
540 | 820 | ||
821 | /** | ||
822 | * lpfc_used_rpi_show: Return maximum rpi minus available rpi. | ||
823 | * @dev: class device that is converted into a Scsi_host. | ||
824 | * @attr: device attribute, not used. | ||
825 | * @buf: on return contains the used rpi count in decimal or "Unknown". | ||
826 | * | ||
827 | * Description: | ||
828 | * Calls lpfc_get_hba_info() asking for just the mrpi and arpi counts. | ||
829 | * If lpfc_get_hba_info() returns zero (failure) the buffer text is set | ||
830 | * to "Unknown" and the buffer length is returned, therefore the caller | ||
831 | * must check for "Unknown" in the buffer to detect a failure. | ||
832 | * | ||
833 | * Returns: size of formatted string. | ||
834 | **/ | ||
541 | static ssize_t | 835 | static ssize_t |
542 | lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr, | 836 | lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr, |
543 | char *buf) | 837 | char *buf) |
@@ -552,6 +846,20 @@ lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr, | |||
552 | return snprintf(buf, PAGE_SIZE, "Unknown\n"); | 846 | return snprintf(buf, PAGE_SIZE, "Unknown\n"); |
553 | } | 847 | } |
554 | 848 | ||
849 | /** | ||
850 | * lpfc_max_xri_show: Return maximum xri. | ||
851 | * @dev: class device that is converted into a Scsi_host. | ||
852 | * @attr: device attribute, not used. | ||
853 | * @buf: on return contains the maximum xri count in decimal or "Unknown". | ||
854 | * | ||
855 | * Description: | ||
856 | * Calls lpfc_get_hba_info() asking for just the mxri count. | ||
857 | * If lpfc_get_hba_info() returns zero (failure) the buffer text is set | ||
858 | * to "Unknown" and the buffer length is returned, therefore the caller | ||
859 | * must check for "Unknown" in the buffer to detect a failure. | ||
860 | * | ||
861 | * Returns: size of formatted string. | ||
862 | **/ | ||
555 | static ssize_t | 863 | static ssize_t |
556 | lpfc_max_xri_show(struct device *dev, struct device_attribute *attr, | 864 | lpfc_max_xri_show(struct device *dev, struct device_attribute *attr, |
557 | char *buf) | 865 | char *buf) |
@@ -566,6 +874,20 @@ lpfc_max_xri_show(struct device *dev, struct device_attribute *attr, | |||
566 | return snprintf(buf, PAGE_SIZE, "Unknown\n"); | 874 | return snprintf(buf, PAGE_SIZE, "Unknown\n"); |
567 | } | 875 | } |
568 | 876 | ||
877 | /** | ||
878 | * lpfc_used_xri_show: Return maximum xri minus the available xri. | ||
879 | * @dev: class device that is converted into a Scsi_host. | ||
880 | * @attr: device attribute, not used. | ||
881 | * @buf: on return contains the used xri count in decimal or "Unknown". | ||
882 | * | ||
883 | * Description: | ||
884 | * Calls lpfc_get_hba_info() asking for just the mxri and axri counts. | ||
885 | * If lpfc_get_hba_info() returns zero (failure) the buffer text is set | ||
886 | * to "Unknown" and the buffer length is returned, therefore the caller | ||
887 | * must check for "Unknown" in the buffer to detect a failure. | ||
888 | * | ||
889 | * Returns: size of formatted string. | ||
890 | **/ | ||
569 | static ssize_t | 891 | static ssize_t |
570 | lpfc_used_xri_show(struct device *dev, struct device_attribute *attr, | 892 | lpfc_used_xri_show(struct device *dev, struct device_attribute *attr, |
571 | char *buf) | 893 | char *buf) |
@@ -580,6 +902,20 @@ lpfc_used_xri_show(struct device *dev, struct device_attribute *attr, | |||
580 | return snprintf(buf, PAGE_SIZE, "Unknown\n"); | 902 | return snprintf(buf, PAGE_SIZE, "Unknown\n"); |
581 | } | 903 | } |
582 | 904 | ||
905 | /** | ||
906 | * lpfc_max_vpi_show: Return maximum vpi. | ||
907 | * @dev: class device that is converted into a Scsi_host. | ||
908 | * @attr: device attribute, not used. | ||
909 | * @buf: on return contains the maximum vpi count in decimal or "Unknown". | ||
910 | * | ||
911 | * Description: | ||
912 | * Calls lpfc_get_hba_info() asking for just the mvpi count. | ||
913 | * If lpfc_get_hba_info() returns zero (failure) the buffer text is set | ||
914 | * to "Unknown" and the buffer length is returned, therefore the caller | ||
915 | * must check for "Unknown" in the buffer to detect a failure. | ||
916 | * | ||
917 | * Returns: size of formatted string. | ||
918 | **/ | ||
583 | static ssize_t | 919 | static ssize_t |
584 | lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr, | 920 | lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr, |
585 | char *buf) | 921 | char *buf) |
@@ -594,6 +930,20 @@ lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr, | |||
594 | return snprintf(buf, PAGE_SIZE, "Unknown\n"); | 930 | return snprintf(buf, PAGE_SIZE, "Unknown\n"); |
595 | } | 931 | } |
596 | 932 | ||
933 | /** | ||
934 | * lpfc_used_vpi_show: Return maximum vpi minus the available vpi. | ||
935 | * @dev: class device that is converted into a Scsi_host. | ||
936 | * @attr: device attribute, not used. | ||
937 | * @buf: on return contains the used vpi count in decimal or "Unknown". | ||
938 | * | ||
939 | * Description: | ||
940 | * Calls lpfc_get_hba_info() asking for just the mvpi and avpi counts. | ||
941 | * If lpfc_get_hba_info() returns zero (failure) the buffer text is set | ||
942 | * to "Unknown" and the buffer length is returned, therefore the caller | ||
943 | * must check for "Unknown" in the buffer to detect a failure. | ||
944 | * | ||
945 | * Returns: size of formatted string. | ||
946 | **/ | ||
597 | static ssize_t | 947 | static ssize_t |
598 | lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr, | 948 | lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr, |
599 | char *buf) | 949 | char *buf) |
@@ -608,6 +958,19 @@ lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr, | |||
608 | return snprintf(buf, PAGE_SIZE, "Unknown\n"); | 958 | return snprintf(buf, PAGE_SIZE, "Unknown\n"); |
609 | } | 959 | } |
610 | 960 | ||
961 | /** | ||
962 | * lpfc_npiv_info_show: Return text about NPIV support for the adapter. | ||
963 | * @dev: class device that is converted into a Scsi_host. | ||
964 | * @attr: device attribute, not used. | ||
965 | * @buf: text that must be interpreted to determine if npiv is supported. | ||
966 | * | ||
967 | * Description: | ||
968 | * Buffer will contain text indicating npiv is not supported on the port, | ||
969 | * the port is an NPIV physical port, or it is an npiv virtual port with | ||
970 | * the id of the vport. | ||
971 | * | ||
972 | * Returns: size of formatted string. | ||
973 | **/ | ||
611 | static ssize_t | 974 | static ssize_t |
612 | lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr, | 975 | lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr, |
613 | char *buf) | 976 | char *buf) |
@@ -623,6 +986,17 @@ lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr, | |||
623 | return snprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi); | 986 | return snprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi); |
624 | } | 987 | } |
625 | 988 | ||
989 | /** | ||
990 | * lpfc_poll_show: Return text about poll support for the adapter. | ||
991 | * @dev: class device that is converted into a Scsi_host. | ||
992 | * @attr: device attribute, not used. | ||
993 | * @buf: on return contains the cfg_poll in hex. | ||
994 | * | ||
995 | * Notes: | ||
996 | * cfg_poll should be a lpfc_polling_flags type. | ||
997 | * | ||
998 | * Returns: size of formatted string. | ||
999 | **/ | ||
626 | static ssize_t | 1000 | static ssize_t |
627 | lpfc_poll_show(struct device *dev, struct device_attribute *attr, | 1001 | lpfc_poll_show(struct device *dev, struct device_attribute *attr, |
628 | char *buf) | 1002 | char *buf) |
@@ -634,6 +1008,20 @@ lpfc_poll_show(struct device *dev, struct device_attribute *attr, | |||
634 | return snprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll); | 1008 | return snprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll); |
635 | } | 1009 | } |
636 | 1010 | ||
1011 | /** | ||
1012 | * lpfc_poll_store: Set the value of cfg_poll for the adapter. | ||
1013 | * @dev: class device that is converted into a Scsi_host. | ||
1014 | * @attr: device attribute, not used. | ||
1015 | * @buf: one or more lpfc_polling_flags values. | ||
1016 | * @count: not used. | ||
1017 | * | ||
1018 | * Notes: | ||
1019 | * buf contents converted to integer and checked for a valid value. | ||
1020 | * | ||
1021 | * Returns: | ||
1022 | * -EINVAL if the buffer cannot be converted or is out of range | ||
1023 | * length of the buf on success | ||
1024 | **/ | ||
637 | static ssize_t | 1025 | static ssize_t |
638 | lpfc_poll_store(struct device *dev, struct device_attribute *attr, | 1026 | lpfc_poll_store(struct device *dev, struct device_attribute *attr, |
639 | const char *buf, size_t count) | 1027 | const char *buf, size_t count) |
@@ -692,6 +1080,20 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr, | |||
692 | return strlen(buf); | 1080 | return strlen(buf); |
693 | } | 1081 | } |
694 | 1082 | ||
1083 | /** | ||
1084 | * lpfc_param_show: Return a cfg attribute value in decimal. | ||
1085 | * | ||
1086 | * Description: | ||
1087 | * Macro that given an attr e.g. hba_queue_depth expands | ||
1088 | * into a function with the name lpfc_hba_queue_depth_show. | ||
1089 | * | ||
1090 | * lpfc_##attr##_show: Return the decimal value of an adapter's cfg_xxx field. | ||
1091 | * @dev: class device that is converted into a Scsi_host. | ||
1092 | * @attr: device attribute, not used. | ||
1093 | * @buf: on return contains the attribute value in decimal. | ||
1094 | * | ||
1095 | * Returns: size of formatted string. | ||
1096 | **/ | ||
695 | #define lpfc_param_show(attr) \ | 1097 | #define lpfc_param_show(attr) \ |
696 | static ssize_t \ | 1098 | static ssize_t \ |
697 | lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ | 1099 | lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ |
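Because lpfc_param_show() builds each show routine by token pasting, it can help to see what one expansion looks like. The following is a hedged, self-contained analogue in plain C, not lpfc code: "queue_depth" and the fake_hba type are hypothetical names used only to show the shape of the generated function.

#include <stdio.h>

struct fake_hba {
	int cfg_queue_depth;
};

#define fake_param_show(attr)						\
static int								\
fake_##attr##_show(struct fake_hba *hba, char *buf, size_t len)	\
{									\
	/* Expands to fake_queue_depth_show() for attr == queue_depth. */ \
	return snprintf(buf, len, "%d\n", hba->cfg_##attr);		\
}

fake_param_show(queue_depth)

int main(void)
{
	struct fake_hba hba = { .cfg_queue_depth = 32 };
	char buf[32];

	fake_queue_depth_show(&hba, buf, sizeof(buf));
	fputs(buf, stdout);
	return 0;
}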
@@ -706,6 +1108,20 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ | |||
706 | phba->cfg_##attr);\ | 1108 | phba->cfg_##attr);\ |
707 | } | 1109 | } |
708 | 1110 | ||
1111 | /** | ||
1112 | * lpfc_param_hex_show: Return a cfg attribute value in hex. | ||
1113 | * | ||
1114 | * Description: | ||
1115 | * Macro that given an attr e.g. hba_queue_depth expands | ||
1116 | * into a function with the name lpfc_hba_queue_depth_show | ||
1117 | * | ||
1118 | * lpfc_##attr##_show: Return the hex value of an adapter's cfg_xxx field. | ||
1119 | * @dev: class device that is converted into a Scsi_host. | ||
1120 | * @attr: device attribute, not used. | ||
1121 | * @buf: on return contains the attribute value in hexadecimal. | ||
1122 | * | ||
1123 | * Returns: size of formatted string. | ||
1124 | **/ | ||
709 | #define lpfc_param_hex_show(attr) \ | 1125 | #define lpfc_param_hex_show(attr) \ |
710 | static ssize_t \ | 1126 | static ssize_t \ |
711 | lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ | 1127 | lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ |
@@ -720,6 +1136,25 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ | |||
720 | phba->cfg_##attr);\ | 1136 | phba->cfg_##attr);\ |
721 | } | 1137 | } |
722 | 1138 | ||
1139 | /** | ||
1140 | * lpfc_param_init: Initializes a cfg attribute. | ||
1141 | * | ||
1142 | * Description: | ||
1143 | * Macro that given an attr e.g. hba_queue_depth expands | ||
1144 | * into a function with the name lpfc_hba_queue_depth_init. The macro also | ||
1145 | * takes a default argument, a minimum and maximum argument. | ||
1146 | * | ||
1147 | * lpfc_##attr##_init: Initializes an attribute. | ||
1148 | * @phba: pointer to the adapter structure. | ||
1149 | * @val: integer attribute value. | ||
1150 | * | ||
1151 | * Validates the min and max values then sets the adapter config field | ||
1152 | * accordingly, or uses the default if out of range and prints an error message. | ||
1153 | * | ||
1154 | * Returns: | ||
1155 | * zero on success | ||
1156 | * -EINVAL if default used | ||
1157 | **/ | ||
723 | #define lpfc_param_init(attr, default, minval, maxval) \ | 1158 | #define lpfc_param_init(attr, default, minval, maxval) \ |
724 | static int \ | 1159 | static int \ |
725 | lpfc_##attr##_init(struct lpfc_hba *phba, int val) \ | 1160 | lpfc_##attr##_init(struct lpfc_hba *phba, int val) \ |
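The init macro's behaviour, validate the value against [minval, maxval], fall back to the default and return -EINVAL otherwise, can be sketched as one standalone function. Names and values below are hypothetical; this is an analogue of the generated code, not the lpfc implementation itself.

#include <errno.h>
#include <stdio.h>

static int cfg_hba_queue_depth;		/* stands in for phba->cfg_xxx */

static int hba_queue_depth_init(int val, int def, int minval, int maxval)
{
	if (val >= minval && val <= maxval) {
		cfg_hba_queue_depth = val;
		return 0;			/* zero on success */
	}
	fprintf(stderr,
		"hba_queue_depth cannot be set to %d, allowed range is [%d, %d]\n",
		val, minval, maxval);
	cfg_hba_queue_depth = def;		/* out of range: use the default */
	return -EINVAL;
}

int main(void)
{
	hba_queue_depth_init(8192, 32, 1, 128);		/* out of range */
	printf("cfg_hba_queue_depth = %d\n", cfg_hba_queue_depth);	/* prints 32 */
	return 0;
}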
@@ -735,6 +1170,26 @@ lpfc_##attr##_init(struct lpfc_hba *phba, int val) \ | |||
735 | return -EINVAL;\ | 1170 | return -EINVAL;\ |
736 | } | 1171 | } |
737 | 1172 | ||
1173 | /** | ||
1174 | * lpfc_param_set: Set a cfg attribute value. | ||
1175 | * | ||
1176 | * Description: | ||
1177 | * Macro that given an attr e.g. hba_queue_depth expands | ||
1178 | * into a function with the name lpfc_hba_queue_depth_set | ||
1179 | * | ||
1180 | * lpfc_##attr##_set: Sets an attribute value. | ||
1181 | * @phba: pointer the the adapter structure. | ||
1182 | * @val: integer attribute value. | ||
1183 | * | ||
1184 | * Description: | ||
1185 | * Validates the min and max values then sets the | ||
1186 | * adapter config field if in the valid range. prints error message | ||
1187 | * and does not set the parameter if invalid. | ||
1188 | * | ||
1189 | * Returns: | ||
1190 | * zero on success | ||
1191 | * -EINVAL if val is invalid | ||
1192 | **/ | ||
738 | #define lpfc_param_set(attr, default, minval, maxval) \ | 1193 | #define lpfc_param_set(attr, default, minval, maxval) \ |
739 | static int \ | 1194 | static int \ |
740 | lpfc_##attr##_set(struct lpfc_hba *phba, int val) \ | 1195 | lpfc_##attr##_set(struct lpfc_hba *phba, int val) \ |
@@ -749,6 +1204,27 @@ lpfc_##attr##_set(struct lpfc_hba *phba, int val) \ | |||
749 | return -EINVAL;\ | 1204 | return -EINVAL;\ |
750 | } | 1205 | } |
751 | 1206 | ||
1207 | /** | ||
1208 | * lpfc_param_store: Set a vport attribute value. | ||
1209 | * | ||
1210 | * Description: | ||
1211 | * Macro that given an attr e.g. hba_queue_depth expands | ||
1212 | * into a function with the name lpfc_hba_queue_depth_store. | ||
1213 | * | ||
1214 | * lpfc_##attr##_store: Set an attribute value. | ||
1215 | * @dev: class device that is converted into a Scsi_host. | ||
1216 | * @attr: device attribute, not used. | ||
1217 | * @buf: contains the attribute value in ascii. | ||
1218 | * @count: not used. | ||
1219 | * | ||
1220 | * Description: | ||
1221 | * Convert the ascii text number to an integer, then | ||
1222 | * use the lpfc_##attr##_set function to set the value. | ||
1223 | * | ||
1224 | * Returns: | ||
1225 | * -EINVAL if val is invalid or lpfc_##attr##_set() fails | ||
1226 | * length of buffer upon success. | ||
1227 | **/ | ||
752 | #define lpfc_param_store(attr) \ | 1228 | #define lpfc_param_store(attr) \ |
753 | static ssize_t \ | 1229 | static ssize_t \ |
754 | lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \ | 1230 | lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \ |
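The store macro documented above follows a fixed recipe: reject non-numeric text, convert the ascii number, hand it to the matching _set routine, and return either the buffer length or -EINVAL. A hedged standalone analogue of that recipe follows; the attribute name and range are made up and the code is not the lpfc implementation.

#include <ctype.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

static int hba_queue_depth_set(int val)
{
	return (val >= 1 && val <= 128) ? 0 : -EINVAL;	/* illustrative range */
}

static long hba_queue_depth_store(const char *buf, size_t count)
{
	int val = 0;

	(void)count;	/* not used, mirroring the documented @count */
	if (!isdigit((unsigned char)buf[0]))
		return -EINVAL;
	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;
	if (hba_queue_depth_set(val) == 0)
		return (long)strlen(buf);	/* success: length of buffer */
	return -EINVAL;
}

int main(void)
{
	printf("%ld\n", hba_queue_depth_store("64\n", 3));	/* prints 3 */
	printf("%ld\n", hba_queue_depth_store("oops", 4));	/* prints -EINVAL */
	return 0;
}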
@@ -768,6 +1244,20 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \ | |||
768 | return -EINVAL;\ | 1244 | return -EINVAL;\ |
769 | } | 1245 | } |
770 | 1246 | ||
1247 | /** | ||
1248 | * lpfc_vport_param_show: Return decimal formatted cfg attribute value. | ||
1249 | * | ||
1250 | * Description: | ||
1251 | * Macro that given an attr e.g. hba_queue_depth expands | ||
1252 | * into a function with the name lpfc_hba_queue_depth_show | ||
1253 | * | ||
1254 | * lpfc_##attr##_show: prints the attribute value in decimal. | ||
1255 | * @dev: class device that is converted into a Scsi_host. | ||
1256 | * @attr: device attribute, not used. | ||
1257 | * @buf: on return contains the attribute value in decimal. | ||
1258 | * | ||
1259 | * Returns: length of formatted string. | ||
1260 | **/ | ||
771 | #define lpfc_vport_param_show(attr) \ | 1261 | #define lpfc_vport_param_show(attr) \ |
772 | static ssize_t \ | 1262 | static ssize_t \ |
773 | lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ | 1263 | lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ |
@@ -780,6 +1270,21 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ | |||
780 | return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\ | 1270 | return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\ |
781 | } | 1271 | } |
782 | 1272 | ||
1273 | /** | ||
1274 | * lpfc_vport_param_hex_show: Return hex formatted attribute value. | ||
1275 | * | ||
1276 | * Description: | ||
1277 | * Macro that given an attr e.g. | ||
1278 | * hba_queue_depth expands into a function with the name | ||
1279 | * lpfc_hba_queue_depth_show | ||
1280 | * | ||
1281 | * lpfc_##attr##_show: prints the attribute value in hexadecimal. | ||
1282 | * @dev: class device that is converted into a Scsi_host. | ||
1283 | * @attr: device attribute, not used. | ||
1284 | * @buf: on return contains the attribute value in hexadecimal. | ||
1285 | * | ||
1286 | * Returns: length of formatted string. | ||
1287 | **/ | ||
783 | #define lpfc_vport_param_hex_show(attr) \ | 1288 | #define lpfc_vport_param_hex_show(attr) \ |
784 | static ssize_t \ | 1289 | static ssize_t \ |
785 | lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ | 1290 | lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ |
@@ -792,6 +1297,24 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ | |||
792 | return snprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\ | 1297 | return snprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\ |
793 | } | 1298 | } |
794 | 1299 | ||
1300 | /** | ||
1301 | * lpfc_vport_param_init: Initialize a vport cfg attribute. | ||
1302 | * | ||
1303 | * Description: | ||
1304 | * Macro that given an attr e.g. hba_queue_depth expands | ||
1305 | * into a function with the name lpfc_hba_queue_depth_init. The macro also | ||
1306 | * takes a default argument, a minimum and maximum argument. | ||
1307 | * | ||
1308 | * lpfc_##attr##_init: validates the min and max values then sets the | ||
1309 | * adapter config field accordingly, or uses the default if out of range | ||
1310 | * and prints an error message. | ||
1311 | * @phba: pointer to the adapter structure. | ||
1312 | * @val: integer attribute value. | ||
1313 | * | ||
1314 | * Returns: | ||
1315 | * zero on success | ||
1316 | * -EINVAL if default used | ||
1317 | **/ | ||
795 | #define lpfc_vport_param_init(attr, default, minval, maxval) \ | 1318 | #define lpfc_vport_param_init(attr, default, minval, maxval) \ |
796 | static int \ | 1319 | static int \ |
797 | lpfc_##attr##_init(struct lpfc_vport *vport, int val) \ | 1320 | lpfc_##attr##_init(struct lpfc_vport *vport, int val) \ |
@@ -801,12 +1324,29 @@ lpfc_##attr##_init(struct lpfc_vport *vport, int val) \ | |||
801 | return 0;\ | 1324 | return 0;\ |
802 | }\ | 1325 | }\ |
803 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ | 1326 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ |
804 | "0449 lpfc_"#attr" attribute cannot be set to %d, "\ | 1327 | "0423 lpfc_"#attr" attribute cannot be set to %d, "\ |
805 | "allowed range is ["#minval", "#maxval"]\n", val); \ | 1328 | "allowed range is ["#minval", "#maxval"]\n", val); \ |
806 | vport->cfg_##attr = default;\ | 1329 | vport->cfg_##attr = default;\ |
807 | return -EINVAL;\ | 1330 | return -EINVAL;\ |
808 | } | 1331 | } |
809 | 1332 | ||
1333 | /** | ||
1334 | * lpfc_vport_param_set: Set a vport cfg attribute. | ||
1335 | * | ||
1336 | * Description: | ||
1337 | * Macro that given an attr e.g. hba_queue_depth expands | ||
1338 | * into a function with the name lpfc_hba_queue_depth_set | ||
1339 | * | ||
1340 | * lpfc_##attr##_set: validates the min and max values then sets the | ||
1341 | * adapter config field if in the valid range. Prints an error message | ||
1342 | * and does not set the parameter if invalid. | ||
1343 | * @phba: pointer to the adapter structure. | ||
1344 | * @val: integer attribute value. | ||
1345 | * | ||
1346 | * Returns: | ||
1347 | * zero on success | ||
1348 | * -EINVAL if val is invalid | ||
1349 | **/ | ||
810 | #define lpfc_vport_param_set(attr, default, minval, maxval) \ | 1350 | #define lpfc_vport_param_set(attr, default, minval, maxval) \ |
811 | static int \ | 1351 | static int \ |
812 | lpfc_##attr##_set(struct lpfc_vport *vport, int val) \ | 1352 | lpfc_##attr##_set(struct lpfc_vport *vport, int val) \ |
@@ -816,11 +1356,28 @@ lpfc_##attr##_set(struct lpfc_vport *vport, int val) \ | |||
816 | return 0;\ | 1356 | return 0;\ |
817 | }\ | 1357 | }\ |
818 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ | 1358 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ |
819 | "0450 lpfc_"#attr" attribute cannot be set to %d, "\ | 1359 | "0424 lpfc_"#attr" attribute cannot be set to %d, "\ |
820 | "allowed range is ["#minval", "#maxval"]\n", val); \ | 1360 | "allowed range is ["#minval", "#maxval"]\n", val); \ |
821 | return -EINVAL;\ | 1361 | return -EINVAL;\ |
822 | } | 1362 | } |
823 | 1363 | ||
1364 | /** | ||
1365 | * lpfc_vport_param_store: Set a vport attribute. | ||
1366 | * | ||
1367 | * Description: | ||
1368 | * Macro that given an attr e.g. hba_queue_depth | ||
1369 | * expands into a function with the name lpfc_hba_queue_depth_store | ||
1370 | * | ||
1371 | * lpfc_##attr##_store: convert the ascii text number to an integer, then | ||
1372 | * use the lpfc_##attr##_set function to set the value. | ||
1373 | * @dev: class device that is converted into a Scsi_host. | ||
1374 | * @buf: contains the attribute value in decimal. | ||
1375 | * @count: not used. | ||
1376 | * | ||
1377 | * Returns: | ||
1378 | * -EINVAL if val is invalid or lpfc_##attr##_set() fails | ||
1379 | * length of buffer upon success. | ||
1380 | **/ | ||
824 | #define lpfc_vport_param_store(attr) \ | 1381 | #define lpfc_vport_param_store(attr) \ |
825 | static ssize_t \ | 1382 | static ssize_t \ |
826 | lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \ | 1383 | lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \ |
@@ -941,6 +1498,7 @@ static DEVICE_ATTR(option_rom_version, S_IRUGO, | |||
941 | lpfc_option_rom_version_show, NULL); | 1498 | lpfc_option_rom_version_show, NULL); |
942 | static DEVICE_ATTR(num_discovered_ports, S_IRUGO, | 1499 | static DEVICE_ATTR(num_discovered_ports, S_IRUGO, |
943 | lpfc_num_discovered_ports_show, NULL); | 1500 | lpfc_num_discovered_ports_show, NULL); |
1501 | static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL); | ||
944 | static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL); | 1502 | static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL); |
945 | static DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show, NULL); | 1503 | static DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show, NULL); |
946 | static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR, | 1504 | static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR, |
@@ -958,6 +1516,17 @@ static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL); | |||
958 | 1516 | ||
959 | static char *lpfc_soft_wwn_key = "C99G71SL8032A"; | 1517 | static char *lpfc_soft_wwn_key = "C99G71SL8032A"; |
960 | 1518 | ||
1519 | /** | ||
1520 | * lpfc_soft_wwn_enable_store: Allows setting of the wwn if the key is valid. | ||
1521 | * @dev: class device that is converted into a Scsi_host. | ||
1522 | * @attr: device attribute, not used. | ||
1523 | * @buf: containing the string lpfc_soft_wwn_key. | ||
1524 | * @count: must be size of lpfc_soft_wwn_key. | ||
1525 | * | ||
1526 | * Returns: | ||
1527 | * -EINVAL if the buffer does not contain lpfc_soft_wwn_key | ||
1528 | * length of buf indicates success | ||
1529 | **/ | ||
961 | static ssize_t | 1530 | static ssize_t |
962 | lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr, | 1531 | lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr, |
963 | const char *buf, size_t count) | 1532 | const char *buf, size_t count) |
@@ -994,6 +1563,14 @@ lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr, | |||
994 | static DEVICE_ATTR(lpfc_soft_wwn_enable, S_IWUSR, NULL, | 1563 | static DEVICE_ATTR(lpfc_soft_wwn_enable, S_IWUSR, NULL, |
995 | lpfc_soft_wwn_enable_store); | 1564 | lpfc_soft_wwn_enable_store); |
996 | 1565 | ||
1566 | /** | ||
1567 | * lpfc_soft_wwpn_show: Return the cfg soft ww port name of the adapter. | ||
1568 | * @dev: class device that is converted into a Scsi_host. | ||
1569 | * @attr: device attribute, not used. | ||
1570 | * @buf: on return contains the wwpn in hexadecimal. | ||
1571 | * | ||
1572 | * Returns: size of formatted string. | ||
1573 | **/ | ||
997 | static ssize_t | 1574 | static ssize_t |
998 | lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr, | 1575 | lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr, |
999 | char *buf) | 1576 | char *buf) |
@@ -1006,7 +1583,19 @@ lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr, | |||
1006 | (unsigned long long)phba->cfg_soft_wwpn); | 1583 | (unsigned long long)phba->cfg_soft_wwpn); |
1007 | } | 1584 | } |
1008 | 1585 | ||
1009 | 1586 | /** | |
1587 | * lpfc_soft_wwpn_store: Set the ww port name of the adapter. | ||
1588 | * @dev: class device that is converted into a Scsi_host. | ||
1589 | * @attr: device attribute, not used. | ||
1590 | * @buf: contains the wwpn in hexadecimal. | ||
1591 | * @count: number of wwpn bytes in buf | ||
1592 | * | ||
1593 | * Returns: | ||
1594 | * -EACCES hba reset not enabled, adapter over temp | ||
1595 | * -EINVAL soft wwn not enabled, count is invalid, or a wwpn byte is invalid | ||
1596 | * -EIO error taking adapter offline or online | ||
1597 | * value of count on success | ||
1598 | **/ | ||
1010 | static ssize_t | 1599 | static ssize_t |
1011 | lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr, | 1600 | lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr, |
1012 | const char *buf, size_t count) | 1601 | const char *buf, size_t count) |
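The soft WWPN flow described above is a two-step sysfs sequence: first write the enable key, then write the new port name. A hedged user-space sketch follows; the /sys path and host number are assumptions for the example, while the attribute names come from the DEVICE_ATTR() declarations in this file and the key string is lpfc_soft_wwn_key ("C99G71SL8032A"). The WWPN value written is purely illustrative.

#include <stdio.h>

static int write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* Step 1: unlock soft WWN changes with the documented key. */
	write_attr("/sys/class/scsi_host/host0/lpfc_soft_wwn_enable",
		   "C99G71SL8032A");
	/* Step 2: store the new port name as 16 hex digits (illustrative value). */
	write_attr("/sys/class/scsi_host/host0/lpfc_soft_wwpn",
		   "10000000c9abcdef");
	return 0;
}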
@@ -1080,6 +1669,14 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr, | |||
1080 | static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\ | 1669 | static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\ |
1081 | lpfc_soft_wwpn_show, lpfc_soft_wwpn_store); | 1670 | lpfc_soft_wwpn_show, lpfc_soft_wwpn_store); |
1082 | 1671 | ||
1672 | /** | ||
1673 | * lpfc_soft_wwnn_show: Return the cfg soft ww node name for the adapter. | ||
1674 | * @dev: class device that is converted into a Scsi_host. | ||
1675 | * @attr: device attribute, not used. | ||
1676 | * @buf: on return contains the wwnn in hexadecimal. | ||
1677 | * | ||
1678 | * Returns: size of formatted string. | ||
1679 | **/ | ||
1083 | static ssize_t | 1680 | static ssize_t |
1084 | lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr, | 1681 | lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr, |
1085 | char *buf) | 1682 | char *buf) |
@@ -1090,7 +1687,16 @@ lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr, | |||
1090 | (unsigned long long)phba->cfg_soft_wwnn); | 1687 | (unsigned long long)phba->cfg_soft_wwnn); |
1091 | } | 1688 | } |
1092 | 1689 | ||
1093 | 1690 | /** | |
1691 | * lpfc_soft_wwnn_store: Set the ww node name of the adapter. | ||
1692 | * @dev: class device that is converted into a Scsi_host. | ||
1693 | * @buf: contains the ww node name in hexadecimal. | ||
1694 | * @count: number of wwnn bytes in buf. | ||
1695 | * | ||
1696 | * Returns: | ||
1697 | * -EINVAL soft wwn not enabled, count is invalid, or a wwnn byte is invalid | ||
1698 | * value of count on success | ||
1699 | **/ | ||
1094 | static ssize_t | 1700 | static ssize_t |
1095 | lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr, | 1701 | lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr, |
1096 | const char *buf, size_t count) | 1702 | const char *buf, size_t count) |
@@ -1178,6 +1784,15 @@ module_param(lpfc_nodev_tmo, int, 0); | |||
1178 | MODULE_PARM_DESC(lpfc_nodev_tmo, | 1784 | MODULE_PARM_DESC(lpfc_nodev_tmo, |
1179 | "Seconds driver will hold I/O waiting " | 1785 | "Seconds driver will hold I/O waiting " |
1180 | "for a device to come back"); | 1786 | "for a device to come back"); |
1787 | |||
1788 | /** | ||
1789 | * lpfc_nodev_tmo_show: Return the hba dev loss timeout value. | ||
1790 | * @dev: class converted to a Scsi_host structure. | ||
1791 | * @attr: device attribute, not used. | ||
1792 | * @buf: on return contains the dev loss timeout in decimal. | ||
1793 | * | ||
1794 | * Returns: size of formatted string. | ||
1795 | **/ | ||
1181 | static ssize_t | 1796 | static ssize_t |
1182 | lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr, | 1797 | lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr, |
1183 | char *buf) | 1798 | char *buf) |
@@ -1189,6 +1804,21 @@ lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr, | |||
1189 | return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo); | 1804 | return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo); |
1190 | } | 1805 | } |
1191 | 1806 | ||
1807 | /** | ||
1808 | * lpfc_nodev_tmo_init: Set the hba nodev timeout value. | ||
1809 | * @vport: lpfc vport structure pointer. | ||
1810 | * @val: contains the nodev timeout value. | ||
1811 | * | ||
1812 | * Description: | ||
1813 | * If the devloss tmo is already set then nodev tmo is set to devloss tmo, | ||
1814 | * a kernel error message is printed and zero is returned. | ||
1815 | * Else if val is in range then nodev tmo and devloss tmo are set to val. | ||
1816 | * Otherwise nodev tmo is set to the default value. | ||
1817 | * | ||
1818 | * Returns: | ||
1819 | * zero if already set or if val is in range | ||
1820 | * -EINVAL val out of range | ||
1821 | **/ | ||
1192 | static int | 1822 | static int |
1193 | lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val) | 1823 | lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val) |
1194 | { | 1824 | { |
@@ -1196,7 +1826,7 @@ lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val) | |||
1196 | vport->cfg_nodev_tmo = vport->cfg_devloss_tmo; | 1826 | vport->cfg_nodev_tmo = vport->cfg_devloss_tmo; |
1197 | if (val != LPFC_DEF_DEVLOSS_TMO) | 1827 | if (val != LPFC_DEF_DEVLOSS_TMO) |
1198 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, | 1828 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, |
1199 | "0402 Ignoring nodev_tmo module " | 1829 | "0407 Ignoring nodev_tmo module " |
1200 | "parameter because devloss_tmo is " | 1830 | "parameter because devloss_tmo is " |
1201 | "set.\n"); | 1831 | "set.\n"); |
1202 | return 0; | 1832 | return 0; |
@@ -1215,6 +1845,13 @@ lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val) | |||
1215 | return -EINVAL; | 1845 | return -EINVAL; |
1216 | } | 1846 | } |
1217 | 1847 | ||
1848 | /** | ||
1849 | * lpfc_update_rport_devloss_tmo: Update dev loss tmo value. | ||
1850 | * @vport: lpfc vport structure pointer. | ||
1851 | * | ||
1852 | * Description: | ||
1853 | * Update all the ndlp's dev loss tmo with the vport devloss tmo value. | ||
1854 | **/ | ||
1218 | static void | 1855 | static void |
1219 | lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport) | 1856 | lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport) |
1220 | { | 1857 | { |
@@ -1229,6 +1866,21 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport) | |||
1229 | spin_unlock_irq(shost->host_lock); | 1866 | spin_unlock_irq(shost->host_lock); |
1230 | } | 1867 | } |
1231 | 1868 | ||
1869 | /** | ||
1870 | * lpfc_nodev_tmo_set: Set the vport nodev tmo and devloss tmo values. | ||
1871 | * @vport: lpfc vport structure pointer. | ||
1872 | * @val: contains the tmo value. | ||
1873 | * | ||
1874 | * Description: | ||
1875 | * If the devloss tmo is already set or the vport dev loss tmo has changed | ||
1876 | * then a kernel error message is printed and zero is returned. | ||
1877 | * Else if val is in range then nodev tmo and devloss tmo are set to val. | ||
1878 | * Otherwise nodev tmo is set to the default value. | ||
1879 | * | ||
1880 | * Returns: | ||
1881 | * zero if already set or if val is in range | ||
1882 | * -EINVAL val out of range | ||
1883 | **/ | ||
1232 | static int | 1884 | static int |
1233 | lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val) | 1885 | lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val) |
1234 | { | 1886 | { |
@@ -1269,6 +1921,21 @@ MODULE_PARM_DESC(lpfc_devloss_tmo, | |||
1269 | lpfc_vport_param_init(devloss_tmo, LPFC_DEF_DEVLOSS_TMO, | 1921 | lpfc_vport_param_init(devloss_tmo, LPFC_DEF_DEVLOSS_TMO, |
1270 | LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO) | 1922 | LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO) |
1271 | lpfc_vport_param_show(devloss_tmo) | 1923 | lpfc_vport_param_show(devloss_tmo) |
1924 | |||
1925 | /** | ||
1926 | * lpfc_devloss_tmo_set: Sets vport nodev tmo, devloss tmo values, changed bit. | ||
1927 | * @vport: lpfc vport structure pointer. | ||
1928 | * @val: contains the tmo value. | ||
1929 | * | ||
1930 | * Description: | ||
1931 | * If val is in a valid range then set the vport nodev tmo, | ||
1932 | * devloss tmo, also set the vport dev loss tmo changed flag. | ||
1933 | * Else a kernel error message is printed. | ||
1934 | * | ||
1935 | * Returns: | ||
1936 | * zero if val is in range | ||
1937 | * -EINVAL val out of range | ||
1938 | **/ | ||
1272 | static int | 1939 | static int |
1273 | lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val) | 1940 | lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val) |
1274 | { | 1941 | { |
@@ -1366,12 +2033,27 @@ MODULE_PARM_DESC(lpfc_restrict_login, | |||
1366 | "Restrict virtual ports login to remote initiators."); | 2033 | "Restrict virtual ports login to remote initiators."); |
1367 | lpfc_vport_param_show(restrict_login); | 2034 | lpfc_vport_param_show(restrict_login); |
1368 | 2035 | ||
2036 | /** | ||
2037 | * lpfc_restrict_login_init: Set the vport restrict login flag. | ||
2038 | * @vport: lpfc vport structure pointer. | ||
2039 | * @val: contains the restrict login value. | ||
2040 | * | ||
2041 | * Description: | ||
2042 | * If val is not in a valid range then log a kernel error message and set | ||
2043 | * the vport restrict login to one. | ||
2044 | * If the port type is physical clear the restrict login flag and return. | ||
2045 | * Else set the restrict login flag to val. | ||
2046 | * | ||
2047 | * Returns: | ||
2048 | * zero if val is in range | ||
2049 | * -EINVAL val out of range | ||
2050 | **/ | ||
1369 | static int | 2051 | static int |
1370 | lpfc_restrict_login_init(struct lpfc_vport *vport, int val) | 2052 | lpfc_restrict_login_init(struct lpfc_vport *vport, int val) |
1371 | { | 2053 | { |
1372 | if (val < 0 || val > 1) { | 2054 | if (val < 0 || val > 1) { |
1373 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, | 2055 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, |
1374 | "0449 lpfc_restrict_login attribute cannot " | 2056 | "0422 lpfc_restrict_login attribute cannot " |
1375 | "be set to %d, allowed range is [0, 1]\n", | 2057 | "be set to %d, allowed range is [0, 1]\n", |
1376 | val); | 2058 | val); |
1377 | vport->cfg_restrict_login = 1; | 2059 | vport->cfg_restrict_login = 1; |
@@ -1385,12 +2067,28 @@ lpfc_restrict_login_init(struct lpfc_vport *vport, int val) | |||
1385 | return 0; | 2067 | return 0; |
1386 | } | 2068 | } |
1387 | 2069 | ||
2070 | /** | ||
2071 | * lpfc_restrict_login_set: Set the vport restrict login flag. | ||
2072 | * @vport: lpfc vport structure pointer. | ||
2073 | * @val: contains the restrict login value. | ||
2074 | * | ||
2075 | * Description: | ||
2076 | * If val is not in a valid range then log a kernel error message and set | ||
2077 | * the vport restrict login to one. | ||
2078 | * If the port type is physical and the val is not zero log a kernel | ||
2079 | * error message, clear the restrict login flag and return zero. | ||
2080 | * Else set the restrict login flag to val. | ||
2081 | * | ||
2082 | * Returns: | ||
2083 | * zero if val is in range | ||
2084 | * -EINVAL val out of range | ||
2085 | **/ | ||
1388 | static int | 2086 | static int |
1389 | lpfc_restrict_login_set(struct lpfc_vport *vport, int val) | 2087 | lpfc_restrict_login_set(struct lpfc_vport *vport, int val) |
1390 | { | 2088 | { |
1391 | if (val < 0 || val > 1) { | 2089 | if (val < 0 || val > 1) { |
1392 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, | 2090 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, |
1393 | "0450 lpfc_restrict_login attribute cannot " | 2091 | "0425 lpfc_restrict_login attribute cannot " |
1394 | "be set to %d, allowed range is [0, 1]\n", | 2092 | "be set to %d, allowed range is [0, 1]\n", |
1395 | val); | 2093 | val); |
1396 | vport->cfg_restrict_login = 1; | 2094 | vport->cfg_restrict_login = 1; |
@@ -1441,6 +2139,23 @@ LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1, | |||
1441 | # Set loop mode if you want to run as an NL_Port. Value range is [0,0x6]. | 2139 | # Set loop mode if you want to run as an NL_Port. Value range is [0,0x6]. |
1442 | # Default value is 0. | 2140 | # Default value is 0. |
1443 | */ | 2141 | */ |
2142 | |||
2143 | /** | ||
2144 | * lpfc_topology_set: Set the adapters topology field. | ||
2145 | * @phba: lpfc_hba pointer. | ||
2146 | * @val: topology value. | ||
2147 | * | ||
2148 | * Description: | ||
2149 | * If val is in a valid range then set the adapter's topology field and | ||
2150 | * issue a lip; if the lip fails reset the topology to the old value. | ||
2151 | * | ||
2152 | * If the value is not in range log a kernel error message and return an error. | ||
2153 | * | ||
2154 | * Returns: | ||
2155 | * zero if val is in range and lip okay | ||
2156 | * non-zero return value from lpfc_issue_lip() | ||
2157 | * -EINVAL val out of range | ||
2158 | **/ | ||
1444 | static int | 2159 | static int |
1445 | lpfc_topology_set(struct lpfc_hba *phba, int val) | 2160 | lpfc_topology_set(struct lpfc_hba *phba, int val) |
1446 | { | 2161 | { |
@@ -1469,6 +2184,335 @@ lpfc_param_store(topology) | |||
1469 | static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR, | 2184 | static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR, |
1470 | lpfc_topology_show, lpfc_topology_store); | 2185 | lpfc_topology_show, lpfc_topology_store); |
1471 | 2186 | ||
2187 | |||
2188 | /** | ||
2189 | * lpfc_stat_data_ctrl_store: write callback for the lpfc_stat_data_ctrl | ||
2190 | * sysfs file. | ||
2191 | * @dev: Pointer to class device. | ||
2192 | * @buf: Data buffer. | ||
2193 | * @count: Size of the data buffer. | ||
2194 | * | ||
2195 | * This function gets called when a user writes to the lpfc_stat_data_ctrl | ||
2196 | * sysfs file. It parses the command written to the sysfs file | ||
2197 | * and takes the appropriate action. These commands are used for controlling | ||
2198 | * driver statistical data collection. | ||
2199 | * The following are the commands this function handles. | ||
2200 | * | ||
2201 | * setbucket <bucket_type> <base> <step> | ||
2202 | * = Set the latency buckets. | ||
2203 | * destroybucket = destroy all the buckets. | ||
2204 | * start = start data collection | ||
2205 | * stop = stop data collection | ||
2206 | * reset = reset the collected data | ||
2207 | **/ | ||
2208 | static ssize_t | ||
2209 | lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr, | ||
2210 | const char *buf, size_t count) | ||
2211 | { | ||
2212 | struct Scsi_Host *shost = class_to_shost(dev); | ||
2213 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | ||
2214 | struct lpfc_hba *phba = vport->phba; | ||
2215 | #define LPFC_MAX_DATA_CTRL_LEN 1024 | ||
2216 | static char bucket_data[LPFC_MAX_DATA_CTRL_LEN]; | ||
2217 | unsigned long i; | ||
2218 | char *str_ptr, *token; | ||
2219 | struct lpfc_vport **vports; | ||
2220 | struct Scsi_Host *v_shost; | ||
2221 | char *bucket_type_str, *base_str, *step_str; | ||
2222 | unsigned long base, step, bucket_type; | ||
2223 | |||
2224 | if (!strncmp(buf, "setbucket", strlen("setbucket"))) { | ||
2225 | if (strlen(buf) > LPFC_MAX_DATA_CTRL_LEN) | ||
2226 | return -EINVAL; | ||
2227 | |||
2228 | strcpy(bucket_data, buf); | ||
2229 | str_ptr = &bucket_data[0]; | ||
2230 | /* Ignore this token - this is command token */ | ||
2231 | token = strsep(&str_ptr, "\t "); | ||
2232 | if (!token) | ||
2233 | return -EINVAL; | ||
2234 | |||
2235 | bucket_type_str = strsep(&str_ptr, "\t "); | ||
2236 | if (!bucket_type_str) | ||
2237 | return -EINVAL; | ||
2238 | |||
2239 | if (!strncmp(bucket_type_str, "linear", strlen("linear"))) | ||
2240 | bucket_type = LPFC_LINEAR_BUCKET; | ||
2241 | else if (!strncmp(bucket_type_str, "power2", strlen("power2"))) | ||
2242 | bucket_type = LPFC_POWER2_BUCKET; | ||
2243 | else | ||
2244 | return -EINVAL; | ||
2245 | |||
2246 | base_str = strsep(&str_ptr, "\t "); | ||
2247 | if (!base_str) | ||
2248 | return -EINVAL; | ||
2249 | base = simple_strtoul(base_str, NULL, 0); | ||
2250 | |||
2251 | step_str = strsep(&str_ptr, "\t "); | ||
2252 | if (!step_str) | ||
2253 | return -EINVAL; | ||
2254 | step = simple_strtoul(step_str, NULL, 0); | ||
2255 | if (!step) | ||
2256 | return -EINVAL; | ||
2257 | |||
2258 | /* Block the data collection for every vport */ | ||
2259 | vports = lpfc_create_vport_work_array(phba); | ||
2260 | if (vports == NULL) | ||
2261 | return -ENOMEM; | ||
2262 | |||
2263 | for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { | ||
2264 | v_shost = lpfc_shost_from_vport(vports[i]); | ||
2265 | spin_lock_irq(v_shost->host_lock); | ||
2266 | /* Block and reset data collection */ | ||
2267 | vports[i]->stat_data_blocked = 1; | ||
2268 | if (vports[i]->stat_data_enabled) | ||
2269 | lpfc_vport_reset_stat_data(vports[i]); | ||
2270 | spin_unlock_irq(v_shost->host_lock); | ||
2271 | } | ||
2272 | |||
2273 | /* Set the bucket attributes */ | ||
2274 | phba->bucket_type = bucket_type; | ||
2275 | phba->bucket_base = base; | ||
2276 | phba->bucket_step = step; | ||
2277 | |||
2278 | for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { | ||
2279 | v_shost = lpfc_shost_from_vport(vports[i]); | ||
2280 | |||
2281 | /* Unblock data collection */ | ||
2282 | spin_lock_irq(v_shost->host_lock); | ||
2283 | vports[i]->stat_data_blocked = 0; | ||
2284 | spin_unlock_irq(v_shost->host_lock); | ||
2285 | } | ||
2286 | lpfc_destroy_vport_work_array(phba, vports); | ||
2287 | return strlen(buf); | ||
2288 | } | ||
2289 | |||
2290 | if (!strncmp(buf, "destroybucket", strlen("destroybucket"))) { | ||
2291 | vports = lpfc_create_vport_work_array(phba); | ||
2292 | if (vports == NULL) | ||
2293 | return -ENOMEM; | ||
2294 | |||
2295 | for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { | ||
2296 | v_shost = lpfc_shost_from_vport(vports[i]); | ||
2297 | spin_lock_irq(v_shost->host_lock); | ||
2298 | vports[i]->stat_data_blocked = 1; | ||
2299 | lpfc_free_bucket(vports[i]); | ||
2300 | vports[i]->stat_data_enabled = 0; | ||
2301 | vports[i]->stat_data_blocked = 0; | ||
2302 | spin_unlock_irq(v_shost->host_lock); | ||
2303 | } | ||
2304 | lpfc_destroy_vport_work_array(phba, vports); | ||
2305 | phba->bucket_type = LPFC_NO_BUCKET; | ||
2306 | phba->bucket_base = 0; | ||
2307 | phba->bucket_step = 0; | ||
2308 | return strlen(buf); | ||
2309 | } | ||
2310 | |||
2311 | if (!strncmp(buf, "start", strlen("start"))) { | ||
2312 | /* If no buckets configured return error */ | ||
2313 | if (phba->bucket_type == LPFC_NO_BUCKET) | ||
2314 | return -EINVAL; | ||
2315 | spin_lock_irq(shost->host_lock); | ||
2316 | if (vport->stat_data_enabled) { | ||
2317 | spin_unlock_irq(shost->host_lock); | ||
2318 | return strlen(buf); | ||
2319 | } | ||
2320 | lpfc_alloc_bucket(vport); | ||
2321 | vport->stat_data_enabled = 1; | ||
2322 | spin_unlock_irq(shost->host_lock); | ||
2323 | return strlen(buf); | ||
2324 | } | ||
2325 | |||
2326 | if (!strncmp(buf, "stop", strlen("stop"))) { | ||
2327 | spin_lock_irq(shost->host_lock); | ||
2328 | if (vport->stat_data_enabled == 0) { | ||
2329 | spin_unlock_irq(shost->host_lock); | ||
2330 | return strlen(buf); | ||
2331 | } | ||
2332 | lpfc_free_bucket(vport); | ||
2333 | vport->stat_data_enabled = 0; | ||
2334 | spin_unlock_irq(shost->host_lock); | ||
2335 | return strlen(buf); | ||
2336 | } | ||
2337 | |||
2338 | if (!strncmp(buf, "reset", strlen("reset"))) { | ||
2339 | if ((phba->bucket_type == LPFC_NO_BUCKET) | ||
2340 | || !vport->stat_data_enabled) | ||
2341 | return strlen(buf); | ||
2342 | spin_lock_irq(shost->host_lock); | ||
2343 | vport->stat_data_blocked = 1; | ||
2344 | lpfc_vport_reset_stat_data(vport); | ||
2345 | vport->stat_data_blocked = 0; | ||
2346 | spin_unlock_irq(shost->host_lock); | ||
2347 | return strlen(buf); | ||
2348 | } | ||
2349 | return -EINVAL; | ||
2350 | } | ||
2351 | |||
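The commands documented above are plain strings written to the attribute and parsed by lpfc_stat_data_ctrl_store(). A minimal user-space sketch that configures power-of-two buckets and starts collection; the /sys/class/scsi_host/host0/... path is an assumption (the host number varies), and anything the driver cannot parse is answered with -EINVAL.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        /* Assumed attribute path; the host number varies per system. */
        const char *path = "/sys/class/scsi_host/host0/lpfc_stat_data_ctrl";
        const char *cmds[] = { "setbucket power2 1 1", "start" };
        unsigned int i;

        for (i = 0; i < sizeof(cmds) / sizeof(cmds[0]); i++) {
                int fd = open(path, O_WRONLY);

                if (fd < 0) {
                        perror("open");
                        return 1;
                }
                /* Each command is parsed by lpfc_stat_data_ctrl_store(). */
                if (write(fd, cmds[i], strlen(cmds[i])) < 0)
                        perror("write");
                close(fd);
        }
        return 0;
}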
2352 | |||
2353 | /** | ||
2354 | * lpfc_stat_data_ctrl_show: Read callback function for | ||
2355 | * lpfc_stat_data_ctrl sysfs file. | ||
2356 | * @dev: Pointer to class device object. | ||
2357 | * @buf: Data buffer. | ||
2358 | * | ||
2359 | * This function is the read callback function for | ||
2360 | * the lpfc_stat_data_ctrl sysfs file. It reports the | ||
2361 | * current statistical data collection state. | ||
2362 | **/ | ||
2363 | static ssize_t | ||
2364 | lpfc_stat_data_ctrl_show(struct device *dev, struct device_attribute *attr, | ||
2365 | char *buf) | ||
2366 | { | ||
2367 | struct Scsi_Host *shost = class_to_shost(dev); | ||
2368 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | ||
2369 | struct lpfc_hba *phba = vport->phba; | ||
2370 | int index = 0; | ||
2371 | int i; | ||
2372 | char *bucket_type; | ||
2373 | unsigned long bucket_value; | ||
2374 | |||
2375 | switch (phba->bucket_type) { | ||
2376 | case LPFC_LINEAR_BUCKET: | ||
2377 | bucket_type = "linear"; | ||
2378 | break; | ||
2379 | case LPFC_POWER2_BUCKET: | ||
2380 | bucket_type = "power2"; | ||
2381 | break; | ||
2382 | default: | ||
2383 | bucket_type = "No Bucket"; | ||
2384 | break; | ||
2385 | } | ||
2386 | |||
2387 | sprintf(&buf[index], "Statistical Data enabled :%d, " | ||
2388 | "blocked :%d, Bucket type :%s, Bucket base :%d," | ||
2389 | " Bucket step :%d\nLatency Ranges :", | ||
2390 | vport->stat_data_enabled, vport->stat_data_blocked, | ||
2391 | bucket_type, phba->bucket_base, phba->bucket_step); | ||
2392 | index = strlen(buf); | ||
2393 | if (phba->bucket_type != LPFC_NO_BUCKET) { | ||
2394 | for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) { | ||
2395 | if (phba->bucket_type == LPFC_LINEAR_BUCKET) | ||
2396 | bucket_value = phba->bucket_base + | ||
2397 | phba->bucket_step * i; | ||
2398 | else | ||
2399 | bucket_value = phba->bucket_base + | ||
2400 | (1 << i) * phba->bucket_step; | ||
2401 | |||
2402 | if (index + 10 > PAGE_SIZE) | ||
2403 | break; | ||
2404 | sprintf(&buf[index], "%08ld ", bucket_value); | ||
2405 | index = strlen(buf); | ||
2406 | } | ||
2407 | } | ||
2408 | sprintf(&buf[index], "\n"); | ||
2409 | return strlen(buf); | ||
2410 | } | ||
2411 | |||
2412 | /* | ||
2413 | * Sysfs attribute to control the statistical data collection. | ||
2414 | */ | ||
2415 | static DEVICE_ATTR(lpfc_stat_data_ctrl, S_IRUGO | S_IWUSR, | ||
2416 | lpfc_stat_data_ctrl_show, lpfc_stat_data_ctrl_store); | ||
2417 | |||
2418 | /* | ||
2419 | * lpfc_drvr_stat_data: sysfs attr to get driver statistical data. | ||
2420 | */ | ||
2421 | |||
2422 | /* | ||
2423 | * Each bucket takes 11 characters ("%010u," below) and each target adds | ||
2424 | * 17 bytes of WWN prefix (16 hex digits and a colon) plus a newline. | ||
2425 | */ | ||
2426 | #define STAT_DATA_SIZE_PER_TARGET(NUM_BUCKETS) ((NUM_BUCKETS) * 11 + 18) | ||
2427 | #define MAX_STAT_DATA_SIZE_PER_TARGET \ | ||
2428 | STAT_DATA_SIZE_PER_TARGET(LPFC_MAX_BUCKET_COUNT) | ||
2429 | |||
2430 | |||
2431 | /** | ||
2432 | * sysfs_drvr_stat_data_read: Read callback function for lpfc_drvr_stat_data | ||
2433 | * sysfs attribute. | ||
2434 | * @kobj: Pointer to the kernel object | ||
2435 | * @bin_attr: Attribute object | ||
2436 | * @buff: Buffer pointer | ||
2437 | * @off: File offset | ||
2438 | * @count: Buffer size | ||
2439 | * | ||
2440 | * This function is the read callback function for the lpfc_drvr_stat_data | ||
2441 | * sysfs file. It exports the statistical data to user | ||
2442 | * applications. | ||
2443 | **/ | ||
2444 | static ssize_t | ||
2445 | sysfs_drvr_stat_data_read(struct kobject *kobj, struct bin_attribute *bin_attr, | ||
2446 | char *buf, loff_t off, size_t count) | ||
2447 | { | ||
2448 | struct device *dev = container_of(kobj, struct device, | ||
2449 | kobj); | ||
2450 | struct Scsi_Host *shost = class_to_shost(dev); | ||
2451 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | ||
2452 | struct lpfc_hba *phba = vport->phba; | ||
2453 | int i = 0, index = 0; | ||
2454 | unsigned long nport_index; | ||
2455 | struct lpfc_nodelist *ndlp = NULL; | ||
2456 | nport_index = (unsigned long)off / | ||
2457 | MAX_STAT_DATA_SIZE_PER_TARGET; | ||
2458 | |||
2459 | if (!vport->stat_data_enabled || vport->stat_data_blocked | ||
2460 | || (phba->bucket_type == LPFC_NO_BUCKET)) | ||
2461 | return 0; | ||
2462 | |||
2463 | spin_lock_irq(shost->host_lock); | ||
2464 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { | ||
2465 | if (!NLP_CHK_NODE_ACT(ndlp) || !ndlp->lat_data) | ||
2466 | continue; | ||
2467 | |||
2468 | if (nport_index > 0) { | ||
2469 | nport_index--; | ||
2470 | continue; | ||
2471 | } | ||
2472 | |||
2473 | if ((index + MAX_STAT_DATA_SIZE_PER_TARGET) | ||
2474 | > count) | ||
2475 | break; | ||
2476 | |||
2477 | if (!ndlp->lat_data) | ||
2478 | continue; | ||
2479 | |||
2480 | /* Print the WWN */ | ||
2481 | sprintf(&buf[index], "%02x%02x%02x%02x%02x%02x%02x%02x:", | ||
2482 | ndlp->nlp_portname.u.wwn[0], | ||
2483 | ndlp->nlp_portname.u.wwn[1], | ||
2484 | ndlp->nlp_portname.u.wwn[2], | ||
2485 | ndlp->nlp_portname.u.wwn[3], | ||
2486 | ndlp->nlp_portname.u.wwn[4], | ||
2487 | ndlp->nlp_portname.u.wwn[5], | ||
2488 | ndlp->nlp_portname.u.wwn[6], | ||
2489 | ndlp->nlp_portname.u.wwn[7]); | ||
2490 | |||
2491 | index = strlen(buf); | ||
2492 | |||
2493 | for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) { | ||
2494 | sprintf(&buf[index], "%010u,", | ||
2495 | ndlp->lat_data[i].cmd_count); | ||
2496 | index = strlen(buf); | ||
2497 | } | ||
2498 | sprintf(&buf[index], "\n"); | ||
2499 | index = strlen(buf); | ||
2500 | } | ||
2501 | spin_unlock_irq(shost->host_lock); | ||
2502 | return index; | ||
2503 | } | ||
2504 | |||
2505 | static struct bin_attribute sysfs_drvr_stat_data_attr = { | ||
2506 | .attr = { | ||
2507 | .name = "lpfc_drvr_stat_data", | ||
2508 | .mode = S_IRUSR, | ||
2509 | .owner = THIS_MODULE, | ||
2510 | }, | ||
2511 | .size = LPFC_MAX_TARGET * MAX_STAT_DATA_SIZE_PER_TARGET, | ||
2512 | .read = sysfs_drvr_stat_data_read, | ||
2513 | .write = NULL, | ||
2514 | }; | ||
2515 | |||
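Each record emitted by sysfs_drvr_stat_data_read() is a 16-hex-digit target WWPN, a colon, and then one zero-padded ten-digit command count per bucket, comma separated and newline terminated, as the sprintf calls above show. A small user-space sketch that splits those records; the sysfs path is an assumption.

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* Assumed path of the binary attribute registered above. */
        const char *path = "/sys/class/scsi_host/host0/lpfc_drvr_stat_data";
        char line[4096];
        FILE *f = fopen(path, "r");

        if (!f) {
                perror("fopen");
                return 1;
        }
        while (fgets(line, sizeof(line), f)) {
                char *counts = strchr(line, ':');

                if (!counts)
                        continue;
                *counts++ = '\0';
                /* line holds the target WWPN, counts the comma-separated
                 * per-bucket command counts. */
                printf("target %s -> %s", line, counts);
        }
        fclose(f);
        return 0;
}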
1472 | /* | 2516 | /* |
1473 | # lpfc_link_speed: Link speed selection for initializing the Fibre Channel | 2517 | # lpfc_link_speed: Link speed selection for initializing the Fibre Channel |
1474 | # connection. | 2518 | # connection. |
@@ -1479,6 +2523,24 @@ static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR, | |||
1479 | # 8 = 8 Gigabaud | 2523 | # 8 = 8 Gigabaud |
1480 | # Value range is [0,8]. Default value is 0. | 2524 | # Value range is [0,8]. Default value is 0. |
1481 | */ | 2525 | */ |
2526 | |||
2527 | /** | ||
2528 | * lpfc_link_speed_set: Set the adapter's link speed. | ||
2529 | * @phba: lpfc_hba pointer. | ||
2530 | * @val: link speed value. | ||
2531 | * | ||
2532 | * Description: | ||
2533 | * If val is in a valid range then set the adapter's link speed field and | ||
2534 | * issue a lip; if the lip fails reset the link speed to the old value. | ||
2535 | * | ||
2536 | * Notes: | ||
2537 | * If the value is not in range log a kernel error message and return an error. | ||
2538 | * | ||
2539 | * Returns: | ||
2540 | * zero if val is in range and lip okay. | ||
2541 | * non-zero return value from lpfc_issue_lip() | ||
2542 | * -EINVAL val out of range | ||
2543 | **/ | ||
1482 | static int | 2544 | static int |
1483 | lpfc_link_speed_set(struct lpfc_hba *phba, int val) | 2545 | lpfc_link_speed_set(struct lpfc_hba *phba, int val) |
1484 | { | 2546 | { |
@@ -1513,6 +2575,23 @@ static int lpfc_link_speed = 0; | |||
1513 | module_param(lpfc_link_speed, int, 0); | 2575 | module_param(lpfc_link_speed, int, 0); |
1514 | MODULE_PARM_DESC(lpfc_link_speed, "Select link speed"); | 2576 | MODULE_PARM_DESC(lpfc_link_speed, "Select link speed"); |
1515 | lpfc_param_show(link_speed) | 2577 | lpfc_param_show(link_speed) |
2578 | |||
2579 | /** | ||
2580 | * lpfc_link_speed_init: Set the adapter's link speed. | ||
2581 | * @phba: lpfc_hba pointer. | ||
2582 | * @val: link speed value. | ||
2583 | * | ||
2584 | * Description: | ||
2585 | * If val is in a valid range then set the adapter's link speed field. | ||
2586 | * | ||
2587 | * Notes: | ||
2588 | * If the value is not in range log a kernel error message, clear the link | ||
2589 | * speed and return an error. | ||
2590 | * | ||
2591 | * Returns: | ||
2592 | * zero if val saved. | ||
2593 | * -EINVAL val out of range | ||
2594 | **/ | ||
1516 | static int | 2595 | static int |
1517 | lpfc_link_speed_init(struct lpfc_hba *phba, int val) | 2596 | lpfc_link_speed_init(struct lpfc_hba *phba, int val) |
1518 | { | 2597 | { |
@@ -1522,7 +2601,7 @@ lpfc_link_speed_init(struct lpfc_hba *phba, int val) | |||
1522 | return 0; | 2601 | return 0; |
1523 | } | 2602 | } |
1524 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 2603 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
1525 | "0454 lpfc_link_speed attribute cannot " | 2604 | "0405 lpfc_link_speed attribute cannot " |
1526 | "be set to %d, allowed values are " | 2605 | "be set to %d, allowed values are " |
1527 | "["LPFC_LINK_SPEED_STRING"]\n", val); | 2606 | "["LPFC_LINK_SPEED_STRING"]\n", val); |
1528 | phba->cfg_link_speed = 0; | 2607 | phba->cfg_link_speed = 0; |
@@ -1548,6 +2627,48 @@ LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1, | |||
1548 | "Use ADISC on rediscovery to authenticate FCP devices"); | 2627 | "Use ADISC on rediscovery to authenticate FCP devices"); |
1549 | 2628 | ||
1550 | /* | 2629 | /* |
2630 | # lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue | ||
2631 | # depth. Default value is 0. When the value of this parameter is zero the | ||
2632 | # SCSI command completion time is not used for controlling I/O queue depth. When | ||
2633 | # the parameter is set to a non-zero value, the I/O queue depth is controlled | ||
2634 | # to limit the I/O completion time to the parameter value. | ||
2635 | # The value is set in milliseconds. | ||
2636 | */ | ||
2637 | static int lpfc_max_scsicmpl_time; | ||
2638 | module_param(lpfc_max_scsicmpl_time, int, 0); | ||
2639 | MODULE_PARM_DESC(lpfc_max_scsicmpl_time, | ||
2640 | "Use command completion time to control queue depth"); | ||
2641 | lpfc_vport_param_show(max_scsicmpl_time); | ||
2642 | lpfc_vport_param_init(max_scsicmpl_time, 0, 0, 60000); | ||
2643 | static int | ||
2644 | lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val) | ||
2645 | { | ||
2646 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
2647 | struct lpfc_nodelist *ndlp, *next_ndlp; | ||
2648 | |||
2649 | if (val == vport->cfg_max_scsicmpl_time) | ||
2650 | return 0; | ||
2651 | if ((val < 0) || (val > 60000)) | ||
2652 | return -EINVAL; | ||
2653 | vport->cfg_max_scsicmpl_time = val; | ||
2654 | |||
2655 | spin_lock_irq(shost->host_lock); | ||
2656 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { | ||
2657 | if (!NLP_CHK_NODE_ACT(ndlp)) | ||
2658 | continue; | ||
2659 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) | ||
2660 | continue; | ||
2661 | ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH; | ||
2662 | } | ||
2663 | spin_unlock_irq(shost->host_lock); | ||
2664 | return 0; | ||
2665 | } | ||
2666 | lpfc_vport_param_store(max_scsicmpl_time); | ||
2667 | static DEVICE_ATTR(lpfc_max_scsicmpl_time, S_IRUGO | S_IWUSR, | ||
2668 | lpfc_max_scsicmpl_time_show, | ||
2669 | lpfc_max_scsicmpl_time_store); | ||
2670 | |||
2671 | /* | ||
1551 | # lpfc_ack0: Use ACK0, instead of ACK1 for class 2 acknowledgement. Value | 2672 | # lpfc_ack0: Use ACK0, instead of ACK1 for class 2 acknowledgement. Value |
1552 | # range is [0,1]. Default value is 0. | 2673 | # range is [0,1]. Default value is 0. |
1553 | */ | 2674 | */ |
@@ -1623,12 +2744,12 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255, | |||
1623 | /* | 2744 | /* |
1624 | # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that | 2745 | # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that |
1625 | # support this feature | 2746 | # support this feature |
1626 | # 0 = MSI disabled (default) | 2747 | # 0 = MSI disabled |
1627 | # 1 = MSI enabled | 2748 | # 1 = MSI enabled |
1628 | # 2 = MSI-X enabled | 2749 | # 2 = MSI-X enabled (default) |
1629 | # Value range is [0,2]. Default value is 0. | 2750 | # Value range is [0,2]. Default value is 2. |
1630 | */ | 2751 | */ |
1631 | LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or " | 2752 | LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or " |
1632 | "MSI-X (2), if possible"); | 2753 | "MSI-X (2), if possible"); |
1633 | 2754 | ||
1634 | /* | 2755 | /* |
@@ -1668,6 +2789,7 @@ struct device_attribute *lpfc_hba_attrs[] = { | |||
1668 | &dev_attr_option_rom_version, | 2789 | &dev_attr_option_rom_version, |
1669 | &dev_attr_link_state, | 2790 | &dev_attr_link_state, |
1670 | &dev_attr_num_discovered_ports, | 2791 | &dev_attr_num_discovered_ports, |
2792 | &dev_attr_menlo_mgmt_mode, | ||
1671 | &dev_attr_lpfc_drvr_version, | 2793 | &dev_attr_lpfc_drvr_version, |
1672 | &dev_attr_lpfc_temp_sensor, | 2794 | &dev_attr_lpfc_temp_sensor, |
1673 | &dev_attr_lpfc_log_verbose, | 2795 | &dev_attr_lpfc_log_verbose, |
@@ -1709,6 +2831,8 @@ struct device_attribute *lpfc_hba_attrs[] = { | |||
1709 | &dev_attr_lpfc_enable_hba_reset, | 2831 | &dev_attr_lpfc_enable_hba_reset, |
1710 | &dev_attr_lpfc_enable_hba_heartbeat, | 2832 | &dev_attr_lpfc_enable_hba_heartbeat, |
1711 | &dev_attr_lpfc_sg_seg_cnt, | 2833 | &dev_attr_lpfc_sg_seg_cnt, |
2834 | &dev_attr_lpfc_max_scsicmpl_time, | ||
2835 | &dev_attr_lpfc_stat_data_ctrl, | ||
1712 | NULL, | 2836 | NULL, |
1713 | }; | 2837 | }; |
1714 | 2838 | ||
@@ -1731,9 +2855,29 @@ struct device_attribute *lpfc_vport_attrs[] = { | |||
1731 | &dev_attr_nport_evt_cnt, | 2855 | &dev_attr_nport_evt_cnt, |
1732 | &dev_attr_npiv_info, | 2856 | &dev_attr_npiv_info, |
1733 | &dev_attr_lpfc_enable_da_id, | 2857 | &dev_attr_lpfc_enable_da_id, |
2858 | &dev_attr_lpfc_max_scsicmpl_time, | ||
2859 | &dev_attr_lpfc_stat_data_ctrl, | ||
1734 | NULL, | 2860 | NULL, |
1735 | }; | 2861 | }; |
1736 | 2862 | ||
2863 | /** | ||
2864 | * sysfs_ctlreg_write: Write method for writing to ctlreg. | ||
2865 | * @kobj: kernel kobject that contains the kernel class device. | ||
2866 | * @bin_attr: kernel attributes passed to us. | ||
2867 | * @buf: contains the data to be written to the adapter IOREG space. | ||
2868 | * @off: offset into buffer to beginning of data. | ||
2869 | * @count: bytes to transfer. | ||
2870 | * | ||
2871 | * Description: | ||
2872 | * Accessed via /sys/class/scsi_host/hostxxx/ctlreg. | ||
2873 | * Uses the adapter io control registers to send buf contents to the adapter. | ||
2874 | * | ||
2875 | * Returns: | ||
2876 | * -ERANGE off and count combo out of range | ||
2877 | * -EINVAL off, count or buff address invalid | ||
2878 | * -EPERM adapter is offline | ||
2879 | * value of count, buf contents written | ||
2880 | **/ | ||
1737 | static ssize_t | 2881 | static ssize_t |
1738 | sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr, | 2882 | sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr, |
1739 | char *buf, loff_t off, size_t count) | 2883 | char *buf, loff_t off, size_t count) |
@@ -1766,6 +2910,23 @@ sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr, | |||
1766 | return count; | 2910 | return count; |
1767 | } | 2911 | } |
1768 | 2912 | ||
2913 | /** | ||
2914 | * sysfs_ctlreg_read: Read method for reading from ctlreg. | ||
2915 | * @kobj: kernel kobject that contains the kernel class device. | ||
2916 | * @bin_attr: kernel attributes passed to us. | ||
2917 | * @buf: if successful, contains the data from the adapter IOREG space. | ||
2918 | * @off: offset into buffer to beginning of data. | ||
2919 | * @count: bytes to transfer. | ||
2920 | * | ||
2921 | * Description: | ||
2922 | * Accessed via /sys/class/scsi_host/hostxxx/ctlreg. | ||
2923 | * Uses the adapter io control registers to read data into buf. | ||
2924 | * | ||
2925 | * Returns: | ||
2926 | * -ERANGE off and count combo out of range | ||
2927 | * -EINVAL off, count or buff address invalid | ||
2928 | * value of count, buf contents read | ||
2929 | **/ | ||
1769 | static ssize_t | 2930 | static ssize_t |
1770 | sysfs_ctlreg_read(struct kobject *kobj, struct bin_attribute *bin_attr, | 2931 | sysfs_ctlreg_read(struct kobject *kobj, struct bin_attribute *bin_attr, |
1771 | char *buf, loff_t off, size_t count) | 2932 | char *buf, loff_t off, size_t count) |
@@ -1810,7 +2971,10 @@ static struct bin_attribute sysfs_ctlreg_attr = { | |||
1810 | .write = sysfs_ctlreg_write, | 2971 | .write = sysfs_ctlreg_write, |
1811 | }; | 2972 | }; |
1812 | 2973 | ||
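Both ctlreg callbacks treat the file offset as a byte offset into the adapter IOREG space, so user space drives them with positional reads and writes on /sys/class/scsi_host/hostxxx/ctlreg. A minimal sketch that dumps one 32-bit register; the host number and the 4-byte aligned access are assumptions based on the error codes documented above.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* Assumed path; requires root and an online adapter. */
        const char *path = "/sys/class/scsi_host/host0/ctlreg";
        uint32_t reg;
        int fd = open(path, O_RDONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* The file offset selects the register; access is 4-byte aligned. */
        if (pread(fd, &reg, sizeof(reg), 0) != sizeof(reg)) {
                perror("pread");
                close(fd);
                return 1;
        }
        printf("ctlreg[0] = 0x%08x\n", (unsigned int)reg);
        close(fd);
        return 0;
}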
1813 | 2974 | /** | |
2975 | * sysfs_mbox_idle: frees the sysfs mailbox. | ||
2976 | * @phba: lpfc_hba pointer | ||
2977 | **/ | ||
1814 | static void | 2978 | static void |
1815 | sysfs_mbox_idle(struct lpfc_hba *phba) | 2979 | sysfs_mbox_idle(struct lpfc_hba *phba) |
1816 | { | 2980 | { |
@@ -1824,6 +2988,27 @@ sysfs_mbox_idle(struct lpfc_hba *phba) | |||
1824 | } | 2988 | } |
1825 | } | 2989 | } |
1826 | 2990 | ||
2991 | /** | ||
2992 | * sysfs_mbox_write: Write method for writing information via mbox. | ||
2993 | * @kobj: kernel kobject that contains the kernel class device. | ||
2994 | * @bin_attr: kernel attributes passed to us. | ||
2995 | * @buf: contains the data to be written to sysfs mbox. | ||
2996 | * @off: offset into buffer to beginning of data. | ||
2997 | * @count: bytes to transfer. | ||
2998 | * | ||
2999 | * Description: | ||
3000 | * Accessed via /sys/class/scsi_host/hostxxx/mbox. | ||
3001 | * Uses the sysfs mbox to send buf contents to the adapter. | ||
3002 | * | ||
3003 | * Returns: | ||
3004 | * -ERANGE off and count combo out of range | ||
3005 | * -EINVAL off, count or buff address invalid | ||
3006 | * zero if count is zero | ||
3007 | * -EPERM adapter is offline | ||
3008 | * -ENOMEM failed to allocate memory for the mailbox | ||
3009 | * -EAGAIN offset, state or mbox is NULL | ||
3010 | * count number of bytes transferred | ||
3011 | **/ | ||
1827 | static ssize_t | 3012 | static ssize_t |
1828 | sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr, | 3013 | sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr, |
1829 | char *buf, loff_t off, size_t count) | 3014 | char *buf, loff_t off, size_t count) |
@@ -1878,6 +3063,29 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr, | |||
1878 | return count; | 3063 | return count; |
1879 | } | 3064 | } |
1880 | 3065 | ||
3066 | /** | ||
3067 | * sysfs_mbox_read: Read method for reading information via mbox. | ||
3068 | * @kobj: kernel kobject that contains the kernel class device. | ||
3069 | * @bin_attr: kernel attributes passed to us. | ||
3070 | * @buf: on return, contains the data read from the sysfs mbox. | ||
3071 | * @off: offset into buffer to beginning of data. | ||
3072 | * @count: bytes to transfer. | ||
3073 | * | ||
3074 | * Description: | ||
3075 | * Accessed via /sys/class/scsi_host/hostxxx/mbox. | ||
3076 | * Uses the sysfs mbox to receive data from the adapter. | ||
3077 | * | ||
3078 | * Returns: | ||
3079 | * -ERANGE off greater than mailbox command size | ||
3080 | * -EINVAL off, count or buff address invalid | ||
3081 | * zero if off and count are zero | ||
3082 | * -EACCES adapter over temp | ||
3083 | * -EPERM garbage can value to catch a multitude of errors | ||
3084 | * -EAGAIN management IO not permitted, state or off error | ||
3085 | * -ETIME mailbox timeout | ||
3086 | * -ENODEV mailbox error | ||
3087 | * count number of bytes transferred | ||
3088 | **/ | ||
1881 | static ssize_t | 3089 | static ssize_t |
1882 | sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, | 3090 | sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, |
1883 | char *buf, loff_t off, size_t count) | 3091 | char *buf, loff_t off, size_t count) |
@@ -1954,6 +3162,8 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, | |||
1954 | case MBX_DEL_LD_ENTRY: | 3162 | case MBX_DEL_LD_ENTRY: |
1955 | case MBX_SET_VARIABLE: | 3163 | case MBX_SET_VARIABLE: |
1956 | case MBX_WRITE_WWN: | 3164 | case MBX_WRITE_WWN: |
3165 | case MBX_PORT_CAPABILITIES: | ||
3166 | case MBX_PORT_IOV_CONTROL: | ||
1957 | break; | 3167 | break; |
1958 | case MBX_READ_SPARM64: | 3168 | case MBX_READ_SPARM64: |
1959 | case MBX_READ_LA: | 3169 | case MBX_READ_LA: |
@@ -1978,17 +3188,15 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, | |||
1978 | /* If HBA encountered an error attention, allow only DUMP | 3188 | /* If HBA encountered an error attention, allow only DUMP |
1979 | * or RESTART mailbox commands until the HBA is restarted. | 3189 | * or RESTART mailbox commands until the HBA is restarted. |
1980 | */ | 3190 | */ |
1981 | if ((phba->pport->stopped) && | 3191 | if (phba->pport->stopped && |
1982 | (phba->sysfs_mbox.mbox->mb.mbxCommand != | 3192 | phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_DUMP_MEMORY && |
1983 | MBX_DUMP_MEMORY && | 3193 | phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_RESTART && |
1984 | phba->sysfs_mbox.mbox->mb.mbxCommand != | 3194 | phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_VPARMS && |
1985 | MBX_RESTART && | 3195 | phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_WWN) |
1986 | phba->sysfs_mbox.mbox->mb.mbxCommand != | 3196 | lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, |
1987 | MBX_WRITE_VPARMS)) { | 3197 | "1259 mbox: Issued mailbox cmd " |
1988 | sysfs_mbox_idle(phba); | 3198 | "0x%x while in stopped state.\n", |
1989 | spin_unlock_irq(&phba->hbalock); | 3199 | phba->sysfs_mbox.mbox->mb.mbxCommand); |
1990 | return -EPERM; | ||
1991 | } | ||
1992 | 3200 | ||
1993 | phba->sysfs_mbox.mbox->vport = vport; | 3201 | phba->sysfs_mbox.mbox->vport = vport; |
1994 | 3202 | ||
@@ -2059,6 +3267,14 @@ static struct bin_attribute sysfs_mbox_attr = { | |||
2059 | .write = sysfs_mbox_write, | 3267 | .write = sysfs_mbox_write, |
2060 | }; | 3268 | }; |
2061 | 3269 | ||
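Read together, the mbox write and read kernel-docs describe a two-step protocol: write the raw mailbox command into the attribute, then read it back, which makes the driver issue the command and hand back the completed mailbox. A heavily hedged user-space sketch; the 256-byte command size, the single write/read at offset zero, and the sysfs path are assumptions, and the real layout is the MAILBOX_t structure in lpfc_hw.h.

#include <fcntl.h>
#include <unistd.h>

#define MBOX_SIZE 256   /* assumption: raw mailbox command size (MAILBOX_t) */

/* Issue one raw mailbox command through the sysfs mbox attribute (sketch). */
int issue_mbox(const char *path, const unsigned char *cmd, unsigned char *rsp)
{
        int fd = open(path, O_RDWR);

        if (fd < 0)
                return -1;
        /* Step 1: hand the raw mailbox command to the driver. */
        if (pwrite(fd, cmd, MBOX_SIZE, 0) != MBOX_SIZE) {
                close(fd);
                return -1;
        }
        /* Step 2: reading back makes the driver issue the command and
         * returns the completed mailbox, including its status word. */
        if (pread(fd, rsp, MBOX_SIZE, 0) != MBOX_SIZE) {
                close(fd);
                return -1;
        }
        close(fd);
        return 0;
}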
3270 | /** | ||
3271 | * lpfc_alloc_sysfs_attr: Creates the ctlreg, mbox and drvr_stat_data entries. | ||
3272 | * @vport: address of lpfc vport structure. | ||
3273 | * | ||
3274 | * Return codes: | ||
3275 | * zero on success | ||
3276 | * error return code from sysfs_create_bin_file() | ||
3277 | **/ | ||
2062 | int | 3278 | int |
2063 | lpfc_alloc_sysfs_attr(struct lpfc_vport *vport) | 3279 | lpfc_alloc_sysfs_attr(struct lpfc_vport *vport) |
2064 | { | 3280 | { |
@@ -2075,18 +3291,30 @@ lpfc_alloc_sysfs_attr(struct lpfc_vport *vport) | |||
2075 | if (error) | 3291 | if (error) |
2076 | goto out_remove_ctlreg_attr; | 3292 | goto out_remove_ctlreg_attr; |
2077 | 3293 | ||
3294 | error = sysfs_create_bin_file(&shost->shost_dev.kobj, | ||
3295 | &sysfs_drvr_stat_data_attr); | ||
3296 | if (error) | ||
3297 | goto out_remove_mbox_attr; | ||
3298 | |||
2078 | return 0; | 3299 | return 0; |
3300 | out_remove_mbox_attr: | ||
3301 | sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr); | ||
2079 | out_remove_ctlreg_attr: | 3302 | out_remove_ctlreg_attr: |
2080 | sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); | 3303 | sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); |
2081 | out: | 3304 | out: |
2082 | return error; | 3305 | return error; |
2083 | } | 3306 | } |
2084 | 3307 | ||
3308 | /** | ||
3309 | * lpfc_free_sysfs_attr: Removes the ctlreg, mbox and drvr_stat_data entries. | ||
3310 | * @vport: address of lpfc vport structure. | ||
3311 | **/ | ||
2085 | void | 3312 | void |
2086 | lpfc_free_sysfs_attr(struct lpfc_vport *vport) | 3313 | lpfc_free_sysfs_attr(struct lpfc_vport *vport) |
2087 | { | 3314 | { |
2088 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | 3315 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
2089 | 3316 | sysfs_remove_bin_file(&shost->shost_dev.kobj, | |
3317 | &sysfs_drvr_stat_data_attr); | ||
2090 | sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr); | 3318 | sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr); |
2091 | sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); | 3319 | sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); |
2092 | } | 3320 | } |
@@ -2096,6 +3324,10 @@ lpfc_free_sysfs_attr(struct lpfc_vport *vport) | |||
2096 | * Dynamic FC Host Attributes Support | 3324 | * Dynamic FC Host Attributes Support |
2097 | */ | 3325 | */ |
2098 | 3326 | ||
3327 | /** | ||
3328 | * lpfc_get_host_port_id: Copy the vport DID into the scsi host port id. | ||
3329 | * @shost: kernel scsi host pointer. | ||
3330 | **/ | ||
2099 | static void | 3331 | static void |
2100 | lpfc_get_host_port_id(struct Scsi_Host *shost) | 3332 | lpfc_get_host_port_id(struct Scsi_Host *shost) |
2101 | { | 3333 | { |
@@ -2105,6 +3337,10 @@ lpfc_get_host_port_id(struct Scsi_Host *shost) | |||
2105 | fc_host_port_id(shost) = vport->fc_myDID; | 3337 | fc_host_port_id(shost) = vport->fc_myDID; |
2106 | } | 3338 | } |
2107 | 3339 | ||
3340 | /** | ||
3341 | * lpfc_get_host_port_type: Set the value of the scsi host port type. | ||
3342 | * @shost: kernel scsi host pointer. | ||
3343 | **/ | ||
2108 | static void | 3344 | static void |
2109 | lpfc_get_host_port_type(struct Scsi_Host *shost) | 3345 | lpfc_get_host_port_type(struct Scsi_Host *shost) |
2110 | { | 3346 | { |
@@ -2133,6 +3369,10 @@ lpfc_get_host_port_type(struct Scsi_Host *shost) | |||
2133 | spin_unlock_irq(shost->host_lock); | 3369 | spin_unlock_irq(shost->host_lock); |
2134 | } | 3370 | } |
2135 | 3371 | ||
3372 | /** | ||
3373 | * lpfc_get_host_port_state: Set the value of the scsi host port state. | ||
3374 | * @shost: kernel scsi host pointer. | ||
3375 | **/ | ||
2136 | static void | 3376 | static void |
2137 | lpfc_get_host_port_state(struct Scsi_Host *shost) | 3377 | lpfc_get_host_port_state(struct Scsi_Host *shost) |
2138 | { | 3378 | { |
@@ -2167,6 +3407,10 @@ lpfc_get_host_port_state(struct Scsi_Host *shost) | |||
2167 | spin_unlock_irq(shost->host_lock); | 3407 | spin_unlock_irq(shost->host_lock); |
2168 | } | 3408 | } |
2169 | 3409 | ||
3410 | /** | ||
3411 | * lpfc_get_host_speed: Set the value of the scsi host speed. | ||
3412 | * @shost: kernel scsi host pointer. | ||
3413 | **/ | ||
2170 | static void | 3414 | static void |
2171 | lpfc_get_host_speed(struct Scsi_Host *shost) | 3415 | lpfc_get_host_speed(struct Scsi_Host *shost) |
2172 | { | 3416 | { |
@@ -2199,6 +3443,10 @@ lpfc_get_host_speed(struct Scsi_Host *shost) | |||
2199 | spin_unlock_irq(shost->host_lock); | 3443 | spin_unlock_irq(shost->host_lock); |
2200 | } | 3444 | } |
2201 | 3445 | ||
3446 | /** | ||
3447 | * lpfc_get_host_fabric_name: Set the value of the scsi host fabric name. | ||
3448 | * @shost: kernel scsi host pointer. | ||
3449 | **/ | ||
2202 | static void | 3450 | static void |
2203 | lpfc_get_host_fabric_name (struct Scsi_Host *shost) | 3451 | lpfc_get_host_fabric_name (struct Scsi_Host *shost) |
2204 | { | 3452 | { |
@@ -2221,6 +3469,18 @@ lpfc_get_host_fabric_name (struct Scsi_Host *shost) | |||
2221 | fc_host_fabric_name(shost) = node_name; | 3469 | fc_host_fabric_name(shost) = node_name; |
2222 | } | 3470 | } |
2223 | 3471 | ||
3472 | /** | ||
3473 | * lpfc_get_stats: Return statistical information about the adapter. | ||
3474 | * @shost: kernel scsi host pointer. | ||
3475 | * | ||
3476 | * Notes: | ||
3477 | * NULL on error for link down, no mbox pool, sli2 active, | ||
3478 | * management not allowed, memory allocation error, or mbox error. | ||
3479 | * | ||
3480 | * Returns: | ||
3481 | * NULL for error | ||
3482 | * address of the adapter host statistics | ||
3483 | **/ | ||
2224 | static struct fc_host_statistics * | 3484 | static struct fc_host_statistics * |
2225 | lpfc_get_stats(struct Scsi_Host *shost) | 3485 | lpfc_get_stats(struct Scsi_Host *shost) |
2226 | { | 3486 | { |
@@ -2334,6 +3594,10 @@ lpfc_get_stats(struct Scsi_Host *shost) | |||
2334 | return hs; | 3594 | return hs; |
2335 | } | 3595 | } |
2336 | 3596 | ||
3597 | /** | ||
3598 | * lpfc_reset_stats: Copy the adapter link stats information. | ||
3599 | * @shost: kernel scsi host pointer. | ||
3600 | **/ | ||
2337 | static void | 3601 | static void |
2338 | lpfc_reset_stats(struct Scsi_Host *shost) | 3602 | lpfc_reset_stats(struct Scsi_Host *shost) |
2339 | { | 3603 | { |
@@ -2411,6 +3675,14 @@ lpfc_reset_stats(struct Scsi_Host *shost) | |||
2411 | * are no sysfs handlers for link_down_tmo. | 3675 | * are no sysfs handlers for link_down_tmo. |
2412 | */ | 3676 | */ |
2413 | 3677 | ||
3678 | /** | ||
3679 | * lpfc_get_node_by_target: Return the nodelist for a target. | ||
3680 | * @starget: kernel scsi target pointer. | ||
3681 | * | ||
3682 | * Returns: | ||
3683 | * address of the node list if found | ||
3684 | * NULL target not found | ||
3685 | **/ | ||
2414 | static struct lpfc_nodelist * | 3686 | static struct lpfc_nodelist * |
2415 | lpfc_get_node_by_target(struct scsi_target *starget) | 3687 | lpfc_get_node_by_target(struct scsi_target *starget) |
2416 | { | 3688 | { |
@@ -2432,6 +3704,10 @@ lpfc_get_node_by_target(struct scsi_target *starget) | |||
2432 | return NULL; | 3704 | return NULL; |
2433 | } | 3705 | } |
2434 | 3706 | ||
3707 | /** | ||
3708 | * lpfc_get_starget_port_id: Set the target port id to the ndlp DID or -1. | ||
3709 | * @starget: kernel scsi target pointer. | ||
3710 | **/ | ||
2435 | static void | 3711 | static void |
2436 | lpfc_get_starget_port_id(struct scsi_target *starget) | 3712 | lpfc_get_starget_port_id(struct scsi_target *starget) |
2437 | { | 3713 | { |
@@ -2440,6 +3716,12 @@ lpfc_get_starget_port_id(struct scsi_target *starget) | |||
2440 | fc_starget_port_id(starget) = ndlp ? ndlp->nlp_DID : -1; | 3716 | fc_starget_port_id(starget) = ndlp ? ndlp->nlp_DID : -1; |
2441 | } | 3717 | } |
2442 | 3718 | ||
3719 | /** | ||
3720 | * lpfc_get_starget_node_name: Set the target node name. | ||
3721 | * @starget: kernel scsi target pointer. | ||
3722 | * | ||
3723 | * Description: Set the target node name to the ndlp node name wwn or zero. | ||
3724 | **/ | ||
2443 | static void | 3725 | static void |
2444 | lpfc_get_starget_node_name(struct scsi_target *starget) | 3726 | lpfc_get_starget_node_name(struct scsi_target *starget) |
2445 | { | 3727 | { |
@@ -2449,6 +3731,12 @@ lpfc_get_starget_node_name(struct scsi_target *starget) | |||
2449 | ndlp ? wwn_to_u64(ndlp->nlp_nodename.u.wwn) : 0; | 3731 | ndlp ? wwn_to_u64(ndlp->nlp_nodename.u.wwn) : 0; |
2450 | } | 3732 | } |
2451 | 3733 | ||
3734 | /** | ||
3735 | * lpfc_get_starget_port_name: Set the target port name. | ||
3736 | * @starget: kernel scsi target pointer. | ||
3737 | * | ||
3738 | * Description: set the target port name to the ndlp port name wwn or zero. | ||
3739 | **/ | ||
2452 | static void | 3740 | static void |
2453 | lpfc_get_starget_port_name(struct scsi_target *starget) | 3741 | lpfc_get_starget_port_name(struct scsi_target *starget) |
2454 | { | 3742 | { |
@@ -2458,6 +3746,15 @@ lpfc_get_starget_port_name(struct scsi_target *starget) | |||
2458 | ndlp ? wwn_to_u64(ndlp->nlp_portname.u.wwn) : 0; | 3746 | ndlp ? wwn_to_u64(ndlp->nlp_portname.u.wwn) : 0; |
2459 | } | 3747 | } |
2460 | 3748 | ||
3749 | /** | ||
3750 | * lpfc_set_rport_loss_tmo: Set the rport dev loss tmo. | ||
3751 | * @rport: fc rport address. | ||
3752 | * @timeout: new value for dev loss tmo. | ||
3753 | * | ||
3754 | * Description: | ||
3755 | * If timeout is non zero set the dev_loss_tmo to timeout, else set | ||
3756 | * dev_loss_tmo to one. | ||
3757 | **/ | ||
2461 | static void | 3758 | static void |
2462 | lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) | 3759 | lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) |
2463 | { | 3760 | { |
@@ -2467,7 +3764,18 @@ lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) | |||
2467 | rport->dev_loss_tmo = 1; | 3764 | rport->dev_loss_tmo = 1; |
2468 | } | 3765 | } |
2469 | 3766 | ||
2470 | 3767 | /** | |
3768 | * lpfc_rport_show_function: Return rport target information. | ||
3769 | * | ||
3770 | * Description: | ||
3771 | * Macro that uses field to generate a function with the name lpfc_show_rport_ | ||
3772 | * | ||
3773 | * lpfc_show_rport_##field: returns the bytes formatted in buf | ||
3774 | * @cdev: class converted to an fc_rport. | ||
3775 | * @buf: on return contains the target_field or zero. | ||
3776 | * | ||
3777 | * Returns: size of formatted string. | ||
3778 | **/ | ||
2471 | #define lpfc_rport_show_function(field, format_string, sz, cast) \ | 3779 | #define lpfc_rport_show_function(field, format_string, sz, cast) \ |
2472 | static ssize_t \ | 3780 | static ssize_t \ |
2473 | lpfc_show_rport_##field (struct device *dev, \ | 3781 | lpfc_show_rport_##field (struct device *dev, \ |
@@ -2602,6 +3910,10 @@ struct fc_function_template lpfc_vport_transport_functions = { | |||
2602 | .vport_disable = lpfc_vport_disable, | 3910 | .vport_disable = lpfc_vport_disable, |
2603 | }; | 3911 | }; |
2604 | 3912 | ||
3913 | /** | ||
3914 | * lpfc_get_cfgparam: Used during probe_one to init the adapter structure. | ||
3915 | * @phba: lpfc_hba pointer. | ||
3916 | **/ | ||
2605 | void | 3917 | void |
2606 | lpfc_get_cfgparam(struct lpfc_hba *phba) | 3918 | lpfc_get_cfgparam(struct lpfc_hba *phba) |
2607 | { | 3919 | { |
@@ -2637,6 +3949,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) | |||
2637 | return; | 3949 | return; |
2638 | } | 3950 | } |
2639 | 3951 | ||
3952 | /** | ||
3953 | * lpfc_get_vport_cfgparam: Used during port creation to init the vport structure. | ||
3954 | * @vport: lpfc_vport pointer. | ||
3955 | **/ | ||
2640 | void | 3956 | void |
2641 | lpfc_get_vport_cfgparam(struct lpfc_vport *vport) | 3957 | lpfc_get_vport_cfgparam(struct lpfc_vport *vport) |
2642 | { | 3958 | { |
@@ -2648,6 +3964,7 @@ lpfc_get_vport_cfgparam(struct lpfc_vport *vport) | |||
2648 | lpfc_restrict_login_init(vport, lpfc_restrict_login); | 3964 | lpfc_restrict_login_init(vport, lpfc_restrict_login); |
2649 | lpfc_fcp_class_init(vport, lpfc_fcp_class); | 3965 | lpfc_fcp_class_init(vport, lpfc_fcp_class); |
2650 | lpfc_use_adisc_init(vport, lpfc_use_adisc); | 3966 | lpfc_use_adisc_init(vport, lpfc_use_adisc); |
3967 | lpfc_max_scsicmpl_time_init(vport, lpfc_max_scsicmpl_time); | ||
2651 | lpfc_fdmi_on_init(vport, lpfc_fdmi_on); | 3968 | lpfc_fdmi_on_init(vport, lpfc_fdmi_on); |
2652 | lpfc_discovery_threads_init(vport, lpfc_discovery_threads); | 3969 | lpfc_discovery_threads_init(vport, lpfc_discovery_threads); |
2653 | lpfc_max_luns_init(vport, lpfc_max_luns); | 3970 | lpfc_max_luns_init(vport, lpfc_max_luns); |
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 1b8245213b83..044ef4057d28 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h | |||
@@ -18,7 +18,7 @@ | |||
18 | * included with this package. * | 18 | * included with this package. * |
19 | *******************************************************************/ | 19 | *******************************************************************/ |
20 | 20 | ||
21 | typedef int (*node_filter)(struct lpfc_nodelist *ndlp, void *param); | 21 | typedef int (*node_filter)(struct lpfc_nodelist *, void *); |
22 | 22 | ||
23 | struct fc_rport; | 23 | struct fc_rport; |
24 | void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); | 24 | void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); |
@@ -26,11 +26,11 @@ void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *); | |||
26 | void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); | 26 | void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); |
27 | 27 | ||
28 | void lpfc_heart_beat(struct lpfc_hba *, LPFC_MBOXQ_t *); | 28 | void lpfc_heart_beat(struct lpfc_hba *, LPFC_MBOXQ_t *); |
29 | int lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, | 29 | int lpfc_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *, struct lpfc_dmabuf *); |
30 | struct lpfc_dmabuf *mp); | ||
31 | void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *); | 30 | void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *); |
32 | void lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport); | 31 | void lpfc_issue_clear_la(struct lpfc_hba *, struct lpfc_vport *); |
33 | void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *); | 32 | void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *); |
33 | int lpfc_config_msi(struct lpfc_hba *, LPFC_MBOXQ_t *); | ||
34 | int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int); | 34 | int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int); |
35 | void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *); | 35 | void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *); |
36 | void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *); | 36 | void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *); |
@@ -43,7 +43,7 @@ void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *); | |||
43 | void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); | 43 | void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); |
44 | 44 | ||
45 | struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t); | 45 | struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t); |
46 | void lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove); | 46 | void lpfc_cleanup_rpis(struct lpfc_vport *, int); |
47 | int lpfc_linkdown(struct lpfc_hba *); | 47 | int lpfc_linkdown(struct lpfc_hba *); |
48 | void lpfc_port_link_failure(struct lpfc_vport *); | 48 | void lpfc_port_link_failure(struct lpfc_vport *); |
49 | void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *); | 49 | void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *); |
@@ -135,7 +135,7 @@ void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, | |||
135 | int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t); | 135 | int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t); |
136 | int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int); | 136 | int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int); |
137 | void lpfc_fdmi_tmo(unsigned long); | 137 | void lpfc_fdmi_tmo(unsigned long); |
138 | void lpfc_fdmi_timeout_handler(struct lpfc_vport *vport); | 138 | void lpfc_fdmi_timeout_handler(struct lpfc_vport *); |
139 | 139 | ||
140 | int lpfc_config_port_prep(struct lpfc_hba *); | 140 | int lpfc_config_port_prep(struct lpfc_hba *); |
141 | int lpfc_config_port_post(struct lpfc_hba *); | 141 | int lpfc_config_port_post(struct lpfc_hba *); |
@@ -155,6 +155,8 @@ int lpfc_sli_queue_setup(struct lpfc_hba *); | |||
155 | void lpfc_handle_eratt(struct lpfc_hba *); | 155 | void lpfc_handle_eratt(struct lpfc_hba *); |
156 | void lpfc_handle_latt(struct lpfc_hba *); | 156 | void lpfc_handle_latt(struct lpfc_hba *); |
157 | irqreturn_t lpfc_intr_handler(int, void *); | 157 | irqreturn_t lpfc_intr_handler(int, void *); |
158 | irqreturn_t lpfc_sp_intr_handler(int, void *); | ||
159 | irqreturn_t lpfc_fp_intr_handler(int, void *); | ||
158 | 160 | ||
159 | void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *); | 161 | void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *); |
160 | void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *); | 162 | void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *); |
@@ -175,11 +177,12 @@ void lpfc_mem_free(struct lpfc_hba *); | |||
175 | void lpfc_stop_vport_timers(struct lpfc_vport *); | 177 | void lpfc_stop_vport_timers(struct lpfc_vport *); |
176 | 178 | ||
177 | void lpfc_poll_timeout(unsigned long ptr); | 179 | void lpfc_poll_timeout(unsigned long ptr); |
178 | void lpfc_poll_start_timer(struct lpfc_hba * phba); | 180 | void lpfc_poll_start_timer(struct lpfc_hba *); |
179 | void lpfc_sli_poll_fcp_ring(struct lpfc_hba * hba); | 181 | void lpfc_poll_eratt(unsigned long); |
182 | void lpfc_sli_poll_fcp_ring(struct lpfc_hba *); | ||
180 | struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *); | 183 | struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *); |
181 | void lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocb); | 184 | void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *); |
182 | uint16_t lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocb); | 185 | uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *); |
183 | 186 | ||
184 | void lpfc_reset_barrier(struct lpfc_hba * phba); | 187 | void lpfc_reset_barrier(struct lpfc_hba * phba); |
185 | int lpfc_sli_brdready(struct lpfc_hba *, uint32_t); | 188 | int lpfc_sli_brdready(struct lpfc_hba *, uint32_t); |
@@ -187,11 +190,13 @@ int lpfc_sli_brdkill(struct lpfc_hba *); | |||
187 | int lpfc_sli_brdreset(struct lpfc_hba *); | 190 | int lpfc_sli_brdreset(struct lpfc_hba *); |
188 | int lpfc_sli_brdrestart(struct lpfc_hba *); | 191 | int lpfc_sli_brdrestart(struct lpfc_hba *); |
189 | int lpfc_sli_hba_setup(struct lpfc_hba *); | 192 | int lpfc_sli_hba_setup(struct lpfc_hba *); |
193 | int lpfc_sli_config_port(struct lpfc_hba *, int); | ||
190 | int lpfc_sli_host_down(struct lpfc_vport *); | 194 | int lpfc_sli_host_down(struct lpfc_vport *); |
191 | int lpfc_sli_hba_down(struct lpfc_hba *); | 195 | int lpfc_sli_hba_down(struct lpfc_hba *); |
192 | int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); | 196 | int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); |
193 | int lpfc_sli_handle_mb_event(struct lpfc_hba *); | 197 | int lpfc_sli_handle_mb_event(struct lpfc_hba *); |
194 | int lpfc_sli_flush_mbox_queue(struct lpfc_hba *); | 198 | int lpfc_sli_flush_mbox_queue(struct lpfc_hba *); |
199 | int lpfc_sli_check_eratt(struct lpfc_hba *); | ||
195 | int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, | 200 | int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, |
196 | struct lpfc_sli_ring *, uint32_t); | 201 | struct lpfc_sli_ring *, uint32_t); |
197 | void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); | 202 | void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); |
@@ -199,6 +204,7 @@ int lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, | |||
199 | struct lpfc_iocbq *, uint32_t); | 204 | struct lpfc_iocbq *, uint32_t); |
200 | void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t); | 205 | void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t); |
201 | void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); | 206 | void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); |
207 | void lpfc_sli_flush_fcp_rings(struct lpfc_hba *); | ||
202 | int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *, | 208 | int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *, |
203 | struct lpfc_dmabuf *); | 209 | struct lpfc_dmabuf *); |
204 | struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *, | 210 | struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *, |
@@ -226,17 +232,13 @@ struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_vport *, uint32_t); | |||
226 | struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *, | 232 | struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *, |
227 | struct lpfc_name *); | 233 | struct lpfc_name *); |
228 | 234 | ||
229 | int lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq, | 235 | int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); |
230 | uint32_t timeout); | ||
231 | 236 | ||
232 | int lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba, | 237 | int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, struct lpfc_sli_ring *, |
233 | struct lpfc_sli_ring * pring, | 238 | struct lpfc_iocbq *, struct lpfc_iocbq *, |
234 | struct lpfc_iocbq * piocb, | 239 | uint32_t); |
235 | struct lpfc_iocbq * prspiocbq, | 240 | void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *, |
236 | uint32_t timeout); | 241 | struct lpfc_iocbq *); |
237 | void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba, | ||
238 | struct lpfc_iocbq * cmdiocb, | ||
239 | struct lpfc_iocbq * rspiocb); | ||
240 | 242 | ||
241 | void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *); | 243 | void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *); |
242 | 244 | ||
@@ -269,7 +271,7 @@ void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport); | |||
269 | 271 | ||
270 | struct lpfc_vport *lpfc_create_port(struct lpfc_hba *, int, struct device *); | 272 | struct lpfc_vport *lpfc_create_port(struct lpfc_hba *, int, struct device *); |
271 | int lpfc_vport_disable(struct fc_vport *fc_vport, bool disable); | 273 | int lpfc_vport_disable(struct fc_vport *fc_vport, bool disable); |
272 | void lpfc_mbx_unreg_vpi(struct lpfc_vport *); | 274 | int lpfc_mbx_unreg_vpi(struct lpfc_vport *); |
273 | void destroy_port(struct lpfc_vport *); | 275 | void destroy_port(struct lpfc_vport *); |
274 | int lpfc_get_instance(void); | 276 | int lpfc_get_instance(void); |
275 | void lpfc_host_attrib_init(struct Scsi_Host *); | 277 | void lpfc_host_attrib_init(struct Scsi_Host *); |
@@ -290,6 +292,13 @@ void lpfc_unblock_fabric_iocbs(struct lpfc_hba *); | |||
290 | void lpfc_adjust_queue_depth(struct lpfc_hba *); | 292 | void lpfc_adjust_queue_depth(struct lpfc_hba *); |
291 | void lpfc_ramp_down_queue_handler(struct lpfc_hba *); | 293 | void lpfc_ramp_down_queue_handler(struct lpfc_hba *); |
292 | void lpfc_ramp_up_queue_handler(struct lpfc_hba *); | 294 | void lpfc_ramp_up_queue_handler(struct lpfc_hba *); |
295 | void lpfc_scsi_dev_block(struct lpfc_hba *); | ||
296 | |||
297 | void | ||
298 | lpfc_send_els_failure_event(struct lpfc_hba *, struct lpfc_iocbq *, | ||
299 | struct lpfc_iocbq *); | ||
300 | struct lpfc_fast_path_event *lpfc_alloc_fast_evt(struct lpfc_hba *); | ||
301 | void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *); | ||
293 | 302 | ||
294 | #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) | 303 | #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) |
295 | #define HBA_EVENT_RSCN 5 | 304 | #define HBA_EVENT_RSCN 5 |
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 7fc74cf5823b..26dae8bae2d1 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c | |||
@@ -34,6 +34,7 @@ | |||
34 | 34 | ||
35 | #include "lpfc_hw.h" | 35 | #include "lpfc_hw.h" |
36 | #include "lpfc_sli.h" | 36 | #include "lpfc_sli.h" |
37 | #include "lpfc_nl.h" | ||
37 | #include "lpfc_disc.h" | 38 | #include "lpfc_disc.h" |
38 | #include "lpfc_scsi.h" | 39 | #include "lpfc_scsi.h" |
39 | #include "lpfc.h" | 40 | #include "lpfc.h" |
@@ -134,25 +135,24 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
134 | } | 135 | } |
135 | list_del(&head); | 136 | list_del(&head); |
136 | } else { | 137 | } else { |
137 | struct lpfc_iocbq *next; | 138 | INIT_LIST_HEAD(&head); |
138 | 139 | list_add_tail(&head, &piocbq->list); | |
139 | list_for_each_entry_safe(iocbq, next, &piocbq->list, list) { | 140 | list_for_each_entry(iocbq, &head, list) { |
140 | icmd = &iocbq->iocb; | 141 | icmd = &iocbq->iocb; |
141 | if (icmd->ulpBdeCount == 0) | 142 | if (icmd->ulpBdeCount == 0) |
142 | lpfc_ct_unsol_buffer(phba, piocbq, NULL, 0); | 143 | lpfc_ct_unsol_buffer(phba, iocbq, NULL, 0); |
143 | for (i = 0; i < icmd->ulpBdeCount; i++) { | 144 | for (i = 0; i < icmd->ulpBdeCount; i++) { |
144 | paddr = getPaddr(icmd->un.cont64[i].addrHigh, | 145 | paddr = getPaddr(icmd->un.cont64[i].addrHigh, |
145 | icmd->un.cont64[i].addrLow); | 146 | icmd->un.cont64[i].addrLow); |
146 | mp = lpfc_sli_ringpostbuf_get(phba, pring, | 147 | mp = lpfc_sli_ringpostbuf_get(phba, pring, |
147 | paddr); | 148 | paddr); |
148 | size = icmd->un.cont64[i].tus.f.bdeSize; | 149 | size = icmd->un.cont64[i].tus.f.bdeSize; |
149 | lpfc_ct_unsol_buffer(phba, piocbq, mp, size); | 150 | lpfc_ct_unsol_buffer(phba, iocbq, mp, size); |
150 | lpfc_in_buf_free(phba, mp); | 151 | lpfc_in_buf_free(phba, mp); |
151 | } | 152 | } |
152 | list_del(&iocbq->list); | ||
153 | lpfc_sli_release_iocbq(phba, iocbq); | ||
154 | lpfc_post_buffer(phba, pring, i); | 153 | lpfc_post_buffer(phba, pring, i); |
155 | } | 154 | } |
155 | list_del(&head); | ||
156 | } | 156 | } |
157 | } | 157 | } |
158 | 158 | ||
@@ -212,7 +212,7 @@ lpfc_alloc_ct_rsp(struct lpfc_hba *phba, int cmdcode, struct ulp_bde64 *bpl, | |||
212 | else | 212 | else |
213 | list_add_tail(&mp->list, &mlist->list); | 213 | list_add_tail(&mp->list, &mlist->list); |
214 | 214 | ||
215 | bpl->tus.f.bdeFlags = BUFF_USE_RCV; | 215 | bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; |
216 | /* build buffer ptr list for IOCB */ | 216 | /* build buffer ptr list for IOCB */ |
217 | bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) ); | 217 | bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) ); |
218 | bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) ); | 218 | bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) ); |
@@ -283,7 +283,7 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, | |||
283 | icmd->un.genreq64.bdl.ulpIoTag32 = 0; | 283 | icmd->un.genreq64.bdl.ulpIoTag32 = 0; |
284 | icmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys); | 284 | icmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys); |
285 | icmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys); | 285 | icmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys); |
286 | icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BDL; | 286 | icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; |
287 | icmd->un.genreq64.bdl.bdeSize = (num_entry * sizeof (struct ulp_bde64)); | 287 | icmd->un.genreq64.bdl.bdeSize = (num_entry * sizeof (struct ulp_bde64)); |
288 | 288 | ||
289 | if (usr_flg) | 289 | if (usr_flg) |
@@ -861,7 +861,7 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
861 | 861 | ||
862 | retry++; | 862 | retry++; |
863 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, | 863 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
864 | "0216 Retrying NS cmd %x\n", cmdcode); | 864 | "0250 Retrying NS cmd %x\n", cmdcode); |
865 | rc = lpfc_ns_cmd(vport, cmdcode, retry, 0); | 865 | rc = lpfc_ns_cmd(vport, cmdcode, retry, 0); |
866 | if (rc == 0) | 866 | if (rc == 0) |
867 | goto out; | 867 | goto out; |
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 094b47e94b29..771920bdde44 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2007 Emulex. All rights reserved. * | 4 | * Copyright (C) 2007-2008 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
@@ -35,6 +35,7 @@ | |||
35 | 35 | ||
36 | #include "lpfc_hw.h" | 36 | #include "lpfc_hw.h" |
37 | #include "lpfc_sli.h" | 37 | #include "lpfc_sli.h" |
38 | #include "lpfc_nl.h" | ||
38 | #include "lpfc_disc.h" | 39 | #include "lpfc_disc.h" |
39 | #include "lpfc_scsi.h" | 40 | #include "lpfc_scsi.h" |
40 | #include "lpfc.h" | 41 | #include "lpfc.h" |
@@ -46,13 +47,14 @@ | |||
46 | #include "lpfc_debugfs.h" | 47 | #include "lpfc_debugfs.h" |
47 | 48 | ||
48 | #ifdef CONFIG_LPFC_DEBUG_FS | 49 | #ifdef CONFIG_LPFC_DEBUG_FS |
49 | /* debugfs interface | 50 | /** |
51 | * debugfs interface | ||
50 | * | 52 | * |
51 | * To access this interface the user should: | 53 | * To access this interface the user should: |
52 | * # mkdir /debug | 54 | * # mkdir /debug |
53 | * # mount -t debugfs none /debug | 55 | * # mount -t debugfs none /debug |
54 | * | 56 | * |
55 | * The lpfc debugfs directory hierachy is: | 57 | * The lpfc debugfs directory hierarchy is: |
56 | * lpfc/lpfcX/vportY | 58 | * lpfc/lpfcX/vportY |
57 | * where X is the lpfc hba unique_id | 59 | * where X is the lpfc hba unique_id |
58 | * where Y is the vport VPI on that hba | 60 | * where Y is the vport VPI on that hba |
@@ -61,14 +63,21 @@ | |||
61 | * discovery_trace | 63 | * discovery_trace |
62 | * This is an ASCII readable file that contains a trace of the last | 64 | * This is an ASCII readable file that contains a trace of the last |
63 | * lpfc_debugfs_max_disc_trc events that happened on a specific vport. | 65 | * lpfc_debugfs_max_disc_trc events that happened on a specific vport. |
64 | * See lpfc_debugfs.h for different categories of | 66 | * See lpfc_debugfs.h for different categories of discovery events. |
65 | * discovery events. To enable the discovery trace, the following | 67 | * To enable the discovery trace, the following module parameters must be set: |
66 | * module parameters must be set: | ||
67 | * lpfc_debugfs_enable=1 Turns on lpfc debugfs filesystem support | 68 | * lpfc_debugfs_enable=1 Turns on lpfc debugfs filesystem support |
68 | * lpfc_debugfs_max_disc_trc=X Where X is the event trace depth for | 69 | * lpfc_debugfs_max_disc_trc=X Where X is the event trace depth for |
69 | * EACH vport. X MUST also be a power of 2. | 70 | * EACH vport. X MUST also be a power of 2. |
70 | * lpfc_debugfs_mask_disc_trc=Y Where Y is an event mask as defined in | 71 | * lpfc_debugfs_mask_disc_trc=Y Where Y is an event mask as defined in |
71 | * lpfc_debugfs.h . | 72 | * lpfc_debugfs.h . |
73 | * | ||
74 | * slow_ring_trace | ||
75 | * This is an ASCII readable file that contains a trace of the last | ||
76 | * lpfc_debugfs_max_slow_ring_trc events that happened on a specific HBA. | ||
77 | * To enable the slow ring trace, the following module parameters must be set: | ||
78 | * lpfc_debugfs_enable=1 Turns on lpfc debugfs filesystem support | ||
79 | * lpfc_debugfs_max_slow_ring_trc=X Where X is the event trace depth for | ||
80 | * the HBA. X MUST also be a power of 2. | ||
72 | */ | 81 | */ |
73 | static int lpfc_debugfs_enable = 1; | 82 | static int lpfc_debugfs_enable = 1; |
74 | module_param(lpfc_debugfs_enable, int, 0); | 83 | module_param(lpfc_debugfs_enable, int, 0); |
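The access steps in the comment block above (mount debugfs, set the module parameters) can be exercised from userspace with nothing more than stdio. A minimal sketch, assuming debugfs is mounted at /debug and using lpfc0/vport0 as stand-in instance names (real names follow the lpfcX/vportY pattern described above):

#include <stdio.h>

int main(void)
{
	/* Example path; lpfc0/vport0 stand in for the real lpfcX/vportY names. */
	const char *path = "/debug/lpfc/lpfc0/vport0/discovery_trace";
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* one trace entry per line */
	fclose(f);
	return 0;
}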
@@ -117,6 +126,25 @@ struct lpfc_debug { | |||
117 | static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0); | 126 | static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0); |
118 | static unsigned long lpfc_debugfs_start_time = 0L; | 127 | static unsigned long lpfc_debugfs_start_time = 0L; |
119 | 128 | ||
129 | /** | ||
130 | * lpfc_debugfs_disc_trc_data - Dump discovery logging to a buffer. | ||
131 | * @vport: The vport to gather the log info from. | ||
132 | * @buf: The buffer to dump log into. | ||
133 | * @size: The maximum amount of data to process. | ||
134 | * | ||
135 | * Description: | ||
136 | * This routine gathers the lpfc discovery debugfs data from the @vport and | ||
137 | * dumps it to @buf up to @size number of bytes. It will start at the next entry | ||
138 | * in the log and process the log until the end of the buffer. Then it will | ||
139 | * gather from the beginning of the log and process until the current entry. | ||
140 | * | ||
141 | * Notes: | ||
142 | * Discovery logging will be disabled while this routine dumps the log. | ||
143 | * | ||
144 | * Return Value: | ||
145 | * This routine returns the number of bytes that were dumped into @buf and will | ||
146 | * not exceed @size. | ||
147 | **/ | ||
120 | static int | 148 | static int |
121 | lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size) | 149 | lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size) |
122 | { | 150 | { |
@@ -125,7 +153,6 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size) | |||
125 | struct lpfc_debugfs_trc *dtp; | 153 | struct lpfc_debugfs_trc *dtp; |
126 | char buffer[LPFC_DEBUG_TRC_ENTRY_SIZE]; | 154 | char buffer[LPFC_DEBUG_TRC_ENTRY_SIZE]; |
127 | 155 | ||
128 | |||
129 | enable = lpfc_debugfs_enable; | 156 | enable = lpfc_debugfs_enable; |
130 | lpfc_debugfs_enable = 0; | 157 | lpfc_debugfs_enable = 0; |
131 | 158 | ||
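The wrap-around dump order described for lpfc_debugfs_disc_trc_data above (next entry to the end of the log, then the start of the log up to the current entry) is a plain two-pass walk of a ring buffer. A standalone sketch of that walk, with illustrative types in place of the driver's trace entries:

#include <stdio.h>

struct trc_entry { char text[64]; };

/* Dump 'count' ring entries into buf, oldest first, without overrunning size. */
static int dump_ring(const struct trc_entry *ring, int count, int next_idx,
		     char *buf, int size)
{
	int len = 0, i;

	/* Pass 1: from the next (oldest) slot to the end of the ring. */
	for (i = next_idx; i < count && len < size; i++)
		len += snprintf(buf + len, size - len, "%s\n", ring[i].text);
	/* Pass 2: wrap to the start and stop at the current slot. */
	for (i = 0; i < next_idx && len < size; i++)
		len += snprintf(buf + len, size - len, "%s\n", ring[i].text);
	return len < size ? len : size;	/* bytes placed in buf */
}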
@@ -159,6 +186,25 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size) | |||
159 | return len; | 186 | return len; |
160 | } | 187 | } |
161 | 188 | ||
189 | /** | ||
190 | * lpfc_debugfs_slow_ring_trc_data - Dump slow ring logging to a buffer. | ||
191 | * @phba: The HBA to gather the log info from. | ||
192 | * @buf: The buffer to dump log into. | ||
193 | * @size: The maximum amount of data to process. | ||
194 | * | ||
195 | * Description: | ||
196 | * This routine gathers the lpfc slow ring debugfs data from the @phba and | ||
197 | * dumps it to @buf up to @size number of bytes. It will start at the next entry | ||
198 | * in the log and process the log until the end of the buffer. Then it will | ||
199 | * gather from the beginning of the log and process until the current entry. | ||
200 | * | ||
201 | * Notes: | ||
202 | * Slow ring logging will be disabled while this routine dumps the log. | ||
203 | * | ||
204 | * Return Value: | ||
205 | * This routine returns the number of bytes that were dumped into @buf and will | ||
206 | * not exceed @size. | ||
207 | **/ | ||
162 | static int | 208 | static int |
163 | lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size) | 209 | lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size) |
164 | { | 210 | { |
@@ -203,6 +249,25 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size) | |||
203 | 249 | ||
204 | static int lpfc_debugfs_last_hbq = -1; | 250 | static int lpfc_debugfs_last_hbq = -1; |
205 | 251 | ||
252 | /** | ||
253 | * lpfc_debugfs_hbqinfo_data - Dump host buffer queue info to a buffer. | ||
254 | * @phba: The HBA to gather host buffer info from. | ||
255 | * @buf: The buffer to dump log into. | ||
256 | * @size: The maximum amount of data to process. | ||
257 | * | ||
258 | * Description: | ||
259 | * This routine dumps the host buffer queue info from the @phba to @buf up to | ||
260 | * @size number of bytes. A header that describes the current hbq state will be | ||
261 | * dumped to @buf first and then info on each hbq entry will be dumped to @buf | ||
262 | * until @size bytes have been dumped or all the hbq info has been dumped. | ||
263 | * | ||
264 | * Notes: | ||
265 | * This routine will rotate through each configured HBQ each time called. | ||
266 | * | ||
267 | * Return Value: | ||
268 | * This routine returns the number of bytes that were dumped into @buf and will | ||
269 | * not exceed @size. | ||
270 | **/ | ||
206 | static int | 271 | static int |
207 | lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size) | 272 | lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size) |
208 | { | 273 | { |
@@ -303,6 +368,24 @@ skipit: | |||
303 | 368 | ||
304 | static int lpfc_debugfs_last_hba_slim_off; | 369 | static int lpfc_debugfs_last_hba_slim_off; |
305 | 370 | ||
371 | /** | ||
372 | * lpfc_debugfs_dumpHBASlim_data - Dump HBA SLIM info to a buffer. | ||
373 | * @phba: The HBA to gather SLIM info from. | ||
374 | * @buf: The buffer to dump log into. | ||
375 | * @size: The maximum amount of data to process. | ||
376 | * | ||
377 | * Description: | ||
378 | * This routine dumps the current contents of HBA SLIM for the HBA associated | ||
379 | * with @phba to @buf up to @size bytes of data. This is the raw HBA SLIM data. | ||
380 | * | ||
381 | * Notes: | ||
382 | * This routine will only dump up to 1024 bytes of data each time called and | ||
383 | * should be called multiple times to dump the entire HBA SLIM. | ||
384 | * | ||
385 | * Return Value: | ||
386 | * This routine returns the number of bytes that were dumped into @buf and will | ||
387 | * not exceed @size. | ||
388 | **/ | ||
306 | static int | 389 | static int |
307 | lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size) | 390 | lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size) |
308 | { | 391 | { |
@@ -342,6 +425,21 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size) | |||
342 | return len; | 425 | return len; |
343 | } | 426 | } |
344 | 427 | ||
428 | /** | ||
429 | * lpfc_debugfs_dumpHostSlim_data - Dump host SLIM info to a buffer. | ||
430 | * @phba: The HBA to gather Host SLIM info from. | ||
431 | * @buf: The buffer to dump log into. | ||
432 | * @size: The maximum amount of data to process. | ||
433 | * | ||
434 | * Description: | ||
435 | * This routine dumps the current contents of host SLIM for the host associated | ||
436 | * with @phba to @buf up to @size bytes of data. The dump will contain the | ||
437 | * Mailbox, PCB, Rings, and Registers that are located in host memory. | ||
438 | * | ||
439 | * Return Value: | ||
440 | * This routine returns the number of bytes that were dumped into @buf and will | ||
441 | * not exceed @size. | ||
442 | **/ | ||
345 | static int | 443 | static int |
346 | lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size) | 444 | lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size) |
347 | { | 445 | { |
@@ -357,7 +455,7 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size) | |||
357 | spin_lock_irq(&phba->hbalock); | 455 | spin_lock_irq(&phba->hbalock); |
358 | 456 | ||
359 | len += snprintf(buf+len, size-len, "SLIM Mailbox\n"); | 457 | len += snprintf(buf+len, size-len, "SLIM Mailbox\n"); |
360 | ptr = (uint32_t *)phba->slim2p; | 458 | ptr = (uint32_t *)phba->slim2p.virt; |
361 | i = sizeof(MAILBOX_t); | 459 | i = sizeof(MAILBOX_t); |
362 | while (i > 0) { | 460 | while (i > 0) { |
363 | len += snprintf(buf+len, size-len, | 461 | len += snprintf(buf+len, size-len, |
@@ -370,7 +468,7 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size) | |||
370 | } | 468 | } |
371 | 469 | ||
372 | len += snprintf(buf+len, size-len, "SLIM PCB\n"); | 470 | len += snprintf(buf+len, size-len, "SLIM PCB\n"); |
373 | ptr = (uint32_t *)&phba->slim2p->pcb; | 471 | ptr = (uint32_t *)phba->pcb; |
374 | i = sizeof(PCB_t); | 472 | i = sizeof(PCB_t); |
375 | while (i > 0) { | 473 | while (i > 0) { |
376 | len += snprintf(buf+len, size-len, | 474 | len += snprintf(buf+len, size-len, |
@@ -382,44 +480,16 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size) | |||
382 | off += (8 * sizeof(uint32_t)); | 480 | off += (8 * sizeof(uint32_t)); |
383 | } | 481 | } |
384 | 482 | ||
385 | pgpp = (struct lpfc_pgp *)&phba->slim2p->mbx.us.s3_pgp.port; | 483 | for (i = 0; i < 4; i++) { |
386 | pring = &psli->ring[0]; | 484 | pgpp = &phba->port_gp[i]; |
387 | len += snprintf(buf+len, size-len, | 485 | pring = &psli->ring[i]; |
388 | "Ring 0: CMD GetInx:%d (Max:%d Next:%d Local:%d flg:x%x) " | 486 | len += snprintf(buf+len, size-len, |
389 | "RSP PutInx:%d Max:%d\n", | 487 | "Ring %d: CMD GetInx:%d (Max:%d Next:%d " |
390 | pgpp->cmdGetInx, pring->numCiocb, | 488 | "Local:%d flg:x%x) RSP PutInx:%d Max:%d\n", |
391 | pring->next_cmdidx, pring->local_getidx, pring->flag, | 489 | i, pgpp->cmdGetInx, pring->numCiocb, |
392 | pgpp->rspPutInx, pring->numRiocb); | 490 | pring->next_cmdidx, pring->local_getidx, |
393 | pgpp++; | 491 | pring->flag, pgpp->rspPutInx, pring->numRiocb); |
394 | 492 | } | |
395 | pring = &psli->ring[1]; | ||
396 | len += snprintf(buf+len, size-len, | ||
397 | "Ring 1: CMD GetInx:%d (Max:%d Next:%d Local:%d flg:x%x) " | ||
398 | "RSP PutInx:%d Max:%d\n", | ||
399 | pgpp->cmdGetInx, pring->numCiocb, | ||
400 | pring->next_cmdidx, pring->local_getidx, pring->flag, | ||
401 | pgpp->rspPutInx, pring->numRiocb); | ||
402 | pgpp++; | ||
403 | |||
404 | pring = &psli->ring[2]; | ||
405 | len += snprintf(buf+len, size-len, | ||
406 | "Ring 2: CMD GetInx:%d (Max:%d Next:%d Local:%d flg:x%x) " | ||
407 | "RSP PutInx:%d Max:%d\n", | ||
408 | pgpp->cmdGetInx, pring->numCiocb, | ||
409 | pring->next_cmdidx, pring->local_getidx, pring->flag, | ||
410 | pgpp->rspPutInx, pring->numRiocb); | ||
411 | pgpp++; | ||
412 | |||
413 | pring = &psli->ring[3]; | ||
414 | len += snprintf(buf+len, size-len, | ||
415 | "Ring 3: CMD GetInx:%d (Max:%d Next:%d Local:%d flg:x%x) " | ||
416 | "RSP PutInx:%d Max:%d\n", | ||
417 | pgpp->cmdGetInx, pring->numCiocb, | ||
418 | pring->next_cmdidx, pring->local_getidx, pring->flag, | ||
419 | pgpp->rspPutInx, pring->numRiocb); | ||
420 | |||
421 | |||
422 | ptr = (uint32_t *)&phba->slim2p->mbx.us.s3_pgp.hbq_get; | ||
423 | word0 = readl(phba->HAregaddr); | 493 | word0 = readl(phba->HAregaddr); |
424 | word1 = readl(phba->CAregaddr); | 494 | word1 = readl(phba->CAregaddr); |
425 | word2 = readl(phba->HSregaddr); | 495 | word2 = readl(phba->HSregaddr); |
@@ -430,6 +500,21 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size) | |||
430 | return len; | 500 | return len; |
431 | } | 501 | } |
432 | 502 | ||
503 | /** | ||
504 | * lpfc_debugfs_nodelist_data - Dump target node list to a buffer. | ||
505 | * @vport: The vport to gather target node info from. | ||
506 | * @buf: The buffer to dump log into. | ||
507 | * @size: The maximum amount of data to process. | ||
508 | * | ||
509 | * Description: | ||
510 | * This routine dumps the current target node list associated with @vport to | ||
511 | * @buf up to @size bytes of data. Each node entry in the dump will contain a | ||
512 | * node state, DID, WWPN, WWNN, RPI, flags, type, and other useful fields. | ||
513 | * | ||
514 | * Return Value: | ||
515 | * This routine returns the number of bytes that were dumped into @buf and will | ||
516 | * not exceed @size. | ||
517 | **/ | ||
433 | static int | 518 | static int |
434 | lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) | 519 | lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) |
435 | { | 520 | { |
@@ -513,7 +598,22 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) | |||
513 | } | 598 | } |
514 | #endif | 599 | #endif |
515 | 600 | ||
516 | 601 | /** | |
602 | * lpfc_debugfs_disc_trc - Store discovery trace log. | ||
603 | * @vport: The vport to associate this trace string with for retrieval. | ||
604 | * @mask: Log entry classification. | ||
605 | * @fmt: Format string to be displayed when dumping the log. | ||
606 | * @data1: 1st data parameter to be applied to @fmt. | ||
607 | * @data2: 2nd data parameter to be applied to @fmt. | ||
608 | * @data3: 3rd data parameter to be applied to @fmt. | ||
609 | * | ||
610 | * Description: | ||
611 | * This routine is used by the driver code to add a debugfs log entry to the | ||
612 | * discovery trace buffer associated with @vport. Only entries with a @mask that | ||
613 | * match the current debugfs discovery mask will be saved. Entries that do not | ||
614 | * match will be thrown away. @fmt, @data1, @data2, and @data3 are used like | ||
615 | * printf when displaying the log. | ||
616 | **/ | ||
517 | inline void | 617 | inline void |
518 | lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt, | 618 | lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt, |
519 | uint32_t data1, uint32_t data2, uint32_t data3) | 619 | uint32_t data1, uint32_t data2, uint32_t data3) |
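The disc_trc store routine described above filters on @mask and relies on the earlier requirement that the trace depth be a power of 2, which lets a free-running counter be turned into a slot index with a single AND. A sketch under those assumptions (a plain counter stands in for the driver's atomic counter; all names are illustrative):

#include <string.h>

struct trc_slot { unsigned int seq; char text[64]; };

struct trc_log {
	struct trc_slot *slots;
	unsigned int depth;	/* number of slots; must be a power of 2 */
	unsigned int counter;	/* free-running count of stored entries */
	unsigned int mask;	/* categories currently enabled */
};

/* Store one entry; entries whose category is not in the mask are dropped. */
static void trc_store(struct trc_log *log, unsigned int category,
		      const char *text)
{
	struct trc_slot *slot;

	if (!(log->mask & category))
		return;
	/* depth is a power of 2, so AND with (depth - 1) replaces a modulo. */
	slot = &log->slots[log->counter & (log->depth - 1)];
	log->counter++;
	slot->seq = log->counter;	/* 1-based sequence number */
	strncpy(slot->text, text, sizeof(slot->text) - 1);
	slot->text[sizeof(slot->text) - 1] = '\0';
}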
@@ -542,6 +642,19 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt, | |||
542 | return; | 642 | return; |
543 | } | 643 | } |
544 | 644 | ||
645 | /** | ||
646 | * lpfc_debugfs_slow_ring_trc - Store slow ring trace log. | ||
647 | * @phba: The phba to associate this trace string with for retrieval. | ||
648 | * @fmt: Format string to be displayed when dumping the log. | ||
649 | * @data1: 1st data parameter to be applied to @fmt. | ||
650 | * @data2: 2nd data parameter to be applied to @fmt. | ||
651 | * @data3: 3rd data parameter to be applied to @fmt. | ||
652 | * | ||
653 | * Description: | ||
654 | * This routine is used by the driver code to add a debugfs log entry to the | ||
655 | * slow ring trace buffer associated with @phba. @fmt, @data1, @data2, and | ||
656 | * @data3 are used like printf when displaying the log. | ||
657 | **/ | ||
545 | inline void | 658 | inline void |
546 | lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt, | 659 | lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt, |
547 | uint32_t data1, uint32_t data2, uint32_t data3) | 660 | uint32_t data1, uint32_t data2, uint32_t data3) |
@@ -568,6 +681,21 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt, | |||
568 | } | 681 | } |
569 | 682 | ||
570 | #ifdef CONFIG_LPFC_DEBUG_FS | 683 | #ifdef CONFIG_LPFC_DEBUG_FS |
684 | /** | ||
685 | * lpfc_debugfs_disc_trc_open - Open the discovery trace log. | ||
686 | * @inode: The inode pointer that contains a vport pointer. | ||
687 | * @file: The file pointer to attach the log output. | ||
688 | * | ||
689 | * Description: | ||
690 | * This routine is the entry point for the debugfs open file operation. It gets | ||
691 | * the vport from the i_private field in @inode, allocates the necessary buffer | ||
692 | * for the log, fills the buffer from the in-memory log for this vport, and then | ||
693 | * returns a pointer to that log in the private_data field in @file. | ||
694 | * | ||
695 | * Returns: | ||
696 | * This function returns zero if successful. On error it will return a negative | ||
697 | * error value. | ||
698 | **/ | ||
571 | static int | 699 | static int |
572 | lpfc_debugfs_disc_trc_open(struct inode *inode, struct file *file) | 700 | lpfc_debugfs_disc_trc_open(struct inode *inode, struct file *file) |
573 | { | 701 | { |
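The open-routine descriptions that follow all share one pattern: allocate a page-aligned snapshot buffer, fill it from the in-memory log, and park it in file->private_data for later reads. A hedged kernel-style sketch of that pattern; the example_* names and constants are assumptions for illustration, not the driver's own code:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>

#define EXAMPLE_TRC_ENTRY_SIZE	128
#define EXAMPLE_MAX_TRC		512

struct example_debug {
	char *buffer;
	int len;
};

/* Stand-in for the real log-copy routine. */
static int example_trc_data(void *ctx, char *buf, int size)
{
	return snprintf(buf, size, "trace snapshot for %p\n", ctx);
}

static int example_trc_open(struct inode *inode, struct file *file)
{
	void *ctx = inode->i_private;	/* vport or hba, set at file creation */
	struct example_debug *debug;
	int size;

	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
	if (!debug)
		return -ENOMEM;

	/* Round the snapshot buffer up to a page boundary. */
	size = PAGE_ALIGN(EXAMPLE_MAX_TRC * EXAMPLE_TRC_ENTRY_SIZE);
	debug->buffer = kmalloc(size, GFP_KERNEL);
	if (!debug->buffer) {
		kfree(debug);
		return -ENOMEM;
	}

	/* Snapshot the in-memory log now; reads are served from this copy. */
	debug->len = example_trc_data(ctx, debug->buffer, size);
	file->private_data = debug;
	return 0;
}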
@@ -585,7 +713,7 @@ lpfc_debugfs_disc_trc_open(struct inode *inode, struct file *file) | |||
585 | if (!debug) | 713 | if (!debug) |
586 | goto out; | 714 | goto out; |
587 | 715 | ||
588 | /* Round to page boundry */ | 716 | /* Round to page boundary */ |
589 | size = (lpfc_debugfs_max_disc_trc * LPFC_DEBUG_TRC_ENTRY_SIZE); | 717 | size = (lpfc_debugfs_max_disc_trc * LPFC_DEBUG_TRC_ENTRY_SIZE); |
590 | size = PAGE_ALIGN(size); | 718 | size = PAGE_ALIGN(size); |
591 | 719 | ||
@@ -603,6 +731,21 @@ out: | |||
603 | return rc; | 731 | return rc; |
604 | } | 732 | } |
605 | 733 | ||
734 | /** | ||
735 | * lpfc_debugfs_slow_ring_trc_open - Open the Slow Ring trace log. | ||
736 | * @inode: The inode pointer that contains a vport pointer. | ||
737 | * @file: The file pointer to attach the log output. | ||
738 | * | ||
739 | * Description: | ||
740 | * This routine is the entry point for the debugfs open file operation. It gets | ||
741 | * the vport from the i_private field in @inode, allocates the necessary buffer | ||
742 | * for the log, fills the buffer from the in-memory log for this vport, and then | ||
743 | * returns a pointer to that log in the private_data field in @file. | ||
744 | * | ||
745 | * Returns: | ||
746 | * This function returns zero if successful. On error it will return a negative | ||
747 | * error value. | ||
748 | **/ | ||
606 | static int | 749 | static int |
607 | lpfc_debugfs_slow_ring_trc_open(struct inode *inode, struct file *file) | 750 | lpfc_debugfs_slow_ring_trc_open(struct inode *inode, struct file *file) |
608 | { | 751 | { |
@@ -620,7 +763,7 @@ lpfc_debugfs_slow_ring_trc_open(struct inode *inode, struct file *file) | |||
620 | if (!debug) | 763 | if (!debug) |
621 | goto out; | 764 | goto out; |
622 | 765 | ||
623 | /* Round to page boundry */ | 766 | /* Round to page boundary */ |
624 | size = (lpfc_debugfs_max_slow_ring_trc * LPFC_DEBUG_TRC_ENTRY_SIZE); | 767 | size = (lpfc_debugfs_max_slow_ring_trc * LPFC_DEBUG_TRC_ENTRY_SIZE); |
625 | size = PAGE_ALIGN(size); | 768 | size = PAGE_ALIGN(size); |
626 | 769 | ||
@@ -638,6 +781,21 @@ out: | |||
638 | return rc; | 781 | return rc; |
639 | } | 782 | } |
640 | 783 | ||
784 | /** | ||
785 | * lpfc_debugfs_hbqinfo_open - Open the hbqinfo debugfs buffer. | ||
786 | * @inode: The inode pointer that contains a vport pointer. | ||
787 | * @file: The file pointer to attach the log output. | ||
788 | * | ||
789 | * Description: | ||
790 | * This routine is the entry point for the debugfs open file operation. It gets | ||
791 | * the vport from the i_private field in @inode, allocates the necessary buffer | ||
792 | * for the log, fills the buffer from the in-memory log for this vport, and then | ||
793 | * returns a pointer to that log in the private_data field in @file. | ||
794 | * | ||
795 | * Returns: | ||
796 | * This function returns zero if successful. On error it will return a negative | ||
797 | * error value. | ||
798 | **/ | ||
641 | static int | 799 | static int |
642 | lpfc_debugfs_hbqinfo_open(struct inode *inode, struct file *file) | 800 | lpfc_debugfs_hbqinfo_open(struct inode *inode, struct file *file) |
643 | { | 801 | { |
@@ -649,7 +807,7 @@ lpfc_debugfs_hbqinfo_open(struct inode *inode, struct file *file) | |||
649 | if (!debug) | 807 | if (!debug) |
650 | goto out; | 808 | goto out; |
651 | 809 | ||
652 | /* Round to page boundry */ | 810 | /* Round to page boundary */ |
653 | debug->buffer = kmalloc(LPFC_HBQINFO_SIZE, GFP_KERNEL); | 811 | debug->buffer = kmalloc(LPFC_HBQINFO_SIZE, GFP_KERNEL); |
654 | if (!debug->buffer) { | 812 | if (!debug->buffer) { |
655 | kfree(debug); | 813 | kfree(debug); |
@@ -665,6 +823,21 @@ out: | |||
665 | return rc; | 823 | return rc; |
666 | } | 824 | } |
667 | 825 | ||
826 | /** | ||
827 | * lpfc_debugfs_dumpHBASlim_open - Open the Dump HBA SLIM debugfs buffer. | ||
828 | * @inode: The inode pointer that contains a vport pointer. | ||
829 | * @file: The file pointer to attach the log output. | ||
830 | * | ||
831 | * Description: | ||
832 | * This routine is the entry point for the debugfs open file operation. It gets | ||
833 | * the vport from the i_private field in @inode, allocates the necessary buffer | ||
834 | * for the log, fills the buffer from the in-memory log for this vport, and then | ||
835 | * returns a pointer to that log in the private_data field in @file. | ||
836 | * | ||
837 | * Returns: | ||
838 | * This function returns zero if successful. On error it will return a negative | ||
839 | * error value. | ||
840 | **/ | ||
668 | static int | 841 | static int |
669 | lpfc_debugfs_dumpHBASlim_open(struct inode *inode, struct file *file) | 842 | lpfc_debugfs_dumpHBASlim_open(struct inode *inode, struct file *file) |
670 | { | 843 | { |
@@ -676,7 +849,7 @@ lpfc_debugfs_dumpHBASlim_open(struct inode *inode, struct file *file) | |||
676 | if (!debug) | 849 | if (!debug) |
677 | goto out; | 850 | goto out; |
678 | 851 | ||
679 | /* Round to page boundry */ | 852 | /* Round to page boundary */ |
680 | debug->buffer = kmalloc(LPFC_DUMPHBASLIM_SIZE, GFP_KERNEL); | 853 | debug->buffer = kmalloc(LPFC_DUMPHBASLIM_SIZE, GFP_KERNEL); |
681 | if (!debug->buffer) { | 854 | if (!debug->buffer) { |
682 | kfree(debug); | 855 | kfree(debug); |
@@ -692,6 +865,21 @@ out: | |||
692 | return rc; | 865 | return rc; |
693 | } | 866 | } |
694 | 867 | ||
868 | /** | ||
869 | * lpfc_debugfs_dumpHostSlim_open - Open the Dump Host SLIM debugfs buffer. | ||
870 | * @inode: The inode pointer that contains a vport pointer. | ||
871 | * @file: The file pointer to attach the log output. | ||
872 | * | ||
873 | * Description: | ||
874 | * This routine is the entry point for the debugfs open file operation. It gets | ||
875 | * the vport from the i_private field in @inode, allocates the necessary buffer | ||
876 | * for the log, fills the buffer from the in-memory log for this vport, and then | ||
877 | * returns a pointer to that log in the private_data field in @file. | ||
878 | * | ||
879 | * Returns: | ||
880 | * This function returns zero if successful. On error it will return a negative | ||
881 | * error value. | ||
882 | **/ | ||
695 | static int | 883 | static int |
696 | lpfc_debugfs_dumpHostSlim_open(struct inode *inode, struct file *file) | 884 | lpfc_debugfs_dumpHostSlim_open(struct inode *inode, struct file *file) |
697 | { | 885 | { |
@@ -703,7 +891,7 @@ lpfc_debugfs_dumpHostSlim_open(struct inode *inode, struct file *file) | |||
703 | if (!debug) | 891 | if (!debug) |
704 | goto out; | 892 | goto out; |
705 | 893 | ||
706 | /* Round to page boundry */ | 894 | /* Round to page boundary */ |
707 | debug->buffer = kmalloc(LPFC_DUMPHOSTSLIM_SIZE, GFP_KERNEL); | 895 | debug->buffer = kmalloc(LPFC_DUMPHOSTSLIM_SIZE, GFP_KERNEL); |
708 | if (!debug->buffer) { | 896 | if (!debug->buffer) { |
709 | kfree(debug); | 897 | kfree(debug); |
@@ -719,6 +907,21 @@ out: | |||
719 | return rc; | 907 | return rc; |
720 | } | 908 | } |
721 | 909 | ||
910 | /** | ||
911 | * lpfc_debugfs_nodelist_open - Open the nodelist debugfs file. | ||
912 | * @inode: The inode pointer that contains a vport pointer. | ||
913 | * @file: The file pointer to attach the log output. | ||
914 | * | ||
915 | * Description: | ||
916 | * This routine is the entry point for the debugfs open file operation. It gets | ||
917 | * the vport from the i_private field in @inode, allocates the necessary buffer | ||
918 | * for the log, fills the buffer from the in-memory log for this vport, and then | ||
919 | * returns a pointer to that log in the private_data field in @file. | ||
920 | * | ||
921 | * Returns: | ||
922 | * This function returns zero if successful. On error it will return a negative | ||
923 | * error value. | ||
924 | **/ | ||
722 | static int | 925 | static int |
723 | lpfc_debugfs_nodelist_open(struct inode *inode, struct file *file) | 926 | lpfc_debugfs_nodelist_open(struct inode *inode, struct file *file) |
724 | { | 927 | { |
@@ -730,7 +933,7 @@ lpfc_debugfs_nodelist_open(struct inode *inode, struct file *file) | |||
730 | if (!debug) | 933 | if (!debug) |
731 | goto out; | 934 | goto out; |
732 | 935 | ||
733 | /* Round to page boundry */ | 936 | /* Round to page boundary */ |
734 | debug->buffer = kmalloc(LPFC_NODELIST_SIZE, GFP_KERNEL); | 937 | debug->buffer = kmalloc(LPFC_NODELIST_SIZE, GFP_KERNEL); |
735 | if (!debug->buffer) { | 938 | if (!debug->buffer) { |
736 | kfree(debug); | 939 | kfree(debug); |
@@ -746,6 +949,23 @@ out: | |||
746 | return rc; | 949 | return rc; |
747 | } | 950 | } |
748 | 951 | ||
952 | /** | ||
953 | * lpfc_debugfs_lseek - Seek through a debugfs file. | ||
954 | * @file: The file pointer to seek through. | ||
955 | * @off: The offset to seek to or the amount to seek by. | ||
956 | * @whence: Indicates how to seek. | ||
957 | * | ||
958 | * Description: | ||
959 | * This routine is the entry point for the debugfs lseek file operation. The | ||
960 | * @whence parameter indicates whether @off is the offset to directly seek to, | ||
961 | * or if it is a value to seek forward or reverse by. This function figures out | ||
962 | * what the new offset of the debugfs file will be and assigns that value to the | ||
963 | * f_pos field of @file. | ||
964 | * | ||
965 | * Returns: | ||
966 | * This function returns the new offset if successful and returns a negative | ||
967 | * error if unable to process the seek. | ||
968 | **/ | ||
749 | static loff_t | 969 | static loff_t |
750 | lpfc_debugfs_lseek(struct file *file, loff_t off, int whence) | 970 | lpfc_debugfs_lseek(struct file *file, loff_t off, int whence) |
751 | { | 971 | { |
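The lseek behaviour described above is the standard three-way whence calculation bounded by the snapshot length. A standalone sketch of that arithmetic, using an illustrative struct in place of the driver's debug state:

#include <stdio.h>	/* SEEK_SET, SEEK_CUR, SEEK_END */

struct snapshot { long pos; long len; };

/* Compute the new position from 'whence'; reject anything outside [0, len]. */
static long snapshot_lseek(struct snapshot *s, long off, int whence)
{
	long pos = -1;

	switch (whence) {
	case SEEK_SET:		/* absolute offset */
		pos = off;
		break;
	case SEEK_CUR:		/* relative to the current position */
		pos = s->pos + off;
		break;
	case SEEK_END:		/* relative to the end of the snapshot */
		pos = s->len + off;
		break;
	}
	if (pos < 0 || pos > s->len)
		return -1;	/* stands in for -EINVAL */
	s->pos = pos;
	return pos;
}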
@@ -767,6 +987,22 @@ lpfc_debugfs_lseek(struct file *file, loff_t off, int whence) | |||
767 | return (pos < 0 || pos > debug->len) ? -EINVAL : (file->f_pos = pos); | 987 | return (pos < 0 || pos > debug->len) ? -EINVAL : (file->f_pos = pos); |
768 | } | 988 | } |
769 | 989 | ||
990 | /** | ||
991 | * lpfc_debugfs_read - Read a debugfs file. | ||
992 | * @file: The file pointer to read from. | ||
993 | * @buf: The buffer to copy the data to. | ||
994 | * @nbytes: The number of bytes to read. | ||
995 | * @ppos: The position in the file to start reading from. | ||
996 | * | ||
997 | * Description: | ||
998 | * This routine reads data from the buffer indicated in the private_data | ||
999 | * field of @file. It will start reading at @ppos and copy up to @nbytes of | ||
1000 | * data to @buf. | ||
1001 | * | ||
1002 | * Returns: | ||
1003 | * This function returns the amount of data that was read (this could be less | ||
1004 | * than @nbytes if the end of the file was reached) or a negative error value. | ||
1005 | **/ | ||
770 | static ssize_t | 1006 | static ssize_t |
771 | lpfc_debugfs_read(struct file *file, char __user *buf, | 1007 | lpfc_debugfs_read(struct file *file, char __user *buf, |
772 | size_t nbytes, loff_t *ppos) | 1008 | size_t nbytes, loff_t *ppos) |
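The read behaviour described above is a bounded copy out of a prebuilt buffer starting at *ppos, which matches the bounds logic the kernel's simple_read_from_buffer() helper provides. A standalone sketch of the same logic:

#include <string.h>
#include <stddef.h>

/* Copy up to nbytes from a prebuilt buffer of length len, starting at *ppos.
 * Returns the number of bytes copied; 0 means end of the snapshot. */
static size_t buffer_read(const char *src, size_t len, long *ppos,
			  char *dst, size_t nbytes)
{
	size_t avail;

	if (*ppos < 0 || (size_t)*ppos >= len)
		return 0;
	avail = len - (size_t)*ppos;
	if (nbytes > avail)
		nbytes = avail;		/* short read at the end of the buffer */
	memcpy(dst, src + *ppos, nbytes);
	*ppos += (long)nbytes;
	return nbytes;
}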
@@ -776,6 +1012,18 @@ lpfc_debugfs_read(struct file *file, char __user *buf, | |||
776 | debug->len); | 1012 | debug->len); |
777 | } | 1013 | } |
778 | 1014 | ||
1015 | /** | ||
1016 | * lpfc_debugfs_release - Release the buffer used to store debugfs file data. | ||
1017 | * @inode: The inode pointer that contains a vport pointer. (unused) | ||
1018 | * @file: The file pointer that contains the buffer to release. | ||
1019 | * | ||
1020 | * Description: | ||
1021 | * This routine frees the buffer that was allocated when the debugfs file was | ||
1022 | * opened. | ||
1023 | * | ||
1024 | * Returns: | ||
1025 | * This function returns zero. | ||
1026 | **/ | ||
779 | static int | 1027 | static int |
780 | lpfc_debugfs_release(struct inode *inode, struct file *file) | 1028 | lpfc_debugfs_release(struct inode *inode, struct file *file) |
781 | { | 1029 | { |
@@ -845,6 +1093,16 @@ static struct dentry *lpfc_debugfs_root = NULL; | |||
845 | static atomic_t lpfc_debugfs_hba_count; | 1093 | static atomic_t lpfc_debugfs_hba_count; |
846 | #endif | 1094 | #endif |
847 | 1095 | ||
1096 | /** | ||
1097 | * lpfc_debugfs_initialize - Initialize debugfs for a vport. | ||
1098 | * @vport: The vport pointer to initialize. | ||
1099 | * | ||
1100 | * Description: | ||
1101 | * When Debugfs is configured this routine sets up the lpfc debugfs file system. | ||
1102 | * If not already created, this routine will create the lpfc directory, the | ||
1103 | * lpfcX directory (for this HBA), and the vportX directory for this vport. It will | ||
1104 | * also create each file used to access lpfc specific debugfs information. | ||
1105 | **/ | ||
848 | inline void | 1106 | inline void |
849 | lpfc_debugfs_initialize(struct lpfc_vport *vport) | 1107 | lpfc_debugfs_initialize(struct lpfc_vport *vport) |
850 | { | 1108 | { |
@@ -862,7 +1120,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) | |||
862 | atomic_set(&lpfc_debugfs_hba_count, 0); | 1120 | atomic_set(&lpfc_debugfs_hba_count, 0); |
863 | if (!lpfc_debugfs_root) { | 1121 | if (!lpfc_debugfs_root) { |
864 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, | 1122 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, |
865 | "0409 Cannot create debugfs root\n"); | 1123 | "0408 Cannot create debugfs root\n"); |
866 | goto debug_failed; | 1124 | goto debug_failed; |
867 | } | 1125 | } |
868 | } | 1126 | } |
@@ -876,7 +1134,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) | |||
876 | debugfs_create_dir(name, lpfc_debugfs_root); | 1134 | debugfs_create_dir(name, lpfc_debugfs_root); |
877 | if (!phba->hba_debugfs_root) { | 1135 | if (!phba->hba_debugfs_root) { |
878 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, | 1136 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, |
879 | "0409 Cannot create debugfs hba\n"); | 1137 | "0412 Cannot create debugfs hba\n"); |
880 | goto debug_failed; | 1138 | goto debug_failed; |
881 | } | 1139 | } |
882 | atomic_inc(&lpfc_debugfs_hba_count); | 1140 | atomic_inc(&lpfc_debugfs_hba_count); |
@@ -890,7 +1148,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) | |||
890 | phba, &lpfc_debugfs_op_hbqinfo); | 1148 | phba, &lpfc_debugfs_op_hbqinfo); |
891 | if (!phba->debug_hbqinfo) { | 1149 | if (!phba->debug_hbqinfo) { |
892 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, | 1150 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, |
893 | "0409 Cannot create debugfs hbqinfo\n"); | 1151 | "0411 Cannot create debugfs hbqinfo\n"); |
894 | goto debug_failed; | 1152 | goto debug_failed; |
895 | } | 1153 | } |
896 | 1154 | ||
@@ -902,7 +1160,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) | |||
902 | phba, &lpfc_debugfs_op_dumpHBASlim); | 1160 | phba, &lpfc_debugfs_op_dumpHBASlim); |
903 | if (!phba->debug_dumpHBASlim) { | 1161 | if (!phba->debug_dumpHBASlim) { |
904 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, | 1162 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, |
905 | "0409 Cannot create debugfs dumpHBASlim\n"); | 1163 | "0413 Cannot create debugfs dumpHBASlim\n"); |
906 | goto debug_failed; | 1164 | goto debug_failed; |
907 | } | 1165 | } |
908 | 1166 | ||
@@ -914,7 +1172,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) | |||
914 | phba, &lpfc_debugfs_op_dumpHostSlim); | 1172 | phba, &lpfc_debugfs_op_dumpHostSlim); |
915 | if (!phba->debug_dumpHostSlim) { | 1173 | if (!phba->debug_dumpHostSlim) { |
916 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, | 1174 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, |
917 | "0409 Cannot create debugfs dumpHostSlim\n"); | 1175 | "0414 Cannot create debugfs dumpHostSlim\n"); |
918 | goto debug_failed; | 1176 | goto debug_failed; |
919 | } | 1177 | } |
920 | 1178 | ||
@@ -944,7 +1202,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) | |||
944 | phba, &lpfc_debugfs_op_slow_ring_trc); | 1202 | phba, &lpfc_debugfs_op_slow_ring_trc); |
945 | if (!phba->debug_slow_ring_trc) { | 1203 | if (!phba->debug_slow_ring_trc) { |
946 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, | 1204 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, |
947 | "0409 Cannot create debugfs " | 1205 | "0415 Cannot create debugfs " |
948 | "slow_ring_trace\n"); | 1206 | "slow_ring_trace\n"); |
949 | goto debug_failed; | 1207 | goto debug_failed; |
950 | } | 1208 | } |
@@ -955,7 +1213,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) | |||
955 | GFP_KERNEL); | 1213 | GFP_KERNEL); |
956 | if (!phba->slow_ring_trc) { | 1214 | if (!phba->slow_ring_trc) { |
957 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, | 1215 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, |
958 | "0409 Cannot create debugfs " | 1216 | "0416 Cannot create debugfs " |
959 | "slow_ring buffer\n"); | 1217 | "slow_ring buffer\n"); |
960 | goto debug_failed; | 1218 | goto debug_failed; |
961 | } | 1219 | } |
@@ -972,7 +1230,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) | |||
972 | debugfs_create_dir(name, phba->hba_debugfs_root); | 1230 | debugfs_create_dir(name, phba->hba_debugfs_root); |
973 | if (!vport->vport_debugfs_root) { | 1231 | if (!vport->vport_debugfs_root) { |
974 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, | 1232 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, |
975 | "0409 Cant create debugfs"); | 1233 | "0417 Cant create debugfs"); |
976 | goto debug_failed; | 1234 | goto debug_failed; |
977 | } | 1235 | } |
978 | atomic_inc(&phba->debugfs_vport_count); | 1236 | atomic_inc(&phba->debugfs_vport_count); |
@@ -1001,7 +1259,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) | |||
1001 | 1259 | ||
1002 | if (!vport->disc_trc) { | 1260 | if (!vport->disc_trc) { |
1003 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, | 1261 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, |
1004 | "0409 Cannot create debugfs disc trace " | 1262 | "0418 Cannot create debugfs disc trace " |
1005 | "buffer\n"); | 1263 | "buffer\n"); |
1006 | goto debug_failed; | 1264 | goto debug_failed; |
1007 | } | 1265 | } |
@@ -1014,7 +1272,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) | |||
1014 | vport, &lpfc_debugfs_op_disc_trc); | 1272 | vport, &lpfc_debugfs_op_disc_trc); |
1015 | if (!vport->debug_disc_trc) { | 1273 | if (!vport->debug_disc_trc) { |
1016 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, | 1274 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, |
1017 | "0409 Cannot create debugfs " | 1275 | "0419 Cannot create debugfs " |
1018 | "discovery_trace\n"); | 1276 | "discovery_trace\n"); |
1019 | goto debug_failed; | 1277 | goto debug_failed; |
1020 | } | 1278 | } |
@@ -1033,7 +1291,17 @@ debug_failed: | |||
1033 | #endif | 1291 | #endif |
1034 | } | 1292 | } |
1035 | 1293 | ||
1036 | 1294 | /** | |
1295 | * lpfc_debugfs_terminate - Tear down debugfs infrastructure for this vport. | ||
1296 | * @vport: The vport pointer to remove from debugfs. | ||
1297 | * | ||
1298 | * Description: | ||
1299 | * When Debugfs is configured this routine removes debugfs file system elements | ||
1300 | * that are specific to this vport. It also checks to see if there are any | ||
1301 | * users left for the debugfs directories associated with the HBA and driver. If | ||
1302 | * this is the last user of the HBA directory or driver directory then it will | ||
1303 | * remove those from the debugfs infrastructure as well. | ||
1304 | **/ | ||
1037 | inline void | 1305 | inline void |
1038 | lpfc_debugfs_terminate(struct lpfc_vport *vport) | 1306 | lpfc_debugfs_terminate(struct lpfc_vport *vport) |
1039 | { | 1307 | { |
@@ -1096,5 +1364,3 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport) | |||
1096 | #endif | 1364 | #endif |
1097 | return; | 1365 | return; |
1098 | } | 1366 | } |
1099 | |||
1100 | |||
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index 2db0b74b6fad..f29e548a90d1 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h | |||
@@ -37,6 +37,7 @@ enum lpfc_work_type { | |||
37 | LPFC_EVT_KILL, | 37 | LPFC_EVT_KILL, |
38 | LPFC_EVT_ELS_RETRY, | 38 | LPFC_EVT_ELS_RETRY, |
39 | LPFC_EVT_DEV_LOSS, | 39 | LPFC_EVT_DEV_LOSS, |
40 | LPFC_EVT_FASTPATH_MGMT_EVT, | ||
40 | }; | 41 | }; |
41 | 42 | ||
42 | /* structure used to queue event to the discovery tasklet */ | 43 | /* structure used to queue event to the discovery tasklet */ |
@@ -47,6 +48,24 @@ struct lpfc_work_evt { | |||
47 | enum lpfc_work_type evt; | 48 | enum lpfc_work_type evt; |
48 | }; | 49 | }; |
49 | 50 | ||
51 | struct lpfc_scsi_check_condition_event; | ||
52 | struct lpfc_scsi_varqueuedepth_event; | ||
53 | struct lpfc_scsi_event_header; | ||
54 | struct lpfc_fabric_event_header; | ||
55 | struct lpfc_fcprdchkerr_event; | ||
56 | |||
57 | /* structure used for sending events from fast path */ | ||
58 | struct lpfc_fast_path_event { | ||
59 | struct lpfc_work_evt work_evt; | ||
60 | struct lpfc_vport *vport; | ||
61 | union { | ||
62 | struct lpfc_scsi_check_condition_event check_cond_evt; | ||
63 | struct lpfc_scsi_varqueuedepth_event queue_depth_evt; | ||
64 | struct lpfc_scsi_event_header scsi_evt; | ||
65 | struct lpfc_fabric_event_header fabric_evt; | ||
66 | struct lpfc_fcprdchkerr_event read_check_error; | ||
67 | } un; | ||
68 | }; | ||
50 | 69 | ||
51 | struct lpfc_nodelist { | 70 | struct lpfc_nodelist { |
52 | struct list_head nlp_listp; | 71 | struct list_head nlp_listp; |
@@ -88,6 +107,10 @@ struct lpfc_nodelist { | |||
88 | unsigned long last_ramp_up_time; /* jiffy of last ramp up */ | 107 | unsigned long last_ramp_up_time; /* jiffy of last ramp up */ |
89 | unsigned long last_q_full_time; /* jiffy of last queue full */ | 108 | unsigned long last_q_full_time; /* jiffy of last queue full */ |
90 | struct kref kref; | 109 | struct kref kref; |
110 | atomic_t cmd_pending; | ||
111 | uint32_t cmd_qdepth; | ||
112 | unsigned long last_change_time; | ||
113 | struct lpfc_scsicmd_bkt *lat_data; /* Latency data */ | ||
91 | }; | 114 | }; |
92 | 115 | ||
93 | /* Defines for nlp_flag (uint32) */ | 116 | /* Defines for nlp_flag (uint32) */ |
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index f54e0f7eaee3..630bd28fb997 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c | |||
@@ -30,6 +30,7 @@ | |||
30 | 30 | ||
31 | #include "lpfc_hw.h" | 31 | #include "lpfc_hw.h" |
32 | #include "lpfc_sli.h" | 32 | #include "lpfc_sli.h" |
33 | #include "lpfc_nl.h" | ||
33 | #include "lpfc_disc.h" | 34 | #include "lpfc_disc.h" |
34 | #include "lpfc_scsi.h" | 35 | #include "lpfc_scsi.h" |
35 | #include "lpfc.h" | 36 | #include "lpfc.h" |
@@ -53,6 +54,28 @@ static void lpfc_register_new_vport(struct lpfc_hba *phba, | |||
53 | 54 | ||
54 | static int lpfc_max_els_tries = 3; | 55 | static int lpfc_max_els_tries = 3; |
55 | 56 | ||
57 | /** | ||
58 | * lpfc_els_chk_latt: Check host link attention event for a vport. | ||
59 | * @vport: pointer to a host virtual N_Port data structure. | ||
60 | * | ||
61 | * This routine checks whether there is an outstanding host link | ||
62 | * attention event during the discovery process with the @vport. It is done | ||
63 | * by reading the HBA's Host Attention (HA) register. If there is any host | ||
64 | * link attention events during this @vport's discovery process, the @vport | ||
65 | * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall | ||
66 | * be issued if the link state is not already in host link cleared state, | ||
67 | * and a return code shall indicate whether the host link attention event | ||
68 | * had happened. | ||
69 | * | ||
70 | * Note that, if either the host link is in state LPFC_LINK_DOWN or @vport | ||
71 | * state in LPFC_VPORT_READY, the request for checking host link attention | ||
72 | * event will be ignored and a return code shall indicate no host link | ||
73 | * attention event had happened. | ||
74 | * | ||
75 | * Return codes | ||
76 | * 0 - no host link attention event happened | ||
77 | * 1 - host link attention event happened | ||
78 | **/ | ||
56 | int | 79 | int |
57 | lpfc_els_chk_latt(struct lpfc_vport *vport) | 80 | lpfc_els_chk_latt(struct lpfc_vport *vport) |
58 | { | 81 | { |
@@ -92,6 +115,34 @@ lpfc_els_chk_latt(struct lpfc_vport *vport) | |||
92 | return 1; | 115 | return 1; |
93 | } | 116 | } |
94 | 117 | ||
118 | /** | ||
119 | * lpfc_prep_els_iocb: Allocate and prepare a lpfc iocb data structure. | ||
120 | * @vport: pointer to a host virtual N_Port data structure. | ||
121 | * @expectRsp: flag indicating whether response is expected. | ||
122 | * @cmdSize: size of the ELS command. | ||
123 | * @retry: number of retries to the command IOCB when it fails. | ||
124 | * @ndlp: pointer to a node-list data structure. | ||
125 | * @did: destination identifier. | ||
126 | * @elscmd: the ELS command code. | ||
127 | * | ||
128 | * This routine is used for allocating a lpfc-IOCB data structure from | ||
129 | * the driver lpfc-IOCB free-list and prepare the IOCB with the parameters | ||
130 | * passed into the routine for discovery state machine to issue an Extended | ||
131 | * Link Service (ELS) commands. It is a generic lpfc-IOCB allocation | ||
132 | * and preparation routine that is used by all the discovery state machine | ||
133 | * routines and the ELS command-specific fields will be later set up by | ||
134 | * the individual discovery machine routines after calling this routine | ||
135 | * allocating and preparing a generic IOCB data structure. It fills in the | ||
136 | * Buffer Descriptor Entries (BDEs), allocates buffers for both command | ||
137 | * payload and response payload (if expected). The reference count on the | ||
138 | * ndlp is incremented by 1 and the reference to the ndlp is put into | ||
139 | * context1 of the IOCB data structure for this IOCB to hold the ndlp | ||
140 | * reference for the command's callback function to access later. | ||
141 | * | ||
142 | * Return code | ||
143 | * Pointer to the newly allocated/prepared els iocb data structure | ||
144 | * NULL - when els iocb data structure allocation/preparation failed | ||
145 | **/ | ||
95 | static struct lpfc_iocbq * | 146 | static struct lpfc_iocbq * |
96 | lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, | 147 | lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, |
97 | uint16_t cmdSize, uint8_t retry, | 148 | uint16_t cmdSize, uint8_t retry, |
@@ -150,7 +201,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, | |||
150 | 201 | ||
151 | icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys); | 202 | icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys); |
152 | icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys); | 203 | icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys); |
153 | icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BDL; | 204 | icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; |
154 | icmd->un.elsreq64.remoteID = did; /* DID */ | 205 | icmd->un.elsreq64.remoteID = did; /* DID */ |
155 | if (expectRsp) { | 206 | if (expectRsp) { |
156 | icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64)); | 207 | icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64)); |
@@ -185,7 +236,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, | |||
185 | bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys)); | 236 | bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys)); |
186 | bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys)); | 237 | bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys)); |
187 | bpl->tus.f.bdeSize = FCELSSIZE; | 238 | bpl->tus.f.bdeSize = FCELSSIZE; |
188 | bpl->tus.f.bdeFlags = BUFF_USE_RCV; | 239 | bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; |
189 | bpl->tus.w = le32_to_cpu(bpl->tus.w); | 240 | bpl->tus.w = le32_to_cpu(bpl->tus.w); |
190 | } | 241 | } |
191 | 242 | ||
@@ -233,6 +284,22 @@ els_iocb_free_pcmb_exit: | |||
233 | return NULL; | 284 | return NULL; |
234 | } | 285 | } |
235 | 286 | ||
287 | /** | ||
288 | * lpfc_issue_fabric_reglogin: Issue fabric registration login for a vport. | ||
289 | * @vport: pointer to a host virtual N_Port data structure. | ||
290 | * | ||
291 | * This routine issues a fabric registration login for a @vport. An | ||
292 | * active ndlp node with Fabric_DID must already exist for this @vport. | ||
293 | * The routine invokes two mailbox commands to carry out fabric registration | ||
294 | * login through the HBA firmware: the first mailbox command requests the | ||
295 | * HBA to perform link configuration for the @vport; and the second mailbox | ||
296 | * command requests the HBA to perform the actual fabric registration login | ||
297 | * with the @vport. | ||
298 | * | ||
299 | * Return code | ||
300 | * 0 - successfully issued fabric registration login for @vport | ||
301 | * -ENXIO -- failed to issue fabric registration login for @vport | ||
302 | **/ | ||
236 | static int | 303 | static int |
237 | lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) | 304 | lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) |
238 | { | 305 | { |
@@ -313,6 +380,26 @@ fail: | |||
313 | return -ENXIO; | 380 | return -ENXIO; |
314 | } | 381 | } |
315 | 382 | ||
383 | /** | ||
384 | * lpfc_cmpl_els_flogi_fabric: Completion function for flogi to a fabric port. | ||
385 | * @vport: pointer to a host virtual N_Port data structure. | ||
386 | * @ndlp: pointer to a node-list data structure. | ||
387 | * @sp: pointer to service parameter data structure. | ||
388 | * @irsp: pointer to the IOCB within the lpfc response IOCB. | ||
389 | * | ||
390 | * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback | ||
391 | * function to handle the completion of a Fabric Login (FLOGI) into a fabric | ||
392 | * port in a fabric topology. It properly sets up the parameters to the @ndlp | ||
393 | * from the IOCB response. It also checks the newly assigned N_Port ID to the | ||
394 | * @vport against the previously assigned N_Port ID. If it is different from | ||
395 | * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine | ||
396 | * is invoked on all the remaining nodes with the @vport to unregister the | ||
397 | * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin() | ||
398 | * is invoked to register login to the fabric. | ||
399 | * | ||
400 | * Return code | ||
401 | * 0 - Success (currently, always return 0) | ||
402 | **/ | ||
316 | static int | 403 | static int |
317 | lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | 404 | lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
318 | struct serv_parm *sp, IOCB_t *irsp) | 405 | struct serv_parm *sp, IOCB_t *irsp) |
@@ -387,7 +474,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
387 | */ | 474 | */ |
388 | list_for_each_entry_safe(np, next_np, | 475 | list_for_each_entry_safe(np, next_np, |
389 | &vport->fc_nodes, nlp_listp) { | 476 | &vport->fc_nodes, nlp_listp) { |
390 | if (!NLP_CHK_NODE_ACT(ndlp)) | 477 | if (!NLP_CHK_NODE_ACT(np)) |
391 | continue; | 478 | continue; |
392 | if ((np->nlp_state != NLP_STE_NPR_NODE) || | 479 | if ((np->nlp_state != NLP_STE_NPR_NODE) || |
393 | !(np->nlp_flag & NLP_NPR_ADISC)) | 480 | !(np->nlp_flag & NLP_NPR_ADISC)) |
@@ -416,9 +503,26 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
416 | return 0; | 503 | return 0; |
417 | } | 504 | } |
418 | 505 | ||
419 | /* | 506 | /** |
420 | * We FLOGIed into an NPort, initiate pt2pt protocol | 507 | * lpfc_cmpl_els_flogi_nport: Completion function for flogi to an N_Port. |
421 | */ | 508 | * @vport: pointer to a host virtual N_Port data structure. |
509 | * @ndlp: pointer to a node-list data structure. | ||
510 | * @sp: pointer to service parameter data structure. | ||
511 | * | ||
512 | * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback | ||
513 | * function to handle the completion of a Fabric Login (FLOGI) into an N_Port | ||
514 | * in a point-to-point topology. First, the @vport's N_Port Name is compared | ||
515 | * with the received N_Port Name: if the @vport's N_Port Name is greater than | ||
516 | * the received N_Port Name lexicographically, this node shall assign local | ||
517 | * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and | ||
518 | * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise, | ||
519 | * this node shall just wait for the remote node to issue PLOGI and assign | ||
520 | * N_Port IDs. | ||
521 | * | ||
522 | * Return code | ||
523 | * 0 - Success | ||
524 | * -ENXIO - Fail | ||
525 | **/ | ||
422 | static int | 526 | static int |
423 | lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | 527 | lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
424 | struct serv_parm *sp) | 528 | struct serv_parm *sp) |
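The point-to-point FLOGI completion described above decides which side assigns N_Port IDs by a lexicographic comparison of the two 8-byte port names: the higher name takes PT2PT_LocalID (1) for itself and gives PT2PT_RemoteID (2) to the peer, while the lower name waits for the peer's PLOGI. A standalone sketch of that decision (types and constant names here are illustrative, not the driver's serv_parm layout):

#include <string.h>
#include <stdint.h>

#define PT2PT_LOCAL_ID	1
#define PT2PT_REMOTE_ID	2

struct pt2pt_ids { uint32_t local_id; uint32_t remote_id; };

/* Returns 1 when this port must send PLOGI (it owns the higher WWPN),
 * 0 when it should wait for the remote port to PLOGI instead. */
static int pt2pt_assign_ids(const uint8_t my_wwpn[8],
			    const uint8_t rx_wwpn[8],
			    struct pt2pt_ids *ids)
{
	/* WWPNs are 8-byte big-endian names; memcmp gives lexicographic order. */
	if (memcmp(my_wwpn, rx_wwpn, 8) > 0) {
		ids->local_id = PT2PT_LOCAL_ID;
		ids->remote_id = PT2PT_REMOTE_ID;
		return 1;
	}
	return 0;	/* remote side will assign the IDs */
}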
@@ -516,6 +620,29 @@ fail: | |||
516 | return -ENXIO; | 620 | return -ENXIO; |
517 | } | 621 | } |
518 | 622 | ||
623 | /** | ||
624 | * lpfc_cmpl_els_flogi: Completion callback function for flogi. | ||
625 | * @phba: pointer to lpfc hba data structure. | ||
626 | * @cmdiocb: pointer to lpfc command iocb data structure. | ||
627 | * @rspiocb: pointer to lpfc response iocb data structure. | ||
628 | * | ||
629 | * This routine is the top-level completion callback function for issuing | ||
630 | * a Fabric Login (FLOGI) command. If the response IOCB reported error, | ||
631 | * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If | ||
632 | * retry has been made (either immediately or delayed with lpfc_els_retry() | ||
633 | * returning 1), the command IOCB will be released and function returned. | ||
634 | * If the retry attempt has been given up (possibly reach the maximum | ||
635 | * number of retries), one additional decrement of ndlp reference shall be | ||
636 | * invoked before going out after releasing the command IOCB. This will | ||
637 | * actually release the remote node (Note, lpfc_els_free_iocb() will also | ||
638 | * invoke one decrement of ndlp reference count). If no error reported in | ||
639 | * the IOCB status, the command Port ID field is used to determine whether | ||
640 | * this is a point-to-point topology or a fabric topology: if the Port ID | ||
641 | * field is assigned, it is a fabric topology; otherwise, it is a | ||
642 | * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or | ||
643 | * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the | ||
644 | * specific topology completion conditions. | ||
645 | **/ | ||
519 | static void | 646 | static void |
520 | lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | 647 | lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, |
521 | struct lpfc_iocbq *rspiocb) | 648 | struct lpfc_iocbq *rspiocb) |
@@ -618,6 +745,28 @@ out: | |||
618 | lpfc_els_free_iocb(phba, cmdiocb); | 745 | lpfc_els_free_iocb(phba, cmdiocb); |
619 | } | 746 | } |
620 | 747 | ||
748 | /** | ||
749 | * lpfc_issue_els_flogi: Issue an flogi iocb command for a vport. | ||
750 | * @vport: pointer to a host virtual N_Port data structure. | ||
751 | * @ndlp: pointer to a node-list data structure. | ||
752 | * @retry: number of retries to the command IOCB. | ||
753 | * | ||
754 | * This routine issues a Fabric Login (FLOGI) Request ELS command | ||
755 | * for a @vport. The initiator service parameters are put into the payload | ||
756 | * of the FLOGI Request IOCB and the top-level callback function pointer | ||
757 | * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback | ||
758 | * function field. The lpfc_issue_fabric_iocb routine is invoked to send | ||
759 | * out FLOGI ELS command with one outstanding fabric IOCB at a time. | ||
760 | * | ||
761 | * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp | ||
762 | * will be incremented by 1 for holding the ndlp and the reference to ndlp | ||
763 | * will be stored into the context1 field of the IOCB for the completion | ||
764 | * callback function to the FLOGI ELS command. | ||
765 | * | ||
766 | * Return code | ||
767 | * 0 - successfully issued flogi iocb for @vport | ||
768 | * 1 - failed to issue flogi iocb for @vport | ||
769 | **/ | ||
621 | static int | 770 | static int |
622 | lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | 771 | lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
623 | uint8_t retry) | 772 | uint8_t retry) |
@@ -694,6 +843,20 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
694 | return 0; | 843 | return 0; |
695 | } | 844 | } |
696 | 845 | ||
846 | /** | ||
847 | * lpfc_els_abort_flogi: Abort all outstanding flogi iocbs. | ||
848 | * @phba: pointer to lpfc hba data structure. | ||
849 | * | ||
850 | * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs | ||
851 | * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq | ||
852 | * list and issues an abort IOCB command on each outstanding IOCB that | ||
853 | * contains an active Fabric_DID ndlp. Note that this function is to issue | ||
854 | * the abort IOCB command on all the outstanding IOCBs, thus when this | ||
855 | * function returns, it does not guarantee all the IOCBs are actually aborted. | ||
856 | * | ||
857 | * Return code | ||
858 | * 0 - Successfully issued abort iocb on all outstanding flogis (Always 0) | ||
859 | **/ | ||
697 | int | 860 | int |
698 | lpfc_els_abort_flogi(struct lpfc_hba *phba) | 861 | lpfc_els_abort_flogi(struct lpfc_hba *phba) |
699 | { | 862 | { |
@@ -729,6 +892,22 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba) | |||
729 | return 0; | 892 | return 0; |
730 | } | 893 | } |
731 | 894 | ||
895 | /** | ||
896 | * lpfc_initial_flogi: Issue an initial fabric login for a vport. | ||
897 | * @vport: pointer to a host virtual N_Port data structure. | ||
898 | * | ||
899 | * This routine issues an initial Fabric Login (FLOGI) for the @vport | ||
900 | * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from | ||
901 | * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and | ||
902 | * put it into the @vport's ndlp list. If an inactive ndlp found on the list, | ||
903 | * it will just be enabled and made active. The lpfc_issue_els_flogi() routine | ||
904 | * is then invoked with the @vport and the ndlp to perform the FLOGI for the | ||
905 | * @vport. | ||
906 | * | ||
907 | * Return code | ||
908 | * 0 - failed to issue initial flogi for @vport | ||
909 | * 1 - successfully issued initial flogi for @vport | ||
910 | **/ | ||
732 | int | 911 | int |
733 | lpfc_initial_flogi(struct lpfc_vport *vport) | 912 | lpfc_initial_flogi(struct lpfc_vport *vport) |
734 | { | 913 | { |
@@ -764,6 +943,22 @@ lpfc_initial_flogi(struct lpfc_vport *vport) | |||
764 | return 1; | 943 | return 1; |
765 | } | 944 | } |
766 | 945 | ||
946 | /** | ||
947 | * lpfc_initial_fdisc: Issue an initial fabric discovery for a vport. | ||
948 | * @vport: pointer to a host virtual N_Port data structure. | ||
949 | * | ||
950 | * This routine issues an initial Fabric Discover (FDISC) for the @vport | ||
951 | * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from | ||
952 | * the @vport's ndlp list. If no such ndlp is found, it will create one and | ||
953 | * put it into the @vport's ndlp list. If an inactive ndlp is found on the list, | ||
954 | * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine | ||
955 | * is then invoked with the @vport and the ndlp to perform the FDISC for the | ||
956 | * @vport. | ||
957 | * | ||
958 | * Return code | ||
959 | * 0 - failed to issue initial fdisc for @vport | ||
960 | * 1 - successfully issued initial fdisc for @vport | ||
961 | **/ | ||
767 | int | 962 | int |
768 | lpfc_initial_fdisc(struct lpfc_vport *vport) | 963 | lpfc_initial_fdisc(struct lpfc_vport *vport) |
769 | { | 964 | { |
@@ -797,6 +992,17 @@ lpfc_initial_fdisc(struct lpfc_vport *vport) | |||
797 | return 1; | 992 | return 1; |
798 | } | 993 | } |
799 | 994 | ||
995 | /** | ||
996 | * lpfc_more_plogi: Check and issue remaining plogis for a vport. | ||
997 | * @vport: pointer to a host virtual N_Port data structure. | ||
998 | * | ||
999 | * This routine checks whether there are more remaining Port Logins | ||
1000 | * (PLOGI) to be issued for the @vport. If so, it will invoke the routine | ||
1001 | * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes | ||
1002 | * to issue ELS PLOGIs up to the configured discover threads with the | ||
1003 | * @vport (@vport->cfg_discovery_threads). The function also decrements | ||
1004 | * the @vport's num_disc_nodes by 1 if it is not already 0. | ||
1005 | **/ | ||
800 | void | 1006 | void |
801 | lpfc_more_plogi(struct lpfc_vport *vport) | 1007 | lpfc_more_plogi(struct lpfc_vport *vport) |
802 | { | 1008 | { |
@@ -819,6 +1025,37 @@ lpfc_more_plogi(struct lpfc_vport *vport) | |||
819 | return; | 1025 | return; |
820 | } | 1026 | } |
821 | 1027 | ||
1028 | /** | ||
1029 | * lpfc_plogi_confirm_nport: Confirm plogi wwpn matches stored ndlp. | ||
1030 | * @phba: pointer to lpfc hba data structure. | ||
1031 | * @prsp: pointer to response IOCB payload. | ||
1032 | * @ndlp: pointer to a node-list data structure. | ||
1033 | * | ||
1034 | * This routine checks and indicates whether the WWPN of an N_Port, retrieved | ||
1035 | * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port. | ||
1036 | * The following cases are considered N_Port confirmed: | ||
1037 | * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches | ||
1038 | * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but | ||
1039 | * it does not have WWPN assigned either. If the WWPN is confirmed, the | ||
1040 | * pointer to the @ndlp will be returned. If the WWPN is not confirmed: | ||
1041 | * 1) if there is a node on vport list other than the @ndlp with the same | ||
1042 | * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked | ||
1043 | * on that node to release the RPI associated with the node; 2) if there is | ||
1044 | * no node found on vport list with the same WWPN of the N_Port PLOGI logged | ||
1045 | * into, a new node shall be allocated (or activated). In either case, the | ||
1046 | * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall | ||
1047 | * be released and the new_ndlp shall be put onto the vport node list and | ||
1048 | * its pointer returned as the confirmed node. | ||
1049 | * | ||
1050 | * Note that before the @ndlp is "released", the keepDID from the non-matching | ||
1051 | * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID | ||
1052 | * of the @ndlp. This is because the release of @ndlp actually puts it | ||
1053 | * into an inactive state on the vport node list, and the vport node list | ||
1054 | * management algorithm does not allow two nodes with the same DID. | ||
1055 | * | ||
1056 | * Return code | ||
1057 | * pointer to the PLOGI N_Port @ndlp | ||
1058 | **/ | ||
822 | static struct lpfc_nodelist * | 1059 | static struct lpfc_nodelist * |
823 | lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, | 1060 | lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, |
824 | struct lpfc_nodelist *ndlp) | 1061 | struct lpfc_nodelist *ndlp) |
@@ -922,6 +1159,17 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, | |||
922 | return new_ndlp; | 1159 | return new_ndlp; |
923 | } | 1160 | } |
924 | 1161 | ||
1162 | /** | ||
1163 | * lpfc_end_rscn: Check and handle more rscn for a vport. | ||
1164 | * @vport: pointer to a host virtual N_Port data structure. | ||
1165 | * | ||
1166 | * This routine checks whether more Registration State Change | ||
1167 | * Notifications (RSCNs) came in while the discovery state machine was in | ||
1168 | * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be | ||
1169 | * invoked to handle the additional RSCNs for the @vport. Otherwise, the | ||
1170 | * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of | ||
1171 | * handling the RSCNs. | ||
1172 | **/ | ||
925 | void | 1173 | void |
926 | lpfc_end_rscn(struct lpfc_vport *vport) | 1174 | lpfc_end_rscn(struct lpfc_vport *vport) |
927 | { | 1175 | { |
@@ -943,6 +1191,26 @@ lpfc_end_rscn(struct lpfc_vport *vport) | |||
943 | } | 1191 | } |
944 | } | 1192 | } |
945 | 1193 | ||
1194 | /** | ||
1195 | * lpfc_cmpl_els_plogi: Completion callback function for plogi. | ||
1196 | * @phba: pointer to lpfc hba data structure. | ||
1197 | * @cmdiocb: pointer to lpfc command iocb data structure. | ||
1198 | * @rspiocb: pointer to lpfc response iocb data structure. | ||
1199 | * | ||
1200 | * This routine is the completion callback function for issuing the Port | ||
1201 | * Login (PLOGI) command. For PLOGI completion, there must be an active | ||
1202 | * ndlp on the vport node list that matches the remote node ID from the | ||
1203 | * PLOGI response IOCB. If no such ndlp exists, the PLOGI is simply | ||
1204 | * ignored and the command IOCB released. The PLOGI response IOCB status is | ||
1205 | * checked for error conditions. If an error status is reported, a PLOGI | ||
1206 | * retry shall be attempted by invoking the lpfc_els_retry() routine. | ||
1207 | * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on | ||
1208 | * the ndlp and the NLP_EVT_CMPL_PLOGI event is sent to the Discover State | ||
1209 | * Machine (DSM) for this PLOGI completion. Finally, it checks whether | ||
1210 | * there are additional N_Port nodes with the vport that need to perform | ||
1211 | * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue addition | ||
1212 | * PLOGIs. | ||
1213 | **/ | ||
946 | static void | 1214 | static void |
947 | lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | 1215 | lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, |
948 | struct lpfc_iocbq *rspiocb) | 1216 | struct lpfc_iocbq *rspiocb) |
@@ -1048,6 +1316,27 @@ out: | |||
1048 | return; | 1316 | return; |
1049 | } | 1317 | } |
1050 | 1318 | ||
1319 | /** | ||
1320 | * lpfc_issue_els_plogi: Issue a plogi iocb command for a vport. | ||
1321 | * @vport: pointer to a host virtual N_Port data structure. | ||
1322 | * @did: destination port identifier. | ||
1323 | * @retry: number of retries to the command IOCB. | ||
1324 | * | ||
1325 | * This routine issues a Port Login (PLOGI) command to a remote N_Port | ||
1326 | * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port, | ||
1327 | * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list. | ||
1328 | * This routine constructs the proper fields of the PLOGI IOCB and invokes | ||
1329 | * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command. | ||
1330 | * | ||
1331 | * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp | ||
1332 | * will be incremented by 1 for holding the ndlp and the reference to ndlp | ||
1333 | * will be stored into the context1 field of the IOCB for the completion | ||
1334 | * callback function to the PLOGI ELS command. | ||
1335 | * | ||
1336 | * Return code | ||
1337 | * 0 - Successfully issued a plogi for @vport | ||
1338 | * 1 - failed to issue a plogi for @vport | ||
1339 | **/ | ||
1051 | int | 1340 | int |
1052 | lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) | 1341 | lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) |
1053 | { | 1342 | { |
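Several of the new comments repeat the same ownership rule: preparing the ELS IOCB takes an extra reference on the ndlp and stashes the pointer in context1, and whoever frees the IOCB drops that reference. The sketch below is a generic model of that idiom, not the lpfc implementation; the node type, counters and helper names are invented for illustration.

#include <assert.h>
#include <stdio.h>

struct node {
        int refcount;          /* simplified stand-in for the ndlp reference count */
};

struct els_iocb {
        struct node *context1; /* holds the node reference for the completion path */
};

static struct node *node_get(struct node *n)
{
        n->refcount++;
        return n;
}

static void node_put(struct node *n)
{
        if (--n->refcount == 0)
                printf("node released\n");
}

/* Model of lpfc_prep_els_iocb(): take a reference and park it in context1. */
static void prep_els_iocb(struct els_iocb *iocb, struct node *n)
{
        iocb->context1 = node_get(n);
}

/* Model of the free path: drop the reference that prep took, if still held. */
static void free_els_iocb(struct els_iocb *iocb)
{
        if (iocb->context1) {
                node_put(iocb->context1);
                iocb->context1 = NULL;
        }
}

int main(void)
{
        struct node ndlp = { .refcount = 1 };
        struct els_iocb iocb = { 0 };

        prep_els_iocb(&iocb, &ndlp);
        assert(ndlp.refcount == 2);  /* command in flight holds one reference */
        free_els_iocb(&iocb);
        assert(ndlp.refcount == 1);  /* back to the caller's reference only */
        return 0;
}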
@@ -1106,6 +1395,19 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) | |||
1106 | return 0; | 1395 | return 0; |
1107 | } | 1396 | } |
1108 | 1397 | ||
1398 | /** | ||
1399 | * lpfc_cmpl_els_prli: Completion callback function for prli. | ||
1400 | * @phba: pointer to lpfc hba data structure. | ||
1401 | * @cmdiocb: pointer to lpfc command iocb data structure. | ||
1402 | * @rspiocb: pointer to lpfc response iocb data structure. | ||
1403 | * | ||
1404 | * This routine is the completion callback function for a Process Login | ||
1405 | * (PRLI) ELS command. The PRLI response IOCB status is checked for errors. | ||
1406 | * If an error status is reported, a PRLI retry shall be attempted | ||
1407 | * by invoking the lpfc_els_retry() routine. Otherwise, the | ||
1408 | * NLP_EVT_CMPL_PRLI event is sent to the Discover State Machine (DSM) for this | ||
1409 | * ndlp to mark the PRLI completion. | ||
1410 | **/ | ||
1109 | static void | 1411 | static void |
1110 | lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | 1412 | lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, |
1111 | struct lpfc_iocbq *rspiocb) | 1413 | struct lpfc_iocbq *rspiocb) |
@@ -1164,6 +1466,27 @@ out: | |||
1164 | return; | 1466 | return; |
1165 | } | 1467 | } |
1166 | 1468 | ||
1469 | /** | ||
1470 | * lpfc_issue_els_prli: Issue a prli iocb command for a vport. | ||
1471 | * @vport: pointer to a host virtual N_Port data structure. | ||
1472 | * @ndlp: pointer to a node-list data structure. | ||
1473 | * @retry: number of retries to the command IOCB. | ||
1474 | * | ||
1475 | * This routine issues a Process Login (PRLI) ELS command for the | ||
1476 | * @vport. The PRLI service parameters are set up in the payload of the | ||
1477 | * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine | ||
1478 | * is put into the IOCB completion callback func field before invoking the | ||
1479 | * routine lpfc_sli_issue_iocb() to send out the PRLI command. | ||
1480 | * | ||
1481 | * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp | ||
1482 | * will be incremented by 1 for holding the ndlp and the reference to ndlp | ||
1483 | * will be stored into the context1 field of the IOCB for the completion | ||
1484 | * callback function to the PRLI ELS command. | ||
1485 | * | ||
1486 | * Return code | ||
1487 | * 0 - successfully issued prli iocb command for @vport | ||
1488 | * 1 - failed to issue prli iocb command for @vport | ||
1489 | **/ | ||
1167 | int | 1490 | int |
1168 | lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | 1491 | lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
1169 | uint8_t retry) | 1492 | uint8_t retry) |
@@ -1233,6 +1556,92 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
1233 | return 0; | 1556 | return 0; |
1234 | } | 1557 | } |
1235 | 1558 | ||
1559 | /** | ||
1560 | * lpfc_rscn_disc: Perform rscn discovery for a vport. | ||
1561 | * @vport: pointer to a host virtual N_Port data structure. | ||
1562 | * | ||
1563 | * This routine performs Registration State Change Notification (RSCN) | ||
1564 | * discovery for a @vport. If the @vport's node port recovery count is not | ||
1565 | * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all | ||
1566 | * the nodes that need recovery. If no PLOGIs were needed through | ||
1567 | * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be | ||
1568 | * invoked to check and handle any additional RSCNs that came in while the | ||
1569 | * current ones were being processed. | ||
1570 | **/ | ||
1571 | static void | ||
1572 | lpfc_rscn_disc(struct lpfc_vport *vport) | ||
1573 | { | ||
1574 | lpfc_can_disctmo(vport); | ||
1575 | |||
1576 | /* RSCN discovery */ | ||
1577 | /* go thru NPR nodes and issue ELS PLOGIs */ | ||
1578 | if (vport->fc_npr_cnt) | ||
1579 | if (lpfc_els_disc_plogi(vport)) | ||
1580 | return; | ||
1581 | |||
1582 | lpfc_end_rscn(vport); | ||
1583 | } | ||
1584 | |||
1585 | /** | ||
1586 | * lpfc_adisc_done: Complete the adisc phase of discovery. | ||
1587 | * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs. | ||
1588 | * | ||
1589 | * This function is called when the final ADISC is completed during discovery. | ||
1590 | * This function handles clearing link attention or issuing reg_vpi depending | ||
1591 | * on whether npiv is enabled. This function also kicks off the PLOGI phase of | ||
1592 | * discovery. | ||
1593 | * This function is called with no locks held. | ||
1594 | **/ | ||
1595 | static void | ||
1596 | lpfc_adisc_done(struct lpfc_vport *vport) | ||
1597 | { | ||
1598 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
1599 | struct lpfc_hba *phba = vport->phba; | ||
1600 | |||
1601 | /* | ||
1602 | * For NPIV, cmpl_reg_vpi will set port_state to READY, | ||
1603 | * and continue discovery. | ||
1604 | */ | ||
1605 | if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && | ||
1606 | !(vport->fc_flag & FC_RSCN_MODE)) { | ||
1607 | lpfc_issue_reg_vpi(phba, vport); | ||
1608 | return; | ||
1609 | } | ||
1610 | /* | ||
1611 | * For SLI2, we need to set port_state to READY | ||
1612 | * and continue discovery. | ||
1613 | */ | ||
1614 | if (vport->port_state < LPFC_VPORT_READY) { | ||
1615 | /* If we get here, there is nothing to ADISC */ | ||
1616 | if (vport->port_type == LPFC_PHYSICAL_PORT) | ||
1617 | lpfc_issue_clear_la(phba, vport); | ||
1618 | if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { | ||
1619 | vport->num_disc_nodes = 0; | ||
1620 | /* go thru NPR list, issue ELS PLOGIs */ | ||
1621 | if (vport->fc_npr_cnt) | ||
1622 | lpfc_els_disc_plogi(vport); | ||
1623 | if (!vport->num_disc_nodes) { | ||
1624 | spin_lock_irq(shost->host_lock); | ||
1625 | vport->fc_flag &= ~FC_NDISC_ACTIVE; | ||
1626 | spin_unlock_irq(shost->host_lock); | ||
1627 | lpfc_can_disctmo(vport); | ||
1628 | lpfc_end_rscn(vport); | ||
1629 | } | ||
1630 | } | ||
1631 | vport->port_state = LPFC_VPORT_READY; | ||
1632 | } else | ||
1633 | lpfc_rscn_disc(vport); | ||
1634 | } | ||
1635 | |||
1636 | /** | ||
1637 | * lpfc_more_adisc: Issue more adisc as needed. | ||
1638 | * @vport: pointer to a host virtual N_Port data structure. | ||
1639 | * | ||
1640 | * This routine determines whether there are more ndlps on a @vport | ||
1641 | * node list that need to have Address Discover (ADISC) issued. If so, it will | ||
1642 | * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's | ||
1643 | * remaining nodes which need to have ADISC sent. | ||
1644 | **/ | ||
1236 | void | 1645 | void |
1237 | lpfc_more_adisc(struct lpfc_vport *vport) | 1646 | lpfc_more_adisc(struct lpfc_vport *vport) |
1238 | { | 1647 | { |
@@ -1252,23 +1661,27 @@ lpfc_more_adisc(struct lpfc_vport *vport) | |||
1252 | /* go thru NPR nodes and issue any remaining ELS ADISCs */ | 1661 | /* go thru NPR nodes and issue any remaining ELS ADISCs */ |
1253 | sentadisc = lpfc_els_disc_adisc(vport); | 1662 | sentadisc = lpfc_els_disc_adisc(vport); |
1254 | } | 1663 | } |
1664 | if (!vport->num_disc_nodes) | ||
1665 | lpfc_adisc_done(vport); | ||
1255 | return; | 1666 | return; |
1256 | } | 1667 | } |
1257 | 1668 | ||
1258 | static void | 1669 | /** |
1259 | lpfc_rscn_disc(struct lpfc_vport *vport) | 1670 | * lpfc_cmpl_els_adisc: Completion callback function for adisc. |
1260 | { | 1671 | * @phba: pointer to lpfc hba data structure. |
1261 | lpfc_can_disctmo(vport); | 1672 | * @cmdiocb: pointer to lpfc command iocb data structure. |
1262 | 1673 | * @rspiocb: pointer to lpfc response iocb data structure. | |
1263 | /* RSCN discovery */ | 1674 | * |
1264 | /* go thru NPR nodes and issue ELS PLOGIs */ | 1675 | * This routine is the completion function for issuing the Address Discover |
1265 | if (vport->fc_npr_cnt) | 1676 | * (ADISC) command. It first checks to see whether the link went down during |
1266 | if (lpfc_els_disc_plogi(vport)) | 1677 | * the discovery process. If so, the node will be marked for node port |
1267 | return; | 1678 | * recovery so that the link attention handler can issue the discovery IOCB, |
1268 | 1679 | * and then exits. Otherwise, the response status is checked. If an error was | |
1269 | lpfc_end_rscn(vport); | 1680 | * reported in the response status, the ADISC command shall be retried by |
1270 | } | 1681 | * invoking the lpfc_els_retry() routine. Otherwise, the state machine is |
1271 | 1682 | * invoked to make the transition with respect to the | |
1683 | * NLP_EVT_CMPL_ADISC event. | ||
1684 | **/ | ||
1272 | static void | 1685 | static void |
1273 | lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | 1686 | lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, |
1274 | struct lpfc_iocbq *rspiocb) | 1687 | struct lpfc_iocbq *rspiocb) |
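The hunk above moves the "all ADISCs finished" handling out of the completion callback and into lpfc_adisc_done(), which lpfc_more_adisc() now calls once the outstanding-node counter drops to zero. Reduced to its skeleton with invented types, the resulting control flow looks roughly like this (a sketch only, not the driver code):

#include <stdio.h>

struct vport {
        int num_disc_nodes;    /* nodes still waiting on an ADISC completion */
};

/* Runs once when the last ADISC has completed (compare lpfc_adisc_done()). */
static void adisc_done(struct vport *vp)
{
        printf("ADISC phase finished (outstanding=%d): start the PLOGI phase or register the vport\n",
               vp->num_disc_nodes);
}

/* Compare lpfc_more_adisc(): send any remaining ADISCs, then check for done. */
static void more_adisc(struct vport *vp)
{
        /* ... issue further ADISCs here as discovery-thread slots free up ... */
        if (vp->num_disc_nodes == 0)
                adisc_done(vp);
}

/* Compare lpfc_cmpl_els_adisc(): the callback itself only asks for more work. */
static void cmpl_els_adisc(struct vport *vp)
{
        if (vp->num_disc_nodes)
                vp->num_disc_nodes--;
        more_adisc(vp);
}

int main(void)
{
        struct vport vp = { .num_disc_nodes = 1 };

        cmpl_els_adisc(&vp);   /* last completion triggers adisc_done() */
        return 0;
}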
@@ -1333,57 +1746,34 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
1333 | lpfc_disc_state_machine(vport, ndlp, cmdiocb, | 1746 | lpfc_disc_state_machine(vport, ndlp, cmdiocb, |
1334 | NLP_EVT_CMPL_ADISC); | 1747 | NLP_EVT_CMPL_ADISC); |
1335 | 1748 | ||
1336 | if (disc && vport->num_disc_nodes) { | 1749 | /* Check to see if there are more ADISCs to be sent */ |
1337 | /* Check to see if there are more ADISCs to be sent */ | 1750 | if (disc && vport->num_disc_nodes) |
1338 | lpfc_more_adisc(vport); | 1751 | lpfc_more_adisc(vport); |
1339 | |||
1340 | /* Check to see if we are done with ADISC authentication */ | ||
1341 | if (vport->num_disc_nodes == 0) { | ||
1342 | /* If we get here, there is nothing left to ADISC */ | ||
1343 | /* | ||
1344 | * For NPIV, cmpl_reg_vpi will set port_state to READY, | ||
1345 | * and continue discovery. | ||
1346 | */ | ||
1347 | if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && | ||
1348 | !(vport->fc_flag & FC_RSCN_MODE)) { | ||
1349 | lpfc_issue_reg_vpi(phba, vport); | ||
1350 | goto out; | ||
1351 | } | ||
1352 | /* | ||
1353 | * For SLI2, we need to set port_state to READY | ||
1354 | * and continue discovery. | ||
1355 | */ | ||
1356 | if (vport->port_state < LPFC_VPORT_READY) { | ||
1357 | /* If we get here, there is nothing to ADISC */ | ||
1358 | if (vport->port_type == LPFC_PHYSICAL_PORT) | ||
1359 | lpfc_issue_clear_la(phba, vport); | ||
1360 | |||
1361 | if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { | ||
1362 | vport->num_disc_nodes = 0; | ||
1363 | /* go thru NPR list, issue ELS PLOGIs */ | ||
1364 | if (vport->fc_npr_cnt) | ||
1365 | lpfc_els_disc_plogi(vport); | ||
1366 | |||
1367 | if (!vport->num_disc_nodes) { | ||
1368 | spin_lock_irq(shost->host_lock); | ||
1369 | vport->fc_flag &= | ||
1370 | ~FC_NDISC_ACTIVE; | ||
1371 | spin_unlock_irq( | ||
1372 | shost->host_lock); | ||
1373 | lpfc_can_disctmo(vport); | ||
1374 | } | ||
1375 | } | ||
1376 | vport->port_state = LPFC_VPORT_READY; | ||
1377 | } else { | ||
1378 | lpfc_rscn_disc(vport); | ||
1379 | } | ||
1380 | } | ||
1381 | } | ||
1382 | out: | 1752 | out: |
1383 | lpfc_els_free_iocb(phba, cmdiocb); | 1753 | lpfc_els_free_iocb(phba, cmdiocb); |
1384 | return; | 1754 | return; |
1385 | } | 1755 | } |
1386 | 1756 | ||
1757 | /** | ||
1758 | * lpfc_issue_els_adisc: Issue an address discover iocb to an node on a vport. | ||
1759 | * @vport: pointer to a virtual N_Port data structure. | ||
1760 | * @ndlp: pointer to a node-list data structure. | ||
1761 | * @retry: number of retries to the command IOCB. | ||
1762 | * | ||
1763 | * This routine issues an Address Discover (ADISC) for an @ndlp on a | ||
1764 | * @vport. It prepares the payload of the ADISC ELS command, updates the | ||
1765 | * and states of the ndlp, and invokes the lpfc_sli_issue_iocb() routine | ||
1766 | * to issue the ADISC ELS command. | ||
1767 | * | ||
1768 | * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp | ||
1769 | * will be incremented by 1 for holding the ndlp and the reference to ndlp | ||
1770 | * will be stored into the context1 field of the IOCB for the completion | ||
1771 | * callback function to the ADISC ELS command. | ||
1772 | * | ||
1773 | * Return code | ||
1774 | * 0 - successfully issued adisc | ||
1775 | * 1 - failed to issue adisc | ||
1776 | **/ | ||
1387 | int | 1777 | int |
1388 | lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | 1778 | lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
1389 | uint8_t retry) | 1779 | uint8_t retry) |
@@ -1437,6 +1827,18 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
1437 | return 0; | 1827 | return 0; |
1438 | } | 1828 | } |
1439 | 1829 | ||
1830 | /** | ||
1831 | * lpfc_cmpl_els_logo: Completion callback function for logo. | ||
1832 | * @phba: pointer to lpfc hba data structure. | ||
1833 | * @cmdiocb: pointer to lpfc command iocb data structure. | ||
1834 | * @rspiocb: pointer to lpfc response iocb data structure. | ||
1835 | * | ||
1836 | * This routine is the completion function for issuing the ELS Logout (LOGO) | ||
1837 | * command. If no error status was reported from the LOGO response, the | ||
1838 | * state machine of the associated ndlp shall be invoked for transition with | ||
1839 | * respect to the NLP_EVT_CMPL_LOGO event. Otherwise, if an error status was reported, | ||
1840 | * the lpfc_els_retry() routine will be invoked to retry the LOGO command. | ||
1841 | **/ | ||
1440 | static void | 1842 | static void |
1441 | lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | 1843 | lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, |
1442 | struct lpfc_iocbq *rspiocb) | 1844 | struct lpfc_iocbq *rspiocb) |
@@ -1502,6 +1904,26 @@ out: | |||
1502 | return; | 1904 | return; |
1503 | } | 1905 | } |
1504 | 1906 | ||
1907 | /** | ||
1908 | * lpfc_issue_els_logo: Issue a logo to a node on a vport. | ||
1909 | * @vport: pointer to a virtual N_Port data structure. | ||
1910 | * @ndlp: pointer to a node-list data structure. | ||
1911 | * @retry: number of retries to the command IOCB. | ||
1912 | * | ||
1913 | * This routine constructs and issues an ELS Logout (LOGO) iocb command | ||
1914 | * to a remote node, referred to by an @ndlp on a @vport. It constructs the | ||
1915 | * payload of the IOCB, properly sets up the @ndlp state, and invokes the | ||
1916 | * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command. | ||
1917 | * | ||
1918 | * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp | ||
1919 | * will be incremented by 1 for holding the ndlp and the reference to ndlp | ||
1920 | * will be stored into the context1 field of the IOCB for the completion | ||
1921 | * callback function to the LOGO ELS command. | ||
1922 | * | ||
1923 | * Return code | ||
1924 | * 0 - successfully issued logo | ||
1925 | * 1 - failed to issue logo | ||
1926 | **/ | ||
1505 | int | 1927 | int |
1506 | lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | 1928 | lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
1507 | uint8_t retry) | 1929 | uint8_t retry) |
@@ -1563,6 +1985,22 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
1563 | return 0; | 1985 | return 0; |
1564 | } | 1986 | } |
1565 | 1987 | ||
1988 | /** | ||
1989 | * lpfc_cmpl_els_cmd: Completion callback function for generic els command. | ||
1990 | * @phba: pointer to lpfc hba data structure. | ||
1991 | * @cmdiocb: pointer to lpfc command iocb data structure. | ||
1992 | * @rspiocb: pointer to lpfc response iocb data structure. | ||
1993 | * | ||
1994 | * This routine is a generic completion callback function for ELS commands. | ||
1995 | * Specifically, it is the callback function which does not need to perform | ||
1996 | * any command specific operations. It is currently used by the ELS command | ||
1997 | * issuing routines for the ELS State Change Request (SCR), | ||
1998 | * lpfc_issue_els_scr(), and the ELS Fibre Channel Address Resolution | ||
1999 | * Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). Other than | ||
2000 | * certain debug logging, this callback function simply invokes the | ||
2001 | * lpfc_els_chk_latt() routine to check whether the link went down during the | ||
2002 | * discovery process. | ||
2003 | **/ | ||
1566 | static void | 2004 | static void |
1567 | lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | 2005 | lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, |
1568 | struct lpfc_iocbq *rspiocb) | 2006 | struct lpfc_iocbq *rspiocb) |
@@ -1587,6 +2025,28 @@ lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
1587 | return; | 2025 | return; |
1588 | } | 2026 | } |
1589 | 2027 | ||
2028 | /** | ||
2029 | * lpfc_issue_els_scr: Issue a scr to a node on a vport. | ||
2030 | * @vport: pointer to a host virtual N_Port data structure. | ||
2031 | * @nportid: N_Port identifier to the remote node. | ||
2032 | * @retry: number of retries to the command IOCB. | ||
2033 | * | ||
2034 | * This routine issues a State Change Request (SCR) to a fabric node | ||
2035 | * on a @vport. The remote node @nportid is passed into the function. It | ||
2036 | * first searches the @vport node list to find the matching ndlp. If no such | ||
2037 | * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An | ||
2038 | * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb() | ||
2039 | * routine is invoked to send the SCR IOCB. | ||
2040 | * | ||
2041 | * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp | ||
2042 | * will be incremented by 1 for holding the ndlp and the reference to ndlp | ||
2043 | * will be stored into the context1 field of the IOCB for the completion | ||
2044 | * callback function to the SCR ELS command. | ||
2045 | * | ||
2046 | * Return code | ||
2047 | * 0 - Successfully issued scr command | ||
2048 | * 1 - Failed to issue scr command | ||
2049 | **/ | ||
1590 | int | 2050 | int |
1591 | lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) | 2051 | lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) |
1592 | { | 2052 | { |
@@ -1659,6 +2119,28 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) | |||
1659 | return 0; | 2119 | return 0; |
1660 | } | 2120 | } |
1661 | 2121 | ||
2122 | /** | ||
2123 | * lpfc_issue_els_farpr: Issue a farp to a node on a vport. | ||
2124 | * @vport: pointer to a host virtual N_Port data structure. | ||
2125 | * @nportid: N_Port identifier to the remote node. | ||
2126 | * @retry: number of retries to the command IOCB. | ||
2127 | * | ||
2128 | * This routine issues a Fibre Channel Address Resolution Response | ||
2129 | * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid) | ||
2130 | * is passed into the function. It first searches the @vport node list to find | ||
2131 | * the matching ndlp. If no such ndlp is found, a new ndlp shall be created | ||
2132 | * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the | ||
2133 | * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command. | ||
2134 | * | ||
2135 | * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp | ||
2136 | * will be incremented by 1 for holding the ndlp and the reference to ndlp | ||
2137 | * will be stored into the context1 field of the IOCB for the completion | ||
2138 | * callback function to the FARPR ELS command. | ||
2139 | * | ||
2140 | * Return code | ||
2141 | * 0 - Successfully issued farpr command | ||
2142 | * 1 - Failed to issue farpr command | ||
2143 | **/ | ||
1662 | static int | 2144 | static int |
1663 | lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) | 2145 | lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) |
1664 | { | 2146 | { |
@@ -1748,6 +2230,18 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) | |||
1748 | return 0; | 2230 | return 0; |
1749 | } | 2231 | } |
1750 | 2232 | ||
2233 | /** | ||
2234 | * lpfc_cancel_retry_delay_tmo: Cancel the timer with delayed iocb-cmd retry. | ||
2235 | * @vport: pointer to a host virtual N_Port data structure. | ||
2236 | * @nlp: pointer to a node-list data structure. | ||
2237 | * | ||
2238 | * This routine cancels the timer with a delayed IOCB-command retry for | ||
2239 | * a @vport's @nlp. It stops the timer for the delayed function retry and | ||
2240 | * removes the ELS retry event if it is present. In addition, if the | ||
2241 | * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB | ||
2242 | * commands are sent for the @vport's nodes that require issuing discovery | ||
2243 | * ADISC. | ||
2244 | **/ | ||
1751 | void | 2245 | void |
1752 | lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) | 2246 | lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) |
1753 | { | 2247 | { |
@@ -1775,25 +2269,36 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) | |||
1775 | if (vport->port_state < LPFC_VPORT_READY) { | 2269 | if (vport->port_state < LPFC_VPORT_READY) { |
1776 | /* Check if there are more ADISCs to be sent */ | 2270 | /* Check if there are more ADISCs to be sent */ |
1777 | lpfc_more_adisc(vport); | 2271 | lpfc_more_adisc(vport); |
1778 | if ((vport->num_disc_nodes == 0) && | ||
1779 | (vport->fc_npr_cnt)) | ||
1780 | lpfc_els_disc_plogi(vport); | ||
1781 | } else { | 2272 | } else { |
1782 | /* Check if there are more PLOGIs to be sent */ | 2273 | /* Check if there are more PLOGIs to be sent */ |
1783 | lpfc_more_plogi(vport); | 2274 | lpfc_more_plogi(vport); |
1784 | } | 2275 | if (vport->num_disc_nodes == 0) { |
1785 | if (vport->num_disc_nodes == 0) { | 2276 | spin_lock_irq(shost->host_lock); |
1786 | spin_lock_irq(shost->host_lock); | 2277 | vport->fc_flag &= ~FC_NDISC_ACTIVE; |
1787 | vport->fc_flag &= ~FC_NDISC_ACTIVE; | 2278 | spin_unlock_irq(shost->host_lock); |
1788 | spin_unlock_irq(shost->host_lock); | 2279 | lpfc_can_disctmo(vport); |
1789 | lpfc_can_disctmo(vport); | 2280 | lpfc_end_rscn(vport); |
1790 | lpfc_end_rscn(vport); | 2281 | } |
1791 | } | 2282 | } |
1792 | } | 2283 | } |
1793 | } | 2284 | } |
1794 | return; | 2285 | return; |
1795 | } | 2286 | } |
1796 | 2287 | ||
2288 | /** | ||
2289 | * lpfc_els_retry_delay: Timer function with a ndlp delayed function timer. | ||
2290 | * @ptr: holder for the pointer to the timer function associated data (ndlp). | ||
2291 | * | ||
2292 | * This routine is invoked by the ndlp delayed-function timer to check | ||
2293 | * whether there are any pending ELS retry events with the node. If not, it | ||
2294 | * simply returns. Otherwise, if there is at least one ELS delayed event, it | ||
2295 | * adds the delayed events to the HBA work list and invokes the | ||
2296 | * lpfc_worker_wake_up() routine to wake up worker thread to process the | ||
2297 | * event. Note that lpfc_nlp_get() is called before posting the event to | ||
2298 | * the work list to hold a reference count on the ndlp, guaranteeing that the | ||
2299 | * reference to the ndlp will still be available when the worker thread gets | ||
2300 | * to the event associated with the ndlp. | ||
2301 | **/ | ||
1797 | void | 2302 | void |
1798 | lpfc_els_retry_delay(unsigned long ptr) | 2303 | lpfc_els_retry_delay(unsigned long ptr) |
1799 | { | 2304 | { |
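The delayed-retry comments describe a classic timer-to-worker handoff: the timer context cannot issue the ELS itself, so it takes a node reference, queues an event, and wakes the worker, which drops the reference after processing. A single-threaded, self-contained C sketch of that handoff is below; the queue, counters and function names are illustrative and not the driver's work-list infrastructure.

#include <stdio.h>

struct node {
        int refcount;
};

struct retry_event {
        struct node *ndlp;     /* reference held while the event is queued */
        int pending;
};

static struct retry_event queue;   /* stand-in for the driver work list */

static void node_get(struct node *n) { n->refcount++; }
static void node_put(struct node *n)
{
        if (--n->refcount == 0)
                printf("node freed\n");
}

/* Timer context (compare lpfc_els_retry_delay()): take a reference first so
 * the node cannot vanish before the worker gets to the event, then queue it. */
static void retry_timer_fires(struct node *ndlp)
{
        node_get(ndlp);
        queue.ndlp = ndlp;
        queue.pending = 1;
        /* ...wake the worker thread here... */
}

/* Worker context (compare lpfc_els_retry_delay_handler()): process the event
 * and drop the reference the timer took. */
static void worker_runs(void)
{
        if (!queue.pending)
                return;
        printf("re-issuing the delayed ELS command\n");
        node_put(queue.ndlp);
        queue.pending = 0;
}

int main(void)
{
        struct node ndlp = { .refcount = 1 };

        retry_timer_fires(&ndlp);
        worker_runs();
        return 0;
}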
@@ -1822,6 +2327,15 @@ lpfc_els_retry_delay(unsigned long ptr) | |||
1822 | return; | 2327 | return; |
1823 | } | 2328 | } |
1824 | 2329 | ||
2330 | /** | ||
2331 | * lpfc_els_retry_delay_handler: Work thread handler for ndlp delayed function. | ||
2332 | * @ndlp: pointer to a node-list data structure. | ||
2333 | * | ||
2334 | * This routine is the worker-thread handler for processing the @ndlp delayed | ||
2335 | * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves | ||
2336 | * the last ELS command from the associated ndlp and invokes the proper ELS | ||
2337 | * function according to the delayed ELS command to retry the command. | ||
2338 | **/ | ||
1825 | void | 2339 | void |
1826 | lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) | 2340 | lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) |
1827 | { | 2341 | { |
@@ -1884,6 +2398,27 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) | |||
1884 | return; | 2398 | return; |
1885 | } | 2399 | } |
1886 | 2400 | ||
2401 | /** | ||
2402 | * lpfc_els_retry: Make retry decision on an els command iocb. | ||
2403 | * @phba: pointer to lpfc hba data structure. | ||
2404 | * @cmdiocb: pointer to lpfc command iocb data structure. | ||
2405 | * @rspiocb: pointer to lpfc response iocb data structure. | ||
2406 | * | ||
2407 | * This routine makes a retry decision on an ELS command IOCB, which has | ||
2408 | * failed. The following ELS IOCBs use this function for retrying the command | ||
2409 | * when a previously issued command responded with an error status: FLOGI, PLOGI, | ||
2410 | * PRLI, ADISC, LOGO, and FDISC. Based on the ELS command type and the | ||
2411 | * returned error status, it makes the decision whether a retry shall be | ||
2412 | * issued for the command, and whether a retry shall be made immediately or | ||
2413 | * delayed. In the former case, the corresponding ELS command issuing-function | ||
2414 | * is called to retry the command. In the latter case, the ELS command shall | ||
2415 | * be posted to the ndlp delayed event and the delayed-function timer set on | ||
2416 | * the ndlp for issuing the delayed command. | ||
2417 | * | ||
2418 | * Return code | ||
2419 | * 0 - No retry of els command is made | ||
2420 | * 1 - Immediate or delayed retry of els command is made | ||
2421 | **/ | ||
1887 | static int | 2422 | static int |
1888 | lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | 2423 | lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, |
1889 | struct lpfc_iocbq *rspiocb) | 2424 | struct lpfc_iocbq *rspiocb) |
@@ -2051,7 +2586,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
2051 | (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) | 2586 | (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) |
2052 | ) { | 2587 | ) { |
2053 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, | 2588 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, |
2054 | "0123 FDISC Failed (x%x). " | 2589 | "0122 FDISC Failed (x%x). " |
2055 | "Fabric Detected Bad WWN\n", | 2590 | "Fabric Detected Bad WWN\n", |
2056 | stat.un.lsRjtError); | 2591 | stat.un.lsRjtError); |
2057 | lpfc_vport_set_state(vport, | 2592 | lpfc_vport_set_state(vport, |
@@ -2182,12 +2717,26 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
2182 | return 0; | 2717 | return 0; |
2183 | } | 2718 | } |
2184 | 2719 | ||
2720 | /** | ||
2721 | * lpfc_els_free_data: Free lpfc dma buffer and data structure with an iocb. | ||
2722 | * @phba: pointer to lpfc hba data structure. | ||
2723 | * @buf_ptr1: pointer to the lpfc DMA buffer data structure. | ||
2724 | * | ||
2725 | * This routine releases the lpfc DMA (Direct Memory Access) buffer(s) | ||
2726 | * associated with a command IOCB back to the lpfc DMA buffer pool. It first | ||
2727 | * checks to see whether there is a lpfc DMA buffer associated with the | ||
2728 | * response of the command IOCB. If so, it will be released before releasing | ||
2729 | * the lpfc DMA buffer associated with the IOCB itself. | ||
2730 | * | ||
2731 | * Return code | ||
2732 | * 0 - Successfully released lpfc DMA buffer (currently, always return 0) | ||
2733 | **/ | ||
2185 | static int | 2734 | static int |
2186 | lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1) | 2735 | lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1) |
2187 | { | 2736 | { |
2188 | struct lpfc_dmabuf *buf_ptr; | 2737 | struct lpfc_dmabuf *buf_ptr; |
2189 | 2738 | ||
2190 | /* Free the response before processing the command. */ | 2739 | /* Free the response before processing the command. */ |
2191 | if (!list_empty(&buf_ptr1->list)) { | 2740 | if (!list_empty(&buf_ptr1->list)) { |
2192 | list_remove_head(&buf_ptr1->list, buf_ptr, | 2741 | list_remove_head(&buf_ptr1->list, buf_ptr, |
2193 | struct lpfc_dmabuf, | 2742 | struct lpfc_dmabuf, |
@@ -2200,6 +2749,18 @@ lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1) | |||
2200 | return 0; | 2749 | return 0; |
2201 | } | 2750 | } |
2202 | 2751 | ||
2752 | /** | ||
2753 | * lpfc_els_free_bpl: Free lpfc dma buffer and data structure with bpl. | ||
2754 | * @phba: pointer to lpfc hba data structure. | ||
2755 | * @buf_ptr: pointer to the lpfc dma buffer data structure. | ||
2756 | * | ||
2757 | * This routine releases the lpfc Direct Memory Access (DMA) buffer | ||
2758 | * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer | ||
2759 | * pool. | ||
2760 | * | ||
2761 | * Return code | ||
2762 | * 0 - Successfully released lpfc DMA buffer (currently, always return 0) | ||
2763 | **/ | ||
2203 | static int | 2764 | static int |
2204 | lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr) | 2765 | lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr) |
2205 | { | 2766 | { |
@@ -2208,6 +2769,33 @@ lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr) | |||
2208 | return 0; | 2769 | return 0; |
2209 | } | 2770 | } |
2210 | 2771 | ||
2772 | /** | ||
2773 | * lpfc_els_free_iocb: Free a command iocb and its associated resources. | ||
2774 | * @phba: pointer to lpfc hba data structure. | ||
2775 | * @elsiocb: pointer to lpfc els command iocb data structure. | ||
2776 | * | ||
2777 | * This routine frees a command IOCB and its associated resources. The | ||
2778 | * command IOCB data structure contains references to various associated | ||
2779 | * resources; these fields must be set to NULL if the associated reference | ||
2780 | * is not present: | ||
2781 | * context1 - reference to ndlp | ||
2782 | * context2 - reference to cmd | ||
2783 | * context2->next - reference to rsp | ||
2784 | * context3 - reference to bpl | ||
2785 | * | ||
2786 | * It first properly decrements the reference count held on ndlp for the | ||
2787 | * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not | ||
2788 | * set, it invokes the lpfc_els_free_data() routine to release the Direct | ||
2789 | * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it | ||
2790 | * adds the DMA buffer to the @phba data structure for delayed release. | ||
2791 | * If reference to the Buffer Pointer List (BPL) is present, the | ||
2792 | * lpfc_els_free_bpl() routine is invoked to release the DMA memory | ||
2793 | * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is | ||
2794 | * invoked to release the IOCB data structure back to @phba IOCBQ list. | ||
2795 | * | ||
2796 | * Return code | ||
2797 | * 0 - Success (currently, always return 0) | ||
2798 | **/ | ||
2211 | int | 2799 | int |
2212 | lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) | 2800 | lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) |
2213 | { | 2801 | { |
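The free routine's comment spells out an ownership layout (context1 = ndlp reference, context2 = command buffer with the response chained to it, context3 = BPL) and a release order. Here is a reduced C sketch of that teardown order, with invented buffer types standing in for the lpfc DMA structures; it is a model of the contract described above, not the driver code.

#include <stdlib.h>
#include <stdio.h>

struct dmabuf {
        struct dmabuf *next;     /* the response buffer is chained to the command */
};

struct els_iocb {
        void *context1;          /* node reference (dropped first; not modelled here) */
        struct dmabuf *context2; /* command buffer, with the response on ->next */
        struct dmabuf *context3; /* buffer pointer list */
};

static void free_dmabuf(struct dmabuf *b)
{
        free(b);
}

/* Release order modelled on the comment above: response before command,
 * then the BPL, then the IOCB wrapper itself. */
static void free_els_iocb(struct els_iocb *iocb)
{
        if (iocb->context2) {
                if (iocb->context2->next)
                        free_dmabuf(iocb->context2->next);
                free_dmabuf(iocb->context2);
                iocb->context2 = NULL;
        }
        if (iocb->context3) {
                free_dmabuf(iocb->context3);
                iocb->context3 = NULL;
        }
        free(iocb);
}

int main(void)
{
        struct els_iocb *iocb = calloc(1, sizeof(*iocb));
        struct dmabuf *cmd = calloc(1, sizeof(*cmd));
        struct dmabuf *rsp = calloc(1, sizeof(*rsp));
        struct dmabuf *bpl = calloc(1, sizeof(*bpl));

        if (!iocb || !cmd || !rsp || !bpl)
                return 1;
        cmd->next = rsp;
        iocb->context2 = cmd;
        iocb->context3 = bpl;
        free_els_iocb(iocb);
        printf("iocb and buffers released\n");
        return 0;
}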
@@ -2274,6 +2862,23 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) | |||
2274 | return 0; | 2862 | return 0; |
2275 | } | 2863 | } |
2276 | 2864 | ||
2865 | /** | ||
2866 | * lpfc_cmpl_els_logo_acc: Completion callback function to logo acc response. | ||
2867 | * @phba: pointer to lpfc hba data structure. | ||
2868 | * @cmdiocb: pointer to lpfc command iocb data structure. | ||
2869 | * @rspiocb: pointer to lpfc response iocb data structure. | ||
2870 | * | ||
2871 | * This routine is the completion callback function to the Logout (LOGO) | ||
2872 | * Accept (ACC) Response ELS command. This routine is invoked to indicate | ||
2873 | * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to | ||
2874 | * release the ndlp if it has the last reference remaining (reference count | ||
2875 | * is 1). If that succeeds (meaning the ndlp was released), it sets the IOCB context1 | ||
2876 | * field to NULL to inform the following lpfc_els_free_iocb() routine no | ||
2877 | * ndlp reference count needs to be decremented. Otherwise, the ndlp | ||
2878 | * reference use-count shall be decremented by the lpfc_els_free_iocb() | ||
2879 | * routine. Finally, the lpfc_els_free_iocb() is invoked to release the | ||
2880 | * IOCB data structure. | ||
2881 | **/ | ||
2277 | static void | 2882 | static void |
2278 | lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | 2883 | lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, |
2279 | struct lpfc_iocbq *rspiocb) | 2884 | struct lpfc_iocbq *rspiocb) |
@@ -2311,6 +2916,19 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
2311 | return; | 2916 | return; |
2312 | } | 2917 | } |
2313 | 2918 | ||
2919 | /** | ||
2920 | * lpfc_mbx_cmpl_dflt_rpi: Completion callbk func for unreg dflt rpi mbox cmd. | ||
2921 | * @phba: pointer to lpfc hba data structure. | ||
2922 | * @pmb: pointer to the driver internal queue element for mailbox command. | ||
2923 | * | ||
2924 | * This routine is the completion callback function for unregister default | ||
2925 | * RPI (Remote Port Index) mailbox command to the @phba. It simply releases | ||
2926 | * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and | ||
2927 | * decrements the ndlp reference count held for this completion callback | ||
2928 | * function. After that, it invokes the lpfc_nlp_not_used() to check | ||
2929 | * whether there is only one reference left on the ndlp. If so, it will | ||
2930 | * perform one more decrement and trigger the release of the ndlp. | ||
2931 | **/ | ||
2314 | void | 2932 | void |
2315 | lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | 2933 | lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) |
2316 | { | 2934 | { |
@@ -2332,6 +2950,22 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
2332 | return; | 2950 | return; |
2333 | } | 2951 | } |
2334 | 2952 | ||
2953 | /** | ||
2954 | * lpfc_cmpl_els_rsp: Completion callback function for els response iocb cmd. | ||
2955 | * @phba: pointer to lpfc hba data structure. | ||
2956 | * @cmdiocb: pointer to lpfc command iocb data structure. | ||
2957 | * @rspiocb: pointer to lpfc response iocb data structure. | ||
2958 | * | ||
2959 | * This routine is the completion callback function for ELS Response IOCB | ||
2960 | * command. In normal case, this callback function just properly sets the | ||
2961 | * nlp_flag bitmap in the ndlp data structure, if the mbox command reference | ||
2962 | * field in the command IOCB is not NULL, the referred mailbox command will | ||
2963 | * be sent out, and then invokes the lpfc_els_free_iocb() routine to release | ||
2964 | * the IOCB. Under error conditions, such as when a LS_RJT is returned or a | ||
2965 | * link down event occurred during the discovery, the lpfc_nlp_not_used() | ||
2966 | * routine shall be invoked to try to release the ndlp if no other threads | ||
2967 | * are currently referring to it. | ||
2968 | **/ | ||
2335 | static void | 2969 | static void |
2336 | lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | 2970 | lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, |
2337 | struct lpfc_iocbq *rspiocb) | 2971 | struct lpfc_iocbq *rspiocb) |
@@ -2487,6 +3121,31 @@ out: | |||
2487 | return; | 3121 | return; |
2488 | } | 3122 | } |
2489 | 3123 | ||
3124 | /** | ||
3125 | * lpfc_els_rsp_acc: Prepare and issue an acc response iocb command. | ||
3126 | * @vport: pointer to a host virtual N_Port data structure. | ||
3127 | * @flag: the els command code to be accepted. | ||
3128 | * @oldiocb: pointer to the original lpfc command iocb data structure. | ||
3129 | * @ndlp: pointer to a node-list data structure. | ||
3130 | * @mbox: pointer to the driver internal queue element for mailbox command. | ||
3131 | * | ||
3132 | * This routine prepares and issues an Accept (ACC) response IOCB | ||
3133 | * command. It uses the @flag to properly set up the IOCB field for the | ||
3134 | * specific ACC response command to be issued and invokes the | ||
3135 | * lpfc_sli_issue_iocb() routine to send out the ACC response IOCB. If a | ||
3136 | * @mbox pointer is passed in, it will be put into the context_un.mbox | ||
3137 | * field of the IOCB for the completion callback function to issue the | ||
3138 | * mailbox command to the HBA later when callback is invoked. | ||
3139 | * | ||
3140 | * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp | ||
3141 | * will be incremented by 1 for holding the ndlp and the reference to ndlp | ||
3142 | * will be stored into the context1 field of the IOCB for the completion | ||
3143 | * callback function to the corresponding response ELS IOCB command. | ||
3144 | * | ||
3145 | * Return code | ||
3146 | * 0 - Successfully issued acc response | ||
3147 | * 1 - Failed to issue acc response | ||
3148 | **/ | ||
2490 | int | 3149 | int |
2491 | lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, | 3150 | lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, |
2492 | struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, | 3151 | struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, |
@@ -2601,6 +3260,28 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, | |||
2601 | return 0; | 3260 | return 0; |
2602 | } | 3261 | } |
2603 | 3262 | ||
3263 | /** | ||
3264 | * lpfc_els_rsp_reject: Prepare and issue a rjt response iocb command. | ||
3265 | * @vport: pointer to a virtual N_Port data structure. | ||
3266 | * @rejectError: | ||
3267 | * @oldiocb: pointer to the original lpfc command iocb data structure. | ||
3268 | * @ndlp: pointer to a node-list data structure. | ||
3269 | * @mbox: pointer to the driver internal queue element for mailbox command. | ||
3270 | * | ||
3271 | * This routine prepares and issues a Reject (RJT) response IOCB | ||
3272 | * command. If a @mbox pointer is passed in, it will be put into the | ||
3273 | * context_un.mbox field of the IOCB for the completion callback function | ||
3274 | * to issue to the HBA later. | ||
3275 | * | ||
3276 | * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp | ||
3277 | * will be incremented by 1 for holding the ndlp and the reference to ndlp | ||
3278 | * will be stored into the context1 field of the IOCB for the completion | ||
3279 | * callback function to the reject response ELS IOCB command. | ||
3280 | * | ||
3281 | * Return code | ||
3282 | * 0 - Successfully issued reject response | ||
3283 | * 1 - Failed to issue reject response | ||
3284 | **/ | ||
2604 | int | 3285 | int |
2605 | lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, | 3286 | lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, |
2606 | struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, | 3287 | struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, |
@@ -2660,6 +3341,25 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, | |||
2660 | return 0; | 3341 | return 0; |
2661 | } | 3342 | } |
2662 | 3343 | ||
3344 | /** | ||
3345 | * lpfc_els_rsp_adisc_acc: Prepare and issue acc response to adisc iocb cmd. | ||
3346 | * @vport: pointer to a virtual N_Port data structure. | ||
3347 | * @oldiocb: pointer to the original lpfc command iocb data structure. | ||
3348 | * @ndlp: pointer to a node-list data structure. | ||
3349 | * | ||
3350 | * This routine prepares and issues an Accept (ACC) response to Address | ||
3351 | * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB | ||
3352 | * and invokes the lpfc_sli_issue_iocb() routine to send out the command. | ||
3353 | * | ||
3354 | * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp | ||
3355 | * will be incremented by 1 for holding the ndlp and the reference to ndlp | ||
3356 | * will be stored into the context1 field of the IOCB for the completion | ||
3357 | * callback function to the ADISC Accept response ELS IOCB command. | ||
3358 | * | ||
3359 | * Return code | ||
3360 | * 0 - Successfully issued acc adisc response | ||
3361 | * 1 - Failed to issue adisc acc response | ||
3362 | **/ | ||
2663 | int | 3363 | int |
2664 | lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, | 3364 | lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, |
2665 | struct lpfc_nodelist *ndlp) | 3365 | struct lpfc_nodelist *ndlp) |
@@ -2716,6 +3416,25 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, | |||
2716 | return 0; | 3416 | return 0; |
2717 | } | 3417 | } |
2718 | 3418 | ||
3419 | /** | ||
3420 | * lpfc_els_rsp_prli_acc: Prepare and issue acc response to prli iocb cmd. | ||
3421 | * @vport: pointer to a virtual N_Port data structure. | ||
3422 | * @oldiocb: pointer to the original lpfc command iocb data structure. | ||
3423 | * @ndlp: pointer to a node-list data structure. | ||
3424 | * | ||
3425 | * This routine prepares and issues an Accept (ACC) response to Process | ||
3426 | * Login (PRLI) ELS command. It simply prepares the payload of the IOCB | ||
3427 | * and invokes the lpfc_sli_issue_iocb() routine to send out the command. | ||
3428 | * | ||
3429 | * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp | ||
3430 | * will be incremented by 1 for holding the ndlp and the reference to ndlp | ||
3431 | * will be stored into the context1 field of the IOCB for the completion | ||
3432 | * callback function to the PRLI Accept response ELS IOCB command. | ||
3433 | * | ||
3434 | * Return code | ||
3435 | * 0 - Successfully issued acc prli response | ||
3436 | * 1 - Failed to issue acc prli response | ||
3437 | **/ | ||
2719 | int | 3438 | int |
2720 | lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, | 3439 | lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, |
2721 | struct lpfc_nodelist *ndlp) | 3440 | struct lpfc_nodelist *ndlp) |
@@ -2795,6 +3514,32 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, | |||
2795 | return 0; | 3514 | return 0; |
2796 | } | 3515 | } |
2797 | 3516 | ||
3517 | /** | ||
3518 | * lpfc_els_rsp_rnid_acc: Issue rnid acc response iocb command. | ||
3519 | * @vport: pointer to a virtual N_Port data structure. | ||
3520 | * @format: rnid command format. | ||
3521 | * @oldiocb: pointer to the original lpfc command iocb data structure. | ||
3522 | * @ndlp: pointer to a node-list data structure. | ||
3523 | * | ||
3524 | * This routine issues a Request Node Identification Data (RNID) Accept | ||
3525 | * (ACC) response. It constructs the RNID ACC response command according to | ||
3526 | * the proper @format and then calls the lpfc_sli_issue_iocb() routine to | ||
3527 | * issue the response. Note that this command does not need to hold the ndlp | ||
3528 | * reference count for the callback. So, the ndlp reference count taken by | ||
3529 | * the lpfc_prep_els_iocb() routine is put back and the context1 field of | ||
3530 | * IOCB is set to NULL to indicate to the lpfc_els_free_iocb() routine that | ||
3531 | * there is no ndlp reference available. | ||
3532 | * | ||
3533 | * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp | ||
3534 | * will be incremented by 1 for holding the ndlp and the reference to ndlp | ||
3535 | * will be stored into the context1 field of the IOCB for the completion | ||
3536 | * callback function. However, for the RNID Accept Response ELS command, | ||
3537 | * this is undone later by this routine after the IOCB is allocated. | ||
3538 | * | ||
3539 | * Return code | ||
3540 | * 0 - Successfully issued acc rnid response | ||
3541 | * 1 - Failed to issue acc rnid response | ||
3542 | **/ | ||
2798 | static int | 3543 | static int |
2799 | lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, | 3544 | lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, |
2800 | struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) | 3545 | struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) |
@@ -2875,6 +3620,25 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, | |||
2875 | return 0; | 3620 | return 0; |
2876 | } | 3621 | } |
2877 | 3622 | ||
3623 | /** | ||
3624 | * lpfc_els_disc_adisc: Issue remaining adisc iocbs to npr nodes of a vport. | ||
3625 | * @vport: pointer to a host virtual N_Port data structure. | ||
3626 | * | ||
3627 | * This routine issues Address Discover (ADISC) ELS commands to those | ||
3628 | * N_Ports which are in node port recovery state and for which ADISC has not | ||
3629 | * yet been issued on the @vport. Each time an ELS ADISC IOCB is issued by | ||
3630 | * invoking the lpfc_issue_els_adisc() routine, the per-@vport discover count | ||
3631 | * (num_disc_nodes) shall be incremented. If num_disc_nodes reaches the | ||
3632 | * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag will | ||
3633 | * be marked with the FC_NLP_MORE bit and the process of issuing the remaining | ||
3634 | * ADISC IOCBs quits, to be picked up later. On the other hand, if after | ||
3635 | * walking through all the ndlps on the @vport no ADISC IOCB was issued, the | ||
3636 | * FC_NLP_MORE bit shall be cleared from the @vport fc_flag, indicating there | ||
3637 | * are no more ADISCs to be sent. | ||
3638 | * | ||
3639 | * Return code | ||
3640 | * The number of N_Ports with adisc issued. | ||
3641 | **/ | ||
2878 | int | 3642 | int |
2879 | lpfc_els_disc_adisc(struct lpfc_vport *vport) | 3643 | lpfc_els_disc_adisc(struct lpfc_vport *vport) |
2880 | { | 3644 | { |
@@ -2914,6 +3678,25 @@ lpfc_els_disc_adisc(struct lpfc_vport *vport) | |||
2914 | return sentadisc; | 3678 | return sentadisc; |
2915 | } | 3679 | } |
2916 | 3680 | ||
3681 | /** | ||
3682 | * lpfc_els_disc_plogi: Issue plogi for all npr nodes of a vport before adisc. | ||
3683 | * @vport: pointer to a host virtual N_Port data structure. | ||
3684 | * | ||
3685 | * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports | ||
3686 | * which are in node port recovery state on a @vport. Each time an ELS | ||
3687 | * PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine, | ||
3688 | * the per-@vport discover count (num_disc_nodes) shall be | ||
3689 | * incremented. If num_disc_nodes reaches the pre-configured threshold | ||
3690 | * (cfg_discovery_threads), the @vport fc_flag will be marked with the | ||
3691 | * FC_NLP_MORE bit and the process of issuing the remaining PLOGI IOCBs | ||
3692 | * quits, to be picked up later. On the other hand, if after walking through | ||
3693 | * all the ndlps on the @vport no PLOGI IOCB was issued, the FC_NLP_MORE bit | ||
3694 | * shall be cleared from the @vport fc_flag, indicating there are no more | ||
3695 | * PLOGIs to be sent. | ||
3696 | * | ||
3697 | * Return code | ||
3698 | * The number of N_Ports with plogi issued. | ||
3699 | **/ | ||
2917 | int | 3700 | int |
2918 | lpfc_els_disc_plogi(struct lpfc_vport *vport) | 3701 | lpfc_els_disc_plogi(struct lpfc_vport *vport) |
2919 | { | 3702 | { |
@@ -2954,6 +3737,15 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport) | |||
2954 | return sentplogi; | 3737 | return sentplogi; |
2955 | } | 3738 | } |
2956 | 3739 | ||
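[Illustrative sketch, not part of the patch] The two discovery routines above share one throttling pattern: issue commands until the per-pass threshold (cfg_discovery_threads) is reached, remember leftover work via an FC_NLP_MORE-style flag, and finish on a later pass. The standalone C sketch below models only that pattern; it is not lpfc code, and all names in it (issue_discovery, needs_discovery, MORE_WORK) are invented for illustration.

```c
#include <stdbool.h>
#include <stdio.h>

#define MORE_WORK 0x1	/* hypothetical stand-in for FC_NLP_MORE */

struct node { bool needs_discovery; bool cmd_issued; };

/* Issue discovery commands until the per-pass threshold is hit; the
 * MORE_WORK bit records that another pass is still required. */
static int issue_discovery(struct node *nodes, int nnodes, int threshold,
			   int *num_disc_nodes, unsigned int *fc_flag)
{
	int sent = 0;

	for (int i = 0; i < nnodes; i++) {
		if (!nodes[i].needs_discovery || nodes[i].cmd_issued)
			continue;
		nodes[i].cmd_issued = true;	/* stands in for issuing an ADISC/PLOGI */
		sent++;
		if (++(*num_disc_nodes) >= threshold) {
			*fc_flag |= MORE_WORK;	/* defer the rest to a later pass */
			return sent;
		}
	}
	*fc_flag &= ~MORE_WORK;			/* walked every node: nothing left over */
	return sent;
}

int main(void)
{
	struct node nodes[4] = { {true, false}, {true, false},
				 {true, false}, {false, false} };
	int num_disc_nodes = 0;
	unsigned int fc_flag = 0;

	int sent = issue_discovery(nodes, 4, 2, &num_disc_nodes, &fc_flag);
	printf("sent=%d more=%d\n", sent, !!(fc_flag & MORE_WORK));
	return 0;
}
```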
3740 | /** | ||
3741 | * lpfc_els_flush_rscn: Clean up any rscn activities with a vport. | ||
3742 | * @vport: pointer to a host virtual N_Port data structure. | ||
3743 | * | ||
3744 | * This routine cleans up any Registration State Change Notification | ||
3745 | * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the | ||
3746 | * @vport together with the host_lock is used to prevent multiple threads | ||
3747 | * from trying to access the RSCN array on the same @vport at the same time. | ||
3748 | **/ | ||
2957 | void | 3749 | void |
2958 | lpfc_els_flush_rscn(struct lpfc_vport *vport) | 3750 | lpfc_els_flush_rscn(struct lpfc_vport *vport) |
2959 | { | 3751 | { |
@@ -2984,6 +3776,18 @@ lpfc_els_flush_rscn(struct lpfc_vport *vport) | |||
2984 | vport->fc_rscn_flush = 0; | 3776 | vport->fc_rscn_flush = 0; |
2985 | } | 3777 | } |
2986 | 3778 | ||
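[Illustrative sketch, not part of the patch] The comment above describes guarding the RSCN array with a flag (fc_rscn_flush) that is set and tested under host_lock so only one thread flushes at a time. Below is a minimal user-space model of that guard, assuming a pthread mutex in place of the host lock; none of the names are real lpfc symbols.

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct vport_model {
	pthread_mutex_t host_lock;
	bool fc_rscn_flush;	/* set while one thread owns the RSCN array */
	int fc_rscn_id_cnt;	/* number of pending RSCN entries */
};

static bool flush_rscn(struct vport_model *vp)
{
	pthread_mutex_lock(&vp->host_lock);
	if (vp->fc_rscn_flush) {		/* someone else is already flushing */
		pthread_mutex_unlock(&vp->host_lock);
		return false;
	}
	vp->fc_rscn_flush = true;		/* claim the RSCN array */
	pthread_mutex_unlock(&vp->host_lock);

	vp->fc_rscn_id_cnt = 0;			/* drop the pending entries */

	vp->fc_rscn_flush = false;		/* release the claim */
	return true;
}

int main(void)
{
	struct vport_model vp = { PTHREAD_MUTEX_INITIALIZER, false, 3 };

	printf("flushed=%d remaining=%d\n", flush_rscn(&vp), vp.fc_rscn_id_cnt);
	return 0;
}
```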
3779 | /** | ||
3780 | * lpfc_rscn_payload_check: Check whether there is a pending rscn to a did. | ||
3781 | * @vport: pointer to a host virtual N_Port data structure. | ||
3782 | * @did: remote destination port identifier. | ||
3783 | * | ||
3784 | * This routine checks whether there is any pending Registration State | ||
3785 | * Change Notification (RSCN) to a @did on @vport. | ||
3786 | * | ||
3787 | * Return code | ||
3788 | * Non-zero - The @did matched a pending rscn | ||
3789 | * 0 - The @did did not match any pending rscn | ||
3790 | **/ | ||
2987 | int | 3791 | int |
2988 | lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) | 3792 | lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) |
2989 | { | 3793 | { |
@@ -3053,6 +3857,17 @@ return_did_out: | |||
3053 | return did; | 3857 | return did; |
3054 | } | 3858 | } |
3055 | 3859 | ||
3860 | /** | ||
3861 | * lpfc_rscn_recovery_check: Send recovery event to vport nodes matching rscn | ||
3862 | * @vport: pointer to a host virtual N_Port data structure. | ||
3863 | * | ||
3864 | * This routine sends a recovery (NLP_EVT_DEVICE_RECOVERY) event to the | ||
3865 | * state machine for those nodes of a @vport that have a pending RSCN | ||
3866 | * (Registration State Change Notification). | ||
3867 | * | ||
3868 | * Return code | ||
3869 | * 0 - Successful (currently always returns 0) | ||
3870 | **/ | ||
3056 | static int | 3871 | static int |
3057 | lpfc_rscn_recovery_check(struct lpfc_vport *vport) | 3872 | lpfc_rscn_recovery_check(struct lpfc_vport *vport) |
3058 | { | 3873 | { |
@@ -3071,6 +3886,28 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport) | |||
3071 | return 0; | 3886 | return 0; |
3072 | } | 3887 | } |
3073 | 3888 | ||
3889 | /** | ||
3890 | * lpfc_els_rcv_rscn: Process an unsolicited rscn iocb. | ||
3891 | * @vport: pointer to a host virtual N_Port data structure. | ||
3892 | * @cmdiocb: pointer to lpfc command iocb data structure. | ||
3893 | * @ndlp: pointer to a node-list data structure. | ||
3894 | * | ||
3895 | * This routine processes an unsolicited RSCN (Registration State Change | ||
3896 | * Notification) IOCB. First, the payload of the unsolicited RSCN is walked | ||
3897 | * and fc_host_post_event() is invoked to notify the FC transport layer. If | ||
3898 | * the discovery state machine is about to begin discovery, it just accepts the | ||
3899 | * RSCN and the discovery process will satisfy the RSCN. If this RSCN only | ||
3900 | * contains N_Port IDs for other vports on this HBA, it just accepts the | ||
3901 | * RSCN without processing it. If the state machine is in the recovery | ||
3902 | * state, the fc_rscn_id_list of this @vport is walked and the | ||
3903 | * lpfc_rscn_recovery_check() routine is invoked to send recovery event for | ||
3904 | * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn() | ||
3905 | * routine is invoked to handle the RSCN event. | ||
3906 | * | ||
3907 | * Return code | ||
3908 | * 0 - Just sent the acc response | ||
3909 | * 1 - Sent the acc response and is waiting for name server completion | ||
3910 | **/ | ||
3074 | static int | 3911 | static int |
3075 | lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | 3912 | lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, |
3076 | struct lpfc_nodelist *ndlp) | 3913 | struct lpfc_nodelist *ndlp) |
@@ -3130,7 +3967,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
3130 | if (rscn_id == hba_id) { | 3967 | if (rscn_id == hba_id) { |
3131 | /* ALL NPortIDs in RSCN are on HBA */ | 3968 | /* ALL NPortIDs in RSCN are on HBA */ |
3132 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, | 3969 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
3133 | "0214 Ignore RSCN " | 3970 | "0219 Ignore RSCN " |
3134 | "Data: x%x x%x x%x x%x\n", | 3971 | "Data: x%x x%x x%x x%x\n", |
3135 | vport->fc_flag, payload_len, | 3972 | vport->fc_flag, payload_len, |
3136 | *lp, vport->fc_rscn_id_cnt); | 3973 | *lp, vport->fc_rscn_id_cnt); |
@@ -3241,6 +4078,22 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
3241 | return lpfc_els_handle_rscn(vport); | 4078 | return lpfc_els_handle_rscn(vport); |
3242 | } | 4079 | } |
3243 | 4080 | ||
4081 | /** | ||
4082 | * lpfc_els_handle_rscn: Handle rscn for a vport. | ||
4083 | * @vport: pointer to a host virtual N_Port data structure. | ||
4084 | * | ||
4085 | * This routine handles the Registration State Change Notification | ||
4086 | * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall | ||
4087 | * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise, | ||
4088 | * if the ndlp to NameServer exists, a Common Transport (CT) command to the | ||
4089 | * NameServer shall be issued. If CT command to the NameServer fails to be | ||
4090 | * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any | ||
4091 | * RSCN activities with the @vport. | ||
4092 | * | ||
4093 | * Return code | ||
4094 | * 0 - Cleaned up rscn on the @vport | ||
4095 | * 1 - Wait for plogi to name server before proceeding | ||
4096 | **/ | ||
3244 | int | 4097 | int |
3245 | lpfc_els_handle_rscn(struct lpfc_vport *vport) | 4098 | lpfc_els_handle_rscn(struct lpfc_vport *vport) |
3246 | { | 4099 | { |
@@ -3313,6 +4166,31 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport) | |||
3313 | return 0; | 4166 | return 0; |
3314 | } | 4167 | } |
3315 | 4168 | ||
4169 | /** | ||
4170 | * lpfc_els_rcv_flogi: Process an unsolicited flogi iocb. | ||
4171 | * @vport: pointer to a host virtual N_Port data structure. | ||
4172 | * @cmdiocb: pointer to lpfc command iocb data structure. | ||
4173 | * @ndlp: pointer to a node-list data structure. | ||
4174 | * | ||
4175 | * This routine processes Fabric Login (FLOGI) IOCB received as an ELS | ||
4176 | * unsolicited event. An unsolicited FLOGI can be received in a point-to- | ||
4177 | * point topology. As an unsolicited FLOGI should not be received in a loop | ||
4178 | * mode, any unsolicited FLOGI received in loop mode shall be ignored. The | ||
4179 | * lpfc_check_sparm() routine is invoked to check the parameters in the | ||
4180 | * unsolicited FLOGI. If parameter validation fails, the routine | ||
4181 | * lpfc_els_rsp_reject() shall be called with reject reason code set to | ||
4182 | * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the | ||
4183 | * FLOGI shall be compared with the Port WWN of the @vport to determine who | ||
4184 | * will initiate PLOGI. The party with the higher lexicographical value | ||
4185 | * has higher priority (as the winning port) and will initiate PLOGI and | ||
4186 | * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result | ||
4187 | * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI | ||
4188 | * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI. | ||
4189 | * | ||
4190 | * Return code | ||
4191 | * 0 - Successfully processed the unsolicited flogi | ||
4192 | * 1 - Failed to process the unsolicited flogi | ||
4193 | **/ | ||
3316 | static int | 4194 | static int |
3317 | lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | 4195 | lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, |
3318 | struct lpfc_nodelist *ndlp) | 4196 | struct lpfc_nodelist *ndlp) |
@@ -3402,6 +4280,22 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
3402 | return 0; | 4280 | return 0; |
3403 | } | 4281 | } |
3404 | 4282 | ||
4283 | /** | ||
4284 | * lpfc_els_rcv_rnid: Process an unsolicited rnid iocb. | ||
4285 | * @vport: pointer to a host virtual N_Port data structure. | ||
4286 | * @cmdiocb: pointer to lpfc command iocb data structure. | ||
4287 | * @ndlp: pointer to a node-list data structure. | ||
4288 | * | ||
4289 | * This routine processes Request Node Identification Data (RNID) IOCB | ||
4290 | * received as an ELS unsolicited event. Only when the RNID specifies format | ||
4291 | * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) does | ||
4292 | * this routine invoke the lpfc_els_rsp_rnid_acc() routine to | ||
4293 | * Accept (ACC) the RNID ELS command. All the other RNID formats are | ||
4294 | * rejected by invoking the lpfc_els_rsp_reject() routine. | ||
4295 | * | ||
4296 | * Return code | ||
4297 | * 0 - Successfully processed rnid iocb (currently always returns 0) | ||
4298 | **/ | ||
3405 | static int | 4299 | static int |
3406 | lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | 4300 | lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, |
3407 | struct lpfc_nodelist *ndlp) | 4301 | struct lpfc_nodelist *ndlp) |
@@ -3441,6 +4335,19 @@ lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
3441 | return 0; | 4335 | return 0; |
3442 | } | 4336 | } |
3443 | 4337 | ||
4338 | /** | ||
4339 | * lpfc_els_rcv_lirr: Process an unsolicited lirr iocb. | ||
4340 | * @vport: pointer to a host virtual N_Port data structure. | ||
4341 | * @cmdiocb: pointer to lpfc command iocb data structure. | ||
4342 | * @ndlp: pointer to a node-list data structure. | ||
4343 | * | ||
4344 | * This routine processes a Link Incident Report Registration (LIRR) IOCB | ||
4345 | * received as an ELS unsolicited event. Currently, this function just invokes | ||
4346 | * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally. | ||
4347 | * | ||
4348 | * Return code | ||
4349 | * 0 - Successfully processed lirr iocb (currently always returns 0) | ||
4350 | **/ | ||
3444 | static int | 4351 | static int |
3445 | lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | 4352 | lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, |
3446 | struct lpfc_nodelist *ndlp) | 4353 | struct lpfc_nodelist *ndlp) |
@@ -3456,6 +4363,25 @@ lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
3456 | return 0; | 4363 | return 0; |
3457 | } | 4364 | } |
3458 | 4365 | ||
4366 | /** | ||
4367 | * lpfc_els_rsp_rps_acc: Completion callback for MBX_READ_LNK_STAT mbox cmd. | ||
4368 | * @phba: pointer to lpfc hba data structure. | ||
4369 | * @pmb: pointer to the driver internal queue element for mailbox command. | ||
4370 | * | ||
4371 | * This routine is the completion callback function for the MBX_READ_LNK_STAT | ||
4372 | * mailbox command. This callback function is to actually send the Accept | ||
4373 | * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It | ||
4374 | * collects the link statistics from the completion of the MBX_READ_LNK_STAT | ||
4375 | * mailbox command, constructs the RPS response with the link statistics | ||
4376 | * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC | ||
4377 | * response to the RPS. | ||
4378 | * | ||
4379 | * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp | ||
4380 | * will be incremented by 1 for holding the ndlp and the reference to ndlp | ||
4381 | * will be stored into the context1 field of the IOCB for the completion | ||
4382 | * callback function to the RPS Accept Response ELS IOCB command. | ||
4383 | * | ||
4384 | **/ | ||
3459 | static void | 4385 | static void |
3460 | lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | 4386 | lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) |
3461 | { | 4387 | { |
@@ -3531,6 +4457,24 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
3531 | return; | 4457 | return; |
3532 | } | 4458 | } |
3533 | 4459 | ||
4460 | /** | ||
4461 | * lpfc_els_rcv_rps: Process an unsolicited rps iocb. | ||
4462 | * @vport: pointer to a host virtual N_Port data structure. | ||
4463 | * @cmdiocb: pointer to lpfc command iocb data structure. | ||
4464 | * @ndlp: pointer to a node-list data structure. | ||
4465 | * | ||
4466 | * This routine processes Read Port Status (RPS) IOCB received as an | ||
4467 | * ELS unsolicited event. It first checks the remote port state. If the | ||
4468 | * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE | ||
4469 | * state, it invokes the lpfc_els_rsp_reject() routine to send the reject | ||
4470 | * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command | ||
4471 | * to read the HBA link statistics. The callback function set on the | ||
4472 | * MBX_READ_LNK_STAT mailbox command, lpfc_els_rsp_rps_acc(), then actually | ||
4473 | * sends out the RPS Accept (ACC) response. | ||
4474 | * | ||
4475 | * Return codes | ||
4476 | * 0 - Successfully processed rps iocb (currently always returns 0) | ||
4477 | **/ | ||
3534 | static int | 4478 | static int |
3535 | lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | 4479 | lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, |
3536 | struct lpfc_nodelist *ndlp) | 4480 | struct lpfc_nodelist *ndlp) |
@@ -3544,14 +4488,9 @@ lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
3544 | struct ls_rjt stat; | 4488 | struct ls_rjt stat; |
3545 | 4489 | ||
3546 | if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && | 4490 | if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && |
3547 | (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { | 4491 | (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) |
3548 | stat.un.b.lsRjtRsvd0 = 0; | 4492 | /* reject the unsolicited RPS request and done with it */ |
3549 | stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; | 4493 | goto reject_out; |
3550 | stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; | ||
3551 | stat.un.b.vendorUnique = 0; | ||
3552 | lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, | ||
3553 | NULL); | ||
3554 | } | ||
3555 | 4494 | ||
3556 | pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; | 4495 | pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; |
3557 | lp = (uint32_t *) pcmd->virt; | 4496 | lp = (uint32_t *) pcmd->virt; |
@@ -3584,6 +4523,9 @@ lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
3584 | mempool_free(mbox, phba->mbox_mem_pool); | 4523 | mempool_free(mbox, phba->mbox_mem_pool); |
3585 | } | 4524 | } |
3586 | } | 4525 | } |
4526 | |||
4527 | reject_out: | ||
4528 | /* issue rejection response */ | ||
3587 | stat.un.b.lsRjtRsvd0 = 0; | 4529 | stat.un.b.lsRjtRsvd0 = 0; |
3588 | stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; | 4530 | stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; |
3589 | stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; | 4531 | stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; |
@@ -3592,6 +4534,25 @@ lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
3592 | return 0; | 4534 | return 0; |
3593 | } | 4535 | } |
3594 | 4536 | ||
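[Illustrative sketch, not part of the patch] The hunk above reworks lpfc_els_rcv_rps() so that every early-exit condition funnels through a single reject_out label that builds the LS_RJT, instead of duplicating the rejection at each check. The sketch below shows that single-exit goto pattern in isolation; the state names and numeric codes are placeholders, not the real LSRJT values.

```c
#include <stdbool.h>
#include <stdio.h>

enum node_state { NODE_UNMAPPED, NODE_MAPPED, NODE_OTHER };

struct reject { int reason; int explanation; };

/* All failure checks jump to one place that fills in the rejection. */
static int handle_request(enum node_state state, bool resources_ok,
			  struct reject *rjt)
{
	if (state != NODE_UNMAPPED && state != NODE_MAPPED)
		goto reject_out;	/* remote port not logged in properly */

	if (!resources_ok)
		goto reject_out;	/* e.g. mailbox allocation failed */

	return 0;			/* accept path (sent from a completion callback) */

reject_out:
	rjt->reason = 1;		/* placeholder "unable to perform" reason */
	rjt->explanation = 2;		/* placeholder "can't give data" explanation */
	return 1;
}

int main(void)
{
	struct reject rjt = { 0, 0 };

	printf("rc=%d reason=%d\n", handle_request(NODE_OTHER, true, &rjt),
	       rjt.reason);
	return 0;
}
```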
4537 | /** | ||
4538 | * lpfc_els_rsp_rpl_acc: Issue an accept rpl els command. | ||
4539 | * @vport: pointer to a host virtual N_Port data structure. | ||
4540 | * @cmdsize: size of the ELS command. | ||
4541 | * @oldiocb: pointer to the original lpfc command iocb data structure. | ||
4542 | * @ndlp: pointer to a node-list data structure. | ||
4543 | * | ||
4544 | * This routine issues an Accept (ACC) Read Port List (RPL) ELS command. | ||
4545 | * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL. | ||
4546 | * | ||
4547 | * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp | ||
4548 | * will be incremented by 1 for holding the ndlp and the reference to ndlp | ||
4549 | * will be stored into the context1 field of the IOCB for the completion | ||
4550 | * callback function to the RPL Accept Response ELS command. | ||
4551 | * | ||
4552 | * Return code | ||
4553 | * 0 - Successfully issued ACC RPL ELS command | ||
4554 | * 1 - Failed to issue ACC RPL ELS command | ||
4555 | **/ | ||
3595 | static int | 4556 | static int |
3596 | lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, | 4557 | lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, |
3597 | struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) | 4558 | struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) |
@@ -3645,6 +4606,22 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, | |||
3645 | return 0; | 4606 | return 0; |
3646 | } | 4607 | } |
3647 | 4608 | ||
4609 | /** | ||
4610 | * lpfc_els_rcv_rpl: Process an unsolicited rpl iocb. | ||
4611 | * @vport: pointer to a host virtual N_Port data structure. | ||
4612 | * @cmdiocb: pointer to lpfc command iocb data structure. | ||
4613 | * @ndlp: pointer to a node-list data structure. | ||
4614 | * | ||
4615 | * This routine processes Read Port List (RPL) IOCB received as an ELS | ||
4616 | * unsolicited event. It first checks the remote port state. If the remote | ||
4617 | * port is in neither NLP_STE_UNMAPPED_NODE nor NLP_STE_MAPPED_NODE state, it | ||
4618 | * invokes the lpfc_els_rsp_reject() routine to send reject response. | ||
4619 | * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine | ||
4620 | * to accept the RPL. | ||
4621 | * | ||
4622 | * Return code | ||
4623 | * 0 - Successfully processed rpl iocb (currently always returns 0) | ||
4624 | **/ | ||
3648 | static int | 4625 | static int |
3649 | lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | 4626 | lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, |
3650 | struct lpfc_nodelist *ndlp) | 4627 | struct lpfc_nodelist *ndlp) |
@@ -3658,12 +4635,15 @@ lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
3658 | 4635 | ||
3659 | if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && | 4636 | if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && |
3660 | (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { | 4637 | (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { |
4638 | /* issue rejection response */ | ||
3661 | stat.un.b.lsRjtRsvd0 = 0; | 4639 | stat.un.b.lsRjtRsvd0 = 0; |
3662 | stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; | 4640 | stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; |
3663 | stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; | 4641 | stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; |
3664 | stat.un.b.vendorUnique = 0; | 4642 | stat.un.b.vendorUnique = 0; |
3665 | lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, | 4643 | lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, |
3666 | NULL); | 4644 | NULL); |
4645 | /* rejected the unsolicited RPL request and done with it */ | ||
4646 | return 0; | ||
3667 | } | 4647 | } |
3668 | 4648 | ||
3669 | pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; | 4649 | pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; |
@@ -3685,6 +4665,30 @@ lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
3685 | return 0; | 4665 | return 0; |
3686 | } | 4666 | } |
3687 | 4667 | ||
4668 | /** | ||
4669 | * lpfc_els_rcv_farp: Process an unsolicited farp request els command. | ||
4670 | * @vport: pointer to a virtual N_Port data structure. | ||
4671 | * @cmdiocb: pointer to lpfc command iocb data structure. | ||
4672 | * @ndlp: pointer to a node-list data structure. | ||
4673 | * | ||
4674 | * This routine processes Fibre Channel Address Resolution Protocol | ||
4675 | * (FARP) Request IOCB received as an ELS unsolicited event. Currently, | ||
4676 | * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such, | ||
4677 | * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the | ||
4678 | * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the | ||
4679 | * remote PortName is compared against the FC PortName stored in the @vport | ||
4680 | * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is | ||
4681 | * compared against the FC NodeName stored in the @vport data structure. | ||
4682 | * If any of these matches and the FARP_REQUEST_FARPR flag is set in the | ||
4683 | * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is | ||
4684 | * invoked to send out FARP Response to the remote node. Before sending the | ||
4685 | * FARP Response, however, the FARP_REQUEST_PLOGI flag is checked in the FARP | ||
4686 | * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi() | ||
4687 | * routine is invoked to log into the remote port first. | ||
4688 | * | ||
4689 | * Return code | ||
4690 | * 0 - Either the FARP Match Mode not supported or successfully processed | ||
4691 | **/ | ||
3688 | static int | 4692 | static int |
3689 | lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | 4693 | lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, |
3690 | struct lpfc_nodelist *ndlp) | 4694 | struct lpfc_nodelist *ndlp) |
@@ -3744,6 +4748,20 @@ lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
3744 | return 0; | 4748 | return 0; |
3745 | } | 4749 | } |
3746 | 4750 | ||
4751 | /** | ||
4752 | * lpfc_els_rcv_farpr: Process an unsolicited farp response iocb. | ||
4753 | * @vport: pointer to a host virtual N_Port data structure. | ||
4754 | * @cmdiocb: pointer to lpfc command iocb data structure. | ||
4755 | * @ndlp: pointer to a node-list data structure. | ||
4756 | * | ||
4757 | * This routine processes Fibre Channel Address Resolution Protocol | ||
4758 | * Response (FARPR) IOCB received as an ELS unsolicited event. It simply | ||
4759 | * invokes the lpfc_els_rsp_acc() routine to send an Accept (ACC) to the | ||
4760 | * remote node for the FARP response request. | ||
4761 | * | ||
4762 | * Return code | ||
4763 | * 0 - Successfully processed FARPR IOCB (currently always returns 0) | ||
4764 | **/ | ||
3747 | static int | 4765 | static int |
3748 | lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | 4766 | lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, |
3749 | struct lpfc_nodelist *ndlp) | 4767 | struct lpfc_nodelist *ndlp) |
@@ -3768,6 +4786,25 @@ lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
3768 | return 0; | 4786 | return 0; |
3769 | } | 4787 | } |
3770 | 4788 | ||
4789 | /** | ||
4790 | * lpfc_els_rcv_fan: Process an unsolicited fan iocb command. | ||
4791 | * @vport: pointer to a host virtual N_Port data structure. | ||
4792 | * @cmdiocb: pointer to lpfc command iocb data structure. | ||
4793 | * @fan_ndlp: pointer to a node-list data structure. | ||
4794 | * | ||
4795 | * This routine processes a Fabric Address Notification (FAN) IOCB | ||
4796 | * command received as an ELS unsolicited event. The FAN ELS command will | ||
4797 | * only be processed on a physical port (i.e., the @vport represents the | ||
4798 | * physical port). The fabric NodeName and PortName from the FAN IOCB are | ||
4799 | * compared against those in the phba data structure. If any of those is | ||
4800 | * different, the lpfc_initial_flogi() routine is invoked to initialize | ||
4801 | * Fabric Login (FLOGI) to the fabric to start the discovery over. Otherwise, | ||
4802 | * if both of those are identical, the lpfc_issue_fabric_reglogin() routine | ||
4803 | * is invoked to register login to the fabric. | ||
4804 | * | ||
4805 | * Return code | ||
4806 | * 0 - Successfully processed fan iocb (currently always returns 0). | ||
4807 | **/ | ||
3771 | static int | 4808 | static int |
3772 | lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | 4809 | lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, |
3773 | struct lpfc_nodelist *fan_ndlp) | 4810 | struct lpfc_nodelist *fan_ndlp) |
@@ -3797,6 +4834,16 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
3797 | return 0; | 4834 | return 0; |
3798 | } | 4835 | } |
3799 | 4836 | ||
4837 | /** | ||
4838 | * lpfc_els_timeout: Handler function for the els timer. | ||
4839 | * @ptr: holder for the timer function associated data. | ||
4840 | * | ||
4841 | * This routine is invoked by the ELS timer after timeout. It posts the ELS | ||
4842 | * timer timeout event by setting the WORKER_ELS_TMO bit in the work port | ||
4843 | * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake | ||
4844 | * up the worker thread. It is for the worker thread to invoke the routine | ||
4845 | * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO. | ||
4846 | **/ | ||
3800 | void | 4847 | void |
3801 | lpfc_els_timeout(unsigned long ptr) | 4848 | lpfc_els_timeout(unsigned long ptr) |
3802 | { | 4849 | { |
@@ -3816,6 +4863,15 @@ lpfc_els_timeout(unsigned long ptr) | |||
3816 | return; | 4863 | return; |
3817 | } | 4864 | } |
3818 | 4865 | ||
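[Illustrative sketch, not part of the patch] The timer comment above describes a common split: the timer callback only records the event in a bitmap and wakes the worker thread, and the heavy work runs later in worker context. Below is a small pthread model of that split with invented names; it is not the lpfc worker implementation.

```c
#include <pthread.h>
#include <stdio.h>

#define EVT_ELS_TMO 0x1		/* hypothetical work-event bit */

struct worker {
	pthread_mutex_t lock;
	pthread_cond_t wake;
	unsigned int events;
};

/* What a timer callback would do: set the bit, wake the worker. */
static void post_timeout_event(struct worker *w)
{
	pthread_mutex_lock(&w->lock);
	w->events |= EVT_ELS_TMO;
	pthread_cond_signal(&w->wake);
	pthread_mutex_unlock(&w->lock);
}

static void *worker_thread(void *arg)
{
	struct worker *w = arg;

	pthread_mutex_lock(&w->lock);
	while (!(w->events & EVT_ELS_TMO))
		pthread_cond_wait(&w->wake, &w->lock);
	w->events &= ~EVT_ELS_TMO;
	pthread_mutex_unlock(&w->lock);

	puts("worker: handling timeout");	/* the real handler would run here */
	return NULL;
}

int main(void)
{
	struct worker w = { PTHREAD_MUTEX_INITIALIZER,
			    PTHREAD_COND_INITIALIZER, 0 };
	pthread_t tid;

	pthread_create(&tid, NULL, worker_thread, &w);
	post_timeout_event(&w);
	pthread_join(tid, NULL);
	return 0;
}
```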
4866 | /** | ||
4867 | * lpfc_els_timeout_handler: Process an els timeout event. | ||
4868 | * @vport: pointer to a virtual N_Port data structure. | ||
4869 | * | ||
4870 | * This routine is the actual handler function that processes an ELS timeout | ||
4871 | * event. It walks the ELS ring and aborts all the IOCBs associated with the | ||
4872 | * @vport (except the ABORT/CLOSE/FARP/FARPR/FDISC ones) by invoking the | ||
4873 | * lpfc_sli_issue_abort_iotag() routine. | ||
4874 | **/ | ||
3819 | void | 4875 | void |
3820 | lpfc_els_timeout_handler(struct lpfc_vport *vport) | 4876 | lpfc_els_timeout_handler(struct lpfc_vport *vport) |
3821 | { | 4877 | { |
@@ -3886,6 +4942,26 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport) | |||
3886 | mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); | 4942 | mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); |
3887 | } | 4943 | } |
3888 | 4944 | ||
4945 | /** | ||
4946 | * lpfc_els_flush_cmd: Clean up the outstanding els commands to a vport. | ||
4947 | * @vport: pointer to a host virtual N_Port data structure. | ||
4948 | * | ||
4949 | * This routine is used to clean up all the outstanding ELS commands on a | ||
4950 | * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport() | ||
4951 | * routine. After that, it walks the ELS transmit queue to remove all the | ||
4952 | * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For | ||
4953 | * the IOCBs with a non-NULL completion callback function, the callback | ||
4954 | * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and | ||
4955 | * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion | ||
4956 | * callback function, the IOCB will simply be released. Finally, it walks | ||
4957 | * the ELS transmit completion queue to issue an abort IOCB to any transmit | ||
4958 | * completion queue IOCB that is associated with the @vport and is not | ||
4959 | * an IOCB from libdfc (i.e., the management plane IOCBs that are not | ||
4960 | * part of the discovery state machine) out to HBA by invoking the | ||
4961 | * lpfc_sli_issue_abort_iotag() routine. Note that although this function | ||
4962 | * issues an abort IOCB for each such transmit completion queued IOCB, it | ||
4963 | * does not guarantee that the IOCBs have been aborted when it returns. | ||
4964 | **/ | ||
3889 | void | 4965 | void |
3890 | lpfc_els_flush_cmd(struct lpfc_vport *vport) | 4966 | lpfc_els_flush_cmd(struct lpfc_vport *vport) |
3891 | { | 4967 | { |
@@ -3948,6 +5024,23 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport) | |||
3948 | return; | 5024 | return; |
3949 | } | 5025 | } |
3950 | 5026 | ||
5027 | /** | ||
5028 | * lpfc_els_flush_all_cmd: Clean up all the outstanding els commands to a HBA. | ||
5029 | * @phba: pointer to lpfc hba data structure. | ||
5030 | * | ||
5031 | * This routine is used to clean up all the outstanding ELS commands on a | ||
5032 | * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba() | ||
5033 | * routine. After that, it walks the ELS transmit queue to remove all the | ||
5034 | * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For | ||
5035 | * the IOCBs that have a completion callback function, the callback will be | ||
5036 | * invoked with the status set to IOSTAT_LOCAL_REJECT and | ||
5037 | * un.ulpWord[4] set to IOERR_SLI_ABORTED. IOCBs without a completion | ||
5038 | * callback function will simply be released. Finally, | ||
5039 | * it walks the ELS transmit completion queue to issue an abort IOCB to any | ||
5040 | * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the | ||
5041 | * management plane IOCBs that are not part of the discovery state machine) | ||
5042 | * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine. | ||
5043 | **/ | ||
3951 | void | 5044 | void |
3952 | lpfc_els_flush_all_cmd(struct lpfc_hba *phba) | 5045 | lpfc_els_flush_all_cmd(struct lpfc_hba *phba) |
3953 | { | 5046 | { |
@@ -3992,6 +5085,130 @@ lpfc_els_flush_all_cmd(struct lpfc_hba *phba) | |||
3992 | return; | 5085 | return; |
3993 | } | 5086 | } |
3994 | 5087 | ||
5088 | /** | ||
5089 | * lpfc_send_els_failure_event: Posts an ELS command failure event. | ||
5090 | * @phba: Pointer to hba context object. | ||
5091 | * @cmdiocbp: Pointer to command iocb which reported error. | ||
5092 | * @rspiocbp: Pointer to response iocb which reported error. | ||
5093 | * | ||
5094 | * This function sends an event when there is an ELS command | ||
5095 | * failure. | ||
5096 | **/ | ||
5097 | void | ||
5098 | lpfc_send_els_failure_event(struct lpfc_hba *phba, | ||
5099 | struct lpfc_iocbq *cmdiocbp, | ||
5100 | struct lpfc_iocbq *rspiocbp) | ||
5101 | { | ||
5102 | struct lpfc_vport *vport = cmdiocbp->vport; | ||
5103 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
5104 | struct lpfc_lsrjt_event lsrjt_event; | ||
5105 | struct lpfc_fabric_event_header fabric_event; | ||
5106 | struct ls_rjt stat; | ||
5107 | struct lpfc_nodelist *ndlp; | ||
5108 | uint32_t *pcmd; | ||
5109 | |||
5110 | ndlp = cmdiocbp->context1; | ||
5111 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) | ||
5112 | return; | ||
5113 | |||
5114 | if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) { | ||
5115 | lsrjt_event.header.event_type = FC_REG_ELS_EVENT; | ||
5116 | lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV; | ||
5117 | memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname, | ||
5118 | sizeof(struct lpfc_name)); | ||
5119 | memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename, | ||
5120 | sizeof(struct lpfc_name)); | ||
5121 | pcmd = (uint32_t *) (((struct lpfc_dmabuf *) | ||
5122 | cmdiocbp->context2)->virt); | ||
5123 | lsrjt_event.command = *pcmd; | ||
5124 | stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]); | ||
5125 | lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode; | ||
5126 | lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp; | ||
5127 | fc_host_post_vendor_event(shost, | ||
5128 | fc_get_event_number(), | ||
5129 | sizeof(lsrjt_event), | ||
5130 | (char *)&lsrjt_event, | ||
5131 | SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); | ||
5132 | return; | ||
5133 | } | ||
5134 | if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) || | ||
5135 | (rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) { | ||
5136 | fabric_event.event_type = FC_REG_FABRIC_EVENT; | ||
5137 | if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) | ||
5138 | fabric_event.subcategory = LPFC_EVENT_PORT_BUSY; | ||
5139 | else | ||
5140 | fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY; | ||
5141 | memcpy(fabric_event.wwpn, &ndlp->nlp_portname, | ||
5142 | sizeof(struct lpfc_name)); | ||
5143 | memcpy(fabric_event.wwnn, &ndlp->nlp_nodename, | ||
5144 | sizeof(struct lpfc_name)); | ||
5145 | fc_host_post_vendor_event(shost, | ||
5146 | fc_get_event_number(), | ||
5147 | sizeof(fabric_event), | ||
5148 | (char *)&fabric_event, | ||
5149 | SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); | ||
5150 | return; | ||
5151 | } | ||
5152 | |||
5153 | } | ||
5154 | |||
5155 | /** | ||
5156 | * lpfc_send_els_event: Posts unsolicited els event. | ||
5157 | * @vport: Pointer to vport object. | ||
5158 | * @ndlp: Pointer FC node object. | ||
5159 | * @cmd: ELS command code. | ||
5160 | * | ||
5161 | * This function posts an event when there is an incoming | ||
5162 | * unsolicited ELS command. | ||
5163 | **/ | ||
5164 | static void | ||
5165 | lpfc_send_els_event(struct lpfc_vport *vport, | ||
5166 | struct lpfc_nodelist *ndlp, | ||
5167 | uint32_t cmd) | ||
5168 | { | ||
5169 | struct lpfc_els_event_header els_data; | ||
5170 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
5171 | |||
5172 | els_data.event_type = FC_REG_ELS_EVENT; | ||
5173 | switch (cmd) { | ||
5174 | case ELS_CMD_PLOGI: | ||
5175 | els_data.subcategory = LPFC_EVENT_PLOGI_RCV; | ||
5176 | break; | ||
5177 | case ELS_CMD_PRLO: | ||
5178 | els_data.subcategory = LPFC_EVENT_PRLO_RCV; | ||
5179 | break; | ||
5180 | case ELS_CMD_ADISC: | ||
5181 | els_data.subcategory = LPFC_EVENT_ADISC_RCV; | ||
5182 | break; | ||
5183 | default: | ||
5184 | return; | ||
5185 | } | ||
5186 | memcpy(els_data.wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); | ||
5187 | memcpy(els_data.wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); | ||
5188 | fc_host_post_vendor_event(shost, | ||
5189 | fc_get_event_number(), | ||
5190 | sizeof(els_data), | ||
5191 | (char *)&els_data, | ||
5192 | SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); | ||
5193 | |||
5194 | return; | ||
5195 | } | ||
5196 | |||
5197 | |||
5198 | /** | ||
5199 | * lpfc_els_unsol_buffer: Process an unsolicited event data buffer. | ||
5200 | * @phba: pointer to lpfc hba data structure. | ||
5201 | * @pring: pointer to a SLI ring. | ||
5202 | * @vport: pointer to a host virtual N_Port data structure. | ||
5203 | * @elsiocb: pointer to lpfc els command iocb data structure. | ||
5204 | * | ||
5205 | * This routine is used for processing the IOCB associated with an unsolicited | ||
5206 | * event. It first determines whether there is an existing ndlp that matches | ||
5207 | * the DID from the unsolicited IOCB. If not, it will create a new one with | ||
5208 | * the DID from the unsolicited IOCB. The ELS command from the unsolicited | ||
5209 | * IOCB is then used to invoke the proper routine and to set up proper state | ||
5210 | * of the discovery state machine. | ||
5211 | **/ | ||
3995 | static void | 5212 | static void |
3996 | lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | 5213 | lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
3997 | struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb) | 5214 | struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb) |
@@ -4059,8 +5276,6 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
4059 | } | 5276 | } |
4060 | 5277 | ||
4061 | phba->fc_stat.elsRcvFrame++; | 5278 | phba->fc_stat.elsRcvFrame++; |
4062 | if (elsiocb->context1) | ||
4063 | lpfc_nlp_put(elsiocb->context1); | ||
4064 | 5279 | ||
4065 | elsiocb->context1 = lpfc_nlp_get(ndlp); | 5280 | elsiocb->context1 = lpfc_nlp_get(ndlp); |
4066 | elsiocb->vport = vport; | 5281 | elsiocb->vport = vport; |
@@ -4081,6 +5296,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
4081 | phba->fc_stat.elsRcvPLOGI++; | 5296 | phba->fc_stat.elsRcvPLOGI++; |
4082 | ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); | 5297 | ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); |
4083 | 5298 | ||
5299 | lpfc_send_els_event(vport, ndlp, cmd); | ||
4084 | if (vport->port_state < LPFC_DISC_AUTH) { | 5300 | if (vport->port_state < LPFC_DISC_AUTH) { |
4085 | if (!(phba->pport->fc_flag & FC_PT2PT) || | 5301 | if (!(phba->pport->fc_flag & FC_PT2PT) || |
4086 | (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { | 5302 | (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { |
@@ -4130,6 +5346,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
4130 | did, vport->port_state, ndlp->nlp_flag); | 5346 | did, vport->port_state, ndlp->nlp_flag); |
4131 | 5347 | ||
4132 | phba->fc_stat.elsRcvPRLO++; | 5348 | phba->fc_stat.elsRcvPRLO++; |
5349 | lpfc_send_els_event(vport, ndlp, cmd); | ||
4133 | if (vport->port_state < LPFC_DISC_AUTH) { | 5350 | if (vport->port_state < LPFC_DISC_AUTH) { |
4134 | rjt_err = LSRJT_UNABLE_TPC; | 5351 | rjt_err = LSRJT_UNABLE_TPC; |
4135 | break; | 5352 | break; |
@@ -4147,6 +5364,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
4147 | "RCV ADISC: did:x%x/ste:x%x flg:x%x", | 5364 | "RCV ADISC: did:x%x/ste:x%x flg:x%x", |
4148 | did, vport->port_state, ndlp->nlp_flag); | 5365 | did, vport->port_state, ndlp->nlp_flag); |
4149 | 5366 | ||
5367 | lpfc_send_els_event(vport, ndlp, cmd); | ||
4150 | phba->fc_stat.elsRcvADISC++; | 5368 | phba->fc_stat.elsRcvADISC++; |
4151 | if (vport->port_state < LPFC_DISC_AUTH) { | 5369 | if (vport->port_state < LPFC_DISC_AUTH) { |
4152 | rjt_err = LSRJT_UNABLE_TPC; | 5370 | rjt_err = LSRJT_UNABLE_TPC; |
@@ -4270,6 +5488,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
4270 | NULL); | 5488 | NULL); |
4271 | } | 5489 | } |
4272 | 5490 | ||
5491 | lpfc_nlp_put(elsiocb->context1); | ||
5492 | elsiocb->context1 = NULL; | ||
4273 | return; | 5493 | return; |
4274 | 5494 | ||
4275 | dropit: | 5495 | dropit: |
@@ -4282,6 +5502,19 @@ dropit: | |||
4282 | phba->fc_stat.elsRcvDrop++; | 5502 | phba->fc_stat.elsRcvDrop++; |
4283 | } | 5503 | } |
4284 | 5504 | ||
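[Illustrative sketch, not part of the patch] The unsolicited-buffer handler described above is essentially a dispatcher: bump a per-command statistics counter, post the vendor event, then either call the per-command receive routine or record a reject reason for unsupported commands. A compressed model of that switch follows; the command codes, counters, and reject value are placeholders rather than the lpfc definitions.

```c
#include <stdio.h>

enum { CMD_PLOGI = 1, CMD_PRLO = 2, CMD_ADISC = 3 };	/* placeholder codes */

struct stats { int plogi, prlo, adisc, unsup; };

/* Count the command, then dispatch it or mark it for rejection. */
static int dispatch_els(int cmd, struct stats *st, int *rjt_err)
{
	*rjt_err = 0;
	switch (cmd) {
	case CMD_PLOGI:
		st->plogi++;
		return 0;	/* would call the PLOGI receive handler */
	case CMD_PRLO:
		st->prlo++;
		return 0;
	case CMD_ADISC:
		st->adisc++;
		return 0;
	default:
		st->unsup++;
		*rjt_err = 11;	/* placeholder "command not supported" */
		return -1;
	}
}

int main(void)
{
	struct stats st = { 0, 0, 0, 0 };
	int rjt_err;

	dispatch_els(CMD_PLOGI, &st, &rjt_err);
	dispatch_els(99, &st, &rjt_err);
	printf("plogi=%d unsupported=%d rjt=%d\n", st.plogi, st.unsup, rjt_err);
	return 0;
}
```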
5505 | /** | ||
5506 | * lpfc_find_vport_by_vpid: Find a vport on a HBA through vport identifier. | ||
5507 | * @phba: pointer to lpfc hba data structure. | ||
5508 | * @vpi: host virtual N_Port identifier. | ||
5509 | * | ||
5510 | * This routine finds a vport on a HBA (referred to by @phba) through a | ||
5511 | * @vpi. The function walks the HBA's vport list and returns the address | ||
5512 | * of the vport with the matching @vpi. | ||
5513 | * | ||
5514 | * Return code | ||
5515 | * NULL - No vport with the matching @vpi found | ||
5516 | * Otherwise - Address of the vport with the matching @vpi. | ||
5517 | **/ | ||
4285 | static struct lpfc_vport * | 5518 | static struct lpfc_vport * |
4286 | lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi) | 5519 | lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi) |
4287 | { | 5520 | { |
@@ -4299,6 +5532,18 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi) | |||
4299 | return NULL; | 5532 | return NULL; |
4300 | } | 5533 | } |
4301 | 5534 | ||
5535 | /** | ||
5536 | * lpfc_els_unsol_event: Process an unsolicited event from an els sli ring. | ||
5537 | * @phba: pointer to lpfc hba data structure. | ||
5538 | * @pring: pointer to a SLI ring. | ||
5539 | * @elsiocb: pointer to lpfc els iocb data structure. | ||
5540 | * | ||
5541 | * This routine is used to process an unsolicited event received from a SLI | ||
5542 | * (Service Level Interface) ring. The actual processing of the data buffer | ||
5543 | * associated with the unsolicited event is done by invoking the routine | ||
5544 | * lpfc_els_unsol_buffer() after properly setting up the iocb buffer from the | ||
5545 | * SLI ring on which the unsolicited event was received. | ||
5546 | **/ | ||
4302 | void | 5547 | void |
4303 | lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | 5548 | lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
4304 | struct lpfc_iocbq *elsiocb) | 5549 | struct lpfc_iocbq *elsiocb) |
@@ -4309,6 +5554,7 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
4309 | struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2; | 5554 | struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2; |
4310 | struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3; | 5555 | struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3; |
4311 | 5556 | ||
5557 | elsiocb->context1 = NULL; | ||
4312 | elsiocb->context2 = NULL; | 5558 | elsiocb->context2 = NULL; |
4313 | elsiocb->context3 = NULL; | 5559 | elsiocb->context3 = NULL; |
4314 | 5560 | ||
@@ -4356,8 +5602,6 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
4356 | * The different unsolicited event handlers would tell us | 5602 | * The different unsolicited event handlers would tell us |
4357 | * if they are done with "mp" by setting context2 to NULL. | 5603 | * if they are done with "mp" by setting context2 to NULL. |
4358 | */ | 5604 | */ |
4359 | lpfc_nlp_put(elsiocb->context1); | ||
4360 | elsiocb->context1 = NULL; | ||
4361 | if (elsiocb->context2) { | 5605 | if (elsiocb->context2) { |
4362 | lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2); | 5606 | lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2); |
4363 | elsiocb->context2 = NULL; | 5607 | elsiocb->context2 = NULL; |
@@ -4376,6 +5620,19 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
4376 | } | 5620 | } |
4377 | } | 5621 | } |
4378 | 5622 | ||
5623 | /** | ||
5624 | * lpfc_do_scr_ns_plogi: Issue a plogi to the name server for scr. | ||
5625 | * @phba: pointer to lpfc hba data structure. | ||
5626 | * @vport: pointer to a virtual N_Port data structure. | ||
5627 | * | ||
5628 | * This routine issues a Port Login (PLOGI) to the Name Server with | ||
5629 | * State Change Request (SCR) for a @vport. This routine will create an | ||
5630 | * ndlp for the Name Server associated to the @vport if such node does | ||
5631 | * not already exist. The PLOGI to Name Server is issued by invoking the | ||
5632 | * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface | ||
5633 | * (FDMI) is configured for the @vport, an FDMI node will be created and | ||
5634 | * the PLOGI to FDMI is issued by invoking lpfc_issue_els_plogi() routine. | ||
5635 | **/ | ||
4379 | void | 5636 | void |
4380 | lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) | 5637 | lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) |
4381 | { | 5638 | { |
@@ -4434,6 +5691,18 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) | |||
4434 | return; | 5691 | return; |
4435 | } | 5692 | } |
4436 | 5693 | ||
5694 | /** | ||
5695 | * lpfc_cmpl_reg_new_vport: Completion callback function to register new vport. | ||
5696 | * @phba: pointer to lpfc hba data structure. | ||
5697 | * @pmb: pointer to the driver internal queue element for mailbox command. | ||
5698 | * | ||
5699 | * This routine is the completion callback function to register new vport | ||
5700 | * mailbox command. If the new vport mailbox command completes successfully, | ||
5701 | * the fabric registration login shall be performed on physical port (the | ||
5702 | * new vport created is actually a physical port, with VPI 0) or the port | ||
5703 | * login to Name Server for State Change Request (SCR) will be performed | ||
5704 | * on virtual port (real virtual port, with VPI greater than 0). | ||
5705 | **/ | ||
4437 | static void | 5706 | static void |
4438 | lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | 5707 | lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) |
4439 | { | 5708 | { |
@@ -4491,6 +5760,15 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
4491 | return; | 5760 | return; |
4492 | } | 5761 | } |
4493 | 5762 | ||
5763 | /** | ||
5764 | * lpfc_register_new_vport: Register a new vport with a HBA. | ||
5765 | * @phba: pointer to lpfc hba data structure. | ||
5766 | * @vport: pointer to a host virtual N_Port data structure. | ||
5767 | * @ndlp: pointer to a node-list data structure. | ||
5768 | * | ||
5769 | * This routine registers the @vport as a new virtual port with a HBA. | ||
5770 | * It is done through a registering vpi mailbox command. | ||
5771 | **/ | ||
4494 | static void | 5772 | static void |
4495 | lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, | 5773 | lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, |
4496 | struct lpfc_nodelist *ndlp) | 5774 | struct lpfc_nodelist *ndlp) |
@@ -4531,6 +5809,26 @@ mbox_err_exit: | |||
4531 | return; | 5809 | return; |
4532 | } | 5810 | } |
4533 | 5811 | ||
5812 | /** | ||
5813 | * lpfc_cmpl_els_fdisc: Completion function for fdisc iocb command. | ||
5814 | * @phba: pointer to lpfc hba data structure. | ||
5815 | * @cmdiocb: pointer to lpfc command iocb data structure. | ||
5816 | * @rspiocb: pointer to lpfc response iocb data structure. | ||
5817 | * | ||
5818 | * This routine is the completion callback function to a Fabric Discover | ||
5819 | * (FDISC) ELS command. Since all the FDISC ELS commands are issued | ||
5820 | * single threaded, each FDISC completion callback function will reset | ||
5821 | * the discovery timer for all vports so that the timers will not time out | ||
5822 | * unnecessarily. The function checks the FDISC IOCB status. If an error is | ||
5823 | * detected, the vport will be set to FC_VPORT_FAILED state. Otherwise, the | ||
5824 | * vport will be set to FC_VPORT_ACTIVE state. It then checks whether the DID | ||
5825 | * assigned to the vport has been changed with the completion of the FDISC | ||
5826 | * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index) | ||
5827 | * are unregistered from the HBA, and then the lpfc_register_new_vport() | ||
5828 | * routine is invoked to register new vport with the HBA. Otherwise, the | ||
5829 | * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name | ||
5830 | * Server for State Change Request (SCR). | ||
5831 | **/ | ||
4534 | static void | 5832 | static void |
4535 | lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | 5833 | lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, |
4536 | struct lpfc_iocbq *rspiocb) | 5834 | struct lpfc_iocbq *rspiocb) |
@@ -4565,58 +5863,80 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
4565 | goto out; | 5863 | goto out; |
4566 | /* FDISC failed */ | 5864 | /* FDISC failed */ |
4567 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, | 5865 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, |
4568 | "0124 FDISC failed. (%d/%d)\n", | 5866 | "0126 FDISC failed. (%d/%d)\n", |
4569 | irsp->ulpStatus, irsp->un.ulpWord[4]); | 5867 | irsp->ulpStatus, irsp->un.ulpWord[4]); |
5868 | goto fdisc_failed; | ||
5869 | } | ||
4570 | if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING) | 5870 | if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING) |
4571 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); | 5871 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); |
4572 | lpfc_nlp_put(ndlp); | 5872 | lpfc_nlp_put(ndlp); |
4573 | /* giving up on FDISC. Cancel discovery timer */ | 5873 | /* giving up on FDISC. Cancel discovery timer */ |
4574 | lpfc_can_disctmo(vport); | 5874 | lpfc_can_disctmo(vport); |
4575 | } else { | 5875 | spin_lock_irq(shost->host_lock); |
4576 | spin_lock_irq(shost->host_lock); | 5876 | vport->fc_flag |= FC_FABRIC; |
4577 | vport->fc_flag |= FC_FABRIC; | 5877 | if (vport->phba->fc_topology == TOPOLOGY_LOOP) |
4578 | if (vport->phba->fc_topology == TOPOLOGY_LOOP) | 5878 | vport->fc_flag |= FC_PUBLIC_LOOP; |
4579 | vport->fc_flag |= FC_PUBLIC_LOOP; | 5879 | spin_unlock_irq(shost->host_lock); |
4580 | spin_unlock_irq(shost->host_lock); | ||
4581 | 5880 | ||
4582 | vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; | 5881 | vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; |
4583 | lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); | 5882 | lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); |
4584 | if ((vport->fc_prevDID != vport->fc_myDID) && | 5883 | if ((vport->fc_prevDID != vport->fc_myDID) && |
4585 | !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { | 5884 | !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { |
4586 | /* If our NportID changed, we need to ensure all | 5885 | /* If our NportID changed, we need to ensure all |
4587 | * remaining NPORTs get unreg_login'ed so we can | 5886 | * remaining NPORTs get unreg_login'ed so we can |
4588 | * issue unreg_vpi. | 5887 | * issue unreg_vpi. |
4589 | */ | 5888 | */ |
4590 | list_for_each_entry_safe(np, next_np, | 5889 | list_for_each_entry_safe(np, next_np, |
4591 | &vport->fc_nodes, nlp_listp) { | 5890 | &vport->fc_nodes, nlp_listp) { |
4592 | if (!NLP_CHK_NODE_ACT(ndlp) || | 5891 | if (!NLP_CHK_NODE_ACT(ndlp) || |
4593 | (np->nlp_state != NLP_STE_NPR_NODE) || | 5892 | (np->nlp_state != NLP_STE_NPR_NODE) || |
4594 | !(np->nlp_flag & NLP_NPR_ADISC)) | 5893 | !(np->nlp_flag & NLP_NPR_ADISC)) |
4595 | continue; | 5894 | continue; |
4596 | spin_lock_irq(shost->host_lock); | ||
4597 | np->nlp_flag &= ~NLP_NPR_ADISC; | ||
4598 | spin_unlock_irq(shost->host_lock); | ||
4599 | lpfc_unreg_rpi(vport, np); | ||
4600 | } | ||
4601 | lpfc_mbx_unreg_vpi(vport); | ||
4602 | spin_lock_irq(shost->host_lock); | 5895 | spin_lock_irq(shost->host_lock); |
4603 | vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; | 5896 | np->nlp_flag &= ~NLP_NPR_ADISC; |
4604 | spin_unlock_irq(shost->host_lock); | 5897 | spin_unlock_irq(shost->host_lock); |
5898 | lpfc_unreg_rpi(vport, np); | ||
4605 | } | 5899 | } |
4606 | 5900 | lpfc_mbx_unreg_vpi(vport); | |
4607 | if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) | 5901 | spin_lock_irq(shost->host_lock); |
4608 | lpfc_register_new_vport(phba, vport, ndlp); | 5902 | vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; |
4609 | else | 5903 | spin_unlock_irq(shost->host_lock); |
4610 | lpfc_do_scr_ns_plogi(phba, vport); | ||
4611 | |||
4612 | /* Unconditionaly kick off releasing fabric node for vports */ | ||
4613 | lpfc_nlp_put(ndlp); | ||
4614 | } | 5904 | } |
4615 | 5905 | ||
5906 | if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) | ||
5907 | lpfc_register_new_vport(phba, vport, ndlp); | ||
5908 | else | ||
5909 | lpfc_do_scr_ns_plogi(phba, vport); | ||
5910 | goto out; | ||
5911 | fdisc_failed: | ||
5912 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); | ||
5913 | /* Cancel discovery timer */ | ||
5914 | lpfc_can_disctmo(vport); | ||
5915 | lpfc_nlp_put(ndlp); | ||
4616 | out: | 5916 | out: |
4617 | lpfc_els_free_iocb(phba, cmdiocb); | 5917 | lpfc_els_free_iocb(phba, cmdiocb); |
4618 | } | 5918 | } |
4619 | 5919 | ||
5920 | /** | ||
5921 | * lpfc_issue_els_fdisc: Issue a fdisc iocb command. | ||
5922 | * @vport: pointer to a virtual N_Port data structure. | ||
5923 | * @ndlp: pointer to a node-list data structure. | ||
5924 | * @retry: number of retries to the command IOCB. | ||
5925 | * | ||
5926 | * This routine prepares and issues a Fabric Discover (FDISC) IOCB to | ||
5927 | * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb() | ||
5928 | * routine to issue the IOCB, which makes sure only one outstanding fabric | ||
5929 | * IOCB will be sent off the HBA at any given time. | ||
5930 | * | ||
5931 | * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp | ||
5932 | * will be incremented by 1 for holding the ndlp and the reference to ndlp | ||
5933 | * will be stored into the context1 field of the IOCB for the completion | ||
5934 | * callback function to the FDISC ELS command. | ||
5935 | * | ||
5936 | * Return code | ||
5937 | * 0 - Successfully issued fdisc iocb command | ||
5938 | * 1 - Failed to issue fdisc iocb command | ||
5939 | **/ | ||
4620 | static int | 5940 | static int |
4621 | lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | 5941 | lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
4622 | uint8_t retry) | 5942 | uint8_t retry) |
@@ -4691,6 +6011,20 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
4691 | return 0; | 6011 | return 0; |
4692 | } | 6012 | } |
4693 | 6013 | ||
6014 | /** | ||
6015 | * lpfc_cmpl_els_npiv_logo: Completion function with vport logo. | ||
6016 | * @phba: pointer to lpfc hba data structure. | ||
6017 | * @cmdiocb: pointer to lpfc command iocb data structure. | ||
6018 | * @rspiocb: pointer to lpfc response iocb data structure. | ||
6019 | * | ||
6020 | * This routine is the completion callback function to the issuing of a LOGO | ||
6021 | * ELS command off a vport. It frees the command IOCB and then decrement the | ||
6022 | * reference count held on ndlp for this completion function, indicating that | ||
6023 | * the reference to the ndlp is no long needed. Note that the | ||
6024 | * lpfc_els_free_iocb() routine decrements the ndlp reference held for this | ||
6025 | * callback function and an additional explicit ndlp reference decrementation | ||
6026 | * will trigger the actual release of the ndlp. | ||
6027 | **/ | ||
4694 | static void | 6028 | static void |
4695 | lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | 6029 | lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, |
4696 | struct lpfc_iocbq *rspiocb) | 6030 | struct lpfc_iocbq *rspiocb) |
@@ -4712,6 +6046,22 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
4712 | lpfc_nlp_put(ndlp); | 6046 | lpfc_nlp_put(ndlp); |
4713 | } | 6047 | } |
4714 | 6048 | ||
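[Illustrative sketch, not part of the patch] The LOGO completion comment above relies on the ndlp reference-count rule: one reference is dropped when the command IOCB is freed, and a further explicit put is what finally releases the node. Below is a toy reference-counted object showing that rule; it is a generic model, not the lpfc_nlp_get/put implementation.

```c
#include <stdio.h>
#include <stdlib.h>

struct node { int refcnt; };

static struct node *node_get(struct node *n) { n->refcnt++; return n; }

static void node_put(struct node *n)
{
	if (--n->refcnt == 0) {		/* last reference gone: actual release */
		puts("node released");
		free(n);
	}
}

int main(void)
{
	struct node *n = calloc(1, sizeof(*n));

	n->refcnt = 1;		/* original reference from node creation */
	node_get(n);		/* reference held on behalf of the pending command */

	node_put(n);		/* completion path: put done when the IOCB is freed */
	node_put(n);		/* explicit put that triggers the release */
	return 0;
}
```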
6049 | /** | ||
6050 | * lpfc_issue_els_npiv_logo: Issue a logo off a vport. | ||
6051 | * @vport: pointer to a virtual N_Port data structure. | ||
6052 | * @ndlp: pointer to a node-list data structure. | ||
6053 | * | ||
6054 | * This routine issues a LOGO ELS command to an @ndlp off a @vport. | ||
6055 | * | ||
6056 | * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp | ||
6057 | * will be incremented by 1 for holding the ndlp and the reference to ndlp | ||
6058 | * will be stored into the context1 field of the IOCB for the completion | ||
6059 | * callback function to the LOGO ELS command. | ||
6060 | * | ||
6061 | * Return codes | ||
6062 | * 0 - Successfully issued logo off the @vport | ||
6063 | * 1 - Failed to issue logo off the @vport | ||
6064 | **/ | ||
4715 | int | 6065 | int |
4716 | lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | 6066 | lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) |
4717 | { | 6067 | { |
@@ -4757,6 +6107,17 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
4757 | return 0; | 6107 | return 0; |
4758 | } | 6108 | } |
4759 | 6109 | ||
6110 | /** | ||
6111 | * lpfc_fabric_block_timeout: Handler function for the fabric block timer. | ||
6112 | * @ptr: holder for the timer function associated data. | ||
6113 | * | ||
6114 | * This routine is invoked by the fabric iocb block timer after | ||
6115 | * timeout. It posts the fabric iocb block timeout event by setting the | ||
6116 | * WORKER_FABRIC_BLOCK_TMO bit in the work port event bitmap and then invokes | ||
6117 | * lpfc_worker_wake_up() routine to wake up the worker thread. It is for | ||
6118 | * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the | ||
6119 | * posted event WORKER_FABRIC_BLOCK_TMO. | ||
6120 | **/ | ||
4760 | void | 6121 | void |
4761 | lpfc_fabric_block_timeout(unsigned long ptr) | 6122 | lpfc_fabric_block_timeout(unsigned long ptr) |
4762 | { | 6123 | { |
@@ -4775,6 +6136,16 @@ lpfc_fabric_block_timeout(unsigned long ptr) | |||
4775 | return; | 6136 | return; |
4776 | } | 6137 | } |
4777 | 6138 | ||
6139 | /** | ||
6140 | * lpfc_resume_fabric_iocbs: Issue a fabric iocb from driver internal list. | ||
6141 | * @phba: pointer to lpfc hba data structure. | ||
6142 | * | ||
6143 | * This routine issues one fabric iocb from the driver internal list to | ||
6144 | * the HBA. It first checks whether it's ready to issue one fabric iocb to | ||
6145 | * the HBA (i.e., whether there is no outstanding fabric iocb). If so, it | ||
6146 | * removes one pending fabric iocb from the driver internal list and invokes | ||
6147 | * the lpfc_sli_issue_iocb() routine to send that fabric iocb to the HBA. | ||
6148 | **/ | ||
4778 | static void | 6149 | static void |
4779 | lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) | 6150 | lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) |
4780 | { | 6151 | { |
@@ -4824,6 +6195,15 @@ repeat: | |||
4824 | return; | 6195 | return; |
4825 | } | 6196 | } |
4826 | 6197 | ||
6198 | /** | ||
6199 | * lpfc_unblock_fabric_iocbs: Unblock issuing fabric iocb command. | ||
6200 | * @phba: pointer to lpfc hba data structure. | ||
6201 | * | ||
6202 | * This routine unblocks the issuing of fabric iocb commands. The function | ||
6203 | * will clear the fabric iocb block bit and then invoke the routine | ||
6204 | * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb | ||
6205 | * from the driver internal fabric iocb list. | ||
6206 | **/ | ||
4827 | void | 6207 | void |
4828 | lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba) | 6208 | lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba) |
4829 | { | 6209 | { |
@@ -4833,6 +6213,15 @@ lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba) | |||
4833 | return; | 6213 | return; |
4834 | } | 6214 | } |
4835 | 6215 | ||
6216 | /** | ||
6217 | * lpfc_block_fabric_iocbs: Block issuing fabric iocb command. | ||
6218 | * @phba: pointer to lpfc hba data structure. | ||
6219 | * | ||
6220 | * This routine blocks the issuing of fabric iocbs for a specified amount of | ||
6221 | * time (currently 100 ms). This is done by setting the fabric iocb block bit | ||
6222 | * and setting up a timeout timer for 100 ms. While the block bit is set, no | ||
6223 | * more fabric iocbs will be issued out of the HBA. | ||
6224 | **/ | ||
4836 | static void | 6225 | static void |
4837 | lpfc_block_fabric_iocbs(struct lpfc_hba *phba) | 6226 | lpfc_block_fabric_iocbs(struct lpfc_hba *phba) |
4838 | { | 6227 | { |
@@ -4846,6 +6235,19 @@ lpfc_block_fabric_iocbs(struct lpfc_hba *phba) | |||
4846 | return; | 6235 | return; |
4847 | } | 6236 | } |
4848 | 6237 | ||
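Taken together, the block, unblock and resume routines implement a small gate: while the block bit is set nothing is sent, and clearing it lets one queued command go out. A toy, user-space sketch of that gate (the 100 ms timer is omitted and all names are illustrative) could be:

    #include <stdio.h>

    static int blocked;                          /* models the fabric iocb block bit */
    static int queued = 1;                       /* commands parked on the internal list */

    static void resume_one(void)
    {
        if (!blocked && queued) {
            queued--;
            printf("issue one queued fabric iocb\n");
        }
    }

    static void block_fabric(void)   { blocked = 1; /* plus a ~100 ms timer */ }
    static void unblock_fabric(void) { blocked = 0; resume_one(); }

    int main(void)
    {
        block_fabric();
        resume_one();                            /* nothing happens while blocked */
        unblock_fabric();                        /* the queued command is issued */
        return 0;
    }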
6238 | /** | ||
6239 | * lpfc_cmpl_fabric_iocb: Completion callback function for fabric iocb. | ||
6240 | * @phba: pointer to lpfc hba data structure. | ||
6241 | * @cmdiocb: pointer to lpfc command iocb data structure. | ||
6242 | * @rspiocb: pointer to lpfc response iocb data structure. | ||
6243 | * | ||
6244 | * This routine is the callback function installed as the fabric iocb's | ||
6245 | * callback function pointer (iocb->iocb_cmpl). The original iocb's callback | ||
6246 | * function pointer has been stored in iocb->fabric_iocb_cmpl. This callback | ||
6247 | * function first restores and invokes the original iocb's callback function | ||
6248 | * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next | ||
6249 | * fabric bound iocb from the driver internal fabric iocb list onto the wire. | ||
6250 | **/ | ||
4849 | static void | 6251 | static void |
4850 | lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | 6252 | lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, |
4851 | struct lpfc_iocbq *rspiocb) | 6253 | struct lpfc_iocbq *rspiocb) |
@@ -4892,6 +6294,30 @@ lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
4892 | } | 6294 | } |
4893 | } | 6295 | } |
4894 | 6296 | ||
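The completion wrapper described above relies on saving the caller's completion routine, restoring it when the response arrives, calling it, and then kicking the next queued command. A minimal stand-alone C illustration of that callback chaining (not the driver's real structures) is:

    #include <stdio.h>

    struct req {
        void (*cmpl)(struct req *);              /* active callback (iocb_cmpl) */
        void (*saved_cmpl)(struct req *);        /* original one (fabric_iocb_cmpl) */
    };

    static void orig_cmpl(struct req *r)
    {
        (void)r;
        printf("original completion runs\n");
    }

    static void fabric_cmpl(struct req *r)
    {
        r->cmpl = r->saved_cmpl;                 /* restore the caller's completion */
        r->cmpl(r);
        printf("resume next queued fabric iocb\n");
    }

    int main(void)
    {
        struct req r = { .cmpl = fabric_cmpl, .saved_cmpl = orig_cmpl };

        r.cmpl(&r);                              /* what the "hardware" would invoke */
        return 0;
    }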
6297 | /** | ||
6298 | * lpfc_issue_fabric_iocb: Issue a fabric iocb command. | ||
6299 | * @phba: pointer to lpfc hba data structure. | ||
6300 | * @iocb: pointer to lpfc command iocb data structure. | ||
6301 | * | ||
6302 | * This routine is used as the top-level API for issuing a fabric iocb command | ||
6303 | * such as FLOGI and FDISC. To accommodate certain switch fabrics, this driver | ||
6304 | * function makes sure that only one fabric bound iocb will be outstanding at | ||
6305 | * any given time. As such, this function will first check to see whether there | ||
6306 | * is already an outstanding fabric iocb on the wire. If so, it will put the | ||
6307 | * newly issued iocb onto the driver internal fabric iocb list, waiting to be | ||
6308 | * issued later. Otherwise, it will issue the iocb on the wire and update the | ||
6309 | * fabric iocb count to indicate that there is one fabric iocb on the wire. | ||
6310 | * | ||
6311 | * Note, this implementation can potentially send fabric IOCBs out of | ||
6312 | * order. The problem is that the construction of the "ready" boolean does | ||
6313 | * not include the condition that the internal fabric IOCB list is empty. As | ||
6314 | * such, it is possible that a fabric IOCB issued by this routine might "jump" | ||
6315 | * ahead of the fabric IOCBs already in the internal list. | ||
6316 | * | ||
6317 | * Return code | ||
6318 | * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully | ||
6319 | * IOCB_ERROR - failed to issue fabric iocb | ||
6320 | **/ | ||
4895 | static int | 6321 | static int |
4896 | lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) | 6322 | lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) |
4897 | { | 6323 | { |
@@ -4937,7 +6363,17 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) | |||
4937 | return ret; | 6363 | return ret; |
4938 | } | 6364 | } |
4939 | 6365 | ||
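The serialization and the ordering caveat spelled out in the kernel-doc above fit in a few lines of stand-alone C. The names below are invented; the point is only that the "ready" test checks the outstanding count but not whether the pending list is empty:

    #include <stdio.h>

    static int outstanding;                      /* 0 or 1 fabric command on the wire */
    static int pending;                          /* commands parked on the internal list */

    static void issue_fabric(int id)
    {
        int ready = (outstanding == 0);          /* note: does not check pending == 0 */

        if (ready) {
            outstanding = 1;
            printf("cmd %d sent to the HBA\n", id);
        } else {
            pending++;
            printf("cmd %d queued (pending=%d)\n", id, pending);
        }
    }

    int main(void)
    {
        issue_fabric(1);                         /* goes on the wire */
        issue_fabric(2);                         /* queued behind cmd 1 */
        outstanding = 0;                         /* pretend cmd 1 completed */
        issue_fabric(3);                         /* jumps ahead of queued cmd 2 */
        return 0;
    }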
4940 | 6366 | /** | |
6367 | * lpfc_fabric_abort_vport: Abort a vport's iocbs from driver fabric iocb list. | ||
6368 | * @vport: pointer to a virtual N_Port data structure. | ||
6369 | * | ||
6370 | * This routine aborts all the IOCBs associated with a @vport from the | ||
6371 | * driver internal fabric IOCB list. The list contains fabric IOCBs to be | ||
6372 | * issued to the ELS IOCB ring. This abort function walks the fabric IOCB | ||
6373 | * list, removes each IOCB associated with the @vport from the list, sets the | ||
6374 | * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function | ||
6375 | * associated with the IOCB. | ||
6376 | **/ | ||
4941 | static void lpfc_fabric_abort_vport(struct lpfc_vport *vport) | 6377 | static void lpfc_fabric_abort_vport(struct lpfc_vport *vport) |
4942 | { | 6378 | { |
4943 | LIST_HEAD(completions); | 6379 | LIST_HEAD(completions); |
@@ -4967,6 +6403,17 @@ static void lpfc_fabric_abort_vport(struct lpfc_vport *vport) | |||
4967 | } | 6403 | } |
4968 | } | 6404 | } |
4969 | 6405 | ||
6406 | /** | ||
6407 | * lpfc_fabric_abort_nport: Abort a ndlp's iocbs from driver fabric iocb list. | ||
6408 | * @ndlp: pointer to a node-list data structure. | ||
6409 | * | ||
6410 | * This routine aborts all the IOCBs associated with an @ndlp from the | ||
6411 | * driver internal fabric IOCB list. The list contains fabric IOCBs to be | ||
6412 | * issued to the ELS IOCB ring. This abort function walks the fabric IOCB | ||
6413 | * list, removes each IOCB associated with the @ndlp from the list, sets the | ||
6414 | * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function | ||
6415 | * associated with the IOCB. | ||
6416 | **/ | ||
4970 | void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp) | 6417 | void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp) |
4971 | { | 6418 | { |
4972 | LIST_HEAD(completions); | 6419 | LIST_HEAD(completions); |
@@ -4996,6 +6443,17 @@ void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp) | |||
4996 | } | 6443 | } |
4997 | } | 6444 | } |
4998 | 6445 | ||
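Both abort helpers above (and the HBA-wide one that follows) share the same walk-and-complete shape: matching IOCBs are moved to a local list, in the driver under the host lock, and are then completed with a local-reject status. A simplified, self-contained C version of that shape, with invented types, is sketched below:

    #include <stdio.h>
    #include <stdlib.h>

    struct io { int port; struct io *next; };

    static struct io *fabric_list;               /* models the internal fabric iocb list */

    static void abort_port(int port)
    {
        struct io **pp = &fabric_list, *done = NULL, *io;

        while ((io = *pp) != NULL) {             /* unlink matching entries */
            if (io->port == port) {
                *pp = io->next;
                io->next = done;
                done = io;
            } else {
                pp = &io->next;
            }
        }
        while ((io = done) != NULL) {            /* complete them afterwards */
            done = io->next;
            printf("io on port %d completed with local reject\n", io->port);
            free(io);
        }
    }

    int main(void)
    {
        for (int p = 0; p < 2; p++) {
            struct io *io = malloc(sizeof(*io));
            io->port = p;
            io->next = fabric_list;
            fabric_list = io;
        }
        abort_port(1);                           /* only the port-1 entry is aborted */
        abort_port(0);
        return 0;
    }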
6446 | /** | ||
6447 | * lpfc_fabric_abort_hba: Abort all iocbs on driver fabric iocb list. | ||
6448 | * @phba: pointer to lpfc hba data structure. | ||
6449 | * | ||
6450 | * This routine aborts all the IOCBs currently on the driver internal | ||
6451 | * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS | ||
6452 | * IOCB ring. This function takes the entire IOCB list off the fabric IOCB | ||
6453 | * list, removes the IOCBs from the list, sets the status field to | ||
6454 | * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with | ||
6455 | * the IOCB. | ||
6456 | **/ | ||
4999 | void lpfc_fabric_abort_hba(struct lpfc_hba *phba) | 6457 | void lpfc_fabric_abort_hba(struct lpfc_hba *phba) |
5000 | { | 6458 | { |
5001 | LIST_HEAD(completions); | 6459 | LIST_HEAD(completions); |
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index a98d11bf3576..a1a70d9ffc2a 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <scsi/scsi_transport_fc.h> | 30 | #include <scsi/scsi_transport_fc.h> |
31 | 31 | ||
32 | #include "lpfc_hw.h" | 32 | #include "lpfc_hw.h" |
33 | #include "lpfc_nl.h" | ||
33 | #include "lpfc_disc.h" | 34 | #include "lpfc_disc.h" |
34 | #include "lpfc_sli.h" | 35 | #include "lpfc_sli.h" |
35 | #include "lpfc_scsi.h" | 36 | #include "lpfc_scsi.h" |
@@ -88,14 +89,6 @@ lpfc_terminate_rport_io(struct fc_rport *rport) | |||
88 | &phba->sli.ring[phba->sli.fcp_ring], | 89 | &phba->sli.ring[phba->sli.fcp_ring], |
89 | ndlp->nlp_sid, 0, LPFC_CTX_TGT); | 90 | ndlp->nlp_sid, 0, LPFC_CTX_TGT); |
90 | } | 91 | } |
91 | |||
92 | /* | ||
93 | * A device is normally blocked for rediscovery and unblocked when | ||
94 | * devloss timeout happens. In case a vport is removed or driver | ||
95 | * unloaded before devloss timeout happens, we need to unblock here. | ||
96 | */ | ||
97 | scsi_target_unblock(&rport->dev); | ||
98 | return; | ||
99 | } | 92 | } |
100 | 93 | ||
101 | /* | 94 | /* |
@@ -215,8 +208,16 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) | |||
215 | return; | 208 | return; |
216 | } | 209 | } |
217 | 210 | ||
218 | if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) | 211 | if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) { |
212 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, | ||
213 | "0284 Devloss timeout Ignored on " | ||
214 | "WWPN %x:%x:%x:%x:%x:%x:%x:%x " | ||
215 | "NPort x%x\n", | ||
216 | *name, *(name+1), *(name+2), *(name+3), | ||
217 | *(name+4), *(name+5), *(name+6), *(name+7), | ||
218 | ndlp->nlp_DID); | ||
219 | return; | 219 | return; |
220 | } | ||
220 | 221 | ||
221 | if (ndlp->nlp_type & NLP_FABRIC) { | 222 | if (ndlp->nlp_type & NLP_FABRIC) { |
222 | /* We will clean up these Nodes in linkup */ | 223 | /* We will clean up these Nodes in linkup */ |
@@ -237,8 +238,6 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) | |||
237 | lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], | 238 | lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], |
238 | ndlp->nlp_sid, 0, LPFC_CTX_TGT); | 239 | ndlp->nlp_sid, 0, LPFC_CTX_TGT); |
239 | } | 240 | } |
240 | if (vport->load_flag & FC_UNLOADING) | ||
241 | warn_on = 0; | ||
242 | 241 | ||
243 | if (warn_on) { | 242 | if (warn_on) { |
244 | lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, | 243 | lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, |
@@ -276,6 +275,124 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) | |||
276 | lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); | 275 | lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); |
277 | } | 276 | } |
278 | 277 | ||
278 | /** | ||
279 | * lpfc_alloc_fast_evt: Allocates data structure for posting event. | ||
280 | * @phba: Pointer to hba context object. | ||
281 | * | ||
282 | * This function is called from the functions which need to post | ||
283 | * events from interrupt context. This function allocates data | ||
284 | * structure required for posting an event. It also keeps track of the | ||
285 | * number of events pending and prevents event storms when there are | ||
286 | * too many events. | ||
287 | **/ | ||
288 | struct lpfc_fast_path_event * | ||
289 | lpfc_alloc_fast_evt(struct lpfc_hba *phba) { | ||
290 | struct lpfc_fast_path_event *ret; | ||
291 | |||
292 | /* If there are lot of fast event do not exhaust memory due to this */ | ||
293 | if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT) | ||
294 | return NULL; | ||
295 | |||
296 | ret = kzalloc(sizeof(struct lpfc_fast_path_event), | ||
297 | GFP_ATOMIC); | ||
298 | if (ret) | ||
299 | atomic_inc(&phba->fast_event_count); | ||
300 | INIT_LIST_HEAD(&ret->work_evt.evt_listp); | ||
301 | ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT; | ||
302 | return ret; | ||
303 | } | ||
304 | |||
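The allocator above caps the number of outstanding fast-path events so that a burst of interrupts cannot exhaust memory. The sketch below models that guard in plain C; it also checks the allocation result before touching the new object, which is added here purely for safety and is not a line-for-line copy of the driver:

    #include <stdlib.h>

    #define MAX_EVT_COUNT 64                     /* illustrative cap, not the driver's */

    static int fast_event_count;

    struct fast_evt { int type; };

    static struct fast_evt *alloc_fast_evt(void)
    {
        struct fast_evt *evt;

        if (fast_event_count > MAX_EVT_COUNT)    /* too many events already pending */
            return NULL;

        evt = calloc(1, sizeof(*evt));
        if (!evt)                                /* allocation failed */
            return NULL;

        fast_event_count++;
        return evt;
    }

    static void free_fast_evt(struct fast_evt *evt)
    {
        fast_event_count--;
        free(evt);
    }

    int main(void)
    {
        struct fast_evt *evt = alloc_fast_evt();

        if (evt)
            free_fast_evt(evt);
        return 0;
    }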
305 | /** | ||
306 | * lpfc_free_fast_evt: Frees event data structure. | ||
307 | * @phba: Pointer to hba context object. | ||
308 | * @evt: Event object which needs to be freed. | ||
309 | * | ||
310 | * This function frees the data structure required for posting | ||
311 | * events. | ||
312 | **/ | ||
313 | void | ||
314 | lpfc_free_fast_evt(struct lpfc_hba *phba, | ||
315 | struct lpfc_fast_path_event *evt) { | ||
316 | |||
317 | atomic_dec(&phba->fast_event_count); | ||
318 | kfree(evt); | ||
319 | } | ||
320 | |||
321 | /** | ||
322 | * lpfc_send_fastpath_evt: Posts events generated from fast path. | ||
323 | * @phba: Pointer to hba context object. | ||
324 | * @evtp: Event data structure. | ||
325 | * | ||
326 | * This function is called from the worker thread when the interrupt | ||
327 | * context needs to post an event. This function posts the event | ||
328 | * to the fc transport netlink interface. | ||
329 | **/ | ||
330 | static void | ||
331 | lpfc_send_fastpath_evt(struct lpfc_hba *phba, | ||
332 | struct lpfc_work_evt *evtp) | ||
333 | { | ||
334 | unsigned long evt_category, evt_sub_category; | ||
335 | struct lpfc_fast_path_event *fast_evt_data; | ||
336 | char *evt_data; | ||
337 | uint32_t evt_data_size; | ||
338 | struct Scsi_Host *shost; | ||
339 | |||
340 | fast_evt_data = container_of(evtp, struct lpfc_fast_path_event, | ||
341 | work_evt); | ||
342 | |||
343 | evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type; | ||
344 | evt_sub_category = (unsigned long) fast_evt_data->un. | ||
345 | fabric_evt.subcategory; | ||
346 | shost = lpfc_shost_from_vport(fast_evt_data->vport); | ||
347 | if (evt_category == FC_REG_FABRIC_EVENT) { | ||
348 | if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) { | ||
349 | evt_data = (char *) &fast_evt_data->un.read_check_error; | ||
350 | evt_data_size = sizeof(fast_evt_data->un. | ||
351 | read_check_error); | ||
352 | } else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) || | ||
353 | (evt_sub_category == IOSTAT_NPORT_BSY)) { | ||
354 | evt_data = (char *) &fast_evt_data->un.fabric_evt; | ||
355 | evt_data_size = sizeof(fast_evt_data->un.fabric_evt); | ||
356 | } else { | ||
357 | lpfc_free_fast_evt(phba, fast_evt_data); | ||
358 | return; | ||
359 | } | ||
360 | } else if (evt_category == FC_REG_SCSI_EVENT) { | ||
361 | switch (evt_sub_category) { | ||
362 | case LPFC_EVENT_QFULL: | ||
363 | case LPFC_EVENT_DEVBSY: | ||
364 | evt_data = (char *) &fast_evt_data->un.scsi_evt; | ||
365 | evt_data_size = sizeof(fast_evt_data->un.scsi_evt); | ||
366 | break; | ||
367 | case LPFC_EVENT_CHECK_COND: | ||
368 | evt_data = (char *) &fast_evt_data->un.check_cond_evt; | ||
369 | evt_data_size = sizeof(fast_evt_data->un. | ||
370 | check_cond_evt); | ||
371 | break; | ||
372 | case LPFC_EVENT_VARQUEDEPTH: | ||
373 | evt_data = (char *) &fast_evt_data->un.queue_depth_evt; | ||
374 | evt_data_size = sizeof(fast_evt_data->un. | ||
375 | queue_depth_evt); | ||
376 | break; | ||
377 | default: | ||
378 | lpfc_free_fast_evt(phba, fast_evt_data); | ||
379 | return; | ||
380 | } | ||
381 | } else { | ||
382 | lpfc_free_fast_evt(phba, fast_evt_data); | ||
383 | return; | ||
384 | } | ||
385 | |||
386 | fc_host_post_vendor_event(shost, | ||
387 | fc_get_event_number(), | ||
388 | evt_data_size, | ||
389 | evt_data, | ||
390 | SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); | ||
391 | |||
392 | lpfc_free_fast_evt(phba, fast_evt_data); | ||
393 | return; | ||
394 | } | ||
395 | |||
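At its core the routine above maps an (event category, subcategory) pair to a payload pointer and payload size inside a union, then hands that buffer to a single posting call. A compact stand-alone C model of just that selection step, with made-up event names, could look like:

    #include <stdio.h>
    #include <string.h>

    enum { EVT_QFULL, EVT_CHECK_COND, EVT_UNKNOWN };

    union payload {
        char scsi_evt[16];
        char check_cond_evt[32];
    };

    static void post_event(const void *data, size_t size)
    {
        (void)data;
        printf("posting %zu bytes\n", size);     /* stands in for the vendor-event post */
    }

    static void send_event(int sub, const union payload *p)
    {
        switch (sub) {
        case EVT_QFULL:
            post_event(&p->scsi_evt, sizeof(p->scsi_evt));
            break;
        case EVT_CHECK_COND:
            post_event(&p->check_cond_evt, sizeof(p->check_cond_evt));
            break;
        default:
            return;                              /* unknown subcategory: drop it */
        }
    }

    int main(void)
    {
        union payload p;

        memset(&p, 0, sizeof(p));
        send_event(EVT_CHECK_COND, &p);
        return 0;
    }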
279 | static void | 396 | static void |
280 | lpfc_work_list_done(struct lpfc_hba *phba) | 397 | lpfc_work_list_done(struct lpfc_hba *phba) |
281 | { | 398 | { |
@@ -347,6 +464,10 @@ lpfc_work_list_done(struct lpfc_hba *phba) | |||
347 | lpfc_unblock_mgmt_io(phba); | 464 | lpfc_unblock_mgmt_io(phba); |
348 | complete((struct completion *)(evtp->evt_arg2)); | 465 | complete((struct completion *)(evtp->evt_arg2)); |
349 | break; | 466 | break; |
467 | case LPFC_EVT_FASTPATH_MGMT_EVT: | ||
468 | lpfc_send_fastpath_evt(phba, evtp); | ||
469 | free_evt = 0; | ||
470 | break; | ||
350 | } | 471 | } |
351 | if (free_evt) | 472 | if (free_evt) |
352 | kfree(evtp); | 473 | kfree(evtp); |
@@ -371,6 +492,7 @@ lpfc_work_done(struct lpfc_hba *phba) | |||
371 | spin_unlock_irq(&phba->hbalock); | 492 | spin_unlock_irq(&phba->hbalock); |
372 | 493 | ||
373 | if (ha_copy & HA_ERATT) | 494 | if (ha_copy & HA_ERATT) |
495 | /* Handle the error attention event */ | ||
374 | lpfc_handle_eratt(phba); | 496 | lpfc_handle_eratt(phba); |
375 | 497 | ||
376 | if (ha_copy & HA_MBATT) | 498 | if (ha_copy & HA_MBATT) |
@@ -378,6 +500,7 @@ lpfc_work_done(struct lpfc_hba *phba) | |||
378 | 500 | ||
379 | if (ha_copy & HA_LATT) | 501 | if (ha_copy & HA_LATT) |
380 | lpfc_handle_latt(phba); | 502 | lpfc_handle_latt(phba); |
503 | |||
381 | vports = lpfc_create_vport_work_array(phba); | 504 | vports = lpfc_create_vport_work_array(phba); |
382 | if (vports != NULL) | 505 | if (vports != NULL) |
383 | for(i = 0; i <= phba->max_vpi; i++) { | 506 | for(i = 0; i <= phba->max_vpi; i++) { |
@@ -1013,14 +1136,10 @@ out: | |||
1013 | } | 1136 | } |
1014 | 1137 | ||
1015 | static void | 1138 | static void |
1016 | lpfc_mbx_issue_link_down(struct lpfc_hba *phba) | 1139 | lpfc_enable_la(struct lpfc_hba *phba) |
1017 | { | 1140 | { |
1018 | uint32_t control; | 1141 | uint32_t control; |
1019 | struct lpfc_sli *psli = &phba->sli; | 1142 | struct lpfc_sli *psli = &phba->sli; |
1020 | |||
1021 | lpfc_linkdown(phba); | ||
1022 | |||
1023 | /* turn on Link Attention interrupts - no CLEAR_LA needed */ | ||
1024 | spin_lock_irq(&phba->hbalock); | 1143 | spin_lock_irq(&phba->hbalock); |
1025 | psli->sli_flag |= LPFC_PROCESS_LA; | 1144 | psli->sli_flag |= LPFC_PROCESS_LA; |
1026 | control = readl(phba->HCregaddr); | 1145 | control = readl(phba->HCregaddr); |
@@ -1030,6 +1149,15 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba) | |||
1030 | spin_unlock_irq(&phba->hbalock); | 1149 | spin_unlock_irq(&phba->hbalock); |
1031 | } | 1150 | } |
1032 | 1151 | ||
1152 | static void | ||
1153 | lpfc_mbx_issue_link_down(struct lpfc_hba *phba) | ||
1154 | { | ||
1155 | lpfc_linkdown(phba); | ||
1156 | lpfc_enable_la(phba); | ||
1157 | /* turn on Link Attention interrupts - no CLEAR_LA needed */ | ||
1158 | } | ||
1159 | |||
1160 | |||
1033 | /* | 1161 | /* |
1034 | * This routine handles processing a READ_LA mailbox | 1162 | * This routine handles processing a READ_LA mailbox |
1035 | * command upon completion. It is setup in the LPFC_MBOXQ | 1163 | * command upon completion. It is setup in the LPFC_MBOXQ |
@@ -1077,8 +1205,12 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
1077 | } | 1205 | } |
1078 | 1206 | ||
1079 | phba->fc_eventTag = la->eventTag; | 1207 | phba->fc_eventTag = la->eventTag; |
1208 | if (la->mm) | ||
1209 | phba->sli.sli_flag |= LPFC_MENLO_MAINT; | ||
1210 | else | ||
1211 | phba->sli.sli_flag &= ~LPFC_MENLO_MAINT; | ||
1080 | 1212 | ||
1081 | if (la->attType == AT_LINK_UP) { | 1213 | if (la->attType == AT_LINK_UP && (!la->mm)) { |
1082 | phba->fc_stat.LinkUp++; | 1214 | phba->fc_stat.LinkUp++; |
1083 | if (phba->link_flag & LS_LOOPBACK_MODE) { | 1215 | if (phba->link_flag & LS_LOOPBACK_MODE) { |
1084 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, | 1216 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, |
@@ -1090,13 +1222,15 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
1090 | } else { | 1222 | } else { |
1091 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, | 1223 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, |
1092 | "1303 Link Up Event x%x received " | 1224 | "1303 Link Up Event x%x received " |
1093 | "Data: x%x x%x x%x x%x\n", | 1225 | "Data: x%x x%x x%x x%x x%x x%x %d\n", |
1094 | la->eventTag, phba->fc_eventTag, | 1226 | la->eventTag, phba->fc_eventTag, |
1095 | la->granted_AL_PA, la->UlnkSpeed, | 1227 | la->granted_AL_PA, la->UlnkSpeed, |
1096 | phba->alpa_map[0]); | 1228 | phba->alpa_map[0], |
1229 | la->mm, la->fa, | ||
1230 | phba->wait_4_mlo_maint_flg); | ||
1097 | } | 1231 | } |
1098 | lpfc_mbx_process_link_up(phba, la); | 1232 | lpfc_mbx_process_link_up(phba, la); |
1099 | } else { | 1233 | } else if (la->attType == AT_LINK_DOWN) { |
1100 | phba->fc_stat.LinkDown++; | 1234 | phba->fc_stat.LinkDown++; |
1101 | if (phba->link_flag & LS_LOOPBACK_MODE) { | 1235 | if (phba->link_flag & LS_LOOPBACK_MODE) { |
1102 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, | 1236 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, |
@@ -1109,11 +1243,46 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
1109 | else { | 1243 | else { |
1110 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, | 1244 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, |
1111 | "1305 Link Down Event x%x received " | 1245 | "1305 Link Down Event x%x received " |
1246 | "Data: x%x x%x x%x x%x x%x\n", | ||
1247 | la->eventTag, phba->fc_eventTag, | ||
1248 | phba->pport->port_state, vport->fc_flag, | ||
1249 | la->mm, la->fa); | ||
1250 | } | ||
1251 | lpfc_mbx_issue_link_down(phba); | ||
1252 | } | ||
1253 | if (la->mm && la->attType == AT_LINK_UP) { | ||
1254 | if (phba->link_state != LPFC_LINK_DOWN) { | ||
1255 | phba->fc_stat.LinkDown++; | ||
1256 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, | ||
1257 | "1312 Link Down Event x%x received " | ||
1258 | "Data: x%x x%x x%x\n", | ||
1259 | la->eventTag, phba->fc_eventTag, | ||
1260 | phba->pport->port_state, vport->fc_flag); | ||
1261 | lpfc_mbx_issue_link_down(phba); | ||
1262 | } else | ||
1263 | lpfc_enable_la(phba); | ||
1264 | |||
1265 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, | ||
1266 | "1310 Menlo Maint Mode Link up Event x%x rcvd " | ||
1112 | "Data: x%x x%x x%x\n", | 1267 | "Data: x%x x%x x%x\n", |
1113 | la->eventTag, phba->fc_eventTag, | 1268 | la->eventTag, phba->fc_eventTag, |
1114 | phba->pport->port_state, vport->fc_flag); | 1269 | phba->pport->port_state, vport->fc_flag); |
1270 | /* | ||
1271 | * The cmnd that triggered this will be waiting for this | ||
1272 | * signal. | ||
1273 | */ | ||
1274 | /* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */ | ||
1275 | if (phba->wait_4_mlo_maint_flg) { | ||
1276 | phba->wait_4_mlo_maint_flg = 0; | ||
1277 | wake_up_interruptible(&phba->wait_4_mlo_m_q); | ||
1115 | } | 1278 | } |
1116 | lpfc_mbx_issue_link_down(phba); | 1279 | } |
1280 | |||
1281 | if (la->fa) { | ||
1282 | if (la->mm) | ||
1283 | lpfc_issue_clear_la(phba, vport); | ||
1284 | lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, | ||
1285 | "1311 fa %d\n", la->fa); | ||
1117 | } | 1286 | } |
1118 | 1287 | ||
1119 | lpfc_mbx_cmpl_read_la_free_mbuf: | 1288 | lpfc_mbx_cmpl_read_la_free_mbuf: |
@@ -1177,7 +1346,7 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
1177 | scsi_host_put(shost); | 1346 | scsi_host_put(shost); |
1178 | } | 1347 | } |
1179 | 1348 | ||
1180 | void | 1349 | int |
1181 | lpfc_mbx_unreg_vpi(struct lpfc_vport *vport) | 1350 | lpfc_mbx_unreg_vpi(struct lpfc_vport *vport) |
1182 | { | 1351 | { |
1183 | struct lpfc_hba *phba = vport->phba; | 1352 | struct lpfc_hba *phba = vport->phba; |
@@ -1186,7 +1355,7 @@ lpfc_mbx_unreg_vpi(struct lpfc_vport *vport) | |||
1186 | 1355 | ||
1187 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 1356 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
1188 | if (!mbox) | 1357 | if (!mbox) |
1189 | return; | 1358 | return 1; |
1190 | 1359 | ||
1191 | lpfc_unreg_vpi(phba, vport->vpi, mbox); | 1360 | lpfc_unreg_vpi(phba, vport->vpi, mbox); |
1192 | mbox->vport = vport; | 1361 | mbox->vport = vport; |
@@ -1197,7 +1366,9 @@ lpfc_mbx_unreg_vpi(struct lpfc_vport *vport) | |||
1197 | "1800 Could not issue unreg_vpi\n"); | 1366 | "1800 Could not issue unreg_vpi\n"); |
1198 | mempool_free(mbox, phba->mbox_mem_pool); | 1367 | mempool_free(mbox, phba->mbox_mem_pool); |
1199 | vport->unreg_vpi_cmpl = VPORT_ERROR; | 1368 | vport->unreg_vpi_cmpl = VPORT_ERROR; |
1369 | return rc; | ||
1200 | } | 1370 | } |
1371 | return 0; | ||
1201 | } | 1372 | } |
1202 | 1373 | ||
1203 | static void | 1374 | static void |
@@ -1553,6 +1724,22 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
1553 | */ | 1724 | */ |
1554 | lpfc_register_remote_port(vport, ndlp); | 1725 | lpfc_register_remote_port(vport, ndlp); |
1555 | } | 1726 | } |
1727 | if ((new_state == NLP_STE_MAPPED_NODE) && | ||
1728 | (vport->stat_data_enabled)) { | ||
1729 | /* | ||
1730 | * A new target is discovered; if there is no buffer for | ||
1731 | * statistical data collection, allocate one. | ||
1732 | */ | ||
1733 | ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT, | ||
1734 | sizeof(struct lpfc_scsicmd_bkt), | ||
1735 | GFP_KERNEL); | ||
1736 | |||
1737 | if (!ndlp->lat_data) | ||
1738 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, | ||
1739 | "0286 lpfc_nlp_state_cleanup failed to " | ||
1740 | "allocate statistical data buffer DID " | ||
1741 | "0x%x\n", ndlp->nlp_DID); | ||
1742 | } | ||
1556 | /* | 1743 | /* |
1557 | * if we added to Mapped list, but the remote port | 1744 | * if we added to Mapped list, but the remote port |
1558 | * registration failed or assigned a target id outside | 1745 | * registration failed or assigned a target id outside |
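The statistics buffer introduced here is a per-target array of buckets allocated when the node reaches the mapped state and freed when the node is finally released (the matching kfree(ndlp->lat_data) appears later in this patch). A minimal user-space sketch of that allocate/free pairing, with illustrative names, is:

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_BUCKET_COUNT 16                  /* illustrative bucket count */

    struct cmd_bkt { unsigned int cmd_cnt; };

    struct node { struct cmd_bkt *lat_data; };

    static void node_mapped(struct node *n)
    {
        n->lat_data = calloc(MAX_BUCKET_COUNT, sizeof(*n->lat_data));
        if (!n->lat_data)                        /* discovery still proceeds without stats */
            fprintf(stderr, "no statistics buffer for this target\n");
    }

    static void node_release(struct node *n)
    {
        free(n->lat_data);                       /* free(NULL) is a no-op, like kfree(NULL) */
        n->lat_data = NULL;
    }

    int main(void)
    {
        struct node n = { 0 };

        node_mapped(&n);
        node_release(&n);
        return 0;
    }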
@@ -2786,7 +2973,7 @@ restart_disc: | |||
2786 | 2973 | ||
2787 | default: | 2974 | default: |
2788 | lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, | 2975 | lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, |
2789 | "0229 Unexpected discovery timeout, " | 2976 | "0273 Unexpected discovery timeout, " |
2790 | "vport State x%x\n", vport->port_state); | 2977 | "vport State x%x\n", vport->port_state); |
2791 | break; | 2978 | break; |
2792 | } | 2979 | } |
@@ -2940,6 +3127,8 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
2940 | INIT_LIST_HEAD(&ndlp->nlp_listp); | 3127 | INIT_LIST_HEAD(&ndlp->nlp_listp); |
2941 | kref_init(&ndlp->kref); | 3128 | kref_init(&ndlp->kref); |
2942 | NLP_INT_NODE_ACT(ndlp); | 3129 | NLP_INT_NODE_ACT(ndlp); |
3130 | atomic_set(&ndlp->cmd_pending, 0); | ||
3131 | ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH; | ||
2943 | 3132 | ||
2944 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, | 3133 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, |
2945 | "node init: did:x%x", | 3134 | "node init: did:x%x", |
@@ -2979,8 +3168,10 @@ lpfc_nlp_release(struct kref *kref) | |||
2979 | spin_unlock_irqrestore(&phba->ndlp_lock, flags); | 3168 | spin_unlock_irqrestore(&phba->ndlp_lock, flags); |
2980 | 3169 | ||
2981 | /* free ndlp memory for final ndlp release */ | 3170 | /* free ndlp memory for final ndlp release */ |
2982 | if (NLP_CHK_FREE_REQ(ndlp)) | 3171 | if (NLP_CHK_FREE_REQ(ndlp)) { |
3172 | kfree(ndlp->lat_data); | ||
2983 | mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool); | 3173 | mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool); |
3174 | } | ||
2984 | } | 3175 | } |
2985 | 3176 | ||
2986 | /* This routine bumps the reference count for a ndlp structure to ensure | 3177 | /* This routine bumps the reference count for a ndlp structure to ensure |
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 7773b949aa7c..5de5dabbbee6 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h | |||
@@ -1107,6 +1107,8 @@ typedef struct { | |||
1107 | /* Start FireFly Register definitions */ | 1107 | /* Start FireFly Register definitions */ |
1108 | #define PCI_VENDOR_ID_EMULEX 0x10df | 1108 | #define PCI_VENDOR_ID_EMULEX 0x10df |
1109 | #define PCI_DEVICE_ID_FIREFLY 0x1ae5 | 1109 | #define PCI_DEVICE_ID_FIREFLY 0x1ae5 |
1110 | #define PCI_DEVICE_ID_PROTEUS_VF 0xe100 | ||
1111 | #define PCI_DEVICE_ID_PROTEUS_PF 0xe180 | ||
1110 | #define PCI_DEVICE_ID_SAT_SMB 0xf011 | 1112 | #define PCI_DEVICE_ID_SAT_SMB 0xf011 |
1111 | #define PCI_DEVICE_ID_SAT_MID 0xf015 | 1113 | #define PCI_DEVICE_ID_SAT_MID 0xf015 |
1112 | #define PCI_DEVICE_ID_RFLY 0xf095 | 1114 | #define PCI_DEVICE_ID_RFLY 0xf095 |
@@ -1133,10 +1135,12 @@ typedef struct { | |||
1133 | #define PCI_DEVICE_ID_LP11000S 0xfc10 | 1135 | #define PCI_DEVICE_ID_LP11000S 0xfc10 |
1134 | #define PCI_DEVICE_ID_LPE11000S 0xfc20 | 1136 | #define PCI_DEVICE_ID_LPE11000S 0xfc20 |
1135 | #define PCI_DEVICE_ID_SAT_S 0xfc40 | 1137 | #define PCI_DEVICE_ID_SAT_S 0xfc40 |
1138 | #define PCI_DEVICE_ID_PROTEUS_S 0xfc50 | ||
1136 | #define PCI_DEVICE_ID_HELIOS 0xfd00 | 1139 | #define PCI_DEVICE_ID_HELIOS 0xfd00 |
1137 | #define PCI_DEVICE_ID_HELIOS_SCSP 0xfd11 | 1140 | #define PCI_DEVICE_ID_HELIOS_SCSP 0xfd11 |
1138 | #define PCI_DEVICE_ID_HELIOS_DCSP 0xfd12 | 1141 | #define PCI_DEVICE_ID_HELIOS_DCSP 0xfd12 |
1139 | #define PCI_DEVICE_ID_ZEPHYR 0xfe00 | 1142 | #define PCI_DEVICE_ID_ZEPHYR 0xfe00 |
1143 | #define PCI_DEVICE_ID_HORNET 0xfe05 | ||
1140 | #define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11 | 1144 | #define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11 |
1141 | #define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12 | 1145 | #define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12 |
1142 | 1146 | ||
@@ -1154,6 +1158,7 @@ typedef struct { | |||
1154 | #define ZEPHYR_JEDEC_ID 0x0577 | 1158 | #define ZEPHYR_JEDEC_ID 0x0577 |
1155 | #define VIPER_JEDEC_ID 0x4838 | 1159 | #define VIPER_JEDEC_ID 0x4838 |
1156 | #define SATURN_JEDEC_ID 0x1004 | 1160 | #define SATURN_JEDEC_ID 0x1004 |
1161 | #define HORNET_JDEC_ID 0x2057706D | ||
1157 | 1162 | ||
1158 | #define JEDEC_ID_MASK 0x0FFFF000 | 1163 | #define JEDEC_ID_MASK 0x0FFFF000 |
1159 | #define JEDEC_ID_SHIFT 12 | 1164 | #define JEDEC_ID_SHIFT 12 |
@@ -1198,6 +1203,18 @@ typedef struct { /* FireFly BIU registers */ | |||
1198 | #define HA_RXATT 0x00000008 /* Bit 3 */ | 1203 | #define HA_RXATT 0x00000008 /* Bit 3 */ |
1199 | #define HA_RXMASK 0x0000000f | 1204 | #define HA_RXMASK 0x0000000f |
1200 | 1205 | ||
1206 | #define HA_R0_CLR_MSK (HA_R0RE_REQ | HA_R0CE_RSP | HA_R0ATT) | ||
1207 | #define HA_R1_CLR_MSK (HA_R1RE_REQ | HA_R1CE_RSP | HA_R1ATT) | ||
1208 | #define HA_R2_CLR_MSK (HA_R2RE_REQ | HA_R2CE_RSP | HA_R2ATT) | ||
1209 | #define HA_R3_CLR_MSK (HA_R3RE_REQ | HA_R3CE_RSP | HA_R3ATT) | ||
1210 | |||
1211 | #define HA_R0_POS 3 | ||
1212 | #define HA_R1_POS 7 | ||
1213 | #define HA_R2_POS 11 | ||
1214 | #define HA_R3_POS 15 | ||
1215 | #define HA_LE_POS 29 | ||
1216 | #define HA_MB_POS 30 | ||
1217 | #define HA_ER_POS 31 | ||
1201 | /* Chip Attention Register */ | 1218 | /* Chip Attention Register */ |
1202 | 1219 | ||
1203 | #define CA_REG_OFFSET 4 /* Byte offset from register base address */ | 1220 | #define CA_REG_OFFSET 4 /* Byte offset from register base address */ |
@@ -1235,7 +1252,7 @@ typedef struct { /* FireFly BIU registers */ | |||
1235 | 1252 | ||
1236 | /* Host Control Register */ | 1253 | /* Host Control Register */ |
1237 | 1254 | ||
1238 | #define HC_REG_OFFSET 12 /* Word offset from register base address */ | 1255 | #define HC_REG_OFFSET 12 /* Byte offset from register base address */ |
1239 | 1256 | ||
1240 | #define HC_MBINT_ENA 0x00000001 /* Bit 0 */ | 1257 | #define HC_MBINT_ENA 0x00000001 /* Bit 0 */ |
1241 | #define HC_R0INT_ENA 0x00000002 /* Bit 1 */ | 1258 | #define HC_R0INT_ENA 0x00000002 /* Bit 1 */ |
@@ -1248,6 +1265,19 @@ typedef struct { /* FireFly BIU registers */ | |||
1248 | #define HC_LAINT_ENA 0x20000000 /* Bit 29 */ | 1265 | #define HC_LAINT_ENA 0x20000000 /* Bit 29 */ |
1249 | #define HC_ERINT_ENA 0x80000000 /* Bit 31 */ | 1266 | #define HC_ERINT_ENA 0x80000000 /* Bit 31 */ |
1250 | 1267 | ||
1268 | /* Message Signaled Interrupt eXtension (MSI-X) message identifiers */ | ||
1269 | #define MSIX_DFLT_ID 0 | ||
1270 | #define MSIX_RNG0_ID 0 | ||
1271 | #define MSIX_RNG1_ID 1 | ||
1272 | #define MSIX_RNG2_ID 2 | ||
1273 | #define MSIX_RNG3_ID 3 | ||
1274 | |||
1275 | #define MSIX_LINK_ID 4 | ||
1276 | #define MSIX_MBOX_ID 5 | ||
1277 | |||
1278 | #define MSIX_SPARE0_ID 6 | ||
1279 | #define MSIX_SPARE1_ID 7 | ||
1280 | |||
1251 | /* Mailbox Commands */ | 1281 | /* Mailbox Commands */ |
1252 | #define MBX_SHUTDOWN 0x00 /* terminate testing */ | 1282 | #define MBX_SHUTDOWN 0x00 /* terminate testing */ |
1253 | #define MBX_LOAD_SM 0x01 | 1283 | #define MBX_LOAD_SM 0x01 |
@@ -1285,10 +1315,14 @@ typedef struct { /* FireFly BIU registers */ | |||
1285 | #define MBX_KILL_BOARD 0x24 | 1315 | #define MBX_KILL_BOARD 0x24 |
1286 | #define MBX_CONFIG_FARP 0x25 | 1316 | #define MBX_CONFIG_FARP 0x25 |
1287 | #define MBX_BEACON 0x2A | 1317 | #define MBX_BEACON 0x2A |
1318 | #define MBX_CONFIG_MSI 0x30 | ||
1288 | #define MBX_HEARTBEAT 0x31 | 1319 | #define MBX_HEARTBEAT 0x31 |
1289 | #define MBX_WRITE_VPARMS 0x32 | 1320 | #define MBX_WRITE_VPARMS 0x32 |
1290 | #define MBX_ASYNCEVT_ENABLE 0x33 | 1321 | #define MBX_ASYNCEVT_ENABLE 0x33 |
1291 | 1322 | ||
1323 | #define MBX_PORT_CAPABILITIES 0x3B | ||
1324 | #define MBX_PORT_IOV_CONTROL 0x3C | ||
1325 | |||
1292 | #define MBX_CONFIG_HBQ 0x7C | 1326 | #define MBX_CONFIG_HBQ 0x7C |
1293 | #define MBX_LOAD_AREA 0x81 | 1327 | #define MBX_LOAD_AREA 0x81 |
1294 | #define MBX_RUN_BIU_DIAG64 0x84 | 1328 | #define MBX_RUN_BIU_DIAG64 0x84 |
@@ -1474,24 +1508,18 @@ struct ulp_bde64 { /* SLI-2 */ | |||
1474 | uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED | 1508 | uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED |
1475 | VALUE !! */ | 1509 | VALUE !! */ |
1476 | #endif | 1510 | #endif |
1477 | 1511 | #define BUFF_TYPE_BDE_64 0x00 /* BDE (Host_resident) */ | |
1478 | #define BUFF_USE_RSVD 0x01 /* bdeFlags */ | 1512 | #define BUFF_TYPE_BDE_IMMED 0x01 /* Immediate Data BDE */ |
1479 | #define BUFF_USE_INTRPT 0x02 /* Not Implemented with LP6000 */ | 1513 | #define BUFF_TYPE_BDE_64P 0x02 /* BDE (Port-resident) */ |
1480 | #define BUFF_USE_CMND 0x04 /* Optional, 1=cmd/rsp 0=data buffer */ | 1514 | #define BUFF_TYPE_BDE_64I 0x08 /* Input BDE (Host-resident) */ |
1481 | #define BUFF_USE_RCV 0x08 /* "" "", 1=rcv buffer, 0=xmit | 1515 | #define BUFF_TYPE_BDE_64IP 0x0A /* Input BDE (Port-resident) */ |
1482 | buffer */ | 1516 | #define BUFF_TYPE_BLP_64 0x40 /* BLP (Host-resident) */ |
1483 | #define BUFF_TYPE_32BIT 0x10 /* "" "", 1=32 bit addr 0=64 bit | 1517 | #define BUFF_TYPE_BLP_64P 0x42 /* BLP (Port-resident) */ |
1484 | addr */ | ||
1485 | #define BUFF_TYPE_SPECIAL 0x20 /* Not Implemented with LP6000 */ | ||
1486 | #define BUFF_TYPE_BDL 0x40 /* Optional, may be set in BDL */ | ||
1487 | #define BUFF_TYPE_INVALID 0x80 /* "" "" */ | ||
1488 | } f; | 1518 | } f; |
1489 | } tus; | 1519 | } tus; |
1490 | uint32_t addrLow; | 1520 | uint32_t addrLow; |
1491 | uint32_t addrHigh; | 1521 | uint32_t addrHigh; |
1492 | }; | 1522 | }; |
1493 | #define BDE64_SIZE_WORD 0 | ||
1494 | #define BPL64_SIZE_WORD 0x40 | ||
1495 | 1523 | ||
1496 | typedef struct ULP_BDL { /* SLI-2 */ | 1524 | typedef struct ULP_BDL { /* SLI-2 */ |
1497 | #ifdef __BIG_ENDIAN_BITFIELD | 1525 | #ifdef __BIG_ENDIAN_BITFIELD |
@@ -2201,7 +2229,10 @@ typedef struct { | |||
2201 | typedef struct { | 2229 | typedef struct { |
2202 | uint32_t eventTag; /* Event tag */ | 2230 | uint32_t eventTag; /* Event tag */ |
2203 | #ifdef __BIG_ENDIAN_BITFIELD | 2231 | #ifdef __BIG_ENDIAN_BITFIELD |
2204 | uint32_t rsvd1:22; | 2232 | uint32_t rsvd1:19; |
2233 | uint32_t fa:1; | ||
2234 | uint32_t mm:1; /* Menlo Maintenance mode enabled */ | ||
2235 | uint32_t rx:1; | ||
2205 | uint32_t pb:1; | 2236 | uint32_t pb:1; |
2206 | uint32_t il:1; | 2237 | uint32_t il:1; |
2207 | uint32_t attType:8; | 2238 | uint32_t attType:8; |
@@ -2209,7 +2240,10 @@ typedef struct { | |||
2209 | uint32_t attType:8; | 2240 | uint32_t attType:8; |
2210 | uint32_t il:1; | 2241 | uint32_t il:1; |
2211 | uint32_t pb:1; | 2242 | uint32_t pb:1; |
2212 | uint32_t rsvd1:22; | 2243 | uint32_t rx:1; |
2244 | uint32_t mm:1; | ||
2245 | uint32_t fa:1; | ||
2246 | uint32_t rsvd1:19; | ||
2213 | #endif | 2247 | #endif |
2214 | 2248 | ||
2215 | #define AT_RESERVED 0x00 /* Reserved - attType */ | 2249 | #define AT_RESERVED 0x00 /* Reserved - attType */ |
@@ -2230,6 +2264,7 @@ typedef struct { | |||
2230 | 2264 | ||
2231 | #define TOPOLOGY_PT_PT 0x01 /* Topology is pt-pt / pt-fabric */ | 2265 | #define TOPOLOGY_PT_PT 0x01 /* Topology is pt-pt / pt-fabric */ |
2232 | #define TOPOLOGY_LOOP 0x02 /* Topology is FC-AL */ | 2266 | #define TOPOLOGY_LOOP 0x02 /* Topology is FC-AL */ |
2267 | #define TOPOLOGY_LNK_MENLO_MAINTENANCE 0x05 /* maint mode zephyr to menlo */ | ||
2233 | 2268 | ||
2234 | union { | 2269 | union { |
2235 | struct ulp_bde lilpBde; /* This BDE points to a 128 byte buffer | 2270 | struct ulp_bde lilpBde; /* This BDE points to a 128 byte buffer |
@@ -2324,6 +2359,36 @@ typedef struct { | |||
2324 | #define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */ | 2359 | #define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */ |
2325 | #define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */ | 2360 | #define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */ |
2326 | 2361 | ||
2362 | /* Structure for MB Command UPDATE_CFG (0x1B) */ | ||
2363 | |||
2364 | struct update_cfg_var { | ||
2365 | #ifdef __BIG_ENDIAN_BITFIELD | ||
2366 | uint32_t rsvd2:16; | ||
2367 | uint32_t type:8; | ||
2368 | uint32_t rsvd:1; | ||
2369 | uint32_t ra:1; | ||
2370 | uint32_t co:1; | ||
2371 | uint32_t cv:1; | ||
2372 | uint32_t req:4; | ||
2373 | uint32_t entry_length:16; | ||
2374 | uint32_t region_id:16; | ||
2375 | #else /* __LITTLE_ENDIAN_BITFIELD */ | ||
2376 | uint32_t req:4; | ||
2377 | uint32_t cv:1; | ||
2378 | uint32_t co:1; | ||
2379 | uint32_t ra:1; | ||
2380 | uint32_t rsvd:1; | ||
2381 | uint32_t type:8; | ||
2382 | uint32_t rsvd2:16; | ||
2383 | uint32_t region_id:16; | ||
2384 | uint32_t entry_length:16; | ||
2385 | #endif | ||
2386 | |||
2387 | uint32_t resp_info; | ||
2388 | uint32_t byte_cnt; | ||
2389 | uint32_t data_offset; | ||
2390 | }; | ||
2391 | |||
2327 | struct hbq_mask { | 2392 | struct hbq_mask { |
2328 | #ifdef __BIG_ENDIAN_BITFIELD | 2393 | #ifdef __BIG_ENDIAN_BITFIELD |
2329 | uint8_t tmatch; | 2394 | uint8_t tmatch; |
@@ -2560,6 +2625,40 @@ typedef struct { | |||
2560 | 2625 | ||
2561 | } CONFIG_PORT_VAR; | 2626 | } CONFIG_PORT_VAR; |
2562 | 2627 | ||
2628 | /* Structure for MB Command CONFIG_MSI (0x30) */ | ||
2629 | struct config_msi_var { | ||
2630 | #ifdef __BIG_ENDIAN_BITFIELD | ||
2631 | uint32_t dfltMsgNum:8; /* Default message number */ | ||
2632 | uint32_t rsvd1:11; /* Reserved */ | ||
2633 | uint32_t NID:5; /* Number of secondary attention IDs */ | ||
2634 | uint32_t rsvd2:5; /* Reserved */ | ||
2635 | uint32_t dfltPresent:1; /* Default message number present */ | ||
2636 | uint32_t addFlag:1; /* Add association flag */ | ||
2637 | uint32_t reportFlag:1; /* Report association flag */ | ||
2638 | #else /* __LITTLE_ENDIAN_BITFIELD */ | ||
2639 | uint32_t reportFlag:1; /* Report association flag */ | ||
2640 | uint32_t addFlag:1; /* Add association flag */ | ||
2641 | uint32_t dfltPresent:1; /* Default message number present */ | ||
2642 | uint32_t rsvd2:5; /* Reserved */ | ||
2643 | uint32_t NID:5; /* Number of secondary attention IDs */ | ||
2644 | uint32_t rsvd1:11; /* Reserved */ | ||
2645 | uint32_t dfltMsgNum:8; /* Default message number */ | ||
2646 | #endif | ||
2647 | uint32_t attentionConditions[2]; | ||
2648 | uint8_t attentionId[16]; | ||
2649 | uint8_t messageNumberByHA[64]; | ||
2650 | uint8_t messageNumberByID[16]; | ||
2651 | uint32_t autoClearHA[2]; | ||
2652 | #ifdef __BIG_ENDIAN_BITFIELD | ||
2653 | uint32_t rsvd3:16; | ||
2654 | uint32_t autoClearID:16; | ||
2655 | #else /* __LITTLE_ENDIAN_BITFIELD */ | ||
2656 | uint32_t autoClearID:16; | ||
2657 | uint32_t rsvd3:16; | ||
2658 | #endif | ||
2659 | uint32_t rsvd4; | ||
2660 | }; | ||
2661 | |||
2563 | /* SLI-2 Port Control Block */ | 2662 | /* SLI-2 Port Control Block */ |
2564 | 2663 | ||
2565 | /* SLIM POINTER */ | 2664 | /* SLIM POINTER */ |
@@ -2678,10 +2777,12 @@ typedef union { | |||
2678 | * NEW_FEATURE | 2777 | * NEW_FEATURE |
2679 | */ | 2778 | */ |
2680 | struct config_hbq_var varCfgHbq;/* cmd = 0x7c (CONFIG_HBQ) */ | 2779 | struct config_hbq_var varCfgHbq;/* cmd = 0x7c (CONFIG_HBQ) */ |
2780 | struct update_cfg_var varUpdateCfg; /* cmd = 0x1B (UPDATE_CFG)*/ | ||
2681 | CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */ | 2781 | CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */ |
2682 | REG_VPI_VAR varRegVpi; /* cmd = 0x96 (REG_VPI) */ | 2782 | REG_VPI_VAR varRegVpi; /* cmd = 0x96 (REG_VPI) */ |
2683 | UNREG_VPI_VAR varUnregVpi; /* cmd = 0x97 (UNREG_VPI) */ | 2783 | UNREG_VPI_VAR varUnregVpi; /* cmd = 0x97 (UNREG_VPI) */ |
2684 | ASYNCEVT_ENABLE_VAR varCfgAsyncEvent; /*cmd = x33 (CONFIG_ASYNC) */ | 2784 | ASYNCEVT_ENABLE_VAR varCfgAsyncEvent; /*cmd = x33 (CONFIG_ASYNC) */ |
2785 | struct config_msi_var varCfgMSI;/* cmd = x30 (CONFIG_MSI) */ | ||
2685 | } MAILVARIANTS; | 2786 | } MAILVARIANTS; |
2686 | 2787 | ||
2687 | /* | 2788 | /* |
@@ -2715,11 +2816,19 @@ struct sli3_pgp { | |||
2715 | uint32_t hbq_get[16]; | 2816 | uint32_t hbq_get[16]; |
2716 | }; | 2817 | }; |
2717 | 2818 | ||
2718 | typedef union { | 2819 | struct sli3_inb_pgp { |
2719 | struct sli2_desc s2; | 2820 | uint32_t ha_copy; |
2720 | struct sli3_desc s3; | 2821 | uint32_t counter; |
2721 | struct sli3_pgp s3_pgp; | 2822 | struct lpfc_pgp port[MAX_RINGS]; |
2722 | } SLI_VAR; | 2823 | uint32_t hbq_get[16]; |
2824 | }; | ||
2825 | |||
2826 | union sli_var { | ||
2827 | struct sli2_desc s2; | ||
2828 | struct sli3_desc s3; | ||
2829 | struct sli3_pgp s3_pgp; | ||
2830 | struct sli3_inb_pgp s3_inb_pgp; | ||
2831 | }; | ||
2723 | 2832 | ||
2724 | typedef struct { | 2833 | typedef struct { |
2725 | #ifdef __BIG_ENDIAN_BITFIELD | 2834 | #ifdef __BIG_ENDIAN_BITFIELD |
@@ -2737,7 +2846,7 @@ typedef struct { | |||
2737 | #endif | 2846 | #endif |
2738 | 2847 | ||
2739 | MAILVARIANTS un; | 2848 | MAILVARIANTS un; |
2740 | SLI_VAR us; | 2849 | union sli_var us; |
2741 | } MAILBOX_t; | 2850 | } MAILBOX_t; |
2742 | 2851 | ||
2743 | /* | 2852 | /* |
@@ -3105,6 +3214,27 @@ struct que_xri64cx_ext_fields { | |||
3105 | struct lpfc_hbq_entry buff[5]; | 3214 | struct lpfc_hbq_entry buff[5]; |
3106 | }; | 3215 | }; |
3107 | 3216 | ||
3217 | #define LPFC_EXT_DATA_BDE_COUNT 3 | ||
3218 | struct fcp_irw_ext { | ||
3219 | uint32_t io_tag64_low; | ||
3220 | uint32_t io_tag64_high; | ||
3221 | #ifdef __BIG_ENDIAN_BITFIELD | ||
3222 | uint8_t reserved1; | ||
3223 | uint8_t reserved2; | ||
3224 | uint8_t reserved3; | ||
3225 | uint8_t ebde_count; | ||
3226 | #else /* __LITTLE_ENDIAN */ | ||
3227 | uint8_t ebde_count; | ||
3228 | uint8_t reserved3; | ||
3229 | uint8_t reserved2; | ||
3230 | uint8_t reserved1; | ||
3231 | #endif | ||
3232 | uint32_t reserved4; | ||
3233 | struct ulp_bde64 rbde; /* response bde */ | ||
3234 | struct ulp_bde64 dbde[LPFC_EXT_DATA_BDE_COUNT]; /* data BDE or BPL */ | ||
3235 | uint8_t icd[32]; /* immediate command data (32 bytes) */ | ||
3236 | }; | ||
3237 | |||
3108 | typedef struct _IOCB { /* IOCB structure */ | 3238 | typedef struct _IOCB { /* IOCB structure */ |
3109 | union { | 3239 | union { |
3110 | GENERIC_RSP grsp; /* Generic response */ | 3240 | GENERIC_RSP grsp; /* Generic response */ |
@@ -3190,7 +3320,7 @@ typedef struct _IOCB { /* IOCB structure */ | |||
3190 | 3320 | ||
3191 | /* words 8-31 used for que_xri_cx iocb */ | 3321 | /* words 8-31 used for que_xri_cx iocb */ |
3192 | struct que_xri64cx_ext_fields que_xri64cx_ext_words; | 3322 | struct que_xri64cx_ext_fields que_xri64cx_ext_words; |
3193 | 3323 | struct fcp_irw_ext fcp_ext; | |
3194 | uint32_t sli3Words[24]; /* 96 extra bytes for SLI-3 */ | 3324 | uint32_t sli3Words[24]; /* 96 extra bytes for SLI-3 */ |
3195 | } unsli3; | 3325 | } unsli3; |
3196 | 3326 | ||
@@ -3292,3 +3422,10 @@ lpfc_error_lost_link(IOCB_t *iocbp) | |||
3292 | iocbp->un.ulpWord[4] == IOERR_LINK_DOWN || | 3422 | iocbp->un.ulpWord[4] == IOERR_LINK_DOWN || |
3293 | iocbp->un.ulpWord[4] == IOERR_SLI_DOWN)); | 3423 | iocbp->un.ulpWord[4] == IOERR_SLI_DOWN)); |
3294 | } | 3424 | } |
3425 | |||
3426 | #define MENLO_TRANSPORT_TYPE 0xfe | ||
3427 | #define MENLO_CONTEXT 0 | ||
3428 | #define MENLO_PU 3 | ||
3429 | #define MENLO_TIMEOUT 30 | ||
3430 | #define SETVAR_MLOMNT 0x103107 | ||
3431 | #define SETVAR_MLORST 0x103007 | ||
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index d51a2a4b43eb..909be3301bba 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c | |||
@@ -36,6 +36,7 @@ | |||
36 | 36 | ||
37 | #include "lpfc_hw.h" | 37 | #include "lpfc_hw.h" |
38 | #include "lpfc_sli.h" | 38 | #include "lpfc_sli.h" |
39 | #include "lpfc_nl.h" | ||
39 | #include "lpfc_disc.h" | 40 | #include "lpfc_disc.h" |
40 | #include "lpfc_scsi.h" | 41 | #include "lpfc_scsi.h" |
41 | #include "lpfc.h" | 42 | #include "lpfc.h" |
@@ -52,17 +53,20 @@ static struct scsi_transport_template *lpfc_transport_template = NULL; | |||
52 | static struct scsi_transport_template *lpfc_vport_transport_template = NULL; | 53 | static struct scsi_transport_template *lpfc_vport_transport_template = NULL; |
53 | static DEFINE_IDR(lpfc_hba_index); | 54 | static DEFINE_IDR(lpfc_hba_index); |
54 | 55 | ||
55 | /************************************************************************/ | 56 | /** |
56 | /* */ | 57 | * lpfc_config_port_prep: Perform lpfc initialization prior to config port. |
57 | /* lpfc_config_port_prep */ | 58 | * @phba: pointer to lpfc hba data structure. |
58 | /* This routine will do LPFC initialization prior to the */ | 59 | * |
59 | /* CONFIG_PORT mailbox command. This will be initialized */ | 60 | * This routine will do LPFC initialization prior to issuing the CONFIG_PORT |
60 | /* as a SLI layer callback routine. */ | 61 | * mailbox command. It retrieves the revision information from the HBA and |
61 | /* This routine returns 0 on success or -ERESTART if it wants */ | 62 | * collects the Vital Product Data (VPD) about the HBA for preparing the |
62 | /* the SLI layer to reset the HBA and try again. Any */ | 63 | * configuration of the HBA. |
63 | /* other return value indicates an error. */ | 64 | * |
64 | /* */ | 65 | * Return codes: |
65 | /************************************************************************/ | 66 | * 0 - success. |
67 | * -ERESTART - requests the SLI layer to reset the HBA and try again. | ||
68 | * Any other value - indicates an error. | ||
69 | **/ | ||
66 | int | 70 | int |
67 | lpfc_config_port_prep(struct lpfc_hba *phba) | 71 | lpfc_config_port_prep(struct lpfc_hba *phba) |
68 | { | 72 | { |
@@ -180,12 +184,9 @@ lpfc_config_port_prep(struct lpfc_hba *phba) | |||
180 | sizeof (phba->RandomData)); | 184 | sizeof (phba->RandomData)); |
181 | 185 | ||
182 | /* Get adapter VPD information */ | 186 | /* Get adapter VPD information */ |
183 | pmb->context2 = kmalloc(DMP_RSP_SIZE, GFP_KERNEL); | ||
184 | if (!pmb->context2) | ||
185 | goto out_free_mbox; | ||
186 | lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL); | 187 | lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL); |
187 | if (!lpfc_vpd_data) | 188 | if (!lpfc_vpd_data) |
188 | goto out_free_context2; | 189 | goto out_free_mbox; |
189 | 190 | ||
190 | do { | 191 | do { |
191 | lpfc_dump_mem(phba, pmb, offset); | 192 | lpfc_dump_mem(phba, pmb, offset); |
@@ -200,21 +201,29 @@ lpfc_config_port_prep(struct lpfc_hba *phba) | |||
200 | } | 201 | } |
201 | if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) | 202 | if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) |
202 | mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; | 203 | mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; |
203 | lpfc_sli_pcimem_bcopy(pmb->context2, lpfc_vpd_data + offset, | 204 | lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, |
205 | lpfc_vpd_data + offset, | ||
204 | mb->un.varDmp.word_cnt); | 206 | mb->un.varDmp.word_cnt); |
205 | offset += mb->un.varDmp.word_cnt; | 207 | offset += mb->un.varDmp.word_cnt; |
206 | } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE); | 208 | } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE); |
207 | lpfc_parse_vpd(phba, lpfc_vpd_data, offset); | 209 | lpfc_parse_vpd(phba, lpfc_vpd_data, offset); |
208 | 210 | ||
209 | kfree(lpfc_vpd_data); | 211 | kfree(lpfc_vpd_data); |
210 | out_free_context2: | ||
211 | kfree(pmb->context2); | ||
212 | out_free_mbox: | 212 | out_free_mbox: |
213 | mempool_free(pmb, phba->mbox_mem_pool); | 213 | mempool_free(pmb, phba->mbox_mem_pool); |
214 | return 0; | 214 | return 0; |
215 | } | 215 | } |
216 | 216 | ||
217 | /* Completion handler for config async event mailbox command. */ | 217 | /** |
218 | * lpfc_config_async_cmpl: Completion handler for config async event mbox cmd. | ||
219 | * @phba: pointer to lpfc hba data structure. | ||
220 | * @pmboxq: pointer to the driver internal queue element for mailbox command. | ||
221 | * | ||
222 | * This is the completion handler for driver's configuring asynchronous event | ||
223 | * mailbox command to the device. If the mailbox command returns successfully, | ||
224 | * it will set the internal async event support flag to 1; otherwise, it will | ||
225 | * set the internal async event support flag to 0. | ||
226 | **/ | ||
218 | static void | 227 | static void |
219 | lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) | 228 | lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) |
220 | { | 229 | { |
@@ -226,16 +235,19 @@ lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) | |||
226 | return; | 235 | return; |
227 | } | 236 | } |
228 | 237 | ||
229 | /************************************************************************/ | 238 | /** |
230 | /* */ | 239 | * lpfc_config_port_post: Perform lpfc initialization after config port. |
231 | /* lpfc_config_port_post */ | 240 | * @phba: pointer to lpfc hba data structure. |
232 | /* This routine will do LPFC initialization after the */ | 241 | * |
233 | /* CONFIG_PORT mailbox command. This will be initialized */ | 242 | * This routine will do LPFC initialization after the CONFIG_PORT mailbox |
234 | /* as a SLI layer callback routine. */ | 243 | * command call. It performs all internal resource and state setups on the |
235 | /* This routine returns 0 on success. Any other return value */ | 244 | * port: post IOCB buffers, enable appropriate host interrupt attentions, |
236 | /* indicates an error. */ | 245 | * ELS ring timers, etc. |
237 | /* */ | 246 | * |
238 | /************************************************************************/ | 247 | * Return codes |
248 | * 0 - success. | ||
249 | * Any other value - error. | ||
250 | **/ | ||
239 | int | 251 | int |
240 | lpfc_config_port_post(struct lpfc_hba *phba) | 252 | lpfc_config_port_post(struct lpfc_hba *phba) |
241 | { | 253 | { |
@@ -378,6 +390,29 @@ lpfc_config_port_post(struct lpfc_hba *phba) | |||
378 | if (phba->sli_rev != 3) | 390 | if (phba->sli_rev != 3) |
379 | lpfc_post_rcv_buf(phba); | 391 | lpfc_post_rcv_buf(phba); |
380 | 392 | ||
393 | /* | ||
394 | * Configure HBA MSI-X attention conditions to messages if MSI-X mode | ||
395 | */ | ||
396 | if (phba->intr_type == MSIX) { | ||
397 | rc = lpfc_config_msi(phba, pmb); | ||
398 | if (rc) { | ||
399 | mempool_free(pmb, phba->mbox_mem_pool); | ||
400 | return -EIO; | ||
401 | } | ||
402 | rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); | ||
403 | if (rc != MBX_SUCCESS) { | ||
404 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, | ||
405 | "0352 Config MSI mailbox command " | ||
406 | "failed, mbxCmd x%x, mbxStatus x%x\n", | ||
407 | pmb->mb.mbxCommand, pmb->mb.mbxStatus); | ||
408 | mempool_free(pmb, phba->mbox_mem_pool); | ||
409 | return -EIO; | ||
410 | } | ||
411 | } | ||
412 | |||
413 | /* Initialize ERATT handling flag */ | ||
414 | phba->hba_flag &= ~HBA_ERATT_HANDLED; | ||
415 | |||
381 | /* Enable appropriate host interrupts */ | 416 | /* Enable appropriate host interrupts */ |
382 | spin_lock_irq(&phba->hbalock); | 417 | spin_lock_irq(&phba->hbalock); |
383 | status = readl(phba->HCregaddr); | 418 | status = readl(phba->HCregaddr); |
@@ -393,26 +428,26 @@ lpfc_config_port_post(struct lpfc_hba *phba) | |||
393 | 428 | ||
394 | if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && | 429 | if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && |
395 | (phba->cfg_poll & DISABLE_FCP_RING_INT)) | 430 | (phba->cfg_poll & DISABLE_FCP_RING_INT)) |
396 | status &= ~(HC_R0INT_ENA << LPFC_FCP_RING); | 431 | status &= ~(HC_R0INT_ENA); |
397 | 432 | ||
398 | writel(status, phba->HCregaddr); | 433 | writel(status, phba->HCregaddr); |
399 | readl(phba->HCregaddr); /* flush */ | 434 | readl(phba->HCregaddr); /* flush */ |
400 | spin_unlock_irq(&phba->hbalock); | 435 | spin_unlock_irq(&phba->hbalock); |
401 | 436 | ||
402 | /* | 437 | /* Set up ring-0 (ELS) timer */ |
403 | * Setup the ring 0 (els) timeout handler | 438 | timeout = phba->fc_ratov * 2; |
404 | */ | ||
405 | timeout = phba->fc_ratov << 1; | ||
406 | mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); | 439 | mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); |
440 | /* Set up heart beat (HB) timer */ | ||
407 | mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL); | 441 | mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL); |
408 | phba->hb_outstanding = 0; | 442 | phba->hb_outstanding = 0; |
409 | phba->last_completion_time = jiffies; | 443 | phba->last_completion_time = jiffies; |
444 | /* Set up error attention (ERATT) polling timer */ | ||
445 | mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); | ||
410 | 446 | ||
411 | lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed); | 447 | lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed); |
412 | pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; | 448 | pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
413 | pmb->vport = vport; | ||
414 | rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); | ||
415 | lpfc_set_loopback_flag(phba); | 449 | lpfc_set_loopback_flag(phba); |
450 | rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); | ||
416 | if (rc != MBX_SUCCESS) { | 451 | if (rc != MBX_SUCCESS) { |
417 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 452 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
418 | "0454 Adapter failed to init, mbxCmd x%x " | 453 | "0454 Adapter failed to init, mbxCmd x%x " |
@@ -447,19 +482,20 @@ lpfc_config_port_post(struct lpfc_hba *phba) | |||
447 | rc); | 482 | rc); |
448 | mempool_free(pmb, phba->mbox_mem_pool); | 483 | mempool_free(pmb, phba->mbox_mem_pool); |
449 | } | 484 | } |
450 | return (0); | 485 | return 0; |
451 | } | 486 | } |
452 | 487 | ||
453 | /************************************************************************/ | 488 | /** |
454 | /* */ | 489 | * lpfc_hba_down_prep: Perform lpfc uninitialization prior to HBA reset. |
455 | /* lpfc_hba_down_prep */ | 490 | * @phba: pointer to lpfc HBA data structure. |
456 | /* This routine will do LPFC uninitialization before the */ | 491 | * |
457 | /* HBA is reset when bringing down the SLI Layer. This will be */ | 492 | * This routine will do LPFC uninitialization before the HBA is reset when |
458 | /* initialized as a SLI layer callback routine. */ | 493 | * bringing down the SLI Layer. |
459 | /* This routine returns 0 on success. Any other return value */ | 494 | * |
460 | /* indicates an error. */ | 495 | * Return codes |
461 | /* */ | 496 | * 0 - success. |
462 | /************************************************************************/ | 497 | * Any other value - error. |
498 | **/ | ||
463 | int | 499 | int |
464 | lpfc_hba_down_prep(struct lpfc_hba *phba) | 500 | lpfc_hba_down_prep(struct lpfc_hba *phba) |
465 | { | 501 | { |
@@ -481,15 +517,17 @@ lpfc_hba_down_prep(struct lpfc_hba *phba) | |||
481 | return 0; | 517 | return 0; |
482 | } | 518 | } |
483 | 519 | ||
484 | /************************************************************************/ | 520 | /** |
485 | /* */ | 521 | * lpfc_hba_down_post: Perform lpfc uninitialization after HBA reset. |
486 | /* lpfc_hba_down_post */ | 522 | * @phba: pointer to lpfc HBA data structure. |
487 | /* This routine will do uninitialization after the HBA is reset */ | 523 | * |
488 | /* when bringing down the SLI Layer. */ | 524 | * This routine will do uninitialization after the HBA is reset when bringing
489 | /* This routine returns 0 on success. Any other return value */ | 525 | * down the SLI Layer. |
490 | /* indicates an error. */ | 526 | * |
491 | /* */ | 527 | * Return codes |
492 | /************************************************************************/ | 528 | * 0 - success.
529 | * Any other value - error. | ||
530 | **/ | ||
493 | int | 531 | int |
494 | lpfc_hba_down_post(struct lpfc_hba *phba) | 532 | lpfc_hba_down_post(struct lpfc_hba *phba) |
495 | { | 533 | { |
@@ -548,7 +586,18 @@ lpfc_hba_down_post(struct lpfc_hba *phba) | |||
548 | return 0; | 586 | return 0; |
549 | } | 587 | } |
550 | 588 | ||
551 | /* HBA heart beat timeout handler */ | 589 | /** |
590 | * lpfc_hb_timeout: The HBA-timer timeout handler. | ||
591 | * @ptr: unsigned long holds the pointer to lpfc hba data structure. | ||
592 | * | ||
593 | * This is the HBA-timer timeout handler registered to the lpfc driver. When | ||
594 | * this timer fires, a HBA timeout event shall be posted to the lpfc driver | ||
595 | * work-port-events bitmap and the worker thread is notified. This timeout | ||
596 | * event will be used by the worker thread to invoke the actual timeout | ||
597 | * handler routine, lpfc_hb_timeout_handler. Any periodic operations will | ||
598 | * be performed in the timeout handler and the HBA timeout event bit shall | ||
599 | * be cleared by the worker thread after it has taken the event bitmap out. | ||
600 | **/ | ||
552 | static void | 601 | static void |
553 | lpfc_hb_timeout(unsigned long ptr) | 602 | lpfc_hb_timeout(unsigned long ptr) |
554 | { | 603 | { |
@@ -557,17 +606,36 @@ lpfc_hb_timeout(unsigned long ptr) | |||
557 | unsigned long iflag; | 606 | unsigned long iflag; |
558 | 607 | ||
559 | phba = (struct lpfc_hba *)ptr; | 608 | phba = (struct lpfc_hba *)ptr; |
609 | |||
610 | /* Check for heart beat timeout conditions */ | ||
560 | spin_lock_irqsave(&phba->pport->work_port_lock, iflag); | 611 | spin_lock_irqsave(&phba->pport->work_port_lock, iflag); |
561 | tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; | 612 | tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; |
562 | if (!tmo_posted) | 613 | if (!tmo_posted) |
563 | phba->pport->work_port_events |= WORKER_HB_TMO; | 614 | phba->pport->work_port_events |= WORKER_HB_TMO; |
564 | spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); | 615 | spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); |
565 | 616 | ||
617 | /* Tell the worker thread there is work to do */ | ||
566 | if (!tmo_posted) | 618 | if (!tmo_posted) |
567 | lpfc_worker_wake_up(phba); | 619 | lpfc_worker_wake_up(phba); |
568 | return; | 620 | return; |
569 | } | 621 | } |
570 | 622 | ||
623 | /** | ||
624 | * lpfc_hb_mbox_cmpl: The lpfc heart-beat mailbox command callback function. | ||
625 | * @phba: pointer to lpfc hba data structure. | ||
626 | * @pmboxq: pointer to the driver internal queue element for mailbox command. | ||
627 | * | ||
628 | * This is the callback function to the lpfc heart-beat mailbox command. | ||
629 | * If configured, the lpfc driver issues the heart-beat mailbox command to | ||
630 | * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the | ||
631 | * heart-beat mailbox command is issued, the driver shall set up the heart-beat | ||
632 | * timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds and mark the | ||
633 | * heart-beat outstanding state. Once the mailbox command comes back and | ||
634 | * no error conditions are detected, the heart-beat mailbox command timer is | ||
635 | * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding | ||
636 | * state is cleared for the next heart-beat. If the timer expired with the | ||
637 | * heart-beat outstanding state set, the driver will put the HBA offline. | ||
638 | **/ | ||
571 | static void | 639 | static void |
572 | lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) | 640 | lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) |
573 | { | 641 | { |
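As background for the heart-beat kernel-doc above: when the mailbox completes cleanly, the driver re-arms the heart-beat timer for another LPFC_HB_MBOX_INTERVAL. A minimal sketch of that re-arm step, assuming only the hb_tmofunc timer field used elsewhere in this file and the interval constant named in the comment (locking and the outstanding-flag handling are elided):

    /* Sketch only: re-arm the heart-beat timer for the next interval.
     * struct lpfc_hba and LPFC_HB_MBOX_INTERVAL are driver-internal names
     * taken from the comment above, not redefined here.
     */
    static void hb_rearm_sketch(struct lpfc_hba *phba)
    {
            mod_timer(&phba->hb_tmofunc,
                      jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
    }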
@@ -577,6 +645,7 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) | |||
577 | phba->hb_outstanding = 0; | 645 | phba->hb_outstanding = 0; |
578 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); | 646 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); |
579 | 647 | ||
648 | /* Check and reset heart-beat timer if necessary */ | ||
580 | mempool_free(pmboxq, phba->mbox_mem_pool); | 649 | mempool_free(pmboxq, phba->mbox_mem_pool); |
581 | if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && | 650 | if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && |
582 | !(phba->link_state == LPFC_HBA_ERROR) && | 651 | !(phba->link_state == LPFC_HBA_ERROR) && |
@@ -586,6 +655,22 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) | |||
586 | return; | 655 | return; |
587 | } | 656 | } |
588 | 657 | ||
658 | /** | ||
659 | * lpfc_hb_timeout_handler: The HBA-timer timeout handler. | ||
660 | * @phba: pointer to lpfc hba data structure. | ||
661 | * | ||
662 | * This is the actual HBA-timer timeout handler to be invoked by the worker | ||
663 | * thread whenever the HBA timer fires and an HBA-timeout event is posted. This | ||
664 | * handler performs any periodic operations needed for the device. If such a | ||
665 | * periodic event has already been attended to, either in the interrupt handler | ||
666 | * or by processing slow-ring or fast-ring events within the HBA-timer | ||
667 | * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets | ||
668 | * the timer for the next timeout period. If lpfc heart-beat mailbox command | ||
669 | * is configured and there is no heart-beat mailbox command outstanding, a | ||
670 | * heart-beat mailbox is issued and timer set properly. Otherwise, if there | ||
671 | * has been a heart-beat mailbox command outstanding, the HBA shall be put | ||
672 | * offline. | ||
673 | **/ | ||
589 | void | 674 | void |
590 | lpfc_hb_timeout_handler(struct lpfc_hba *phba) | 675 | lpfc_hb_timeout_handler(struct lpfc_hba *phba) |
591 | { | 676 | { |
@@ -684,6 +769,13 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) | |||
684 | } | 769 | } |
685 | } | 770 | } |
686 | 771 | ||
772 | /** | ||
773 | * lpfc_offline_eratt: Bring lpfc offline on hardware error attention. | ||
774 | * @phba: pointer to lpfc hba data structure. | ||
775 | * | ||
776 | * This routine is called to bring the HBA offline when HBA hardware error | ||
777 | * other than Port Error 6 has been detected. | ||
778 | **/ | ||
687 | static void | 779 | static void |
688 | lpfc_offline_eratt(struct lpfc_hba *phba) | 780 | lpfc_offline_eratt(struct lpfc_hba *phba) |
689 | { | 781 | { |
@@ -704,14 +796,16 @@ lpfc_offline_eratt(struct lpfc_hba *phba) | |||
704 | return; | 796 | return; |
705 | } | 797 | } |
706 | 798 | ||
707 | /************************************************************************/ | 799 | /** |
708 | /* */ | 800 | * lpfc_handle_eratt: The HBA hardware error handler. |
709 | /* lpfc_handle_eratt */ | 801 | * @phba: pointer to lpfc hba data structure. |
710 | /* This routine will handle processing a Host Attention */ | 802 | * |
711 | /* Error Status event. This will be initialized */ | 803 | * This routine is invoked to handle the following HBA hardware error |
712 | /* as a SLI layer callback routine. */ | 804 | * conditions: |
713 | /* */ | 805 | * 1 - HBA error attention interrupt |
714 | /************************************************************************/ | 806 | * 2 - DMA ring index out of range |
807 | * 3 - Mailbox command came back as unknown | ||
808 | **/ | ||
715 | void | 809 | void |
716 | lpfc_handle_eratt(struct lpfc_hba *phba) | 810 | lpfc_handle_eratt(struct lpfc_hba *phba) |
717 | { | 811 | { |
@@ -722,6 +816,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba) | |||
722 | unsigned long temperature; | 816 | unsigned long temperature; |
723 | struct temp_event temp_event_data; | 817 | struct temp_event temp_event_data; |
724 | struct Scsi_Host *shost; | 818 | struct Scsi_Host *shost; |
819 | struct lpfc_board_event_header board_event; | ||
725 | 820 | ||
726 | /* If the pci channel is offline, ignore possible errors, | 821 | /* If the pci channel is offline, ignore possible errors, |
727 | * since we cannot communicate with the pci card anyway. */ | 822 | * since we cannot communicate with the pci card anyway. */ |
@@ -731,6 +826,16 @@ lpfc_handle_eratt(struct lpfc_hba *phba) | |||
731 | if (!phba->cfg_enable_hba_reset) | 826 | if (!phba->cfg_enable_hba_reset) |
732 | return; | 827 | return; |
733 | 828 | ||
829 | /* Send an internal error event to mgmt application */ | ||
830 | board_event.event_type = FC_REG_BOARD_EVENT; | ||
831 | board_event.subcategory = LPFC_EVENT_PORTINTERR; | ||
832 | shost = lpfc_shost_from_vport(phba->pport); | ||
833 | fc_host_post_vendor_event(shost, fc_get_event_number(), | ||
834 | sizeof(board_event), | ||
835 | (char *) &board_event, | ||
836 | SCSI_NL_VID_TYPE_PCI | ||
837 | | PCI_VENDOR_ID_EMULEX); | ||
838 | |||
734 | if (phba->work_hs & HS_FFER6) { | 839 | if (phba->work_hs & HS_FFER6) { |
735 | /* Re-establishing Link */ | 840 | /* Re-establishing Link */ |
736 | lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, | 841 | lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, |
@@ -771,7 +876,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba) | |||
771 | temp_event_data.data = (uint32_t)temperature; | 876 | temp_event_data.data = (uint32_t)temperature; |
772 | 877 | ||
773 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 878 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
774 | "0459 Adapter maximum temperature exceeded " | 879 | "0406 Adapter maximum temperature exceeded " |
775 | "(%ld), taking this port offline " | 880 | "(%ld), taking this port offline " |
776 | "Data: x%x x%x x%x\n", | 881 | "Data: x%x x%x x%x\n", |
777 | temperature, phba->work_hs, | 882 | temperature, phba->work_hs, |
@@ -791,8 +896,8 @@ lpfc_handle_eratt(struct lpfc_hba *phba) | |||
791 | 896 | ||
792 | } else { | 897 | } else { |
793 | /* The if clause above forces this code path when the status | 898 | /* The if clause above forces this code path when the status |
794 | * failure is a value other than FFER6. Do not call the offline | 899 | * failure is a value other than FFER6. Do not call the offline |
795 | * twice. This is the adapter hardware error path. | 900 | * twice. This is the adapter hardware error path. |
796 | */ | 901 | */ |
797 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 902 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
798 | "0457 Adapter Hardware Error " | 903 | "0457 Adapter Hardware Error " |
@@ -808,16 +913,16 @@ lpfc_handle_eratt(struct lpfc_hba *phba) | |||
808 | 913 | ||
809 | lpfc_offline_eratt(phba); | 914 | lpfc_offline_eratt(phba); |
810 | } | 915 | } |
916 | return; | ||
811 | } | 917 | } |
812 | 918 | ||
813 | /************************************************************************/ | 919 | /** |
814 | /* */ | 920 | * lpfc_handle_latt: The HBA link event handler. |
815 | /* lpfc_handle_latt */ | 921 | * @phba: pointer to lpfc hba data structure. |
816 | /* This routine will handle processing a Host Attention */ | 922 | * |
817 | /* Link Status event. This will be initialized */ | 923 | * This routine is invoked from the worker thread to handle a HBA host |
818 | /* as a SLI layer callback routine. */ | 924 | * attention link event. |
819 | /* */ | 925 | **/ |
820 | /************************************************************************/ | ||
821 | void | 926 | void |
822 | lpfc_handle_latt(struct lpfc_hba *phba) | 927 | lpfc_handle_latt(struct lpfc_hba *phba) |
823 | { | 928 | { |
@@ -898,12 +1003,20 @@ lpfc_handle_latt_err_exit: | |||
898 | return; | 1003 | return; |
899 | } | 1004 | } |
900 | 1005 | ||
901 | /************************************************************************/ | 1006 | /** |
902 | /* */ | 1007 | * lpfc_parse_vpd: Parse VPD (Vital Product Data). |
903 | /* lpfc_parse_vpd */ | 1008 | * @phba: pointer to lpfc hba data structure. |
904 | /* This routine will parse the VPD data */ | 1009 | * @vpd: pointer to the vital product data. |
905 | /* */ | 1010 | * @len: length of the vital product data in bytes. |
906 | /************************************************************************/ | 1011 | * |
1012 | * This routine parses the Vital Product Data (VPD). The VPD is treated as | ||
1013 | * an array of characters. In this routine, the ModelName, ProgramType, and | ||
1014 | * ModelDesc, etc. fields of the phba data structure will be populated. | ||
1015 | * | ||
1016 | * Return codes | ||
1017 | * 0 - pointer to the VPD passed in is NULL | ||
1018 | * 1 - success | ||
1019 | **/ | ||
907 | static int | 1020 | static int |
908 | lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) | 1021 | lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) |
909 | { | 1022 | { |
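The kernel-doc above treats the VPD buffer as a flat character array. For orientation, PCI Vital Product Data is a sequence of tagged records; the sketch below walks such a buffer using the standard resource tags (0x82 identifier string, 0x90 read-only fields, 0x78 end tag). The tag values come from the PCI VPD format rather than from this patch, and the helper name is illustrative only:

    #include <linux/types.h>

    /* Illustrative only: skip over the tagged records of a PCI VPD buffer. */
    static void vpd_walk_sketch(const uint8_t *vpd, int len)
    {
            int index = 0;

            while (index < len) {
                    switch (vpd[index]) {
                    case 0x82:      /* identifier string, large resource    */
                    case 0x90:      /* read-only VPD fields, large resource */
                            if (index + 2 >= len)
                                    return;
                            /* a two-byte little-endian length follows the tag */
                            index += 3 + vpd[index + 1] + (vpd[index + 2] << 8);
                            break;
                    case 0x78:      /* end tag, small resource */
                            return;
                    default:
                            index++;
                            break;
                    }
            }
    }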
@@ -1040,12 +1153,25 @@ lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) | |||
1040 | return(1); | 1153 | return(1); |
1041 | } | 1154 | } |
1042 | 1155 | ||
1156 | /** | ||
1157 | * lpfc_get_hba_model_desc: Retrieve HBA device model name and description. | ||
1158 | * @phba: pointer to lpfc hba data structure. | ||
1159 | * @mdp: pointer to the data structure to hold the derived model name. | ||
1160 | * @descp: pointer to the data structure to hold the derived description. | ||
1161 | * | ||
1162 | * This routine retrieves the HBA's description based on its registered PCI device | ||
1163 | * ID. The @descp passed into this function points to an array of 256 chars. It | ||
1164 | * shall be returned with the model name, maximum speed, and the host bus type. | ||
1165 | * The @mdp passed into this function points to an array of 80 chars. When the | ||
1166 | * function returns, the @mdp will be filled with the model name. | ||
1167 | **/ | ||
1043 | static void | 1168 | static void |
1044 | lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) | 1169 | lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) |
1045 | { | 1170 | { |
1046 | lpfc_vpd_t *vp; | 1171 | lpfc_vpd_t *vp; |
1047 | uint16_t dev_id = phba->pcidev->device; | 1172 | uint16_t dev_id = phba->pcidev->device; |
1048 | int max_speed; | 1173 | int max_speed; |
1174 | int GE = 0; | ||
1049 | struct { | 1175 | struct { |
1050 | char * name; | 1176 | char * name; |
1051 | int max_speed; | 1177 | int max_speed; |
@@ -1177,6 +1303,19 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) | |||
1177 | case PCI_DEVICE_ID_SAT_S: | 1303 | case PCI_DEVICE_ID_SAT_S: |
1178 | m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"}; | 1304 | m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"}; |
1179 | break; | 1305 | break; |
1306 | case PCI_DEVICE_ID_HORNET: | ||
1307 | m = (typeof(m)){"LP21000", max_speed, "PCIe"}; | ||
1308 | GE = 1; | ||
1309 | break; | ||
1310 | case PCI_DEVICE_ID_PROTEUS_VF: | ||
1311 | m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"}; | ||
1312 | break; | ||
1313 | case PCI_DEVICE_ID_PROTEUS_PF: | ||
1314 | m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"}; | ||
1315 | break; | ||
1316 | case PCI_DEVICE_ID_PROTEUS_S: | ||
1317 | m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"}; | ||
1318 | break; | ||
1180 | default: | 1319 | default: |
1181 | m = (typeof(m)){ NULL }; | 1320 | m = (typeof(m)){ NULL }; |
1182 | break; | 1321 | break; |
@@ -1186,18 +1325,25 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) | |||
1186 | snprintf(mdp, 79,"%s", m.name); | 1325 | snprintf(mdp, 79,"%s", m.name); |
1187 | if (descp && descp[0] == '\0') | 1326 | if (descp && descp[0] == '\0') |
1188 | snprintf(descp, 255, | 1327 | snprintf(descp, 255, |
1189 | "Emulex %s %dGb %s Fibre Channel Adapter", | 1328 | "Emulex %s %d%s %s %s", |
1190 | m.name, m.max_speed, m.bus); | 1329 | m.name, m.max_speed, |
1330 | (GE) ? "GE" : "Gb", | ||
1331 | m.bus, | ||
1332 | (GE) ? "FCoE Adapter" : "Fibre Channel Adapter"); | ||
1191 | } | 1333 | } |
1192 | 1334 | ||
1193 | /**************************************************/ | 1335 | /** |
1194 | /* lpfc_post_buffer */ | 1336 | * lpfc_post_buffer: Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring. |
1195 | /* */ | 1337 | * @phba: pointer to lpfc hba data structure. |
1196 | /* This routine will post count buffers to the */ | 1338 | * @pring: pointer to a IOCB ring. |
1197 | /* ring with the QUE_RING_BUF_CN command. This */ | 1339 | * @cnt: the number of IOCBs to be posted to the IOCB ring. |
1198 | /* allows 3 buffers / command to be posted. */ | 1340 | * |
1199 | /* Returns the number of buffers NOT posted. */ | 1341 | * This routine posts a given number of IOCBs with the associated DMA buffer |
1200 | /**************************************************/ | 1342 | * descriptors specified by the cnt argument to the given IOCB ring. |
1343 | * | ||
1344 | * Return codes | ||
1345 | * The number of IOCBs NOT able to be posted to the IOCB ring. | ||
1346 | **/ | ||
1201 | int | 1347 | int |
1202 | lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) | 1348 | lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) |
1203 | { | 1349 | { |
@@ -1287,12 +1433,17 @@ lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) | |||
1287 | return 0; | 1433 | return 0; |
1288 | } | 1434 | } |
1289 | 1435 | ||
1290 | /************************************************************************/ | 1436 | /** |
1291 | /* */ | 1437 | * lpfc_post_rcv_buf: Post the initial receive IOCB buffers to ELS ring. |
1292 | /* lpfc_post_rcv_buf */ | 1438 | * @phba: pointer to lpfc hba data structure. |
1293 | /* This routine post initial rcv buffers to the configured rings */ | 1439 | * |
1294 | /* */ | 1440 | * This routine posts initial receive IOCB buffers to the ELS ring. The |
1295 | /************************************************************************/ | 1441 | * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is |
1442 | * set to 64 IOCBs. | ||
1443 | * | ||
1444 | * Return codes | ||
1445 | * 0 - success (currently always success) | ||
1446 | **/ | ||
1296 | static int | 1447 | static int |
1297 | lpfc_post_rcv_buf(struct lpfc_hba *phba) | 1448 | lpfc_post_rcv_buf(struct lpfc_hba *phba) |
1298 | { | 1449 | { |
@@ -1307,11 +1458,13 @@ lpfc_post_rcv_buf(struct lpfc_hba *phba) | |||
1307 | 1458 | ||
1308 | #define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) | 1459 | #define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) |
1309 | 1460 | ||
1310 | /************************************************************************/ | 1461 | /** |
1311 | /* */ | 1462 | * lpfc_sha_init: Set up initial array of hash table entries. |
1312 | /* lpfc_sha_init */ | 1463 | * @HashResultPointer: pointer to an array as hash table. |
1313 | /* */ | 1464 | * |
1314 | /************************************************************************/ | 1465 | * This routine sets up the initial values to the array of hash table entries |
1466 | * for the LC HBAs. | ||
1467 | **/ | ||
1315 | static void | 1468 | static void |
1316 | lpfc_sha_init(uint32_t * HashResultPointer) | 1469 | lpfc_sha_init(uint32_t * HashResultPointer) |
1317 | { | 1470 | { |
@@ -1322,11 +1475,16 @@ lpfc_sha_init(uint32_t * HashResultPointer) | |||
1322 | HashResultPointer[4] = 0xC3D2E1F0; | 1475 | HashResultPointer[4] = 0xC3D2E1F0; |
1323 | } | 1476 | } |
1324 | 1477 | ||
1325 | /************************************************************************/ | 1478 | /** |
1326 | /* */ | 1479 | * lpfc_sha_iterate: Iterate initial hash table with the working hash table. |
1327 | /* lpfc_sha_iterate */ | 1480 | * @HashResultPointer: pointer to an initial/result hash table. |
1328 | /* */ | 1481 | * @HashWorkingPointer: pointer to an working hash table. |
1329 | /************************************************************************/ | 1482 | * |
1483 | * This routine iterates an initial hash table pointed by @HashResultPointer | ||
1484 | * with the values from the working hash table pointed to by @HashWorkingPointer. | ||
1485 | * The results are put back into the initial hash table, returned through | ||
1486 | * the @HashResultPointer as the result hash table. | ||
1487 | **/ | ||
1330 | static void | 1488 | static void |
1331 | lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer) | 1489 | lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer) |
1332 | { | 1490 | { |
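The S(N,V) macro shown above this hunk is a 32-bit rotate-left, and the values seeded by lpfc_sha_init (the last one, 0xC3D2E1F0, is visible in the diff) are the standard SHA-1 initial hash values. Restated as a stand-alone sketch so the constants are explicit; only the final constant is actually visible in this hunk, the others are the usual SHA-1 values:

    #include <linux/types.h>

    #define ROTL32(n, v)    (((v) << (n)) | ((v) >> (32 - (n))))  /* same shape as S(N,V) */

    /* Standard SHA-1 initial hash values (H0..H4). */
    static void sha_init_sketch(uint32_t *h)
    {
            h[0] = 0x67452301;
            h[1] = 0xEFCDAB89;
            h[2] = 0x98BADCFE;
            h[3] = 0x10325476;
            h[4] = 0xC3D2E1F0;
    }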
@@ -1374,22 +1532,29 @@ lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer) | |||
1374 | 1532 | ||
1375 | } | 1533 | } |
1376 | 1534 | ||
1377 | /************************************************************************/ | 1535 | /** |
1378 | /* */ | 1536 | * lpfc_challenge_key: Create challenge key based on WWPN of the HBA. |
1379 | /* lpfc_challenge_key */ | 1537 | * @RandomChallenge: pointer to the entry of host challenge random number array. |
1380 | /* */ | 1538 | * @HashWorking: pointer to the entry of the working hash array. |
1381 | /************************************************************************/ | 1539 | * |
1540 | * This routine calculates the working hash array referred to by @HashWorking | ||
1541 | * from the challenge random numbers associated with the host, referred to by | ||
1542 | * @RandomChallenge. The result is put into the entry of the working hash | ||
1543 | * array and returned by reference through @HashWorking. | ||
1544 | **/ | ||
1382 | static void | 1545 | static void |
1383 | lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking) | 1546 | lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking) |
1384 | { | 1547 | { |
1385 | *HashWorking = (*RandomChallenge ^ *HashWorking); | 1548 | *HashWorking = (*RandomChallenge ^ *HashWorking); |
1386 | } | 1549 | } |
1387 | 1550 | ||
1388 | /************************************************************************/ | 1551 | /** |
1389 | /* */ | 1552 | * lpfc_hba_init: Perform special handling for LC HBA initialization. |
1390 | /* lpfc_hba_init */ | 1553 | * @phba: pointer to lpfc hba data structure. |
1391 | /* */ | 1554 | * @hbainit: pointer to an array of unsigned 32-bit integers. |
1392 | /************************************************************************/ | 1555 | * |
1556 | * This routine performs the special handling for LC HBA initialization. | ||
1557 | **/ | ||
1393 | void | 1558 | void |
1394 | lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) | 1559 | lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) |
1395 | { | 1560 | { |
@@ -1412,6 +1577,15 @@ lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) | |||
1412 | kfree(HashWorking); | 1577 | kfree(HashWorking); |
1413 | } | 1578 | } |
1414 | 1579 | ||
1580 | /** | ||
1581 | * lpfc_cleanup: Performs vport cleanups before deleting a vport. | ||
1582 | * @vport: pointer to a virtual N_Port data structure. | ||
1583 | * | ||
1584 | * This routine performs the necessary cleanups before deleting the @vport. | ||
1585 | * It invokes the discovery state machine to perform necessary state | ||
1586 | * transitions and to release the ndlps associated with the @vport. Note, | ||
1587 | * the physical port is treated as @vport 0. | ||
1588 | **/ | ||
1415 | void | 1589 | void |
1416 | lpfc_cleanup(struct lpfc_vport *vport) | 1590 | lpfc_cleanup(struct lpfc_vport *vport) |
1417 | { | 1591 | { |
@@ -1459,14 +1633,6 @@ lpfc_cleanup(struct lpfc_vport *vport) | |||
1459 | lpfc_disc_state_machine(vport, ndlp, NULL, | 1633 | lpfc_disc_state_machine(vport, ndlp, NULL, |
1460 | NLP_EVT_DEVICE_RM); | 1634 | NLP_EVT_DEVICE_RM); |
1461 | 1635 | ||
1462 | /* nlp_type zero is not defined, nlp_flag zero also not defined, | ||
1463 | * nlp_state is unused, this happens when | ||
1464 | * an initiator has logged | ||
1465 | * into us so cleanup this ndlp. | ||
1466 | */ | ||
1467 | if ((ndlp->nlp_type == 0) && (ndlp->nlp_flag == 0) && | ||
1468 | (ndlp->nlp_state == 0)) | ||
1469 | lpfc_nlp_put(ndlp); | ||
1470 | } | 1636 | } |
1471 | 1637 | ||
1472 | /* At this point, ALL ndlp's should be gone | 1638 | /* At this point, ALL ndlp's should be gone |
@@ -1482,7 +1648,7 @@ lpfc_cleanup(struct lpfc_vport *vport) | |||
1482 | &vport->fc_nodes, nlp_listp) { | 1648 | &vport->fc_nodes, nlp_listp) { |
1483 | lpfc_printf_vlog(ndlp->vport, KERN_ERR, | 1649 | lpfc_printf_vlog(ndlp->vport, KERN_ERR, |
1484 | LOG_NODE, | 1650 | LOG_NODE, |
1485 | "0282: did:x%x ndlp:x%p " | 1651 | "0282 did:x%x ndlp:x%p " |
1486 | "usgmap:x%x refcnt:%d\n", | 1652 | "usgmap:x%x refcnt:%d\n", |
1487 | ndlp->nlp_DID, (void *)ndlp, | 1653 | ndlp->nlp_DID, (void *)ndlp, |
1488 | ndlp->nlp_usg_map, | 1654 | ndlp->nlp_usg_map, |
@@ -1498,6 +1664,14 @@ lpfc_cleanup(struct lpfc_vport *vport) | |||
1498 | return; | 1664 | return; |
1499 | } | 1665 | } |
1500 | 1666 | ||
1667 | /** | ||
1668 | * lpfc_stop_vport_timers: Stop all the timers associated with a vport. | ||
1669 | * @vport: pointer to a virtual N_Port data structure. | ||
1670 | * | ||
1671 | * This routine stops all the timers associated with a @vport. This function | ||
1672 | * is invoked before disabling or deleting a @vport. Note that the physical | ||
1673 | * port is treated as @vport 0. | ||
1674 | **/ | ||
1501 | void | 1675 | void |
1502 | lpfc_stop_vport_timers(struct lpfc_vport *vport) | 1676 | lpfc_stop_vport_timers(struct lpfc_vport *vport) |
1503 | { | 1677 | { |
@@ -1507,6 +1681,13 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport) | |||
1507 | return; | 1681 | return; |
1508 | } | 1682 | } |
1509 | 1683 | ||
1684 | /** | ||
1685 | * lpfc_stop_phba_timers: Stop all the timers associated with an HBA. | ||
1686 | * @phba: pointer to lpfc hba data structure. | ||
1687 | * | ||
1688 | * This routine stops all the timers associated with a HBA. This function is | ||
1689 | * invoked before either putting a HBA offline or unloading the driver. | ||
1690 | **/ | ||
1510 | static void | 1691 | static void |
1511 | lpfc_stop_phba_timers(struct lpfc_hba *phba) | 1692 | lpfc_stop_phba_timers(struct lpfc_hba *phba) |
1512 | { | 1693 | { |
@@ -1516,9 +1697,20 @@ lpfc_stop_phba_timers(struct lpfc_hba *phba) | |||
1516 | del_timer_sync(&phba->fabric_block_timer); | 1697 | del_timer_sync(&phba->fabric_block_timer); |
1517 | phba->hb_outstanding = 0; | 1698 | phba->hb_outstanding = 0; |
1518 | del_timer_sync(&phba->hb_tmofunc); | 1699 | del_timer_sync(&phba->hb_tmofunc); |
1700 | del_timer_sync(&phba->eratt_poll); | ||
1519 | return; | 1701 | return; |
1520 | } | 1702 | } |
1521 | 1703 | ||
1704 | /** | ||
1705 | * lpfc_block_mgmt_io: Mark a HBA's management interface as blocked. | ||
1706 | * @phba: pointer to lpfc hba data structure. | ||
1707 | * | ||
1708 | * This routine marks a HBA's management interface as blocked. Once the HBA's | ||
1709 | * management interface is marked as blocked, all the user space access to | ||
1710 | * the HBA, whether from the sysfs interface or the libdfc interface, will | ||
1711 | * be blocked. The HBA is set to block the management interface when the | ||
1712 | * driver prepares the HBA interface for online or offline. | ||
1713 | **/ | ||
1522 | static void | 1714 | static void |
1523 | lpfc_block_mgmt_io(struct lpfc_hba * phba) | 1715 | lpfc_block_mgmt_io(struct lpfc_hba * phba) |
1524 | { | 1716 | { |
@@ -1529,6 +1721,18 @@ lpfc_block_mgmt_io(struct lpfc_hba * phba) | |||
1529 | spin_unlock_irqrestore(&phba->hbalock, iflag); | 1721 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
1530 | } | 1722 | } |
1531 | 1723 | ||
1724 | /** | ||
1725 | * lpfc_online: Initialize and bring a HBA online. | ||
1726 | * @phba: pointer to lpfc hba data structure. | ||
1727 | * | ||
1728 | * This routine initializes the HBA and brings it online. During this | ||
1729 | * process, the management interface is blocked to prevent user space access | ||
1730 | * to the HBA from interfering with the driver initialization. | ||
1731 | * | ||
1732 | * Return codes | ||
1733 | * 0 - successful | ||
1734 | * 1 - failed | ||
1735 | **/ | ||
1532 | int | 1736 | int |
1533 | lpfc_online(struct lpfc_hba *phba) | 1737 | lpfc_online(struct lpfc_hba *phba) |
1534 | { | 1738 | { |
@@ -1574,6 +1778,17 @@ lpfc_online(struct lpfc_hba *phba) | |||
1574 | return 0; | 1778 | return 0; |
1575 | } | 1779 | } |
1576 | 1780 | ||
1781 | /** | ||
1782 | * lpfc_unblock_mgmt_io: Mark a HBA's management interface to be not blocked. | ||
1783 | * @phba: pointer to lpfc hba data structure. | ||
1784 | * | ||
1785 | * This routine marks a HBA's management interface as not blocked. Once the | ||
1786 | * HBA's management interface is marked as not blocked, all the user space | ||
1787 | * access to the HBA, whether from the sysfs interface or the libdfc | ||
1788 | * interface, will be allowed. The HBA is set to block the management interface | ||
1789 | * when the driver prepares the HBA interface for online or offline and then | ||
1790 | * set to unblock the management interface afterwards. | ||
1791 | **/ | ||
1577 | void | 1792 | void |
1578 | lpfc_unblock_mgmt_io(struct lpfc_hba * phba) | 1793 | lpfc_unblock_mgmt_io(struct lpfc_hba * phba) |
1579 | { | 1794 | { |
@@ -1584,6 +1799,14 @@ lpfc_unblock_mgmt_io(struct lpfc_hba * phba) | |||
1584 | spin_unlock_irqrestore(&phba->hbalock, iflag); | 1799 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
1585 | } | 1800 | } |
1586 | 1801 | ||
1802 | /** | ||
1803 | * lpfc_offline_prep: Prepare a HBA to be brought offline. | ||
1804 | * @phba: pointer to lpfc hba data structure. | ||
1805 | * | ||
1806 | * This routine is invoked to prepare a HBA to be brought offline. It performs | ||
1807 | * unregistration login to all the nodes on all vports and flushes the mailbox | ||
1808 | * queue to make it ready to be brought offline. | ||
1809 | **/ | ||
1587 | void | 1810 | void |
1588 | lpfc_offline_prep(struct lpfc_hba * phba) | 1811 | lpfc_offline_prep(struct lpfc_hba * phba) |
1589 | { | 1812 | { |
@@ -1633,6 +1856,14 @@ lpfc_offline_prep(struct lpfc_hba * phba) | |||
1633 | lpfc_sli_flush_mbox_queue(phba); | 1856 | lpfc_sli_flush_mbox_queue(phba); |
1634 | } | 1857 | } |
1635 | 1858 | ||
1859 | /** | ||
1860 | * lpfc_offline: Bring a HBA offline. | ||
1861 | * @phba: pointer to lpfc hba data structure. | ||
1862 | * | ||
1863 | * This routine actually brings a HBA offline. It stops all the timers | ||
1864 | * associated with the HBA, brings down the SLI layer, and eventually | ||
1865 | * marks the HBA as in offline state for the upper layer protocol. | ||
1866 | **/ | ||
1636 | void | 1867 | void |
1637 | lpfc_offline(struct lpfc_hba *phba) | 1868 | lpfc_offline(struct lpfc_hba *phba) |
1638 | { | 1869 | { |
@@ -1670,12 +1901,17 @@ lpfc_offline(struct lpfc_hba *phba) | |||
1670 | lpfc_destroy_vport_work_array(phba, vports); | 1901 | lpfc_destroy_vport_work_array(phba, vports); |
1671 | } | 1902 | } |
1672 | 1903 | ||
1673 | /****************************************************************************** | 1904 | /** |
1674 | * Function name: lpfc_scsi_free | 1905 | * lpfc_scsi_free: Free all the SCSI buffers and IOCBs from driver lists. |
1675 | * | 1906 | * @phba: pointer to lpfc hba data structure. |
1676 | * Description: Called from lpfc_pci_remove_one free internal driver resources | 1907 | * |
1677 | * | 1908 | * This routine is to free all the SCSI buffers and IOCBs from the driver |
1678 | ******************************************************************************/ | 1909 | * list back to kernel. It is called from lpfc_pci_remove_one to free |
1910 | * the internal resources before the device is removed from the system. | ||
1911 | * | ||
1912 | * Return codes | ||
1913 | * 0 - successful (for now, it always returns 0) | ||
1914 | **/ | ||
1679 | static int | 1915 | static int |
1680 | lpfc_scsi_free(struct lpfc_hba *phba) | 1916 | lpfc_scsi_free(struct lpfc_hba *phba) |
1681 | { | 1917 | { |
@@ -1704,6 +1940,22 @@ lpfc_scsi_free(struct lpfc_hba *phba) | |||
1704 | return 0; | 1940 | return 0; |
1705 | } | 1941 | } |
1706 | 1942 | ||
1943 | /** | ||
1944 | * lpfc_create_port: Create an FC port. | ||
1945 | * @phba: pointer to lpfc hba data structure. | ||
1946 | * @instance: a unique integer ID to this FC port. | ||
1947 | * @dev: pointer to the device data structure. | ||
1948 | * | ||
1949 | * This routine creates a FC port for the upper layer protocol. The FC port | ||
1950 | * can be created on top of either a physical port or a virtual port provided | ||
1951 | * by the HBA. This routine also allocates a SCSI host data structure (shost) | ||
1952 | * and associates it with the FC port created before adding the shost into | ||
1953 | * the SCSI layer. | ||
1954 | * | ||
1955 | * Return codes | ||
1956 | * @vport - pointer to the virtual N_Port data structure. | ||
1957 | * NULL - port create failed. | ||
1958 | **/ | ||
1707 | struct lpfc_vport * | 1959 | struct lpfc_vport * |
1708 | lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) | 1960 | lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) |
1709 | { | 1961 | { |
@@ -1777,6 +2029,13 @@ out: | |||
1777 | return NULL; | 2029 | return NULL; |
1778 | } | 2030 | } |
1779 | 2031 | ||
2032 | /** | ||
2033 | * destroy_port: Destroy an FC port. | ||
2034 | * @vport: pointer to an lpfc virtual N_Port data structure. | ||
2035 | * | ||
2036 | * This routine destroys a FC port from the upper layer protocol. All the | ||
2037 | * resources associated with the port are released. | ||
2038 | **/ | ||
1780 | void | 2039 | void |
1781 | destroy_port(struct lpfc_vport *vport) | 2040 | destroy_port(struct lpfc_vport *vport) |
1782 | { | 2041 | { |
@@ -1797,6 +2056,16 @@ destroy_port(struct lpfc_vport *vport) | |||
1797 | return; | 2056 | return; |
1798 | } | 2057 | } |
1799 | 2058 | ||
2059 | /** | ||
2060 | * lpfc_get_instance: Get a unique integer ID. | ||
2061 | * | ||
2062 | * This routine allocates a unique integer ID from lpfc_hba_index pool. It | ||
2063 | * uses the kernel idr facility to perform the task. | ||
2064 | * | ||
2065 | * Return codes: | ||
2066 | * instance - a unique integer ID allocated as the new instance. | ||
2067 | * -1 - lpfc get instance failed. | ||
2068 | **/ | ||
1800 | int | 2069 | int |
1801 | lpfc_get_instance(void) | 2070 | lpfc_get_instance(void) |
1802 | { | 2071 | { |
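lpfc_get_instance leans on the kernel idr allocator named in the comment (lpfc_hba_index). With the two-step idr API of this kernel generation (idr_pre_get followed by idr_get_new), the allocation pattern looks roughly like the sketch below; the helper name is illustrative and the pool is passed in rather than hard-coded:

    #include <linux/idr.h>

    /* Sketch: two-step idr allocation (preload, then grab the next free ID). */
    static int get_instance_sketch(struct idr *pool)
    {
            int instance;

            if (!idr_pre_get(pool, GFP_KERNEL))      /* preload backing memory */
                    return -1;
            if (idr_get_new(pool, NULL, &instance))  /* allocate the next free ID */
                    return -1;
            return instance;
    }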
@@ -1810,11 +2079,21 @@ lpfc_get_instance(void) | |||
1810 | return instance; | 2079 | return instance; |
1811 | } | 2080 | } |
1812 | 2081 | ||
1813 | /* | 2082 | /** |
1814 | * Note: there is no scan_start function as adapter initialization | 2083 | * lpfc_scan_finished: method for SCSI layer to detect whether scan is done. |
1815 | * will have asynchronously kicked off the link initialization. | 2084 | * @shost: pointer to SCSI host data structure. |
1816 | */ | 2085 | * @time: elapsed time of the scan in jiffies. |
1817 | 2086 | * | |
2087 | * This routine is called by the SCSI layer with a SCSI host to determine | ||
2088 | * whether the scan host is finished. | ||
2089 | * | ||
2090 | * Note: there is no scan_start function as adapter initialization will have | ||
2091 | * asynchronously kicked off the link initialization. | ||
2092 | * | ||
2093 | * Return codes | ||
2094 | * 0 - SCSI host scan is not over yet. | ||
2095 | * 1 - SCSI host scan is over. | ||
2096 | **/ | ||
1818 | int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) | 2097 | int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) |
1819 | { | 2098 | { |
1820 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | 2099 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; |
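lpfc_scan_finished is the .scan_finished method that the SCSI midlayer polls during scsi_scan_host(); as the comment says, no .scan_start is provided because link bring-up was already kicked off asynchronously. A hedged sketch of how such a method sits in a host template; all template values other than .scan_finished are placeholders, not taken from this patch:

    #include <linux/module.h>
    #include <scsi/scsi_host.h>

    int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time);  /* from this file */

    /* Sketch: wiring a scan_finished method into a SCSI host template. */
    static struct scsi_host_template example_sht = {
            .module         = THIS_MODULE,
            .proc_name      = "example",
            .scan_finished  = lpfc_scan_finished,   /* polled until it returns nonzero */
            /* no .scan_start: link initialization already runs asynchronously */
    };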
@@ -1858,6 +2137,13 @@ finished: | |||
1858 | return stat; | 2137 | return stat; |
1859 | } | 2138 | } |
1860 | 2139 | ||
2140 | /** | ||
2141 | * lpfc_host_attrib_init: Initialize SCSI host attributes on a FC port. | ||
2142 | * @shost: pointer to SCSI host data structure. | ||
2143 | * | ||
2144 | * This routine initializes the SCSI host attributes for a given FC port. The | ||
2145 | * SCSI host can be either on top of a physical port or a virtual port. | ||
2146 | **/ | ||
1861 | void lpfc_host_attrib_init(struct Scsi_Host *shost) | 2147 | void lpfc_host_attrib_init(struct Scsi_Host *shost) |
1862 | { | 2148 | { |
1863 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | 2149 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; |
@@ -1906,42 +2192,157 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost) | |||
1906 | spin_unlock_irq(shost->host_lock); | 2192 | spin_unlock_irq(shost->host_lock); |
1907 | } | 2193 | } |
1908 | 2194 | ||
2195 | /** | ||
2196 | * lpfc_enable_msix: Enable MSI-X interrupt mode. | ||
2197 | * @phba: pointer to lpfc hba data structure. | ||
2198 | * | ||
2199 | * This routine is invoked to enable the MSI-X interrupt vectors. The kernel | ||
2200 | * function pci_enable_msix() is called to enable the MSI-X vectors. Note that | ||
2201 | * pci_enable_msix(), once invoked, enables either all or nothing, depending | ||
2202 | * on the current availability of PCI vector resources. The device driver is | ||
2203 | * responsible for calling the individual request_irq() to register each MSI-X | ||
2204 | * vector with an interrupt handler, which is done in this function. Note that | ||
2205 | * later when device is unloading, the driver should always call free_irq() | ||
2206 | * on all MSI-X vectors it has done request_irq() on before calling | ||
2208 | * pci_disable_msix(). Failure to do so results in a BUG_ON() and the device | ||
2209 | * will be left with MSI-X enabled, leaking its vectors. | ||
2209 | * | ||
2210 | * Return codes | ||
2211 | * 0 - successful | ||
2212 | * other values - error | ||
2213 | **/ | ||
1909 | static int | 2214 | static int |
1910 | lpfc_enable_msix(struct lpfc_hba *phba) | 2215 | lpfc_enable_msix(struct lpfc_hba *phba) |
1911 | { | 2216 | { |
1912 | int error; | 2217 | int rc, i; |
2218 | LPFC_MBOXQ_t *pmb; | ||
1913 | 2219 | ||
1914 | phba->msix_entries[0].entry = 0; | 2220 | /* Set up MSI-X multi-message vectors */ |
1915 | phba->msix_entries[0].vector = 0; | 2221 | for (i = 0; i < LPFC_MSIX_VECTORS; i++) |
2222 | phba->msix_entries[i].entry = i; | ||
1916 | 2223 | ||
1917 | error = pci_enable_msix(phba->pcidev, phba->msix_entries, | 2224 | /* Configure MSI-X capability structure */ |
2225 | rc = pci_enable_msix(phba->pcidev, phba->msix_entries, | ||
1918 | ARRAY_SIZE(phba->msix_entries)); | 2226 | ARRAY_SIZE(phba->msix_entries)); |
1919 | if (error) { | 2227 | if (rc) { |
1920 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | 2228 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
1921 | "0420 Enable MSI-X failed (%d), continuing " | 2229 | "0420 Enable MSI-X failed (%d), continuing " |
1922 | "with MSI\n", error); | 2230 | "with MSI\n", rc); |
1923 | pci_disable_msix(phba->pcidev); | 2231 | goto msi_fail_out; |
1924 | return error; | 2232 | } else |
2233 | for (i = 0; i < LPFC_MSIX_VECTORS; i++) | ||
2234 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
2235 | "0477 MSI-X entry[%d]: vector=x%x " | ||
2236 | "message=%d\n", i, | ||
2237 | phba->msix_entries[i].vector, | ||
2238 | phba->msix_entries[i].entry); | ||
2239 | /* | ||
2240 | * Assign MSI-X vectors to interrupt handlers | ||
2241 | */ | ||
2242 | |||
2243 | /* vector-0 is associated to slow-path handler */ | ||
2244 | rc = request_irq(phba->msix_entries[0].vector, &lpfc_sp_intr_handler, | ||
2245 | IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba); | ||
2246 | if (rc) { | ||
2247 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
2248 | "0421 MSI-X slow-path request_irq failed " | ||
2249 | "(%d), continuing with MSI\n", rc); | ||
2250 | goto msi_fail_out; | ||
1925 | } | 2251 | } |
1926 | 2252 | ||
1927 | error = request_irq(phba->msix_entries[0].vector, lpfc_intr_handler, 0, | 2253 | /* vector-1 is associated to fast-path handler */ |
1928 | LPFC_DRIVER_NAME, phba); | 2254 | rc = request_irq(phba->msix_entries[1].vector, &lpfc_fp_intr_handler, |
1929 | if (error) { | 2255 | IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, phba); |
2256 | |||
2257 | if (rc) { | ||
1930 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 2258 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
1931 | "0421 MSI-X request_irq failed (%d), " | 2259 | "0429 MSI-X fast-path request_irq failed " |
1932 | "continuing with MSI\n", error); | 2260 | "(%d), continuing with MSI\n", rc); |
1933 | pci_disable_msix(phba->pcidev); | 2261 | goto irq_fail_out; |
1934 | } | 2262 | } |
1935 | return error; | 2263 | |
2264 | /* | ||
2265 | * Configure HBA MSI-X attention conditions to messages | ||
2266 | */ | ||
2267 | pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
2268 | |||
2269 | if (!pmb) { | ||
2270 | rc = -ENOMEM; | ||
2271 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
2272 | "0474 Unable to allocate memory for issuing " | ||
2273 | "MBOX_CONFIG_MSI command\n"); | ||
2274 | goto mem_fail_out; | ||
2275 | } | ||
2276 | rc = lpfc_config_msi(phba, pmb); | ||
2277 | if (rc) | ||
2278 | goto mbx_fail_out; | ||
2279 | rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); | ||
2280 | if (rc != MBX_SUCCESS) { | ||
2281 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, | ||
2282 | "0351 Config MSI mailbox command failed, " | ||
2283 | "mbxCmd x%x, mbxStatus x%x\n", | ||
2284 | pmb->mb.mbxCommand, pmb->mb.mbxStatus); | ||
2285 | goto mbx_fail_out; | ||
2286 | } | ||
2287 | |||
2288 | /* Free memory allocated for mailbox command */ | ||
2289 | mempool_free(pmb, phba->mbox_mem_pool); | ||
2290 | return rc; | ||
2291 | |||
2292 | mbx_fail_out: | ||
2293 | /* Free memory allocated for mailbox command */ | ||
2294 | mempool_free(pmb, phba->mbox_mem_pool); | ||
2295 | |||
2296 | mem_fail_out: | ||
2297 | /* free the irq already requested */ | ||
2298 | free_irq(phba->msix_entries[1].vector, phba); | ||
2299 | |||
2300 | irq_fail_out: | ||
2301 | /* free the irq already requested */ | ||
2302 | free_irq(phba->msix_entries[0].vector, phba); | ||
2303 | |||
2304 | msi_fail_out: | ||
2305 | /* Unconfigure MSI-X capability structure */ | ||
2306 | pci_disable_msix(phba->pcidev); | ||
2307 | return rc; | ||
1936 | } | 2308 | } |
1937 | 2309 | ||
2310 | /** | ||
2311 | * lpfc_disable_msix: Disable MSI-X interrupt mode. | ||
2312 | * @phba: pointer to lpfc hba data structure. | ||
2313 | * | ||
2314 | * This routine is invoked to release the MSI-X vectors and then disable the | ||
2315 | * MSI-X interrupt mode. | ||
2316 | **/ | ||
1938 | static void | 2317 | static void |
1939 | lpfc_disable_msix(struct lpfc_hba *phba) | 2318 | lpfc_disable_msix(struct lpfc_hba *phba) |
1940 | { | 2319 | { |
1941 | free_irq(phba->msix_entries[0].vector, phba); | 2320 | int i; |
2321 | |||
2322 | /* Free up MSI-X multi-message vectors */ | ||
2323 | for (i = 0; i < LPFC_MSIX_VECTORS; i++) | ||
2324 | free_irq(phba->msix_entries[i].vector, phba); | ||
2325 | /* Disable MSI-X */ | ||
1942 | pci_disable_msix(phba->pcidev); | 2326 | pci_disable_msix(phba->pcidev); |
1943 | } | 2327 | } |
1944 | 2328 | ||
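The lpfc_enable_msix kernel-doc stresses an ordering rule: every vector handed to request_irq() must be released with free_irq() before pci_disable_msix() is called, or the vectors leak. A condensed sketch of that pairing for a two-vector layout like the one in this patch; handler arguments and interrupt names are placeholders:

    #include <linux/pci.h>
    #include <linux/interrupt.h>

    /* Sketch: enable two MSI-X vectors, unwinding in reverse order on failure. */
    static int msix_setup_sketch(struct pci_dev *pdev, struct msix_entry *ent,
                                 irq_handler_t slow, irq_handler_t fast, void *dev_id)
    {
            int rc;

            ent[0].entry = 0;
            ent[1].entry = 1;
            rc = pci_enable_msix(pdev, ent, 2);
            if (rc)
                    return rc;
            rc = request_irq(ent[0].vector, slow, IRQF_SHARED, "ex-sp", dev_id);
            if (rc)
                    goto out_disable;
            rc = request_irq(ent[1].vector, fast, IRQF_SHARED, "ex-fp", dev_id);
            if (rc)
                    goto out_free_slow;
            return 0;

    out_free_slow:
            free_irq(ent[0].vector, dev_id);
    out_disable:
            pci_disable_msix(pdev);         /* only after every free_irq() */
            return rc;
    }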
2329 | /** | ||
2330 | * lpfc_pci_probe_one: lpfc PCI probe func to register device to PCI subsystem. | ||
2331 | * @pdev: pointer to PCI device | ||
2332 | * @pid: pointer to PCI device identifier | ||
2333 | * | ||
2334 | * This routine is to be registered to the kernel's PCI subsystem. When an | ||
2335 | * Emulex HBA is presented in PCI bus, the kernel PCI subsystem looks at | ||
2336 | * PCI device-specific information of the device and driver to see if the | ||
2337 | * driver states that it can support this kind of device. If the match is | ||
2338 | * successful, the driver core invokes this routine. If this routine | ||
2339 | * determines it can claim the HBA, it does all the initialization that it | ||
2340 | * needs to do to handle the HBA properly. | ||
2341 | * | ||
2342 | * Return code | ||
2343 | * 0 - driver can claim the device | ||
2344 | * negative value - driver can not claim the device | ||
2345 | **/ | ||
1945 | static int __devinit | 2346 | static int __devinit |
1946 | lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | 2347 | lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) |
1947 | { | 2348 | { |
@@ -1956,6 +2357,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
1956 | int i, hbq_count; | 2357 | int i, hbq_count; |
1957 | uint16_t iotag; | 2358 | uint16_t iotag; |
1958 | int bars = pci_select_bars(pdev, IORESOURCE_MEM); | 2359 | int bars = pci_select_bars(pdev, IORESOURCE_MEM); |
2360 | struct lpfc_adapter_event_header adapter_event; | ||
1959 | 2361 | ||
1960 | if (pci_enable_device_mem(pdev)) | 2362 | if (pci_enable_device_mem(pdev)) |
1961 | goto out; | 2363 | goto out; |
@@ -1966,6 +2368,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
1966 | if (!phba) | 2368 | if (!phba) |
1967 | goto out_release_regions; | 2369 | goto out_release_regions; |
1968 | 2370 | ||
2371 | atomic_set(&phba->fast_event_count, 0); | ||
1969 | spin_lock_init(&phba->hbalock); | 2372 | spin_lock_init(&phba->hbalock); |
1970 | 2373 | ||
1971 | /* Initialize ndlp management spinlock */ | 2374 | /* Initialize ndlp management spinlock */ |
@@ -1978,6 +2381,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
1978 | goto out_free_phba; | 2381 | goto out_free_phba; |
1979 | 2382 | ||
1980 | INIT_LIST_HEAD(&phba->port_list); | 2383 | INIT_LIST_HEAD(&phba->port_list); |
2384 | init_waitqueue_head(&phba->wait_4_mlo_m_q); | ||
1981 | /* | 2385 | /* |
1982 | * Get all the module params for configuring this host and then | 2386 | * Get all the module params for configuring this host and then |
1983 | * establish the host. | 2387 | * establish the host. |
@@ -2000,6 +2404,9 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
2000 | init_timer(&phba->fabric_block_timer); | 2404 | init_timer(&phba->fabric_block_timer); |
2001 | phba->fabric_block_timer.function = lpfc_fabric_block_timeout; | 2405 | phba->fabric_block_timer.function = lpfc_fabric_block_timeout; |
2002 | phba->fabric_block_timer.data = (unsigned long) phba; | 2406 | phba->fabric_block_timer.data = (unsigned long) phba; |
2407 | init_timer(&phba->eratt_poll); | ||
2408 | phba->eratt_poll.function = lpfc_poll_eratt; | ||
2409 | phba->eratt_poll.data = (unsigned long) phba; | ||
2003 | 2410 | ||
2004 | pci_set_master(pdev); | 2411 | pci_set_master(pdev); |
2005 | pci_try_set_mwi(pdev); | 2412 | pci_try_set_mwi(pdev); |
@@ -2019,7 +2426,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
2019 | bar2map_len = pci_resource_len(phba->pcidev, 2); | 2426 | bar2map_len = pci_resource_len(phba->pcidev, 2); |
2020 | 2427 | ||
2021 | /* Map HBA SLIM to a kernel virtual address. */ | 2428 | /* Map HBA SLIM to a kernel virtual address. */ |
2022 | phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); | 2429 | phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); |
2023 | if (!phba->slim_memmap_p) { | 2430 | if (!phba->slim_memmap_p) { |
2024 | error = -ENODEV; | 2431 | error = -ENODEV; |
2025 | dev_printk(KERN_ERR, &pdev->dev, | 2432 | dev_printk(KERN_ERR, &pdev->dev, |
@@ -2037,12 +2444,18 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
2037 | } | 2444 | } |
2038 | 2445 | ||
2039 | /* Allocate memory for SLI-2 structures */ | 2446 | /* Allocate memory for SLI-2 structures */ |
2040 | phba->slim2p = dma_alloc_coherent(&phba->pcidev->dev, SLI2_SLIM_SIZE, | 2447 | phba->slim2p.virt = dma_alloc_coherent(&phba->pcidev->dev, |
2041 | &phba->slim2p_mapping, GFP_KERNEL); | 2448 | SLI2_SLIM_SIZE, |
2042 | if (!phba->slim2p) | 2449 | &phba->slim2p.phys, |
2450 | GFP_KERNEL); | ||
2451 | if (!phba->slim2p.virt) | ||
2043 | goto out_iounmap; | 2452 | goto out_iounmap; |
2044 | 2453 | ||
2045 | memset(phba->slim2p, 0, SLI2_SLIM_SIZE); | 2454 | memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); |
2455 | phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); | ||
2456 | phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); | ||
2457 | phba->IOCBs = (phba->slim2p.virt + | ||
2458 | offsetof(struct lpfc_sli2_slim, IOCBs)); | ||
2046 | 2459 | ||
2047 | phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev, | 2460 | phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev, |
2048 | lpfc_sli_hbq_size(), | 2461 | lpfc_sli_hbq_size(), |
@@ -2111,7 +2524,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
2111 | phba->fc_arbtov = FF_DEF_ARBTOV; | 2524 | phba->fc_arbtov = FF_DEF_ARBTOV; |
2112 | 2525 | ||
2113 | INIT_LIST_HEAD(&phba->work_list); | 2526 | INIT_LIST_HEAD(&phba->work_list); |
2114 | phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT); | 2527 | phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); |
2115 | phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); | 2528 | phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); |
2116 | 2529 | ||
2117 | /* Initialize the wait queue head for the kernel thread */ | 2530 | /* Initialize the wait queue head for the kernel thread */ |
@@ -2146,21 +2559,42 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
2146 | pci_set_drvdata(pdev, shost); | 2559 | pci_set_drvdata(pdev, shost); |
2147 | phba->intr_type = NONE; | 2560 | phba->intr_type = NONE; |
2148 | 2561 | ||
2562 | phba->MBslimaddr = phba->slim_memmap_p; | ||
2563 | phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; | ||
2564 | phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; | ||
2565 | phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; | ||
2566 | phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; | ||
2567 | |||
2568 | /* Configure and enable interrupt */ | ||
2149 | if (phba->cfg_use_msi == 2) { | 2569 | if (phba->cfg_use_msi == 2) { |
2150 | error = lpfc_enable_msix(phba); | 2570 | /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ |
2151 | if (!error) | 2571 | error = lpfc_sli_config_port(phba, 3); |
2152 | phba->intr_type = MSIX; | 2572 | if (error) |
2573 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
2574 | "0427 Firmware not capable of SLI 3 mode.\n"); | ||
2575 | else { | ||
2576 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
2577 | "0426 Firmware capable of SLI 3 mode.\n"); | ||
2578 | /* Now, try to enable MSI-X interrupt mode */ | ||
2579 | error = lpfc_enable_msix(phba); | ||
2580 | if (!error) { | ||
2581 | phba->intr_type = MSIX; | ||
2582 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
2583 | "0430 enable MSI-X mode.\n"); | ||
2584 | } | ||
2585 | } | ||
2153 | } | 2586 | } |
2154 | 2587 | ||
2155 | /* Fallback to MSI if MSI-X initialization failed */ | 2588 | /* Fallback to MSI if MSI-X initialization failed */ |
2156 | if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) { | 2589 | if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) { |
2157 | retval = pci_enable_msi(phba->pcidev); | 2590 | retval = pci_enable_msi(phba->pcidev); |
2158 | if (!retval) | 2591 | if (!retval) { |
2159 | phba->intr_type = MSI; | 2592 | phba->intr_type = MSI; |
2160 | else | ||
2161 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | 2593 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
2162 | "0452 Enable MSI failed, continuing " | 2594 | "0473 enable MSI mode.\n"); |
2163 | "with IRQ\n"); | 2595 | } else |
2596 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
2597 | "0452 enable IRQ mode.\n"); | ||
2164 | } | 2598 | } |
2165 | 2599 | ||
2166 | /* MSI-X is the only case that doesn't need to call request_irq */ | 2600 | /* MSI-X is the only case that doesn't need to call request_irq */ |
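The probe code above walks down a capability ladder: MSI-X (only once the firmware has accepted SLI-3 mode), then MSI, then the legacy INTx line. A schematic of that fallback, with the driver-specific MSI-X setup passed in as a callback so the sketch stays self-contained; the cfg_use_msi semantics mirror what the diff shows (2 tries MSI-X first, 1 tries MSI):

    #include <linux/pci.h>

    /* Sketch: interrupt-mode fallback ladder, MSI-X -> MSI -> INTx. */
    static int pick_intr_mode_sketch(struct pci_dev *pdev, int cfg_use_msi,
                                     int (*enable_msix)(struct pci_dev *))
    {
            if (cfg_use_msi == 2 && enable_msix && !enable_msix(pdev))
                    return 2;               /* MSI-X */
            if (cfg_use_msi >= 1 && !pci_enable_msi(pdev))
                    return 1;               /* MSI   */
            return 0;                       /* INTx: a plain request_irq() follows */
    }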
@@ -2176,18 +2610,16 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
2176 | phba->intr_type = INTx; | 2610 | phba->intr_type = INTx; |
2177 | } | 2611 | } |
2178 | 2612 | ||
2179 | phba->MBslimaddr = phba->slim_memmap_p; | ||
2180 | phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; | ||
2181 | phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; | ||
2182 | phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; | ||
2183 | phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; | ||
2184 | |||
2185 | if (lpfc_alloc_sysfs_attr(vport)) { | 2613 | if (lpfc_alloc_sysfs_attr(vport)) { |
2614 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
2615 | "1476 Failed to allocate sysfs attr\n"); | ||
2186 | error = -ENOMEM; | 2616 | error = -ENOMEM; |
2187 | goto out_free_irq; | 2617 | goto out_free_irq; |
2188 | } | 2618 | } |
2189 | 2619 | ||
2190 | if (lpfc_sli_hba_setup(phba)) { | 2620 | if (lpfc_sli_hba_setup(phba)) { |
2621 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
2622 | "1477 Failed to set up hba\n"); | ||
2191 | error = -ENODEV; | 2623 | error = -ENODEV; |
2192 | goto out_remove_device; | 2624 | goto out_remove_device; |
2193 | } | 2625 | } |
@@ -2206,6 +2638,16 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
2206 | spin_unlock_irq(shost->host_lock); | 2638 | spin_unlock_irq(shost->host_lock); |
2207 | } | 2639 | } |
2208 | 2640 | ||
2641 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
2642 | "0428 Perform SCSI scan\n"); | ||
2643 | /* Send board arrival event to upper layer */ | ||
2644 | adapter_event.event_type = FC_REG_ADAPTER_EVENT; | ||
2645 | adapter_event.subcategory = LPFC_EVENT_ARRIVAL; | ||
2646 | fc_host_post_vendor_event(shost, fc_get_event_number(), | ||
2647 | sizeof(adapter_event), | ||
2648 | (char *) &adapter_event, | ||
2649 | SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); | ||
2650 | |||
2209 | scsi_scan_host(shost); | 2651 | scsi_scan_host(shost); |
2210 | 2652 | ||
2211 | return 0; | 2653 | return 0; |
@@ -2238,11 +2680,11 @@ out_free_iocbq: | |||
2238 | } | 2680 | } |
2239 | lpfc_mem_free(phba); | 2681 | lpfc_mem_free(phba); |
2240 | out_free_hbqslimp: | 2682 | out_free_hbqslimp: |
2241 | dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt, | 2683 | dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), |
2242 | phba->hbqslimp.phys); | 2684 | phba->hbqslimp.virt, phba->hbqslimp.phys); |
2243 | out_free_slim: | 2685 | out_free_slim: |
2244 | dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p, | 2686 | dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, |
2245 | phba->slim2p_mapping); | 2687 | phba->slim2p.virt, phba->slim2p.phys); |
2246 | out_iounmap: | 2688 | out_iounmap: |
2247 | iounmap(phba->ctrl_regs_memmap_p); | 2689 | iounmap(phba->ctrl_regs_memmap_p); |
2248 | out_iounmap_slim: | 2690 | out_iounmap_slim: |
@@ -2262,6 +2704,14 @@ out: | |||
2262 | return error; | 2704 | return error; |
2263 | } | 2705 | } |
2264 | 2706 | ||
2707 | /** | ||
2708 | * lpfc_pci_remove_one: lpfc PCI func to unregister device from PCI subsystem. | ||
2709 | * @pdev: pointer to PCI device | ||
2710 | * | ||
2711 | * This routine is to be registered to the kernel's PCI subsystem. When an | ||
2712 | * Emulex HBA is removed from the PCI bus, it performs all the necessary cleanup | ||
2713 | * for the HBA device to be removed from the PCI subsystem properly. | ||
2714 | **/ | ||
2265 | static void __devexit | 2715 | static void __devexit |
2266 | lpfc_pci_remove_one(struct pci_dev *pdev) | 2716 | lpfc_pci_remove_one(struct pci_dev *pdev) |
2267 | { | 2717 | { |
@@ -2316,12 +2766,12 @@ lpfc_pci_remove_one(struct pci_dev *pdev) | |||
2316 | lpfc_scsi_free(phba); | 2766 | lpfc_scsi_free(phba); |
2317 | lpfc_mem_free(phba); | 2767 | lpfc_mem_free(phba); |
2318 | 2768 | ||
2319 | dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt, | 2769 | dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), |
2320 | phba->hbqslimp.phys); | 2770 | phba->hbqslimp.virt, phba->hbqslimp.phys); |
2321 | 2771 | ||
2322 | /* Free resources associated with SLI2 interface */ | 2772 | /* Free resources associated with SLI2 interface */ |
2323 | dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, | 2773 | dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, |
2324 | phba->slim2p, phba->slim2p_mapping); | 2774 | phba->slim2p.virt, phba->slim2p.phys); |
2325 | 2775 | ||
2326 | /* unmap adapter SLIM and Control Registers */ | 2776 | /* unmap adapter SLIM and Control Registers */ |
2327 | iounmap(phba->ctrl_regs_memmap_p); | 2777 | iounmap(phba->ctrl_regs_memmap_p); |
@@ -2336,13 +2786,21 @@ lpfc_pci_remove_one(struct pci_dev *pdev) | |||
2336 | } | 2786 | } |
2337 | 2787 | ||
2338 | /** | 2788 | /** |
2339 | * lpfc_io_error_detected - called when PCI error is detected | 2789 | * lpfc_io_error_detected: Driver method for handling PCI I/O error detected. |
2340 | * @pdev: Pointer to PCI device | 2790 | * @pdev: pointer to PCI device. |
2341 | * @state: The current pci conneection state | 2791 | * @state: the current PCI connection state. |
2342 | * | 2792 | * |
2343 | * This function is called after a PCI bus error affecting | 2793 | * This routine is registered to the PCI subsystem for error handling. This |
2344 | * this device has been detected. | 2794 | * function is called by the PCI subsystem after a PCI bus error affecting |
2345 | */ | 2795 | * this device has been detected. When this function is invoked, it will |
2796 | * need to stop all the I/Os and interrupt(s) to the device. Once that is | ||
2797 | * done, it will return PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to | ||
2798 | * perform proper recovery as desired. | ||
2799 | * | ||
2800 | * Return codes | ||
2801 | * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery | ||
2802 | * PCI_ERS_RESULT_DISCONNECT - device could not be recovered | ||
2803 | **/ | ||
2346 | static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev, | 2804 | static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev, |
2347 | pci_channel_state_t state) | 2805 | pci_channel_state_t state) |
2348 | { | 2806 | { |
@@ -2351,8 +2809,15 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev, | |||
2351 | struct lpfc_sli *psli = &phba->sli; | 2809 | struct lpfc_sli *psli = &phba->sli; |
2352 | struct lpfc_sli_ring *pring; | 2810 | struct lpfc_sli_ring *pring; |
2353 | 2811 | ||
2354 | if (state == pci_channel_io_perm_failure) | 2812 | if (state == pci_channel_io_perm_failure) { |
2813 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
2814 | "0472 PCI channel I/O permanent failure\n"); | ||
2815 | /* Block all SCSI devices' I/Os on the host */ | ||
2816 | lpfc_scsi_dev_block(phba); | ||
2817 | /* Clean up all driver's outstanding SCSI I/Os */ | ||
2818 | lpfc_sli_flush_fcp_rings(phba); | ||
2355 | return PCI_ERS_RESULT_DISCONNECT; | 2819 | return PCI_ERS_RESULT_DISCONNECT; |
2820 | } | ||
2356 | 2821 | ||
2357 | pci_disable_device(pdev); | 2822 | pci_disable_device(pdev); |
2358 | /* | 2823 | /* |
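The behaviour documented and implemented above follows the standard PCI error-recovery contract. Below is a hedged, generic sketch of that contract, not the lpfc implementation; struct my_dev, my_block_io() and my_flush_io() are hypothetical placeholders for driver-specific state and cleanup.

#include <linux/pci.h>

struct my_dev { int dummy; };			/* placeholder private data */

static void my_block_io(struct my_dev *md) { }	/* placeholder: stop new I/O */
static void my_flush_io(struct my_dev *md) { }	/* placeholder: fail pending I/O */

static pci_ers_result_t my_error_detected(struct pci_dev *pdev,
					  pci_channel_state_t state)
{
	struct my_dev *md = pci_get_drvdata(pdev);

	if (state == pci_channel_io_perm_failure) {
		/* link is gone for good: fail outstanding I/O and give up */
		my_block_io(md);
		my_flush_io(md);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* quiesce the device and ask the PCI core for a slot reset */
	pci_disable_device(pdev);
	return PCI_ERS_RESULT_NEED_RESET;
}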
@@ -2376,10 +2841,21 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev, | |||
2376 | } | 2841 | } |
2377 | 2842 | ||
2378 | /** | 2843 | /** |
2379 | * lpfc_io_slot_reset - called after the pci bus has been reset. | 2844 | * lpfc_io_slot_reset: Restart a PCI device from scratch. |
2380 | * @pdev: Pointer to PCI device | 2845 | * @pdev: pointer to PCI device. |
2846 | * | ||
2847 | * This routine is registered to the PCI subsystem for error handling. This is | ||
2848 | * called after PCI bus has been reset to restart the PCI card from scratch, | ||
2849 | * as if from a cold-boot. During the PCI subsystem error recovery, after the | ||
2850 | * driver returns PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform | ||
2851 | * proper error recovery and then call this routine before calling the .resume | ||
2852 | * method to recover the device. This function will initialize the HBA device, | ||
2853 | * enable the interrupt, but it will just put the HBA into an offline state | ||
2854 | * without passing any I/O traffic. | ||
2381 | * | 2855 | * |
2382 | * Restart the card from scratch, as if from a cold-boot. | 2856 | * Return codes |
2857 | * PCI_ERS_RESULT_RECOVERED - the device has been recovered | ||
2858 | * PCI_ERS_RESULT_DISCONNECT - device could not be recovered | ||
2383 | */ | 2859 | */ |
2384 | static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) | 2860 | static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) |
2385 | { | 2861 | { |
@@ -2404,20 +2880,34 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) | |||
2404 | /* Enable configured interrupt method */ | 2880 | /* Enable configured interrupt method */ |
2405 | phba->intr_type = NONE; | 2881 | phba->intr_type = NONE; |
2406 | if (phba->cfg_use_msi == 2) { | 2882 | if (phba->cfg_use_msi == 2) { |
2407 | error = lpfc_enable_msix(phba); | 2883 | /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ |
2408 | if (!error) | 2884 | error = lpfc_sli_config_port(phba, 3); |
2409 | phba->intr_type = MSIX; | 2885 | if (error) |
2886 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
2887 | "0478 Firmware not capable of SLI 3 mode.\n"); | ||
2888 | else { | ||
2889 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
2890 | "0479 Firmware capable of SLI 3 mode.\n"); | ||
2891 | /* Now, try to enable MSI-X interrupt mode */ | ||
2892 | error = lpfc_enable_msix(phba); | ||
2893 | if (!error) { | ||
2894 | phba->intr_type = MSIX; | ||
2895 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
2896 | "0480 enable MSI-X mode.\n"); | ||
2897 | } | ||
2898 | } | ||
2410 | } | 2899 | } |
2411 | 2900 | ||
2412 | /* Fallback to MSI if MSI-X initialization failed */ | 2901 | /* Fallback to MSI if MSI-X initialization failed */ |
2413 | if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) { | 2902 | if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) { |
2414 | retval = pci_enable_msi(phba->pcidev); | 2903 | retval = pci_enable_msi(phba->pcidev); |
2415 | if (!retval) | 2904 | if (!retval) { |
2416 | phba->intr_type = MSI; | 2905 | phba->intr_type = MSI; |
2417 | else | ||
2418 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | 2906 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
2419 | "0470 Enable MSI failed, continuing " | 2907 | "0481 enable MSI mode.\n"); |
2420 | "with IRQ\n"); | 2908 | } else |
2909 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
2910 | "0470 enable IRQ mode.\n"); | ||
2421 | } | 2911 | } |
2422 | 2912 | ||
2423 | /* MSI-X is the only case that doesn't need to call request_irq */ | 2913 |
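For orientation, a hedged sketch of the fallback ladder the hunk above implements: MSI-X when the module parameter asks for it, then MSI, then legacy INTx. my_enable_msix() is a stand-in for the driver-specific MSI-X setup (lpfc_enable_msix() in this driver) and is not a kernel API.

#include <linux/pci.h>
#include <linux/errno.h>

enum my_intr_type { MY_INTR_INTX, MY_INTR_MSI, MY_INTR_MSIX };

static int my_enable_msix(struct pci_dev *pdev)
{
	return -ENOSYS;		/* placeholder for driver-specific MSI-X setup */
}

static enum my_intr_type my_setup_intr(struct pci_dev *pdev, int cfg_use_msi)
{
	if (cfg_use_msi == 2 && my_enable_msix(pdev) == 0)
		return MY_INTR_MSIX;		/* multi-message vectors */

	if (cfg_use_msi >= 1 && pci_enable_msi(pdev) == 0)
		return MY_INTR_MSI;		/* single message interrupt */

	return MY_INTR_INTX;			/* fall back to the legacy line */
}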
@@ -2440,11 +2930,13 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) | |||
2440 | } | 2930 | } |
2441 | 2931 | ||
2442 | /** | 2932 | /** |
2443 | * lpfc_io_resume - called when traffic can start flowing again. | 2933 | * lpfc_io_resume: Resume PCI I/O operation. |
2444 | * @pdev: Pointer to PCI device | 2934 | * @pdev: pointer to PCI device |
2445 | * | 2935 | * |
2446 | * This callback is called when the error recovery driver tells us that | 2936 | * This routine is registered to the PCI subsystem for error handling. It is |
2447 | * its OK to resume normal operation. | 2937 | * called when kernel error recovery tells the lpfc driver that it is ok to |
2938 | * resume normal PCI operation after PCI bus error recovery. After this call, | ||
2939 | * traffic can start to flow from this device again. | ||
2448 | */ | 2940 | */ |
2449 | static void lpfc_io_resume(struct pci_dev *pdev) | 2941 | static void lpfc_io_resume(struct pci_dev *pdev) |
2450 | { | 2942 | { |
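The three callbacks documented in this file (.error_detected, .slot_reset, .resume) are published through a struct pci_error_handlers that the driver's struct pci_driver points at; the diff below shows lpfc doing this with .err_handler = &lpfc_err_handler. A hedged sketch with hypothetical my_* handlers, reduced to placeholder bodies:

#include <linux/pci.h>

static pci_ers_result_t my_error_detected(struct pci_dev *pdev,
					  pci_channel_state_t state)
{
	return PCI_ERS_RESULT_NEED_RESET;	/* placeholder */
}

static pci_ers_result_t my_slot_reset(struct pci_dev *pdev)
{
	return PCI_ERS_RESULT_RECOVERED;	/* placeholder */
}

static void my_resume(struct pci_dev *pdev)
{
}

static struct pci_error_handlers my_err_handler = {
	.error_detected	= my_error_detected,
	.slot_reset	= my_slot_reset,
	.resume		= my_resume,
};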
@@ -2491,6 +2983,8 @@ static struct pci_device_id lpfc_id_table[] = { | |||
2491 | PCI_ANY_ID, PCI_ANY_ID, }, | 2983 | PCI_ANY_ID, PCI_ANY_ID, }, |
2492 | {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR, | 2984 | {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR, |
2493 | PCI_ANY_ID, PCI_ANY_ID, }, | 2985 | PCI_ANY_ID, PCI_ANY_ID, }, |
2986 | {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET, | ||
2987 | PCI_ANY_ID, PCI_ANY_ID, }, | ||
2494 | {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP, | 2988 | {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP, |
2495 | PCI_ANY_ID, PCI_ANY_ID, }, | 2989 | PCI_ANY_ID, PCI_ANY_ID, }, |
2496 | {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP, | 2990 | {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP, |
@@ -2521,6 +3015,12 @@ static struct pci_device_id lpfc_id_table[] = { | |||
2521 | PCI_ANY_ID, PCI_ANY_ID, }, | 3015 | PCI_ANY_ID, PCI_ANY_ID, }, |
2522 | {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S, | 3016 | {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S, |
2523 | PCI_ANY_ID, PCI_ANY_ID, }, | 3017 | PCI_ANY_ID, PCI_ANY_ID, }, |
3018 | {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF, | ||
3019 | PCI_ANY_ID, PCI_ANY_ID, }, | ||
3020 | {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF, | ||
3021 | PCI_ANY_ID, PCI_ANY_ID, }, | ||
3022 | {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S, | ||
3023 | PCI_ANY_ID, PCI_ANY_ID, }, | ||
2524 | { 0 } | 3024 | { 0 } |
2525 | }; | 3025 | }; |
2526 | 3026 | ||
@@ -2540,6 +3040,18 @@ static struct pci_driver lpfc_driver = { | |||
2540 | .err_handler = &lpfc_err_handler, | 3040 | .err_handler = &lpfc_err_handler, |
2541 | }; | 3041 | }; |
2542 | 3042 | ||
3043 | /** | ||
3044 | * lpfc_init: lpfc module initialization routine. | ||
3045 | * | ||
3046 | * This routine is to be invoked when the lpfc module is loaded into the | ||
3047 | * kernel. The special kernel macro module_init() is used to indicate the | ||
3048 | * role of this routine to the kernel as lpfc module entry point. | ||
3049 | * | ||
3050 | * Return codes | ||
3051 | * 0 - successful | ||
3052 | * -ENOMEM - FC attach transport failed | ||
3053 | * all others - failed | ||
3054 | */ | ||
2543 | static int __init | 3055 | static int __init |
2544 | lpfc_init(void) | 3056 | lpfc_init(void) |
2545 | { | 3057 | { |
@@ -2567,12 +3079,20 @@ lpfc_init(void) | |||
2567 | error = pci_register_driver(&lpfc_driver); | 3079 | error = pci_register_driver(&lpfc_driver); |
2568 | if (error) { | 3080 | if (error) { |
2569 | fc_release_transport(lpfc_transport_template); | 3081 | fc_release_transport(lpfc_transport_template); |
2570 | fc_release_transport(lpfc_vport_transport_template); | 3082 | if (lpfc_enable_npiv) |
3083 | fc_release_transport(lpfc_vport_transport_template); | ||
2571 | } | 3084 | } |
2572 | 3085 | ||
2573 | return error; | 3086 | return error; |
2574 | } | 3087 | } |
2575 | 3088 | ||
3089 | /** | ||
3090 | * lpfc_exit: lpfc module removal routine. | ||
3091 | * | ||
3092 | * This routine is invoked when the lpfc module is removed from the kernel. | ||
3093 | * The special kernel macro module_exit() is used to indicate the role of | ||
3094 | * this routine to the kernel as lpfc module exit point. | ||
3095 | */ | ||
2576 | static void __exit | 3096 | static void __exit |
2577 | lpfc_exit(void) | 3097 | lpfc_exit(void) |
2578 | { | 3098 | { |
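As the two kernel-doc blocks above describe, the module entry and exit points simply register and unregister the PCI driver (plus, in lpfc, the FC transport templates). A hedged, minimal sketch of that skeleton with placeholder names:

#include <linux/module.h>
#include <linux/pci.h>

static struct pci_driver my_driver = {
	.name = "my_driver",
	/* .id_table, .probe, .remove, .err_handler filled in elsewhere */
};

static int __init my_init(void)
{
	return pci_register_driver(&my_driver);	/* module entry point */
}

static void __exit my_exit(void)
{
	pci_unregister_driver(&my_driver);	/* module exit point */
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");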
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 7a9be4c5b7cb..7465fe746fe9 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2007 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2008 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -30,6 +30,7 @@ | |||
30 | 30 | ||
31 | #include "lpfc_hw.h" | 31 | #include "lpfc_hw.h" |
32 | #include "lpfc_sli.h" | 32 | #include "lpfc_sli.h" |
33 | #include "lpfc_nl.h" | ||
33 | #include "lpfc_disc.h" | 34 | #include "lpfc_disc.h" |
34 | #include "lpfc_scsi.h" | 35 | #include "lpfc_scsi.h" |
35 | #include "lpfc.h" | 36 | #include "lpfc.h" |
@@ -37,10 +38,20 @@ | |||
37 | #include "lpfc_crtn.h" | 38 | #include "lpfc_crtn.h" |
38 | #include "lpfc_compat.h" | 39 | #include "lpfc_compat.h" |
39 | 40 | ||
40 | /**********************************************/ | 41 | /** |
41 | 42 | * lpfc_dump_mem: Prepare a mailbox command for retrieving HBA's VPD memory. | |
42 | /* mailbox command */ | 43 | * @phba: pointer to lpfc hba data structure. |
43 | /**********************************************/ | 44 | * @pmb: pointer to the driver internal queue element for mailbox command. |
45 | * @offset: offset for dumping VPD memory mailbox command. | ||
46 | * | ||
47 | * The dump mailbox command provides a method for the device driver to obtain | ||
48 | * various types of information from the HBA device. | ||
49 | * | ||
50 | * This routine prepares the mailbox command for dumping HBA Vital Product | ||
51 | * Data (VPD) memory. This mailbox command is to be used for retrieving a | ||
52 | * portion (DMP_RSP_SIZE bytes) of a HBA's VPD from the HBA at an address | ||
53 | * offset specified by the offset parameter. | ||
54 | **/ | ||
44 | void | 55 | void |
45 | lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset) | 56 | lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset) |
46 | { | 57 | { |
@@ -65,10 +76,17 @@ lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset) | |||
65 | return; | 76 | return; |
66 | } | 77 | } |
67 | 78 | ||
68 | /**********************************************/ | 79 | /** |
69 | /* lpfc_read_nv Issue a READ NVPARAM */ | 80 | * lpfc_read_nv: Prepare a mailbox command for reading HBA's NVRAM param. |
70 | /* mailbox command */ | 81 | * @phba: pointer to lpfc hba data structure. |
71 | /**********************************************/ | 82 | * @pmb: pointer to the driver internal queue element for mailbox command. |
83 | * | ||
84 | * The read NVRAM mailbox command returns the HBA's non-volatile parameters | ||
85 | * that are used as defaults when the Fibre Channel link is brought on-line. | ||
86 | * | ||
87 | * This routine prepares the mailbox command for reading information stored | ||
88 | * in the HBA's NVRAM. Specifically, the HBA's WWNN and WWPN. | ||
89 | **/ | ||
72 | void | 90 | void |
73 | lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | 91 | lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) |
74 | { | 92 | { |
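Most of the routines documented in this file follow the same shape: clear the driver's mailbox queue element, fill in the command-specific fields, then set the command code and the owner bit. A hedged sketch of that shape with simplified, hypothetical structure and opcode names (the real driver uses MAILBOX_t and the MBX_* opcodes):

#include <linux/string.h>

struct my_mailbox {
	unsigned char	command;	/* mailbox opcode */
	unsigned char	owner;		/* who owns the mailbox next */
	unsigned int	payload[32];	/* command-specific fields */
};

#define MY_MBX_READ_NV	0x02		/* hypothetical opcode value */
#define MY_OWN_HOST	0x01		/* hypothetical owner encoding */

static void my_prep_read_nv(struct my_mailbox *mb)
{
	memset(mb, 0, sizeof(*mb));	/* start from a clean element */
	mb->command = MY_MBX_READ_NV;	/* select the command */
	mb->owner   = MY_OWN_HOST;	/* mark the mailbox as host-owned */
}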
@@ -81,10 +99,19 @@ lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | |||
81 | return; | 99 | return; |
82 | } | 100 | } |
83 | 101 | ||
84 | /**********************************************/ | 102 | /** |
85 | /* lpfc_config_async Issue a */ | 103 | * lpfc_config_async: Prepare a mailbox command for enabling HBA async event. |
86 | /* MBX_ASYNC_EVT_ENABLE mailbox command */ | 104 | * @phba: pointer to lpfc hba data structure. |
87 | /**********************************************/ | 105 | * @pmb: pointer to the driver internal queue element for mailbox command. |
106 | * @ring: ring number for the asynchronous event to be configured. | ||
107 | * | ||
108 | * The asynchronous event enable mailbox command is used to enable the | ||
109 | * asynchronous event posting via the ASYNC_STATUS_CN IOCB response and | ||
110 | * specifies the default ring to which events are posted. | ||
111 | * | ||
112 | * This routine prepares the mailbox command for enabling HBA asynchronous | ||
113 | * event support on an IOCB ring. | ||
114 | **/ | ||
88 | void | 115 | void |
89 | lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, | 116 | lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, |
90 | uint32_t ring) | 117 | uint32_t ring) |
@@ -99,10 +126,19 @@ lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, | |||
99 | return; | 126 | return; |
100 | } | 127 | } |
101 | 128 | ||
102 | /**********************************************/ | 129 | /** |
103 | /* lpfc_heart_beat Issue a HEART_BEAT */ | 130 | * lpfc_heart_beat: Prepare a mailbox command for heart beat. |
104 | /* mailbox command */ | 131 | * @phba: pointer to lpfc hba data structure. |
105 | /**********************************************/ | 132 | * @pmb: pointer to the driver internal queue element for mailbox command. |
133 | * | ||
134 | * The heart beat mailbox command is used to detect an unresponsive HBA, which | ||
135 | * is defined as any device where no error attention is sent and both mailbox | ||
136 | * and rings are not processed. | ||
137 | * | ||
138 | * This routine prepares the mailbox command for issuing a heart beat in the | ||
139 | * form of mailbox command to the HBA. The timely completion of the heart | ||
140 | * beat mailbox command indicates the health of the HBA. | ||
141 | **/ | ||
106 | void | 142 | void |
107 | lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | 143 | lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) |
108 | { | 144 | { |
@@ -115,10 +151,26 @@ lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | |||
115 | return; | 151 | return; |
116 | } | 152 | } |
117 | 153 | ||
118 | /**********************************************/ | 154 | /** |
119 | /* lpfc_read_la Issue a READ LA */ | 155 | * lpfc_read_la: Prepare a mailbox command for reading HBA link attention. |
120 | /* mailbox command */ | 156 | * @phba: pointer to lpfc hba data structure. |
121 | /**********************************************/ | 157 | * @pmb: pointer to the driver internal queue element for mailbox command. |
158 | * @mp: DMA buffer memory for reading the link attention information into. | ||
159 | * | ||
160 | * The read link attention mailbox command is issued to read the Link Event | ||
161 | * Attention information indicated by the HBA port when the Link Event bit | ||
162 | * of the Host Attention (HSTATT) register is set to 1. A Link Event | ||
163 | * Attention occurs based on an exception detected at the Fibre Channel link | ||
164 | * interface. | ||
165 | * | ||
166 | * This routine prepares the mailbox command for reading HBA link attention | ||
167 | * information. DMA memory has been set aside and its address passed to the | ||
168 | * HBA through @mp for the HBA to DMA link attention information into the | ||
169 | * memory as part of the execution of the mailbox command. | ||
170 | * | ||
171 | * Return codes | ||
172 | * 0 - Success (currently always return 0) | ||
173 | **/ | ||
122 | int | 174 | int |
123 | lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp) | 175 | lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp) |
124 | { | 176 | { |
@@ -143,10 +195,21 @@ lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp) | |||
143 | return (0); | 195 | return (0); |
144 | } | 196 | } |
145 | 197 | ||
146 | /**********************************************/ | 198 | /** |
147 | /* lpfc_clear_la Issue a CLEAR LA */ | 199 | * lpfc_clear_la: Prepare a mailbox command for clearing HBA link attention. |
148 | /* mailbox command */ | 200 | * @phba: pointer to lpfc hba data structure. |
149 | /**********************************************/ | 201 | * @pmb: pointer to the driver internal queue element for mailbox command. |
202 | * | ||
203 | * The clear link attention mailbox command is issued to clear the link event | ||
204 | * attention condition indicated by the Link Event bit of the Host Attention | ||
205 | * (HSTATT) register. The link event attention condition is cleared only if | ||
206 | * the event tag specified matches that of the current link event counter. | ||
207 | * The current event tag is read using the read link attention event mailbox | ||
208 | * command. | ||
209 | * | ||
210 | * This routine prepares the mailbox command for clearing HBA link attention | ||
211 | * information. | ||
212 | **/ | ||
150 | void | 213 | void |
151 | lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | 214 | lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) |
152 | { | 215 | { |
@@ -161,10 +224,20 @@ lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | |||
161 | return; | 224 | return; |
162 | } | 225 | } |
163 | 226 | ||
164 | /**************************************************/ | 227 | /** |
165 | /* lpfc_config_link Issue a CONFIG LINK */ | 228 | * lpfc_config_link: Prepare a mailbox command for configuring link on a HBA. |
166 | /* mailbox command */ | 229 | * @phba: pointer to lpfc hba data structure. |
167 | /**************************************************/ | 230 | * @pmb: pointer to the driver internal queue element for mailbox command. |
231 | * | ||
232 | * The configure link mailbox command is used before the initialize link | ||
233 | * mailbox command to override default values and to configure link-oriented | ||
234 | * parameters such as DID address and various timers. Typically, this | ||
235 | * command would be used after an F_Port login to set the returned DID address | ||
236 | * and the fabric timeout values. This command is not valid before a configure | ||
237 | * port command has configured the HBA port. | ||
238 | * | ||
239 | * This routine prepares the mailbox command for configuring link on a HBA. | ||
240 | **/ | ||
168 | void | 241 | void |
169 | lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | 242 | lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) |
170 | { | 243 | { |
@@ -199,10 +272,98 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | |||
199 | return; | 272 | return; |
200 | } | 273 | } |
201 | 274 | ||
202 | /**********************************************/ | 275 | /** |
203 | /* lpfc_init_link Issue an INIT LINK */ | 276 | * lpfc_config_msi: Prepare a mailbox command for configuring msi-x. |
204 | /* mailbox command */ | 277 | * @phba: pointer to lpfc hba data structure. |
205 | /**********************************************/ | 278 | * @pmb: pointer to the driver internal queue element for mailbox command. |
279 | * | ||
280 | * The configure MSI-X mailbox command is used to configure the HBA's SLI-3 | ||
281 | * MSI-X multi-message interrupt vector association to interrupt attention | ||
282 | * conditions. | ||
283 | * | ||
284 | * Return codes | ||
285 | * 0 - Success | ||
286 | * -EINVAL - Failure | ||
287 | **/ | ||
288 | int | ||
289 | lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | ||
290 | { | ||
291 | MAILBOX_t *mb = &pmb->mb; | ||
292 | uint32_t attentionConditions[2]; | ||
293 | |||
294 | /* Sanity check */ | ||
295 | if (phba->cfg_use_msi != 2) { | ||
296 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
297 | "0475 Not configured for supporting MSI-X " | ||
298 | "cfg_use_msi: 0x%x\n", phba->cfg_use_msi); | ||
299 | return -EINVAL; | ||
300 | } | ||
301 | |||
302 | if (phba->sli_rev < 3) { | ||
303 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
304 | "0476 HBA not supporting SLI-3 or later " | ||
305 | "SLI Revision: 0x%x\n", phba->sli_rev); | ||
306 | return -EINVAL; | ||
307 | } | ||
308 | |||
309 | /* Clear mailbox command fields */ | ||
310 | memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); | ||
311 | |||
312 | /* | ||
313 | * SLI-3, Message Signaled Interrupt Feature. | ||
314 | */ | ||
315 | |||
316 | /* Multi-message attention configuration */ | ||
317 | attentionConditions[0] = (HA_R0ATT | HA_R1ATT | HA_R2ATT | HA_ERATT | | ||
318 | HA_LATT | HA_MBATT); | ||
319 | attentionConditions[1] = 0; | ||
320 | |||
321 | mb->un.varCfgMSI.attentionConditions[0] = attentionConditions[0]; | ||
322 | mb->un.varCfgMSI.attentionConditions[1] = attentionConditions[1]; | ||
323 | |||
324 | /* | ||
325 | * Set up message number to HA bit association | ||
326 | */ | ||
327 | #ifdef __BIG_ENDIAN_BITFIELD | ||
328 | /* RA0 (FCP Ring) */ | ||
329 | mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS] = 1; | ||
330 | /* RA1 (Other Protocol Extra Ring) */ | ||
331 | mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS] = 1; | ||
332 | #else /* __LITTLE_ENDIAN_BITFIELD */ | ||
333 | /* RA0 (FCP Ring) */ | ||
334 | mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS^3] = 1; | ||
335 | /* RA1 (Other Protocol Extra Ring) */ | ||
336 | mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS^3] = 1; | ||
337 | #endif | ||
338 | /* Multi-message interrupt autoclear configuration*/ | ||
339 | mb->un.varCfgMSI.autoClearHA[0] = attentionConditions[0]; | ||
340 | mb->un.varCfgMSI.autoClearHA[1] = attentionConditions[1]; | ||
341 | |||
342 | /* For now, HBA autoclear does not work reliably, disable it */ | ||
343 | mb->un.varCfgMSI.autoClearHA[0] = 0; | ||
344 | mb->un.varCfgMSI.autoClearHA[1] = 0; | ||
345 | |||
346 | /* Set command and owner bit */ | ||
347 | mb->mbxCommand = MBX_CONFIG_MSI; | ||
348 | mb->mbxOwner = OWN_HOST; | ||
349 | |||
350 | return 0; | ||
351 | } | ||
352 | |||
353 | /** | ||
354 | * lpfc_init_link: Prepare a mailbox command for initialize link on a HBA. | ||
355 | * @phba: pointer to lpfc hba data structure. | ||
356 | * @pmb: pointer to the driver internal queue element for mailbox command. | ||
357 | * @topology: the link topology for the link to be initialized to. | ||
358 | * @linkspeed: the link speed for the link to be initialized to. | ||
359 | * | ||
360 | * The initialize link mailbox command is used to initialize the Fibre | ||
361 | * Channel link. This command must follow a configure port command that | ||
362 | * establishes the mode of operation. | ||
363 | * | ||
364 | * This routine prepares the mailbox command for initializing link on a HBA | ||
365 | * with the specified link topology and speed. | ||
366 | **/ | ||
206 | void | 367 | void |
207 | lpfc_init_link(struct lpfc_hba * phba, | 368 | lpfc_init_link(struct lpfc_hba * phba, |
208 | LPFC_MBOXQ_t * pmb, uint32_t topology, uint32_t linkspeed) | 369 | LPFC_MBOXQ_t * pmb, uint32_t topology, uint32_t linkspeed) |
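One detail worth calling out in the new lpfc_config_msi() above is the messageNumberByHA[HA_R0_POS^3] indexing on little-endian builds: the adapter views the array as big-endian 32-bit words, so on a little-endian host the byte index is XOR-ed with 3 to land in the matching byte lane. A hedged, generic illustration (not lpfc code):

#include <asm/byteorder.h>

/* Write @msg into the byte slot for logical (big-endian) position @pos
 * inside an array the device interprets as big-endian 32-bit words. */
static inline void set_msg_for_pos(unsigned char *msg_by_ha, int pos,
				   unsigned char msg)
{
#ifdef __BIG_ENDIAN_BITFIELD
	msg_by_ha[pos] = msg;		/* byte lanes already match */
#else
	msg_by_ha[pos ^ 3] = msg;	/* swap byte lanes within each word */
#endif
}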
@@ -269,10 +430,27 @@ lpfc_init_link(struct lpfc_hba * phba, | |||
269 | return; | 430 | return; |
270 | } | 431 | } |
271 | 432 | ||
272 | /**********************************************/ | 433 | /** |
273 | /* lpfc_read_sparam Issue a READ SPARAM */ | 434 | * lpfc_read_sparam: Prepare a mailbox command for reading HBA parameters. |
274 | /* mailbox command */ | 435 | * @phba: pointer to lpfc hba data structure. |
275 | /**********************************************/ | 436 | * @pmb: pointer to the driver internal queue element for mailbox command. |
437 | * @vpi: virtual N_Port identifier. | ||
438 | * | ||
439 | * The read service parameter mailbox command is used to read the HBA port | ||
440 | * service parameters. The service parameters are read into the buffer | ||
441 | * specified directly by a BDE in the mailbox command. These service | ||
442 | * parameters may then be used to build the payload of an N_Port/F_Port | ||
443 | * login request and reply (LOGI/ACC). | ||
444 | * | ||
445 | * This routine prepares the mailbox command for reading HBA port service | ||
446 | * parameters. The DMA memory is allocated in this function and the addresses | ||
447 | * are populated into the mailbox command for the HBA to DMA the service | ||
448 | * parameters into. | ||
449 | * | ||
450 | * Return codes | ||
451 | * 0 - Success | ||
452 | * 1 - DMA memory allocation failed | ||
453 | **/ | ||
276 | int | 454 | int |
277 | lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi) | 455 | lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi) |
278 | { | 456 | { |
@@ -312,10 +490,21 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi) | |||
312 | return (0); | 490 | return (0); |
313 | } | 491 | } |
314 | 492 | ||
315 | /********************************************/ | 493 | /** |
316 | /* lpfc_unreg_did Issue a UNREG_DID */ | 494 | * lpfc_unreg_did: Prepare a mailbox command for unregistering DID. |
317 | /* mailbox command */ | 495 | * @phba: pointer to lpfc hba data structure. |
318 | /********************************************/ | 496 | * @vpi: virtual N_Port identifier. |
497 | * @did: remote port identifier. | ||
498 | * @pmb: pointer to the driver internal queue element for mailbox command. | ||
499 | * | ||
500 | * The unregister DID mailbox command is used to unregister an N_Port/F_Port | ||
501 | * login for an unknown RPI by specifying the DID of a remote port. This | ||
502 | * command frees an RPI context in the HBA port. This has the effect of | ||
503 | * performing an implicit N_Port/F_Port logout. | ||
504 | * | ||
505 | * This routine prepares the mailbox command for unregistering a remote | ||
506 | * N_Port/F_Port (DID) login. | ||
507 | **/ | ||
319 | void | 508 | void |
320 | lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did, | 509 | lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did, |
321 | LPFC_MBOXQ_t * pmb) | 510 | LPFC_MBOXQ_t * pmb) |
@@ -333,10 +522,19 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did, | |||
333 | return; | 522 | return; |
334 | } | 523 | } |
335 | 524 | ||
336 | /**********************************************/ | 525 | /** |
337 | /* lpfc_read_nv Issue a READ CONFIG */ | 526 | * lpfc_read_config: Prepare a mailbox command for reading HBA configuration. |
338 | /* mailbox command */ | 527 | * @phba: pointer to lpfc hba data structure. |
339 | /**********************************************/ | 528 | * @pmb: pointer to the driver internal queue element for mailbox command. |
529 | * | ||
530 | * The read configuration mailbox command is used to read the HBA port | ||
531 | * configuration parameters. This mailbox command provides a method for | ||
532 | * seeing any parameters that may have changed via various configuration | ||
533 | * mailbox commands. | ||
534 | * | ||
535 | * This routine prepares the mailbox command for reading out HBA configuration | ||
536 | * parameters. | ||
537 | **/ | ||
340 | void | 538 | void |
341 | lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | 539 | lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) |
342 | { | 540 | { |
@@ -350,10 +548,18 @@ lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | |||
350 | return; | 548 | return; |
351 | } | 549 | } |
352 | 550 | ||
353 | /*************************************************/ | 551 | /** |
354 | /* lpfc_read_lnk_stat Issue a READ LINK STATUS */ | 552 | * lpfc_read_lnk_stat: Prepare a mailbox command for reading HBA link stats. |
355 | /* mailbox command */ | 553 | * @phba: pointer to lpfc hba data structure. |
356 | /*************************************************/ | 554 | * @pmb: pointer to the driver internal queue element for mailbox command. |
555 | * | ||
556 | * The read link status mailbox command is used to read the link status from | ||
557 | * the HBA. Link status includes all link-related error counters. These | ||
558 | * counters are maintained by the HBA and originated in the link hardware | ||
559 | * unit. Note that all of these counters wrap. | ||
560 | * | ||
561 | * This routine prepares the mailbox command for reading out HBA link status. | ||
562 | **/ | ||
357 | void | 563 | void |
358 | lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | 564 | lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) |
359 | { | 565 | { |
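The note above that the link-status counters wrap is the usual reason such deltas are computed in unsigned 32-bit arithmetic, which stays correct across a single wrap between samples. A small, hedged illustration:

#include <linux/types.h>

/* Difference between two successive 32-bit counter readings; the
 * modulo-2^32 subtraction absorbs one wrap between samples. */
static inline u32 my_counter_delta(u32 newer, u32 older)
{
	return newer - older;
}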
@@ -367,10 +573,30 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | |||
367 | return; | 573 | return; |
368 | } | 574 | } |
369 | 575 | ||
370 | /********************************************/ | 576 | /** |
371 | /* lpfc_reg_login Issue a REG_LOGIN */ | 577 | * lpfc_reg_login: Prepare a mailbox command for registering remote login. |
372 | /* mailbox command */ | 578 | * @phba: pointer to lpfc hba data structure. |
373 | /********************************************/ | 579 | * @vpi: virtual N_Port identifier. |
580 | * @did: remote port identifier. | ||
581 | * @param: pointer to memory holding the server parameters. | ||
582 | * @pmb: pointer to the driver internal queue element for mailbox command. | ||
583 | * @flag: action flag to be passed back for the complete function. | ||
584 | * | ||
585 | * The registration login mailbox command is used to register an N_Port or | ||
586 | * F_Port login. This registration allows the HBA to cache the remote N_Port | ||
587 | * service parameters internally and thereby make the appropriate FC-2 | ||
588 | * decisions. The remote port service parameters are handed off by the driver | ||
589 | * to the HBA using a descriptor entry that directly identifies a buffer in | ||
590 | * host memory. In exchange, the HBA returns an RPI identifier. | ||
591 | * | ||
592 | * This routine prepares the mailbox command for registering remote port login. | ||
593 | * The function allocates DMA buffer for passing the service parameters to the | ||
594 | * HBA with the mailbox command. | ||
595 | * | ||
596 | * Return codes | ||
597 | * 0 - Success | ||
598 | * 1 - DMA memory allocation failed | ||
599 | **/ | ||
374 | int | 600 | int |
375 | lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did, | 601 | lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did, |
376 | uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag) | 602 | uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag) |
@@ -418,10 +644,20 @@ lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did, | |||
418 | return (0); | 644 | return (0); |
419 | } | 645 | } |
420 | 646 | ||
421 | /**********************************************/ | 647 | /** |
422 | /* lpfc_unreg_login Issue a UNREG_LOGIN */ | 648 | * lpfc_unreg_login: Prepare a mailbox command for unregistering remote login. |
423 | /* mailbox command */ | 649 | * @phba: pointer to lpfc hba data structure. |
424 | /**********************************************/ | 650 | * @vpi: virtual N_Port identifier. |
651 | * @rpi: remote port identifier | ||
652 | * @pmb: pointer to the driver internal queue element for mailbox command. | ||
653 | * | ||
654 | * The unregistration login mailbox command is used to unregister an N_Port | ||
655 | * or F_Port login. This command frees an RPI context in the HBA. It has the | ||
656 | * effect of performing an implicit N_Port/F_Port logout. | ||
657 | * | ||
658 | * This routine prepares the mailbox command for unregistering remote port | ||
659 | * login. | ||
660 | **/ | ||
425 | void | 661 | void |
426 | lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi, | 662 | lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi, |
427 | LPFC_MBOXQ_t * pmb) | 663 | LPFC_MBOXQ_t * pmb) |
@@ -440,10 +676,21 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi, | |||
440 | return; | 676 | return; |
441 | } | 677 | } |
442 | 678 | ||
443 | /**************************************************/ | 679 | /** |
444 | /* lpfc_reg_vpi Issue a REG_VPI */ | 680 | * lpfc_reg_vpi: Prepare a mailbox command for registering vport identifier. |
445 | /* mailbox command */ | 681 | * @phba: pointer to lpfc hba data structure. |
446 | /**************************************************/ | 682 | * @vpi: virtual N_Port identifier. |
683 | * @sid: Fibre Channel S_ID (N_Port_ID assigned to a virtual N_Port). | ||
684 | * @pmb: pointer to the driver internal queue element for mailbox command. | ||
685 | * | ||
686 | * The registration vport identifier mailbox command is used to activate a | ||
687 | * virtual N_Port after it has acquired an N_Port_ID. The HBA validates the | ||
688 | * N_Port_ID against the information in the selected virtual N_Port context | ||
689 | * block and marks it active to allow normal processing of IOCB commands and | ||
690 | * received unsolicited exchanges. | ||
691 | * | ||
692 | * This routine prepares the mailbox command for registering a virtual N_Port. | ||
693 | **/ | ||
447 | void | 694 | void |
448 | lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid, | 695 | lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid, |
449 | LPFC_MBOXQ_t *pmb) | 696 | LPFC_MBOXQ_t *pmb) |
@@ -461,10 +708,22 @@ lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid, | |||
461 | 708 | ||
462 | } | 709 | } |
463 | 710 | ||
464 | /**************************************************/ | 711 | /** |
465 | /* lpfc_unreg_vpi Issue a UNREG_VNPI */ | 712 | * lpfc_unreg_vpi: Prepare a mailbox command for unregistering vport id. |
466 | /* mailbox command */ | 713 | * @phba: pointer to lpfc hba data structure. |
467 | /**************************************************/ | 714 | * @vpi: virtual N_Port identifier. |
715 | * @pmb: pointer to the driver internal queue element for mailbox command. | ||
716 | * | ||
717 | * The unregistration vport identifier mailbox command is used to inactivate | ||
718 | * a virtual N_Port. The driver must have logged out and unregistered all | ||
719 | * remote N_Ports to abort any activity on the virtual N_Port. The HBA will | ||
720 | * unregister any default RPIs associated with the specified vpi, aborting | ||
721 | * any active exchanges. The HBA will post the mailbox response after making | ||
722 | * the virtual N_Port inactive. | ||
723 | * | ||
724 | * This routine prepares the mailbox command for unregistering a virtual | ||
725 | * N_Port. | ||
726 | **/ | ||
468 | void | 727 | void |
469 | lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb) | 728 | lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb) |
470 | { | 729 | { |
@@ -479,12 +738,19 @@ lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb) | |||
479 | 738 | ||
480 | } | 739 | } |
481 | 740 | ||
741 | /** | ||
742 | * lpfc_config_pcb_setup: Set up IOCB rings in the Port Control Block (PCB) | ||
743 | * @phba: pointer to lpfc hba data structure. | ||
744 | * | ||
745 | * This routine sets up and initializes the IOCB rings in the Port Control | ||
746 | * Block (PCB). | ||
747 | **/ | ||
482 | static void | 748 | static void |
483 | lpfc_config_pcb_setup(struct lpfc_hba * phba) | 749 | lpfc_config_pcb_setup(struct lpfc_hba * phba) |
484 | { | 750 | { |
485 | struct lpfc_sli *psli = &phba->sli; | 751 | struct lpfc_sli *psli = &phba->sli; |
486 | struct lpfc_sli_ring *pring; | 752 | struct lpfc_sli_ring *pring; |
487 | PCB_t *pcbp = &phba->slim2p->pcb; | 753 | PCB_t *pcbp = phba->pcb; |
488 | dma_addr_t pdma_addr; | 754 | dma_addr_t pdma_addr; |
489 | uint32_t offset; | 755 | uint32_t offset; |
490 | uint32_t iocbCnt = 0; | 756 | uint32_t iocbCnt = 0; |
@@ -513,29 +779,43 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba) | |||
513 | continue; | 779 | continue; |
514 | } | 780 | } |
515 | /* Command ring setup for ring */ | 781 | /* Command ring setup for ring */ |
516 | pring->cmdringaddr = (void *) &phba->slim2p->IOCBs[iocbCnt]; | 782 | pring->cmdringaddr = (void *)&phba->IOCBs[iocbCnt]; |
517 | pcbp->rdsc[i].cmdEntries = pring->numCiocb; | 783 | pcbp->rdsc[i].cmdEntries = pring->numCiocb; |
518 | 784 | ||
519 | offset = (uint8_t *) &phba->slim2p->IOCBs[iocbCnt] - | 785 | offset = (uint8_t *) &phba->IOCBs[iocbCnt] - |
520 | (uint8_t *) phba->slim2p; | 786 | (uint8_t *) phba->slim2p.virt; |
521 | pdma_addr = phba->slim2p_mapping + offset; | 787 | pdma_addr = phba->slim2p.phys + offset; |
522 | pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr); | 788 | pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr); |
523 | pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr); | 789 | pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr); |
524 | iocbCnt += pring->numCiocb; | 790 | iocbCnt += pring->numCiocb; |
525 | 791 | ||
526 | /* Response ring setup for ring */ | 792 | /* Response ring setup for ring */ |
527 | pring->rspringaddr = (void *) &phba->slim2p->IOCBs[iocbCnt]; | 793 | pring->rspringaddr = (void *) &phba->IOCBs[iocbCnt]; |
528 | 794 | ||
529 | pcbp->rdsc[i].rspEntries = pring->numRiocb; | 795 | pcbp->rdsc[i].rspEntries = pring->numRiocb; |
530 | offset = (uint8_t *)&phba->slim2p->IOCBs[iocbCnt] - | 796 | offset = (uint8_t *)&phba->IOCBs[iocbCnt] - |
531 | (uint8_t *)phba->slim2p; | 797 | (uint8_t *)phba->slim2p.virt; |
532 | pdma_addr = phba->slim2p_mapping + offset; | 798 | pdma_addr = phba->slim2p.phys + offset; |
533 | pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr); | 799 | pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr); |
534 | pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr); | 800 | pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr); |
535 | iocbCnt += pring->numRiocb; | 801 | iocbCnt += pring->numRiocb; |
536 | } | 802 | } |
537 | } | 803 | } |
538 | 804 | ||
805 | /** | ||
806 | * lpfc_read_rev: Prepare a mailbox command for reading HBA revision. | ||
807 | * @phba: pointer to lpfc hba data structure. | ||
808 | * @pmb: pointer to the driver internal queue element for mailbox command. | ||
809 | * | ||
810 | * The read revision mailbox command is used to read the revision levels of | ||
811 | * the HBA components. These components include hardware units, resident | ||
812 | * firmware, and available firmware. HBAs that support SLI-3 mode of | ||
813 | * operation provide different response information depending on the version | ||
814 | * requested by the driver. | ||
815 | * | ||
816 | * This routine prepares the mailbox command for reading HBA revision | ||
817 | * information. | ||
818 | **/ | ||
539 | void | 819 | void |
540 | lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | 820 | lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) |
541 | { | 821 | { |
@@ -548,6 +828,16 @@ lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | |||
548 | return; | 828 | return; |
549 | } | 829 | } |
550 | 830 | ||
831 | /** | ||
832 | * lpfc_build_hbq_profile2: Set up the HBQ Selection Profile 2. | ||
833 | * @hbqmb: pointer to the HBQ configuration data structure in mailbox command. | ||
834 | * @hbq_desc: pointer to the HBQ selection profile descriptor. | ||
835 | * | ||
836 | * The Host Buffer Queue (HBQ) Selection Profile 2 specifies that the HBA | ||
837 | * tests the incoming frames' R_CTL/TYPE fields with words 10:15 and performs | ||
838 | * the Sequence Length Test using the fields in the Selection Profile 2 | ||
839 | * extension in words 20:31. | ||
840 | **/ | ||
551 | static void | 841 | static void |
552 | lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb, | 842 | lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb, |
553 | struct lpfc_hbq_init *hbq_desc) | 843 | struct lpfc_hbq_init *hbq_desc) |
@@ -557,6 +847,16 @@ lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb, | |||
557 | hbqmb->profiles.profile2.seqlenoff = hbq_desc->seqlenoff; | 847 | hbqmb->profiles.profile2.seqlenoff = hbq_desc->seqlenoff; |
558 | } | 848 | } |
559 | 849 | ||
850 | /** | ||
851 | * lpfc_build_hbq_profile3: Set up the HBQ Selection Profile 3. | ||
852 | * @hbqmb: pointer to the HBQ configuration data structure in mailbox command. | ||
853 | * @hbq_desc: pointer to the HBQ selection profile descriptor. | ||
854 | * | ||
855 | * The Host Buffer Queue (HBQ) Selection Profile 3 specifies that the HBA | ||
856 | * tests the incoming frame's R_CTL/TYPE fields with words 10:15 and performs | ||
857 | * the Sequence Length Test and Byte Field Test using the fields in the | ||
858 | * Selection Profile 3 extension in words 20:31. | ||
859 | **/ | ||
560 | static void | 860 | static void |
561 | lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb, | 861 | lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb, |
562 | struct lpfc_hbq_init *hbq_desc) | 862 | struct lpfc_hbq_init *hbq_desc) |
@@ -569,6 +869,17 @@ lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb, | |||
569 | sizeof(hbqmb->profiles.profile3.cmdmatch)); | 869 | sizeof(hbqmb->profiles.profile3.cmdmatch)); |
570 | } | 870 | } |
571 | 871 | ||
872 | /** | ||
873 | * lpfc_build_hbq_profile5: Set up the HBQ Selection Profile 5. | ||
874 | * @hbqmb: pointer to the HBQ configuration data structure in mailbox command. | ||
875 | * @hbq_desc: pointer to the HBQ selection profile descriptor. | ||
876 | * | ||
877 | * The Host Buffer Queue (HBQ) Selection Profile 5 specifies a header HBQ. The | ||
878 | * HBA tests the initial frame of an incoming sequence using the frame's | ||
879 | * R_CTL/TYPE fields with words 10:15 and performs the Sequence Length Test | ||
880 | * and Byte Field Test using the fields in the Selection Profile 5 extension | ||
881 | * words 20:31. | ||
882 | **/ | ||
572 | static void | 883 | static void |
573 | lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb, | 884 | lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb, |
574 | struct lpfc_hbq_init *hbq_desc) | 885 | struct lpfc_hbq_init *hbq_desc) |
@@ -581,6 +892,20 @@ lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb, | |||
581 | sizeof(hbqmb->profiles.profile5.cmdmatch)); | 892 | sizeof(hbqmb->profiles.profile5.cmdmatch)); |
582 | } | 893 | } |
583 | 894 | ||
895 | /** | ||
896 | * lpfc_config_hbq: Prepare a mailbox command for configuring an HBQ. | ||
897 | * @phba: pointer to lpfc hba data structure. | ||
898 | * @id: HBQ identifier. | ||
899 | * @hbq_desc: pointer to the HBA descriptor data structure. | ||
900 | * @hbq_entry_index: index of the HBQ entry data structures. | ||
901 | * @pmb: pointer to the driver internal queue element for mailbox command. | ||
902 | * | ||
903 | * The configure HBQ (Host Buffer Queue) mailbox command is used to configure | ||
904 | * an HBQ. The configuration binds events that require buffers to a particular | ||
905 | * ring and HBQ based on a selection profile. | ||
906 | * | ||
907 | * This routine prepares the mailbox command for configuring an HBQ. | ||
908 | **/ | ||
584 | void | 909 | void |
585 | lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id, | 910 | lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id, |
586 | struct lpfc_hbq_init *hbq_desc, | 911 | struct lpfc_hbq_init *hbq_desc, |
@@ -641,8 +966,23 @@ lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id, | |||
641 | return; | 966 | return; |
642 | } | 967 | } |
643 | 968 | ||
644 | 969 | /** | |
645 | 970 | * lpfc_config_ring: Prepare a mailbox command for configuring an IOCB ring. | |
971 | * @phba: pointer to lpfc hba data structure. | ||
972 | * @ring: ring number to configure. | ||
973 | * @pmb: pointer to the driver internal queue element for mailbox command. | ||
974 | * | ||
975 | * The configure ring mailbox command is used to configure an IOCB ring. This | ||
976 | * configuration binds one to six of the HBA's R_CTL/TYPE mask entries to the | ||
977 | * ring. This is used to map incoming sequences to a particular ring whose | ||
978 | * R_CTL/TYPE mask entry matches that of the sequence. The driver should not | ||
979 | * attempt to configure a ring whose number is greater than the number | ||
980 | * specified in the Port Control Block (PCB). It is an error to issue the | ||
981 | * configure ring command more than once with the same ring number. The HBA | ||
982 | * returns an error if the driver attempts this. | ||
983 | * | ||
984 | * This routine prepares the mailbox command for configuring IOCB ring. | ||
985 | **/ | ||
646 | void | 986 | void |
647 | lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb) | 987 | lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb) |
648 | { | 988 | { |
@@ -684,6 +1024,20 @@ lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb) | |||
684 | return; | 1024 | return; |
685 | } | 1025 | } |
686 | 1026 | ||
1027 | /** | ||
1028 | * lpfc_config_port: Prepare a mailbox command for configuring port. | ||
1029 | * @phba: pointer to lpfc hba data structure. | ||
1030 | * @pmb: pointer to the driver internal queue element for mailbox command. | ||
1031 | * | ||
1032 | * The configure port mailbox command is used to identify the Port Control | ||
1033 | * Block (PCB) in the driver memory. After this command is issued, the | ||
1034 | * driver must not access the mailbox in the HBA without first resetting | ||
1035 | * the HBA. The HBA may copy the PCB information to internal storage for | ||
1036 | * subsequent use; the driver can not change the PCB information unless it | ||
1037 | * resets the HBA. | ||
1038 | * | ||
1039 | * This routine prepares the mailbox command for configuring port. | ||
1040 | **/ | ||
687 | void | 1041 | void |
688 | lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | 1042 | lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) |
689 | { | 1043 | { |
@@ -702,8 +1056,8 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
702 | 1056 | ||
703 | mb->un.varCfgPort.pcbLen = sizeof(PCB_t); | 1057 | mb->un.varCfgPort.pcbLen = sizeof(PCB_t); |
704 | 1058 | ||
705 | offset = (uint8_t *)&phba->slim2p->pcb - (uint8_t *)phba->slim2p; | 1059 | offset = (uint8_t *)phba->pcb - (uint8_t *)phba->slim2p.virt; |
706 | pdma_addr = phba->slim2p_mapping + offset; | 1060 | pdma_addr = phba->slim2p.phys + offset; |
707 | mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr); | 1061 | mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr); |
708 | mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr); | 1062 | mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr); |
709 | 1063 | ||
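The offset arithmetic in the hunk above (phba->pcb minus slim2p.virt, added to slim2p.phys) is the addressing idiom used throughout this rework: the PCB, mailbox and IOCB rings are carved out of a single dma_alloc_coherent() region, so the bus address of any member is the region's DMA handle plus the member's offset from the CPU-visible base. A hedged, generic sketch with placeholder names:

#include <linux/dma-mapping.h>

/* Bus address of @member, given the CPU and DMA addresses of the
 * coherent region it was carved out of. */
static dma_addr_t my_member_dma(void *base_virt, dma_addr_t base_dma,
				void *member)
{
	return base_dma + ((u8 *)member - (u8 *)base_virt);
}

/* usage (assuming the region came from dma_alloc_coherent()):
 *	pcb_dma = my_member_dma(region_virt, region_dma, pcb_ptr);
 */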
@@ -711,12 +1065,13 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
711 | 1065 | ||
712 | if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) { | 1066 | if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) { |
713 | mb->un.varCfgPort.cerbm = 1; /* Request HBQs */ | 1067 | mb->un.varCfgPort.cerbm = 1; /* Request HBQs */ |
1068 | mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */ | ||
1069 | mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */ | ||
714 | mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count(); | 1070 | mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count(); |
715 | if (phba->max_vpi && phba->cfg_enable_npiv && | 1071 | if (phba->max_vpi && phba->cfg_enable_npiv && |
716 | phba->vpd.sli3Feat.cmv) { | 1072 | phba->vpd.sli3Feat.cmv) { |
717 | mb->un.varCfgPort.max_vpi = phba->max_vpi; | 1073 | mb->un.varCfgPort.max_vpi = phba->max_vpi; |
718 | mb->un.varCfgPort.cmv = 1; | 1074 | mb->un.varCfgPort.cmv = 1; |
719 | phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; | ||
720 | } else | 1075 | } else |
721 | mb->un.varCfgPort.max_vpi = phba->max_vpi = 0; | 1076 | mb->un.varCfgPort.max_vpi = phba->max_vpi = 0; |
722 | } else | 1077 | } else |
@@ -724,16 +1079,15 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
724 | mb->un.varCfgPort.sli_mode = phba->sli_rev; | 1079 | mb->un.varCfgPort.sli_mode = phba->sli_rev; |
725 | 1080 | ||
726 | /* Now setup pcb */ | 1081 | /* Now setup pcb */ |
727 | phba->slim2p->pcb.type = TYPE_NATIVE_SLI2; | 1082 | phba->pcb->type = TYPE_NATIVE_SLI2; |
728 | phba->slim2p->pcb.feature = FEATURE_INITIAL_SLI2; | 1083 | phba->pcb->feature = FEATURE_INITIAL_SLI2; |
729 | 1084 | ||
730 | /* Setup Mailbox pointers */ | 1085 | /* Setup Mailbox pointers */ |
731 | phba->slim2p->pcb.mailBoxSize = offsetof(MAILBOX_t, us) + | 1086 | phba->pcb->mailBoxSize = sizeof(MAILBOX_t); |
732 | sizeof(struct sli2_desc); | 1087 | offset = (uint8_t *)phba->mbox - (uint8_t *)phba->slim2p.virt; |
733 | offset = (uint8_t *)&phba->slim2p->mbx - (uint8_t *)phba->slim2p; | 1088 | pdma_addr = phba->slim2p.phys + offset; |
734 | pdma_addr = phba->slim2p_mapping + offset; | 1089 | phba->pcb->mbAddrHigh = putPaddrHigh(pdma_addr); |
735 | phba->slim2p->pcb.mbAddrHigh = putPaddrHigh(pdma_addr); | 1090 | phba->pcb->mbAddrLow = putPaddrLow(pdma_addr); |
736 | phba->slim2p->pcb.mbAddrLow = putPaddrLow(pdma_addr); | ||
737 | 1091 | ||
738 | /* | 1092 | /* |
739 | * Setup Host Group ring pointer. | 1093 | * Setup Host Group ring pointer. |
@@ -794,13 +1148,13 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
794 | } | 1148 | } |
795 | 1149 | ||
796 | /* mask off BAR0's flag bits 0 - 3 */ | 1150 | /* mask off BAR0's flag bits 0 - 3 */ |
797 | phba->slim2p->pcb.hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) + | 1151 | phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) + |
798 | (void __iomem *) phba->host_gp - | 1152 | (void __iomem *)phba->host_gp - |
799 | (void __iomem *)phba->MBslimaddr; | 1153 | (void __iomem *)phba->MBslimaddr; |
800 | if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64) | 1154 | if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64) |
801 | phba->slim2p->pcb.hgpAddrHigh = bar_high; | 1155 | phba->pcb->hgpAddrHigh = bar_high; |
802 | else | 1156 | else |
803 | phba->slim2p->pcb.hgpAddrHigh = 0; | 1157 | phba->pcb->hgpAddrHigh = 0; |
804 | /* write HGP data to SLIM at the required longword offset */ | 1158 | /* write HGP data to SLIM at the required longword offset */ |
805 | memset(&hgp, 0, sizeof(struct lpfc_hgp)); | 1159 | memset(&hgp, 0, sizeof(struct lpfc_hgp)); |
806 | 1160 | ||
@@ -810,17 +1164,19 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
810 | } | 1164 | } |
811 | 1165 | ||
812 | /* Setup Port Group ring pointer */ | 1166 | /* Setup Port Group ring pointer */ |
813 | if (phba->sli_rev == 3) | 1167 | if (phba->sli3_options & LPFC_SLI3_INB_ENABLED) { |
814 | pgp_offset = (uint8_t *)&phba->slim2p->mbx.us.s3_pgp.port - | 1168 | pgp_offset = offsetof(struct lpfc_sli2_slim, |
815 | (uint8_t *)phba->slim2p; | 1169 | mbx.us.s3_inb_pgp.port); |
816 | else | 1170 | phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get; |
817 | pgp_offset = (uint8_t *)&phba->slim2p->mbx.us.s2.port - | 1171 | } else if (phba->sli_rev == 3) { |
818 | (uint8_t *)phba->slim2p; | 1172 | pgp_offset = offsetof(struct lpfc_sli2_slim, |
819 | 1173 | mbx.us.s3_pgp.port); | |
820 | pdma_addr = phba->slim2p_mapping + pgp_offset; | 1174 | phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get; |
821 | phba->slim2p->pcb.pgpAddrHigh = putPaddrHigh(pdma_addr); | 1175 | } else |
822 | phba->slim2p->pcb.pgpAddrLow = putPaddrLow(pdma_addr); | 1176 | pgp_offset = offsetof(struct lpfc_sli2_slim, mbx.us.s2.port); |
823 | phba->hbq_get = &phba->slim2p->mbx.us.s3_pgp.hbq_get[0]; | 1177 | pdma_addr = phba->slim2p.phys + pgp_offset; |
1178 | phba->pcb->pgpAddrHigh = putPaddrHigh(pdma_addr); | ||
1179 | phba->pcb->pgpAddrLow = putPaddrLow(pdma_addr); | ||
824 | 1180 | ||
825 | /* Use callback routine to setup rings in the pcb */ | 1181 | /* Use callback routine to setup rings in the pcb */ |
826 | lpfc_config_pcb_setup(phba); | 1182 | lpfc_config_pcb_setup(phba); |
@@ -835,10 +1191,24 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
835 | } | 1191 | } |
836 | 1192 | ||
837 | /* Swap PCB if needed */ | 1193 | /* Swap PCB if needed */ |
838 | lpfc_sli_pcimem_bcopy(&phba->slim2p->pcb, &phba->slim2p->pcb, | 1194 | lpfc_sli_pcimem_bcopy(phba->pcb, phba->pcb, sizeof(PCB_t)); |
839 | sizeof(PCB_t)); | ||
840 | } | 1195 | } |
841 | 1196 | ||
1197 | /** | ||
1198 | * lpfc_kill_board: Prepare a mailbox command for killing board. | ||
1199 | * @phba: pointer to lpfc hba data structure. | ||
1200 | * @pmb: pointer to the driver internal queue element for mailbox command. | ||
1201 | * | ||
1202 | * The kill board mailbox command is used to tell firmware to perform a | ||
1203 | * graceful shutdown of a channel on a specified board to prepare for reset. | ||
1204 | * When the kill board mailbox command is received, the ER3 bit is set to 1 | ||
1205 | * in the Host Status register and the ER Attention bit is set to 1 in the | ||
1206 | * Host Attention register of the HBA function that received the kill board | ||
1207 | * command. | ||
1208 | * | ||
1209 | * This routine prepares the mailbox command for killing the board in | ||
1210 | * preparation for a graceful shutdown. | ||
1211 | **/ | ||
842 | void | 1212 | void |
843 | lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | 1213 | lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) |
844 | { | 1214 | { |
@@ -850,6 +1220,16 @@ lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | |||
850 | return; | 1220 | return; |
851 | } | 1221 | } |
852 | 1222 | ||
1223 | /** | ||
1224 | * lpfc_mbox_put: Put a mailbox cmd into the tail of driver's mailbox queue. | ||
1225 | * @phba: pointer to lpfc hba data structure. | ||
1226 | * @mbq: pointer to the driver internal queue element for mailbox command. | ||
1227 | * | ||
1228 | * Driver maintains a internal mailbox command queue implemented as a linked | ||
1229 | * list. When a mailbox command is issued, it shall be put into the mailbox | ||
1230 | * command queue such that they shall be processed orderly as HBA can process | ||
1231 | * one mailbox command at a time. | ||
1232 | **/ | ||
853 | void | 1233 | void |
854 | lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq) | 1234 | lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq) |
855 | { | 1235 | { |
@@ -864,6 +1244,20 @@ lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq) | |||
864 | return; | 1244 | return; |
865 | } | 1245 | } |
866 | 1246 | ||
1247 | /** | ||
1248 | * lpfc_mbox_get: Remove a mailbox cmd from the head of driver's mailbox queue. | ||
1249 | * @phba: pointer to lpfc hba data structure. | ||
1250 | * | ||
1251 | * The driver maintains an internal mailbox command queue implemented as a | ||
1252 | * linked list. When a mailbox command is issued, it is put into the mailbox | ||
1253 | * command queue so that commands are processed in order, as the HBA can | ||
1254 | * process only one mailbox command at a time. After the HBA has finished | ||
1255 | * processing a mailbox command, the driver removes the next pending mailbox | ||
1256 | * command from the head of the queue and sends it to the HBA for processing. | ||
1257 | * | ||
1258 | * Return codes | ||
1259 | * pointer to the driver internal queue element for mailbox command. | ||
1260 | **/ | ||
867 | LPFC_MBOXQ_t * | 1261 | LPFC_MBOXQ_t * |
868 | lpfc_mbox_get(struct lpfc_hba * phba) | 1262 | lpfc_mbox_get(struct lpfc_hba * phba) |
869 | { | 1263 | { |
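The two kernel-doc blocks above describe a plain FIFO: commands are queued at the tail and dispatched from the head one at a time, because the HBA can process only a single mailbox command at once. A hedged sketch of that structure using the kernel list API (locking omitted for brevity; names are placeholders, not the lpfc types):

#include <linux/list.h>

struct my_mboxq {
	struct list_head list;
	/* command payload would live here */
};

static LIST_HEAD(my_mbox_queue);

static void my_mbox_put(struct my_mboxq *mbq)
{
	list_add_tail(&mbq->list, &my_mbox_queue);	/* enqueue at the tail */
}

static struct my_mboxq *my_mbox_get(void)
{
	struct my_mboxq *mbq = NULL;

	if (!list_empty(&my_mbox_queue)) {
		mbq = list_first_entry(&my_mbox_queue, struct my_mboxq, list);
		list_del(&mbq->list);			/* dequeue from the head */
	}
	return mbq;
}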
@@ -877,6 +1271,17 @@ lpfc_mbox_get(struct lpfc_hba * phba) | |||
877 | return mbq; | 1271 | return mbq; |
878 | } | 1272 | } |
879 | 1273 | ||
1274 | /** | ||
1275 | * lpfc_mbox_cmpl_put: Put mailbox command into mailbox command complete list. | ||
1276 | * @phba: pointer to lpfc hba data structure. | ||
1277 | * @mbq: pointer to the driver internal queue element for mailbox command. | ||
1278 | * | ||
1279 | * This routine puts the completed mailbox command into the mailbox command | ||
1280 | * complete list. This routine is called from driver interrupt handler | ||
1281 | * context. The mailbox complete list is used by the driver worker thread | ||
1282 | * to process mailbox complete callback functions outside the driver interrupt | ||
1283 | * handler. | ||
1284 | **/ | ||
880 | void | 1285 | void |
881 | lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq) | 1286 | lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq) |
882 | { | 1287 | { |
@@ -887,6 +1292,17 @@ lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq) | |||
887 | return; | 1292 | return; |
888 | } | 1293 | } |
889 | 1294 | ||
1295 | /** | ||
1296 | * lpfc_mbox_tmo_val: Retrieve mailbox command timeout value. | ||
1297 | * @phba: pointer to lpfc hba data structure. | ||
1298 | * @cmd: mailbox command code. | ||
1299 | * | ||
1300 | * This routine retrieves the proper timeout value according to the mailbox | ||
1301 | * command code. | ||
1302 | * | ||
1303 | * Return codes | ||
1304 | * Timeout value to be used for the given mailbox command | ||
1305 | **/ | ||
890 | int | 1306 | int |
891 | lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd) | 1307 | lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd) |
892 | { | 1308 | { |
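
lpfc_mbox_tmo_val, documented just above, maps a mailbox command code to a timeout. A minimal sketch of that idea follows; the command codes and timeout values here are made up for illustration and are not the driver's real table.

	/* Hypothetical command codes and timeouts, for illustration only. */
	#define MY_MBX_SLOW_CMD		0x25
	#define MY_MBX_TMO_SLOW		300	/* seconds for long-running commands */
	#define MY_MBX_TMO_DEFAULT	30	/* seconds for everything else */

	static int my_mbox_tmo_val(int cmd)
	{
		switch (cmd) {
		case MY_MBX_SLOW_CMD:
			return MY_MBX_TMO_SLOW;
		default:
			return MY_MBX_TMO_DEFAULT;
		}
	}
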
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index 3c0cebc71800..a4bba2069248 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2006 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2008 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -30,6 +30,7 @@ | |||
30 | 30 | ||
31 | #include "lpfc_hw.h" | 31 | #include "lpfc_hw.h" |
32 | #include "lpfc_sli.h" | 32 | #include "lpfc_sli.h" |
33 | #include "lpfc_nl.h" | ||
33 | #include "lpfc_disc.h" | 34 | #include "lpfc_disc.h" |
34 | #include "lpfc_scsi.h" | 35 | #include "lpfc_scsi.h" |
35 | #include "lpfc.h" | 36 | #include "lpfc.h" |
@@ -39,7 +40,21 @@ | |||
39 | #define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */ | 40 | #define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */ |
40 | 41 | ||
41 | 42 | ||
42 | 43 | /** | |
44 | * lpfc_mem_alloc: create and allocate all PCI and memory pools | ||
45 | * @phba: HBA to allocate pools for | ||
46 | * | ||
47 | * Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool, | ||
48 | * lpfc_mbuf_pool, lpfc_hbq_pool. Creates and allocates kmalloc-backed mempools | ||
49 | * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask. | ||
50 | * | ||
51 | * Notes: Not interrupt-safe. Must be called with no locks held. If any | ||
52 | * allocation fails, frees all successfully allocated memory before returning. | ||
53 | * | ||
54 | * Returns: | ||
55 | * 0 on success | ||
56 | * -ENOMEM on failure (if any memory allocations fail) | ||
57 | **/ | ||
43 | int | 58 | int |
44 | lpfc_mem_alloc(struct lpfc_hba * phba) | 59 | lpfc_mem_alloc(struct lpfc_hba * phba) |
45 | { | 60 | { |
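
The allocation contract described in the lpfc_mem_alloc comment (all pools are created up front, and any failure unwinds whatever was already created) is the usual goto-unwind pattern. A generic sketch under that assumption, using the pci_pool and mempool APIs the file relies on, might look like the following; the names are illustrative, not the driver's.

	#include <linux/pci.h>
	#include <linux/mempool.h>

	struct my_pools {
		struct pci_pool *dma_buf_pool;
		mempool_t *cmd_pool;
	};

	static int my_pools_alloc(struct pci_dev *pdev, struct my_pools *p,
				  size_t dma_buf_size, size_t cmd_size)
	{
		/* DMA-able buffers come from a PCI pool tied to the device. */
		p->dma_buf_pool = pci_pool_create("my_dma_buf_pool", pdev,
						  dma_buf_size, 8, 0);
		if (!p->dma_buf_pool)
			goto fail;

		/* Non-DMA command wrappers come from a kmalloc-backed mempool. */
		p->cmd_pool = mempool_create_kmalloc_pool(4, cmd_size);
		if (!p->cmd_pool)
			goto fail_free_dma_pool;

		return 0;

	fail_free_dma_pool:
		pci_pool_destroy(p->dma_buf_pool);
		p->dma_buf_pool = NULL;
	fail:
		return -ENOMEM;
	}
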
@@ -120,6 +135,16 @@ lpfc_mem_alloc(struct lpfc_hba * phba) | |||
120 | return -ENOMEM; | 135 | return -ENOMEM; |
121 | } | 136 | } |
122 | 137 | ||
138 | /** | ||
139 | * lpfc_mem_free: Frees all PCI and memory allocated by lpfc_mem_alloc | ||
140 | * @phba: HBA to free memory for | ||
141 | * | ||
142 | * Description: Frees PCI pools lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool, | ||
143 | * lpfc_hbq_pool. Frees kmalloc-backed mempools for LPFC_MBOXQ_t and | ||
144 | * lpfc_nodelist. Also frees the VPI bitmask. | ||
145 | * | ||
146 | * Returns: None | ||
147 | **/ | ||
123 | void | 148 | void |
124 | lpfc_mem_free(struct lpfc_hba * phba) | 149 | lpfc_mem_free(struct lpfc_hba * phba) |
125 | { | 150 | { |
@@ -181,12 +206,29 @@ lpfc_mem_free(struct lpfc_hba * phba) | |||
181 | phba->lpfc_scsi_dma_buf_pool = NULL; | 206 | phba->lpfc_scsi_dma_buf_pool = NULL; |
182 | phba->lpfc_mbuf_pool = NULL; | 207 | phba->lpfc_mbuf_pool = NULL; |
183 | 208 | ||
184 | /* Free the iocb lookup array */ | 209 | /* Free the iocb lookup array */ |
185 | kfree(psli->iocbq_lookup); | 210 | kfree(psli->iocbq_lookup); |
186 | psli->iocbq_lookup = NULL; | 211 | psli->iocbq_lookup = NULL; |
187 | |||
188 | } | 212 | } |
189 | 213 | ||
214 | /** | ||
215 | * lpfc_mbuf_alloc: Allocate an mbuf from the lpfc_mbuf_pool PCI pool | ||
216 | * @phba: HBA which owns the pool to allocate from | ||
217 | * @mem_flags: indicates if this is a priority (MEM_PRI) allocation | ||
218 | * @handle: used to return the DMA-mapped address of the mbuf | ||
219 | * | ||
220 | * Description: Allocates a DMA-mapped buffer from the lpfc_mbuf_pool PCI pool. | ||
221 | * Allocates from generic pci_pool_alloc function first and if that fails and | ||
222 | * mem_flags has MEM_PRI set (the only defined flag), returns an mbuf from the | ||
223 | * HBA's pool. | ||
224 | * | ||
225 | * Notes: Not interrupt-safe. Must be called with no locks held. Takes | ||
226 | * phba->hbalock. | ||
227 | * | ||
228 | * Returns: | ||
229 | * pointer to the allocated mbuf on success | ||
230 | * NULL on failure | ||
231 | **/ | ||
190 | void * | 232 | void * |
191 | lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle) | 233 | lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle) |
192 | { | 234 | { |
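
The MEM_PRI fallback described above is a small "safety pool" held in reserve for allocations that must not fail under memory pressure. A hedged sketch of that pattern follows; it is simplified, uses hypothetical structures rather than the driver's, and omits the locking the real routine depends on.

	#include <linux/pci.h>

	#define MY_MEM_PRI	0x1	/* priority allocation may use the safety pool */
	#define MY_SAFETY_MAX	8

	struct my_safety_pool {
		int current_count;			/* reserved buffers available */
		void *elements[MY_SAFETY_MAX];
		dma_addr_t phys[MY_SAFETY_MAX];
	};

	static void *my_buf_alloc(struct pci_pool *pool, struct my_safety_pool *safety,
				  int mem_flags, dma_addr_t *handle)
	{
		void *ret = pci_pool_alloc(pool, GFP_KERNEL, handle);

		/* On failure, a priority request may take a buffer held in reserve. */
		if (!ret && (mem_flags & MY_MEM_PRI) && safety->current_count > 0) {
			safety->current_count--;
			ret = safety->elements[safety->current_count];
			*handle = safety->phys[safety->current_count];
		}
		return ret;
	}
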
@@ -206,6 +248,20 @@ lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle) | |||
206 | return ret; | 248 | return ret; |
207 | } | 249 | } |
208 | 250 | ||
251 | /** | ||
252 | * __lpfc_mbuf_free: Free an mbuf from the lpfc_mbuf_pool PCI pool (locked) | ||
253 | * @phba: HBA which owns the pool to return to | ||
254 | * @virt: mbuf to free | ||
255 | * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed | ||
256 | * | ||
257 | * Description: Returns the mbuf to the lpfc_mbuf_safety_pool if the safety | ||
258 | * pool is below its max_count; otherwise frees the mbuf. | ||
259 | * | ||
260 | * Notes: Must be called with phba->hbalock held to synchronize access to | ||
261 | * lpfc_mbuf_safety_pool. | ||
262 | * | ||
263 | * Returns: None | ||
264 | **/ | ||
209 | void | 265 | void |
210 | __lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma) | 266 | __lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma) |
211 | { | 267 | { |
@@ -221,7 +277,21 @@ __lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma) | |||
221 | return; | 277 | return; |
222 | } | 278 | } |
223 | 279 | ||
280 | /** | ||
281 | * lpfc_mbuf_free: Free an mbuf from the lpfc_mbuf_pool PCI pool (unlocked) | ||
282 | * @phba: HBA which owns the pool to return to | ||
283 | * @virt: mbuf to free | ||
284 | * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed | ||
285 | * | ||
286 | * Description: Returns the mbuf to the lpfc_mbuf_safety_pool if the safety | ||
287 | * pool is below its max_count; otherwise frees the mbuf. | ||
288 | * | ||
289 | * Notes: Takes phba->hbalock. Can be called with or without other locks held. | ||
290 | * | ||
291 | * Returns: None | ||
292 | **/ | ||
224 | void | 293 | void |
294 | |||
225 | lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma) | 295 | lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma) |
226 | { | 296 | { |
227 | unsigned long iflags; | 297 | unsigned long iflags; |
@@ -232,6 +302,19 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma) | |||
232 | return; | 302 | return; |
233 | } | 303 | } |
234 | 304 | ||
305 | /** | ||
306 | * lpfc_els_hbq_alloc: Allocate an HBQ buffer | ||
307 | * @phba: HBA to allocate HBQ buffer for | ||
308 | * | ||
309 | * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hbq_pool PCI | ||
310 | * pool along with a non-DMA-mapped container for it. | ||
311 | * | ||
312 | * Notes: Not interrupt-safe. Must be called with no locks held. | ||
313 | * | ||
314 | * Returns: | ||
315 | * pointer to HBQ on success | ||
316 | * NULL on failure | ||
317 | **/ | ||
235 | struct hbq_dmabuf * | 318 | struct hbq_dmabuf * |
236 | lpfc_els_hbq_alloc(struct lpfc_hba *phba) | 319 | lpfc_els_hbq_alloc(struct lpfc_hba *phba) |
237 | { | 320 | { |
@@ -251,6 +334,18 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba) | |||
251 | return hbqbp; | 334 | return hbqbp; |
252 | } | 335 | } |
253 | 336 | ||
337 | /** | ||
338 | * lpfc_els_hbq_free: Frees an HBQ buffer allocated with lpfc_els_hbq_alloc | ||
339 | * @phba: HBA buffer was allocated for | ||
340 | * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc | ||
341 | * | ||
342 | * Description: Frees both the container and the DMA-mapped buffer returned by | ||
343 | * lpfc_els_hbq_alloc. | ||
344 | * | ||
345 | * Notes: Can be called with or without locks held. | ||
346 | * | ||
347 | * Returns: None | ||
348 | **/ | ||
254 | void | 349 | void |
255 | lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp) | 350 | lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp) |
256 | { | 351 | { |
@@ -259,7 +354,18 @@ lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp) | |||
259 | return; | 354 | return; |
260 | } | 355 | } |
261 | 356 | ||
262 | /* This is ONLY called for the LPFC_ELS_HBQ */ | 357 | /** |
358 | * lpfc_in_buf_free: Free a DMA buffer | ||
359 | * @phba: HBA buffer is associated with | ||
360 | * @mp: Buffer to free | ||
361 | * | ||
362 | * Description: Frees the given DMA buffer in the appropriate way given if the | ||
363 | * HBA is running in SLI3 mode with HBQs enabled. | ||
364 | * | ||
365 | * Notes: Takes phba->hbalock. Can be called with or without other locks held. | ||
366 | * | ||
367 | * Returns: None | ||
368 | **/ | ||
263 | void | 369 | void |
264 | lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp) | 370 | lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp) |
265 | { | 371 | { |
diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h new file mode 100644 index 000000000000..1accb5a9f4e6 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_nl.h | |||
@@ -0,0 +1,163 @@ | |||
1 | /******************************************************************* | ||
2 | * This file is part of the Emulex Linux Device Driver for * | ||
3 | * Fibre Channel Host Bus Adapters. * | ||
4 | * Copyright (C) 2008 Emulex. All rights reserved. * | ||
5 | * EMULEX and SLI are trademarks of Emulex. * | ||
6 | * www.emulex.com * | ||
7 | * * | ||
8 | * This program is free software; you can redistribute it and/or * | ||
9 | * modify it under the terms of version 2 of the GNU General * | ||
10 | * Public License as published by the Free Software Foundation. * | ||
11 | * This program is distributed in the hope that it will be useful. * | ||
12 | * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * | ||
13 | * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * | ||
15 | * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * | ||
16 | * TO BE LEGALLY INVALID. See the GNU General Public License for * | ||
17 | * more details, a copy of which can be found in the file COPYING * | ||
18 | * included with this package. * | ||
19 | *******************************************************************/ | ||
20 | |||
21 | /* Event definitions for RegisterForEvent */ | ||
22 | #define FC_REG_LINK_EVENT 0x0001 /* link up / down events */ | ||
23 | #define FC_REG_RSCN_EVENT 0x0002 /* RSCN events */ | ||
24 | #define FC_REG_CT_EVENT 0x0004 /* CT request events */ | ||
25 | #define FC_REG_DUMP_EVENT 0x0008 /* Dump events */ | ||
26 | #define FC_REG_TEMPERATURE_EVENT 0x0010 /* temperature events */ | ||
27 | #define FC_REG_ELS_EVENT 0x0020 /* lpfc els events */ | ||
28 | #define FC_REG_FABRIC_EVENT 0x0040 /* lpfc fabric events */ | ||
29 | #define FC_REG_SCSI_EVENT 0x0080 /* lpfc scsi events */ | ||
30 | #define FC_REG_BOARD_EVENT 0x0100 /* lpfc board events */ | ||
31 | #define FC_REG_ADAPTER_EVENT 0x0200 /* lpfc adapter events */ | ||
32 | #define FC_REG_EVENT_MASK (FC_REG_LINK_EVENT | \ | ||
33 | FC_REG_RSCN_EVENT | \ | ||
34 | FC_REG_CT_EVENT | \ | ||
35 | FC_REG_DUMP_EVENT | \ | ||
36 | FC_REG_TEMPERATURE_EVENT | \ | ||
37 | FC_REG_ELS_EVENT | \ | ||
38 | FC_REG_FABRIC_EVENT | \ | ||
39 | FC_REG_SCSI_EVENT | \ | ||
40 | FC_REG_BOARD_EVENT | \ | ||
41 | FC_REG_ADAPTER_EVENT) | ||
42 | /* Temperature events */ | ||
43 | #define LPFC_CRIT_TEMP 0x1 | ||
44 | #define LPFC_THRESHOLD_TEMP 0x2 | ||
45 | #define LPFC_NORMAL_TEMP 0x3 | ||
46 | /* | ||
47 | * All net link event payloads will begin with an event type | ||
48 | * and subcategory. The event type must come first. | ||
49 | * The subcategory further defines the data that follows in the rest | ||
50 | * of the payload. Each category will have its own unique header plus | ||
51 | * any additional data unique to the subcategory. | ||
52 | * The payload sent via the fc transport is one-way driver->application. | ||
53 | */ | ||
54 | |||
55 | /* els event header */ | ||
56 | struct lpfc_els_event_header { | ||
57 | uint32_t event_type; | ||
58 | uint32_t subcategory; | ||
59 | uint8_t wwpn[8]; | ||
60 | uint8_t wwnn[8]; | ||
61 | }; | ||
62 | |||
63 | /* subcategory codes for FC_REG_ELS_EVENT */ | ||
64 | #define LPFC_EVENT_PLOGI_RCV 0x01 | ||
65 | #define LPFC_EVENT_PRLO_RCV 0x02 | ||
66 | #define LPFC_EVENT_ADISC_RCV 0x04 | ||
67 | #define LPFC_EVENT_LSRJT_RCV 0x08 | ||
68 | |||
69 | /* special els lsrjt event */ | ||
70 | struct lpfc_lsrjt_event { | ||
71 | struct lpfc_els_event_header header; | ||
72 | uint32_t command; | ||
73 | uint32_t reason_code; | ||
74 | uint32_t explanation; | ||
75 | }; | ||
76 | |||
77 | |||
78 | /* fabric event header */ | ||
79 | struct lpfc_fabric_event_header { | ||
80 | uint32_t event_type; | ||
81 | uint32_t subcategory; | ||
82 | uint8_t wwpn[8]; | ||
83 | uint8_t wwnn[8]; | ||
84 | }; | ||
85 | |||
86 | /* subcategory codes for FC_REG_FABRIC_EVENT */ | ||
87 | #define LPFC_EVENT_FABRIC_BUSY 0x01 | ||
88 | #define LPFC_EVENT_PORT_BUSY 0x02 | ||
89 | #define LPFC_EVENT_FCPRDCHKERR 0x04 | ||
90 | |||
91 | /* special case fabric fcprdchkerr event */ | ||
92 | struct lpfc_fcprdchkerr_event { | ||
93 | struct lpfc_fabric_event_header header; | ||
94 | uint32_t lun; | ||
95 | uint32_t opcode; | ||
96 | uint32_t fcpiparam; | ||
97 | }; | ||
98 | |||
99 | |||
100 | /* scsi event header */ | ||
101 | struct lpfc_scsi_event_header { | ||
102 | uint32_t event_type; | ||
103 | uint32_t subcategory; | ||
104 | uint32_t lun; | ||
105 | uint8_t wwpn[8]; | ||
106 | uint8_t wwnn[8]; | ||
107 | }; | ||
108 | |||
109 | /* subcategory codes for FC_REG_SCSI_EVENT */ | ||
110 | #define LPFC_EVENT_QFULL 0x0001 | ||
111 | #define LPFC_EVENT_DEVBSY 0x0002 | ||
112 | #define LPFC_EVENT_CHECK_COND 0x0004 | ||
113 | #define LPFC_EVENT_LUNRESET 0x0008 | ||
114 | #define LPFC_EVENT_TGTRESET 0x0010 | ||
115 | #define LPFC_EVENT_BUSRESET 0x0020 | ||
116 | #define LPFC_EVENT_VARQUEDEPTH 0x0040 | ||
117 | |||
118 | /* special case scsi varqueuedepth event */ | ||
119 | struct lpfc_scsi_varqueuedepth_event { | ||
120 | struct lpfc_scsi_event_header scsi_event; | ||
121 | uint32_t oldval; | ||
122 | uint32_t newval; | ||
123 | }; | ||
124 | |||
125 | /* special case scsi check condition event */ | ||
126 | struct lpfc_scsi_check_condition_event { | ||
127 | struct lpfc_scsi_event_header scsi_event; | ||
128 | uint8_t sense_key; | ||
129 | uint8_t asc; | ||
130 | uint8_t ascq; | ||
131 | }; | ||
132 | |||
133 | /* event codes for FC_REG_BOARD_EVENT */ | ||
134 | #define LPFC_EVENT_PORTINTERR 0x01 | ||
135 | |||
136 | /* board event header */ | ||
137 | struct lpfc_board_event_header { | ||
138 | uint32_t event_type; | ||
139 | uint32_t subcategory; | ||
140 | }; | ||
141 | |||
142 | |||
143 | /* event codes for FC_REG_ADAPTER_EVENT */ | ||
144 | #define LPFC_EVENT_ARRIVAL 0x01 | ||
145 | |||
146 | /* adapter event header */ | ||
147 | struct lpfc_adapter_event_header { | ||
148 | uint32_t event_type; | ||
149 | uint32_t subcategory; | ||
150 | }; | ||
151 | |||
152 | |||
153 | /* event codes for temp_event */ | ||
154 | #define LPFC_CRIT_TEMP 0x1 | ||
155 | #define LPFC_THRESHOLD_TEMP 0x2 | ||
156 | #define LPFC_NORMAL_TEMP 0x3 | ||
157 | |||
158 | struct temp_event { | ||
159 | uint32_t event_type; | ||
160 | uint32_t event_code; | ||
161 | uint32_t data; | ||
162 | }; | ||
163 | |||
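
Since every payload defined in this header starts with the same event_type/subcategory pair, a consumer can decode a message by reading that common prefix first and then casting to the subcategory-specific structure. The sketch below shows that idea from the receiving side; it is a plain userspace illustration that assumes the payload has already arrived intact from the FC transport, and it hard-codes a couple of the constants defined above.

	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors the common prefix shared by all lpfc event payloads. */
	struct event_prefix {
		uint32_t event_type;
		uint32_t subcategory;
	};

	static void decode_scsi_event(const void *payload, size_t len)
	{
		const struct event_prefix *pfx = payload;

		if (len < sizeof(*pfx) || pfx->event_type != 0x0080 /* FC_REG_SCSI_EVENT */)
			return;

		switch (pfx->subcategory) {
		case 0x0001:	/* LPFC_EVENT_QFULL */
			printf("target reported queue full\n");
			break;
		case 0x0040:	/* LPFC_EVENT_VARQUEDEPTH */
			/* cast to struct lpfc_scsi_varqueuedepth_event for old/new values */
			printf("queue depth changed\n");
			break;
		default:
			printf("unhandled scsi subcategory 0x%x\n", pfx->subcategory);
		}
	}
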
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 6688a8689b56..0c25d97acb42 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c | |||
@@ -30,6 +30,7 @@ | |||
30 | 30 | ||
31 | #include "lpfc_hw.h" | 31 | #include "lpfc_hw.h" |
32 | #include "lpfc_sli.h" | 32 | #include "lpfc_sli.h" |
33 | #include "lpfc_nl.h" | ||
33 | #include "lpfc_disc.h" | 34 | #include "lpfc_disc.h" |
34 | #include "lpfc_scsi.h" | 35 | #include "lpfc_scsi.h" |
35 | #include "lpfc.h" | 36 | #include "lpfc.h" |
@@ -1003,20 +1004,8 @@ lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
1003 | spin_lock_irq(shost->host_lock); | 1004 | spin_lock_irq(shost->host_lock); |
1004 | ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; | 1005 | ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; |
1005 | spin_unlock_irq(shost->host_lock); | 1006 | spin_unlock_irq(shost->host_lock); |
1006 | 1007 | if (vport->num_disc_nodes) | |
1007 | if (vport->num_disc_nodes) { | ||
1008 | lpfc_more_adisc(vport); | 1008 | lpfc_more_adisc(vport); |
1009 | if ((vport->num_disc_nodes == 0) && | ||
1010 | (vport->fc_npr_cnt)) | ||
1011 | lpfc_els_disc_plogi(vport); | ||
1012 | if (vport->num_disc_nodes == 0) { | ||
1013 | spin_lock_irq(shost->host_lock); | ||
1014 | vport->fc_flag &= ~FC_NDISC_ACTIVE; | ||
1015 | spin_unlock_irq(shost->host_lock); | ||
1016 | lpfc_can_disctmo(vport); | ||
1017 | lpfc_end_rscn(vport); | ||
1018 | } | ||
1019 | } | ||
1020 | } | 1009 | } |
1021 | return ndlp->nlp_state; | 1010 | return ndlp->nlp_state; |
1022 | } | 1011 | } |
@@ -1865,8 +1854,13 @@ static uint32_t | |||
1865 | lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | 1854 | lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
1866 | void *arg, uint32_t evt) | 1855 | void *arg, uint32_t evt) |
1867 | { | 1856 | { |
1857 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
1858 | if (ndlp->nlp_DID == Fabric_DID) { | ||
1859 | spin_lock_irq(shost->host_lock); | ||
1860 | vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); | ||
1861 | spin_unlock_irq(shost->host_lock); | ||
1862 | } | ||
1868 | lpfc_unreg_rpi(vport, ndlp); | 1863 | lpfc_unreg_rpi(vport, ndlp); |
1869 | /* This routine does nothing, just return the current state */ | ||
1870 | return ndlp->nlp_state; | 1864 | return ndlp->nlp_state; |
1871 | } | 1865 | } |
1872 | 1866 | ||
@@ -2155,7 +2149,7 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
2155 | lpfc_nlp_put(ndlp); | 2149 | lpfc_nlp_put(ndlp); |
2156 | } else { | 2150 | } else { |
2157 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, | 2151 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
2158 | "0212 DSM out state %d on NPort free\n", rc); | 2152 | "0213 DSM out state %d on NPort free\n", rc); |
2159 | 2153 | ||
2160 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, | 2154 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, |
2161 | "DSM out: ste:%d did:x%x flg:x%x", | 2155 | "DSM out: ste:%d did:x%x flg:x%x", |
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 1bcebbd3dfac..bd1867411821 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include "lpfc_version.h" | 32 | #include "lpfc_version.h" |
33 | #include "lpfc_hw.h" | 33 | #include "lpfc_hw.h" |
34 | #include "lpfc_sli.h" | 34 | #include "lpfc_sli.h" |
35 | #include "lpfc_nl.h" | ||
35 | #include "lpfc_disc.h" | 36 | #include "lpfc_disc.h" |
36 | #include "lpfc_scsi.h" | 37 | #include "lpfc_scsi.h" |
37 | #include "lpfc.h" | 38 | #include "lpfc.h" |
@@ -42,6 +43,111 @@ | |||
42 | #define LPFC_RESET_WAIT 2 | 43 | #define LPFC_RESET_WAIT 2 |
43 | #define LPFC_ABORT_WAIT 2 | 44 | #define LPFC_ABORT_WAIT 2 |
44 | 45 | ||
46 | /** | ||
47 | * lpfc_update_stats: Update statistical data for the command completion. | ||
48 | * @phba: Pointer to HBA object. | ||
49 | * @lpfc_cmd: lpfc scsi command object pointer. | ||
50 | * | ||
51 | * This function is called on command completion and updates the | ||
52 | * statistical data for the completed command. | ||
53 | **/ | ||
54 | static void | ||
55 | lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) | ||
56 | { | ||
57 | struct lpfc_rport_data *rdata = lpfc_cmd->rdata; | ||
58 | struct lpfc_nodelist *pnode = rdata->pnode; | ||
59 | struct scsi_cmnd *cmd = lpfc_cmd->pCmd; | ||
60 | unsigned long flags; | ||
61 | struct Scsi_Host *shost = cmd->device->host; | ||
62 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | ||
63 | unsigned long latency; | ||
64 | int i; | ||
65 | |||
66 | if (cmd->result) | ||
67 | return; | ||
68 | |||
69 | spin_lock_irqsave(shost->host_lock, flags); | ||
70 | if (!vport->stat_data_enabled || | ||
71 | vport->stat_data_blocked || | ||
72 | !pnode->lat_data || | ||
73 | (phba->bucket_type == LPFC_NO_BUCKET)) { | ||
74 | spin_unlock_irqrestore(shost->host_lock, flags); | ||
75 | return; | ||
76 | } | ||
77 | latency = jiffies_to_msecs(jiffies - lpfc_cmd->start_time); | ||
78 | |||
79 | if (phba->bucket_type == LPFC_LINEAR_BUCKET) { | ||
80 | i = (latency + phba->bucket_step - 1 - phba->bucket_base)/ | ||
81 | phba->bucket_step; | ||
82 | if (i >= LPFC_MAX_BUCKET_COUNT) | ||
83 | i = LPFC_MAX_BUCKET_COUNT; | ||
84 | } else { | ||
85 | for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++) | ||
86 | if (latency <= (phba->bucket_base + | ||
87 | ((1<<i)*phba->bucket_step))) | ||
88 | break; | ||
89 | } | ||
90 | |||
91 | pnode->lat_data[i].cmd_count++; | ||
92 | spin_unlock_irqrestore(shost->host_lock, flags); | ||
93 | } | ||
94 | |||
95 | |||
96 | /** | ||
97 | * lpfc_send_sdev_queuedepth_change_event: Posts a queuedepth change | ||
98 | * event. | ||
99 | * @phba: Pointer to HBA context object. | ||
100 | * @vport: Pointer to vport object. | ||
101 | * @ndlp: Pointer to FC node associated with the target. | ||
102 | * @lun: Lun number of the scsi device. | ||
103 | * @old_val: Old value of the queue depth. | ||
104 | * @new_val: New value of the queue depth. | ||
105 | * | ||
106 | * This function sends an event to the mgmt application indicating | ||
107 | * there is a change in the scsi device queue depth. | ||
108 | **/ | ||
109 | static void | ||
110 | lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba, | ||
111 | struct lpfc_vport *vport, | ||
112 | struct lpfc_nodelist *ndlp, | ||
113 | uint32_t lun, | ||
114 | uint32_t old_val, | ||
115 | uint32_t new_val) | ||
116 | { | ||
117 | struct lpfc_fast_path_event *fast_path_evt; | ||
118 | unsigned long flags; | ||
119 | |||
120 | fast_path_evt = lpfc_alloc_fast_evt(phba); | ||
121 | if (!fast_path_evt) | ||
122 | return; | ||
123 | |||
124 | fast_path_evt->un.queue_depth_evt.scsi_event.event_type = | ||
125 | FC_REG_SCSI_EVENT; | ||
126 | fast_path_evt->un.queue_depth_evt.scsi_event.subcategory = | ||
127 | LPFC_EVENT_VARQUEDEPTH; | ||
128 | |||
129 | /* Report all luns with change in queue depth */ | ||
130 | fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun; | ||
131 | if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { | ||
132 | memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn, | ||
133 | &ndlp->nlp_portname, sizeof(struct lpfc_name)); | ||
134 | memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn, | ||
135 | &ndlp->nlp_nodename, sizeof(struct lpfc_name)); | ||
136 | } | ||
137 | |||
138 | fast_path_evt->un.queue_depth_evt.oldval = old_val; | ||
139 | fast_path_evt->un.queue_depth_evt.newval = new_val; | ||
140 | fast_path_evt->vport = vport; | ||
141 | |||
142 | fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT; | ||
143 | spin_lock_irqsave(&phba->hbalock, flags); | ||
144 | list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list); | ||
145 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
146 | lpfc_worker_wake_up(phba); | ||
147 | |||
148 | return; | ||
149 | } | ||
150 | |||
45 | /* | 151 | /* |
46 | * This function is called with no lock held when there is a resource | 152 | * This function is called with no lock held when there is a resource |
47 | * error in driver or in firmware. | 153 | * error in driver or in firmware. |
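
lpfc_update_stats, added near the top of this hunk, sorts each completion latency into a histogram bucket either linearly (fixed-width buckets) or by powers of two. The standalone sketch below reproduces just that index calculation so the two schemes can be compared, with the overflow case clamped to the last valid bucket; for example, with base 0, step 10 and a latency of 27 ms, the linear scheme yields bucket 3 and the power-of-2 scheme yields bucket 2.

	#define MAX_BUCKET_COUNT 20	/* illustrative cap, not the driver's constant */

	/* Linear bucketing: fixed-width buckets of 'step' ms starting at 'base'. */
	static int linear_bucket(unsigned long latency, unsigned long base,
				 unsigned long step)
	{
		int i = (latency + step - 1 - base) / step;

		return (i >= MAX_BUCKET_COUNT) ? MAX_BUCKET_COUNT - 1 : i;
	}

	/* Power-of-2 bucketing: bucket i covers latencies up to base + (1 << i) * step. */
	static int pow2_bucket(unsigned long latency, unsigned long base,
			       unsigned long step)
	{
		int i;

		for (i = 0; i < MAX_BUCKET_COUNT - 1; i++)
			if (latency <= base + ((1UL << i) * step))
				break;
		return i;
	}
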
@@ -117,9 +223,10 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) | |||
117 | struct lpfc_vport **vports; | 223 | struct lpfc_vport **vports; |
118 | struct Scsi_Host *shost; | 224 | struct Scsi_Host *shost; |
119 | struct scsi_device *sdev; | 225 | struct scsi_device *sdev; |
120 | unsigned long new_queue_depth; | 226 | unsigned long new_queue_depth, old_queue_depth; |
121 | unsigned long num_rsrc_err, num_cmd_success; | 227 | unsigned long num_rsrc_err, num_cmd_success; |
122 | int i; | 228 | int i; |
229 | struct lpfc_rport_data *rdata; | ||
123 | 230 | ||
124 | num_rsrc_err = atomic_read(&phba->num_rsrc_err); | 231 | num_rsrc_err = atomic_read(&phba->num_rsrc_err); |
125 | num_cmd_success = atomic_read(&phba->num_cmd_success); | 232 | num_cmd_success = atomic_read(&phba->num_cmd_success); |
@@ -137,6 +244,7 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) | |||
137 | else | 244 | else |
138 | new_queue_depth = sdev->queue_depth - | 245 | new_queue_depth = sdev->queue_depth - |
139 | new_queue_depth; | 246 | new_queue_depth; |
247 | old_queue_depth = sdev->queue_depth; | ||
140 | if (sdev->ordered_tags) | 248 | if (sdev->ordered_tags) |
141 | scsi_adjust_queue_depth(sdev, | 249 | scsi_adjust_queue_depth(sdev, |
142 | MSG_ORDERED_TAG, | 250 | MSG_ORDERED_TAG, |
@@ -145,6 +253,13 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) | |||
145 | scsi_adjust_queue_depth(sdev, | 253 | scsi_adjust_queue_depth(sdev, |
146 | MSG_SIMPLE_TAG, | 254 | MSG_SIMPLE_TAG, |
147 | new_queue_depth); | 255 | new_queue_depth); |
256 | rdata = sdev->hostdata; | ||
257 | if (rdata) | ||
258 | lpfc_send_sdev_queuedepth_change_event( | ||
259 | phba, vports[i], | ||
260 | rdata->pnode, | ||
261 | sdev->lun, old_queue_depth, | ||
262 | new_queue_depth); | ||
148 | } | 263 | } |
149 | } | 264 | } |
150 | lpfc_destroy_vport_work_array(phba, vports); | 265 | lpfc_destroy_vport_work_array(phba, vports); |
@@ -159,6 +274,7 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba) | |||
159 | struct Scsi_Host *shost; | 274 | struct Scsi_Host *shost; |
160 | struct scsi_device *sdev; | 275 | struct scsi_device *sdev; |
161 | int i; | 276 | int i; |
277 | struct lpfc_rport_data *rdata; | ||
162 | 278 | ||
163 | vports = lpfc_create_vport_work_array(phba); | 279 | vports = lpfc_create_vport_work_array(phba); |
164 | if (vports != NULL) | 280 | if (vports != NULL) |
@@ -176,6 +292,14 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba) | |||
176 | scsi_adjust_queue_depth(sdev, | 292 | scsi_adjust_queue_depth(sdev, |
177 | MSG_SIMPLE_TAG, | 293 | MSG_SIMPLE_TAG, |
178 | sdev->queue_depth+1); | 294 | sdev->queue_depth+1); |
295 | rdata = sdev->hostdata; | ||
296 | if (rdata) | ||
297 | lpfc_send_sdev_queuedepth_change_event( | ||
298 | phba, vports[i], | ||
299 | rdata->pnode, | ||
300 | sdev->lun, | ||
301 | sdev->queue_depth - 1, | ||
302 | sdev->queue_depth); | ||
179 | } | 303 | } |
180 | } | 304 | } |
181 | lpfc_destroy_vport_work_array(phba, vports); | 305 | lpfc_destroy_vport_work_array(phba, vports); |
@@ -183,6 +307,35 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba) | |||
183 | atomic_set(&phba->num_cmd_success, 0); | 307 | atomic_set(&phba->num_cmd_success, 0); |
184 | } | 308 | } |
185 | 309 | ||
310 | /** | ||
311 | * lpfc_scsi_dev_block: set all scsi hosts to block state. | ||
312 | * @phba: Pointer to HBA context object. | ||
313 | * | ||
314 | * This function walks the vport list and sets each SCSI host to the blocked | ||
315 | * state by invoking the fc_remote_port_delete() routine. This function is | ||
316 | * invoked by EEH when the device's PCI slot has been permanently disabled. | ||
317 | **/ | ||
318 | void | ||
319 | lpfc_scsi_dev_block(struct lpfc_hba *phba) | ||
320 | { | ||
321 | struct lpfc_vport **vports; | ||
322 | struct Scsi_Host *shost; | ||
323 | struct scsi_device *sdev; | ||
324 | struct fc_rport *rport; | ||
325 | int i; | ||
326 | |||
327 | vports = lpfc_create_vport_work_array(phba); | ||
328 | if (vports != NULL) | ||
329 | for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { | ||
330 | shost = lpfc_shost_from_vport(vports[i]); | ||
331 | shost_for_each_device(sdev, shost) { | ||
332 | rport = starget_to_rport(scsi_target(sdev)); | ||
333 | fc_remote_port_delete(rport); | ||
334 | } | ||
335 | } | ||
336 | lpfc_destroy_vport_work_array(phba, vports); | ||
337 | } | ||
338 | |||
186 | /* | 339 | /* |
187 | * This routine allocates a scsi buffer, which contains all the necessary | 340 | * This routine allocates a scsi buffer, which contains all the necessary |
188 | * information needed to initiate a SCSI I/O. The non-DMAable buffer region | 341 | * information needed to initiate a SCSI I/O. The non-DMAable buffer region |
@@ -198,7 +351,9 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport) | |||
198 | struct lpfc_scsi_buf *psb; | 351 | struct lpfc_scsi_buf *psb; |
199 | struct ulp_bde64 *bpl; | 352 | struct ulp_bde64 *bpl; |
200 | IOCB_t *iocb; | 353 | IOCB_t *iocb; |
201 | dma_addr_t pdma_phys; | 354 | dma_addr_t pdma_phys_fcp_cmd; |
355 | dma_addr_t pdma_phys_fcp_rsp; | ||
356 | dma_addr_t pdma_phys_bpl; | ||
202 | uint16_t iotag; | 357 | uint16_t iotag; |
203 | 358 | ||
204 | psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); | 359 | psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); |
@@ -238,40 +393,60 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport) | |||
238 | 393 | ||
239 | /* Initialize local short-hand pointers. */ | 394 | /* Initialize local short-hand pointers. */ |
240 | bpl = psb->fcp_bpl; | 395 | bpl = psb->fcp_bpl; |
241 | pdma_phys = psb->dma_handle; | 396 | pdma_phys_fcp_cmd = psb->dma_handle; |
397 | pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd); | ||
398 | pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) + | ||
399 | sizeof(struct fcp_rsp); | ||
242 | 400 | ||
243 | /* | 401 | /* |
244 | * The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg | 402 | * The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg |
245 | * list bdes. Initialize the first two and leave the rest for | 403 | * list bdes. Initialize the first two and leave the rest for |
246 | * queuecommand. | 404 | * queuecommand. |
247 | */ | 405 | */ |
248 | bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys)); | 406 | bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd)); |
249 | bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys)); | 407 | bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd)); |
250 | bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd); | 408 | bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd); |
251 | bpl->tus.f.bdeFlags = BUFF_USE_CMND; | 409 | bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64; |
252 | bpl->tus.w = le32_to_cpu(bpl->tus.w); | 410 | bpl[0].tus.w = le32_to_cpu(bpl->tus.w); |
253 | bpl++; | ||
254 | 411 | ||
255 | /* Setup the physical region for the FCP RSP */ | 412 | /* Setup the physical region for the FCP RSP */ |
256 | pdma_phys += sizeof (struct fcp_cmnd); | 413 | bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp)); |
257 | bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys)); | 414 | bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp)); |
258 | bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys)); | 415 | bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp); |
259 | bpl->tus.f.bdeSize = sizeof (struct fcp_rsp); | 416 | bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64; |
260 | bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV); | 417 | bpl[1].tus.w = le32_to_cpu(bpl->tus.w); |
261 | bpl->tus.w = le32_to_cpu(bpl->tus.w); | ||
262 | 418 | ||
263 | /* | 419 | /* |
264 | * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf, | 420 | * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf, |
265 | * initialize it with all known data now. | 421 | * initialize it with all known data now. |
266 | */ | 422 | */ |
267 | pdma_phys += (sizeof (struct fcp_rsp)); | ||
268 | iocb = &psb->cur_iocbq.iocb; | 423 | iocb = &psb->cur_iocbq.iocb; |
269 | iocb->un.fcpi64.bdl.ulpIoTag32 = 0; | 424 | iocb->un.fcpi64.bdl.ulpIoTag32 = 0; |
270 | iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys); | 425 | if (phba->sli_rev == 3) { |
271 | iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys); | 426 | /* fill in immediate fcp command BDE */ |
272 | iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64)); | 427 | iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED; |
273 | iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL; | 428 | iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); |
274 | iocb->ulpBdeCount = 1; | 429 | iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t, |
430 | unsli3.fcp_ext.icd); | ||
431 | iocb->un.fcpi64.bdl.addrHigh = 0; | ||
432 | iocb->ulpBdeCount = 0; | ||
433 | iocb->ulpLe = 0; | ||
434 | /* fill in response BDE */ | ||
435 | iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; | ||
436 | iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize = | ||
437 | sizeof(struct fcp_rsp); | ||
438 | iocb->unsli3.fcp_ext.rbde.addrLow = | ||
439 | putPaddrLow(pdma_phys_fcp_rsp); | ||
440 | iocb->unsli3.fcp_ext.rbde.addrHigh = | ||
441 | putPaddrHigh(pdma_phys_fcp_rsp); | ||
442 | } else { | ||
443 | iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64; | ||
444 | iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64)); | ||
445 | iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl); | ||
446 | iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl); | ||
447 | iocb->ulpBdeCount = 1; | ||
448 | iocb->ulpLe = 1; | ||
449 | } | ||
275 | iocb->ulpClass = CLASS3; | 450 | iocb->ulpClass = CLASS3; |
276 | 451 | ||
277 | return psb; | 452 | return psb; |
@@ -313,8 +488,9 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) | |||
313 | struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; | 488 | struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; |
314 | struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl; | 489 | struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl; |
315 | IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; | 490 | IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; |
491 | struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde; | ||
316 | dma_addr_t physaddr; | 492 | dma_addr_t physaddr; |
317 | uint32_t i, num_bde = 0; | 493 | uint32_t num_bde = 0; |
318 | int nseg, datadir = scsi_cmnd->sc_data_direction; | 494 | int nseg, datadir = scsi_cmnd->sc_data_direction; |
319 | 495 | ||
320 | /* | 496 | /* |
@@ -352,37 +528,159 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) | |||
352 | * during probe that limits the number of sg elements in any | 528 | * during probe that limits the number of sg elements in any |
353 | * single scsi command. Just run through the seg_cnt and format | 529 | * single scsi command. Just run through the seg_cnt and format |
354 | * the bde's. | 530 | * the bde's. |
531 | * When using SLI-3 the driver will try to fit all the BDEs into | ||
532 | * the IOCB. If it can't then the BDEs get added to a BPL as it | ||
533 | * does for SLI-2 mode. | ||
355 | */ | 534 | */ |
356 | scsi_for_each_sg(scsi_cmnd, sgel, nseg, i) { | 535 | scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) { |
357 | physaddr = sg_dma_address(sgel); | 536 | physaddr = sg_dma_address(sgel); |
358 | bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr)); | 537 | if (phba->sli_rev == 3 && |
359 | bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); | 538 | nseg <= LPFC_EXT_DATA_BDE_COUNT) { |
360 | bpl->tus.f.bdeSize = sg_dma_len(sgel); | 539 | data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; |
361 | if (datadir == DMA_TO_DEVICE) | 540 | data_bde->tus.f.bdeSize = sg_dma_len(sgel); |
362 | bpl->tus.f.bdeFlags = 0; | 541 | data_bde->addrLow = putPaddrLow(physaddr); |
363 | else | 542 | data_bde->addrHigh = putPaddrHigh(physaddr); |
364 | bpl->tus.f.bdeFlags = BUFF_USE_RCV; | 543 | data_bde++; |
365 | bpl->tus.w = le32_to_cpu(bpl->tus.w); | 544 | } else { |
366 | bpl++; | 545 | bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; |
367 | num_bde++; | 546 | bpl->tus.f.bdeSize = sg_dma_len(sgel); |
547 | bpl->tus.w = le32_to_cpu(bpl->tus.w); | ||
548 | bpl->addrLow = | ||
549 | le32_to_cpu(putPaddrLow(physaddr)); | ||
550 | bpl->addrHigh = | ||
551 | le32_to_cpu(putPaddrHigh(physaddr)); | ||
552 | bpl++; | ||
553 | } | ||
368 | } | 554 | } |
369 | } | 555 | } |
370 | 556 | ||
371 | /* | 557 | /* |
372 | * Finish initializing those IOCB fields that are dependent on the | 558 | * Finish initializing those IOCB fields that are dependent on the |
373 | * scsi_cmnd request_buffer. Note that the bdeSize is explicitly | 559 | * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is |
374 | * reinitialized since all iocb memory resources are used many times | 560 | * explicitly reinitialized and for SLI-3 the extended bde count is |
375 | * for transmit, receive, and continuation bpl's. | 561 | * explicitly reinitialized since all iocb memory resources are reused. |
376 | */ | 562 | */ |
377 | iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64)); | 563 | if (phba->sli_rev == 3) { |
378 | iocb_cmd->un.fcpi64.bdl.bdeSize += | 564 | if (num_bde > LPFC_EXT_DATA_BDE_COUNT) { |
379 | (num_bde * sizeof (struct ulp_bde64)); | 565 | /* |
380 | iocb_cmd->ulpBdeCount = 1; | 566 | * The extended IOCB format can only fit 3 BDE or a BPL. |
381 | iocb_cmd->ulpLe = 1; | 567 | * This I/O has more than 3 BDE so the 1st data bde will |
568 | * be a BPL that is filled in here. | ||
569 | */ | ||
570 | physaddr = lpfc_cmd->dma_handle; | ||
571 | data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64; | ||
572 | data_bde->tus.f.bdeSize = (num_bde * | ||
573 | sizeof(struct ulp_bde64)); | ||
574 | physaddr += (sizeof(struct fcp_cmnd) + | ||
575 | sizeof(struct fcp_rsp) + | ||
576 | (2 * sizeof(struct ulp_bde64))); | ||
577 | data_bde->addrHigh = putPaddrHigh(physaddr); | ||
578 | data_bde->addrLow = putPaddrLow(physaddr); | ||
579 | /* ebde count includes the response bde and data bpl */ | ||
580 | iocb_cmd->unsli3.fcp_ext.ebde_count = 2; | ||
581 | } else { | ||
582 | /* ebde count includes the response bde and data bdes */ | ||
583 | iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1); | ||
584 | } | ||
585 | } else { | ||
586 | iocb_cmd->un.fcpi64.bdl.bdeSize = | ||
587 | ((num_bde + 2) * sizeof(struct ulp_bde64)); | ||
588 | } | ||
382 | fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); | 589 | fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); |
383 | return 0; | 590 | return 0; |
384 | } | 591 | } |
385 | 592 | ||
593 | /** | ||
594 | * lpfc_send_scsi_error_event: Posts an event when there is SCSI error. | ||
595 | * @phba: Pointer to hba context object. | ||
596 | * @vport: Pointer to vport object. | ||
597 | * @lpfc_cmd: Pointer to lpfc scsi command which reported the error. | ||
598 | * @rsp_iocb: Pointer to response iocb object which reported error. | ||
599 | * | ||
600 | * This function posts an event when a SCSI command reports an error | ||
601 | * from the scsi device. | ||
602 | **/ | ||
603 | static void | ||
604 | lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport, | ||
605 | struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) { | ||
606 | struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; | ||
607 | struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; | ||
608 | uint32_t resp_info = fcprsp->rspStatus2; | ||
609 | uint32_t scsi_status = fcprsp->rspStatus3; | ||
610 | uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm; | ||
611 | struct lpfc_fast_path_event *fast_path_evt = NULL; | ||
612 | struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode; | ||
613 | unsigned long flags; | ||
614 | |||
615 | /* If there is queuefull or busy condition send a scsi event */ | ||
616 | if ((cmnd->result == SAM_STAT_TASK_SET_FULL) || | ||
617 | (cmnd->result == SAM_STAT_BUSY)) { | ||
618 | fast_path_evt = lpfc_alloc_fast_evt(phba); | ||
619 | if (!fast_path_evt) | ||
620 | return; | ||
621 | fast_path_evt->un.scsi_evt.event_type = | ||
622 | FC_REG_SCSI_EVENT; | ||
623 | fast_path_evt->un.scsi_evt.subcategory = | ||
624 | (cmnd->result == SAM_STAT_TASK_SET_FULL) ? | ||
625 | LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY; | ||
626 | fast_path_evt->un.scsi_evt.lun = cmnd->device->lun; | ||
627 | memcpy(&fast_path_evt->un.scsi_evt.wwpn, | ||
628 | &pnode->nlp_portname, sizeof(struct lpfc_name)); | ||
629 | memcpy(&fast_path_evt->un.scsi_evt.wwnn, | ||
630 | &pnode->nlp_nodename, sizeof(struct lpfc_name)); | ||
631 | } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen && | ||
632 | ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) { | ||
633 | fast_path_evt = lpfc_alloc_fast_evt(phba); | ||
634 | if (!fast_path_evt) | ||
635 | return; | ||
636 | fast_path_evt->un.check_cond_evt.scsi_event.event_type = | ||
637 | FC_REG_SCSI_EVENT; | ||
638 | fast_path_evt->un.check_cond_evt.scsi_event.subcategory = | ||
639 | LPFC_EVENT_CHECK_COND; | ||
640 | fast_path_evt->un.check_cond_evt.scsi_event.lun = | ||
641 | cmnd->device->lun; | ||
642 | memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn, | ||
643 | &pnode->nlp_portname, sizeof(struct lpfc_name)); | ||
644 | memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn, | ||
645 | &pnode->nlp_nodename, sizeof(struct lpfc_name)); | ||
646 | fast_path_evt->un.check_cond_evt.sense_key = | ||
647 | cmnd->sense_buffer[2] & 0xf; | ||
648 | fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12]; | ||
649 | fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13]; | ||
650 | } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) && | ||
651 | fcpi_parm && | ||
652 | ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) || | ||
653 | ((scsi_status == SAM_STAT_GOOD) && | ||
654 | !(resp_info & (RESID_UNDER | RESID_OVER))))) { | ||
655 | /* | ||
656 | * If the status is good or the residual does not match fcpi_parm | ||
657 | * while fcpi_parm is valid, then there is a read_check error | ||
658 | */ | ||
659 | fast_path_evt = lpfc_alloc_fast_evt(phba); | ||
660 | if (!fast_path_evt) | ||
661 | return; | ||
662 | fast_path_evt->un.read_check_error.header.event_type = | ||
663 | FC_REG_FABRIC_EVENT; | ||
664 | fast_path_evt->un.read_check_error.header.subcategory = | ||
665 | LPFC_EVENT_FCPRDCHKERR; | ||
666 | memcpy(&fast_path_evt->un.read_check_error.header.wwpn, | ||
667 | &pnode->nlp_portname, sizeof(struct lpfc_name)); | ||
668 | memcpy(&fast_path_evt->un.read_check_error.header.wwnn, | ||
669 | &pnode->nlp_nodename, sizeof(struct lpfc_name)); | ||
670 | fast_path_evt->un.read_check_error.lun = cmnd->device->lun; | ||
671 | fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0]; | ||
672 | fast_path_evt->un.read_check_error.fcpiparam = | ||
673 | fcpi_parm; | ||
674 | } else | ||
675 | return; | ||
676 | |||
677 | fast_path_evt->vport = vport; | ||
678 | spin_lock_irqsave(&phba->hbalock, flags); | ||
679 | list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list); | ||
680 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
681 | lpfc_worker_wake_up(phba); | ||
682 | return; | ||
683 | } | ||
386 | static void | 684 | static void |
387 | lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) | 685 | lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) |
388 | { | 686 | { |
@@ -411,6 +709,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | |||
411 | uint32_t rsplen = 0; | 709 | uint32_t rsplen = 0; |
412 | uint32_t logit = LOG_FCP | LOG_FCP_ERROR; | 710 | uint32_t logit = LOG_FCP | LOG_FCP_ERROR; |
413 | 711 | ||
712 | |||
414 | /* | 713 | /* |
415 | * If this is a task management command, there is no | 714 | * If this is a task management command, there is no |
416 | * scsi packet associated with this lpfc_cmd. The driver | 715 | * scsi packet associated with this lpfc_cmd. The driver |
@@ -526,6 +825,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | |||
526 | 825 | ||
527 | out: | 826 | out: |
528 | cmnd->result = ScsiResult(host_status, scsi_status); | 827 | cmnd->result = ScsiResult(host_status, scsi_status); |
828 | lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb); | ||
529 | } | 829 | } |
530 | 830 | ||
531 | static void | 831 | static void |
@@ -542,9 +842,11 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
542 | struct scsi_device *sdev, *tmp_sdev; | 842 | struct scsi_device *sdev, *tmp_sdev; |
543 | int depth = 0; | 843 | int depth = 0; |
544 | unsigned long flags; | 844 | unsigned long flags; |
845 | struct lpfc_fast_path_event *fast_path_evt; | ||
545 | 846 | ||
546 | lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4]; | 847 | lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4]; |
547 | lpfc_cmd->status = pIocbOut->iocb.ulpStatus; | 848 | lpfc_cmd->status = pIocbOut->iocb.ulpStatus; |
849 | atomic_dec(&pnode->cmd_pending); | ||
548 | 850 | ||
549 | if (lpfc_cmd->status) { | 851 | if (lpfc_cmd->status) { |
550 | if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && | 852 | if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && |
@@ -570,12 +872,36 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
570 | break; | 872 | break; |
571 | case IOSTAT_NPORT_BSY: | 873 | case IOSTAT_NPORT_BSY: |
572 | case IOSTAT_FABRIC_BSY: | 874 | case IOSTAT_FABRIC_BSY: |
573 | cmd->result = ScsiResult(DID_BUS_BUSY, 0); | 875 | cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0); |
876 | fast_path_evt = lpfc_alloc_fast_evt(phba); | ||
877 | if (!fast_path_evt) | ||
878 | break; | ||
879 | fast_path_evt->un.fabric_evt.event_type = | ||
880 | FC_REG_FABRIC_EVENT; | ||
881 | fast_path_evt->un.fabric_evt.subcategory = | ||
882 | (lpfc_cmd->status == IOSTAT_NPORT_BSY) ? | ||
883 | LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY; | ||
884 | if (pnode && NLP_CHK_NODE_ACT(pnode)) { | ||
885 | memcpy(&fast_path_evt->un.fabric_evt.wwpn, | ||
886 | &pnode->nlp_portname, | ||
887 | sizeof(struct lpfc_name)); | ||
888 | memcpy(&fast_path_evt->un.fabric_evt.wwnn, | ||
889 | &pnode->nlp_nodename, | ||
890 | sizeof(struct lpfc_name)); | ||
891 | } | ||
892 | fast_path_evt->vport = vport; | ||
893 | fast_path_evt->work_evt.evt = | ||
894 | LPFC_EVT_FASTPATH_MGMT_EVT; | ||
895 | spin_lock_irqsave(&phba->hbalock, flags); | ||
896 | list_add_tail(&fast_path_evt->work_evt.evt_listp, | ||
897 | &phba->work_list); | ||
898 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
899 | lpfc_worker_wake_up(phba); | ||
574 | break; | 900 | break; |
575 | case IOSTAT_LOCAL_REJECT: | 901 | case IOSTAT_LOCAL_REJECT: |
576 | if (lpfc_cmd->result == RJT_UNAVAIL_PERM || | 902 | if (lpfc_cmd->result == IOERR_INVALID_RPI || |
577 | lpfc_cmd->result == IOERR_NO_RESOURCES || | 903 | lpfc_cmd->result == IOERR_NO_RESOURCES || |
578 | lpfc_cmd->result == RJT_LOGIN_REQUIRED) { | 904 | lpfc_cmd->result == IOERR_ABORT_REQUESTED) { |
579 | cmd->result = ScsiResult(DID_REQUEUE, 0); | 905 | cmd->result = ScsiResult(DID_REQUEUE, 0); |
580 | break; | 906 | break; |
581 | } /* else: fall through */ | 907 | } /* else: fall through */ |
@@ -586,7 +912,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
586 | 912 | ||
587 | if (!pnode || !NLP_CHK_NODE_ACT(pnode) | 913 | if (!pnode || !NLP_CHK_NODE_ACT(pnode) |
588 | || (pnode->nlp_state != NLP_STE_MAPPED_NODE)) | 914 | || (pnode->nlp_state != NLP_STE_MAPPED_NODE)) |
589 | cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY); | 915 | cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, |
916 | SAM_STAT_BUSY); | ||
590 | } else { | 917 | } else { |
591 | cmd->result = ScsiResult(DID_OK, 0); | 918 | cmd->result = ScsiResult(DID_OK, 0); |
592 | } | 919 | } |
@@ -602,8 +929,32 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
602 | scsi_get_resid(cmd)); | 929 | scsi_get_resid(cmd)); |
603 | } | 930 | } |
604 | 931 | ||
932 | lpfc_update_stats(phba, lpfc_cmd); | ||
605 | result = cmd->result; | 933 | result = cmd->result; |
606 | sdev = cmd->device; | 934 | sdev = cmd->device; |
935 | if (vport->cfg_max_scsicmpl_time && | ||
936 | time_after(jiffies, lpfc_cmd->start_time + | ||
937 | msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { | ||
938 | spin_lock_irqsave(sdev->host->host_lock, flags); | ||
939 | if ((pnode->cmd_qdepth > atomic_read(&pnode->cmd_pending) && | ||
940 | (atomic_read(&pnode->cmd_pending) > LPFC_MIN_TGT_QDEPTH) && | ||
941 | ((cmd->cmnd[0] == READ_10) || (cmd->cmnd[0] == WRITE_10)))) | ||
942 | pnode->cmd_qdepth = atomic_read(&pnode->cmd_pending); | ||
943 | |||
944 | pnode->last_change_time = jiffies; | ||
945 | spin_unlock_irqrestore(sdev->host->host_lock, flags); | ||
946 | } else if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) && | ||
947 | time_after(jiffies, pnode->last_change_time + | ||
948 | msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) { | ||
949 | spin_lock_irqsave(sdev->host->host_lock, flags); | ||
950 | pnode->cmd_qdepth += pnode->cmd_qdepth * | ||
951 | LPFC_TGTQ_RAMPUP_PCENT / 100; | ||
952 | if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH) | ||
953 | pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH; | ||
954 | pnode->last_change_time = jiffies; | ||
955 | spin_unlock_irqrestore(sdev->host->host_lock, flags); | ||
956 | } | ||
957 | |||
607 | lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); | 958 | lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); |
608 | cmd->scsi_done(cmd); | 959 | cmd->scsi_done(cmd); |
609 | 960 | ||
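
The completion-path logic in this hunk lowers the per-target cmd_qdepth to the currently outstanding count when commands take longer than cfg_max_scsicmpl_time, and otherwise ramps it back up by a fixed percentage at most once per interval, capped at a maximum. A simplified sketch of the ramp-up arithmetic follows, with purely illustrative constants and no locking; for instance, with a 5% step a depth of 100 ramps to 105 on one pass.

	#define MY_MAX_TGT_QDEPTH	0xFFFF	/* illustrative cap */
	#define MY_RAMPUP_PCENT		5	/* illustrative ramp-up percentage */

	/* Grow the target queue depth by a percentage, never past the cap. */
	static unsigned int ramp_up_qdepth(unsigned int qdepth)
	{
		qdepth += qdepth * MY_RAMPUP_PCENT / 100;
		if (qdepth > MY_MAX_TGT_QDEPTH)
			qdepth = MY_MAX_TGT_QDEPTH;
		return qdepth;
	}
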
@@ -647,6 +998,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
647 | pnode->last_ramp_up_time = jiffies; | 998 | pnode->last_ramp_up_time = jiffies; |
648 | } | 999 | } |
649 | } | 1000 | } |
1001 | lpfc_send_sdev_queuedepth_change_event(phba, vport, pnode, | ||
1002 | 0xFFFFFFFF, | ||
1003 | sdev->queue_depth - 1, sdev->queue_depth); | ||
650 | } | 1004 | } |
651 | 1005 | ||
652 | /* | 1006 | /* |
@@ -676,6 +1030,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
676 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, | 1030 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, |
677 | "0711 detected queue full - lun queue " | 1031 | "0711 detected queue full - lun queue " |
678 | "depth adjusted to %d.\n", depth); | 1032 | "depth adjusted to %d.\n", depth); |
1033 | lpfc_send_sdev_queuedepth_change_event(phba, vport, | ||
1034 | pnode, 0xFFFFFFFF, | ||
1035 | depth+1, depth); | ||
679 | } | 1036 | } |
680 | } | 1037 | } |
681 | 1038 | ||
@@ -692,6 +1049,24 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
692 | lpfc_release_scsi_buf(phba, lpfc_cmd); | 1049 | lpfc_release_scsi_buf(phba, lpfc_cmd); |
693 | } | 1050 | } |
694 | 1051 | ||
1052 | /** | ||
1053 | * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB. | ||
1054 | * @data: A pointer to the immediate command data portion of the IOCB. | ||
1055 | * @fcp_cmnd: The FCP Command that is provided by the SCSI layer. | ||
1056 | * | ||
1057 | * The routine copies the entire FCP command from @fcp_cmnd to @data while | ||
1058 | * byte swapping the data to big endian format for transmission on the wire. | ||
1059 | **/ | ||
1060 | static void | ||
1061 | lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd) | ||
1062 | { | ||
1063 | int i, j; | ||
1064 | for (i = 0, j = 0; i < sizeof(struct fcp_cmnd); | ||
1065 | i += sizeof(uint32_t), j++) { | ||
1066 | ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]); | ||
1067 | } | ||
1068 | } | ||
1069 | |||
695 | static void | 1070 | static void |
696 | lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | 1071 | lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, |
697 | struct lpfc_nodelist *pnode) | 1072 | struct lpfc_nodelist *pnode) |
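
lpfc_fcpcmd_to_iocb, added in this hunk, copies the FCP command into the IOCB's immediate-data area one 32-bit word at a time, converting each word to big-endian wire format as it goes. The small standalone sketch below shows the same word-wise cpu_to_be32 copy applied to an arbitrary structure, assuming its size is a multiple of 4 bytes; it is an illustration of the idea, not the driver's routine.

	#include <linux/types.h>
	#include <asm/byteorder.h>	/* cpu_to_be32() */

	/*
	 * Copy 'len' bytes (a multiple of 4) from 'src' to 'dst', converting each
	 * 32-bit word to big-endian as it is copied.
	 */
	static void copy_to_be32_words(void *dst, const void *src, size_t len)
	{
		u32 *d = dst;
		const u32 *s = src;
		size_t words = len / sizeof(u32);
		size_t j;

		for (j = 0; j < words; j++)
			d[j] = cpu_to_be32(s[j]);
	}
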
@@ -758,7 +1133,8 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | |||
758 | fcp_cmnd->fcpCntl3 = 0; | 1133 | fcp_cmnd->fcpCntl3 = 0; |
759 | phba->fc4ControlRequests++; | 1134 | phba->fc4ControlRequests++; |
760 | } | 1135 | } |
761 | 1136 | if (phba->sli_rev == 3) | |
1137 | lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd); | ||
762 | /* | 1138 | /* |
763 | * Finish initializing those IOCB fields that are independent | 1139 | * Finish initializing those IOCB fields that are independent |
764 | * of the scsi_cmnd request_buffer | 1140 | * of the scsi_cmnd request_buffer |
@@ -798,11 +1174,13 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, | |||
798 | piocb = &piocbq->iocb; | 1174 | piocb = &piocbq->iocb; |
799 | 1175 | ||
800 | fcp_cmnd = lpfc_cmd->fcp_cmnd; | 1176 | fcp_cmnd = lpfc_cmd->fcp_cmnd; |
801 | int_to_scsilun(lun, &lpfc_cmd->fcp_cmnd->fcp_lun); | 1177 | /* Clear out any old data in the FCP command area */ |
1178 | memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd)); | ||
1179 | int_to_scsilun(lun, &fcp_cmnd->fcp_lun); | ||
802 | fcp_cmnd->fcpCntl2 = task_mgmt_cmd; | 1180 | fcp_cmnd->fcpCntl2 = task_mgmt_cmd; |
803 | 1181 | if (vport->phba->sli_rev == 3) | |
1182 | lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd); | ||
804 | piocb->ulpCommand = CMD_FCP_ICMND64_CR; | 1183 | piocb->ulpCommand = CMD_FCP_ICMND64_CR; |
805 | |||
806 | piocb->ulpContext = ndlp->nlp_rpi; | 1184 | piocb->ulpContext = ndlp->nlp_rpi; |
807 | if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) { | 1185 | if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) { |
808 | piocb->ulpFCP2Rcvy = 1; | 1186 | piocb->ulpFCP2Rcvy = 1; |
@@ -967,9 +1345,12 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | |||
967 | * transport is still transitioning. | 1345 | * transport is still transitioning. |
968 | */ | 1346 | */ |
969 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { | 1347 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { |
970 | cmnd->result = ScsiResult(DID_BUS_BUSY, 0); | 1348 | cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0); |
971 | goto out_fail_command; | 1349 | goto out_fail_command; |
972 | } | 1350 | } |
1351 | if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) | ||
1352 | goto out_host_busy; | ||
1353 | |||
973 | lpfc_cmd = lpfc_get_scsi_buf(phba); | 1354 | lpfc_cmd = lpfc_get_scsi_buf(phba); |
974 | if (lpfc_cmd == NULL) { | 1355 | if (lpfc_cmd == NULL) { |
975 | lpfc_adjust_queue_depth(phba); | 1356 | lpfc_adjust_queue_depth(phba); |
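
The queuecommand change in this hunk introduces simple per-target throttling: an atomic count of outstanding commands is compared against the target's cmd_qdepth before issue, incremented when an I/O is sent, and decremented on completion or when the issue fails. A hedged sketch of that accounting pattern is shown below, with generic names rather than the driver's structures; note that the check and the increment are not atomic together, so the limit is advisory, much as in the driver.

	#include <linux/atomic.h>	/* <asm/atomic.h> on kernels of this era */

	struct my_target {
		atomic_t cmd_pending;		/* commands currently outstanding */
		unsigned int cmd_qdepth;	/* current per-target limit */
	};

	/* Returns 0 if the command may be issued, -EBUSY to ask the midlayer to retry. */
	static int my_target_start_io(struct my_target *tgt)
	{
		if (atomic_read(&tgt->cmd_pending) >= tgt->cmd_qdepth)
			return -EBUSY;
		atomic_inc(&tgt->cmd_pending);
		return 0;
	}

	/* Called from the completion path, or when issuing the command failed. */
	static void my_target_end_io(struct my_target *tgt)
	{
		atomic_dec(&tgt->cmd_pending);
	}
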
@@ -980,6 +1361,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | |||
980 | goto out_host_busy; | 1361 | goto out_host_busy; |
981 | } | 1362 | } |
982 | 1363 | ||
1364 | lpfc_cmd->start_time = jiffies; | ||
983 | /* | 1365 | /* |
984 | * Store the midlayer's command structure for the completion phase | 1366 | * Store the midlayer's command structure for the completion phase |
985 | * and complete the command initialization. | 1367 | * and complete the command initialization. |
@@ -987,6 +1369,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | |||
987 | lpfc_cmd->pCmd = cmnd; | 1369 | lpfc_cmd->pCmd = cmnd; |
988 | lpfc_cmd->rdata = rdata; | 1370 | lpfc_cmd->rdata = rdata; |
989 | lpfc_cmd->timeout = 0; | 1371 | lpfc_cmd->timeout = 0; |
1372 | lpfc_cmd->start_time = jiffies; | ||
990 | cmnd->host_scribble = (unsigned char *)lpfc_cmd; | 1373 | cmnd->host_scribble = (unsigned char *)lpfc_cmd; |
991 | cmnd->scsi_done = done; | 1374 | cmnd->scsi_done = done; |
992 | 1375 | ||
@@ -996,6 +1379,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | |||
996 | 1379 | ||
997 | lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); | 1380 | lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); |
998 | 1381 | ||
1382 | atomic_inc(&ndlp->cmd_pending); | ||
999 | err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring], | 1383 | err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring], |
1000 | &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); | 1384 | &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); |
1001 | if (err) | 1385 | if (err) |
@@ -1010,6 +1394,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | |||
1010 | return 0; | 1394 | return 0; |
1011 | 1395 | ||
1012 | out_host_busy_free_buf: | 1396 | out_host_busy_free_buf: |
1397 | atomic_dec(&ndlp->cmd_pending); | ||
1013 | lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); | 1398 | lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); |
1014 | lpfc_release_scsi_buf(phba, lpfc_cmd); | 1399 | lpfc_release_scsi_buf(phba, lpfc_cmd); |
1015 | out_host_busy: | 1400 | out_host_busy: |
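The queuecommand hunks above add a per-target outstanding-command cap: the count is checked against ndlp->cmd_qdepth before a scsi buffer is taken, incremented just before the IOCB is issued, and decremented again on the issue-failure path. A condensed sketch of that pattern (the struct and helper names are illustrative; the real driver spreads the three steps across lpfc_queuecommand):

struct tgt_throttle {			/* stands in for struct lpfc_nodelist */
	atomic_t cmd_pending;		/* commands currently outstanding */
	uint32_t cmd_qdepth;		/* per-target limit */
};

/* Returns 0 when the command may proceed, -EBUSY to ask the midlayer to retry. */
static int tgt_throttle_enter(struct tgt_throttle *tgt)
{
	if (atomic_read(&tgt->cmd_pending) >= tgt->cmd_qdepth)
		return -EBUSY;		/* maps to SCSI_MLQUEUE_HOST_BUSY */
	atomic_inc(&tgt->cmd_pending);
	return 0;
}

static void tgt_throttle_exit(struct tgt_throttle *tgt)
{
	atomic_dec(&tgt->cmd_pending);	/* on completion or issue failure */
}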
@@ -1145,6 +1530,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) | |||
1145 | int ret = SUCCESS; | 1530 | int ret = SUCCESS; |
1146 | int status; | 1531 | int status; |
1147 | int cnt; | 1532 | int cnt; |
1533 | struct lpfc_scsi_event_header scsi_event; | ||
1148 | 1534 | ||
1149 | lpfc_block_error_handler(cmnd); | 1535 | lpfc_block_error_handler(cmnd); |
1150 | /* | 1536 | /* |
@@ -1163,6 +1549,19 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) | |||
1163 | break; | 1549 | break; |
1164 | pnode = rdata->pnode; | 1550 | pnode = rdata->pnode; |
1165 | } | 1551 | } |
1552 | |||
1553 | scsi_event.event_type = FC_REG_SCSI_EVENT; | ||
1554 | scsi_event.subcategory = LPFC_EVENT_TGTRESET; | ||
1555 | scsi_event.lun = 0; | ||
1556 | memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); | ||
1557 | memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); | ||
1558 | |||
1559 | fc_host_post_vendor_event(shost, | ||
1560 | fc_get_event_number(), | ||
1561 | sizeof(scsi_event), | ||
1562 | (char *)&scsi_event, | ||
1563 | SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); | ||
1564 | |||
1166 | if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) { | 1565 | if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) { |
1167 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, | 1566 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, |
1168 | "0721 LUN Reset rport " | 1567 | "0721 LUN Reset rport " |
@@ -1242,10 +1641,23 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) | |||
1242 | struct lpfc_hba *phba = vport->phba; | 1641 | struct lpfc_hba *phba = vport->phba; |
1243 | struct lpfc_nodelist *ndlp = NULL; | 1642 | struct lpfc_nodelist *ndlp = NULL; |
1244 | int match; | 1643 | int match; |
1245 | int ret = SUCCESS, status, i; | 1644 | int ret = SUCCESS, status = SUCCESS, i; |
1246 | int cnt; | 1645 | int cnt; |
1247 | struct lpfc_scsi_buf * lpfc_cmd; | 1646 | struct lpfc_scsi_buf * lpfc_cmd; |
1248 | unsigned long later; | 1647 | unsigned long later; |
1648 | struct lpfc_scsi_event_header scsi_event; | ||
1649 | |||
1650 | scsi_event.event_type = FC_REG_SCSI_EVENT; | ||
1651 | scsi_event.subcategory = LPFC_EVENT_BUSRESET; | ||
1652 | scsi_event.lun = 0; | ||
1653 | memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name)); | ||
1654 | memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name)); | ||
1655 | |||
1656 | fc_host_post_vendor_event(shost, | ||
1657 | fc_get_event_number(), | ||
1658 | sizeof(scsi_event), | ||
1659 | (char *)&scsi_event, | ||
1660 | SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); | ||
1249 | 1661 | ||
1250 | lpfc_block_error_handler(cmnd); | 1662 | lpfc_block_error_handler(cmnd); |
1251 | /* | 1663 | /* |
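Both reset handlers above now report the reset to management applications through the FC transport's vendor-unique netlink event channel. A minimal sketch of that call sequence, using the fields filled in by the hunks above (the wrapper function is illustrative):

static void post_reset_event_sketch(struct Scsi_Host *shost,
				    struct lpfc_scsi_event_header *scsi_event)
{
	/* scsi_event->subcategory, wwpn and wwnn are filled by the caller,
	 * e.g. LPFC_EVENT_TGTRESET or LPFC_EVENT_BUSRESET as above */
	scsi_event->event_type = FC_REG_SCSI_EVENT;

	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(*scsi_event), (char *)scsi_event,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}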
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h index daba92374985..437f182e2322 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.h +++ b/drivers/scsi/lpfc/lpfc_scsi.h | |||
@@ -107,6 +107,10 @@ struct fcp_cmnd { | |||
107 | 107 | ||
108 | }; | 108 | }; |
109 | 109 | ||
110 | struct lpfc_scsicmd_bkt { | ||
111 | uint32_t cmd_count; | ||
112 | }; | ||
113 | |||
110 | struct lpfc_scsi_buf { | 114 | struct lpfc_scsi_buf { |
111 | struct list_head list; | 115 | struct list_head list; |
112 | struct scsi_cmnd *pCmd; | 116 | struct scsi_cmnd *pCmd; |
@@ -139,6 +143,7 @@ struct lpfc_scsi_buf { | |||
139 | */ | 143 | */ |
140 | struct lpfc_iocbq cur_iocbq; | 144 | struct lpfc_iocbq cur_iocbq; |
141 | wait_queue_head_t *waitq; | 145 | wait_queue_head_t *waitq; |
146 | unsigned long start_time; | ||
142 | }; | 147 | }; |
143 | 148 | ||
144 | #define LPFC_SCSI_DMA_EXT_SIZE 264 | 149 | #define LPFC_SCSI_DMA_EXT_SIZE 264 |
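lpfc_scsi.h gains a per-bucket counter (struct lpfc_scsicmd_bkt) and a start_time stamp in the scsi buffer, the raw material for the statistical reporting added by this series. A purely hypothetical sketch of how such a stamp and counter array can be turned into a latency histogram (bucket width, array length and helper name are invented for illustration):

#define EX_NUM_BKTS	16		/* illustrative bucket count */
#define EX_BKT_MSECS	10		/* illustrative bucket width in ms */

static void record_latency_sketch(struct lpfc_scsicmd_bkt *bkt,
				  unsigned long start_time)
{
	unsigned long msecs = jiffies_to_msecs(jiffies - start_time);
	unsigned int idx = msecs / EX_BKT_MSECS;

	if (idx >= EX_NUM_BKTS)
		idx = EX_NUM_BKTS - 1;	/* clamp very slow commands */
	bkt[idx].cmd_count++;		/* one more command in this bucket */
}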
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 50fe07646738..8ab5babdeebc 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
@@ -32,6 +32,7 @@ | |||
32 | 32 | ||
33 | #include "lpfc_hw.h" | 33 | #include "lpfc_hw.h" |
34 | #include "lpfc_sli.h" | 34 | #include "lpfc_sli.h" |
35 | #include "lpfc_nl.h" | ||
35 | #include "lpfc_disc.h" | 36 | #include "lpfc_disc.h" |
36 | #include "lpfc_scsi.h" | 37 | #include "lpfc_scsi.h" |
37 | #include "lpfc.h" | 38 | #include "lpfc.h" |
@@ -66,10 +67,16 @@ typedef enum _lpfc_iocb_type { | |||
66 | LPFC_ABORT_IOCB | 67 | LPFC_ABORT_IOCB |
67 | } lpfc_iocb_type; | 68 | } lpfc_iocb_type; |
68 | 69 | ||
69 | /* SLI-2/SLI-3 provide different sized iocbs. Given a pointer | 70 | /** |
70 | * to the start of the ring, and the slot number of the | 71 | * lpfc_cmd_iocb: Get next command iocb entry in the ring. |
71 | * desired iocb entry, calc a pointer to that entry. | 72 | * @phba: Pointer to HBA context object. |
72 | */ | 73 | * @pring: Pointer to driver SLI ring object. |
74 | * | ||
75 | * This function returns pointer to next command iocb entry | ||
76 | * in the command ring. The caller must hold hbalock to prevent | ||
77 | * other threads from consuming the next command iocb. | ||
78 | * SLI-2/SLI-3 provide different sized iocbs. | ||
79 | **/ | ||
73 | static inline IOCB_t * | 80 | static inline IOCB_t * |
74 | lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) | 81 | lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) |
75 | { | 82 | { |
@@ -77,6 +84,16 @@ lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) | |||
77 | pring->cmdidx * phba->iocb_cmd_size); | 84 | pring->cmdidx * phba->iocb_cmd_size); |
78 | } | 85 | } |
79 | 86 | ||
87 | /** | ||
88 | * lpfc_resp_iocb: Get next response iocb entry in the ring. | ||
89 | * @phba: Pointer to HBA context object. | ||
90 | * @pring: Pointer to driver SLI ring object. | ||
91 | * | ||
92 | * This function returns pointer to next response iocb entry | ||
93 | * in the response ring. The caller must hold hbalock to make sure | ||
94 | * that no other thread consumes the next response iocb. | ||
95 | * SLI-2/SLI-3 provide different sized iocbs. | ||
96 | **/ | ||
80 | static inline IOCB_t * | 97 | static inline IOCB_t * |
81 | lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) | 98 | lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) |
82 | { | 99 | { |
@@ -84,6 +101,15 @@ lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) | |||
84 | pring->rspidx * phba->iocb_rsp_size); | 101 | pring->rspidx * phba->iocb_rsp_size); |
85 | } | 102 | } |
86 | 103 | ||
104 | /** | ||
105 | * __lpfc_sli_get_iocbq: Allocates an iocb object from iocb pool. | ||
106 | * @phba: Pointer to HBA context object. | ||
107 | * | ||
108 | * This function is called with hbalock held. This function | ||
109 | * allocates a new driver iocb object from the iocb pool. If the | ||
110 | * allocation is successful, it returns pointer to the newly | ||
111 | * allocated iocb object else it returns NULL. | ||
112 | **/ | ||
87 | static struct lpfc_iocbq * | 113 | static struct lpfc_iocbq * |
88 | __lpfc_sli_get_iocbq(struct lpfc_hba *phba) | 114 | __lpfc_sli_get_iocbq(struct lpfc_hba *phba) |
89 | { | 115 | { |
@@ -94,6 +120,15 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba) | |||
94 | return iocbq; | 120 | return iocbq; |
95 | } | 121 | } |
96 | 122 | ||
123 | /** | ||
124 | * lpfc_sli_get_iocbq: Allocates an iocb object from iocb pool. | ||
125 | * @phba: Pointer to HBA context object. | ||
126 | * | ||
127 | * This function is called with no lock held. This function | ||
128 | * allocates a new driver iocb object from the iocb pool. If the | ||
129 | * allocation is successful, it returns pointer to the newly | ||
130 | * allocated iocb object else it returns NULL. | ||
131 | **/ | ||
97 | struct lpfc_iocbq * | 132 | struct lpfc_iocbq * |
98 | lpfc_sli_get_iocbq(struct lpfc_hba *phba) | 133 | lpfc_sli_get_iocbq(struct lpfc_hba *phba) |
99 | { | 134 | { |
@@ -106,6 +141,16 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba) | |||
106 | return iocbq; | 141 | return iocbq; |
107 | } | 142 | } |
108 | 143 | ||
144 | /** | ||
145 | * __lpfc_sli_release_iocbq: Release iocb to the iocb pool. | ||
146 | * @phba: Pointer to HBA context object. | ||
147 | * @iocbq: Pointer to driver iocb object. | ||
148 | * | ||
149 | * This function is called with hbalock held to release driver | ||
150 | * iocb object to the iocb pool. The iotag in the iocb object | ||
151 | * does not change for each use of the iocb object. This function | ||
152 | * clears all other fields of the iocb object when it is freed. | ||
153 | **/ | ||
109 | static void | 154 | static void |
110 | __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) | 155 | __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) |
111 | { | 156 | { |
@@ -118,6 +163,14 @@ __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) | |||
118 | list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); | 163 | list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); |
119 | } | 164 | } |
120 | 165 | ||
166 | /** | ||
167 | * lpfc_sli_release_iocbq: Release iocb to the iocb pool. | ||
168 | * @phba: Pointer to HBA context object. | ||
169 | * @iocbq: Pointer to driver iocb object. | ||
170 | * | ||
171 | * This function is called with no lock held to release the iocb to | ||
172 | * iocb pool. | ||
173 | **/ | ||
121 | void | 174 | void |
122 | lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) | 175 | lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) |
123 | { | 176 | { |
@@ -131,10 +184,21 @@ lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) | |||
131 | spin_unlock_irqrestore(&phba->hbalock, iflags); | 184 | spin_unlock_irqrestore(&phba->hbalock, iflags); |
132 | } | 185 | } |
133 | 186 | ||
134 | /* | 187 | /** |
135 | * Translate the iocb command to an iocb command type used to decide the final | 188 | * lpfc_sli_iocb_cmd_type: Get the iocb type. |
136 | * disposition of each completed IOCB. | 189 | * @iocb_cmnd : iocb command code. |
137 | */ | 190 | * |
191 | * This function is called by ring event handler function to get the iocb type. | ||
192 | * This function translates the iocb command to an iocb command type used to | ||
193 | * decide the final disposition of each completed IOCB. | ||
194 | * The function returns | ||
195 | * LPFC_UNKNOWN_IOCB if it is an unsupported iocb | ||
196 | * LPFC_SOL_IOCB if it is a solicited iocb completion | ||
197 | * LPFC_ABORT_IOCB if it is an abort iocb | ||
198 | * LPFC_UNSOL_IOCB if it is an unsolicited iocb | ||
199 | * | ||
200 | * The caller is not required to hold any lock. | ||
201 | **/ | ||
138 | static lpfc_iocb_type | 202 | static lpfc_iocb_type |
139 | lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) | 203 | lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) |
140 | { | 204 | { |
@@ -230,6 +294,17 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) | |||
230 | return type; | 294 | return type; |
231 | } | 295 | } |
232 | 296 | ||
297 | /** | ||
298 | * lpfc_sli_ring_map: Issue config_ring mbox for all rings. | ||
299 | * @phba: Pointer to HBA context object. | ||
300 | * | ||
301 | * This function is called from SLI initialization code | ||
302 | * to configure every ring of the HBA's SLI interface. The | ||
303 | * caller is not required to hold any lock. This function issues | ||
304 | * a config_ring mailbox command for each ring. | ||
305 | * This function returns zero if successful else returns a negative | ||
306 | * error code. | ||
307 | **/ | ||
233 | static int | 308 | static int |
234 | lpfc_sli_ring_map(struct lpfc_hba *phba) | 309 | lpfc_sli_ring_map(struct lpfc_hba *phba) |
235 | { | 310 | { |
@@ -262,6 +337,18 @@ lpfc_sli_ring_map(struct lpfc_hba *phba) | |||
262 | return ret; | 337 | return ret; |
263 | } | 338 | } |
264 | 339 | ||
340 | /** | ||
341 | * lpfc_sli_ringtxcmpl_put: Adds new iocb to the txcmplq. | ||
342 | * @phba: Pointer to HBA context object. | ||
343 | * @pring: Pointer to driver SLI ring object. | ||
344 | * @piocb: Pointer to the driver iocb object. | ||
345 | * | ||
346 | * This function is called with hbalock held. The function adds the | ||
347 | * new iocb to txcmplq of the given ring. This function always returns | ||
348 | * 0. If this function is called for ELS ring, this function checks if | ||
349 | * there is a vport associated with the ELS command. This function also | ||
350 | * starts els_tmofunc timer if this is an ELS command. | ||
351 | **/ | ||
265 | static int | 352 | static int |
266 | lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | 353 | lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
267 | struct lpfc_iocbq *piocb) | 354 | struct lpfc_iocbq *piocb) |
@@ -282,6 +369,16 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
282 | return 0; | 369 | return 0; |
283 | } | 370 | } |
284 | 371 | ||
372 | /** | ||
373 | * lpfc_sli_ringtx_get: Get first element of the txq. | ||
374 | * @phba: Pointer to HBA context object. | ||
375 | * @pring: Pointer to driver SLI ring object. | ||
376 | * | ||
377 | * This function is called with hbalock held to get next | ||
378 | * iocb in txq of the given ring. If there is any iocb in | ||
379 | * the txq, the function returns first iocb in the list after | ||
380 | * removing the iocb from the list, else it returns NULL. | ||
381 | **/ | ||
285 | static struct lpfc_iocbq * | 382 | static struct lpfc_iocbq * |
286 | lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) | 383 | lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) |
287 | { | 384 | { |
@@ -293,14 +390,25 @@ lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) | |||
293 | return cmd_iocb; | 390 | return cmd_iocb; |
294 | } | 391 | } |
295 | 392 | ||
393 | /** | ||
394 | * lpfc_sli_next_iocb_slot: Get next iocb slot in the ring. | ||
395 | * @phba: Pointer to HBA context object. | ||
396 | * @pring: Pointer to driver SLI ring object. | ||
397 | * | ||
398 | * This function is called with hbalock held and the caller must post the | ||
399 | * iocb without releasing the lock. If the caller releases the lock, | ||
400 | * iocb slot returned by the function is not guaranteed to be available. | ||
401 | * The function returns pointer to the next available iocb slot if there | ||
402 | * is available slot in the ring, else it returns NULL. | ||
403 | * If the get index of the ring is ahead of the put index, the function | ||
404 | * will post an error attention event to the worker thread to take the | ||
405 | * HBA to offline state. | ||
406 | **/ | ||
296 | static IOCB_t * | 407 | static IOCB_t * |
297 | lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) | 408 | lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) |
298 | { | 409 | { |
299 | struct lpfc_pgp *pgp = (phba->sli_rev == 3) ? | 410 | struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; |
300 | &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] : | ||
301 | &phba->slim2p->mbx.us.s2.port[pring->ringno]; | ||
302 | uint32_t max_cmd_idx = pring->numCiocb; | 411 | uint32_t max_cmd_idx = pring->numCiocb; |
303 | |||
304 | if ((pring->next_cmdidx == pring->cmdidx) && | 412 | if ((pring->next_cmdidx == pring->cmdidx) && |
305 | (++pring->next_cmdidx >= max_cmd_idx)) | 413 | (++pring->next_cmdidx >= max_cmd_idx)) |
306 | pring->next_cmdidx = 0; | 414 | pring->next_cmdidx = 0; |
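lpfc_sli_next_iocb_slot() now reads its port group pointer from the cached phba->port_gp array instead of recomputing the SLI-2/SLI-3 layout on every call; the wraparound logic itself is unchanged. A sketch of that circular-index bookkeeping (local_getidx is assumed from the ring structure; the helper is illustrative):

static bool cmd_slot_free_sketch(struct lpfc_sli_ring *pring)
{
	uint32_t max_cmd_idx = pring->numCiocb;

	/* advance the shadow put index, wrapping at the ring size */
	if ((pring->next_cmdidx == pring->cmdidx) &&
	    (++pring->next_cmdidx >= max_cmd_idx))
		pring->next_cmdidx = 0;

	/* a get index beyond the ring size means the port state is corrupt;
	 * the real function raises an error attention event in that case */
	if (pring->local_getidx >= max_cmd_idx)
		return false;

	/* the ring has room only while get and put indexes differ */
	return pring->local_getidx != pring->next_cmdidx;
}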
@@ -336,6 +444,18 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) | |||
336 | return lpfc_cmd_iocb(phba, pring); | 444 | return lpfc_cmd_iocb(phba, pring); |
337 | } | 445 | } |
338 | 446 | ||
447 | /** | ||
448 | * lpfc_sli_next_iotag: Get an iotag for the iocb. | ||
449 | * @phba: Pointer to HBA context object. | ||
450 | * @iocbq: Pointer to driver iocb object. | ||
451 | * | ||
452 | * This function gets an iotag for the iocb. If there is no unused iotag and | ||
453 | * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup | ||
454 | * array and assigns a new iotag. | ||
455 | * The function returns the allocated iotag if successful, else returns zero. | ||
456 | * Zero is not a valid iotag. | ||
457 | * The caller is not required to hold any lock. | ||
458 | **/ | ||
339 | uint16_t | 459 | uint16_t |
340 | lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) | 460 | lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) |
341 | { | 461 | { |
@@ -399,6 +519,20 @@ lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) | |||
399 | return 0; | 519 | return 0; |
400 | } | 520 | } |
401 | 521 | ||
522 | /** | ||
523 | * lpfc_sli_submit_iocb: Submit an iocb to the firmware. | ||
524 | * @phba: Pointer to HBA context object. | ||
525 | * @pring: Pointer to driver SLI ring object. | ||
526 | * @iocb: Pointer to iocb slot in the ring. | ||
527 | * @nextiocb: Pointer to driver iocb object which need to be | ||
528 | * posted to firmware. | ||
529 | * | ||
530 | * This function is called with hbalock held to post a new iocb to | ||
531 | * the firmware. This function copies the new iocb to ring iocb slot and | ||
532 | * updates the ring pointers. It adds the new iocb to txcmplq if there is | ||
533 | * a completion call back for this iocb else the function will free the | ||
534 | * iocb object. | ||
535 | **/ | ||
402 | static void | 536 | static void |
403 | lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | 537 | lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
404 | IOCB_t *iocb, struct lpfc_iocbq *nextiocb) | 538 | IOCB_t *iocb, struct lpfc_iocbq *nextiocb) |
@@ -441,6 +575,18 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
441 | writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx); | 575 | writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx); |
442 | } | 576 | } |
443 | 577 | ||
578 | /** | ||
579 | * lpfc_sli_update_full_ring: Update the chip attention register. | ||
580 | * @phba: Pointer to HBA context object. | ||
581 | * @pring: Pointer to driver SLI ring object. | ||
582 | * | ||
583 | * The caller is not required to hold any lock for calling this function. | ||
584 | * This function updates the chip attention bits for the ring to inform firmware | ||
585 | * that there is pending work to be done for this ring and requests an | ||
586 | * interrupt when there is space available in the ring. This function is | ||
587 | * called when the driver is unable to post more iocbs to the ring due | ||
588 | * to unavailability of space in the ring. | ||
589 | **/ | ||
444 | static void | 590 | static void |
445 | lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) | 591 | lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) |
446 | { | 592 | { |
@@ -460,6 +606,15 @@ lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) | |||
460 | pring->stats.iocb_cmd_full++; | 606 | pring->stats.iocb_cmd_full++; |
461 | } | 607 | } |
462 | 608 | ||
609 | /** | ||
610 | * lpfc_sli_update_ring: Update chip attention register. | ||
611 | * @phba: Pointer to HBA context object. | ||
612 | * @pring: Pointer to driver SLI ring object. | ||
613 | * | ||
614 | * This function updates the chip attention register bit for the | ||
615 | * given ring to inform HBA that there is more work to be done | ||
616 | * in this ring. The caller is not required to hold any lock. | ||
617 | **/ | ||
463 | static void | 618 | static void |
464 | lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) | 619 | lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) |
465 | { | 620 | { |
@@ -468,11 +623,22 @@ lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) | |||
468 | /* | 623 | /* |
469 | * Tell the HBA that there is work to do in this ring. | 624 | * Tell the HBA that there is work to do in this ring. |
470 | */ | 625 | */ |
471 | wmb(); | 626 | if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) { |
472 | writel(CA_R0ATT << (ringno * 4), phba->CAregaddr); | 627 | wmb(); |
473 | readl(phba->CAregaddr); /* flush */ | 628 | writel(CA_R0ATT << (ringno * 4), phba->CAregaddr); |
629 | readl(phba->CAregaddr); /* flush */ | ||
630 | } | ||
474 | } | 631 | } |
475 | 632 | ||
633 | /** | ||
634 | * lpfc_sli_resume_iocb: Process iocbs in the txq. | ||
635 | * @phba: Pointer to HBA context object. | ||
636 | * @pring: Pointer to driver SLI ring object. | ||
637 | * | ||
638 | * This function is called with hbalock held to post pending iocbs | ||
639 | * in the txq to the firmware. This function is called when driver | ||
640 | * detects space available in the ring. | ||
641 | **/ | ||
476 | static void | 642 | static void |
477 | lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) | 643 | lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) |
478 | { | 644 | { |
@@ -504,6 +670,16 @@ lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) | |||
504 | return; | 670 | return; |
505 | } | 671 | } |
506 | 672 | ||
673 | /** | ||
674 | * lpfc_sli_next_hbq_slot: Get next hbq entry for the HBQ. | ||
675 | * @phba: Pointer to HBA context object. | ||
676 | * @hbqno: HBQ number. | ||
677 | * | ||
678 | * This function is called with hbalock held to get the next | ||
679 | * available slot for the given HBQ. If there is a free slot | ||
680 | * available for the HBQ it will return pointer to the next available | ||
681 | * HBQ entry else it will return NULL. | ||
682 | **/ | ||
507 | static struct lpfc_hbq_entry * | 683 | static struct lpfc_hbq_entry * |
508 | lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) | 684 | lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) |
509 | { | 685 | { |
@@ -539,6 +715,15 @@ lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) | |||
539 | hbqp->hbqPutIdx; | 715 | hbqp->hbqPutIdx; |
540 | } | 716 | } |
541 | 717 | ||
718 | /** | ||
719 | * lpfc_sli_hbqbuf_free_all: Free all the hbq buffers. | ||
720 | * @phba: Pointer to HBA context object. | ||
721 | * | ||
722 | * This function is called with no lock held to free all the | ||
723 | * hbq buffers while uninitializing the SLI interface. It also | ||
724 | * frees the HBQ buffers returned by the firmware but not yet | ||
725 | * processed by the upper layers. | ||
726 | **/ | ||
542 | void | 727 | void |
543 | lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) | 728 | lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) |
544 | { | 729 | { |
@@ -584,6 +769,18 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) | |||
584 | spin_unlock_irqrestore(&phba->hbalock, flags); | 769 | spin_unlock_irqrestore(&phba->hbalock, flags); |
585 | } | 770 | } |
586 | 771 | ||
772 | /** | ||
773 | * lpfc_sli_hbq_to_firmware: Post the hbq buffer to firmware. | ||
774 | * @phba: Pointer to HBA context object. | ||
775 | * @hbqno: HBQ number. | ||
776 | * @hbq_buf: Pointer to HBQ buffer. | ||
777 | * | ||
778 | * This function is called with the hbalock held to post a | ||
779 | * hbq buffer to the firmware. If the function finds an empty | ||
780 | * slot in the HBQ, it will post the buffer. The function will return | ||
781 | * pointer to the hbq entry if it successfully posts the buffer | ||
782 | * else it will return NULL. | ||
783 | **/ | ||
587 | static struct lpfc_hbq_entry * | 784 | static struct lpfc_hbq_entry * |
588 | lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, | 785 | lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, |
589 | struct hbq_dmabuf *hbq_buf) | 786 | struct hbq_dmabuf *hbq_buf) |
@@ -612,6 +809,7 @@ lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, | |||
612 | return hbqe; | 809 | return hbqe; |
613 | } | 810 | } |
614 | 811 | ||
812 | /* HBQ for ELS and CT traffic. */ | ||
615 | static struct lpfc_hbq_init lpfc_els_hbq = { | 813 | static struct lpfc_hbq_init lpfc_els_hbq = { |
616 | .rn = 1, | 814 | .rn = 1, |
617 | .entry_count = 200, | 815 | .entry_count = 200, |
@@ -623,6 +821,7 @@ static struct lpfc_hbq_init lpfc_els_hbq = { | |||
623 | .add_count = 5, | 821 | .add_count = 5, |
624 | }; | 822 | }; |
625 | 823 | ||
824 | /* HBQ for the extra ring if needed */ | ||
626 | static struct lpfc_hbq_init lpfc_extra_hbq = { | 825 | static struct lpfc_hbq_init lpfc_extra_hbq = { |
627 | .rn = 1, | 826 | .rn = 1, |
628 | .entry_count = 200, | 827 | .entry_count = 200, |
@@ -634,51 +833,81 @@ static struct lpfc_hbq_init lpfc_extra_hbq = { | |||
634 | .add_count = 5, | 833 | .add_count = 5, |
635 | }; | 834 | }; |
636 | 835 | ||
836 | /* Array of HBQs */ | ||
637 | struct lpfc_hbq_init *lpfc_hbq_defs[] = { | 837 | struct lpfc_hbq_init *lpfc_hbq_defs[] = { |
638 | &lpfc_els_hbq, | 838 | &lpfc_els_hbq, |
639 | &lpfc_extra_hbq, | 839 | &lpfc_extra_hbq, |
640 | }; | 840 | }; |
641 | 841 | ||
842 | /** | ||
843 | * lpfc_sli_hbqbuf_fill_hbqs: Post more hbq buffers to HBQ. | ||
844 | * @phba: Pointer to HBA context object. | ||
845 | * @hbqno: HBQ number. | ||
846 | * @count: Number of HBQ buffers to be posted. | ||
847 | * | ||
848 | * This function is called with no lock held to post more hbq buffers to the | ||
849 | * given HBQ. The function returns the number of HBQ buffers successfully | ||
850 | * posted. | ||
851 | **/ | ||
642 | static int | 852 | static int |
643 | lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) | 853 | lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) |
644 | { | 854 | { |
645 | uint32_t i, start, end; | 855 | uint32_t i, posted = 0; |
646 | unsigned long flags; | 856 | unsigned long flags; |
647 | struct hbq_dmabuf *hbq_buffer; | 857 | struct hbq_dmabuf *hbq_buffer; |
648 | 858 | LIST_HEAD(hbq_buf_list); | |
649 | if (!phba->hbqs[hbqno].hbq_alloc_buffer) | 859 | if (!phba->hbqs[hbqno].hbq_alloc_buffer) |
650 | return 0; | 860 | return 0; |
651 | 861 | ||
652 | start = phba->hbqs[hbqno].buffer_count; | 862 | if ((phba->hbqs[hbqno].buffer_count + count) > |
653 | end = count + start; | 863 | lpfc_hbq_defs[hbqno]->entry_count) |
654 | if (end > lpfc_hbq_defs[hbqno]->entry_count) | 864 | count = lpfc_hbq_defs[hbqno]->entry_count - |
655 | end = lpfc_hbq_defs[hbqno]->entry_count; | 865 | phba->hbqs[hbqno].buffer_count; |
656 | 866 | if (!count) | |
867 | return 0; | ||
868 | /* Allocate HBQ entries */ | ||
869 | for (i = 0; i < count; i++) { | ||
870 | hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); | ||
871 | if (!hbq_buffer) | ||
872 | break; | ||
873 | list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list); | ||
874 | } | ||
657 | /* Check whether HBQ is still in use */ | 875 | /* Check whether HBQ is still in use */ |
658 | spin_lock_irqsave(&phba->hbalock, flags); | 876 | spin_lock_irqsave(&phba->hbalock, flags); |
659 | if (!phba->hbq_in_use) | 877 | if (!phba->hbq_in_use) |
660 | goto out; | 878 | goto err; |
661 | 879 | while (!list_empty(&hbq_buf_list)) { | |
662 | /* Populate HBQ entries */ | 880 | list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, |
663 | for (i = start; i < end; i++) { | 881 | dbuf.list); |
664 | hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); | 882 | hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | |
665 | if (!hbq_buffer) | 883 | (hbqno << 16)); |
666 | goto err; | 884 | if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { |
667 | hbq_buffer->tag = (i | (hbqno << 16)); | ||
668 | if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) | ||
669 | phba->hbqs[hbqno].buffer_count++; | 885 | phba->hbqs[hbqno].buffer_count++; |
670 | else | 886 | posted++; |
887 | } else | ||
671 | (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); | 888 | (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); |
672 | } | 889 | } |
673 | |||
674 | out: | ||
675 | spin_unlock_irqrestore(&phba->hbalock, flags); | 890 | spin_unlock_irqrestore(&phba->hbalock, flags); |
676 | return 0; | 891 | return posted; |
677 | err: | 892 | err: |
678 | spin_unlock_irqrestore(&phba->hbalock, flags); | 893 | spin_unlock_irqrestore(&phba->hbalock, flags); |
679 | return 1; | 894 | while (!list_empty(&hbq_buf_list)) { |
895 | list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, | ||
896 | dbuf.list); | ||
897 | (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); | ||
898 | } | ||
899 | return 0; | ||
680 | } | 900 | } |
681 | 901 | ||
902 | /** | ||
903 | * lpfc_sli_hbqbuf_add_hbqs: Post more HBQ buffers to firmware. | ||
904 | * @phba: Pointer to HBA context object. | ||
905 | * @qno: HBQ number. | ||
906 | * | ||
907 | * This function posts more buffers to the HBQ. This function | ||
908 | * is called with no lock held. The function returns the number of HBQ entries | ||
909 | * successfully allocated. | ||
910 | **/ | ||
682 | int | 911 | int |
683 | lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) | 912 | lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) |
684 | { | 913 | { |
@@ -686,6 +915,15 @@ lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) | |||
686 | lpfc_hbq_defs[qno]->add_count)); | 915 | lpfc_hbq_defs[qno]->add_count)); |
687 | } | 916 | } |
688 | 917 | ||
918 | /** | ||
919 | * lpfc_sli_hbqbuf_init_hbqs: Post initial buffers to the HBQ. | ||
920 | * @phba: Pointer to HBA context object. | ||
921 | * @qno: HBQ queue number. | ||
922 | * | ||
923 | * This function is called from SLI initialization code path with | ||
924 | * no lock held to post initial HBQ buffers to firmware. The | ||
925 | * function returns the number of HBQ entries successfully allocated. | ||
926 | **/ | ||
689 | static int | 927 | static int |
690 | lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) | 928 | lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) |
691 | { | 929 | { |
@@ -693,6 +931,16 @@ lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) | |||
693 | lpfc_hbq_defs[qno]->init_count)); | 931 | lpfc_hbq_defs[qno]->init_count)); |
694 | } | 932 | } |
695 | 933 | ||
934 | /** | ||
935 | * lpfc_sli_hbqbuf_find: Find the hbq buffer associated with a tag. | ||
936 | * @phba: Pointer to HBA context object. | ||
937 | * @tag: Tag of the hbq buffer. | ||
938 | * | ||
939 | * This function is called with hbalock held. This function searches | ||
940 | * for the hbq buffer associated with the given tag in the hbq buffer | ||
941 | * list. If it finds the hbq buffer, it returns the hbq_buffer, otherwise | ||
942 | * it returns NULL. | ||
943 | **/ | ||
696 | static struct hbq_dmabuf * | 944 | static struct hbq_dmabuf * |
697 | lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) | 945 | lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) |
698 | { | 946 | { |
@@ -716,6 +964,15 @@ lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) | |||
716 | return NULL; | 964 | return NULL; |
717 | } | 965 | } |
718 | 966 | ||
967 | /** | ||
968 | * lpfc_sli_free_hbq: Give back the hbq buffer to firmware. | ||
969 | * @phba: Pointer to HBA context object. | ||
970 | * @hbq_buffer: Pointer to HBQ buffer. | ||
971 | * | ||
972 | * This function is called with hbalock held. This function gives back | ||
973 | * the hbq buffer to firmware. If the HBQ does not have space to | ||
974 | * post the buffer, it will free the buffer. | ||
975 | **/ | ||
719 | void | 976 | void |
720 | lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer) | 977 | lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer) |
721 | { | 978 | { |
@@ -729,6 +986,15 @@ lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer) | |||
729 | } | 986 | } |
730 | } | 987 | } |
731 | 988 | ||
989 | /** | ||
990 | * lpfc_sli_chk_mbx_command: Check if the mailbox is a legitimate mailbox. | ||
991 | * @mbxCommand: mailbox command code. | ||
992 | * | ||
993 | * This function is called by the mailbox event handler function to verify | ||
994 | * that the completed mailbox command is a legitimate mailbox command. If the | ||
995 | * completed mailbox is not known to the function, it will return MBX_SHUTDOWN | ||
996 | * and the mailbox event handler will take the HBA offline. | ||
997 | **/ | ||
732 | static int | 998 | static int |
733 | lpfc_sli_chk_mbx_command(uint8_t mbxCommand) | 999 | lpfc_sli_chk_mbx_command(uint8_t mbxCommand) |
734 | { | 1000 | { |
@@ -785,6 +1051,8 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand) | |||
785 | case MBX_REG_VPI: | 1051 | case MBX_REG_VPI: |
786 | case MBX_UNREG_VPI: | 1052 | case MBX_UNREG_VPI: |
787 | case MBX_HEARTBEAT: | 1053 | case MBX_HEARTBEAT: |
1054 | case MBX_PORT_CAPABILITIES: | ||
1055 | case MBX_PORT_IOV_CONTROL: | ||
788 | ret = mbxCommand; | 1056 | ret = mbxCommand; |
789 | break; | 1057 | break; |
790 | default: | 1058 | default: |
@@ -793,6 +1061,19 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand) | |||
793 | } | 1061 | } |
794 | return ret; | 1062 | return ret; |
795 | } | 1063 | } |
1064 | |||
1065 | /** | ||
1066 | * lpfc_sli_wake_mbox_wait: Completion handler for mbox issued from | ||
1067 | * lpfc_sli_issue_mbox_wait. | ||
1068 | * @phba: Pointer to HBA context object. | ||
1069 | * @pmboxq: Pointer to mailbox command. | ||
1070 | * | ||
1071 | * This is completion handler function for mailbox commands issued from | ||
1072 | * lpfc_sli_issue_mbox_wait function. This function is called by the | ||
1073 | * mailbox event handler function with no lock held. This function | ||
1074 | * will wake up thread waiting on the wait queue pointed by context1 | ||
1075 | * of the mailbox. | ||
1076 | **/ | ||
796 | static void | 1077 | static void |
797 | lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) | 1078 | lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) |
798 | { | 1079 | { |
@@ -812,6 +1093,17 @@ lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) | |||
812 | return; | 1093 | return; |
813 | } | 1094 | } |
814 | 1095 | ||
1096 | |||
1097 | /** | ||
1098 | * lpfc_sli_def_mbox_cmpl: Default mailbox completion handler. | ||
1099 | * @phba: Pointer to HBA context object. | ||
1100 | * @pmb: Pointer to mailbox object. | ||
1101 | * | ||
1102 | * This function is the default mailbox completion handler. It | ||
1103 | * frees the memory resources associated with the completed mailbox | ||
1104 | * command. If the completed command is a REG_LOGIN mailbox command, | ||
1105 | * this function will issue a UREG_LOGIN to re-claim the RPI. | ||
1106 | **/ | ||
815 | void | 1107 | void |
816 | lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | 1108 | lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) |
817 | { | 1109 | { |
@@ -846,6 +1138,19 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
846 | return; | 1138 | return; |
847 | } | 1139 | } |
848 | 1140 | ||
1141 | /** | ||
1142 | * lpfc_sli_handle_mb_event: Handle mailbox completions from firmware. | ||
1143 | * @phba: Pointer to HBA context object. | ||
1144 | * | ||
1145 | * This function is called with no lock held. This function processes all | ||
1146 | * the completed mailbox commands and gives it to upper layers. The interrupt | ||
1147 | * service routine processes mailbox completion interrupt and adds completed | ||
1148 | * mailbox commands to the mboxq_cmpl queue and signals the worker thread. | ||
1149 | * Worker thread call lpfc_sli_handle_mb_event, which will return the | ||
1150 | * completed mailbox commands in mboxq_cmpl queue to the upper layers. This | ||
1151 | * function returns the mailbox commands to the upper layer by calling the | ||
1152 | * completion handler function of each mailbox. | ||
1153 | **/ | ||
849 | int | 1154 | int |
850 | lpfc_sli_handle_mb_event(struct lpfc_hba *phba) | 1155 | lpfc_sli_handle_mb_event(struct lpfc_hba *phba) |
851 | { | 1156 | { |
@@ -953,6 +1258,18 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba) | |||
953 | return 0; | 1258 | return 0; |
954 | } | 1259 | } |
955 | 1260 | ||
1261 | /** | ||
1262 | * lpfc_sli_replace_hbqbuff: Replace the HBQ buffer with a new buffer. | ||
1263 | * @phba: Pointer to HBA context object. | ||
1264 | * @tag: Tag for the HBQ buffer. | ||
1265 | * | ||
1266 | * This function is called from unsolicited event handler code path to get the | ||
1267 | * HBQ buffer associated with an unsolicited iocb. This function is called with | ||
1268 | * no lock held. It returns the buffer associated with the given tag and posts | ||
1269 | * another buffer to the firmware. Note that the new buffer must be allocated | ||
1270 | * before taking the hbalock and that the hba lock must be held until it is | ||
1271 | * finished with the hbq entry swap. | ||
1272 | **/ | ||
956 | static struct lpfc_dmabuf * | 1273 | static struct lpfc_dmabuf * |
957 | lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag) | 1274 | lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag) |
958 | { | 1275 | { |
@@ -962,22 +1279,28 @@ lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag) | |||
962 | dma_addr_t phys; /* mapped address */ | 1279 | dma_addr_t phys; /* mapped address */ |
963 | unsigned long flags; | 1280 | unsigned long flags; |
964 | 1281 | ||
1282 | hbqno = tag >> 16; | ||
1283 | new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); | ||
965 | /* Check whether HBQ is still in use */ | 1284 | /* Check whether HBQ is still in use */ |
966 | spin_lock_irqsave(&phba->hbalock, flags); | 1285 | spin_lock_irqsave(&phba->hbalock, flags); |
967 | if (!phba->hbq_in_use) { | 1286 | if (!phba->hbq_in_use) { |
1287 | if (new_hbq_entry) | ||
1288 | (phba->hbqs[hbqno].hbq_free_buffer)(phba, | ||
1289 | new_hbq_entry); | ||
968 | spin_unlock_irqrestore(&phba->hbalock, flags); | 1290 | spin_unlock_irqrestore(&phba->hbalock, flags); |
969 | return NULL; | 1291 | return NULL; |
970 | } | 1292 | } |
971 | 1293 | ||
972 | hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); | 1294 | hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); |
973 | if (hbq_entry == NULL) { | 1295 | if (hbq_entry == NULL) { |
1296 | if (new_hbq_entry) | ||
1297 | (phba->hbqs[hbqno].hbq_free_buffer)(phba, | ||
1298 | new_hbq_entry); | ||
974 | spin_unlock_irqrestore(&phba->hbalock, flags); | 1299 | spin_unlock_irqrestore(&phba->hbalock, flags); |
975 | return NULL; | 1300 | return NULL; |
976 | } | 1301 | } |
977 | list_del(&hbq_entry->dbuf.list); | 1302 | list_del(&hbq_entry->dbuf.list); |
978 | 1303 | ||
979 | hbqno = tag >> 16; | ||
980 | new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); | ||
981 | if (new_hbq_entry == NULL) { | 1304 | if (new_hbq_entry == NULL) { |
982 | list_add_tail(&hbq_entry->dbuf.list, &phba->hbqbuf_in_list); | 1305 | list_add_tail(&hbq_entry->dbuf.list, &phba->hbqbuf_in_list); |
983 | spin_unlock_irqrestore(&phba->hbalock, flags); | 1306 | spin_unlock_irqrestore(&phba->hbalock, flags); |
@@ -997,6 +1320,18 @@ lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag) | |||
997 | return &new_hbq_entry->dbuf; | 1320 | return &new_hbq_entry->dbuf; |
998 | } | 1321 | } |
999 | 1322 | ||
1323 | /** | ||
1324 | * lpfc_sli_get_buff: Get the buffer associated with the buffer tag. | ||
1325 | * @phba: Pointer to HBA context object. | ||
1326 | * @pring: Pointer to driver SLI ring object. | ||
1327 | * @tag: buffer tag. | ||
1328 | * | ||
1329 | * This function is called with no lock held. When QUE_BUFTAG_BIT bit | ||
1330 | * is set in the tag the buffer is posted for a particular exchange, | ||
1331 | * the function will return the buffer without replacing the buffer. | ||
1332 | * If the buffer is for unsolicited ELS or CT traffic, this function | ||
1333 | * returns the buffer and also posts another buffer to the firmware. | ||
1334 | **/ | ||
1000 | static struct lpfc_dmabuf * | 1335 | static struct lpfc_dmabuf * |
1001 | lpfc_sli_get_buff(struct lpfc_hba *phba, | 1336 | lpfc_sli_get_buff(struct lpfc_hba *phba, |
1002 | struct lpfc_sli_ring *pring, | 1337 | struct lpfc_sli_ring *pring, |
@@ -1008,6 +1343,21 @@ lpfc_sli_get_buff(struct lpfc_hba *phba, | |||
1008 | return lpfc_sli_replace_hbqbuff(phba, tag); | 1343 | return lpfc_sli_replace_hbqbuff(phba, tag); |
1009 | } | 1344 | } |
1010 | 1345 | ||
1346 | |||
1347 | /** | ||
1348 | * lpfc_sli_process_unsol_iocb: Unsolicited iocb handler. | ||
1349 | * @phba: Pointer to HBA context object. | ||
1350 | * @pring: Pointer to driver SLI ring object. | ||
1351 | * @saveq: Pointer to the unsolicited iocb. | ||
1352 | * | ||
1353 | * This function is called with no lock held by the ring event handler | ||
1354 | * when there is an unsolicited iocb posted to the response ring by the | ||
1355 | * firmware. This function gets the buffer associated with the iocbs | ||
1356 | * and calls the event handler for the ring. This function handles both | ||
1357 | * qring buffers and hbq buffers. | ||
1358 | * When the function returns 1 the caller can free the iocb object otherwise | ||
1359 | * upper layer functions will free the iocb objects. | ||
1360 | **/ | ||
1011 | static int | 1361 | static int |
1012 | lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | 1362 | lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
1013 | struct lpfc_iocbq *saveq) | 1363 | struct lpfc_iocbq *saveq) |
@@ -1192,6 +1542,18 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
1192 | return 1; | 1542 | return 1; |
1193 | } | 1543 | } |
1194 | 1544 | ||
1545 | /** | ||
1546 | * lpfc_sli_iocbq_lookup: Find command iocb for the given response iocb. | ||
1547 | * @phba: Pointer to HBA context object. | ||
1548 | * @pring: Pointer to driver SLI ring object. | ||
1549 | * @prspiocb: Pointer to response iocb object. | ||
1550 | * | ||
1551 | * This function looks up the iocb_lookup table to get the command iocb | ||
1552 | * corresponding to the given response iocb using the iotag of the | ||
1553 | * response iocb. This function is called with the hbalock held. | ||
1554 | * This function returns the command iocb object if it finds the command | ||
1555 | * iocb else returns NULL. | ||
1556 | **/ | ||
1195 | static struct lpfc_iocbq * | 1557 | static struct lpfc_iocbq * |
1196 | lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, | 1558 | lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, |
1197 | struct lpfc_sli_ring *pring, | 1559 | struct lpfc_sli_ring *pring, |
@@ -1217,6 +1579,23 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, | |||
1217 | return NULL; | 1579 | return NULL; |
1218 | } | 1580 | } |
1219 | 1581 | ||
1582 | /** | ||
1583 | * lpfc_sli_process_sol_iocb: process solicited iocb completion. | ||
1584 | * @phba: Pointer to HBA context object. | ||
1585 | * @pring: Pointer to driver SLI ring object. | ||
1586 | * @saveq: Pointer to the response iocb to be processed. | ||
1587 | * | ||
1588 | * This function is called by the ring event handler for non-fcp | ||
1589 | * rings when there is a new response iocb in the response ring. | ||
1590 | * The caller is not required to hold any locks. This function | ||
1591 | * gets the command iocb associated with the response iocb and | ||
1592 | * calls the completion handler for the command iocb. If there | ||
1593 | * is no completion handler, the function will free the resources | ||
1594 | * associated with command iocb. If the response iocb is for | ||
1595 | * an already aborted command iocb, the status of the completion | ||
1596 | * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED. | ||
1597 | * This function always returns 1. | ||
1598 | **/ | ||
1220 | static int | 1599 | static int |
1221 | lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | 1600 | lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
1222 | struct lpfc_iocbq *saveq) | 1601 | struct lpfc_iocbq *saveq) |
@@ -1233,6 +1612,17 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
1233 | if (cmdiocbp) { | 1612 | if (cmdiocbp) { |
1234 | if (cmdiocbp->iocb_cmpl) { | 1613 | if (cmdiocbp->iocb_cmpl) { |
1235 | /* | 1614 | /* |
1615 | * If an ELS command failed send an event to mgmt | ||
1616 | * application. | ||
1617 | */ | ||
1618 | if (saveq->iocb.ulpStatus && | ||
1619 | (pring->ringno == LPFC_ELS_RING) && | ||
1620 | (cmdiocbp->iocb.ulpCommand == | ||
1621 | CMD_ELS_REQUEST64_CR)) | ||
1622 | lpfc_send_els_failure_event(phba, | ||
1623 | cmdiocbp, saveq); | ||
1624 | |||
1625 | /* | ||
1236 | * Post all ELS completions to the worker thread. | 1626 | * Post all ELS completions to the worker thread. |
1237 | * All other are passed to the completion callback. | 1627 | * All other are passed to the completion callback. |
1238 | */ | 1628 | */ |
@@ -1282,12 +1672,20 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
1282 | return rc; | 1672 | return rc; |
1283 | } | 1673 | } |
1284 | 1674 | ||
1675 | /** | ||
1676 | * lpfc_sli_rsp_pointers_error: Response ring pointer error handler. | ||
1677 | * @phba: Pointer to HBA context object. | ||
1678 | * @pring: Pointer to driver SLI ring object. | ||
1679 | * | ||
1680 | * This function is called from the iocb ring event handlers when | ||
1681 | * put pointer is ahead of the get pointer for a ring. This function signals | ||
1682 | * an error attention condition to the worker thread and the worker | ||
1683 | * thread will transition the HBA to offline state. | ||
1684 | **/ | ||
1285 | static void | 1685 | static void |
1286 | lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) | 1686 | lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) |
1287 | { | 1687 | { |
1288 | struct lpfc_pgp *pgp = (phba->sli_rev == 3) ? | 1688 | struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; |
1289 | &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] : | ||
1290 | &phba->slim2p->mbx.us.s2.port[pring->ringno]; | ||
1291 | /* | 1689 | /* |
1292 | * Ring <ringno> handler: portRspPut <portRspPut> is bigger then | 1690 | * Ring <ringno> handler: portRspPut <portRspPut> is bigger then |
1293 | * rsp ring <portRspMax> | 1691 | * rsp ring <portRspMax> |
@@ -1312,6 +1710,51 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) | |||
1312 | return; | 1710 | return; |
1313 | } | 1711 | } |
1314 | 1712 | ||
1713 | /** | ||
1714 | * lpfc_poll_eratt: Error attention polling timer timeout handler. | ||
1715 | * @ptr: Pointer to address of HBA context object. | ||
1716 | * | ||
1717 | * This function is invoked by the Error Attention polling timer when the | ||
1718 | * timer times out. It will check the SLI Error Attention register for | ||
1719 | * possible attention events. If so, it will post an Error Attention event | ||
1720 | * and wake up worker thread to process it. Otherwise, it will set up the | ||
1721 | * Error Attention polling timer for the next poll. | ||
1722 | **/ | ||
1723 | void lpfc_poll_eratt(unsigned long ptr) | ||
1724 | { | ||
1725 | struct lpfc_hba *phba; | ||
1726 | uint32_t eratt = 0; | ||
1727 | |||
1728 | phba = (struct lpfc_hba *)ptr; | ||
1729 | |||
1730 | /* Check chip HA register for error event */ | ||
1731 | eratt = lpfc_sli_check_eratt(phba); | ||
1732 | |||
1733 | if (eratt) | ||
1734 | /* Tell the worker thread there is work to do */ | ||
1735 | lpfc_worker_wake_up(phba); | ||
1736 | else | ||
1737 | /* Restart the timer for next eratt poll */ | ||
1738 | mod_timer(&phba->eratt_poll, jiffies + | ||
1739 | HZ * LPFC_ERATT_POLL_INTERVAL); | ||
1740 | return; | ||
1741 | } | ||
1742 | |||
1743 | /** | ||
1744 | * lpfc_sli_poll_fcp_ring: Handle FCP ring completion in polling mode. | ||
1745 | * @phba: Pointer to HBA context object. | ||
1746 | * | ||
1747 | * This function is called from lpfc_queuecommand, lpfc_poll_timeout, | ||
1748 | * lpfc_abort_handler and lpfc_slave_configure when FCP_RING_POLLING | ||
1749 | * is enabled. | ||
1750 | * | ||
1751 | * The caller does not hold any lock. | ||
1752 | * The function processes each response iocb in the response ring until it | ||
1753 | * finds an iocb with LE bit set and chains all the iocbs up to the iocb with | ||
1754 | * LE bit set. The function will call the completion handler of the command iocb | ||
1755 | * if the response iocb indicates a completion for a command iocb or it is | ||
1756 | * an abort completion. | ||
1757 | **/ | ||
1315 | void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba) | 1758 | void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba) |
1316 | { | 1759 | { |
1317 | struct lpfc_sli *psli = &phba->sli; | 1760 | struct lpfc_sli *psli = &phba->sli; |
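lpfc_poll_eratt() is the timeout handler for the new error-attention polling timer: it checks the attention registers via lpfc_sli_check_eratt() and either wakes the worker thread or re-arms itself. A sketch of arming that timer (the initialization point and helper name are hypothetical; the timer field and interval constant come from the hunk above):

static void start_eratt_poll_sketch(struct lpfc_hba *phba)
{
	setup_timer(&phba->eratt_poll, lpfc_poll_eratt, (unsigned long)phba);
	mod_timer(&phba->eratt_poll,
		  jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
}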
@@ -1320,7 +1763,7 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba) | |||
1320 | IOCB_t *entry = NULL; | 1763 | IOCB_t *entry = NULL; |
1321 | struct lpfc_iocbq *cmdiocbq = NULL; | 1764 | struct lpfc_iocbq *cmdiocbq = NULL; |
1322 | struct lpfc_iocbq rspiocbq; | 1765 | struct lpfc_iocbq rspiocbq; |
1323 | struct lpfc_pgp *pgp; | 1766 | struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; |
1324 | uint32_t status; | 1767 | uint32_t status; |
1325 | uint32_t portRspPut, portRspMax; | 1768 | uint32_t portRspPut, portRspMax; |
1326 | int type; | 1769 | int type; |
@@ -1330,11 +1773,6 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba) | |||
1330 | 1773 | ||
1331 | pring->stats.iocb_event++; | 1774 | pring->stats.iocb_event++; |
1332 | 1775 | ||
1333 | pgp = (phba->sli_rev == 3) ? | ||
1334 | &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] : | ||
1335 | &phba->slim2p->mbx.us.s2.port[pring->ringno]; | ||
1336 | |||
1337 | |||
1338 | /* | 1776 | /* |
1339 | * The next available response entry should never exceed the maximum | 1777 | * The next available response entry should never exceed the maximum |
1340 | * entries. If it does, treat it as an adapter hardware error. | 1778 | * entries. If it does, treat it as an adapter hardware error. |
@@ -1372,8 +1810,8 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba) | |||
1372 | irsp->un.ulpWord[3], | 1810 | irsp->un.ulpWord[3], |
1373 | irsp->un.ulpWord[4], | 1811 | irsp->un.ulpWord[4], |
1374 | irsp->un.ulpWord[5], | 1812 | irsp->un.ulpWord[5], |
1375 | *(((uint32_t *) irsp) + 6), | 1813 | *(uint32_t *)&irsp->un1, |
1376 | *(((uint32_t *) irsp) + 7)); | 1814 | *((uint32_t *)&irsp->un1 + 1)); |
1377 | } | 1815 | } |
1378 | 1816 | ||
1379 | switch (type) { | 1817 | switch (type) { |
@@ -1465,17 +1903,28 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba) | |||
1465 | return; | 1903 | return; |
1466 | } | 1904 | } |
1467 | 1905 | ||
1468 | /* | 1906 | /** |
1907 | * lpfc_sli_handle_fast_ring_event: Handle ring events on FCP ring. | ||
1908 | * @phba: Pointer to HBA context object. | ||
1909 | * @pring: Pointer to driver SLI ring object. | ||
1910 | * @mask: Host attention register mask for this ring. | ||
1911 | * | ||
1912 | * This function is called from the interrupt context when there is a ring | ||
1913 | * event for the fcp ring. The caller does not hold any lock. | ||
1914 | * The function processes each response iocb in the response ring until it | ||
1915 | * finds an iocb with LE bit set and chains all the iocbs up to the iocb with | ||
1916 | * LE bit set. The function will call the completion handler of the command iocb | ||
1917 | * if the response iocb indicates a completion for a command iocb or it is | ||
1918 | * an abort completion. The function will call lpfc_sli_process_unsol_iocb | ||
1919 | * function if this is an unsolicited iocb. | ||
1469 | * This routine presumes LPFC_FCP_RING handling and doesn't bother | 1920 | * This routine presumes LPFC_FCP_RING handling and doesn't bother |
1470 | * to check it explicitly. | 1921 | * to check it explicitly. This function always returns 1. |
1471 | */ | 1922 | **/ |
1472 | static int | 1923 | static int |
1473 | lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, | 1924 | lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, |
1474 | struct lpfc_sli_ring *pring, uint32_t mask) | 1925 | struct lpfc_sli_ring *pring, uint32_t mask) |
1475 | { | 1926 | { |
1476 | struct lpfc_pgp *pgp = (phba->sli_rev == 3) ? | 1927 | struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; |
1477 | &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] : | ||
1478 | &phba->slim2p->mbx.us.s2.port[pring->ringno]; | ||
1479 | IOCB_t *irsp = NULL; | 1928 | IOCB_t *irsp = NULL; |
1480 | IOCB_t *entry = NULL; | 1929 | IOCB_t *entry = NULL; |
1481 | struct lpfc_iocbq *cmdiocbq = NULL; | 1930 | struct lpfc_iocbq *cmdiocbq = NULL; |
@@ -1548,8 +1997,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, | |||
1548 | irsp->un.ulpWord[3], | 1997 | irsp->un.ulpWord[3], |
1549 | irsp->un.ulpWord[4], | 1998 | irsp->un.ulpWord[4], |
1550 | irsp->un.ulpWord[5], | 1999 | irsp->un.ulpWord[5], |
1551 | *(((uint32_t *) irsp) + 6), | 2000 | *(uint32_t *)&irsp->un1, |
1552 | *(((uint32_t *) irsp) + 7)); | 2001 | *((uint32_t *)&irsp->un1 + 1)); |
1553 | } | 2002 | } |
1554 | 2003 | ||
1555 | switch (type) { | 2004 | switch (type) { |
@@ -1646,13 +2095,28 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, | |||
1646 | return rc; | 2095 | return rc; |
1647 | } | 2096 | } |
1648 | 2097 | ||
2098 | /** | ||
2099 | * lpfc_sli_handle_slow_ring_event: Handle ring events for non-FCP rings. | ||
2100 | * @phba: Pointer to HBA context object. | ||
2101 | * @pring: Pointer to driver SLI ring object. | ||
2102 | * @mask: Host attention register mask for this ring. | ||
2103 | * | ||
2104 | * This function is called from the worker thread when there is a ring | ||
2105 | * event for non-fcp rings. The caller does not hold any lock. | ||
2106 | * The function processes each response iocb in the response ring until it | ||
2107 | * finds an iocb with LE bit set and chains all the iocbs up to the iocb with | ||
2108 | * LE bit set. The function will call lpfc_sli_process_sol_iocb function if the | ||
2109 | * response iocb indicates a completion of a command iocb. The function | ||
2110 | * will call lpfc_sli_process_unsol_iocb function if this is an unsolicited | ||
2111 | * iocb. The function frees the resources or calls the completion handler if | ||
2112 | * this iocb is an abort completion. The function returns 0 when the allocated | ||
2113 | * iocbs are not freed, otherwise returns 1. | ||
2114 | **/ | ||
1649 | int | 2115 | int |
1650 | lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, | 2116 | lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, |
1651 | struct lpfc_sli_ring *pring, uint32_t mask) | 2117 | struct lpfc_sli_ring *pring, uint32_t mask) |
1652 | { | 2118 | { |
1653 | struct lpfc_pgp *pgp = (phba->sli_rev == 3) ? | 2119 | struct lpfc_pgp *pgp; |
1654 | &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] : | ||
1655 | &phba->slim2p->mbx.us.s2.port[pring->ringno]; | ||
1656 | IOCB_t *entry; | 2120 | IOCB_t *entry; |
1657 | IOCB_t *irsp = NULL; | 2121 | IOCB_t *irsp = NULL; |
1658 | struct lpfc_iocbq *rspiocbp = NULL; | 2122 | struct lpfc_iocbq *rspiocbp = NULL; |
@@ -1666,6 +2130,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, | |||
1666 | int rc = 1; | 2130 | int rc = 1; |
1667 | unsigned long iflag; | 2131 | unsigned long iflag; |
1668 | 2132 | ||
2133 | pgp = &phba->port_gp[pring->ringno]; | ||
1669 | spin_lock_irqsave(&phba->hbalock, iflag); | 2134 | spin_lock_irqsave(&phba->hbalock, iflag); |
1670 | pring->stats.iocb_event++; | 2135 | pring->stats.iocb_event++; |
1671 | 2136 | ||
@@ -1904,6 +2369,16 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, | |||
1904 | return rc; | 2369 | return rc; |
1905 | } | 2370 | } |
1906 | 2371 | ||
2372 | /** | ||
2373 | * lpfc_sli_abort_iocb_ring: Abort all iocbs in the ring. | ||
2374 | * @phba: Pointer to HBA context object. | ||
2375 | * @pring: Pointer to driver SLI ring object. | ||
2376 | * | ||
2377 | * This function aborts all iocbs in the given ring and frees all the iocb | ||
2378 | * objects in txq. This function issues an abort iocb for all the iocb commands | ||
2379 | * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before | ||
2380 | * the return of this function. The caller is not required to hold any locks. | ||
2381 | **/ | ||
1907 | void | 2382 | void |
1908 | lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) | 2383 | lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) |
1909 | { | 2384 | { |
@@ -1943,6 +2418,83 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) | |||
1943 | } | 2418 | } |
1944 | } | 2419 | } |
1945 | 2420 | ||
2421 | /** | ||
2422 | * lpfc_sli_flush_fcp_rings: flush all iocbs in the fcp ring. | ||
2423 | * @phba: Pointer to HBA context object. | ||
2424 | * | ||
2425 | * This function flushes all iocbs in the fcp ring and frees all the iocb | ||
2426 | * objects in txq and txcmplq. This function will not issue abort iocbs | ||
2427 | * for all the iocb commands in txcmplq; they will just be returned with | ||
2428 | * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI | ||
2429 | * slot has been permanently disabled. | ||
2430 | **/ | ||
2431 | void | ||
2432 | lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) | ||
2433 | { | ||
2434 | LIST_HEAD(txq); | ||
2435 | LIST_HEAD(txcmplq); | ||
2436 | struct lpfc_iocbq *iocb; | ||
2437 | IOCB_t *cmd = NULL; | ||
2438 | struct lpfc_sli *psli = &phba->sli; | ||
2439 | struct lpfc_sli_ring *pring; | ||
2440 | |||
2441 | /* Currently, only one fcp ring */ | ||
2442 | pring = &psli->ring[psli->fcp_ring]; | ||
2443 | |||
2444 | spin_lock_irq(&phba->hbalock); | ||
2445 | /* Retrieve everything on txq */ | ||
2446 | list_splice_init(&pring->txq, &txq); | ||
2447 | pring->txq_cnt = 0; | ||
2448 | |||
2449 | /* Retrieve everything on the txcmplq */ | ||
2450 | list_splice_init(&pring->txcmplq, &txcmplq); | ||
2451 | pring->txcmplq_cnt = 0; | ||
2452 | spin_unlock_irq(&phba->hbalock); | ||
2453 | |||
2454 | /* Flush the txq */ | ||
2455 | while (!list_empty(&txq)) { | ||
2456 | iocb = list_get_first(&txq, struct lpfc_iocbq, list); | ||
2457 | cmd = &iocb->iocb; | ||
2458 | list_del_init(&iocb->list); | ||
2459 | |||
2460 | if (!iocb->iocb_cmpl) | ||
2461 | lpfc_sli_release_iocbq(phba, iocb); | ||
2462 | else { | ||
2463 | cmd->ulpStatus = IOSTAT_LOCAL_REJECT; | ||
2464 | cmd->un.ulpWord[4] = IOERR_SLI_DOWN; | ||
2465 | (iocb->iocb_cmpl) (phba, iocb, iocb); | ||
2466 | } | ||
2467 | } | ||
2468 | |||
2469 | /* Flush the txcmpq */ | ||
2470 | while (!list_empty(&txcmplq)) { | ||
2471 | iocb = list_get_first(&txcmplq, struct lpfc_iocbq, list); | ||
2472 | cmd = &iocb->iocb; | ||
2473 | list_del_init(&iocb->list); | ||
2474 | |||
2475 | if (!iocb->iocb_cmpl) | ||
2476 | lpfc_sli_release_iocbq(phba, iocb); | ||
2477 | else { | ||
2478 | cmd->ulpStatus = IOSTAT_LOCAL_REJECT; | ||
2479 | cmd->un.ulpWord[4] = IOERR_SLI_DOWN; | ||
2480 | (iocb->iocb_cmpl) (phba, iocb, iocb); | ||
2481 | } | ||
2482 | } | ||
2483 | } | ||
2484 | |||
2485 | /** | ||
2486 | * lpfc_sli_brdready: Check for host status bits. | ||
2487 | * @phba: Pointer to HBA context object. | ||
2488 | * @mask: Bit mask to be checked. | ||
2489 | * | ||
2490 | * This function reads the host status register and compares | ||
2491 | * it with the provided bit mask to check if the HBA completed | ||
2492 | * the restart. This function will wait in a loop for the | ||
2493 | * HBA to complete restart. If the HBA does not restart within | ||
2494 | * 15 iterations, the function will reset the HBA again. The | ||
2495 | * function returns 1 when the HBA fails to restart, otherwise it returns | ||
2496 | * zero. | ||
2497 | **/ | ||
1946 | int | 2498 | int |
1947 | lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) | 2499 | lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) |
1948 | { | 2500 | { |
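As an illustration of the restart/ready handshake this comment describes, a caller in the init path could pair lpfc_sli_brdrestart() with lpfc_sli_brdready(). This is a hedged sketch, not the driver's exact sequence; it assumes only the HS_FFRDY/HS_MBRDY host-status bits named in these comments and the return conventions stated above.

        /* Hedged sketch: restart the HBA, then wait for it to report ready. */
        static int example_restart_and_wait(struct lpfc_hba *phba)
        {
                lpfc_sli_brdrestart(phba);      /* writes MBX_RESTART and resets the HBA */

                /* lpfc_sli_brdready() polls the host status register until both
                 * bits are set; per the comment above it returns 1 if the HBA
                 * never becomes ready and 0 on success. */
                if (lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY))
                        return -EIO;
                return 0;
        }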
@@ -1990,6 +2542,13 @@ lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) | |||
1990 | 2542 | ||
1991 | #define BARRIER_TEST_PATTERN (0xdeadbeef) | 2543 | #define BARRIER_TEST_PATTERN (0xdeadbeef) |
1992 | 2544 | ||
2545 | /** | ||
2546 | * lpfc_reset_barrier: Make HBA ready for HBA reset. | ||
2547 | * @phba: Pointer to HBA context object. | ||
2548 | * | ||
2549 | * This function is called before resetting an HBA. This | ||
2550 | * function requests HBA to quiesce DMAs before a reset. | ||
2551 | **/ | ||
1993 | void lpfc_reset_barrier(struct lpfc_hba *phba) | 2552 | void lpfc_reset_barrier(struct lpfc_hba *phba) |
1994 | { | 2553 | { |
1995 | uint32_t __iomem *resp_buf; | 2554 | uint32_t __iomem *resp_buf; |
@@ -2063,6 +2622,17 @@ restore_hc: | |||
2063 | readl(phba->HCregaddr); /* flush */ | 2622 | readl(phba->HCregaddr); /* flush */ |
2064 | } | 2623 | } |
2065 | 2624 | ||
2625 | /** | ||
2626 | * lpfc_sli_brdkill: Issue a kill_board mailbox command. | ||
2627 | * @phba: Pointer to HBA context object. | ||
2628 | * | ||
2629 | * This function issues a kill_board mailbox command and waits for | ||
2630 | * the error attention interrupt. This function is called for stopping | ||
2631 | * the firmware processing. The caller is not required to hold any | ||
2632 | * locks. This function calls lpfc_hba_down_post function to free | ||
2633 | * any pending commands after the kill. The function will return 1 when it | ||
2634 | * fails to kill the board, else it will return 0. | ||
2635 | **/ | ||
2066 | int | 2636 | int |
2067 | lpfc_sli_brdkill(struct lpfc_hba *phba) | 2637 | lpfc_sli_brdkill(struct lpfc_hba *phba) |
2068 | { | 2638 | { |
@@ -2139,6 +2709,17 @@ lpfc_sli_brdkill(struct lpfc_hba *phba) | |||
2139 | return ha_copy & HA_ERATT ? 0 : 1; | 2709 | return ha_copy & HA_ERATT ? 0 : 1; |
2140 | } | 2710 | } |
2141 | 2711 | ||
2712 | /** | ||
2713 | * lpfc_sli_brdreset: Reset the HBA. | ||
2714 | * @phba: Pointer to HBA context object. | ||
2715 | * | ||
2716 | * This function resets the HBA by writing HC_INITFF to the control | ||
2717 | * register. After the HBA resets, this function resets all the iocb ring | ||
2718 | * indices. This function disables PCI layer parity checking during | ||
2719 | * the reset. | ||
2720 | * This function returns 0 always. | ||
2721 | * The caller is not required to hold any locks. | ||
2722 | **/ | ||
2142 | int | 2723 | int |
2143 | lpfc_sli_brdreset(struct lpfc_hba *phba) | 2724 | lpfc_sli_brdreset(struct lpfc_hba *phba) |
2144 | { | 2725 | { |
@@ -2191,6 +2772,19 @@ lpfc_sli_brdreset(struct lpfc_hba *phba) | |||
2191 | return 0; | 2772 | return 0; |
2192 | } | 2773 | } |
2193 | 2774 | ||
2775 | /** | ||
2776 | * lpfc_sli_brdrestart: Restart the HBA. | ||
2777 | * @phba: Pointer to HBA context object. | ||
2778 | * | ||
2779 | * This function is called in the SLI initialization code path to | ||
2780 | * restart the HBA. The caller is not required to hold any lock. | ||
2781 | * This function writes MBX_RESTART mailbox command to the SLIM and | ||
2782 | * resets the HBA. At the end of the function, it calls lpfc_hba_down_post | ||
2783 | * function to free any pending commands. The function enables | ||
2784 | * POST only during the first initialization. The function returns zero. | ||
2785 | * The function does not guarantee completion of MBX_RESTART mailbox | ||
2786 | * command before the return of this function. | ||
2787 | **/ | ||
2194 | int | 2788 | int |
2195 | lpfc_sli_brdrestart(struct lpfc_hba *phba) | 2789 | lpfc_sli_brdrestart(struct lpfc_hba *phba) |
2196 | { | 2790 | { |
@@ -2251,6 +2845,16 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba) | |||
2251 | return 0; | 2845 | return 0; |
2252 | } | 2846 | } |
2253 | 2847 | ||
2848 | /** | ||
2849 | * lpfc_sli_chipset_init: Wait for the restart of the HBA after a restart. | ||
2850 | * @phba: Pointer to HBA context object. | ||
2851 | * | ||
2852 | * This function is called after a HBA restart to wait for successful | ||
2853 | * restart of the HBA. Successful restart of the HBA is indicated by | ||
2854 | * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15 | ||
2855 | * iterations, the function will restart the HBA again. The function returns | ||
2856 | * zero if the HBA successfully restarts, else it returns a negative error code. | ||
2857 | **/ | ||
2254 | static int | 2858 | static int |
2255 | lpfc_sli_chipset_init(struct lpfc_hba *phba) | 2859 | lpfc_sli_chipset_init(struct lpfc_hba *phba) |
2256 | { | 2860 | { |
@@ -2336,12 +2940,25 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba) | |||
2336 | return 0; | 2940 | return 0; |
2337 | } | 2941 | } |
2338 | 2942 | ||
2943 | /** | ||
2944 | * lpfc_sli_hbq_count: Get the number of HBQs to be configured. | ||
2945 | * | ||
2946 | * This function calculates and returns the number of HBQs required to be | ||
2947 | * configured. | ||
2948 | **/ | ||
2339 | int | 2949 | int |
2340 | lpfc_sli_hbq_count(void) | 2950 | lpfc_sli_hbq_count(void) |
2341 | { | 2951 | { |
2342 | return ARRAY_SIZE(lpfc_hbq_defs); | 2952 | return ARRAY_SIZE(lpfc_hbq_defs); |
2343 | } | 2953 | } |
2344 | 2954 | ||
2955 | /** | ||
2956 | * lpfc_sli_hbq_entry_count: Calculate total number of hbq entries. | ||
2957 | * | ||
2958 | * This function adds the number of hbq entries in every HBQ to get | ||
2959 | * the total number of hbq entries required for the HBA and returns | ||
2960 | * the total count. | ||
2961 | **/ | ||
2345 | static int | 2962 | static int |
2346 | lpfc_sli_hbq_entry_count(void) | 2963 | lpfc_sli_hbq_entry_count(void) |
2347 | { | 2964 | { |
@@ -2354,12 +2971,27 @@ lpfc_sli_hbq_entry_count(void) | |||
2354 | return count; | 2971 | return count; |
2355 | } | 2972 | } |
2356 | 2973 | ||
2974 | /** | ||
2975 | * lpfc_sli_hbq_size: Calculate memory required for all hbq entries. | ||
2976 | * | ||
2977 | * This function calculates amount of memory required for all hbq entries | ||
2978 | * to be configured and returns the total memory required. | ||
2979 | **/ | ||
2357 | int | 2980 | int |
2358 | lpfc_sli_hbq_size(void) | 2981 | lpfc_sli_hbq_size(void) |
2359 | { | 2982 | { |
2360 | return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry); | 2983 | return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry); |
2361 | } | 2984 | } |
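The three helpers above reduce to simple arithmetic. A hedged worked example, assuming a hypothetical lpfc_hbq_defs[] describing two HBQs with 256 and 16 entries (the real table may differ):

        /* Hypothetical sizing example:
         *   lpfc_sli_hbq_count()       = ARRAY_SIZE(lpfc_hbq_defs)       = 2
         *   lpfc_sli_hbq_entry_count() = 256 + 16                        = 272
         *   lpfc_sli_hbq_size()        = 272 * sizeof(struct lpfc_hbq_entry)
         * i.e. the memory reserved for HBQs is derived from the per-HBQ
         * entry counts rather than from a fixed constant.
         */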
2362 | 2985 | ||
2986 | /** | ||
2987 | * lpfc_sli_hbq_setup: configure and initialize HBQs. | ||
2988 | * @phba: Pointer to HBA context object. | ||
2989 | * | ||
2990 | * This function is called during the SLI initialization to configure | ||
2991 | * all the HBQs and post buffers to the HBQ. The caller is not | ||
2992 | * required to hold any locks. This function will return zero if successful | ||
2993 | * else it will return a negative error code. | ||
2994 | **/ | ||
2363 | static int | 2995 | static int |
2364 | lpfc_sli_hbq_setup(struct lpfc_hba *phba) | 2996 | lpfc_sli_hbq_setup(struct lpfc_hba *phba) |
2365 | { | 2997 | { |
@@ -2415,15 +3047,26 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba) | |||
2415 | mempool_free(pmb, phba->mbox_mem_pool); | 3047 | mempool_free(pmb, phba->mbox_mem_pool); |
2416 | 3048 | ||
2417 | /* Initially populate or replenish the HBQs */ | 3049 | /* Initially populate or replenish the HBQs */ |
2418 | for (hbqno = 0; hbqno < hbq_count; ++hbqno) { | 3050 | for (hbqno = 0; hbqno < hbq_count; ++hbqno) |
2419 | if (lpfc_sli_hbqbuf_init_hbqs(phba, hbqno)) | 3051 | lpfc_sli_hbqbuf_init_hbqs(phba, hbqno); |
2420 | return -ENOMEM; | ||
2421 | } | ||
2422 | return 0; | 3052 | return 0; |
2423 | } | 3053 | } |
2424 | 3054 | ||
2425 | static int | 3055 | /** |
2426 | lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode) | 3056 | * lpfc_sli_config_port: Issue config port mailbox command. |
3057 | * @phba: Pointer to HBA context object. | ||
3058 | * @sli_mode: sli mode - 2/3 | ||
3059 | * | ||
3060 | * This function is called by the SLI initialization code path | ||
3061 | * to issue the config_port mailbox command. This function restarts the | ||
3062 | * HBA firmware and issues a config_port mailbox command to configure | ||
3063 | * the SLI interface in the sli mode specified by sli_mode | ||
3064 | * variable. The caller is not required to hold any locks. | ||
3065 | * The function returns 0 if successful, else returns negative error | ||
3066 | * code. | ||
3067 | **/ | ||
3068 | int | ||
3069 | lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) | ||
2427 | { | 3070 | { |
2428 | LPFC_MBOXQ_t *pmb; | 3071 | LPFC_MBOXQ_t *pmb; |
2429 | uint32_t resetcount = 0, rc = 0, done = 0; | 3072 | uint32_t resetcount = 0, rc = 0, done = 0; |
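As a usage illustration of the sli_mode parameter, here is a hedged sketch of the try-SLI-3-then-fall-back-to-SLI-2 pattern, mirroring the logic lpfc_sli_hba_setup uses later in this patch; phba is assumed to be in scope.

        int rc;

        rc = lpfc_sli_config_port(phba, 3);             /* prefer SLI-3 */
        if (rc)
                rc = lpfc_sli_config_port(phba, 2);     /* adapter only supports SLI-2 */
        /* rc is 0 on success, or a negative error code if neither mode works */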
@@ -2460,13 +3103,15 @@ lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode) | |||
2460 | if (rc == -ERESTART) { | 3103 | if (rc == -ERESTART) { |
2461 | phba->link_state = LPFC_LINK_UNKNOWN; | 3104 | phba->link_state = LPFC_LINK_UNKNOWN; |
2462 | continue; | 3105 | continue; |
2463 | } else if (rc) { | 3106 | } else if (rc) |
2464 | break; | 3107 | break; |
2465 | } | ||
2466 | |||
2467 | phba->link_state = LPFC_INIT_MBX_CMDS; | 3108 | phba->link_state = LPFC_INIT_MBX_CMDS; |
2468 | lpfc_config_port(phba, pmb); | 3109 | lpfc_config_port(phba, pmb); |
2469 | rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); | 3110 | rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); |
3111 | phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | | ||
3112 | LPFC_SLI3_HBQ_ENABLED | | ||
3113 | LPFC_SLI3_CRP_ENABLED | | ||
3114 | LPFC_SLI3_INB_ENABLED); | ||
2470 | if (rc != MBX_SUCCESS) { | 3115 | if (rc != MBX_SUCCESS) { |
2471 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 3116 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
2472 | "0442 Adapter failed to init, mbxCmd x%x " | 3117 | "0442 Adapter failed to init, mbxCmd x%x " |
@@ -2476,30 +3121,64 @@ lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode) | |||
2476 | phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE; | 3121 | phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE; |
2477 | spin_unlock_irq(&phba->hbalock); | 3122 | spin_unlock_irq(&phba->hbalock); |
2478 | rc = -ENXIO; | 3123 | rc = -ENXIO; |
2479 | } else { | 3124 | } else |
2480 | done = 1; | 3125 | done = 1; |
2481 | phba->max_vpi = (phba->max_vpi && | ||
2482 | pmb->mb.un.varCfgPort.gmv) != 0 | ||
2483 | ? pmb->mb.un.varCfgPort.max_vpi | ||
2484 | : 0; | ||
2485 | } | ||
2486 | } | 3126 | } |
2487 | |||
2488 | if (!done) { | 3127 | if (!done) { |
2489 | rc = -EINVAL; | 3128 | rc = -EINVAL; |
2490 | goto do_prep_failed; | 3129 | goto do_prep_failed; |
2491 | } | 3130 | } |
2492 | 3131 | if (pmb->mb.un.varCfgPort.sli_mode == 3) { | |
2493 | if ((pmb->mb.un.varCfgPort.sli_mode == 3) && | 3132 | if (!pmb->mb.un.varCfgPort.cMA) { |
2494 | (!pmb->mb.un.varCfgPort.cMA)) { | 3133 | rc = -ENXIO; |
2495 | rc = -ENXIO; | 3134 | goto do_prep_failed; |
3135 | } | ||
3136 | if (phba->max_vpi && pmb->mb.un.varCfgPort.gmv) { | ||
3137 | phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; | ||
3138 | phba->max_vpi = pmb->mb.un.varCfgPort.max_vpi; | ||
3139 | } else | ||
3140 | phba->max_vpi = 0; | ||
3141 | if (pmb->mb.un.varCfgPort.gerbm) | ||
3142 | phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; | ||
3143 | if (pmb->mb.un.varCfgPort.gcrp) | ||
3144 | phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; | ||
3145 | if (pmb->mb.un.varCfgPort.ginb) { | ||
3146 | phba->sli3_options |= LPFC_SLI3_INB_ENABLED; | ||
3147 | phba->port_gp = phba->mbox->us.s3_inb_pgp.port; | ||
3148 | phba->inb_ha_copy = &phba->mbox->us.s3_inb_pgp.ha_copy; | ||
3149 | phba->inb_counter = &phba->mbox->us.s3_inb_pgp.counter; | ||
3150 | phba->inb_last_counter = | ||
3151 | phba->mbox->us.s3_inb_pgp.counter; | ||
3152 | } else { | ||
3153 | phba->port_gp = phba->mbox->us.s3_pgp.port; | ||
3154 | phba->inb_ha_copy = NULL; | ||
3155 | phba->inb_counter = NULL; | ||
3156 | } | ||
3157 | } else { | ||
3158 | phba->port_gp = phba->mbox->us.s2.port; | ||
3159 | phba->inb_ha_copy = NULL; | ||
3160 | phba->inb_counter = NULL; | ||
3161 | phba->max_vpi = 0; | ||
2496 | } | 3162 | } |
2497 | |||
2498 | do_prep_failed: | 3163 | do_prep_failed: |
2499 | mempool_free(pmb, phba->mbox_mem_pool); | 3164 | mempool_free(pmb, phba->mbox_mem_pool); |
2500 | return rc; | 3165 | return rc; |
2501 | } | 3166 | } |
2502 | 3167 | ||
3168 | |||
3169 | /** | ||
3170 | * lpfc_sli_hba_setup: SLI initialization function. | ||
3171 | * @phba: Pointer to HBA context object. | ||
3172 | * | ||
3173 | * This function is the main SLI initialization function. This function | ||
3174 | * is called by the HBA initialization code, HBA reset code and HBA | ||
3175 | * error attention handler code. The caller is not required to hold any | ||
3176 | * locks. This function issues the config_port mailbox command to configure | ||
3177 | * the SLI, set up the iocb rings and HBQ rings. In the end the function | ||
3178 | * calls the config_port_post function to issue init_link mailbox | ||
3179 | * command and to start the discovery. The function will return zero | ||
3180 | * if successful, else it will return negative error code. | ||
3181 | **/ | ||
2503 | int | 3182 | int |
2504 | lpfc_sli_hba_setup(struct lpfc_hba *phba) | 3183 | lpfc_sli_hba_setup(struct lpfc_hba *phba) |
2505 | { | 3184 | { |
@@ -2528,22 +3207,20 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba) | |||
2528 | break; | 3207 | break; |
2529 | } | 3208 | } |
2530 | 3209 | ||
2531 | rc = lpfc_do_config_port(phba, mode); | 3210 | rc = lpfc_sli_config_port(phba, mode); |
3211 | |||
2532 | if (rc && lpfc_sli_mode == 3) | 3212 | if (rc && lpfc_sli_mode == 3) |
2533 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, | 3213 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, |
2534 | "1820 Unable to select SLI-3. " | 3214 | "1820 Unable to select SLI-3. " |
2535 | "Not supported by adapter.\n"); | 3215 | "Not supported by adapter.\n"); |
2536 | if (rc && mode != 2) | 3216 | if (rc && mode != 2) |
2537 | rc = lpfc_do_config_port(phba, 2); | 3217 | rc = lpfc_sli_config_port(phba, 2); |
2538 | if (rc) | 3218 | if (rc) |
2539 | goto lpfc_sli_hba_setup_error; | 3219 | goto lpfc_sli_hba_setup_error; |
2540 | 3220 | ||
2541 | if (phba->sli_rev == 3) { | 3221 | if (phba->sli_rev == 3) { |
2542 | phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; | 3222 | phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; |
2543 | phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; | 3223 | phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; |
2544 | phba->sli3_options |= LPFC_SLI3_ENABLED; | ||
2545 | phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; | ||
2546 | |||
2547 | } else { | 3224 | } else { |
2548 | phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE; | 3225 | phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE; |
2549 | phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE; | 3226 | phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE; |
@@ -2558,8 +3235,7 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba) | |||
2558 | if (rc) | 3235 | if (rc) |
2559 | goto lpfc_sli_hba_setup_error; | 3236 | goto lpfc_sli_hba_setup_error; |
2560 | 3237 | ||
2561 | /* Init HBQs */ | 3238 | /* Init HBQs */ |
2562 | |||
2563 | if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { | 3239 | if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { |
2564 | rc = lpfc_sli_hbq_setup(phba); | 3240 | rc = lpfc_sli_hbq_setup(phba); |
2565 | if (rc) | 3241 | if (rc) |
@@ -2581,19 +3257,19 @@ lpfc_sli_hba_setup_error: | |||
2581 | return rc; | 3257 | return rc; |
2582 | } | 3258 | } |
2583 | 3259 | ||
2584 | /*! lpfc_mbox_timeout | 3260 | |
2585 | * | 3261 | /** |
2586 | * \pre | 3262 | * lpfc_mbox_timeout: Timeout call back function for mbox timer. |
2587 | * \post | 3263 | * @ptr: context object - pointer to hba structure. |
2588 | * \param hba Pointer to per struct lpfc_hba structure | ||
2589 | * \param l1 Pointer to the driver's mailbox queue. | ||
2590 | * \return | ||
2591 | * void | ||
2592 | * | ||
2593 | * \b Description: | ||
2594 | * | 3264 | * |
2595 | * This routine handles mailbox timeout events at timer interrupt context. | 3265 | * This is the callback function for mailbox timer. The mailbox |
2596 | */ | 3266 | * timer is armed when a new mailbox command is issued and the timer |
3267 | * is deleted when the mailbox completes. The function is called by | ||
3268 | * the kernel timer code when a mailbox does not complete within | ||
3269 | * the expected time. This function wakes up the worker thread to | ||
3270 | * process the mailbox timeout and returns. All the processing is | ||
3271 | * done by the worker thread function lpfc_mbox_timeout_handler. | ||
3272 | **/ | ||
2597 | void | 3273 | void |
2598 | lpfc_mbox_timeout(unsigned long ptr) | 3274 | lpfc_mbox_timeout(unsigned long ptr) |
2599 | { | 3275 | { |
@@ -2612,6 +3288,15 @@ lpfc_mbox_timeout(unsigned long ptr) | |||
2612 | return; | 3288 | return; |
2613 | } | 3289 | } |
2614 | 3290 | ||
3291 | |||
3292 | /** | ||
3293 | * lpfc_mbox_timeout_handler: Worker thread function to handle mailbox timeout. | ||
3294 | * @phba: Pointer to HBA context object. | ||
3295 | * | ||
3296 | * This function is called from worker thread when a mailbox command times out. | ||
3297 | * The caller is not required to hold any locks. This function will reset the | ||
3298 | * HBA and recover all the pending commands. | ||
3299 | **/ | ||
2615 | void | 3300 | void |
2616 | lpfc_mbox_timeout_handler(struct lpfc_hba *phba) | 3301 | lpfc_mbox_timeout_handler(struct lpfc_hba *phba) |
2617 | { | 3302 | { |
@@ -2666,6 +3351,32 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba) | |||
2666 | return; | 3351 | return; |
2667 | } | 3352 | } |
2668 | 3353 | ||
3354 | /** | ||
3355 | * lpfc_sli_issue_mbox: Issue a mailbox command to firmware. | ||
3356 | * @phba: Pointer to HBA context object. | ||
3357 | * @pmbox: Pointer to mailbox object. | ||
3358 | * @flag: Flag indicating how the mailbox need to be processed. | ||
3359 | * | ||
3360 | * This function is called by discovery code and HBA management code | ||
3361 | * to submit a mailbox command to firmware. This function gets the | ||
3362 | * hbalock to protect the data structures. | ||
3363 | * The mailbox command can be submitted in polling mode, in which case | ||
3364 | * this function will wait in a polling loop for the completion of the | ||
3365 | * mailbox. | ||
3366 | * If the mailbox is submitted in no_wait mode (not polling), the | ||
3367 | * function will submit the command and return immediately without waiting | ||
3368 | * for the mailbox completion. The no_wait mode is supported only when the HBA | ||
3369 | * is in SLI2/SLI3 mode, with interrupts enabled. | ||
3370 | * The SLI interface allows only one mailbox pending at a time. If the | ||
3371 | * mailbox is issued in polling mode and there is already a mailbox | ||
3372 | * pending, then the function will return an error. If the mailbox is issued | ||
3373 | * in NO_WAIT mode and there is a mailbox pending already, the function | ||
3374 | * will return MBX_BUSY after queuing the mailbox into the mailbox queue. | ||
3375 | * The SLI layer owns the mailbox object until the completion of the mailbox | ||
3376 | * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other | ||
3377 | * return codes, the caller owns the mailbox command after the return of | ||
3378 | * the function. | ||
3379 | **/ | ||
2669 | int | 3380 | int |
2670 | lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) | 3381 | lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) |
2671 | { | 3382 | { |
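To make the polling-versus-no_wait contract above concrete, here is a hedged sketch of a polled submission, modeled on the lpfc_config_port/MBX_POLL sequence visible elsewhere in this patch; the error handling is illustrative only.

        /* Hedged sketch: issue a mailbox command in polling mode. */
        static int example_poll_config_port(struct lpfc_hba *phba)
        {
                LPFC_MBOXQ_t *pmb;
                int rc;

                pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
                if (!pmb)
                        return -ENOMEM;

                lpfc_config_port(phba, pmb);            /* build the CONFIG_PORT mailbox */
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

                /* In polling mode the command has completed (or been rejected)
                 * by the time this returns, so the caller frees the mailbox. */
                mempool_free(pmb, phba->mbox_mem_pool);
                return (rc == MBX_SUCCESS) ? 0 : -EIO;
        }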
@@ -2676,7 +3387,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) | |||
2676 | int i; | 3387 | int i; |
2677 | unsigned long timeout; | 3388 | unsigned long timeout; |
2678 | unsigned long drvr_flag = 0; | 3389 | unsigned long drvr_flag = 0; |
2679 | volatile uint32_t word0, ldata; | 3390 | uint32_t word0, ldata; |
2680 | void __iomem *to_slim; | 3391 | void __iomem *to_slim; |
2681 | int processing_queue = 0; | 3392 | int processing_queue = 0; |
2682 | 3393 | ||
@@ -2836,12 +3547,11 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) | |||
2836 | 3547 | ||
2837 | if (psli->sli_flag & LPFC_SLI2_ACTIVE) { | 3548 | if (psli->sli_flag & LPFC_SLI2_ACTIVE) { |
2838 | /* First copy command data to host SLIM area */ | 3549 | /* First copy command data to host SLIM area */ |
2839 | lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, MAILBOX_CMD_SIZE); | 3550 | lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); |
2840 | } else { | 3551 | } else { |
2841 | if (mb->mbxCommand == MBX_CONFIG_PORT) { | 3552 | if (mb->mbxCommand == MBX_CONFIG_PORT) { |
2842 | /* copy command data into host mbox for cmpl */ | 3553 | /* copy command data into host mbox for cmpl */ |
2843 | lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, | 3554 | lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); |
2844 | MAILBOX_CMD_SIZE); | ||
2845 | } | 3555 | } |
2846 | 3556 | ||
2847 | /* First copy mbox command data to HBA SLIM, skip past first | 3557 | /* First copy mbox command data to HBA SLIM, skip past first |
@@ -2851,7 +3561,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) | |||
2851 | MAILBOX_CMD_SIZE - sizeof (uint32_t)); | 3561 | MAILBOX_CMD_SIZE - sizeof (uint32_t)); |
2852 | 3562 | ||
2853 | /* Next copy over first word, with mbxOwner set */ | 3563 | /* Next copy over first word, with mbxOwner set */ |
2854 | ldata = *((volatile uint32_t *)mb); | 3564 | ldata = *((uint32_t *)mb); |
2855 | to_slim = phba->MBslimaddr; | 3565 | to_slim = phba->MBslimaddr; |
2856 | writel(ldata, to_slim); | 3566 | writel(ldata, to_slim); |
2857 | readl(to_slim); /* flush */ | 3567 | readl(to_slim); /* flush */ |
@@ -2883,7 +3593,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) | |||
2883 | 3593 | ||
2884 | if (psli->sli_flag & LPFC_SLI2_ACTIVE) { | 3594 | if (psli->sli_flag & LPFC_SLI2_ACTIVE) { |
2885 | /* First read mbox status word */ | 3595 | /* First read mbox status word */ |
2886 | word0 = *((volatile uint32_t *)&phba->slim2p->mbx); | 3596 | word0 = *((uint32_t *)phba->mbox); |
2887 | word0 = le32_to_cpu(word0); | 3597 | word0 = le32_to_cpu(word0); |
2888 | } else { | 3598 | } else { |
2889 | /* First read mbox status word */ | 3599 | /* First read mbox status word */ |
@@ -2922,12 +3632,11 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) | |||
2922 | 3632 | ||
2923 | if (psli->sli_flag & LPFC_SLI2_ACTIVE) { | 3633 | if (psli->sli_flag & LPFC_SLI2_ACTIVE) { |
2924 | /* First copy command data */ | 3634 | /* First copy command data */ |
2925 | word0 = *((volatile uint32_t *) | 3635 | word0 = *((uint32_t *)phba->mbox); |
2926 | &phba->slim2p->mbx); | ||
2927 | word0 = le32_to_cpu(word0); | 3636 | word0 = le32_to_cpu(word0); |
2928 | if (mb->mbxCommand == MBX_CONFIG_PORT) { | 3637 | if (mb->mbxCommand == MBX_CONFIG_PORT) { |
2929 | MAILBOX_t *slimmb; | 3638 | MAILBOX_t *slimmb; |
2930 | volatile uint32_t slimword0; | 3639 | uint32_t slimword0; |
2931 | /* Check real SLIM for any errors */ | 3640 | /* Check real SLIM for any errors */ |
2932 | slimword0 = readl(phba->MBslimaddr); | 3641 | slimword0 = readl(phba->MBslimaddr); |
2933 | slimmb = (MAILBOX_t *) & slimword0; | 3642 | slimmb = (MAILBOX_t *) & slimword0; |
@@ -2948,8 +3657,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) | |||
2948 | 3657 | ||
2949 | if (psli->sli_flag & LPFC_SLI2_ACTIVE) { | 3658 | if (psli->sli_flag & LPFC_SLI2_ACTIVE) { |
2950 | /* copy results back to user */ | 3659 | /* copy results back to user */ |
2951 | lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb, | 3660 | lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE); |
2952 | MAILBOX_CMD_SIZE); | ||
2953 | } else { | 3661 | } else { |
2954 | /* First copy command data */ | 3662 | /* First copy command data */ |
2955 | lpfc_memcpy_from_slim(mb, phba->MBslimaddr, | 3663 | lpfc_memcpy_from_slim(mb, phba->MBslimaddr, |
@@ -2980,9 +3688,16 @@ out_not_finished: | |||
2980 | return MBX_NOT_FINISHED; | 3688 | return MBX_NOT_FINISHED; |
2981 | } | 3689 | } |
2982 | 3690 | ||
2983 | /* | 3691 | /** |
2984 | * Caller needs to hold lock. | 3692 | * __lpfc_sli_ringtx_put: Add an iocb to the txq. |
2985 | */ | 3693 | * @phba: Pointer to HBA context object. |
3694 | * @pring: Pointer to driver SLI ring object. | ||
3695 | * @piocb: Pointer to address of newly added command iocb. | ||
3696 | * | ||
3697 | * This function is called with hbalock held to add a command | ||
3698 | * iocb to the txq when SLI layer cannot submit the command iocb | ||
3699 | * to the ring. | ||
3700 | **/ | ||
2986 | static void | 3701 | static void |
2987 | __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | 3702 | __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
2988 | struct lpfc_iocbq *piocb) | 3703 | struct lpfc_iocbq *piocb) |
@@ -2992,6 +3707,23 @@ __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
2992 | pring->txq_cnt++; | 3707 | pring->txq_cnt++; |
2993 | } | 3708 | } |
2994 | 3709 | ||
3710 | /** | ||
3711 | * lpfc_sli_next_iocb: Get the next iocb in the txq. | ||
3712 | * @phba: Pointer to HBA context object. | ||
3713 | * @pring: Pointer to driver SLI ring object. | ||
3714 | * @piocb: Pointer to address of newly added command iocb. | ||
3715 | * | ||
3716 | * This function is called with hbalock held before a new | ||
3717 | * iocb is submitted to the firmware. This function checks | ||
3718 | * txq to flush the iocbs in txq to Firmware before | ||
3719 | * submitting new iocbs to the Firmware. | ||
3720 | * If there are iocbs in the txq which need to be submitted | ||
3721 | * to firmware, lpfc_sli_next_iocb returns the first element | ||
3722 | * of the txq after dequeuing it from txq. | ||
3723 | * If there is no iocb in the txq then the function will return | ||
3724 | * *piocb and *piocb is set to NULL. Caller needs to check | ||
3725 | * *piocb to find if there are more commands in the txq. | ||
3726 | **/ | ||
2995 | static struct lpfc_iocbq * | 3727 | static struct lpfc_iocbq * |
2996 | lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | 3728 | lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
2997 | struct lpfc_iocbq **piocb) | 3729 | struct lpfc_iocbq **piocb) |
@@ -3007,9 +3739,30 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
3007 | return nextiocb; | 3739 | return nextiocb; |
3008 | } | 3740 | } |
3009 | 3741 | ||
3010 | /* | 3742 | /** |
3011 | * Lockless version of lpfc_sli_issue_iocb. | 3743 | * __lpfc_sli_issue_iocb: Lockless version of lpfc_sli_issue_iocb. |
3012 | */ | 3744 | * @phba: Pointer to HBA context object. |
3745 | * @pring: Pointer to driver SLI ring object. | ||
3746 | * @piocb: Pointer to command iocb. | ||
3747 | * @flag: Flag indicating if this command can be put into txq. | ||
3748 | * | ||
3749 | * __lpfc_sli_issue_iocb is used by other functions in the driver | ||
3750 | * to issue an iocb command to the HBA. If the PCI slot is recovering | ||
3751 | * from error state or if HBA is resetting or if LPFC_STOP_IOCB_EVENT | ||
3752 | * flag is turned on, the function returns IOCB_ERROR. | ||
3753 | * When the link is down, this function allows only iocbs for | ||
3754 | * posting buffers. | ||
3755 | * This function finds the next available slot in the command ring and | ||
3756 | * posts the command to the available slot and writes the port | ||
3757 | * attention register to request the HBA to start processing the new iocb. | ||
3758 | * If there is no slot available in the ring and | ||
3759 | * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the | ||
3760 | * txq, otherwise the function returns IOCB_BUSY. | ||
3761 | * | ||
3762 | * This function is called with hbalock held. | ||
3763 | * The function will return success after it successfully submits the | ||
3764 | * iocb to firmware or after adding it to the txq. | ||
3765 | **/ | ||
3013 | static int | 3766 | static int |
3014 | __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | 3767 | __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
3015 | struct lpfc_iocbq *piocb, uint32_t flag) | 3768 | struct lpfc_iocbq *piocb, uint32_t flag) |
@@ -3052,6 +3805,16 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
3052 | * can be issued if the link is not up. | 3805 | * can be issued if the link is not up. |
3053 | */ | 3806 | */ |
3054 | switch (piocb->iocb.ulpCommand) { | 3807 | switch (piocb->iocb.ulpCommand) { |
3808 | case CMD_GEN_REQUEST64_CR: | ||
3809 | case CMD_GEN_REQUEST64_CX: | ||
3810 | if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) || | ||
3811 | (piocb->iocb.un.genreq64.w5.hcsw.Rctl != | ||
3812 | FC_FCP_CMND) || | ||
3813 | (piocb->iocb.un.genreq64.w5.hcsw.Type != | ||
3814 | MENLO_TRANSPORT_TYPE)) | ||
3815 | |||
3816 | goto iocb_busy; | ||
3817 | break; | ||
3055 | case CMD_QUE_RING_BUF_CN: | 3818 | case CMD_QUE_RING_BUF_CN: |
3056 | case CMD_QUE_RING_BUF64_CN: | 3819 | case CMD_QUE_RING_BUF64_CN: |
3057 | /* | 3820 | /* |
@@ -3106,6 +3869,19 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
3106 | } | 3869 | } |
3107 | 3870 | ||
3108 | 3871 | ||
3872 | /** | ||
3873 | * lpfc_sli_issue_iocb: Wrapper function for __lpfc_sli_issue_iocb. | ||
3874 | * @phba: Pointer to HBA context object. | ||
3875 | * @pring: Pointer to driver SLI ring object. | ||
3876 | * @piocb: Pointer to command iocb. | ||
3877 | * @flag: Flag indicating if this command can be put into txq. | ||
3878 | * | ||
3879 | * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb | ||
3880 | * function. This function gets the hbalock and calls | ||
3881 | * __lpfc_sli_issue_iocb function and will return the error returned | ||
3882 | * by __lpfc_sli_issue_iocb function. This wrapper is used by | ||
3883 | * functions which do not hold hbalock. | ||
3884 | **/ | ||
3109 | int | 3885 | int |
3110 | lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | 3886 | lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
3111 | struct lpfc_iocbq *piocb, uint32_t flag) | 3887 | struct lpfc_iocbq *piocb, uint32_t flag) |
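A hedged sketch of the two entry points described above: callers that already hold hbalock use the lockless variant, while everyone else uses this locking wrapper. phba, pring and piocb are assumed to have been set up elsewhere.

        int rc;
        unsigned long iflag;

        /* Without hbalock held: the wrapper takes and releases the lock. */
        rc = lpfc_sli_issue_iocb(phba, pring, piocb, SLI_IOCB_RET_IOCB);

        /* With hbalock already held (e.g. from inside another SLI routine): */
        spin_lock_irqsave(&phba->hbalock, iflag);
        rc = __lpfc_sli_issue_iocb(phba, pring, piocb, 0);
        spin_unlock_irqrestore(&phba->hbalock, iflag);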
@@ -3120,6 +3896,17 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
3120 | return rc; | 3896 | return rc; |
3121 | } | 3897 | } |
3122 | 3898 | ||
3899 | /** | ||
3900 | * lpfc_extra_ring_setup: Extra ring setup function. | ||
3901 | * @phba: Pointer to HBA context object. | ||
3902 | * | ||
3903 | * This function is called while the driver attaches to the | ||
3904 | * HBA to set up the extra ring. The extra ring is used | ||
3905 | * only when the driver needs to support target mode functionality | ||
3906 | * or IP over FC functionality. | ||
3907 | * | ||
3908 | * This function is called with no lock held. | ||
3909 | **/ | ||
3123 | static int | 3910 | static int |
3124 | lpfc_extra_ring_setup( struct lpfc_hba *phba) | 3911 | lpfc_extra_ring_setup( struct lpfc_hba *phba) |
3125 | { | 3912 | { |
@@ -3155,6 +3942,19 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba) | |||
3155 | return 0; | 3942 | return 0; |
3156 | } | 3943 | } |
3157 | 3944 | ||
3945 | /** | ||
3946 | * lpfc_sli_async_event_handler: ASYNC iocb handler function. | ||
3947 | * @phba: Pointer to HBA context object. | ||
3948 | * @pring: Pointer to driver SLI ring object. | ||
3949 | * @iocbq: Pointer to iocb object. | ||
3950 | * | ||
3951 | * This function is called by the slow ring event handler | ||
3952 | * function when there is an ASYNC event iocb in the ring. | ||
3953 | * This function is called with no lock held. | ||
3954 | * Currently this function handles only temperature related | ||
3955 | * ASYNC events. The function decodes the temperature sensor | ||
3956 | * event message and posts events for the management applications. | ||
3957 | **/ | ||
3158 | static void | 3958 | static void |
3159 | lpfc_sli_async_event_handler(struct lpfc_hba * phba, | 3959 | lpfc_sli_async_event_handler(struct lpfc_hba * phba, |
3160 | struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq) | 3960 | struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq) |
@@ -3210,6 +4010,17 @@ lpfc_sli_async_event_handler(struct lpfc_hba * phba, | |||
3210 | } | 4010 | } |
3211 | 4011 | ||
3212 | 4012 | ||
4013 | /** | ||
4014 | * lpfc_sli_setup: SLI ring setup function. | ||
4015 | * @phba: Pointer to HBA context object. | ||
4016 | * | ||
4017 | * lpfc_sli_setup sets up rings of the SLI interface with | ||
4018 | * the number of iocbs per ring and iotags. This function is | ||
4019 | * called while the driver attaches to the HBA and before the | ||
4020 | * interrupts are enabled. So there is no need for locking. | ||
4021 | * | ||
4022 | * This function always returns 0. | ||
4023 | **/ | ||
3213 | int | 4024 | int |
3214 | lpfc_sli_setup(struct lpfc_hba *phba) | 4025 | lpfc_sli_setup(struct lpfc_hba *phba) |
3215 | { | 4026 | { |
@@ -3321,6 +4132,17 @@ lpfc_sli_setup(struct lpfc_hba *phba) | |||
3321 | return 0; | 4132 | return 0; |
3322 | } | 4133 | } |
3323 | 4134 | ||
4135 | /** | ||
4136 | * lpfc_sli_queue_setup: Queue initialization function. | ||
4137 | * @phba: Pointer to HBA context object. | ||
4138 | * | ||
4139 | * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each | ||
4140 | * ring. This function also initializes ring indices of each ring. | ||
4141 | * This function is called during the initialization of the SLI | ||
4142 | * interface of an HBA. | ||
4143 | * This function is called with no lock held and always returns | ||
4144 | * 1. | ||
4145 | **/ | ||
3324 | int | 4146 | int |
3325 | lpfc_sli_queue_setup(struct lpfc_hba *phba) | 4147 | lpfc_sli_queue_setup(struct lpfc_hba *phba) |
3326 | { | 4148 | { |
@@ -3349,6 +4171,23 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba) | |||
3349 | return 1; | 4171 | return 1; |
3350 | } | 4172 | } |
3351 | 4173 | ||
4174 | /** | ||
4175 | * lpfc_sli_host_down: Vport cleanup function. | ||
4176 | * @vport: Pointer to virtual port object. | ||
4177 | * | ||
4178 | * lpfc_sli_host_down is called to clean up the resources | ||
4179 | * associated with a vport before destroying virtual | ||
4180 | * port data structures. | ||
4181 | * This function does the following operations: | ||
4182 | * - Free discovery resources associated with this virtual | ||
4183 | * port. | ||
4184 | * - Free iocbs associated with this virtual port in | ||
4185 | * the txq. | ||
4186 | * - Send abort for all iocb commands associated with this | ||
4187 | * vport in txcmplq. | ||
4188 | * | ||
4189 | * This function is called with no lock held and always returns 1. | ||
4190 | **/ | ||
3352 | int | 4191 | int |
3353 | lpfc_sli_host_down(struct lpfc_vport *vport) | 4192 | lpfc_sli_host_down(struct lpfc_vport *vport) |
3354 | { | 4193 | { |
@@ -3411,6 +4250,21 @@ lpfc_sli_host_down(struct lpfc_vport *vport) | |||
3411 | return 1; | 4250 | return 1; |
3412 | } | 4251 | } |
3413 | 4252 | ||
4253 | /** | ||
4254 | * lpfc_sli_hba_down: Resource cleanup function for the HBA. | ||
4255 | * @phba: Pointer to HBA context object. | ||
4256 | * | ||
4257 | * This function cleans up all iocb, buffers, mailbox commands | ||
4258 | * while shutting down the HBA. This function is called with no | ||
4259 | * lock held and always returns 1. | ||
4260 | * This function does the following to cleanup driver resources: | ||
4261 | * - Free discovery resources for each virtual port | ||
4262 | * - Cleanup any pending fabric iocbs | ||
4263 | * - Iterate through the iocb txq and free each entry | ||
4264 | * in the list. | ||
4265 | * - Free up any buffer posted to the HBA | ||
4266 | * - Free mailbox commands in the mailbox queue. | ||
4267 | **/ | ||
3414 | int | 4268 | int |
3415 | lpfc_sli_hba_down(struct lpfc_hba *phba) | 4269 | lpfc_sli_hba_down(struct lpfc_hba *phba) |
3416 | { | 4270 | { |
@@ -3501,6 +4355,18 @@ lpfc_sli_hba_down(struct lpfc_hba *phba) | |||
3501 | return 1; | 4355 | return 1; |
3502 | } | 4356 | } |
3503 | 4357 | ||
4358 | /** | ||
4359 | * lpfc_sli_pcimem_bcopy: SLI memory copy function. | ||
4360 | * @srcp: Source memory pointer. | ||
4361 | * @destp: Destination memory pointer. | ||
4362 | * @cnt: Number of words required to be copied. | ||
4363 | * | ||
4364 | * This function is used for copying data between driver memory | ||
4365 | * and the SLI memory. This function also changes the endianness | ||
4366 | * of each word if native endianness is different from SLI | ||
4367 | * endianness. This function can be called with or without | ||
4368 | * lock. | ||
4369 | **/ | ||
3504 | void | 4370 | void |
3505 | lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt) | 4371 | lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt) |
3506 | { | 4372 | { |
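A hedged usage sketch of the byte-swapping copy described above, mirroring the mailbox copies done in lpfc_sli_issue_mbox elsewhere in this patch; mb and phba are assumed to be in scope.

        /* Copy the mailbox command into the host-resident SLIM area; the helper
         * swaps each 32-bit word when host and SLI endianness differ. */
        lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);

        /* ... and after completion, copy the results back, undoing the swap: */
        lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);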
@@ -3518,6 +4384,17 @@ lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt) | |||
3518 | } | 4384 | } |
3519 | } | 4385 | } |
3520 | 4386 | ||
4387 | |||
4388 | /** | ||
4389 | * lpfc_sli_ringpostbuf_put: Function to add a buffer to postbufq. | ||
4390 | * @phba: Pointer to HBA context object. | ||
4391 | * @pring: Pointer to driver SLI ring object. | ||
4392 | * @mp: Pointer to driver buffer object. | ||
4393 | * | ||
4394 | * This function is called with no lock held. | ||
4395 | * It always returns zero after adding the buffer to the postbufq | ||
4396 | * buffer list. | ||
4397 | **/ | ||
3521 | int | 4398 | int |
3522 | lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | 4399 | lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
3523 | struct lpfc_dmabuf *mp) | 4400 | struct lpfc_dmabuf *mp) |
@@ -3531,6 +4408,18 @@ lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
3531 | return 0; | 4408 | return 0; |
3532 | } | 4409 | } |
3533 | 4410 | ||
4411 | /** | ||
4412 | * lpfc_sli_get_buffer_tag: Tag allocation function for a buffer posted | ||
4413 | * using CMD_QUE_XRI64_CX iocb. | ||
4414 | * @phba: Pointer to HBA context object. | ||
4415 | * | ||
4416 | * When HBQ is enabled, buffers are searched based on tags. This function | ||
4417 | * allocates a tag for a buffer posted using the CMD_QUE_XRI64_CX iocb. The | ||
4418 | * tag is bitwise OR-ed with QUE_BUFTAG_BIT to make sure that the tag | ||
4419 | * does not conflict with tags of buffers posted for unsolicited events. | ||
4420 | * The function returns the allocated tag. The function is called with | ||
4421 | * no locks held. | ||
4422 | **/ | ||
3534 | uint32_t | 4423 | uint32_t |
3535 | lpfc_sli_get_buffer_tag(struct lpfc_hba *phba) | 4424 | lpfc_sli_get_buffer_tag(struct lpfc_hba *phba) |
3536 | { | 4425 | { |
@@ -3545,6 +4434,22 @@ lpfc_sli_get_buffer_tag(struct lpfc_hba *phba) | |||
3545 | return phba->buffer_tag_count; | 4434 | return phba->buffer_tag_count; |
3546 | } | 4435 | } |
3547 | 4436 | ||
4437 | /** | ||
4438 | * lpfc_sli_ring_taggedbuf_get: Search for the buffer that was posted | ||
4439 | * using the CMD_QUE_XRI64_CX iocb. | ||
4440 | * @phba: Pointer to HBA context object. | ||
4441 | * @pring: Pointer to driver SLI ring object. | ||
4442 | * @tag: Buffer tag. | ||
4443 | * | ||
4444 | * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq | ||
4445 | * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX | ||
4446 | * iocb is posted to the response ring with the tag of the buffer. | ||
4447 | * This function searches the pring->postbufq list using the tag | ||
4448 | * to find buffer associated with CMD_IOCB_RET_XRI64_CX | ||
4449 | * iocb. If the buffer is found then lpfc_dmabuf object of the | ||
4450 | * buffer is returned to the caller else NULL is returned. | ||
4451 | * This function is called with no lock held. | ||
4452 | **/ | ||
3548 | struct lpfc_dmabuf * | 4453 | struct lpfc_dmabuf * |
3549 | lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | 4454 | lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
3550 | uint32_t tag) | 4455 | uint32_t tag) |
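A hedged sketch tying the two tag helpers together: the tag obtained when posting a buffer with CMD_QUE_XRI64_CX is later used to look the buffer back up when the matching CMD_IOCB_RET_XRI64_CX arrives. The surrounding variables (phba, pring) and the lpfc_dmabuf virt field are assumed from the rest of the driver.

        uint32_t tag;
        struct lpfc_dmabuf *mp;

        /* When building the QUE_XRI64_CX iocb for a buffer: */
        tag = lpfc_sli_get_buffer_tag(phba);    /* already OR-ed with QUE_BUFTAG_BIT */
        /* ... store the tag in the queue iocb and post the buffer ... */

        /* When the RET_XRI64_CX response arrives carrying that tag: */
        mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
        if (mp) {
                /* process the DMA'd data in mp->virt, then free or repost it */
        }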
@@ -3565,7 +4470,7 @@ lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
3565 | 4470 | ||
3566 | spin_unlock_irq(&phba->hbalock); | 4471 | spin_unlock_irq(&phba->hbalock); |
3567 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 4472 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
3568 | "0410 Cannot find virtual addr for buffer tag on " | 4473 | "0402 Cannot find virtual addr for buffer tag on " |
3569 | "ring %d Data x%lx x%p x%p x%x\n", | 4474 | "ring %d Data x%lx x%p x%p x%x\n", |
3570 | pring->ringno, (unsigned long) tag, | 4475 | pring->ringno, (unsigned long) tag, |
3571 | slp->next, slp->prev, pring->postbufq_cnt); | 4476 | slp->next, slp->prev, pring->postbufq_cnt); |
@@ -3573,6 +4478,23 @@ lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
3573 | return NULL; | 4478 | return NULL; |
3574 | } | 4479 | } |
3575 | 4480 | ||
4481 | /** | ||
4482 | * lpfc_sli_ringpostbuf_get: SLI2 buffer search function for | ||
4483 | * unsolicited ct and els events. | ||
4484 | * @phba: Pointer to HBA context object. | ||
4485 | * @pring: Pointer to driver SLI ring object. | ||
4486 | * @phys: DMA address of the buffer. | ||
4487 | * | ||
4488 | * This function searches the buffer list using the dma_address | ||
4489 | * of unsolicited event to find the driver's lpfc_dmabuf object | ||
4490 | * corresponding to the dma_address. The function returns the | ||
4491 | * lpfc_dmabuf object if a buffer is found else it returns NULL. | ||
4492 | * This function is called by the ct and els unsolicited event | ||
4493 | * handlers to get the buffer associated with the unsolicited | ||
4494 | * event. | ||
4495 | * | ||
4496 | * This function is called with no lock held. | ||
4497 | **/ | ||
3576 | struct lpfc_dmabuf * | 4498 | struct lpfc_dmabuf * |
3577 | lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | 4499 | lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
3578 | dma_addr_t phys) | 4500 | dma_addr_t phys) |
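For the postbufq pair above, a hedged sketch: a driver buffer is queued with lpfc_sli_ringpostbuf_put() before the command is issued, and located again from the unsolicited-event handler by its DMA address. mp is assumed to be a prepared lpfc_dmabuf, and buf_phys is a stand-in for the DMA address reported in the response iocb.

        struct lpfc_dmabuf *match;

        /* Before issuing the command that will DMA into mp: */
        lpfc_sli_ringpostbuf_put(phba, pring, mp);      /* always returns 0 */

        /* Later, in the unsolicited CT/ELS handler, recover the buffer by
         * the DMA address reported in the response: */
        match = lpfc_sli_ringpostbuf_get(phba, pring, buf_phys);
        if (match) {
                /* match->virt now holds the unsolicited payload */
        }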
@@ -3600,6 +4522,17 @@ lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
3600 | return NULL; | 4522 | return NULL; |
3601 | } | 4523 | } |
3602 | 4524 | ||
4525 | /** | ||
4526 | * lpfc_sli_abort_els_cmpl: Completion handler for the els abort iocbs. | ||
4527 | * @phba: Pointer to HBA context object. | ||
4528 | * @cmdiocb: Pointer to driver command iocb object. | ||
4529 | * @rspiocb: Pointer to driver response iocb object. | ||
4530 | * | ||
4531 | * This function is the completion handler for the abort iocbs for | ||
4532 | * ELS commands. This function is called from the ELS ring event | ||
4533 | * handler with no lock held. This function frees memory resources | ||
4534 | * associated with the abort iocb. | ||
4535 | **/ | ||
3603 | static void | 4536 | static void |
3604 | lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | 4537 | lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, |
3605 | struct lpfc_iocbq *rspiocb) | 4538 | struct lpfc_iocbq *rspiocb) |
@@ -3665,6 +4598,17 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
3665 | return; | 4598 | return; |
3666 | } | 4599 | } |
3667 | 4600 | ||
4601 | /** | ||
4602 | * lpfc_ignore_els_cmpl: Completion handler for aborted ELS command. | ||
4603 | * @phba: Pointer to HBA context object. | ||
4604 | * @cmdiocb: Pointer to driver command iocb object. | ||
4605 | * @rspiocb: Pointer to driver response iocb object. | ||
4606 | * | ||
4607 | * The function is called from SLI ring event handler with no | ||
4608 | * lock held. This function is the completion handler for ELS commands | ||
4609 | * which are aborted. The function frees memory resources used for | ||
4610 | * the aborted ELS commands. | ||
4611 | **/ | ||
3668 | static void | 4612 | static void |
3669 | lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | 4613 | lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, |
3670 | struct lpfc_iocbq *rspiocb) | 4614 | struct lpfc_iocbq *rspiocb) |
@@ -3673,7 +4617,7 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
3673 | 4617 | ||
3674 | /* ELS cmd tag <ulpIoTag> completes */ | 4618 | /* ELS cmd tag <ulpIoTag> completes */ |
3675 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, | 4619 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, |
3676 | "0133 Ignoring ELS cmd tag x%x completion Data: " | 4620 | "0139 Ignoring ELS cmd tag x%x completion Data: " |
3677 | "x%x x%x x%x\n", | 4621 | "x%x x%x x%x\n", |
3678 | irsp->ulpIoTag, irsp->ulpStatus, | 4622 | irsp->ulpIoTag, irsp->ulpStatus, |
3679 | irsp->un.ulpWord[4], irsp->ulpTimeout); | 4623 | irsp->un.ulpWord[4], irsp->ulpTimeout); |
@@ -3684,6 +4628,17 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
3684 | return; | 4628 | return; |
3685 | } | 4629 | } |
3686 | 4630 | ||
4631 | /** | ||
4632 | * lpfc_sli_issue_abort_iotag: Abort function for a command iocb. | ||
4633 | * @phba: Pointer to HBA context object. | ||
4634 | * @pring: Pointer to driver SLI ring object. | ||
4635 | * @cmdiocb: Pointer to driver command iocb object. | ||
4636 | * | ||
4637 | * This function issues an abort iocb for the provided command | ||
4638 | * iocb. This function is called with hbalock held. | ||
4639 | * The function returns 0 when it fails due to memory allocation | ||
4640 | * failure or when the command iocb is an abort request. | ||
4641 | **/ | ||
3687 | int | 4642 | int |
3688 | lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | 4643 | lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
3689 | struct lpfc_iocbq *cmdiocb) | 4644 | struct lpfc_iocbq *cmdiocb) |
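Because lpfc_sli_issue_abort_iotag() expects hbalock to be held, a caller-side sketch looks like the following; this is hedged, the lookup of cmdiocb is omitted, and the locking pattern is the one used throughout this file.

        unsigned long iflag;
        int ret;

        spin_lock_irqsave(&phba->hbalock, iflag);
        /* A return of 0 means no abort was issued (allocation failure, or
         * cmdiocb was itself an abort request). */
        ret = lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
        spin_unlock_irqrestore(&phba->hbalock, iflag);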
@@ -3748,6 +4703,8 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
3748 | iabt->un.acxri.abortIoTag, abtsiocbp->iotag); | 4703 | iabt->un.acxri.abortIoTag, abtsiocbp->iotag); |
3749 | retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0); | 4704 | retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0); |
3750 | 4705 | ||
4706 | if (retval) | ||
4707 | __lpfc_sli_release_iocbq(phba, abtsiocbp); | ||
3751 | abort_iotag_exit: | 4708 | abort_iotag_exit: |
3752 | /* | 4709 | /* |
3753 | * Caller to this routine should check for IOCB_ERROR | 4710 | * Caller to this routine should check for IOCB_ERROR |
@@ -3757,6 +4714,29 @@ abort_iotag_exit: | |||
3757 | return retval; | 4714 | return retval; |
3758 | } | 4715 | } |
3759 | 4716 | ||
4717 | /** | ||
4718 | * lpfc_sli_validate_fcp_iocb: Filtering function, used to find commands | ||
4719 | * associated with a vport/SCSI target/lun. | ||
4720 | * @iocbq: Pointer to driver iocb object. | ||
4721 | * @vport: Pointer to driver virtual port object. | ||
4722 | * @tgt_id: SCSI ID of the target. | ||
4723 | * @lun_id: LUN ID of the scsi device. | ||
4724 | * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST | ||
4725 | * | ||
4726 | * This function acts as iocb filter for functions which abort or count | ||
4727 | * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return | ||
4728 | * 0 if the filtering criteria is met for the given iocb and will return | ||
4729 | * 1 if the filtering criteria is not met. | ||
4730 | * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the | ||
4731 | * given iocb is for the SCSI device specified by vport, tgt_id and | ||
4732 | * lun_id parameter. | ||
4733 | * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the | ||
4734 | * given iocb is for the SCSI target specified by vport and tgt_id | ||
4735 | * parameters. | ||
4736 | * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the | ||
4737 | * given iocb is for the SCSI host associated with the given vport. | ||
4738 | * This function is called with no locks held. | ||
4739 | **/ | ||
3760 | static int | 4740 | static int |
3761 | lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, | 4741 | lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, |
3762 | uint16_t tgt_id, uint64_t lun_id, | 4742 | uint16_t tgt_id, uint64_t lun_id, |
@@ -3800,6 +4780,25 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, | |||
3800 | return rc; | 4780 | return rc; |
3801 | } | 4781 | } |
3802 | 4782 | ||
4783 | /** | ||
4784 | * lpfc_sli_sum_iocb: Function to count the number of FCP iocbs pending. | ||
4785 | * @vport: Pointer to virtual port. | ||
4786 | * @tgt_id: SCSI ID of the target. | ||
4787 | * @lun_id: LUN ID of the scsi device. | ||
4788 | * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. | ||
4789 | * | ||
4790 | * This function returns number of FCP commands pending for the vport. | ||
4791 | * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP | ||
4792 | * commands pending on the vport associated with SCSI device specified | ||
4793 | * by tgt_id and lun_id parameters. | ||
4794 | * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP | ||
4795 | * commands pending on the vport associated with SCSI target specified | ||
4796 | * by tgt_id parameter. | ||
4797 | * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP | ||
4798 | * commands pending on the vport. | ||
4799 | * This function returns the number of iocbs which satisfy the filter. | ||
4800 | * This function is called without any lock held. | ||
4801 | **/ | ||
3803 | int | 4802 | int |
3804 | lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, | 4803 | lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, |
3805 | lpfc_ctx_cmd ctx_cmd) | 4804 | lpfc_ctx_cmd ctx_cmd) |
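A hedged sketch of the counting helper in an error-handling context, for example waiting for outstanding FCP commands on one LUN to drain; vport, tgt_id and lun_id are assumed to come from the command being recovered, and a real caller would bound the loop.

        int pending;

        /* Count FCP iocbs still outstanding for this LUN on the vport and
         * wait for them to drain. */
        pending = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN);
        while (pending) {
                msleep(100);    /* illustrative back-off only */
                pending = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN);
        }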
@@ -3819,6 +4818,17 @@ lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, | |||
3819 | return sum; | 4818 | return sum; |
3820 | } | 4819 | } |
3821 | 4820 | ||
4821 | /** | ||
4822 | * lpfc_sli_abort_fcp_cmpl: Completion handler function for an aborted | ||
4823 | * FCP iocb. | ||
4824 | * @phba: Pointer to HBA context object | ||
4825 | * @cmdiocb: Pointer to command iocb object. | ||
4826 | * @rspiocb: Pointer to response iocb object. | ||
4827 | * | ||
4828 | * This function is called when an aborted FCP iocb completes. This | ||
4829 | * function is called by the ring event handler with no lock held. | ||
4830 | * This function frees the iocb. | ||
4831 | **/ | ||
3822 | void | 4832 | void |
3823 | lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | 4833 | lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, |
3824 | struct lpfc_iocbq *rspiocb) | 4834 | struct lpfc_iocbq *rspiocb) |
@@ -3827,6 +4837,28 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
3827 | return; | 4837 | return; |
3828 | } | 4838 | } |
3829 | 4839 | ||
4840 | /** | ||
4841 | * lpfc_sli_abort_iocb: This function issues aborts for all SCSI commands | ||
4842 | * pending on a SCSI host (vport)/target/lun. | ||
4843 | * @vport: Pointer to virtual port. | ||
4844 | * @pring: Pointer to driver SLI ring object. | ||
4845 | * @tgt_id: SCSI ID of the target. | ||
4846 | * @lun_id: LUN ID of the scsi device. | ||
4847 | * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. | ||
4848 | * | ||
4849 | * This function sends an abort command for every SCSI command | ||
4850 | * associated with the given virtual port pending on the ring | ||
4851 | * filtered by lpfc_sli_validate_fcp_iocb function. | ||
4852 | * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the | ||
4853 | * FCP iocbs associated with lun specified by tgt_id and lun_id | ||
4854 | * parameters | ||
4855 | * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the | ||
4856 | * FCP iocbs associated with SCSI target specified by tgt_id parameter. | ||
4857 | * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all | ||
4858 | * FCP iocbs associated with virtual port. | ||
4859 | * This function returns number of iocbs it failed to abort. | ||
4860 | * This function is called with no locks held. | ||
4861 | **/ | ||
3830 | int | 4862 | int |
3831 | lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, | 4863 | lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, |
3832 | uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd) | 4864 | uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd) |
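Combining the filter semantics above with the counter documented earlier in this patch, a hedged sketch of a target-level cleanup (the kind of call a SCSI error handler might make); vport, phba and tgt_id are assumed to be in scope, and lun_id is ignored for LPFC_CTX_TGT.

        int in_flight, not_aborted;

        /* How many FCP iocbs for this target are still outstanding? */
        in_flight = lpfc_sli_sum_iocb(vport, tgt_id, 0, LPFC_CTX_TGT);

        /* Ask the firmware to abort each of them on the FCP ring; the return
         * value is the number of iocbs for which no abort could be issued. */
        not_aborted = lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
                                          tgt_id, 0, LPFC_CTX_TGT);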
@@ -3878,6 +4910,24 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, | |||
3878 | return errcnt; | 4910 | return errcnt; |
3879 | } | 4911 | } |
3880 | 4912 | ||
4913 | /** | ||
4914 | * lpfc_sli_wake_iocb_wait: iocb completion handler for iocb issued using | ||
4915 | * lpfc_sli_issue_iocb_wait. | ||
4916 | * @phba: Pointer to HBA context object. | ||
4917 | * @cmdiocbq: Pointer to command iocb. | ||
4918 | * @rspiocbq: Pointer to response iocb. | ||
4919 | * | ||
4920 | * This function is the completion handler for iocbs issued using | ||
4921 | * lpfc_sli_issue_iocb_wait function. This function is called by the | ||
4922 | * ring event handler function without any lock held. This function | ||
4923 | * can be called from both worker thread context and interrupt | ||
4924 | * context. This function can also be called from another thread which | ||
4925 | * cleans up the SLI layer objects. | ||
4926 | * This function copies the contents of the response iocb to the | ||
4927 | * response iocb memory object provided by the caller of | ||
4928 | * lpfc_sli_issue_iocb_wait and then wakes up the thread which | ||
4929 | * sleeps for the iocb completion. | ||
4930 | **/ | ||
3881 | static void | 4931 | static void |
3882 | lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, | 4932 | lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, |
3883 | struct lpfc_iocbq *cmdiocbq, | 4933 | struct lpfc_iocbq *cmdiocbq, |
@@ -3899,13 +4949,36 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, | |||
3899 | return; | 4949 | return; |
3900 | } | 4950 | } |
3901 | 4951 | ||
3902 | /* | 4952 | /** |
3903 | * Issue the caller's iocb and wait for its completion, but no longer than the | 4953 | * lpfc_sli_issue_iocb_wait: Synchronous function to issue iocb commands. |
3904 | * caller's timeout. Note that iocb_flags is cleared before the | 4954 | * @phba: Pointer to HBA context object. |
3905 | * lpfc_sli_issue_call since the wake routine sets a unique value and by | 4955 | * @pring: Pointer to sli ring. |
3906 | * definition this is a wait function. | 4956 | * @piocb: Pointer to command iocb. |
3907 | */ | 4957 | * @prspiocbq: Pointer to response iocb. |
3908 | 4958 | * @timeout: Timeout in number of seconds. | |
4959 | * | ||
4960 | * This function issues the iocb to firmware and waits for the | ||
4961 | * iocb to complete. If the iocb command is not | ||
4962 | * completed within timeout seconds, it returns IOCB_TIMEDOUT. | ||
4963 | * Caller should not free the iocb resources if this function | ||
4964 | * returns IOCB_TIMEDOUT. | ||
4965 | * The function waits for the iocb completion using an | ||
4966 | * non-interruptible wait. | ||
4967 | * This function will sleep while waiting for iocb completion. | ||
4968 | * So, this function should not be called from any context which | ||
4969 | * does not allow sleeping. Due to the same reason, this function | ||
4970 | * cannot be called with interrupt disabled. | ||
4971 | * This function assumes that the iocb completions occur while | ||
4972 | * this function sleep. So, this function cannot be called from | ||
4973 | * the thread which process iocb completion for this ring. | ||
4974 | * This function clears the iocb_flag of the iocb object before | ||
4975 | * issuing the iocb and the iocb completion handler sets this | ||
4976 | * flag and wakes this thread when the iocb completes. | ||
4977 | * The contents of the response iocb will be copied to prspiocbq | ||
4978 | * by the completion handler when the command completes. | ||
4979 | * This function returns IOCB_SUCCESS when success. | ||
4980 | * This function is called with no lock held. | ||
4981 | **/ | ||
3909 | int | 4982 | int |
3910 | lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, | 4983 | lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, |
3911 | struct lpfc_sli_ring *pring, | 4984 | struct lpfc_sli_ring *pring, |
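The bodies of lpfc_sli_wake_iocb_wait() and lpfc_sli_issue_iocb_wait() are largely outside these hunks. The flag/wake protocol the two comments describe is the usual wait-queue completion pattern, sketched below with error handling trimmed; the iocb field names and the LPFC_IO_WAKE flag are assumptions drawn from that pattern, not from this diff.

/* Completion side: copy the response, then wake the sleeping issuer. */
static void
lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	wait_queue_head_t *pdone_q;

	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(rspiocbq->iocb));

	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	pdone_q = cmdiocbq->context_un.wait_queue;
	if (pdone_q)
		wake_up(pdone_q);
}

/* Issuing side, inside lpfc_sli_issue_iocb_wait(), roughly: */
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);

	piocb->iocb_flag &= ~LPFC_IO_WAKE;		/* cleared before issue */
	piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
	piocb->context_un.wait_queue = &done_q;
	piocb->context2 = prspiocbq;			/* response lands here */

	retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0);
	if (retval == IOCB_SUCCESS) {
		/* Non-interruptible sleep until the handler sets the flag. */
		wait_event_timeout(done_q,
				   piocb->iocb_flag & LPFC_IO_WAKE,
				   timeout * HZ);
		if (!(piocb->iocb_flag & LPFC_IO_WAKE))
			retval = IOCB_TIMEDOUT;	/* caller must not free */
	}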
@@ -3963,7 +5036,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, | |||
3963 | } | 5036 | } |
3964 | } else { | 5037 | } else { |
3965 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, | 5038 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
3966 | ":0332 IOCB wait issue failed, Data x%x\n", | 5039 | "0332 IOCB wait issue failed, Data x%x\n", |
3967 | retval); | 5040 | retval); |
3968 | retval = IOCB_ERROR; | 5041 | retval = IOCB_ERROR; |
3969 | } | 5042 | } |
@@ -3983,6 +5056,32 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, | |||
3983 | return retval; | 5056 | return retval; |
3984 | } | 5057 | } |
3985 | 5058 | ||
5059 | /** | ||
5060 | * lpfc_sli_issue_mbox_wait: Synchronous function to issue mailbox. | ||
5061 | * @phba: Pointer to HBA context object. | ||
5062 | * @pmboxq: Pointer to driver mailbox object. | ||
5063 | * @timeout: Timeout in number of seconds. | ||
5064 | * | ||
5065 | * This function issues the mailbox to firmware and waits for the | ||
5066 | * mailbox command to complete. If the mailbox command is not | ||
5067 | * completed within timeout seconds, it returns MBX_TIMEOUT. | ||
5068 | * The function waits for the mailbox completion using an | ||
5069 | * interruptible wait. If the thread is woken up due to a | ||
5070 | * signal, an MBX_TIMEOUT error is returned to the caller. The caller | ||
5071 | * should not free the mailbox resources if this function returns | ||
5072 | * MBX_TIMEOUT. | ||
5073 | * This function will sleep while waiting for mailbox completion. | ||
5074 | * So, this function should not be called from any context which | ||
5075 | * does not allow sleeping. For the same reason, this function | ||
5076 | * cannot be called with interrupts disabled. | ||
5077 | * This function assumes that the mailbox completion occurs while | ||
5078 | * this function sleeps. So, this function cannot be called from | ||
5079 | * the worker thread that processes mailbox completions. | ||
5080 | * This function is called in the context of HBA management | ||
5081 | * applications. | ||
5082 | * This function returns MBX_SUCCESS when successful. | ||
5083 | * This function is called with no lock held. | ||
5084 | **/ | ||
3986 | int | 5085 | int |
3987 | lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, | 5086 | lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, |
3988 | uint32_t timeout) | 5087 | uint32_t timeout) |
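Only the comment and the signature of lpfc_sli_issue_mbox_wait() are inside these hunks; the interruptible-wait behaviour it describes reduces to the shape below. The completion handler, the LPFC_MBX_WAKE flag and the context fields are assumptions taken from the same wait/wake pattern as above.

	/* Inside lpfc_sli_issue_mbox_wait(), roughly: */
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	int retval;

	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;	/* wakes done_q */
	pmboxq->context1 = &done_q;

	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
		/* Interruptible: a signal can wake this thread early ... */
		wait_event_interruptible_timeout(done_q,
				pmboxq->mbox_flag & LPFC_MBX_WAKE,
				timeout * HZ);
		/* ... and is reported as a timeout; the caller must then
		 * leave pmboxq alone, since it may still complete later.
		 */
		if (pmboxq->mbox_flag & LPFC_MBX_WAKE)
			retval = MBX_SUCCESS;
		else
			retval = MBX_TIMEOUT;
	}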
@@ -4027,6 +5126,18 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, | |||
4027 | return retval; | 5126 | return retval; |
4028 | } | 5127 | } |
4029 | 5128 | ||
5129 | /** | ||
5130 | * lpfc_sli_flush_mbox_queue: mailbox queue cleanup function. | ||
5131 | * @phba: Pointer to HBA context. | ||
5132 | * | ||
5133 | * This function is called to clean up any pending mailbox | ||
5134 | * objects in the driver queue before bringing the HBA offline. | ||
5135 | * This function is called while resetting the HBA. | ||
5136 | * The function is called without any lock held. The function | ||
5137 | * takes the hbalock to update the SLI data structure. | ||
5138 | * This function returns 1 when there is an active mailbox | ||
5139 | * command pending, else returns 0. | ||
5140 | **/ | ||
4030 | int | 5141 | int |
4031 | lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba) | 5142 | lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba) |
4032 | { | 5143 | { |
@@ -4058,8 +5169,74 @@ lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba) | |||
4058 | return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0; | 5169 | return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0; |
4059 | } | 5170 | } |
4060 | 5171 | ||
5172 | /** | ||
5173 | * lpfc_sli_check_eratt: check error attention events | ||
5174 | * @phba: Pointer to HBA context. | ||
5175 | * | ||
5176 | * This function is called from timer soft interrupt context to check HBA's | ||
5177 | * error attention register bit for error attention events. | ||
5178 | * | ||
5179 | * This function returns 1 when there is an Error Attention in the Host Attention | ||
5180 | * Register and returns 0 otherwise. | ||
5181 | **/ | ||
5182 | int | ||
5183 | lpfc_sli_check_eratt(struct lpfc_hba *phba) | ||
5184 | { | ||
5185 | uint32_t ha_copy; | ||
5186 | |||
5187 | /* If somebody is waiting to handle an eratt, don't process it | ||
5188 | * here. The brdkill function will do this. | ||
5189 | */ | ||
5190 | if (phba->link_flag & LS_IGNORE_ERATT) | ||
5191 | return 0; | ||
5192 | |||
5193 | /* Check if interrupt handler handles this ERATT */ | ||
5194 | spin_lock_irq(&phba->hbalock); | ||
5195 | if (phba->hba_flag & HBA_ERATT_HANDLED) { | ||
5196 | /* Interrupt handler has handled ERATT */ | ||
5197 | spin_unlock_irq(&phba->hbalock); | ||
5198 | return 0; | ||
5199 | } | ||
5200 | |||
5201 | /* Read chip Host Attention (HA) register */ | ||
5202 | ha_copy = readl(phba->HAregaddr); | ||
5203 | if (ha_copy & HA_ERATT) { | ||
5204 | /* Read host status register to retrieve error event */ | ||
5205 | lpfc_sli_read_hs(phba); | ||
5206 | /* Set the driver HA work bitmap */ | ||
5207 | phba->work_ha |= HA_ERATT; | ||
5208 | /* Indicate polling handles this ERATT */ | ||
5209 | phba->hba_flag |= HBA_ERATT_HANDLED; | ||
5210 | spin_unlock_irq(&phba->hbalock); | ||
5211 | return 1; | ||
5212 | } | ||
5213 | spin_unlock_irq(&phba->hbalock); | ||
5214 | return 0; | ||
5215 | } | ||
5216 | |||
5217 | /** | ||
5218 | * lpfc_sp_intr_handler: The slow-path interrupt handler of lpfc driver. | ||
5219 | * @irq: Interrupt number. | ||
5220 | * @dev_id: The device context pointer. | ||
5221 | * | ||
5222 | * This function is directly called from the PCI layer as an interrupt | ||
5223 | * service routine when the device is enabled with MSI-X multi-message | ||
5224 | * interrupt mode and there are slow-path events in the HBA. However, | ||
5225 | * when the device is enabled with either MSI or Pin-IRQ interrupt mode, | ||
5226 | * this function is called as part of the device-level interrupt handler. | ||
5227 | * When the PCI slot is in error recovery or the HBA is undergoing | ||
5228 | * initialization, the interrupt handler will not process the interrupt. | ||
5229 | * The link attention and ELS ring attention events are handled by the | ||
5230 | * worker thread. The interrupt handler signals the worker thread | ||
5231 | * and returns for these events. This function is called without any | ||
5232 | * lock held. It gets the hbalock to access and update SLI data | ||
5233 | * structures. | ||
5234 | * | ||
5235 | * This function returns IRQ_HANDLED when interrupt is handled else it | ||
5236 | * returns IRQ_NONE. | ||
5237 | **/ | ||
4061 | irqreturn_t | 5238 | irqreturn_t |
4062 | lpfc_intr_handler(int irq, void *dev_id) | 5239 | lpfc_sp_intr_handler(int irq, void *dev_id) |
4063 | { | 5240 | { |
4064 | struct lpfc_hba *phba; | 5241 | struct lpfc_hba *phba; |
4065 | uint32_t ha_copy; | 5242 | uint32_t ha_copy; |
@@ -4078,48 +5255,52 @@ lpfc_intr_handler(int irq, void *dev_id) | |||
4078 | * Get the driver's phba structure from the dev_id and | 5255 | * Get the driver's phba structure from the dev_id and |
4079 | * assume the HBA is not interrupting. | 5256 | * assume the HBA is not interrupting. |
4080 | */ | 5257 | */ |
4081 | phba = (struct lpfc_hba *) dev_id; | 5258 | phba = (struct lpfc_hba *)dev_id; |
4082 | 5259 | ||
4083 | if (unlikely(!phba)) | 5260 | if (unlikely(!phba)) |
4084 | return IRQ_NONE; | 5261 | return IRQ_NONE; |
4085 | 5262 | ||
4086 | /* If the pci channel is offline, ignore all the interrupts. */ | ||
4087 | if (unlikely(pci_channel_offline(phba->pcidev))) | ||
4088 | return IRQ_NONE; | ||
4089 | |||
4090 | phba->sli.slistat.sli_intr++; | ||
4091 | |||
4092 | /* | 5263 | /* |
4093 | * Call the HBA to see if it is interrupting. If not, don't claim | 5264 | * Stuff needs to be attended to when this function is invoked as an |
4094 | * the interrupt | 5265 | * individual interrupt handler in MSI-X multi-message interrupt mode |
4095 | */ | ||
4096 | |||
4097 | /* Ignore all interrupts during initialization. */ | ||
4098 | if (unlikely(phba->link_state < LPFC_LINK_DOWN)) | ||
4099 | return IRQ_NONE; | ||
4100 | |||
4101 | /* | ||
4102 | * Read host attention register to determine interrupt source | ||
4103 | * Clear Attention Sources, except Error Attention (to | ||
4104 | * preserve status) and Link Attention | ||
4105 | */ | ||
4106 | spin_lock(&phba->hbalock); | ||
4107 | ha_copy = readl(phba->HAregaddr); | ||
4108 | /* If somebody is waiting to handle an eratt don't process it | ||
4109 | * here. The brdkill function will do this. | ||
4110 | */ | 5266 | */ |
4111 | if (phba->link_flag & LS_IGNORE_ERATT) | 5267 | if (phba->intr_type == MSIX) { |
4112 | ha_copy &= ~HA_ERATT; | 5268 | /* If the pci channel is offline, ignore all the interrupts */ |
4113 | writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); | 5269 | if (unlikely(pci_channel_offline(phba->pcidev))) |
4114 | readl(phba->HAregaddr); /* flush */ | 5270 | return IRQ_NONE; |
4115 | spin_unlock(&phba->hbalock); | 5271 | /* Update device-level interrupt statistics */ |
4116 | 5272 | phba->sli.slistat.sli_intr++; | |
4117 | if (unlikely(!ha_copy)) | 5273 | /* Ignore all interrupts during initialization. */ |
4118 | return IRQ_NONE; | 5274 | if (unlikely(phba->link_state < LPFC_LINK_DOWN)) |
5275 | return IRQ_NONE; | ||
5276 | /* Need to read HA REG for slow-path events */ | ||
5277 | spin_lock(&phba->hbalock); | ||
5278 | ha_copy = readl(phba->HAregaddr); | ||
5279 | /* If somebody is waiting to handle an eratt don't process it | ||
5280 | * here. The brdkill function will do this. | ||
5281 | */ | ||
5282 | if (phba->link_flag & LS_IGNORE_ERATT) | ||
5283 | ha_copy &= ~HA_ERATT; | ||
5284 | /* Check the need for handling ERATT in interrupt handler */ | ||
5285 | if (ha_copy & HA_ERATT) { | ||
5286 | if (phba->hba_flag & HBA_ERATT_HANDLED) | ||
5287 | /* ERATT polling has handled ERATT */ | ||
5288 | ha_copy &= ~HA_ERATT; | ||
5289 | else | ||
5290 | /* Indicate interrupt handler handles ERATT */ | ||
5291 | phba->hba_flag |= HBA_ERATT_HANDLED; | ||
5292 | } | ||
5293 | /* Clear up only attention source related to slow-path */ | ||
5294 | writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), | ||
5295 | phba->HAregaddr); | ||
5296 | readl(phba->HAregaddr); /* flush */ | ||
5297 | spin_unlock(&phba->hbalock); | ||
5298 | } else | ||
5299 | ha_copy = phba->ha_copy; | ||
4119 | 5300 | ||
4120 | work_ha_copy = ha_copy & phba->work_ha_mask; | 5301 | work_ha_copy = ha_copy & phba->work_ha_mask; |
4121 | 5302 | ||
4122 | if (unlikely(work_ha_copy)) { | 5303 | if (work_ha_copy) { |
4123 | if (work_ha_copy & HA_LATT) { | 5304 | if (work_ha_copy & HA_LATT) { |
4124 | if (phba->sli.sli_flag & LPFC_PROCESS_LA) { | 5305 | if (phba->sli.sli_flag & LPFC_PROCESS_LA) { |
4125 | /* | 5306 | /* |
@@ -4138,7 +5319,7 @@ lpfc_intr_handler(int irq, void *dev_id) | |||
4138 | work_ha_copy &= ~HA_LATT; | 5319 | work_ha_copy &= ~HA_LATT; |
4139 | } | 5320 | } |
4140 | 5321 | ||
4141 | if (work_ha_copy & ~(HA_ERATT|HA_MBATT|HA_LATT)) { | 5322 | if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) { |
4142 | /* | 5323 | /* |
4143 | * Turn off Slow Rings interrupts, LPFC_ELS_RING is | 5324 | * Turn off Slow Rings interrupts, LPFC_ELS_RING is |
4144 | * the only slow ring. | 5325 | * the only slow ring. |
@@ -4179,31 +5360,13 @@ lpfc_intr_handler(int irq, void *dev_id) | |||
4179 | spin_unlock(&phba->hbalock); | 5360 | spin_unlock(&phba->hbalock); |
4180 | } | 5361 | } |
4181 | } | 5362 | } |
4182 | |||
4183 | if (work_ha_copy & HA_ERATT) { | ||
4184 | /* | ||
4185 | * There was a link/board error. Read the | ||
4186 | * status register to retrieve the error event | ||
4187 | * and process it. | ||
4188 | */ | ||
4189 | phba->sli.slistat.err_attn_event++; | ||
4190 | /* Save status info */ | ||
4191 | phba->work_hs = readl(phba->HSregaddr); | ||
4192 | phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); | ||
4193 | phba->work_status[1] = readl(phba->MBslimaddr + 0xac); | ||
4194 | |||
4195 | /* Clear Chip error bit */ | ||
4196 | writel(HA_ERATT, phba->HAregaddr); | ||
4197 | readl(phba->HAregaddr); /* flush */ | ||
4198 | phba->pport->stopped = 1; | ||
4199 | } | ||
4200 | |||
4201 | spin_lock(&phba->hbalock); | 5363 | spin_lock(&phba->hbalock); |
4202 | if ((work_ha_copy & HA_MBATT) && | 5364 | if (work_ha_copy & HA_ERATT) |
4203 | (phba->sli.mbox_active)) { | 5365 | lpfc_sli_read_hs(phba); |
5366 | if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { | ||
4204 | pmb = phba->sli.mbox_active; | 5367 | pmb = phba->sli.mbox_active; |
4205 | pmbox = &pmb->mb; | 5368 | pmbox = &pmb->mb; |
4206 | mbox = &phba->slim2p->mbx; | 5369 | mbox = phba->mbox; |
4207 | vport = pmb->vport; | 5370 | vport = pmb->vport; |
4208 | 5371 | ||
4209 | /* First check out the status word */ | 5372 | /* First check out the status word */ |
@@ -4270,7 +5433,7 @@ lpfc_intr_handler(int irq, void *dev_id) | |||
4270 | lpfc_printf_log(phba, | 5433 | lpfc_printf_log(phba, |
4271 | KERN_ERR, | 5434 | KERN_ERR, |
4272 | LOG_MBOX | LOG_SLI, | 5435 | LOG_MBOX | LOG_SLI, |
4273 | "0306 rc should have" | 5436 | "0350 rc should have " |
4274 | "been MBX_BUSY"); | 5437 | "been MBX_BUSY"); |
4275 | goto send_current_mbox; | 5438 | goto send_current_mbox; |
4276 | } | 5439 | } |
@@ -4283,6 +5446,7 @@ lpfc_intr_handler(int irq, void *dev_id) | |||
4283 | } | 5446 | } |
4284 | } else | 5447 | } else |
4285 | spin_unlock(&phba->hbalock); | 5448 | spin_unlock(&phba->hbalock); |
5449 | |||
4286 | if ((work_ha_copy & HA_MBATT) && | 5450 | if ((work_ha_copy & HA_MBATT) && |
4287 | (phba->sli.mbox_active == NULL)) { | 5451 | (phba->sli.mbox_active == NULL)) { |
4288 | send_current_mbox: | 5452 | send_current_mbox: |
@@ -4302,15 +5466,74 @@ send_current_mbox: | |||
4302 | spin_unlock(&phba->hbalock); | 5466 | spin_unlock(&phba->hbalock); |
4303 | lpfc_worker_wake_up(phba); | 5467 | lpfc_worker_wake_up(phba); |
4304 | } | 5468 | } |
5469 | return IRQ_HANDLED; | ||
4305 | 5470 | ||
4306 | ha_copy &= ~(phba->work_ha_mask); | 5471 | } /* lpfc_sp_intr_handler */ |
5472 | |||
5473 | /** | ||
5474 | * lpfc_fp_intr_handler: The fast-path interrupt handler of lpfc driver. | ||
5475 | * @irq: Interrupt number. | ||
5476 | * @dev_id: The device context pointer. | ||
5477 | * | ||
5478 | * This function is directly called from the PCI layer as an interrupt | ||
5479 | * service routine when the device is enabled with MSI-X multi-message | ||
5480 | * interrupt mode and there is a fast-path FCP IOCB ring event in the | ||
5481 | * HBA. However, when the device is enabled with either MSI or Pin-IRQ | ||
5482 | * interrupt mode, this function is called as part of the device-level | ||
5483 | * interrupt handler. When the PCI slot is in error recovery or the HBA | ||
5484 | * is undergoing initialization, the interrupt handler will not process | ||
5485 | * the interrupt. The SCSI FCP fast-path ring events are handled in the | ||
5486 | * interrupt context. This function is called without any lock held. It | ||
5487 | * gets the hbalock to access and update SLI data structures. | ||
5488 | * | ||
5489 | * This function returns IRQ_HANDLED when interrupt is handled else it | ||
5490 | * returns IRQ_NONE. | ||
5491 | **/ | ||
5492 | irqreturn_t | ||
5493 | lpfc_fp_intr_handler(int irq, void *dev_id) | ||
5494 | { | ||
5495 | struct lpfc_hba *phba; | ||
5496 | uint32_t ha_copy; | ||
5497 | unsigned long status; | ||
5498 | |||
5499 | /* Get the driver's phba structure from the dev_id and | ||
5500 | * assume the HBA is not interrupting. | ||
5501 | */ | ||
5502 | phba = (struct lpfc_hba *) dev_id; | ||
5503 | |||
5504 | if (unlikely(!phba)) | ||
5505 | return IRQ_NONE; | ||
5506 | |||
5507 | /* | ||
5508 | * Stuff needs to be attended to when this function is invoked as an | ||
5509 | * individual interrupt handler in MSI-X multi-message interrupt mode | ||
5510 | */ | ||
5511 | if (phba->intr_type == MSIX) { | ||
5512 | /* If pci channel is offline, ignore all the interrupts */ | ||
5513 | if (unlikely(pci_channel_offline(phba->pcidev))) | ||
5514 | return IRQ_NONE; | ||
5515 | /* Update device-level interrupt statistics */ | ||
5516 | phba->sli.slistat.sli_intr++; | ||
5517 | /* Ignore all interrupts during initialization. */ | ||
5518 | if (unlikely(phba->link_state < LPFC_LINK_DOWN)) | ||
5519 | return IRQ_NONE; | ||
5520 | /* Need to read HA REG for FCP ring and other ring events */ | ||
5521 | ha_copy = readl(phba->HAregaddr); | ||
5522 | /* Clear up only attention source related to fast-path */ | ||
5523 | spin_lock(&phba->hbalock); | ||
5524 | writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), | ||
5525 | phba->HAregaddr); | ||
5526 | readl(phba->HAregaddr); /* flush */ | ||
5527 | spin_unlock(&phba->hbalock); | ||
5528 | } else | ||
5529 | ha_copy = phba->ha_copy; | ||
4307 | 5530 | ||
4308 | /* | 5531 | /* |
4309 | * Process all events on FCP ring. Take the optimized path for | 5532 | * Process all events on FCP ring. Take the optimized path for FCP IO. |
4310 | * FCP IO. Any other IO is slow path and is handled by | ||
4311 | * the worker thread. | ||
4312 | */ | 5533 | */ |
4313 | status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); | 5534 | ha_copy &= ~(phba->work_ha_mask); |
5535 | |||
5536 | status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); | ||
4314 | status >>= (4*LPFC_FCP_RING); | 5537 | status >>= (4*LPFC_FCP_RING); |
4315 | if (status & HA_RXMASK) | 5538 | if (status & HA_RXMASK) |
4316 | lpfc_sli_handle_fast_ring_event(phba, | 5539 | lpfc_sli_handle_fast_ring_event(phba, |
@@ -4319,11 +5542,10 @@ send_current_mbox: | |||
4319 | 5542 | ||
4320 | if (phba->cfg_multi_ring_support == 2) { | 5543 | if (phba->cfg_multi_ring_support == 2) { |
4321 | /* | 5544 | /* |
4322 | * Process all events on extra ring. Take the optimized path | 5545 | * Process all events on extra ring. Take the optimized path |
4323 | * for extra ring IO. Any other IO is slow path and is handled | 5546 | * for extra ring IO. |
4324 | * by the worker thread. | ||
4325 | */ | 5547 | */ |
4326 | status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); | 5548 | status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); |
4327 | status >>= (4*LPFC_EXTRA_RING); | 5549 | status >>= (4*LPFC_EXTRA_RING); |
4328 | if (status & HA_RXMASK) { | 5550 | if (status & HA_RXMASK) { |
4329 | lpfc_sli_handle_fast_ring_event(phba, | 5551 | lpfc_sli_handle_fast_ring_event(phba, |
@@ -4332,5 +5554,106 @@ send_current_mbox: | |||
4332 | } | 5554 | } |
4333 | } | 5555 | } |
4334 | return IRQ_HANDLED; | 5556 | return IRQ_HANDLED; |
5557 | } /* lpfc_fp_intr_handler */ | ||
5558 | |||
5559 | /** | ||
5560 | * lpfc_intr_handler: The device-level interrupt handler of lpfc driver. | ||
5561 | * @irq: Interrupt number. | ||
5562 | * @dev_id: The device context pointer. | ||
5563 | * | ||
5564 | * This function is the device-level interrupt handler called from the PCI | ||
5565 | * layer when either MSI or Pin-IRQ interrupt mode is enabled and there is | ||
5566 | * an event in the HBA which requires driver attention. This function | ||
5567 | * invokes the slow-path interrupt attention handling function and fast-path | ||
5568 | * interrupt attention handling function in turn to process the relevant | ||
5569 | * HBA attention events. This function is called without any lock held. It | ||
5570 | * gets the hbalock to access and update SLI data structures. | ||
5571 | * | ||
5572 | * This function returns IRQ_HANDLED when interrupt is handled, else it | ||
5573 | * returns IRQ_NONE. | ||
5574 | **/ | ||
5575 | irqreturn_t | ||
5576 | lpfc_intr_handler(int irq, void *dev_id) | ||
5577 | { | ||
5578 | struct lpfc_hba *phba; | ||
5579 | irqreturn_t sp_irq_rc, fp_irq_rc; | ||
5580 | unsigned long status1, status2; | ||
5581 | |||
5582 | /* | ||
5583 | * Get the driver's phba structure from the dev_id and | ||
5584 | * assume the HBA is not interrupting. | ||
5585 | */ | ||
5586 | phba = (struct lpfc_hba *) dev_id; | ||
5587 | |||
5588 | if (unlikely(!phba)) | ||
5589 | return IRQ_NONE; | ||
5590 | |||
5591 | /* If the pci channel is offline, ignore all the interrupts. */ | ||
5592 | if (unlikely(pci_channel_offline(phba->pcidev))) | ||
5593 | return IRQ_NONE; | ||
5594 | |||
5595 | /* Update device level interrupt statistics */ | ||
5596 | phba->sli.slistat.sli_intr++; | ||
5597 | |||
5598 | /* Ignore all interrupts during initialization. */ | ||
5599 | if (unlikely(phba->link_state < LPFC_LINK_DOWN)) | ||
5600 | return IRQ_NONE; | ||
5601 | |||
5602 | spin_lock(&phba->hbalock); | ||
5603 | phba->ha_copy = readl(phba->HAregaddr); | ||
5604 | if (unlikely(!phba->ha_copy)) { | ||
5605 | spin_unlock(&phba->hbalock); | ||
5606 | return IRQ_NONE; | ||
5607 | } else if (phba->ha_copy & HA_ERATT) { | ||
5608 | if (phba->hba_flag & HBA_ERATT_HANDLED) | ||
5609 | /* ERATT polling has handled ERATT */ | ||
5610 | phba->ha_copy &= ~HA_ERATT; | ||
5611 | else | ||
5612 | /* Indicate interrupt handler handles ERATT */ | ||
5613 | phba->hba_flag |= HBA_ERATT_HANDLED; | ||
5614 | } | ||
5615 | |||
5616 | /* Clear attention sources except link and error attentions */ | ||
5617 | writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); | ||
5618 | readl(phba->HAregaddr); /* flush */ | ||
5619 | spin_unlock(&phba->hbalock); | ||
5620 | |||
5621 | /* | ||
5622 | * Invokes slow-path host attention interrupt handling as appropriate. | ||
5623 | */ | ||
5624 | |||
5625 | /* status of events with mailbox and link attention */ | ||
5626 | status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT); | ||
5627 | |||
5628 | /* status of events with ELS ring */ | ||
5629 | status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); | ||
5630 | status2 >>= (4*LPFC_ELS_RING); | ||
5631 | |||
5632 | if (status1 || (status2 & HA_RXMASK)) | ||
5633 | sp_irq_rc = lpfc_sp_intr_handler(irq, dev_id); | ||
5634 | else | ||
5635 | sp_irq_rc = IRQ_NONE; | ||
5636 | |||
5637 | /* | ||
5638 | * Invoke fast-path host attention interrupt handling as appropriate. | ||
5639 | */ | ||
5640 | |||
5641 | /* status of events with FCP ring */ | ||
5642 | status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); | ||
5643 | status1 >>= (4*LPFC_FCP_RING); | ||
5644 | |||
5645 | /* status of events with extra ring */ | ||
5646 | if (phba->cfg_multi_ring_support == 2) { | ||
5647 | status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); | ||
5648 | status2 >>= (4*LPFC_EXTRA_RING); | ||
5649 | } else | ||
5650 | status2 = 0; | ||
5651 | |||
5652 | if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) | ||
5653 | fp_irq_rc = lpfc_fp_intr_handler(irq, dev_id); | ||
5654 | else | ||
5655 | fp_irq_rc = IRQ_NONE; | ||
4335 | 5656 | ||
4336 | } /* lpfc_intr_handler */ | 5657 | /* Return device-level interrupt handling status */ |
5658 | return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; | ||
5659 | } /* lpfc_intr_handler */ | ||
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index 7249fd252cbb..883938652a6a 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h | |||
@@ -233,6 +233,7 @@ struct lpfc_sli { | |||
233 | #define LPFC_SLI2_ACTIVE 0x200 /* SLI2 overlay in firmware is active */ | 233 | #define LPFC_SLI2_ACTIVE 0x200 /* SLI2 overlay in firmware is active */ |
234 | #define LPFC_PROCESS_LA 0x400 /* Able to process link attention */ | 234 | #define LPFC_PROCESS_LA 0x400 /* Able to process link attention */ |
235 | #define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */ | 235 | #define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */ |
236 | #define LPFC_MENLO_MAINT 0x1000 /* needed for Menlo fw download */ | ||
236 | 237 | ||
237 | struct lpfc_sli_ring ring[LPFC_MAX_RING]; | 238 | struct lpfc_sli_ring ring[LPFC_MAX_RING]; |
238 | int fcp_ring; /* ring used for FCP initiator commands */ | 239 | int fcp_ring; /* ring used for FCP initiator commands */ |
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index ad24cacfbe10..cc43e9de22cc 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h | |||
@@ -18,9 +18,11 @@ | |||
18 | * included with this package. * | 18 | * included with this package. * |
19 | *******************************************************************/ | 19 | *******************************************************************/ |
20 | 20 | ||
21 | #define LPFC_DRIVER_VERSION "8.2.7" | 21 | #define LPFC_DRIVER_VERSION "8.2.8" |
22 | 22 | ||
23 | #define LPFC_DRIVER_NAME "lpfc" | 23 | #define LPFC_DRIVER_NAME "lpfc" |
24 | #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" | ||
25 | #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" | ||
24 | 26 | ||
25 | #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \ | 27 | #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \ |
26 | LPFC_DRIVER_VERSION | 28 | LPFC_DRIVER_VERSION |
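The two handler-name macros pair with the split interrupt handlers introduced in lpfc_sli.c above. The registration itself lives in lpfc_init.c, which is not part of this excerpt, but with MSI-X enabled it would take roughly the following form; the msix_entries vectors and the error labels are placeholders.

	/* MSI-X: one vector for slow-path events, one for fast-path FCP. */
	rc = request_irq(phba->msix_entries[0].vector, lpfc_sp_intr_handler,
			 0, LPFC_SP_DRIVER_HANDLER_NAME, phba);
	if (rc)
		goto irq_fail;
	rc = request_irq(phba->msix_entries[1].vector, lpfc_fp_intr_handler,
			 0, LPFC_FP_DRIVER_HANDLER_NAME, phba);
	if (rc)
		goto irq_free_sp;

	/* MSI or INTx: a single vector served by the combined handler. */
	rc = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED,
			 LPFC_DRIVER_NAME, phba);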
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 109f89d98830..a7de1cc02b40 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <scsi/scsi_transport_fc.h> | 34 | #include <scsi/scsi_transport_fc.h> |
35 | #include "lpfc_hw.h" | 35 | #include "lpfc_hw.h" |
36 | #include "lpfc_sli.h" | 36 | #include "lpfc_sli.h" |
37 | #include "lpfc_nl.h" | ||
37 | #include "lpfc_disc.h" | 38 | #include "lpfc_disc.h" |
38 | #include "lpfc_scsi.h" | 39 | #include "lpfc_scsi.h" |
39 | #include "lpfc.h" | 40 | #include "lpfc.h" |
@@ -204,6 +205,77 @@ lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport) | |||
204 | return 1; | 205 | return 1; |
205 | } | 206 | } |
206 | 207 | ||
208 | /** | ||
209 | * lpfc_discovery_wait: Wait for driver discovery to quiesce. | ||
210 | * @vport: The virtual port for which this call is being executed. | ||
211 | * | ||
212 | * The driver calls this routine specifically from lpfc_vport_delete | ||
213 | * to enforce synchronous execution of the vport | ||
214 | * delete relative to discovery activities. The | ||
215 | * lpfc_vport_delete routine should not return until it | ||
216 | * can reasonably guarantee that discovery has quiesced. | ||
217 | * Post FDISC LOGO, the driver must wait until its SAN teardown is | ||
218 | * complete and all resources recovered before allowing | ||
219 | * cleanup. | ||
220 | * | ||
221 | * This routine does not require any locks held. | ||
222 | **/ | ||
223 | static void lpfc_discovery_wait(struct lpfc_vport *vport) | ||
224 | { | ||
225 | struct lpfc_hba *phba = vport->phba; | ||
226 | uint32_t wait_flags = 0; | ||
227 | unsigned long wait_time_max; | ||
228 | unsigned long start_time; | ||
229 | |||
230 | wait_flags = FC_RSCN_MODE | FC_RSCN_DISCOVERY | FC_NLP_MORE | | ||
231 | FC_RSCN_DEFERRED | FC_NDISC_ACTIVE | FC_DISC_TMO; | ||
232 | |||
233 | /* | ||
234 | * The time constraint on this loop is a balance between the | ||
235 | * fabric RA_TOV value and dev_loss tmo. The driver's | ||
236 | * devloss_tmo is 10 giving this loop a 3x multiplier minimally. | ||
237 | */ | ||
238 | wait_time_max = msecs_to_jiffies(((phba->fc_ratov * 3) + 3) * 1000); | ||
239 | wait_time_max += jiffies; | ||
240 | start_time = jiffies; | ||
241 | while (time_before(jiffies, wait_time_max)) { | ||
242 | if ((vport->num_disc_nodes > 0) || | ||
243 | (vport->fc_flag & wait_flags) || | ||
244 | ((vport->port_state > LPFC_VPORT_FAILED) && | ||
245 | (vport->port_state < LPFC_VPORT_READY))) { | ||
246 | lpfc_printf_log(phba, KERN_INFO, LOG_VPORT, | ||
247 | "1833 Vport discovery quiesce Wait:" | ||
248 | " vpi x%x state x%x fc_flags x%x" | ||
249 | " num_nodes x%x, waiting 1000 msecs" | ||
250 | " total wait msecs x%x\n", | ||
251 | vport->vpi, vport->port_state, | ||
252 | vport->fc_flag, vport->num_disc_nodes, | ||
253 | jiffies_to_msecs(jiffies - start_time)); | ||
254 | msleep(1000); | ||
255 | } else { | ||
256 | /* Base case. Wait variants satisfied. Break out */ | ||
257 | lpfc_printf_log(phba, KERN_INFO, LOG_VPORT, | ||
258 | "1834 Vport discovery quiesced:" | ||
259 | " vpi x%x state x%x fc_flags x%x" | ||
260 | " wait msecs x%x\n", | ||
261 | vport->vpi, vport->port_state, | ||
262 | vport->fc_flag, | ||
263 | jiffies_to_msecs(jiffies | ||
264 | - start_time)); | ||
265 | break; | ||
266 | } | ||
267 | } | ||
268 | |||
269 | if (time_after(jiffies, wait_time_max)) | ||
270 | lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, | ||
271 | "1835 Vport discovery quiesce failed:" | ||
272 | " vpi x%x state x%x fc_flags x%x" | ||
273 | " wait msecs x%x\n", | ||
274 | vport->vpi, vport->port_state, | ||
275 | vport->fc_flag, | ||
276 | jiffies_to_msecs(jiffies - start_time)); | ||
277 | } | ||
278 | |||
207 | int | 279 | int |
208 | lpfc_vport_create(struct fc_vport *fc_vport, bool disable) | 280 | lpfc_vport_create(struct fc_vport *fc_vport, bool disable) |
209 | { | 281 | { |
@@ -506,8 +578,12 @@ lpfc_vport_delete(struct fc_vport *fc_vport) | |||
506 | * initiated after we've disposed of all other resources associated | 578 | * initiated after we've disposed of all other resources associated |
507 | * with the port. | 579 | * with the port. |
508 | */ | 580 | */ |
509 | if (!scsi_host_get(shost) || !scsi_host_get(shost)) | 581 | if (!scsi_host_get(shost)) |
582 | return VPORT_INVAL; | ||
583 | if (!scsi_host_get(shost)) { | ||
584 | scsi_host_put(shost); | ||
510 | return VPORT_INVAL; | 585 | return VPORT_INVAL; |
586 | } | ||
511 | spin_lock_irq(&phba->hbalock); | 587 | spin_lock_irq(&phba->hbalock); |
512 | vport->load_flag |= FC_UNLOADING; | 588 | vport->load_flag |= FC_UNLOADING; |
513 | spin_unlock_irq(&phba->hbalock); | 589 | spin_unlock_irq(&phba->hbalock); |
@@ -597,11 +673,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport) | |||
597 | } | 673 | } |
598 | vport->unreg_vpi_cmpl = VPORT_INVAL; | 674 | vport->unreg_vpi_cmpl = VPORT_INVAL; |
599 | timeout = msecs_to_jiffies(phba->fc_ratov * 2000); | 675 | timeout = msecs_to_jiffies(phba->fc_ratov * 2000); |
676 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) | ||
677 | goto skip_logo; | ||
600 | if (!lpfc_issue_els_npiv_logo(vport, ndlp)) | 678 | if (!lpfc_issue_els_npiv_logo(vport, ndlp)) |
601 | while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout) | 679 | while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout) |
602 | timeout = schedule_timeout(timeout); | 680 | timeout = schedule_timeout(timeout); |
603 | } | 681 | } |
604 | 682 | ||
683 | if (!(phba->pport->load_flag & FC_UNLOADING)) | ||
684 | lpfc_discovery_wait(vport); | ||
685 | |||
605 | skip_logo: | 686 | skip_logo: |
606 | lpfc_cleanup(vport); | 687 | lpfc_cleanup(vport); |
607 | lpfc_sli_host_down(vport); | 688 | lpfc_sli_host_down(vport); |
@@ -615,8 +696,10 @@ skip_logo: | |||
615 | * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) | 696 | * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) |
616 | * does the scsi_host_put() to release the vport. | 697 | * does the scsi_host_put() to release the vport. |
617 | */ | 698 | */ |
618 | lpfc_mbx_unreg_vpi(vport); | 699 | if (lpfc_mbx_unreg_vpi(vport)) |
619 | } | 700 | scsi_host_put(shost); |
701 | } else | ||
702 | scsi_host_put(shost); | ||
620 | 703 | ||
621 | lpfc_free_vpi(phba, vport->vpi); | 704 | lpfc_free_vpi(phba, vport->vpi); |
622 | vport->work_port_events = 0; | 705 | vport->work_port_events = 0; |
@@ -663,3 +746,82 @@ lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports) | |||
663 | scsi_host_put(lpfc_shost_from_vport(vports[i])); | 746 | scsi_host_put(lpfc_shost_from_vport(vports[i])); |
664 | kfree(vports); | 747 | kfree(vports); |
665 | } | 748 | } |
749 | |||
750 | |||
751 | /** | ||
752 | * lpfc_vport_reset_stat_data: Reset the statistical data for the vport. | ||
753 | * @vport: Pointer to vport object. | ||
754 | * | ||
755 | * This function resets the statistical data for the vport. This function | ||
756 | * is called with the host_lock held. | ||
757 | **/ | ||
758 | void | ||
759 | lpfc_vport_reset_stat_data(struct lpfc_vport *vport) | ||
760 | { | ||
761 | struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL; | ||
762 | |||
763 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { | ||
764 | if (!NLP_CHK_NODE_ACT(ndlp)) | ||
765 | continue; | ||
766 | if (ndlp->lat_data) | ||
767 | memset(ndlp->lat_data, 0, LPFC_MAX_BUCKET_COUNT * | ||
768 | sizeof(struct lpfc_scsicmd_bkt)); | ||
769 | } | ||
770 | } | ||
771 | |||
772 | |||
773 | /** | ||
774 | * lpfc_alloc_bucket: Allocate the data buffers required for collecting | ||
775 | * statistical data. | ||
776 | * @vport: Pointer to vport object. | ||
777 | * | ||
778 | * This function allocates the data buffers required for all the FC | ||
779 | * nodes of the vport to collect statistical data. | ||
780 | **/ | ||
781 | void | ||
782 | lpfc_alloc_bucket(struct lpfc_vport *vport) | ||
783 | { | ||
784 | struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL; | ||
785 | |||
786 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { | ||
787 | if (!NLP_CHK_NODE_ACT(ndlp)) | ||
788 | continue; | ||
789 | |||
790 | kfree(ndlp->lat_data); | ||
791 | ndlp->lat_data = NULL; | ||
792 | |||
793 | if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) { | ||
794 | ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT, | ||
795 | sizeof(struct lpfc_scsicmd_bkt), | ||
796 | GFP_ATOMIC); | ||
797 | |||
798 | if (!ndlp->lat_data) | ||
799 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, | ||
800 | "0287 lpfc_alloc_bucket failed to " | ||
801 | "allocate statistical data buffer DID " | ||
802 | "0x%x\n", ndlp->nlp_DID); | ||
803 | } | ||
804 | } | ||
805 | } | ||
806 | |||
807 | /** | ||
808 | * lpfc_free_bucket: Free the data buffers used for collecting | ||
809 | * statistical data. | ||
810 | * @vport: Pointer to vport object. | ||
811 | * | ||
812 | * The function frees the statistical data buffers of all the FC | ||
813 | * nodes of the vport. | ||
814 | **/ | ||
815 | void | ||
816 | lpfc_free_bucket(struct lpfc_vport *vport) | ||
817 | { | ||
818 | struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL; | ||
819 | |||
820 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { | ||
821 | if (!NLP_CHK_NODE_ACT(ndlp)) | ||
822 | continue; | ||
823 | |||
824 | kfree(ndlp->lat_data); | ||
825 | ndlp->lat_data = NULL; | ||
826 | } | ||
827 | } | ||
diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h index 96c445333b69..90828340acea 100644 --- a/drivers/scsi/lpfc/lpfc_vport.h +++ b/drivers/scsi/lpfc/lpfc_vport.h | |||
@@ -112,4 +112,8 @@ struct vport_cmd_tag { | |||
112 | void lpfc_vport_set_state(struct lpfc_vport *vport, | 112 | void lpfc_vport_set_state(struct lpfc_vport *vport, |
113 | enum fc_vport_state new_state); | 113 | enum fc_vport_state new_state); |
114 | 114 | ||
115 | void lpfc_vport_reset_stat_data(struct lpfc_vport *); | ||
116 | void lpfc_alloc_bucket(struct lpfc_vport *); | ||
117 | void lpfc_free_bucket(struct lpfc_vport *); | ||
118 | |||
115 | #endif /* H_LPFC_VPORT */ | 119 | #endif /* H_LPFC_VPORT */ |
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 0ddfe7106b3b..ed731968f15f 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c | |||
@@ -1006,7 +1006,6 @@ qla2x00_terminate_rport_io(struct fc_rport *rport) | |||
1006 | } | 1006 | } |
1007 | 1007 | ||
1008 | qla2x00_abort_fcport_cmds(fcport); | 1008 | qla2x00_abort_fcport_cmds(fcport); |
1009 | scsi_target_unblock(&rport->dev); | ||
1010 | } | 1009 | } |
1011 | 1010 | ||
1012 | static int | 1011 | static int |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index fc4bfa7f839c..a76efd99d007 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
@@ -1187,7 +1187,12 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt) | |||
1187 | cp->serial_number, comp_status, | 1187 | cp->serial_number, comp_status, |
1188 | atomic_read(&fcport->state))); | 1188 | atomic_read(&fcport->state))); |
1189 | 1189 | ||
1190 | cp->result = DID_BUS_BUSY << 16; | 1190 | /* |
1191 | * We are going to have the fc class block the rport | ||
1192 | * while we try to recover so instruct the mid layer | ||
1193 | * to requeue until the class decides how to handle this. | ||
1194 | */ | ||
1195 | cp->result = DID_TRANSPORT_DISRUPTED << 16; | ||
1191 | if (atomic_read(&fcport->state) == FCS_ONLINE) | 1196 | if (atomic_read(&fcport->state) == FCS_ONLINE) |
1192 | qla2x00_mark_device_lost(fcport->ha, fcport, 1, 1); | 1197 | qla2x00_mark_device_lost(fcport->ha, fcport, 1, 1); |
1193 | break; | 1198 | break; |
@@ -1214,7 +1219,12 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt) | |||
1214 | break; | 1219 | break; |
1215 | 1220 | ||
1216 | case CS_TIMEOUT: | 1221 | case CS_TIMEOUT: |
1217 | cp->result = DID_BUS_BUSY << 16; | 1222 | /* |
1223 | * We are going to have the fc class block the rport | ||
1224 | * while we try to recover so instruct the mid layer | ||
1225 | * to requeue until the class decides how to handle this. | ||
1226 | */ | ||
1227 | cp->result = DID_TRANSPORT_DISRUPTED << 16; | ||
1218 | 1228 | ||
1219 | if (IS_FWI2_CAPABLE(ha)) { | 1229 | if (IS_FWI2_CAPABLE(ha)) { |
1220 | DEBUG2(printk(KERN_INFO | 1230 | DEBUG2(printk(KERN_INFO |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 3433441b956a..2aed4721c0d0 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -394,10 +394,8 @@ qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | |||
394 | } | 394 | } |
395 | 395 | ||
396 | /* Close window on fcport/rport state-transitioning. */ | 396 | /* Close window on fcport/rport state-transitioning. */ |
397 | if (fcport->drport) { | 397 | if (fcport->drport) |
398 | cmd->result = DID_IMM_RETRY << 16; | 398 | goto qc_target_busy; |
399 | goto qc_fail_command; | ||
400 | } | ||
401 | 399 | ||
402 | if (atomic_read(&fcport->state) != FCS_ONLINE) { | 400 | if (atomic_read(&fcport->state) != FCS_ONLINE) { |
403 | if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || | 401 | if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || |
@@ -405,7 +403,7 @@ qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | |||
405 | cmd->result = DID_NO_CONNECT << 16; | 403 | cmd->result = DID_NO_CONNECT << 16; |
406 | goto qc_fail_command; | 404 | goto qc_fail_command; |
407 | } | 405 | } |
408 | goto qc_host_busy; | 406 | goto qc_target_busy; |
409 | } | 407 | } |
410 | 408 | ||
411 | spin_unlock_irq(ha->host->host_lock); | 409 | spin_unlock_irq(ha->host->host_lock); |
@@ -428,10 +426,11 @@ qc_host_busy_free_sp: | |||
428 | 426 | ||
429 | qc_host_busy_lock: | 427 | qc_host_busy_lock: |
430 | spin_lock_irq(ha->host->host_lock); | 428 | spin_lock_irq(ha->host->host_lock); |
431 | |||
432 | qc_host_busy: | ||
433 | return SCSI_MLQUEUE_HOST_BUSY; | 429 | return SCSI_MLQUEUE_HOST_BUSY; |
434 | 430 | ||
431 | qc_target_busy: | ||
432 | return SCSI_MLQUEUE_TARGET_BUSY; | ||
433 | |||
435 | qc_fail_command: | 434 | qc_fail_command: |
436 | done(cmd); | 435 | done(cmd); |
437 | 436 | ||
@@ -461,10 +460,8 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | |||
461 | } | 460 | } |
462 | 461 | ||
463 | /* Close window on fcport/rport state-transitioning. */ | 462 | /* Close window on fcport/rport state-transitioning. */ |
464 | if (fcport->drport) { | 463 | if (fcport->drport) |
465 | cmd->result = DID_IMM_RETRY << 16; | 464 | goto qc24_target_busy; |
466 | goto qc24_fail_command; | ||
467 | } | ||
468 | 465 | ||
469 | if (atomic_read(&fcport->state) != FCS_ONLINE) { | 466 | if (atomic_read(&fcport->state) != FCS_ONLINE) { |
470 | if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || | 467 | if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || |
@@ -472,7 +469,7 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | |||
472 | cmd->result = DID_NO_CONNECT << 16; | 469 | cmd->result = DID_NO_CONNECT << 16; |
473 | goto qc24_fail_command; | 470 | goto qc24_fail_command; |
474 | } | 471 | } |
475 | goto qc24_host_busy; | 472 | goto qc24_target_busy; |
476 | } | 473 | } |
477 | 474 | ||
478 | spin_unlock_irq(ha->host->host_lock); | 475 | spin_unlock_irq(ha->host->host_lock); |
@@ -495,10 +492,11 @@ qc24_host_busy_free_sp: | |||
495 | 492 | ||
496 | qc24_host_busy_lock: | 493 | qc24_host_busy_lock: |
497 | spin_lock_irq(ha->host->host_lock); | 494 | spin_lock_irq(ha->host->host_lock); |
498 | |||
499 | qc24_host_busy: | ||
500 | return SCSI_MLQUEUE_HOST_BUSY; | 495 | return SCSI_MLQUEUE_HOST_BUSY; |
501 | 496 | ||
497 | qc24_target_busy: | ||
498 | return SCSI_MLQUEUE_TARGET_BUSY; | ||
499 | |||
502 | qc24_fail_command: | 500 | qc24_fail_command: |
503 | done(cmd); | 501 | done(cmd); |
504 | 502 | ||
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c index a91a57c57bff..799120fcb9be 100644 --- a/drivers/scsi/qla4xxx/ql4_isr.c +++ b/drivers/scsi/qla4xxx/ql4_isr.c | |||
@@ -139,7 +139,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha, | |||
139 | ha->host_no, cmd->device->channel, | 139 | ha->host_no, cmd->device->channel, |
140 | cmd->device->id, cmd->device->lun)); | 140 | cmd->device->id, cmd->device->lun)); |
141 | 141 | ||
142 | cmd->result = DID_BUS_BUSY << 16; | 142 | cmd->result = DID_TRANSPORT_DISRUPTED << 16; |
143 | 143 | ||
144 | /* | 144 | /* |
145 | * Mark device missing so that we won't continue to send | 145 | * Mark device missing so that we won't continue to send |
@@ -243,7 +243,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha, | |||
243 | if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE) | 243 | if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE) |
244 | qla4xxx_mark_device_missing(ha, ddb_entry); | 244 | qla4xxx_mark_device_missing(ha, ddb_entry); |
245 | 245 | ||
246 | cmd->result = DID_BUS_BUSY << 16; | 246 | cmd->result = DID_TRANSPORT_DISRUPTED << 16; |
247 | break; | 247 | break; |
248 | 248 | ||
249 | case SCS_QUEUE_FULL: | 249 | case SCS_QUEUE_FULL: |
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index de8279ad7d89..db7ea3bb4e83 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c | |||
@@ -353,7 +353,7 @@ void qla4xxx_mark_device_missing(struct scsi_qla_host *ha, | |||
353 | ha->host_no, ddb_entry->bus, ddb_entry->target, | 353 | ha->host_no, ddb_entry->bus, ddb_entry->target, |
354 | ddb_entry->fw_ddb_index)); | 354 | ddb_entry->fw_ddb_index)); |
355 | iscsi_block_session(ddb_entry->sess); | 355 | iscsi_block_session(ddb_entry->sess); |
356 | iscsi_conn_error(ddb_entry->conn, ISCSI_ERR_CONN_FAILED); | 356 | iscsi_conn_error_event(ddb_entry->conn, ISCSI_ERR_CONN_FAILED); |
357 | } | 357 | } |
358 | 358 | ||
359 | static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha, | 359 | static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha, |
@@ -439,7 +439,7 @@ static int qla4xxx_queuecommand(struct scsi_cmnd *cmd, | |||
439 | cmd->result = DID_NO_CONNECT << 16; | 439 | cmd->result = DID_NO_CONNECT << 16; |
440 | goto qc_fail_command; | 440 | goto qc_fail_command; |
441 | } | 441 | } |
442 | goto qc_host_busy; | 442 | return SCSI_MLQUEUE_TARGET_BUSY; |
443 | } | 443 | } |
444 | 444 | ||
445 | if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) | 445 | if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) |
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 2ac3cb2b9081..f8b79d401d58 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c | |||
@@ -754,8 +754,12 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) | |||
754 | } | 754 | } |
755 | spin_unlock_irqrestore(host->host_lock, flags); | 755 | spin_unlock_irqrestore(host->host_lock, flags); |
756 | if (rtn) { | 756 | if (rtn) { |
757 | scsi_queue_insert(cmd, (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ? | 757 | if (rtn != SCSI_MLQUEUE_DEVICE_BUSY && |
758 | rtn : SCSI_MLQUEUE_HOST_BUSY); | 758 | rtn != SCSI_MLQUEUE_TARGET_BUSY) |
759 | rtn = SCSI_MLQUEUE_HOST_BUSY; | ||
760 | |||
761 | scsi_queue_insert(cmd, rtn); | ||
762 | |||
759 | SCSI_LOG_MLQUEUE(3, | 763 | SCSI_LOG_MLQUEUE(3, |
760 | printk("queuecommand : request rejected\n")); | 764 | printk("queuecommand : request rejected\n")); |
761 | } | 765 | } |
@@ -800,6 +804,7 @@ static struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd) | |||
800 | void scsi_finish_command(struct scsi_cmnd *cmd) | 804 | void scsi_finish_command(struct scsi_cmnd *cmd) |
801 | { | 805 | { |
802 | struct scsi_device *sdev = cmd->device; | 806 | struct scsi_device *sdev = cmd->device; |
807 | struct scsi_target *starget = scsi_target(sdev); | ||
803 | struct Scsi_Host *shost = sdev->host; | 808 | struct Scsi_Host *shost = sdev->host; |
804 | struct scsi_driver *drv; | 809 | struct scsi_driver *drv; |
805 | unsigned int good_bytes; | 810 | unsigned int good_bytes; |
@@ -815,6 +820,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd) | |||
815 | * XXX(hch): What about locking? | 820 | * XXX(hch): What about locking? |
816 | */ | 821 | */ |
817 | shost->host_blocked = 0; | 822 | shost->host_blocked = 0; |
823 | starget->target_blocked = 0; | ||
818 | sdev->device_blocked = 0; | 824 | sdev->device_blocked = 0; |
819 | 825 | ||
820 | /* | 826 | /* |
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index fecefa05cb62..94ed262bdf0c 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c | |||
@@ -1065,10 +1065,10 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost, | |||
1065 | struct list_head *done_q) | 1065 | struct list_head *done_q) |
1066 | { | 1066 | { |
1067 | struct scsi_cmnd *scmd, *tgtr_scmd, *next; | 1067 | struct scsi_cmnd *scmd, *tgtr_scmd, *next; |
1068 | unsigned int id; | 1068 | unsigned int id = 0; |
1069 | int rtn; | 1069 | int rtn; |
1070 | 1070 | ||
1071 | for (id = 0; id <= shost->max_id; id++) { | 1071 | do { |
1072 | tgtr_scmd = NULL; | 1072 | tgtr_scmd = NULL; |
1073 | list_for_each_entry(scmd, work_q, eh_entry) { | 1073 | list_for_each_entry(scmd, work_q, eh_entry) { |
1074 | if (id == scmd_id(scmd)) { | 1074 | if (id == scmd_id(scmd)) { |
@@ -1076,8 +1076,18 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost, | |||
1076 | break; | 1076 | break; |
1077 | } | 1077 | } |
1078 | } | 1078 | } |
1079 | if (!tgtr_scmd) { | ||
1080 | /* not one exactly equal; find the next highest */ | ||
1081 | list_for_each_entry(scmd, work_q, eh_entry) { | ||
1082 | if (scmd_id(scmd) > id && | ||
1083 | (!tgtr_scmd || | ||
1084 | scmd_id(tgtr_scmd) > scmd_id(scmd))) | ||
1085 | tgtr_scmd = scmd; | ||
1086 | } | ||
1087 | } | ||
1079 | if (!tgtr_scmd) | 1088 | if (!tgtr_scmd) |
1080 | continue; | 1089 | /* no more commands, that's it */ |
1090 | break; | ||
1081 | 1091 | ||
1082 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending target reset " | 1092 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending target reset " |
1083 | "to target %d\n", | 1093 | "to target %d\n", |
@@ -1096,7 +1106,8 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost, | |||
1096 | " failed target: " | 1106 | " failed target: " |
1097 | "%d\n", | 1107 | "%d\n", |
1098 | current->comm, id)); | 1108 | current->comm, id)); |
1099 | } | 1109 | id++; |
1110 | } while(id != 0); | ||
1100 | 1111 | ||
1101 | return list_empty(work_q); | 1112 | return list_empty(work_q); |
1102 | } | 1113 | } |
@@ -1219,6 +1230,40 @@ static void scsi_eh_offline_sdevs(struct list_head *work_q, | |||
1219 | } | 1230 | } |
1220 | 1231 | ||
1221 | /** | 1232 | /** |
1233 | * scsi_noretry_cmd - determine if command should be failed fast | ||
1234 | * @scmd: SCSI cmd to examine. | ||
1235 | */ | ||
1236 | int scsi_noretry_cmd(struct scsi_cmnd *scmd) | ||
1237 | { | ||
1238 | switch (host_byte(scmd->result)) { | ||
1239 | case DID_OK: | ||
1240 | break; | ||
1241 | case DID_BUS_BUSY: | ||
1242 | return blk_failfast_transport(scmd->request); | ||
1243 | case DID_PARITY: | ||
1244 | return blk_failfast_dev(scmd->request); | ||
1245 | case DID_ERROR: | ||
1246 | if (msg_byte(scmd->result) == COMMAND_COMPLETE && | ||
1247 | status_byte(scmd->result) == RESERVATION_CONFLICT) | ||
1248 | return 0; | ||
1249 | /* fall through */ | ||
1250 | case DID_SOFT_ERROR: | ||
1251 | return blk_failfast_driver(scmd->request); | ||
1252 | } | ||
1253 | |||
1254 | switch (status_byte(scmd->result)) { | ||
1255 | case CHECK_CONDITION: | ||
1256 | /* | ||
1257 | * assume the caller has checked the sense and determined | ||
1258 | * that the check condition was retryable. | ||
1259 | */ | ||
1260 | return blk_failfast_dev(scmd->request); | ||
1261 | } | ||
1262 | |||
1263 | return 0; | ||
1264 | } | ||
1265 | |||
1266 | /** | ||
1222 | * scsi_decide_disposition - Disposition a cmd on return from LLD. | 1267 | * scsi_decide_disposition - Disposition a cmd on return from LLD. |
1223 | * @scmd: SCSI cmd to examine. | 1268 | * @scmd: SCSI cmd to examine. |
1224 | * | 1269 | * |
@@ -1290,7 +1335,20 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) | |||
1290 | 1335 | ||
1291 | case DID_REQUEUE: | 1336 | case DID_REQUEUE: |
1292 | return ADD_TO_MLQUEUE; | 1337 | return ADD_TO_MLQUEUE; |
1293 | 1338 | case DID_TRANSPORT_DISRUPTED: | |
1339 | /* | ||
1340 | * LLD/transport was disrupted during processing of the IO. | ||
1341 | * The transport class is now blocked/blocking, | ||
1342 | * and the transport will decide what to do with the IO | ||
1343 | * based on its timers and recovery capabilities. | ||
1344 | */ | ||
1345 | return ADD_TO_MLQUEUE; | ||
1346 | case DID_TRANSPORT_FAILFAST: | ||
1347 | /* | ||
1348 | * The transport decided to failfast the IO (most likely | ||
1349 | * the fast io fail tmo fired), so send IO directly upwards. | ||
1350 | */ | ||
1351 | return SUCCESS; | ||
1294 | case DID_ERROR: | 1352 | case DID_ERROR: |
1295 | if (msg_byte(scmd->result) == COMMAND_COMPLETE && | 1353 | if (msg_byte(scmd->result) == COMMAND_COMPLETE && |
1296 | status_byte(scmd->result) == RESERVATION_CONFLICT) | 1354 | status_byte(scmd->result) == RESERVATION_CONFLICT) |
@@ -1383,7 +1441,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) | |||
1383 | * even if the request is marked fast fail, we still requeue | 1441 | * even if the request is marked fast fail, we still requeue |
1384 | * for queue congestion conditions (QUEUE_FULL or BUSY) */ | 1442 | * for queue congestion conditions (QUEUE_FULL or BUSY) */ |
1385 | if ((++scmd->retries) <= scmd->allowed | 1443 | if ((++scmd->retries) <= scmd->allowed |
1386 | && !blk_noretry_request(scmd->request)) { | 1444 | && !scsi_noretry_cmd(scmd)) { |
1387 | return NEEDS_RETRY; | 1445 | return NEEDS_RETRY; |
1388 | } else { | 1446 | } else { |
1389 | /* | 1447 | /* |
@@ -1508,7 +1566,7 @@ void scsi_eh_flush_done_q(struct list_head *done_q) | |||
1508 | list_for_each_entry_safe(scmd, next, done_q, eh_entry) { | 1566 | list_for_each_entry_safe(scmd, next, done_q, eh_entry) { |
1509 | list_del_init(&scmd->eh_entry); | 1567 | list_del_init(&scmd->eh_entry); |
1510 | if (scsi_device_online(scmd->device) && | 1568 | if (scsi_device_online(scmd->device) && |
1511 | !blk_noretry_request(scmd->request) && | 1569 | !scsi_noretry_cmd(scmd) && |
1512 | (++scmd->retries <= scmd->allowed)) { | 1570 | (++scmd->retries <= scmd->allowed)) { |
1513 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush" | 1571 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush" |
1514 | " retry cmd: %p\n", | 1572 | " retry cmd: %p\n", |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 98ee55ced592..e5a9526d2037 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -114,6 +114,7 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason) | |||
114 | { | 114 | { |
115 | struct Scsi_Host *host = cmd->device->host; | 115 | struct Scsi_Host *host = cmd->device->host; |
116 | struct scsi_device *device = cmd->device; | 116 | struct scsi_device *device = cmd->device; |
117 | struct scsi_target *starget = scsi_target(device); | ||
117 | struct request_queue *q = device->request_queue; | 118 | struct request_queue *q = device->request_queue; |
118 | unsigned long flags; | 119 | unsigned long flags; |
119 | 120 | ||
@@ -133,10 +134,17 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason) | |||
133 | * if a command is requeued with no other commands outstanding | 134 | * if a command is requeued with no other commands outstanding |
134 | * either for the device or for the host. | 135 | * either for the device or for the host. |
135 | */ | 136 | */ |
136 | if (reason == SCSI_MLQUEUE_HOST_BUSY) | 137 | switch (reason) { |
138 | case SCSI_MLQUEUE_HOST_BUSY: | ||
137 | host->host_blocked = host->max_host_blocked; | 139 | host->host_blocked = host->max_host_blocked; |
138 | else if (reason == SCSI_MLQUEUE_DEVICE_BUSY) | 140 | break; |
141 | case SCSI_MLQUEUE_DEVICE_BUSY: | ||
139 | device->device_blocked = device->max_device_blocked; | 142 | device->device_blocked = device->max_device_blocked; |
143 | break; | ||
144 | case SCSI_MLQUEUE_TARGET_BUSY: | ||
145 | starget->target_blocked = starget->max_target_blocked; | ||
146 | break; | ||
147 | } | ||
140 | 148 | ||
141 | /* | 149 | /* |
142 | * Decrement the counters, since these commands are no longer | 150 | * Decrement the counters, since these commands are no longer |
@@ -460,10 +468,12 @@ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd) | |||
460 | void scsi_device_unbusy(struct scsi_device *sdev) | 468 | void scsi_device_unbusy(struct scsi_device *sdev) |
461 | { | 469 | { |
462 | struct Scsi_Host *shost = sdev->host; | 470 | struct Scsi_Host *shost = sdev->host; |
471 | struct scsi_target *starget = scsi_target(sdev); | ||
463 | unsigned long flags; | 472 | unsigned long flags; |
464 | 473 | ||
465 | spin_lock_irqsave(shost->host_lock, flags); | 474 | spin_lock_irqsave(shost->host_lock, flags); |
466 | shost->host_busy--; | 475 | shost->host_busy--; |
476 | starget->target_busy--; | ||
467 | if (unlikely(scsi_host_in_recovery(shost) && | 477 | if (unlikely(scsi_host_in_recovery(shost) && |
468 | (shost->host_failed || shost->host_eh_scheduled))) | 478 | (shost->host_failed || shost->host_eh_scheduled))) |
469 | scsi_eh_wakeup(shost); | 479 | scsi_eh_wakeup(shost); |
@@ -519,6 +529,13 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev) | |||
519 | spin_unlock_irqrestore(shost->host_lock, flags); | 529 | spin_unlock_irqrestore(shost->host_lock, flags); |
520 | } | 530 | } |
521 | 531 | ||
532 | static inline int scsi_target_is_busy(struct scsi_target *starget) | ||
533 | { | ||
534 | return ((starget->can_queue > 0 && | ||
535 | starget->target_busy >= starget->can_queue) || | ||
536 | starget->target_blocked); | ||
537 | } | ||
538 | |||
522 | /* | 539 | /* |
523 | * Function: scsi_run_queue() | 540 | * Function: scsi_run_queue() |
524 | * | 541 | * |
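The new scsi_target_is_busy() predicate treats a target as busy when it has a per-target queue limit (can_queue > 0) and its outstanding-command count has reached it, or while its target_blocked counter is still non-zero; can_queue == 0 (the default set in scsi_alloc_target further down) means no per-target limit. A hedged user-space restatement of that predicate, with a stand-in struct:

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-in for struct scsi_target; field names mirror the patch. */
    struct starget_model {
        int can_queue;       /* 0 => no per-target limit */
        int target_busy;     /* commands currently outstanding on the target */
        int target_blocked;  /* non-zero while the target is being throttled */
    };

    static bool target_is_busy(const struct starget_model *t)
    {
        return (t->can_queue > 0 && t->target_busy >= t->can_queue) ||
               t->target_blocked;
    }

    int main(void)
    {
        struct starget_model t = { .can_queue = 4, .target_busy = 4, .target_blocked = 0 };
        printf("busy=%d\n", target_is_busy(&t));   /* 1: at its queue limit */
        t.target_busy = 0;
        printf("busy=%d\n", target_is_busy(&t));   /* 0: idle and not blocked */
        return 0;
    }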
@@ -533,7 +550,7 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev) | |||
533 | */ | 550 | */ |
534 | static void scsi_run_queue(struct request_queue *q) | 551 | static void scsi_run_queue(struct request_queue *q) |
535 | { | 552 | { |
536 | struct scsi_device *sdev = q->queuedata; | 553 | struct scsi_device *starved_head = NULL, *sdev = q->queuedata; |
537 | struct Scsi_Host *shost = sdev->host; | 554 | struct Scsi_Host *shost = sdev->host; |
538 | unsigned long flags; | 555 | unsigned long flags; |
539 | 556 | ||
@@ -560,6 +577,21 @@ static void scsi_run_queue(struct request_queue *q) | |||
560 | */ | 577 | */ |
561 | sdev = list_entry(shost->starved_list.next, | 578 | sdev = list_entry(shost->starved_list.next, |
562 | struct scsi_device, starved_entry); | 579 | struct scsi_device, starved_entry); |
580 | /* | ||
581 | * The *queue_ready functions can add a device back onto the | ||
582 | * starved list's tail, so we must check for an infinite loop. | ||
583 | */ | ||
584 | if (sdev == starved_head) | ||
585 | break; | ||
586 | if (!starved_head) | ||
587 | starved_head = sdev; | ||
588 | |||
589 | if (scsi_target_is_busy(scsi_target(sdev))) { | ||
590 | list_move_tail(&sdev->starved_entry, | ||
591 | &shost->starved_list); | ||
592 | continue; | ||
593 | } | ||
594 | |||
563 | list_del_init(&sdev->starved_entry); | 595 | list_del_init(&sdev->starved_entry); |
564 | spin_unlock(shost->host_lock); | 596 | spin_unlock(shost->host_lock); |
565 | 597 | ||
@@ -575,13 +607,6 @@ static void scsi_run_queue(struct request_queue *q) | |||
575 | spin_unlock(sdev->request_queue->queue_lock); | 607 | spin_unlock(sdev->request_queue->queue_lock); |
576 | 608 | ||
577 | spin_lock(shost->host_lock); | 609 | spin_lock(shost->host_lock); |
578 | if (unlikely(!list_empty(&sdev->starved_entry))) | ||
579 | /* | ||
580 | * sdev lost a race, and was put back on the | ||
581 | * starved list. This is unlikely but without this | ||
582 | * in theory we could loop forever. | ||
583 | */ | ||
584 | break; | ||
585 | } | 610 | } |
586 | spin_unlock_irqrestore(shost->host_lock, flags); | 611 | spin_unlock_irqrestore(shost->host_lock, flags); |
587 | 612 | ||
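The scsi_run_queue() rework above drops the old "did we race back onto the list?" check and instead remembers the first starved device it handles; because a device whose target is still busy is rotated to the tail rather than removed, the scan stops as soon as that first entry comes around again. A minimal sketch of that rotation guard over a plain ring buffer standing in for the starved list (the data structure and names here are illustrative, not the kernel's list_head machinery):

    #include <stdbool.h>
    #include <stdio.h>

    struct dev_model { const char *name; bool target_busy; };

    /*
     * Walk a "starved list" held in a ring buffer: busy entries are rotated to
     * the tail, ready entries are dispatched and removed.  Remembering the first
     * entry we saw (starved_head) bounds the walk to one full pass.
     */
    static void run_starved(struct dev_model *ring[], int *count)
    {
        struct dev_model *starved_head = NULL;

        while (*count) {
            struct dev_model *sdev = ring[0];

            if (sdev == starved_head)
                break;                      /* came full circle: everyone left is busy */
            if (!starved_head)
                starved_head = sdev;

            /* shift the head off the list */
            for (int i = 1; i < *count; i++)
                ring[i - 1] = ring[i];
            (*count)--;

            if (sdev->target_busy) {
                ring[(*count)++] = sdev;    /* rotate to tail, try again later */
                continue;
            }
            printf("dispatching %s\n", sdev->name);
        }
    }

    int main(void)
    {
        struct dev_model a = { "sda", true }, b = { "sdb", false }, c = { "sdc", true };
        struct dev_model *ring[8] = { &a, &b, &c };
        int count = 3;

        run_starved(ring, &count);          /* dispatches sdb; sda and sdc stay queued */
        return 0;
    }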
@@ -681,7 +706,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error, | |||
681 | leftover = req->data_len; | 706 | leftover = req->data_len; |
682 | 707 | ||
683 | /* kill remainder if no retries */ | 708 | /* kill remainder if no retries */ |
684 | if (error && blk_noretry_request(req)) | 709 | if (error && scsi_noretry_cmd(cmd)) |
685 | blk_end_request(req, error, leftover); | 710 | blk_end_request(req, error, leftover); |
686 | else { | 711 | else { |
687 | if (requeue) { | 712 | if (requeue) { |
@@ -1344,6 +1369,52 @@ static inline int scsi_dev_queue_ready(struct request_queue *q, | |||
1344 | return 1; | 1369 | return 1; |
1345 | } | 1370 | } |
1346 | 1371 | ||
1372 | |||
1373 | /* | ||
1374 | * scsi_target_queue_ready: checks if we can send commands to the target | ||
1375 | * @sdev: scsi device on starget to check. | ||
1376 | * | ||
1377 | * Called with the host lock held. | ||
1378 | */ | ||
1379 | static inline int scsi_target_queue_ready(struct Scsi_Host *shost, | ||
1380 | struct scsi_device *sdev) | ||
1381 | { | ||
1382 | struct scsi_target *starget = scsi_target(sdev); | ||
1383 | |||
1384 | if (starget->single_lun) { | ||
1385 | if (starget->starget_sdev_user && | ||
1386 | starget->starget_sdev_user != sdev) | ||
1387 | return 0; | ||
1388 | starget->starget_sdev_user = sdev; | ||
1389 | } | ||
1390 | |||
1391 | if (starget->target_busy == 0 && starget->target_blocked) { | ||
1392 | /* | ||
1393 | * unblock after target_blocked iterates to zero | ||
1394 | */ | ||
1395 | if (--starget->target_blocked == 0) { | ||
1396 | SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget, | ||
1397 | "unblocking target at zero depth\n")); | ||
1398 | } else { | ||
1399 | blk_plug_device(sdev->request_queue); | ||
1400 | return 0; | ||
1401 | } | ||
1402 | } | ||
1403 | |||
1404 | if (scsi_target_is_busy(starget)) { | ||
1405 | if (list_empty(&sdev->starved_entry)) { | ||
1406 | list_add_tail(&sdev->starved_entry, | ||
1407 | &shost->starved_list); | ||
1408 | return 0; | ||
1409 | } | ||
1410 | } | ||
1411 | |||
1412 | /* We're OK to process the command, so we can't be starved */ | ||
1413 | if (!list_empty(&sdev->starved_entry)) | ||
1414 | list_del_init(&sdev->starved_entry); | ||
1415 | return 1; | ||
1416 | } | ||
1417 | |||
1347 | /* | 1418 | /* |
1348 | * scsi_host_queue_ready: if we can send requests to shost, return 1 else | 1419 | * scsi_host_queue_ready: if we can send requests to shost, return 1 else |
1349 | * return 0. We must end up running the queue again whenever 0 is | 1420 | * return 0. We must end up running the queue again whenever 0 is |
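scsi_target_queue_ready() above gates dispatch per target the same way the existing device and host checks do: enforce single_lun exclusivity, let a blocked target drain by decrementing target_blocked once per attempt (replugging the queue until it reaches zero), park the device on the host's starved list while the target is saturated, and pull it back off once a command can go out. A compressed user-space restatement of that decision order (the 0/1 return convention follows the patch; everything else is simplified):

    #include <stdbool.h>
    #include <stdio.h>

    struct tgt {
        bool single_lun;
        int  exclusive_user;    /* stand-in for starget_sdev_user, -1 = none */
        int  target_busy, target_blocked;
        int  can_queue;         /* 0 = unlimited */
    };

    /* Returns 1 if device 'dev' may send a command to target 't' right now. */
    static int target_queue_ready(struct tgt *t, int dev)
    {
        if (t->single_lun) {
            if (t->exclusive_user != -1 && t->exclusive_user != dev)
                return 0;                   /* another LUN owns the target */
            t->exclusive_user = dev;
        }

        if (t->target_busy == 0 && t->target_blocked) {
            if (--t->target_blocked != 0)
                return 0;                   /* still draining the blocked window */
        }

        if ((t->can_queue > 0 && t->target_busy >= t->can_queue) || t->target_blocked)
            return 0;                       /* saturated: caller parks on starved list */

        return 1;
    }

    int main(void)
    {
        struct tgt t = { .single_lun = false, .exclusive_user = -1,
                         .target_busy = 0, .target_blocked = 2, .can_queue = 0 };
        for (int attempt = 1; attempt <= 3; attempt++)
            printf("attempt %d: ready=%d\n", attempt, target_queue_ready(&t, 0));
        return 0;
    }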
@@ -1390,6 +1461,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q) | |||
1390 | { | 1461 | { |
1391 | struct scsi_cmnd *cmd = req->special; | 1462 | struct scsi_cmnd *cmd = req->special; |
1392 | struct scsi_device *sdev = cmd->device; | 1463 | struct scsi_device *sdev = cmd->device; |
1464 | struct scsi_target *starget = scsi_target(sdev); | ||
1393 | struct Scsi_Host *shost = sdev->host; | 1465 | struct Scsi_Host *shost = sdev->host; |
1394 | 1466 | ||
1395 | blkdev_dequeue_request(req); | 1467 | blkdev_dequeue_request(req); |
@@ -1413,6 +1485,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q) | |||
1413 | spin_unlock(sdev->request_queue->queue_lock); | 1485 | spin_unlock(sdev->request_queue->queue_lock); |
1414 | spin_lock(shost->host_lock); | 1486 | spin_lock(shost->host_lock); |
1415 | shost->host_busy++; | 1487 | shost->host_busy++; |
1488 | starget->target_busy++; | ||
1416 | spin_unlock(shost->host_lock); | 1489 | spin_unlock(shost->host_lock); |
1417 | spin_lock(sdev->request_queue->queue_lock); | 1490 | spin_lock(sdev->request_queue->queue_lock); |
1418 | 1491 | ||
@@ -1550,14 +1623,13 @@ static void scsi_request_fn(struct request_queue *q) | |||
1550 | goto not_ready; | 1623 | goto not_ready; |
1551 | } | 1624 | } |
1552 | 1625 | ||
1626 | if (!scsi_target_queue_ready(shost, sdev)) | ||
1627 | goto not_ready; | ||
1628 | |||
1553 | if (!scsi_host_queue_ready(q, shost, sdev)) | 1629 | if (!scsi_host_queue_ready(q, shost, sdev)) |
1554 | goto not_ready; | 1630 | goto not_ready; |
1555 | if (scsi_target(sdev)->single_lun) { | 1631 | |
1556 | if (scsi_target(sdev)->starget_sdev_user && | 1632 | scsi_target(sdev)->target_busy++; |
1557 | scsi_target(sdev)->starget_sdev_user != sdev) | ||
1558 | goto not_ready; | ||
1559 | scsi_target(sdev)->starget_sdev_user = sdev; | ||
1560 | } | ||
1561 | shost->host_busy++; | 1633 | shost->host_busy++; |
1562 | 1634 | ||
1563 | /* | 1635 | /* |
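With the target check in place, the request function's admission order becomes device, then target, then host, and only when all three say yes does it charge the busy counters (target_busy++ and host_busy++); the old open-coded single_lun test disappears because scsi_target_queue_ready() now covers it. A small sketch of that gate-then-charge pattern, with the three checks reduced to stubs and the thresholds invented for the example:

    #include <stdbool.h>
    #include <stdio.h>

    struct counters { int device_busy, target_busy, host_busy; };

    /* Stub admission checks; in the kernel each also handles *_blocked decay. */
    static bool dev_ready(struct counters *c)    { return c->device_busy < 2; }
    static bool target_ready(struct counters *c) { return c->target_busy < 4; }
    static bool host_ready(struct counters *c)   { return c->host_busy < 8;  }

    static bool try_dispatch(struct counters *c)
    {
        if (!dev_ready(c) || !target_ready(c) || !host_ready(c))
            return false;           /* "not_ready": requeue and run the queue later */

        c->device_busy++;           /* charged earlier in the real request_fn */
        c->target_busy++;           /* new per-target accounting */
        c->host_busy++;
        return true;
    }

    int main(void)
    {
        struct counters c = { 0, 0, 0 };
        while (try_dispatch(&c))
            ;
        printf("stopped at device=%d target=%d host=%d\n",
               c.device_busy, c.target_busy, c.host_busy);
        return 0;
    }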
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index 6cddd5dd323c..e1850904ff73 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h | |||
@@ -59,6 +59,7 @@ void scsi_eh_ready_devs(struct Scsi_Host *shost, | |||
59 | struct list_head *done_q); | 59 | struct list_head *done_q); |
60 | int scsi_eh_get_sense(struct list_head *work_q, | 60 | int scsi_eh_get_sense(struct list_head *work_q, |
61 | struct list_head *done_q); | 61 | struct list_head *done_q); |
62 | int scsi_noretry_cmd(struct scsi_cmnd *scmd); | ||
62 | 63 | ||
63 | /* scsi_lib.c */ | 64 | /* scsi_lib.c */ |
64 | extern int scsi_maybe_unblock_host(struct scsi_device *sdev); | 65 | extern int scsi_maybe_unblock_host(struct scsi_device *sdev); |
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 334862e26a1b..b14dc02c3ded 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c | |||
@@ -419,6 +419,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent, | |||
419 | dev->type = &scsi_target_type; | 419 | dev->type = &scsi_target_type; |
420 | starget->id = id; | 420 | starget->id = id; |
421 | starget->channel = channel; | 421 | starget->channel = channel; |
422 | starget->can_queue = 0; | ||
422 | INIT_LIST_HEAD(&starget->siblings); | 423 | INIT_LIST_HEAD(&starget->siblings); |
423 | INIT_LIST_HEAD(&starget->devices); | 424 | INIT_LIST_HEAD(&starget->devices); |
424 | starget->state = STARGET_CREATED; | 425 | starget->state = STARGET_CREATED; |
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index d5f7653bb94b..1e71abf0607a 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c | |||
@@ -2133,8 +2133,7 @@ fc_attach_transport(struct fc_function_template *ft) | |||
2133 | SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(roles); | 2133 | SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(roles); |
2134 | SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_state); | 2134 | SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_state); |
2135 | SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(scsi_target_id); | 2135 | SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(scsi_target_id); |
2136 | if (ft->terminate_rport_io) | 2136 | SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(fast_io_fail_tmo); |
2137 | SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(fast_io_fail_tmo); | ||
2138 | 2137 | ||
2139 | BUG_ON(count > FC_RPORT_NUM_ATTRS); | 2138 | BUG_ON(count > FC_RPORT_NUM_ATTRS); |
2140 | 2139 | ||
@@ -2328,6 +2327,22 @@ fc_remove_host(struct Scsi_Host *shost) | |||
2328 | } | 2327 | } |
2329 | EXPORT_SYMBOL(fc_remove_host); | 2328 | EXPORT_SYMBOL(fc_remove_host); |
2330 | 2329 | ||
2330 | static void fc_terminate_rport_io(struct fc_rport *rport) | ||
2331 | { | ||
2332 | struct Scsi_Host *shost = rport_to_shost(rport); | ||
2333 | struct fc_internal *i = to_fc_internal(shost->transportt); | ||
2334 | |||
2335 | /* Involve the LLDD if possible to terminate all io on the rport. */ | ||
2336 | if (i->f->terminate_rport_io) | ||
2337 | i->f->terminate_rport_io(rport); | ||
2338 | |||
2339 | /* | ||
2340 | * must unblock to flush queued IO. The caller will have set | ||
2341 | * the port_state or flags, so that fc_remote_port_chkready will | ||
2342 | * fail IO. | ||
2343 | */ | ||
2344 | scsi_target_unblock(&rport->dev); | ||
2345 | } | ||
2331 | 2346 | ||
2332 | /** | 2347 | /** |
2333 | * fc_starget_delete - called to delete the scsi descendants of an rport | 2348 | * fc_starget_delete - called to delete the scsi descendants of an rport |
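The new fc_terminate_rport_io() helper centralizes what three call sites used to open-code: ask the LLDD to kill outstanding I/O on the rport if it provides terminate_rport_io, then unblock the SCSI target so anything still queued is flushed and failed by fc_remote_port_chkready (the caller has already set the port state or the fast-fail flag). A rough user-space model of that "optional driver hook, then mandatory unblock" shape (the types and the unblock stub are placeholders, not the transport-class API):

    #include <stdio.h>

    struct rport_model { const char *name; };

    /* Optional low-level-driver hook; may be NULL, like f->terminate_rport_io. */
    typedef void (*terminate_fn)(struct rport_model *);

    static void unblock_target(struct rport_model *rport)
    {
        /* Placeholder for scsi_target_unblock(): lets queued I/O run and fail fast. */
        printf("%s: target unblocked, queued I/O will be flushed\n", rport->name);
    }

    static void terminate_rport_io(struct rport_model *rport, terminate_fn lldd_hook)
    {
        if (lldd_hook)
            lldd_hook(rport);       /* let the driver abort what it can */
        unblock_target(rport);      /* always flush whatever is still queued */
    }

    static void lldd_terminate(struct rport_model *rport)
    {
        printf("%s: LLDD aborting outstanding commands\n", rport->name);
    }

    int main(void)
    {
        struct rport_model r = { "rport-0" };
        terminate_rport_io(&r, lldd_terminate);  /* driver provides the hook */
        terminate_rport_io(&r, NULL);            /* driver without the hook still unblocks */
        return 0;
    }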
@@ -2340,13 +2355,8 @@ fc_starget_delete(struct work_struct *work) | |||
2340 | { | 2355 | { |
2341 | struct fc_rport *rport = | 2356 | struct fc_rport *rport = |
2342 | container_of(work, struct fc_rport, stgt_delete_work); | 2357 | container_of(work, struct fc_rport, stgt_delete_work); |
2343 | struct Scsi_Host *shost = rport_to_shost(rport); | ||
2344 | struct fc_internal *i = to_fc_internal(shost->transportt); | ||
2345 | |||
2346 | /* Involve the LLDD if possible to terminate all io on the rport. */ | ||
2347 | if (i->f->terminate_rport_io) | ||
2348 | i->f->terminate_rport_io(rport); | ||
2349 | 2358 | ||
2359 | fc_terminate_rport_io(rport); | ||
2350 | scsi_remove_target(&rport->dev); | 2360 | scsi_remove_target(&rport->dev); |
2351 | } | 2361 | } |
2352 | 2362 | ||
@@ -2372,10 +2382,7 @@ fc_rport_final_delete(struct work_struct *work) | |||
2372 | if (rport->flags & FC_RPORT_SCAN_PENDING) | 2382 | if (rport->flags & FC_RPORT_SCAN_PENDING) |
2373 | scsi_flush_work(shost); | 2383 | scsi_flush_work(shost); |
2374 | 2384 | ||
2375 | /* involve the LLDD to terminate all pending i/o */ | 2385 | fc_terminate_rport_io(rport); |
2376 | if (i->f->terminate_rport_io) | ||
2377 | i->f->terminate_rport_io(rport); | ||
2378 | |||
2379 | /* | 2386 | /* |
2380 | * Cancel any outstanding timers. These should really exist | 2387 | * Cancel any outstanding timers. These should really exist |
2381 | * only when rmmod'ing the LLDD and we're asking for | 2388 | * only when rmmod'ing the LLDD and we're asking for |
@@ -2639,7 +2646,8 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel, | |||
2639 | 2646 | ||
2640 | spin_lock_irqsave(shost->host_lock, flags); | 2647 | spin_lock_irqsave(shost->host_lock, flags); |
2641 | 2648 | ||
2642 | rport->flags &= ~FC_RPORT_DEVLOSS_PENDING; | 2649 | rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT | |
2650 | FC_RPORT_DEVLOSS_PENDING); | ||
2643 | 2651 | ||
2644 | /* if target, initiate a scan */ | 2652 | /* if target, initiate a scan */ |
2645 | if (rport->scsi_target_id != -1) { | 2653 | if (rport->scsi_target_id != -1) { |
@@ -2702,6 +2710,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel, | |||
2702 | rport->port_id = ids->port_id; | 2710 | rport->port_id = ids->port_id; |
2703 | rport->roles = ids->roles; | 2711 | rport->roles = ids->roles; |
2704 | rport->port_state = FC_PORTSTATE_ONLINE; | 2712 | rport->port_state = FC_PORTSTATE_ONLINE; |
2713 | rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT; | ||
2705 | 2714 | ||
2706 | if (fci->f->dd_fcrport_size) | 2715 | if (fci->f->dd_fcrport_size) |
2707 | memset(rport->dd_data, 0, | 2716 | memset(rport->dd_data, 0, |
@@ -2784,7 +2793,6 @@ void | |||
2784 | fc_remote_port_delete(struct fc_rport *rport) | 2793 | fc_remote_port_delete(struct fc_rport *rport) |
2785 | { | 2794 | { |
2786 | struct Scsi_Host *shost = rport_to_shost(rport); | 2795 | struct Scsi_Host *shost = rport_to_shost(rport); |
2787 | struct fc_internal *i = to_fc_internal(shost->transportt); | ||
2788 | int timeout = rport->dev_loss_tmo; | 2796 | int timeout = rport->dev_loss_tmo; |
2789 | unsigned long flags; | 2797 | unsigned long flags; |
2790 | 2798 | ||
@@ -2830,7 +2838,7 @@ fc_remote_port_delete(struct fc_rport *rport) | |||
2830 | 2838 | ||
2831 | /* see if we need to kill io faster than waiting for device loss */ | 2839 | /* see if we need to kill io faster than waiting for device loss */ |
2832 | if ((rport->fast_io_fail_tmo != -1) && | 2840 | if ((rport->fast_io_fail_tmo != -1) && |
2833 | (rport->fast_io_fail_tmo < timeout) && (i->f->terminate_rport_io)) | 2841 | (rport->fast_io_fail_tmo < timeout)) |
2834 | fc_queue_devloss_work(shost, &rport->fail_io_work, | 2842 | fc_queue_devloss_work(shost, &rport->fail_io_work, |
2835 | rport->fast_io_fail_tmo * HZ); | 2843 | rport->fast_io_fail_tmo * HZ); |
2836 | 2844 | ||
@@ -2906,7 +2914,8 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles) | |||
2906 | fc_flush_devloss(shost); | 2914 | fc_flush_devloss(shost); |
2907 | 2915 | ||
2908 | spin_lock_irqsave(shost->host_lock, flags); | 2916 | spin_lock_irqsave(shost->host_lock, flags); |
2909 | rport->flags &= ~FC_RPORT_DEVLOSS_PENDING; | 2917 | rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT | |
2918 | FC_RPORT_DEVLOSS_PENDING); | ||
2910 | spin_unlock_irqrestore(shost->host_lock, flags); | 2919 | spin_unlock_irqrestore(shost->host_lock, flags); |
2911 | 2920 | ||
2912 | /* ensure any stgt delete functions are done */ | 2921 | /* ensure any stgt delete functions are done */ |
@@ -3001,6 +3010,7 @@ fc_timeout_deleted_rport(struct work_struct *work) | |||
3001 | rport->supported_classes = FC_COS_UNSPECIFIED; | 3010 | rport->supported_classes = FC_COS_UNSPECIFIED; |
3002 | rport->roles = FC_PORT_ROLE_UNKNOWN; | 3011 | rport->roles = FC_PORT_ROLE_UNKNOWN; |
3003 | rport->port_state = FC_PORTSTATE_NOTPRESENT; | 3012 | rport->port_state = FC_PORTSTATE_NOTPRESENT; |
3013 | rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT; | ||
3004 | 3014 | ||
3005 | /* remove the identifiers that aren't used in the consistent binding */ | 3015 | /* remove the identifiers that aren't used in the consistent binding */ |
3006 | switch (fc_host->tgtid_bind_type) { | 3016 | switch (fc_host->tgtid_bind_type) { |
@@ -3043,13 +3053,12 @@ fc_timeout_fail_rport_io(struct work_struct *work) | |||
3043 | { | 3053 | { |
3044 | struct fc_rport *rport = | 3054 | struct fc_rport *rport = |
3045 | container_of(work, struct fc_rport, fail_io_work.work); | 3055 | container_of(work, struct fc_rport, fail_io_work.work); |
3046 | struct Scsi_Host *shost = rport_to_shost(rport); | ||
3047 | struct fc_internal *i = to_fc_internal(shost->transportt); | ||
3048 | 3056 | ||
3049 | if (rport->port_state != FC_PORTSTATE_BLOCKED) | 3057 | if (rport->port_state != FC_PORTSTATE_BLOCKED) |
3050 | return; | 3058 | return; |
3051 | 3059 | ||
3052 | i->f->terminate_rport_io(rport); | 3060 | rport->flags |= FC_RPORT_FAST_FAIL_TIMEDOUT; |
3061 | fc_terminate_rport_io(rport); | ||
3053 | } | 3062 | } |
3054 | 3063 | ||
3055 | /** | 3064 | /** |
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 0ce5f7cdfe2a..4a803ebaf508 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c | |||
@@ -138,7 +138,7 @@ static ssize_t | |||
138 | show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf) | 138 | show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf) |
139 | { | 139 | { |
140 | struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev); | 140 | struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev); |
141 | return sprintf(buf, "%u\n", ep->id); | 141 | return sprintf(buf, "%llu\n", (unsigned long long) ep->id); |
142 | } | 142 | } |
143 | static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL); | 143 | static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL); |
144 | 144 | ||
@@ -156,7 +156,7 @@ static struct attribute_group iscsi_endpoint_group = { | |||
156 | static int iscsi_match_epid(struct device *dev, void *data) | 156 | static int iscsi_match_epid(struct device *dev, void *data) |
157 | { | 157 | { |
158 | struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev); | 158 | struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev); |
159 | unsigned int *epid = (unsigned int *) data; | 159 | uint64_t *epid = (uint64_t *) data; |
160 | 160 | ||
161 | return *epid == ep->id; | 161 | return *epid == ep->id; |
162 | } | 162 | } |
@@ -166,7 +166,7 @@ iscsi_create_endpoint(int dd_size) | |||
166 | { | 166 | { |
167 | struct device *dev; | 167 | struct device *dev; |
168 | struct iscsi_endpoint *ep; | 168 | struct iscsi_endpoint *ep; |
169 | unsigned int id; | 169 | uint64_t id; |
170 | int err; | 170 | int err; |
171 | 171 | ||
172 | for (id = 1; id < ISCSI_MAX_EPID; id++) { | 172 | for (id = 1; id < ISCSI_MAX_EPID; id++) { |
@@ -187,7 +187,8 @@ iscsi_create_endpoint(int dd_size) | |||
187 | 187 | ||
188 | ep->id = id; | 188 | ep->id = id; |
189 | ep->dev.class = &iscsi_endpoint_class; | 189 | ep->dev.class = &iscsi_endpoint_class; |
190 | snprintf(ep->dev.bus_id, BUS_ID_SIZE, "ep-%u", id); | 190 | snprintf(ep->dev.bus_id, BUS_ID_SIZE, "ep-%llu", |
191 | (unsigned long long) id); | ||
191 | err = device_register(&ep->dev); | 192 | err = device_register(&ep->dev); |
192 | if (err) | 193 | if (err) |
193 | goto free_ep; | 194 | goto free_ep; |
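Widening ep->id from unsigned int to a 64-bit value means the printf-style users have to change too: %u would be wrong for a u64, and because u64 maps to unsigned long on some 64-bit architectures and unsigned long long elsewhere, the patch casts through unsigned long long and prints with %llu. A small demonstration of why the cast is the portable idiom (uint64_t here stands in for the kernel's u64):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t id = 0x1122334455667788ULL;

        /* Portable everywhere: cast to a type whose format specifier is fixed. */
        printf("ep-%llu\n", (unsigned long long)id);

        /* Userspace alternative: the PRIu64 macro picks the right specifier. */
        printf("ep-%" PRIu64 "\n", id);
        return 0;
    }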
@@ -374,10 +375,10 @@ int iscsi_session_chkready(struct iscsi_cls_session *session) | |||
374 | err = 0; | 375 | err = 0; |
375 | break; | 376 | break; |
376 | case ISCSI_SESSION_FAILED: | 377 | case ISCSI_SESSION_FAILED: |
377 | err = DID_IMM_RETRY << 16; | 378 | err = DID_TRANSPORT_DISRUPTED << 16; |
378 | break; | 379 | break; |
379 | case ISCSI_SESSION_FREE: | 380 | case ISCSI_SESSION_FREE: |
380 | err = DID_NO_CONNECT << 16; | 381 | err = DID_TRANSPORT_FAILFAST << 16; |
381 | break; | 382 | break; |
382 | default: | 383 | default: |
383 | err = DID_NO_CONNECT << 16; | 384 | err = DID_NO_CONNECT << 16; |
@@ -1010,7 +1011,7 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, | |||
1010 | 1011 | ||
1011 | skb = alloc_skb(len, GFP_ATOMIC); | 1012 | skb = alloc_skb(len, GFP_ATOMIC); |
1012 | if (!skb) { | 1013 | if (!skb) { |
1013 | iscsi_conn_error(conn, ISCSI_ERR_CONN_FAILED); | 1014 | iscsi_conn_error_event(conn, ISCSI_ERR_CONN_FAILED); |
1014 | iscsi_cls_conn_printk(KERN_ERR, conn, "can not deliver " | 1015 | iscsi_cls_conn_printk(KERN_ERR, conn, "can not deliver " |
1015 | "control PDU: OOM\n"); | 1016 | "control PDU: OOM\n"); |
1016 | return -ENOMEM; | 1017 | return -ENOMEM; |
@@ -1031,7 +1032,7 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, | |||
1031 | } | 1032 | } |
1032 | EXPORT_SYMBOL_GPL(iscsi_recv_pdu); | 1033 | EXPORT_SYMBOL_GPL(iscsi_recv_pdu); |
1033 | 1034 | ||
1034 | void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error) | 1035 | void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error) |
1035 | { | 1036 | { |
1036 | struct nlmsghdr *nlh; | 1037 | struct nlmsghdr *nlh; |
1037 | struct sk_buff *skb; | 1038 | struct sk_buff *skb; |
@@ -1063,7 +1064,7 @@ void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error) | |||
1063 | iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n", | 1064 | iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n", |
1064 | error); | 1065 | error); |
1065 | } | 1066 | } |
1066 | EXPORT_SYMBOL_GPL(iscsi_conn_error); | 1067 | EXPORT_SYMBOL_GPL(iscsi_conn_error_event); |
1067 | 1068 | ||
1068 | static int | 1069 | static int |
1069 | iscsi_if_send_reply(int pid, int seq, int type, int done, int multi, | 1070 | iscsi_if_send_reply(int pid, int seq, int type, int done, int multi, |
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c index b29360ed0bdc..7c2d28924d2a 100644 --- a/drivers/scsi/scsi_transport_spi.c +++ b/drivers/scsi/scsi_transport_spi.c | |||
@@ -109,7 +109,9 @@ static int spi_execute(struct scsi_device *sdev, const void *cmd, | |||
109 | for(i = 0; i < DV_RETRIES; i++) { | 109 | for(i = 0; i < DV_RETRIES; i++) { |
110 | result = scsi_execute(sdev, cmd, dir, buffer, bufflen, | 110 | result = scsi_execute(sdev, cmd, dir, buffer, bufflen, |
111 | sense, DV_TIMEOUT, /* retries */ 1, | 111 | sense, DV_TIMEOUT, /* retries */ 1, |
112 | REQ_FAILFAST); | 112 | REQ_FAILFAST_DEV | |
113 | REQ_FAILFAST_TRANSPORT | | ||
114 | REQ_FAILFAST_DRIVER); | ||
113 | if (result & DRIVER_SENSE) { | 115 | if (result & DRIVER_SENSE) { |
114 | struct scsi_sense_hdr sshdr_tmp; | 116 | struct scsi_sense_hdr sshdr_tmp; |
115 | if (!sshdr) | 117 | if (!sshdr) |
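The block layer's single REQ_FAILFAST flag is split in this series into three independent fail-fast classes, so a submitter has to say which failures it wants to give up on quickly; domain-validation I/O wants all three, hence the OR of the DEV, TRANSPORT and DRIVER bits above. A sketch of treating the classes as independent bits (the bit values are invented for the example; only the flag names come from the patch):

    #include <stdio.h>

    /* Illustrative bit values; the real ones live in the block layer headers. */
    #define REQ_FAILFAST_DEV        (1u << 0)  /* give up on device errors */
    #define REQ_FAILFAST_TRANSPORT  (1u << 1)  /* give up on transport errors */
    #define REQ_FAILFAST_DRIVER     (1u << 2)  /* give up on driver-signalled errors */

    enum error_class { ERR_DEV, ERR_TRANSPORT, ERR_DRIVER };

    static int should_retry(unsigned int cmd_flags, enum error_class ec)
    {
        switch (ec) {
        case ERR_DEV:       return !(cmd_flags & REQ_FAILFAST_DEV);
        case ERR_TRANSPORT: return !(cmd_flags & REQ_FAILFAST_TRANSPORT);
        case ERR_DRIVER:    return !(cmd_flags & REQ_FAILFAST_DRIVER);
        }
        return 1;
    }

    int main(void)
    {
        /* Domain validation: fail fast on everything, as in spi_execute(). */
        unsigned int dv_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
                                REQ_FAILFAST_DRIVER;
        /* A submitter that only wants transport errors to fail fast. */
        unsigned int mp_flags = REQ_FAILFAST_TRANSPORT;

        printf("DV retry on transport error:     %d\n",
               should_retry(dv_flags, ERR_TRANSPORT));
        printf("selective retry on device error: %d\n",
               should_retry(mp_flags, ERR_DEV));
        return 0;
    }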
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index a7b53be63367..7c4d2e68df1c 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -384,7 +384,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq) | |||
384 | sector_t block = rq->sector; | 384 | sector_t block = rq->sector; |
385 | sector_t threshold; | 385 | sector_t threshold; |
386 | unsigned int this_count = rq->nr_sectors; | 386 | unsigned int this_count = rq->nr_sectors; |
387 | int ret; | 387 | int ret, host_dif; |
388 | 388 | ||
389 | if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { | 389 | if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { |
390 | ret = scsi_setup_blk_pc_cmnd(sdp, rq); | 390 | ret = scsi_setup_blk_pc_cmnd(sdp, rq); |
@@ -515,7 +515,8 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq) | |||
515 | rq->nr_sectors)); | 515 | rq->nr_sectors)); |
516 | 516 | ||
517 | /* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */ | 517 | /* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */ |
518 | if (scsi_host_dif_capable(sdp->host, sdkp->protection_type)) | 518 | host_dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type); |
519 | if (host_dif) | ||
519 | SCpnt->cmnd[1] = 1 << 5; | 520 | SCpnt->cmnd[1] = 1 << 5; |
520 | else | 521 | else |
521 | SCpnt->cmnd[1] = 0; | 522 | SCpnt->cmnd[1] = 0; |
@@ -573,8 +574,9 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq) | |||
573 | SCpnt->sdb.length = this_count * sdp->sector_size; | 574 | SCpnt->sdb.length = this_count * sdp->sector_size; |
574 | 575 | ||
575 | /* If DIF or DIX is enabled, tell HBA how to handle request */ | 576 | /* If DIF or DIX is enabled, tell HBA how to handle request */ |
576 | if (sdkp->protection_type || scsi_prot_sg_count(SCpnt)) | 577 | if (host_dif || scsi_prot_sg_count(SCpnt)) |
577 | sd_dif_op(SCpnt, sdkp->protection_type, scsi_prot_sg_count(SCpnt)); | 578 | sd_dif_op(SCpnt, host_dif, scsi_prot_sg_count(SCpnt), |
579 | sdkp->protection_type); | ||
578 | 580 | ||
579 | /* | 581 | /* |
580 | * We shouldn't disconnect in the middle of a sector, so with a dumb | 582 | * We shouldn't disconnect in the middle of a sector, so with a dumb |
@@ -1252,14 +1254,12 @@ void sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer) | |||
1252 | else | 1254 | else |
1253 | type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */ | 1255 | type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */ |
1254 | 1256 | ||
1257 | sdkp->protection_type = type; | ||
1258 | |||
1255 | switch (type) { | 1259 | switch (type) { |
1256 | case SD_DIF_TYPE0_PROTECTION: | 1260 | case SD_DIF_TYPE0_PROTECTION: |
1257 | sdkp->protection_type = 0; | ||
1258 | break; | ||
1259 | |||
1260 | case SD_DIF_TYPE1_PROTECTION: | 1261 | case SD_DIF_TYPE1_PROTECTION: |
1261 | case SD_DIF_TYPE3_PROTECTION: | 1262 | case SD_DIF_TYPE3_PROTECTION: |
1262 | sdkp->protection_type = type; | ||
1263 | break; | 1263 | break; |
1264 | 1264 | ||
1265 | case SD_DIF_TYPE2_PROTECTION: | 1265 | case SD_DIF_TYPE2_PROTECTION: |
@@ -1277,7 +1277,6 @@ void sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer) | |||
1277 | return; | 1277 | return; |
1278 | 1278 | ||
1279 | disable: | 1279 | disable: |
1280 | sdkp->protection_type = 0; | ||
1281 | sdkp->capacity = 0; | 1280 | sdkp->capacity = 0; |
1282 | } | 1281 | } |
1283 | 1282 | ||
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 95b9f06534d5..75638e7d3f66 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h | |||
@@ -97,19 +97,28 @@ struct sd_dif_tuple { | |||
97 | __be32 ref_tag; /* Target LBA or indirect LBA */ | 97 | __be32 ref_tag; /* Target LBA or indirect LBA */ |
98 | }; | 98 | }; |
99 | 99 | ||
100 | #if defined(CONFIG_BLK_DEV_INTEGRITY) | 100 | #ifdef CONFIG_BLK_DEV_INTEGRITY |
101 | 101 | ||
102 | extern void sd_dif_op(struct scsi_cmnd *, unsigned int, unsigned int); | 102 | extern void sd_dif_op(struct scsi_cmnd *, unsigned int, unsigned int, unsigned int); |
103 | extern void sd_dif_config_host(struct scsi_disk *); | 103 | extern void sd_dif_config_host(struct scsi_disk *); |
104 | extern int sd_dif_prepare(struct request *rq, sector_t, unsigned int); | 104 | extern int sd_dif_prepare(struct request *rq, sector_t, unsigned int); |
105 | extern void sd_dif_complete(struct scsi_cmnd *, unsigned int); | 105 | extern void sd_dif_complete(struct scsi_cmnd *, unsigned int); |
106 | 106 | ||
107 | #else /* CONFIG_BLK_DEV_INTEGRITY */ | 107 | #else /* CONFIG_BLK_DEV_INTEGRITY */ |
108 | 108 | ||
109 | #define sd_dif_op(a, b, c) do { } while (0) | 109 | static inline void sd_dif_op(struct scsi_cmnd *cmd, unsigned int a, unsigned int b, unsigned int c) |
110 | #define sd_dif_config_host(a) do { } while (0) | 110 | { |
111 | #define sd_dif_prepare(a, b, c) (0) | 111 | } |
112 | #define sd_dif_complete(a, b) (0) | 112 | static inline void sd_dif_config_host(struct scsi_disk *disk) |
113 | { | ||
114 | } | ||
115 | static inline int sd_dif_prepare(struct request *rq, sector_t s, unsigned int a) | ||
116 | { | ||
117 | return 0; | ||
118 | } | ||
119 | static inline void sd_dif_complete(struct scsi_cmnd *cmd, unsigned int a) | ||
120 | { | ||
121 | } | ||
113 | 122 | ||
114 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ | 123 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ |
115 | 124 | ||
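Replacing the do-nothing macros with static inline stubs keeps the CONFIG_BLK_DEV_INTEGRITY=n build honest: the compiler still type-checks every call site and evaluates the arguments, which is what the "fix compile failure with CONFIG_BLK_DEV_INTEGRITY=n" commit in this merge relies on once sd_dif_op() grows a fourth parameter. The same pattern in miniature (the feature name and types are placeholders):

    #include <stdio.h>

    struct widget { int id; };

    #ifdef CONFIG_WIDGET_DEBUG
    /* Real implementation, compiled only when the feature is enabled. */
    void widget_trace(struct widget *w, unsigned int op)
    {
        printf("widget %d: op %u\n", w->id, op);
    }
    #else
    /*
     * Static inline stub: unlike "#define widget_trace(a, b) do { } while (0)",
     * this still checks that callers pass a struct widget * and an integer,
     * so adding a parameter later breaks every stale call site at compile time.
     */
    static inline void widget_trace(struct widget *w, unsigned int op)
    {
        (void)w;
        (void)op;
    }
    #endif

    int main(void)
    {
        struct widget w = { 7 };
        widget_trace(&w, 3);    /* compiles and type-checks in both configurations */
        return 0;
    }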
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c index 4d17f3d35aac..3ebb1f289490 100644 --- a/drivers/scsi/sd_dif.c +++ b/drivers/scsi/sd_dif.c | |||
@@ -311,25 +311,26 @@ void sd_dif_config_host(struct scsi_disk *sdkp) | |||
311 | struct scsi_device *sdp = sdkp->device; | 311 | struct scsi_device *sdp = sdkp->device; |
312 | struct gendisk *disk = sdkp->disk; | 312 | struct gendisk *disk = sdkp->disk; |
313 | u8 type = sdkp->protection_type; | 313 | u8 type = sdkp->protection_type; |
314 | int dif, dix; | ||
314 | 315 | ||
315 | /* If this HBA doesn't support DIX, resort to normal I/O or DIF */ | 316 | dif = scsi_host_dif_capable(sdp->host, type); |
316 | if (scsi_host_dix_capable(sdp->host, type) == 0) { | 317 | dix = scsi_host_dix_capable(sdp->host, type); |
317 | 318 | ||
318 | if (type == SD_DIF_TYPE0_PROTECTION) | 319 | if (!dix && scsi_host_dix_capable(sdp->host, 0)) { |
319 | return; | 320 | dif = 0; dix = 1; |
320 | 321 | } | |
321 | if (scsi_host_dif_capable(sdp->host, type) == 0) { | ||
322 | sd_printk(KERN_INFO, sdkp, "Type %d protection " \ | ||
323 | "unsupported by HBA. Disabling DIF.\n", type); | ||
324 | sdkp->protection_type = 0; | ||
325 | return; | ||
326 | } | ||
327 | 322 | ||
328 | sd_printk(KERN_INFO, sdkp, "Enabling DIF Type %d protection\n", | 323 | if (type) { |
329 | type); | 324 | if (dif) |
325 | sd_printk(KERN_NOTICE, sdkp, | ||
326 | "Enabling DIF Type %d protection\n", type); | ||
327 | else | ||
328 | sd_printk(KERN_NOTICE, sdkp, | ||
329 | "Disabling DIF Type %d protection\n", type); | ||
330 | } | ||
330 | 331 | ||
332 | if (!dix) | ||
331 | return; | 333 | return; |
332 | } | ||
333 | 334 | ||
334 | /* Enable DMA of protection information */ | 335 | /* Enable DMA of protection information */ |
335 | if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP) | 336 | if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP) |
@@ -343,17 +344,17 @@ void sd_dif_config_host(struct scsi_disk *sdkp) | |||
343 | else | 344 | else |
344 | blk_integrity_register(disk, &dif_type1_integrity_crc); | 345 | blk_integrity_register(disk, &dif_type1_integrity_crc); |
345 | 346 | ||
346 | sd_printk(KERN_INFO, sdkp, | 347 | sd_printk(KERN_NOTICE, sdkp, |
347 | "Enabling %s integrity protection\n", disk->integrity->name); | 348 | "Enabling DIX %s protection\n", disk->integrity->name); |
348 | 349 | ||
349 | /* Signal to block layer that we support sector tagging */ | 350 | /* Signal to block layer that we support sector tagging */ |
350 | if (type && sdkp->ATO) { | 351 | if (dif && type && sdkp->ATO) { |
351 | if (type == SD_DIF_TYPE3_PROTECTION) | 352 | if (type == SD_DIF_TYPE3_PROTECTION) |
352 | disk->integrity->tag_size = sizeof(u16) + sizeof(u32); | 353 | disk->integrity->tag_size = sizeof(u16) + sizeof(u32); |
353 | else | 354 | else |
354 | disk->integrity->tag_size = sizeof(u16); | 355 | disk->integrity->tag_size = sizeof(u16); |
355 | 356 | ||
356 | sd_printk(KERN_INFO, sdkp, "DIF application tag size %u\n", | 357 | sd_printk(KERN_NOTICE, sdkp, "DIF application tag size %u\n", |
357 | disk->integrity->tag_size); | 358 | disk->integrity->tag_size); |
358 | } | 359 | } |
359 | } | 360 | } |
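The reworked sd_dif_config_host() first records what the HBA can do for this disk's format type (dif = DIF on the wire to the target, dix = DIX integrity exchange with host memory), falls back to DIX-only with Type 0 when the controller can protect host buffers but not this DIF type, prints the outcome, and only registers a blk_integrity profile when DIX is actually in use; application-tag space is advertised only when DIF, a non-zero type and ATO all line up. A condensed decision-table sketch of that negotiation, with the capability callbacks stubbed out and the printed strings approximating the patch's messages:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stubs standing in for scsi_host_dif_capable()/scsi_host_dix_capable(). */
    static bool host_dif_capable(int type) { return type == 1; }   /* DIF Type 1 only */
    static bool host_dix_capable(int type) { return type <= 1; }   /* DIX for Type 0/1 */

    static void configure_protection(int format_type, bool ato)
    {
        int dif = host_dif_capable(format_type);
        int dix = host_dix_capable(format_type);

        if (!dix && host_dix_capable(0)) {
            dif = 0;            /* cannot pass this type through to the target... */
            dix = 1;            /* ...but can still checksum host<->HBA transfers */
        }

        if (format_type)
            printf("%s DIF Type %d protection\n",
                   dif ? "Enabling" : "Disabling", format_type);

        if (!dix)
            return;             /* nothing to register with the block layer */

        printf("Enabling DIX protection\n");

        if (dif && format_type && ato)
            printf("application tag space available\n");
    }

    int main(void)
    {
        configure_protection(1, true);   /* Type 1 disk, HBA does DIF and DIX */
        configure_protection(2, true);   /* Type 2 disk: falls back to DIX-only */
        return 0;
    }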
@@ -361,7 +362,7 @@ void sd_dif_config_host(struct scsi_disk *sdkp) | |||
361 | /* | 362 | /* |
362 | * DIF DMA operation magic decoder ring. | 363 | * DIF DMA operation magic decoder ring. |
363 | */ | 364 | */ |
364 | void sd_dif_op(struct scsi_cmnd *scmd, unsigned int dif, unsigned int dix) | 365 | void sd_dif_op(struct scsi_cmnd *scmd, unsigned int dif, unsigned int dix, unsigned int type) |
365 | { | 366 | { |
366 | int csum_convert, prot_op; | 367 | int csum_convert, prot_op; |
367 | 368 | ||
@@ -406,7 +407,8 @@ void sd_dif_op(struct scsi_cmnd *scmd, unsigned int dif, unsigned int dix) | |||
406 | } | 407 | } |
407 | 408 | ||
408 | scsi_set_prot_op(scmd, prot_op); | 409 | scsi_set_prot_op(scmd, prot_op); |
409 | scsi_set_prot_type(scmd, dif); | 410 | if (dif) |
411 | scsi_set_prot_type(scmd, type); | ||
410 | } | 412 | } |
411 | 413 | ||
412 | /* | 414 | /* |