Diffstat (limited to 'drivers/scsi/ibmvscsi')
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c   | 466
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.h   |  41
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 469
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.h |   4
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvstgt.c |   2
-rw-r--r--  drivers/scsi/ibmvscsi/viosrp.h   |  68
6 files changed, 779 insertions(+), 271 deletions(-)
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index ea4abee7a2a9..166d96450a0e 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -110,7 +110,7 @@ static const struct {
110 | { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" }, | 110 | { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" }, |
111 | { IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" }, | 111 | { IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" }, |
112 | { IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" }, | 112 | { IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" }, |
113 | { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 0, 0, "link halted" }, | 113 | { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" }, |
114 | { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" }, | 114 | { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" }, |
115 | 115 | ||
116 | { IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" }, | 116 | { IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" }, |
@@ -143,6 +143,7 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *);
143 | static void ibmvfc_tgt_send_prli(struct ibmvfc_target *); | 143 | static void ibmvfc_tgt_send_prli(struct ibmvfc_target *); |
144 | static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *); | 144 | static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *); |
145 | static void ibmvfc_tgt_query_target(struct ibmvfc_target *); | 145 | static void ibmvfc_tgt_query_target(struct ibmvfc_target *); |
146 | static void ibmvfc_npiv_logout(struct ibmvfc_host *); | ||
146 | 147 | ||
147 | static const char *unknown_error = "unknown error"; | 148 | static const char *unknown_error = "unknown error"; |
148 | 149 | ||
@@ -275,7 +276,7 @@ static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd)
275 | int fc_rsp_len = rsp->fcp_rsp_len; | 276 | int fc_rsp_len = rsp->fcp_rsp_len; |
276 | 277 | ||
277 | if ((rsp->flags & FCP_RSP_LEN_VALID) && | 278 | if ((rsp->flags & FCP_RSP_LEN_VALID) && |
278 | ((!fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) || | 279 | ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) || |
279 | rsp->data.info.rsp_code)) | 280 | rsp->data.info.rsp_code)) |
280 | return DID_ERROR << 16; | 281 | return DID_ERROR << 16; |
281 | 282 | ||
@@ -431,6 +432,8 @@ static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
431 | case IBMVFC_TGT_ACTION_DEL_RPORT: | 432 | case IBMVFC_TGT_ACTION_DEL_RPORT: |
432 | break; | 433 | break; |
433 | default: | 434 | default: |
435 | if (action == IBMVFC_TGT_ACTION_DEL_RPORT) | ||
436 | tgt->add_rport = 0; | ||
434 | tgt->action = action; | 437 | tgt->action = action; |
435 | break; | 438 | break; |
436 | } | 439 | } |
@@ -475,6 +478,10 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
475 | if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) | 478 | if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) |
476 | vhost->action = action; | 479 | vhost->action = action; |
477 | break; | 480 | break; |
481 | case IBMVFC_HOST_ACTION_LOGO_WAIT: | ||
482 | if (vhost->action == IBMVFC_HOST_ACTION_LOGO) | ||
483 | vhost->action = action; | ||
484 | break; | ||
478 | case IBMVFC_HOST_ACTION_INIT_WAIT: | 485 | case IBMVFC_HOST_ACTION_INIT_WAIT: |
479 | if (vhost->action == IBMVFC_HOST_ACTION_INIT) | 486 | if (vhost->action == IBMVFC_HOST_ACTION_INIT) |
480 | vhost->action = action; | 487 | vhost->action = action; |
@@ -483,7 +490,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
483 | switch (vhost->action) { | 490 | switch (vhost->action) { |
484 | case IBMVFC_HOST_ACTION_INIT_WAIT: | 491 | case IBMVFC_HOST_ACTION_INIT_WAIT: |
485 | case IBMVFC_HOST_ACTION_NONE: | 492 | case IBMVFC_HOST_ACTION_NONE: |
486 | case IBMVFC_HOST_ACTION_TGT_ADD: | 493 | case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: |
487 | vhost->action = action; | 494 | vhost->action = action; |
488 | break; | 495 | break; |
489 | default: | 496 | default: |
@@ -494,11 +501,11 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
494 | if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS) | 501 | if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS) |
495 | vhost->action = action; | 502 | vhost->action = action; |
496 | break; | 503 | break; |
504 | case IBMVFC_HOST_ACTION_LOGO: | ||
497 | case IBMVFC_HOST_ACTION_INIT: | 505 | case IBMVFC_HOST_ACTION_INIT: |
498 | case IBMVFC_HOST_ACTION_TGT_DEL: | 506 | case IBMVFC_HOST_ACTION_TGT_DEL: |
499 | case IBMVFC_HOST_ACTION_QUERY_TGTS: | 507 | case IBMVFC_HOST_ACTION_QUERY_TGTS: |
500 | case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: | 508 | case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: |
501 | case IBMVFC_HOST_ACTION_TGT_ADD: | ||
502 | case IBMVFC_HOST_ACTION_NONE: | 509 | case IBMVFC_HOST_ACTION_NONE: |
503 | default: | 510 | default: |
504 | vhost->action = action; | 511 | vhost->action = action; |
@@ -576,7 +583,7 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost, int relogin)
576 | } | 583 | } |
577 | 584 | ||
578 | list_for_each_entry(tgt, &vhost->targets, queue) | 585 | list_for_each_entry(tgt, &vhost->targets, queue) |
579 | tgt->need_login = 1; | 586 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); |
580 | scsi_block_requests(vhost->host); | 587 | scsi_block_requests(vhost->host); |
581 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); | 588 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); |
582 | vhost->job_step = ibmvfc_npiv_login; | 589 | vhost->job_step = ibmvfc_npiv_login; |
@@ -646,6 +653,7 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
646 | } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); | 653 | } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); |
647 | 654 | ||
648 | vhost->state = IBMVFC_NO_CRQ; | 655 | vhost->state = IBMVFC_NO_CRQ; |
656 | vhost->logged_in = 0; | ||
649 | dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL); | 657 | dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL); |
650 | free_page((unsigned long)crq->msgs); | 658 | free_page((unsigned long)crq->msgs); |
651 | } | 659 | } |
@@ -692,6 +700,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
692 | } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); | 700 | } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); |
693 | 701 | ||
694 | vhost->state = IBMVFC_NO_CRQ; | 702 | vhost->state = IBMVFC_NO_CRQ; |
703 | vhost->logged_in = 0; | ||
695 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); | 704 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); |
696 | 705 | ||
697 | /* Clean out the queue */ | 706 | /* Clean out the queue */ |
@@ -807,10 +816,10 @@ static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
807 | } | 816 | } |
808 | 817 | ||
809 | /** | 818 | /** |
810 | * __ibmvfc_reset_host - Reset the connection to the server (no locking) | 819 | * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ |
811 | * @vhost: struct ibmvfc host to reset | 820 | * @vhost: struct ibmvfc host to reset |
812 | **/ | 821 | **/ |
813 | static void __ibmvfc_reset_host(struct ibmvfc_host *vhost) | 822 | static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost) |
814 | { | 823 | { |
815 | int rc; | 824 | int rc; |
816 | 825 | ||
@@ -826,9 +835,25 @@ static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
826 | } | 835 | } |
827 | 836 | ||
828 | /** | 837 | /** |
829 | * ibmvfc_reset_host - Reset the connection to the server | 838 | * __ibmvfc_reset_host - Reset the connection to the server (no locking) |
830 | * @vhost: struct ibmvfc host to reset | 839 | * @vhost: struct ibmvfc host to reset |
831 | **/ | 840 | **/ |
841 | static void __ibmvfc_reset_host(struct ibmvfc_host *vhost) | ||
842 | { | ||
843 | if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT && | ||
844 | !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) { | ||
845 | scsi_block_requests(vhost->host); | ||
846 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO); | ||
847 | vhost->job_step = ibmvfc_npiv_logout; | ||
848 | wake_up(&vhost->work_wait_q); | ||
849 | } else | ||
850 | ibmvfc_hard_reset_host(vhost); | ||
851 | } | ||
852 | |||
853 | /** | ||
854 | * ibmvfc_reset_host - Reset the connection to the server | ||
855 | * @vhost: ibmvfc host struct | ||
856 | **/ | ||
832 | static void ibmvfc_reset_host(struct ibmvfc_host *vhost) | 857 | static void ibmvfc_reset_host(struct ibmvfc_host *vhost) |
833 | { | 858 | { |
834 | unsigned long flags; | 859 | unsigned long flags; |
@@ -842,9 +867,13 @@ static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
842 | * ibmvfc_retry_host_init - Retry host initialization if allowed | 867 | * ibmvfc_retry_host_init - Retry host initialization if allowed |
843 | * @vhost: ibmvfc host struct | 868 | * @vhost: ibmvfc host struct |
844 | * | 869 | * |
870 | * Returns: 1 if init will be retried / 0 if not | ||
871 | * | ||
845 | **/ | 872 | **/ |
846 | static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost) | 873 | static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost) |
847 | { | 874 | { |
875 | int retry = 0; | ||
876 | |||
848 | if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) { | 877 | if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) { |
849 | vhost->delay_init = 1; | 878 | vhost->delay_init = 1; |
850 | if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) { | 879 | if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) { |
@@ -853,11 +882,14 @@ static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
853 | ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE); | 882 | ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE); |
854 | } else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES) | 883 | } else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES) |
855 | __ibmvfc_reset_host(vhost); | 884 | __ibmvfc_reset_host(vhost); |
856 | else | 885 | else { |
857 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); | 886 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); |
887 | retry = 1; | ||
888 | } | ||
858 | } | 889 | } |
859 | 890 | ||
860 | wake_up(&vhost->work_wait_q); | 891 | wake_up(&vhost->work_wait_q); |
892 | return retry; | ||
861 | } | 893 | } |
862 | 894 | ||
863 | /** | 895 | /** |
@@ -1137,8 +1169,9 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
1137 | login_info->partition_num = vhost->partition_number; | 1169 | login_info->partition_num = vhost->partition_number; |
1138 | login_info->vfc_frame_version = 1; | 1170 | login_info->vfc_frame_version = 1; |
1139 | login_info->fcp_version = 3; | 1171 | login_info->fcp_version = 3; |
1172 | login_info->flags = IBMVFC_FLUSH_ON_HALT; | ||
1140 | if (vhost->client_migrated) | 1173 | if (vhost->client_migrated) |
1141 | login_info->flags = IBMVFC_CLIENT_MIGRATED; | 1174 | login_info->flags |= IBMVFC_CLIENT_MIGRATED; |
1142 | 1175 | ||
1143 | login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ; | 1176 | login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ; |
1144 | login_info->capabilities = IBMVFC_CAN_MIGRATE; | 1177 | login_info->capabilities = IBMVFC_CAN_MIGRATE; |
@@ -1452,6 +1485,27 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt)
1452 | } | 1485 | } |
1453 | 1486 | ||
1454 | /** | 1487 | /** |
1488 | * ibmvfc_relogin - Log back into the specified device | ||
1489 | * @sdev: scsi device struct | ||
1490 | * | ||
1491 | **/ | ||
1492 | static void ibmvfc_relogin(struct scsi_device *sdev) | ||
1493 | { | ||
1494 | struct ibmvfc_host *vhost = shost_priv(sdev->host); | ||
1495 | struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); | ||
1496 | struct ibmvfc_target *tgt; | ||
1497 | |||
1498 | list_for_each_entry(tgt, &vhost->targets, queue) { | ||
1499 | if (rport == tgt->rport) { | ||
1500 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); | ||
1501 | break; | ||
1502 | } | ||
1503 | } | ||
1504 | |||
1505 | ibmvfc_reinit_host(vhost); | ||
1506 | } | ||
1507 | |||
1508 | /** | ||
1455 | * ibmvfc_scsi_done - Handle responses from commands | 1509 | * ibmvfc_scsi_done - Handle responses from commands |
1456 | * @evt: ibmvfc event to be handled | 1510 | * @evt: ibmvfc event to be handled |
1457 | * | 1511 | * |
@@ -1483,7 +1537,7 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
1483 | if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8) | 1537 | if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8) |
1484 | memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len); | 1538 | memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len); |
1485 | if ((vfc_cmd->status & IBMVFC_VIOS_FAILURE) && (vfc_cmd->error == IBMVFC_PLOGI_REQUIRED)) | 1539 | if ((vfc_cmd->status & IBMVFC_VIOS_FAILURE) && (vfc_cmd->error == IBMVFC_PLOGI_REQUIRED)) |
1486 | ibmvfc_reinit_host(evt->vhost); | 1540 | ibmvfc_relogin(cmnd->device); |
1487 | 1541 | ||
1488 | if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER))) | 1542 | if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER))) |
1489 | cmnd->result = (DID_ERROR << 16); | 1543 | cmnd->result = (DID_ERROR << 16); |
@@ -2148,13 +2202,31 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
2148 | struct ibmvfc_host *vhost) | 2202 | struct ibmvfc_host *vhost) |
2149 | { | 2203 | { |
2150 | const char *desc = ibmvfc_get_ae_desc(crq->event); | 2204 | const char *desc = ibmvfc_get_ae_desc(crq->event); |
2205 | struct ibmvfc_target *tgt; | ||
2151 | 2206 | ||
2152 | ibmvfc_log(vhost, 3, "%s event received. scsi_id: %llx, wwpn: %llx," | 2207 | ibmvfc_log(vhost, 3, "%s event received. scsi_id: %llx, wwpn: %llx," |
2153 | " node_name: %llx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name); | 2208 | " node_name: %llx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name); |
2154 | 2209 | ||
2155 | switch (crq->event) { | 2210 | switch (crq->event) { |
2156 | case IBMVFC_AE_LINK_UP: | ||
2157 | case IBMVFC_AE_RESUME: | 2211 | case IBMVFC_AE_RESUME: |
2212 | switch (crq->link_state) { | ||
2213 | case IBMVFC_AE_LS_LINK_DOWN: | ||
2214 | ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); | ||
2215 | break; | ||
2216 | case IBMVFC_AE_LS_LINK_DEAD: | ||
2217 | ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); | ||
2218 | break; | ||
2219 | case IBMVFC_AE_LS_LINK_UP: | ||
2220 | case IBMVFC_AE_LS_LINK_BOUNCED: | ||
2221 | default: | ||
2222 | vhost->events_to_log |= IBMVFC_AE_LINKUP; | ||
2223 | vhost->delay_init = 1; | ||
2224 | __ibmvfc_reset_host(vhost); | ||
2225 | break; | ||
2226 | }; | ||
2227 | |||
2228 | break; | ||
2229 | case IBMVFC_AE_LINK_UP: | ||
2158 | vhost->events_to_log |= IBMVFC_AE_LINKUP; | 2230 | vhost->events_to_log |= IBMVFC_AE_LINKUP; |
2159 | vhost->delay_init = 1; | 2231 | vhost->delay_init = 1; |
2160 | __ibmvfc_reset_host(vhost); | 2232 | __ibmvfc_reset_host(vhost); |
@@ -2168,10 +2240,27 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
2168 | case IBMVFC_AE_SCN_NPORT: | 2240 | case IBMVFC_AE_SCN_NPORT: |
2169 | case IBMVFC_AE_SCN_GROUP: | 2241 | case IBMVFC_AE_SCN_GROUP: |
2170 | vhost->events_to_log |= IBMVFC_AE_RSCN; | 2242 | vhost->events_to_log |= IBMVFC_AE_RSCN; |
2243 | ibmvfc_reinit_host(vhost); | ||
2244 | break; | ||
2171 | case IBMVFC_AE_ELS_LOGO: | 2245 | case IBMVFC_AE_ELS_LOGO: |
2172 | case IBMVFC_AE_ELS_PRLO: | 2246 | case IBMVFC_AE_ELS_PRLO: |
2173 | case IBMVFC_AE_ELS_PLOGI: | 2247 | case IBMVFC_AE_ELS_PLOGI: |
2174 | ibmvfc_reinit_host(vhost); | 2248 | list_for_each_entry(tgt, &vhost->targets, queue) { |
2249 | if (!crq->scsi_id && !crq->wwpn && !crq->node_name) | ||
2250 | break; | ||
2251 | if (crq->scsi_id && tgt->scsi_id != crq->scsi_id) | ||
2252 | continue; | ||
2253 | if (crq->wwpn && tgt->ids.port_name != crq->wwpn) | ||
2254 | continue; | ||
2255 | if (crq->node_name && tgt->ids.node_name != crq->node_name) | ||
2256 | continue; | ||
2257 | if (tgt->need_login && crq->event == IBMVFC_AE_ELS_LOGO) | ||
2258 | tgt->logo_rcvd = 1; | ||
2259 | if (!tgt->need_login || crq->event == IBMVFC_AE_ELS_PLOGI) { | ||
2260 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); | ||
2261 | ibmvfc_reinit_host(vhost); | ||
2262 | } | ||
2263 | } | ||
2175 | break; | 2264 | break; |
2176 | case IBMVFC_AE_LINK_DOWN: | 2265 | case IBMVFC_AE_LINK_DOWN: |
2177 | case IBMVFC_AE_ADAPTER_FAILED: | 2266 | case IBMVFC_AE_ADAPTER_FAILED: |
@@ -2222,6 +2311,7 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
2222 | return; | 2311 | return; |
2223 | case IBMVFC_CRQ_XPORT_EVENT: | 2312 | case IBMVFC_CRQ_XPORT_EVENT: |
2224 | vhost->state = IBMVFC_NO_CRQ; | 2313 | vhost->state = IBMVFC_NO_CRQ; |
2314 | vhost->logged_in = 0; | ||
2225 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); | 2315 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); |
2226 | if (crq->format == IBMVFC_PARTITION_MIGRATED) { | 2316 | if (crq->format == IBMVFC_PARTITION_MIGRATED) { |
2227 | /* We need to re-setup the interpartition connection */ | 2317 | /* We need to re-setup the interpartition connection */ |
@@ -2299,7 +2389,7 @@ static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2299 | done = 1; | 2389 | done = 1; |
2300 | } | 2390 | } |
2301 | 2391 | ||
2302 | if (vhost->state != IBMVFC_NO_CRQ && vhost->action == IBMVFC_HOST_ACTION_NONE) | 2392 | if (vhost->scan_complete) |
2303 | done = 1; | 2393 | done = 1; |
2304 | spin_unlock_irqrestore(shost->host_lock, flags); | 2394 | spin_unlock_irqrestore(shost->host_lock, flags); |
2305 | return done; | 2395 | return done; |
@@ -2434,14 +2524,6 @@ static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
2434 | vhost->login_buf->resp.partition_name); | 2524 | vhost->login_buf->resp.partition_name); |
2435 | } | 2525 | } |
2436 | 2526 | ||
2437 | static struct device_attribute ibmvfc_host_partition_name = { | ||
2438 | .attr = { | ||
2439 | .name = "partition_name", | ||
2440 | .mode = S_IRUGO, | ||
2441 | }, | ||
2442 | .show = ibmvfc_show_host_partition_name, | ||
2443 | }; | ||
2444 | |||
2445 | static ssize_t ibmvfc_show_host_device_name(struct device *dev, | 2527 | static ssize_t ibmvfc_show_host_device_name(struct device *dev, |
2446 | struct device_attribute *attr, char *buf) | 2528 | struct device_attribute *attr, char *buf) |
2447 | { | 2529 | { |
@@ -2452,14 +2534,6 @@ static ssize_t ibmvfc_show_host_device_name(struct device *dev,
2452 | vhost->login_buf->resp.device_name); | 2534 | vhost->login_buf->resp.device_name); |
2453 | } | 2535 | } |
2454 | 2536 | ||
2455 | static struct device_attribute ibmvfc_host_device_name = { | ||
2456 | .attr = { | ||
2457 | .name = "device_name", | ||
2458 | .mode = S_IRUGO, | ||
2459 | }, | ||
2460 | .show = ibmvfc_show_host_device_name, | ||
2461 | }; | ||
2462 | |||
2463 | static ssize_t ibmvfc_show_host_loc_code(struct device *dev, | 2537 | static ssize_t ibmvfc_show_host_loc_code(struct device *dev, |
2464 | struct device_attribute *attr, char *buf) | 2538 | struct device_attribute *attr, char *buf) |
2465 | { | 2539 | { |
@@ -2470,14 +2544,6 @@ static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
2470 | vhost->login_buf->resp.port_loc_code); | 2544 | vhost->login_buf->resp.port_loc_code); |
2471 | } | 2545 | } |
2472 | 2546 | ||
2473 | static struct device_attribute ibmvfc_host_loc_code = { | ||
2474 | .attr = { | ||
2475 | .name = "port_loc_code", | ||
2476 | .mode = S_IRUGO, | ||
2477 | }, | ||
2478 | .show = ibmvfc_show_host_loc_code, | ||
2479 | }; | ||
2480 | |||
2481 | static ssize_t ibmvfc_show_host_drc_name(struct device *dev, | 2547 | static ssize_t ibmvfc_show_host_drc_name(struct device *dev, |
2482 | struct device_attribute *attr, char *buf) | 2548 | struct device_attribute *attr, char *buf) |
2483 | { | 2549 | { |
@@ -2488,14 +2554,6 @@ static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
2488 | vhost->login_buf->resp.drc_name); | 2554 | vhost->login_buf->resp.drc_name); |
2489 | } | 2555 | } |
2490 | 2556 | ||
2491 | static struct device_attribute ibmvfc_host_drc_name = { | ||
2492 | .attr = { | ||
2493 | .name = "drc_name", | ||
2494 | .mode = S_IRUGO, | ||
2495 | }, | ||
2496 | .show = ibmvfc_show_host_drc_name, | ||
2497 | }; | ||
2498 | |||
2499 | static ssize_t ibmvfc_show_host_npiv_version(struct device *dev, | 2557 | static ssize_t ibmvfc_show_host_npiv_version(struct device *dev, |
2500 | struct device_attribute *attr, char *buf) | 2558 | struct device_attribute *attr, char *buf) |
2501 | { | 2559 | { |
@@ -2504,13 +2562,13 @@ static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
2504 | return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version); | 2562 | return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version); |
2505 | } | 2563 | } |
2506 | 2564 | ||
2507 | static struct device_attribute ibmvfc_host_npiv_version = { | 2565 | static ssize_t ibmvfc_show_host_capabilities(struct device *dev, |
2508 | .attr = { | 2566 | struct device_attribute *attr, char *buf) |
2509 | .name = "npiv_version", | 2567 | { |
2510 | .mode = S_IRUGO, | 2568 | struct Scsi_Host *shost = class_to_shost(dev); |
2511 | }, | 2569 | struct ibmvfc_host *vhost = shost_priv(shost); |
2512 | .show = ibmvfc_show_host_npiv_version, | 2570 | return snprintf(buf, PAGE_SIZE, "%llx\n", vhost->login_buf->resp.capabilities); |
2513 | }; | 2571 | } |
2514 | 2572 | ||
2515 | /** | 2573 | /** |
2516 | * ibmvfc_show_log_level - Show the adapter's error logging level | 2574 | * ibmvfc_show_log_level - Show the adapter's error logging level |
@@ -2556,14 +2614,14 @@ static ssize_t ibmvfc_store_log_level(struct device *dev,
2556 | return strlen(buf); | 2614 | return strlen(buf); |
2557 | } | 2615 | } |
2558 | 2616 | ||
2559 | static struct device_attribute ibmvfc_log_level_attr = { | 2617 | static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL); |
2560 | .attr = { | 2618 | static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL); |
2561 | .name = "log_level", | 2619 | static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL); |
2562 | .mode = S_IRUGO | S_IWUSR, | 2620 | static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL); |
2563 | }, | 2621 | static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL); |
2564 | .show = ibmvfc_show_log_level, | 2622 | static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL); |
2565 | .store = ibmvfc_store_log_level | 2623 | static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR, |
2566 | }; | 2624 | ibmvfc_show_log_level, ibmvfc_store_log_level); |
2567 | 2625 | ||
2568 | #ifdef CONFIG_SCSI_IBMVFC_TRACE | 2626 | #ifdef CONFIG_SCSI_IBMVFC_TRACE |
2569 | /** | 2627 | /** |
@@ -2612,12 +2670,13 @@ static struct bin_attribute ibmvfc_trace_attr = {
2612 | #endif | 2670 | #endif |
2613 | 2671 | ||
2614 | static struct device_attribute *ibmvfc_attrs[] = { | 2672 | static struct device_attribute *ibmvfc_attrs[] = { |
2615 | &ibmvfc_host_partition_name, | 2673 | &dev_attr_partition_name, |
2616 | &ibmvfc_host_device_name, | 2674 | &dev_attr_device_name, |
2617 | &ibmvfc_host_loc_code, | 2675 | &dev_attr_port_loc_code, |
2618 | &ibmvfc_host_drc_name, | 2676 | &dev_attr_drc_name, |
2619 | &ibmvfc_host_npiv_version, | 2677 | &dev_attr_npiv_version, |
2620 | &ibmvfc_log_level_attr, | 2678 | &dev_attr_capabilities, |
2679 | &dev_attr_log_level, | ||
2621 | NULL | 2680 | NULL |
2622 | }; | 2681 | }; |
2623 | 2682 | ||
@@ -2727,27 +2786,27 @@ static void ibmvfc_tasklet(void *data)
2727 | 2786 | ||
2728 | spin_lock_irqsave(vhost->host->host_lock, flags); | 2787 | spin_lock_irqsave(vhost->host->host_lock, flags); |
2729 | while (!done) { | 2788 | while (!done) { |
2730 | /* Pull all the valid messages off the CRQ */ | ||
2731 | while ((crq = ibmvfc_next_crq(vhost)) != NULL) { | ||
2732 | ibmvfc_handle_crq(crq, vhost); | ||
2733 | crq->valid = 0; | ||
2734 | } | ||
2735 | |||
2736 | /* Pull all the valid messages off the async CRQ */ | 2789 | /* Pull all the valid messages off the async CRQ */ |
2737 | while ((async = ibmvfc_next_async_crq(vhost)) != NULL) { | 2790 | while ((async = ibmvfc_next_async_crq(vhost)) != NULL) { |
2738 | ibmvfc_handle_async(async, vhost); | 2791 | ibmvfc_handle_async(async, vhost); |
2739 | async->valid = 0; | 2792 | async->valid = 0; |
2740 | } | 2793 | } |
2741 | 2794 | ||
2742 | vio_enable_interrupts(vdev); | 2795 | /* Pull all the valid messages off the CRQ */ |
2743 | if ((crq = ibmvfc_next_crq(vhost)) != NULL) { | 2796 | while ((crq = ibmvfc_next_crq(vhost)) != NULL) { |
2744 | vio_disable_interrupts(vdev); | ||
2745 | ibmvfc_handle_crq(crq, vhost); | 2797 | ibmvfc_handle_crq(crq, vhost); |
2746 | crq->valid = 0; | 2798 | crq->valid = 0; |
2747 | } else if ((async = ibmvfc_next_async_crq(vhost)) != NULL) { | 2799 | } |
2800 | |||
2801 | vio_enable_interrupts(vdev); | ||
2802 | if ((async = ibmvfc_next_async_crq(vhost)) != NULL) { | ||
2748 | vio_disable_interrupts(vdev); | 2803 | vio_disable_interrupts(vdev); |
2749 | ibmvfc_handle_async(async, vhost); | 2804 | ibmvfc_handle_async(async, vhost); |
2750 | async->valid = 0; | 2805 | async->valid = 0; |
2806 | } else if ((crq = ibmvfc_next_crq(vhost)) != NULL) { | ||
2807 | vio_disable_interrupts(vdev); | ||
2808 | ibmvfc_handle_crq(crq, vhost); | ||
2809 | crq->valid = 0; | ||
2751 | } else | 2810 | } else |
2752 | done = 1; | 2811 | done = 1; |
2753 | } | 2812 | } |
@@ -2774,15 +2833,19 @@ static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
2774 | * @tgt: ibmvfc target struct | 2833 | * @tgt: ibmvfc target struct |
2775 | * @job_step: initialization job step | 2834 | * @job_step: initialization job step |
2776 | * | 2835 | * |
2836 | * Returns: 1 if step will be retried / 0 if not | ||
2837 | * | ||
2777 | **/ | 2838 | **/ |
2778 | static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt, | 2839 | static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt, |
2779 | void (*job_step) (struct ibmvfc_target *)) | 2840 | void (*job_step) (struct ibmvfc_target *)) |
2780 | { | 2841 | { |
2781 | if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) { | 2842 | if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) { |
2782 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); | 2843 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); |
2783 | wake_up(&tgt->vhost->work_wait_q); | 2844 | wake_up(&tgt->vhost->work_wait_q); |
2845 | return 0; | ||
2784 | } else | 2846 | } else |
2785 | ibmvfc_init_tgt(tgt, job_step); | 2847 | ibmvfc_init_tgt(tgt, job_step); |
2848 | return 1; | ||
2786 | } | 2849 | } |
2787 | 2850 | ||
2788 | /* Defined in FC-LS */ | 2851 | /* Defined in FC-LS */ |
@@ -2831,7 +2894,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
2831 | struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli; | 2894 | struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli; |
2832 | struct ibmvfc_prli_svc_parms *parms = &rsp->parms; | 2895 | struct ibmvfc_prli_svc_parms *parms = &rsp->parms; |
2833 | u32 status = rsp->common.status; | 2896 | u32 status = rsp->common.status; |
2834 | int index; | 2897 | int index, level = IBMVFC_DEFAULT_LOG_LEVEL; |
2835 | 2898 | ||
2836 | vhost->discovery_threads--; | 2899 | vhost->discovery_threads--; |
2837 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); | 2900 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); |
@@ -2850,7 +2913,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
2850 | tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET; | 2913 | tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET; |
2851 | if (parms->service_parms & IBMVFC_PRLI_INITIATOR_FUNC) | 2914 | if (parms->service_parms & IBMVFC_PRLI_INITIATOR_FUNC) |
2852 | tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR; | 2915 | tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR; |
2853 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_ADD_RPORT); | 2916 | tgt->add_rport = 1; |
2854 | } else | 2917 | } else |
2855 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); | 2918 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); |
2856 | } else if (prli_rsp[index].retry) | 2919 | } else if (prli_rsp[index].retry) |
@@ -2867,13 +2930,18 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
2867 | break; | 2930 | break; |
2868 | case IBMVFC_MAD_FAILED: | 2931 | case IBMVFC_MAD_FAILED: |
2869 | default: | 2932 | default: |
2870 | tgt_err(tgt, "Process Login failed: %s (%x:%x) rc=0x%02X\n", | 2933 | if ((rsp->status & IBMVFC_VIOS_FAILURE) && rsp->error == IBMVFC_PLOGI_REQUIRED) |
2871 | ibmvfc_get_cmd_error(rsp->status, rsp->error), | 2934 | level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); |
2872 | rsp->status, rsp->error, status); | 2935 | else if (tgt->logo_rcvd) |
2873 | if (ibmvfc_retry_cmd(rsp->status, rsp->error)) | 2936 | level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); |
2874 | ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli); | 2937 | else if (ibmvfc_retry_cmd(rsp->status, rsp->error)) |
2938 | level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli); | ||
2875 | else | 2939 | else |
2876 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); | 2940 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); |
2941 | |||
2942 | tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n", | ||
2943 | ibmvfc_get_cmd_error(rsp->status, rsp->error), | ||
2944 | rsp->status, rsp->error, status); | ||
2877 | break; | 2945 | break; |
2878 | }; | 2946 | }; |
2879 | 2947 | ||
@@ -2932,6 +3000,7 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
2932 | struct ibmvfc_host *vhost = evt->vhost; | 3000 | struct ibmvfc_host *vhost = evt->vhost; |
2933 | struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi; | 3001 | struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi; |
2934 | u32 status = rsp->common.status; | 3002 | u32 status = rsp->common.status; |
3003 | int level = IBMVFC_DEFAULT_LOG_LEVEL; | ||
2935 | 3004 | ||
2936 | vhost->discovery_threads--; | 3005 | vhost->discovery_threads--; |
2937 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); | 3006 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); |
@@ -2960,15 +3029,15 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
2960 | break; | 3029 | break; |
2961 | case IBMVFC_MAD_FAILED: | 3030 | case IBMVFC_MAD_FAILED: |
2962 | default: | 3031 | default: |
2963 | tgt_err(tgt, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", | ||
2964 | ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error, | ||
2965 | ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type, | ||
2966 | ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status); | ||
2967 | |||
2968 | if (ibmvfc_retry_cmd(rsp->status, rsp->error)) | 3032 | if (ibmvfc_retry_cmd(rsp->status, rsp->error)) |
2969 | ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); | 3033 | level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); |
2970 | else | 3034 | else |
2971 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); | 3035 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); |
3036 | |||
3037 | tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", | ||
3038 | ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error, | ||
3039 | ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type, | ||
3040 | ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status); | ||
2972 | break; | 3041 | break; |
2973 | }; | 3042 | }; |
2974 | 3043 | ||
@@ -2992,6 +3061,7 @@ static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
2992 | return; | 3061 | return; |
2993 | 3062 | ||
2994 | kref_get(&tgt->kref); | 3063 | kref_get(&tgt->kref); |
3064 | tgt->logo_rcvd = 0; | ||
2995 | evt = ibmvfc_get_event(vhost); | 3065 | evt = ibmvfc_get_event(vhost); |
2996 | vhost->discovery_threads++; | 3066 | vhost->discovery_threads++; |
2997 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); | 3067 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); |
@@ -3129,13 +3199,13 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
3129 | case IBMVFC_MAD_SUCCESS: | 3199 | case IBMVFC_MAD_SUCCESS: |
3130 | tgt_dbg(tgt, "ADISC succeeded\n"); | 3200 | tgt_dbg(tgt, "ADISC succeeded\n"); |
3131 | if (ibmvfc_adisc_needs_plogi(mad, tgt)) | 3201 | if (ibmvfc_adisc_needs_plogi(mad, tgt)) |
3132 | tgt->need_login = 1; | 3202 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); |
3133 | break; | 3203 | break; |
3134 | case IBMVFC_MAD_DRIVER_FAILED: | 3204 | case IBMVFC_MAD_DRIVER_FAILED: |
3135 | break; | 3205 | break; |
3136 | case IBMVFC_MAD_FAILED: | 3206 | case IBMVFC_MAD_FAILED: |
3137 | default: | 3207 | default: |
3138 | tgt->need_login = 1; | 3208 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); |
3139 | fc_reason = (mad->fc_iu.response[1] & 0x00ff0000) >> 16; | 3209 | fc_reason = (mad->fc_iu.response[1] & 0x00ff0000) >> 16; |
3140 | fc_explain = (mad->fc_iu.response[1] & 0x0000ff00) >> 8; | 3210 | fc_explain = (mad->fc_iu.response[1] & 0x0000ff00) >> 8; |
3141 | tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", | 3211 | tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", |
@@ -3322,6 +3392,7 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
3322 | struct ibmvfc_host *vhost = evt->vhost; | 3392 | struct ibmvfc_host *vhost = evt->vhost; |
3323 | struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt; | 3393 | struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt; |
3324 | u32 status = rsp->common.status; | 3394 | u32 status = rsp->common.status; |
3395 | int level = IBMVFC_DEFAULT_LOG_LEVEL; | ||
3325 | 3396 | ||
3326 | vhost->discovery_threads--; | 3397 | vhost->discovery_threads--; |
3327 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); | 3398 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); |
@@ -3341,19 +3412,19 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
3341 | break; | 3412 | break; |
3342 | case IBMVFC_MAD_FAILED: | 3413 | case IBMVFC_MAD_FAILED: |
3343 | default: | 3414 | default: |
3344 | tgt_err(tgt, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", | ||
3345 | ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error, | ||
3346 | ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type, | ||
3347 | ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status); | ||
3348 | |||
3349 | if ((rsp->status & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED && | 3415 | if ((rsp->status & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED && |
3350 | rsp->error == IBMVFC_UNABLE_TO_PERFORM_REQ && | 3416 | rsp->error == IBMVFC_UNABLE_TO_PERFORM_REQ && |
3351 | rsp->fc_explain == IBMVFC_PORT_NAME_NOT_REG) | 3417 | rsp->fc_explain == IBMVFC_PORT_NAME_NOT_REG) |
3352 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); | 3418 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); |
3353 | else if (ibmvfc_retry_cmd(rsp->status, rsp->error)) | 3419 | else if (ibmvfc_retry_cmd(rsp->status, rsp->error)) |
3354 | ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target); | 3420 | level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target); |
3355 | else | 3421 | else |
3356 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); | 3422 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); |
3423 | |||
3424 | tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", | ||
3425 | ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error, | ||
3426 | ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type, | ||
3427 | ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status); | ||
3357 | break; | 3428 | break; |
3358 | }; | 3429 | }; |
3359 | 3430 | ||
@@ -3420,7 +3491,7 @@ static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id)
3420 | } | 3491 | } |
3421 | spin_unlock_irqrestore(vhost->host->host_lock, flags); | 3492 | spin_unlock_irqrestore(vhost->host->host_lock, flags); |
3422 | 3493 | ||
3423 | tgt = mempool_alloc(vhost->tgt_pool, GFP_KERNEL); | 3494 | tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO); |
3424 | if (!tgt) { | 3495 | if (!tgt) { |
3425 | dev_err(vhost->dev, "Target allocation failure for scsi id %08llx\n", | 3496 | dev_err(vhost->dev, "Target allocation failure for scsi id %08llx\n", |
3426 | scsi_id); | 3497 | scsi_id); |
@@ -3472,6 +3543,7 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
3472 | struct ibmvfc_host *vhost = evt->vhost; | 3543 | struct ibmvfc_host *vhost = evt->vhost; |
3473 | struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets; | 3544 | struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets; |
3474 | u32 mad_status = rsp->common.status; | 3545 | u32 mad_status = rsp->common.status; |
3546 | int level = IBMVFC_DEFAULT_LOG_LEVEL; | ||
3475 | 3547 | ||
3476 | switch (mad_status) { | 3548 | switch (mad_status) { |
3477 | case IBMVFC_MAD_SUCCESS: | 3549 | case IBMVFC_MAD_SUCCESS: |
@@ -3480,9 +3552,9 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
3480 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS); | 3552 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS); |
3481 | break; | 3553 | break; |
3482 | case IBMVFC_MAD_FAILED: | 3554 | case IBMVFC_MAD_FAILED: |
3483 | dev_err(vhost->dev, "Discover Targets failed: %s (%x:%x)\n", | 3555 | level += ibmvfc_retry_host_init(vhost); |
3484 | ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error); | 3556 | ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n", |
3485 | ibmvfc_retry_host_init(vhost); | 3557 | ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error); |
3486 | break; | 3558 | break; |
3487 | case IBMVFC_MAD_DRIVER_FAILED: | 3559 | case IBMVFC_MAD_DRIVER_FAILED: |
3488 | break; | 3560 | break; |
@@ -3534,18 +3606,19 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
3534 | u32 mad_status = evt->xfer_iu->npiv_login.common.status; | 3606 | u32 mad_status = evt->xfer_iu->npiv_login.common.status; |
3535 | struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp; | 3607 | struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp; |
3536 | unsigned int npiv_max_sectors; | 3608 | unsigned int npiv_max_sectors; |
3609 | int level = IBMVFC_DEFAULT_LOG_LEVEL; | ||
3537 | 3610 | ||
3538 | switch (mad_status) { | 3611 | switch (mad_status) { |
3539 | case IBMVFC_MAD_SUCCESS: | 3612 | case IBMVFC_MAD_SUCCESS: |
3540 | ibmvfc_free_event(evt); | 3613 | ibmvfc_free_event(evt); |
3541 | break; | 3614 | break; |
3542 | case IBMVFC_MAD_FAILED: | 3615 | case IBMVFC_MAD_FAILED: |
3543 | dev_err(vhost->dev, "NPIV Login failed: %s (%x:%x)\n", | ||
3544 | ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error); | ||
3545 | if (ibmvfc_retry_cmd(rsp->status, rsp->error)) | 3616 | if (ibmvfc_retry_cmd(rsp->status, rsp->error)) |
3546 | ibmvfc_retry_host_init(vhost); | 3617 | level += ibmvfc_retry_host_init(vhost); |
3547 | else | 3618 | else |
3548 | ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); | 3619 | ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); |
3620 | ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n", | ||
3621 | ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error); | ||
3549 | ibmvfc_free_event(evt); | 3622 | ibmvfc_free_event(evt); |
3550 | return; | 3623 | return; |
3551 | case IBMVFC_MAD_CRQ_ERROR: | 3624 | case IBMVFC_MAD_CRQ_ERROR: |
@@ -3578,6 +3651,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
3578 | return; | 3651 | return; |
3579 | } | 3652 | } |
3580 | 3653 | ||
3654 | vhost->logged_in = 1; | ||
3581 | npiv_max_sectors = min((uint)(rsp->max_dma_len >> 9), IBMVFC_MAX_SECTORS); | 3655 | npiv_max_sectors = min((uint)(rsp->max_dma_len >> 9), IBMVFC_MAX_SECTORS); |
3582 | dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n", | 3656 | dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n", |
3583 | rsp->partition_name, rsp->device_name, rsp->port_loc_code, | 3657 | rsp->partition_name, rsp->device_name, rsp->port_loc_code, |
@@ -3636,6 +3710,65 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
3636 | }; | 3710 | }; |
3637 | 3711 | ||
3638 | /** | 3712 | /** |
3713 | * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout | ||
3714 | * @vhost: ibmvfc host struct | ||
3715 | * | ||
3716 | **/ | ||
3717 | static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt) | ||
3718 | { | ||
3719 | struct ibmvfc_host *vhost = evt->vhost; | ||
3720 | u32 mad_status = evt->xfer_iu->npiv_logout.common.status; | ||
3721 | |||
3722 | ibmvfc_free_event(evt); | ||
3723 | |||
3724 | switch (mad_status) { | ||
3725 | case IBMVFC_MAD_SUCCESS: | ||
3726 | if (list_empty(&vhost->sent) && | ||
3727 | vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) { | ||
3728 | ibmvfc_init_host(vhost, 0); | ||
3729 | return; | ||
3730 | } | ||
3731 | break; | ||
3732 | case IBMVFC_MAD_FAILED: | ||
3733 | case IBMVFC_MAD_NOT_SUPPORTED: | ||
3734 | case IBMVFC_MAD_CRQ_ERROR: | ||
3735 | case IBMVFC_MAD_DRIVER_FAILED: | ||
3736 | default: | ||
3737 | ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status); | ||
3738 | break; | ||
3739 | } | ||
3740 | |||
3741 | ibmvfc_hard_reset_host(vhost); | ||
3742 | } | ||
3743 | |||
3744 | /** | ||
3745 | * ibmvfc_npiv_logout - Issue an NPIV Logout | ||
3746 | * @vhost: ibmvfc host struct | ||
3747 | * | ||
3748 | **/ | ||
3749 | static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost) | ||
3750 | { | ||
3751 | struct ibmvfc_npiv_logout_mad *mad; | ||
3752 | struct ibmvfc_event *evt; | ||
3753 | |||
3754 | evt = ibmvfc_get_event(vhost); | ||
3755 | ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT); | ||
3756 | |||
3757 | mad = &evt->iu.npiv_logout; | ||
3758 | memset(mad, 0, sizeof(*mad)); | ||
3759 | mad->common.version = 1; | ||
3760 | mad->common.opcode = IBMVFC_NPIV_LOGOUT; | ||
3761 | mad->common.length = sizeof(struct ibmvfc_npiv_logout_mad); | ||
3762 | |||
3763 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT); | ||
3764 | |||
3765 | if (!ibmvfc_send_event(evt, vhost, default_timeout)) | ||
3766 | ibmvfc_dbg(vhost, "Sent NPIV logout\n"); | ||
3767 | else | ||
3768 | ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); | ||
3769 | } | ||
3770 | |||
3771 | /** | ||
3639 | * ibmvfc_dev_init_to_do - Is there target initialization work to do? | 3772 | * ibmvfc_dev_init_to_do - Is there target initialization work to do? |
3640 | * @vhost: ibmvfc host struct | 3773 | * @vhost: ibmvfc host struct |
3641 | * | 3774 | * |
@@ -3671,6 +3804,7 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
3671 | switch (vhost->action) { | 3804 | switch (vhost->action) { |
3672 | case IBMVFC_HOST_ACTION_NONE: | 3805 | case IBMVFC_HOST_ACTION_NONE: |
3673 | case IBMVFC_HOST_ACTION_INIT_WAIT: | 3806 | case IBMVFC_HOST_ACTION_INIT_WAIT: |
3807 | case IBMVFC_HOST_ACTION_LOGO_WAIT: | ||
3674 | return 0; | 3808 | return 0; |
3675 | case IBMVFC_HOST_ACTION_TGT_INIT: | 3809 | case IBMVFC_HOST_ACTION_TGT_INIT: |
3676 | case IBMVFC_HOST_ACTION_QUERY_TGTS: | 3810 | case IBMVFC_HOST_ACTION_QUERY_TGTS: |
@@ -3683,9 +3817,9 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
3683 | if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT) | 3817 | if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT) |
3684 | return 0; | 3818 | return 0; |
3685 | return 1; | 3819 | return 1; |
3820 | case IBMVFC_HOST_ACTION_LOGO: | ||
3686 | case IBMVFC_HOST_ACTION_INIT: | 3821 | case IBMVFC_HOST_ACTION_INIT: |
3687 | case IBMVFC_HOST_ACTION_ALLOC_TGTS: | 3822 | case IBMVFC_HOST_ACTION_ALLOC_TGTS: |
3688 | case IBMVFC_HOST_ACTION_TGT_ADD: | ||
3689 | case IBMVFC_HOST_ACTION_TGT_DEL: | 3823 | case IBMVFC_HOST_ACTION_TGT_DEL: |
3690 | case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: | 3824 | case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: |
3691 | case IBMVFC_HOST_ACTION_QUERY: | 3825 | case IBMVFC_HOST_ACTION_QUERY: |
@@ -3740,25 +3874,26 @@ static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
3740 | static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt) | 3874 | static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt) |
3741 | { | 3875 | { |
3742 | struct ibmvfc_host *vhost = tgt->vhost; | 3876 | struct ibmvfc_host *vhost = tgt->vhost; |
3743 | struct fc_rport *rport = tgt->rport; | 3877 | struct fc_rport *rport; |
3744 | unsigned long flags; | 3878 | unsigned long flags; |
3745 | 3879 | ||
3746 | if (rport) { | 3880 | tgt_dbg(tgt, "Adding rport\n"); |
3747 | tgt_dbg(tgt, "Setting rport roles\n"); | 3881 | rport = fc_remote_port_add(vhost->host, 0, &tgt->ids); |
3748 | fc_remote_port_rolechg(rport, tgt->ids.roles); | 3882 | spin_lock_irqsave(vhost->host->host_lock, flags); |
3749 | spin_lock_irqsave(vhost->host->host_lock, flags); | 3883 | |
3750 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); | 3884 | if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) { |
3885 | tgt_dbg(tgt, "Deleting rport\n"); | ||
3886 | list_del(&tgt->queue); | ||
3751 | spin_unlock_irqrestore(vhost->host->host_lock, flags); | 3887 | spin_unlock_irqrestore(vhost->host->host_lock, flags); |
3888 | fc_remote_port_delete(rport); | ||
3889 | del_timer_sync(&tgt->timer); | ||
3890 | kref_put(&tgt->kref, ibmvfc_release_tgt); | ||
3752 | return; | 3891 | return; |
3753 | } | 3892 | } |
3754 | 3893 | ||
3755 | tgt_dbg(tgt, "Adding rport\n"); | ||
3756 | rport = fc_remote_port_add(vhost->host, 0, &tgt->ids); | ||
3757 | spin_lock_irqsave(vhost->host->host_lock, flags); | ||
3758 | tgt->rport = rport; | ||
3759 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); | ||
3760 | if (rport) { | 3894 | if (rport) { |
3761 | tgt_dbg(tgt, "rport add succeeded\n"); | 3895 | tgt_dbg(tgt, "rport add succeeded\n"); |
3896 | tgt->rport = rport; | ||
3762 | rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff; | 3897 | rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff; |
3763 | rport->supported_classes = 0; | 3898 | rport->supported_classes = 0; |
3764 | tgt->target_id = rport->scsi_target_id; | 3899 | tgt->target_id = rport->scsi_target_id; |
@@ -3789,8 +3924,12 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
3789 | vhost->events_to_log = 0; | 3924 | vhost->events_to_log = 0; |
3790 | switch (vhost->action) { | 3925 | switch (vhost->action) { |
3791 | case IBMVFC_HOST_ACTION_NONE: | 3926 | case IBMVFC_HOST_ACTION_NONE: |
3927 | case IBMVFC_HOST_ACTION_LOGO_WAIT: | ||
3792 | case IBMVFC_HOST_ACTION_INIT_WAIT: | 3928 | case IBMVFC_HOST_ACTION_INIT_WAIT: |
3793 | break; | 3929 | break; |
3930 | case IBMVFC_HOST_ACTION_LOGO: | ||
3931 | vhost->job_step(vhost); | ||
3932 | break; | ||
3794 | case IBMVFC_HOST_ACTION_INIT: | 3933 | case IBMVFC_HOST_ACTION_INIT: |
3795 | BUG_ON(vhost->state != IBMVFC_INITIALIZING); | 3934 | BUG_ON(vhost->state != IBMVFC_INITIALIZING); |
3796 | if (vhost->delay_init) { | 3935 | if (vhost->delay_init) { |
@@ -3836,11 +3975,21 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
3836 | 3975 | ||
3837 | if (vhost->state == IBMVFC_INITIALIZING) { | 3976 | if (vhost->state == IBMVFC_INITIALIZING) { |
3838 | if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) { | 3977 | if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) { |
3839 | ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE); | 3978 | if (vhost->reinit) { |
3840 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_ADD); | 3979 | vhost->reinit = 0; |
3841 | vhost->init_retries = 0; | 3980 | scsi_block_requests(vhost->host); |
3842 | spin_unlock_irqrestore(vhost->host->host_lock, flags); | 3981 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY); |
3843 | scsi_unblock_requests(vhost->host); | 3982 | spin_unlock_irqrestore(vhost->host->host_lock, flags); |
3983 | } else { | ||
3984 | ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE); | ||
3985 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); | ||
3986 | wake_up(&vhost->init_wait_q); | ||
3987 | schedule_work(&vhost->rport_add_work_q); | ||
3988 | vhost->init_retries = 0; | ||
3989 | spin_unlock_irqrestore(vhost->host->host_lock, flags); | ||
3990 | scsi_unblock_requests(vhost->host); | ||
3991 | } | ||
3992 | |||
3844 | return; | 3993 | return; |
3845 | } else { | 3994 | } else { |
3846 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); | 3995 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); |
@@ -3871,24 +4020,6 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
3871 | if (!ibmvfc_dev_init_to_do(vhost)) | 4020 | if (!ibmvfc_dev_init_to_do(vhost)) |
3872 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED); | 4021 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED); |
3873 | break; | 4022 | break; |
3874 | case IBMVFC_HOST_ACTION_TGT_ADD: | ||
3875 | list_for_each_entry(tgt, &vhost->targets, queue) { | ||
3876 | if (tgt->action == IBMVFC_TGT_ACTION_ADD_RPORT) { | ||
3877 | spin_unlock_irqrestore(vhost->host->host_lock, flags); | ||
3878 | ibmvfc_tgt_add_rport(tgt); | ||
3879 | return; | ||
3880 | } | ||
3881 | } | ||
3882 | |||
3883 | if (vhost->reinit && !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) { | ||
3884 | vhost->reinit = 0; | ||
3885 | scsi_block_requests(vhost->host); | ||
3886 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY); | ||
3887 | } else { | ||
3888 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); | ||
3889 | wake_up(&vhost->init_wait_q); | ||
3890 | } | ||
3891 | break; | ||
3892 | default: | 4023 | default: |
3893 | break; | 4024 | break; |
3894 | }; | 4025 | }; |
@@ -4118,6 +4249,56 @@ nomem:
4118 | } | 4249 | } |
4119 | 4250 | ||
4120 | /** | 4251 | /** |
4252 | * ibmvfc_rport_add_thread - Worker thread for rport adds | ||
4253 | * @work: work struct | ||
4254 | * | ||
4255 | **/ | ||
4256 | static void ibmvfc_rport_add_thread(struct work_struct *work) | ||
4257 | { | ||
4258 | struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host, | ||
4259 | rport_add_work_q); | ||
4260 | struct ibmvfc_target *tgt; | ||
4261 | struct fc_rport *rport; | ||
4262 | unsigned long flags; | ||
4263 | int did_work; | ||
4264 | |||
4265 | ENTER; | ||
4266 | spin_lock_irqsave(vhost->host->host_lock, flags); | ||
4267 | do { | ||
4268 | did_work = 0; | ||
4269 | if (vhost->state != IBMVFC_ACTIVE) | ||
4270 | break; | ||
4271 | |||
4272 | list_for_each_entry(tgt, &vhost->targets, queue) { | ||
4273 | if (tgt->add_rport) { | ||
4274 | did_work = 1; | ||
4275 | tgt->add_rport = 0; | ||
4276 | kref_get(&tgt->kref); | ||
4277 | rport = tgt->rport; | ||
4278 | if (!rport) { | ||
4279 | spin_unlock_irqrestore(vhost->host->host_lock, flags); | ||
4280 | ibmvfc_tgt_add_rport(tgt); | ||
4281 | } else if (get_device(&rport->dev)) { | ||
4282 | spin_unlock_irqrestore(vhost->host->host_lock, flags); | ||
4283 | tgt_dbg(tgt, "Setting rport roles\n"); | ||
4284 | fc_remote_port_rolechg(rport, tgt->ids.roles); | ||
4285 | put_device(&rport->dev); | ||
4286 | } | ||
4287 | |||
4288 | kref_put(&tgt->kref, ibmvfc_release_tgt); | ||
4289 | spin_lock_irqsave(vhost->host->host_lock, flags); | ||
4290 | break; | ||
4291 | } | ||
4292 | } | ||
4293 | } while(did_work); | ||
4294 | |||
4295 | if (vhost->state == IBMVFC_ACTIVE) | ||
4296 | vhost->scan_complete = 1; | ||
4297 | spin_unlock_irqrestore(vhost->host->host_lock, flags); | ||
4298 | LEAVE; | ||
4299 | } | ||
4300 | |||
4301 | /** | ||
4121 | * ibmvfc_probe - Adapter hot plug add entry point | 4302 | * ibmvfc_probe - Adapter hot plug add entry point |
4122 | * @vdev: vio device struct | 4303 | * @vdev: vio device struct |
4123 | * @id: vio device id struct | 4304 | * @id: vio device id struct |
@@ -4160,6 +4341,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
4160 | strcpy(vhost->partition_name, "UNKNOWN"); | 4341 | strcpy(vhost->partition_name, "UNKNOWN"); |
4161 | init_waitqueue_head(&vhost->work_wait_q); | 4342 | init_waitqueue_head(&vhost->work_wait_q); |
4162 | init_waitqueue_head(&vhost->init_wait_q); | 4343 | init_waitqueue_head(&vhost->init_wait_q); |
4344 | INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread); | ||
4163 | 4345 | ||
4164 | if ((rc = ibmvfc_alloc_mem(vhost))) | 4346 | if ((rc = ibmvfc_alloc_mem(vhost))) |
4165 | goto free_scsi_host; | 4347 | goto free_scsi_host; |
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index ca1dcf7a7568..007fa1c9ef14 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -29,8 +29,8 @@
29 | #include "viosrp.h" | 29 | #include "viosrp.h" |
30 | 30 | ||
31 | #define IBMVFC_NAME "ibmvfc" | 31 | #define IBMVFC_NAME "ibmvfc" |
32 | #define IBMVFC_DRIVER_VERSION "1.0.5" | 32 | #define IBMVFC_DRIVER_VERSION "1.0.6" |
33 | #define IBMVFC_DRIVER_DATE "(March 19, 2009)" | 33 | #define IBMVFC_DRIVER_DATE "(May 28, 2009)" |
34 | 34 | ||
35 | #define IBMVFC_DEFAULT_TIMEOUT 60 | 35 | #define IBMVFC_DEFAULT_TIMEOUT 60 |
36 | #define IBMVFC_ADISC_CANCEL_TIMEOUT 45 | 36 | #define IBMVFC_ADISC_CANCEL_TIMEOUT 45 |
@@ -57,9 +57,10 @@
57 | * Ensure we have resources for ERP and initialization: | 57 | * Ensure we have resources for ERP and initialization: |
58 | * 1 for ERP | 58 | * 1 for ERP |
59 | * 1 for initialization | 59 | * 1 for initialization |
60 | * 1 for NPIV Logout | ||
60 | * 2 for each discovery thread | 61 | * 2 for each discovery thread |
61 | */ | 62 | */ |
62 | #define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + (disc_threads * 2)) | 63 | #define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + 1 + (disc_threads * 2)) |
63 | 64 | ||
64 | #define IBMVFC_MAD_SUCCESS 0x00 | 65 | #define IBMVFC_MAD_SUCCESS 0x00 |
65 | #define IBMVFC_MAD_NOT_SUPPORTED 0xF1 | 66 | #define IBMVFC_MAD_NOT_SUPPORTED 0xF1 |
@@ -127,6 +128,7 @@ enum ibmvfc_mad_types {
127 | IBMVFC_IMPLICIT_LOGOUT = 0x0040, | 128 | IBMVFC_IMPLICIT_LOGOUT = 0x0040, |
128 | IBMVFC_PASSTHRU = 0x0200, | 129 | IBMVFC_PASSTHRU = 0x0200, |
129 | IBMVFC_TMF_MAD = 0x0100, | 130 | IBMVFC_TMF_MAD = 0x0100, |
131 | IBMVFC_NPIV_LOGOUT = 0x0800, | ||
130 | }; | 132 | }; |
131 | 133 | ||
132 | struct ibmvfc_mad_common { | 134 | struct ibmvfc_mad_common { |
@@ -143,6 +145,10 @@ struct ibmvfc_npiv_login_mad {
143 | struct srp_direct_buf buffer; | 145 | struct srp_direct_buf buffer; |
144 | }__attribute__((packed, aligned (8))); | 146 | }__attribute__((packed, aligned (8))); |
145 | 147 | ||
148 | struct ibmvfc_npiv_logout_mad { | ||
149 | struct ibmvfc_mad_common common; | ||
150 | }__attribute__((packed, aligned (8))); | ||
151 | |||
146 | #define IBMVFC_MAX_NAME 256 | 152 | #define IBMVFC_MAX_NAME 256 |
147 | 153 | ||
148 | struct ibmvfc_npiv_login { | 154 | struct ibmvfc_npiv_login { |
@@ -201,7 +207,8 @@ struct ibmvfc_npiv_login_resp {
201 | #define IBMVFC_NATIVE_FC 0x01 | 207 | #define IBMVFC_NATIVE_FC 0x01 |
202 | #define IBMVFC_CAN_FLUSH_ON_HALT 0x08 | 208 | #define IBMVFC_CAN_FLUSH_ON_HALT 0x08 |
203 | u32 reserved; | 209 | u32 reserved; |
204 | u64 capabilites; | 210 | u64 capabilities; |
211 | #define IBMVFC_CAN_FLUSH_ON_HALT 0x08 | ||
205 | u32 max_cmds; | 212 | u32 max_cmds; |
206 | u32 scsi_id_sz; | 213 | u32 scsi_id_sz; |
207 | u64 max_dma_len; | 214 | u64 max_dma_len; |
@@ -541,9 +548,17 @@ struct ibmvfc_crq_queue {
541 | dma_addr_t msg_token; | 548 | dma_addr_t msg_token; |
542 | }; | 549 | }; |
543 | 550 | ||
551 | enum ibmvfc_ae_link_state { | ||
552 | IBMVFC_AE_LS_LINK_UP = 0x01, | ||
553 | IBMVFC_AE_LS_LINK_BOUNCED = 0x02, | ||
554 | IBMVFC_AE_LS_LINK_DOWN = 0x04, | ||
555 | IBMVFC_AE_LS_LINK_DEAD = 0x08, | ||
556 | }; | ||
557 | |||
544 | struct ibmvfc_async_crq { | 558 | struct ibmvfc_async_crq { |
545 | volatile u8 valid; | 559 | volatile u8 valid; |
546 | u8 pad[3]; | 560 | u8 link_state; |
561 | u8 pad[2]; | ||
547 | u32 pad2; | 562 | u32 pad2; |
548 | volatile u64 event; | 563 | volatile u64 event; |
549 | volatile u64 scsi_id; | 564 | volatile u64 scsi_id; |
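The async CRQ now reports a per-event link state in a byte that used to be padding. A hedged sketch of turning that byte into a printable suffix for async event log messages; only the enum values come from this hunk, the helper name is illustrative.

static const char *example_link_state_desc(u8 link_state)
{
	switch (link_state) {
	case IBMVFC_AE_LS_LINK_UP:      return " Link Up";
	case IBMVFC_AE_LS_LINK_BOUNCED: return " Link Bounced";
	case IBMVFC_AE_LS_LINK_DOWN:    return " Link Down";
	case IBMVFC_AE_LS_LINK_DEAD:    return " Link Dead";
	default:                        return "";
	}
}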
@@ -561,6 +576,7 @@ struct ibmvfc_async_crq_queue { | |||
561 | union ibmvfc_iu { | 576 | union ibmvfc_iu { |
562 | struct ibmvfc_mad_common mad_common; | 577 | struct ibmvfc_mad_common mad_common; |
563 | struct ibmvfc_npiv_login_mad npiv_login; | 578 | struct ibmvfc_npiv_login_mad npiv_login; |
579 | struct ibmvfc_npiv_logout_mad npiv_logout; | ||
564 | struct ibmvfc_discover_targets discover_targets; | 580 | struct ibmvfc_discover_targets discover_targets; |
565 | struct ibmvfc_port_login plogi; | 581 | struct ibmvfc_port_login plogi; |
566 | struct ibmvfc_process_login prli; | 582 | struct ibmvfc_process_login prli; |
@@ -575,7 +591,6 @@ enum ibmvfc_target_action { | |||
575 | IBMVFC_TGT_ACTION_NONE = 0, | 591 | IBMVFC_TGT_ACTION_NONE = 0, |
576 | IBMVFC_TGT_ACTION_INIT, | 592 | IBMVFC_TGT_ACTION_INIT, |
577 | IBMVFC_TGT_ACTION_INIT_WAIT, | 593 | IBMVFC_TGT_ACTION_INIT_WAIT, |
578 | IBMVFC_TGT_ACTION_ADD_RPORT, | ||
579 | IBMVFC_TGT_ACTION_DEL_RPORT, | 594 | IBMVFC_TGT_ACTION_DEL_RPORT, |
580 | }; | 595 | }; |
581 | 596 | ||
@@ -588,7 +603,9 @@ struct ibmvfc_target { | |||
588 | int target_id; | 603 | int target_id; |
589 | enum ibmvfc_target_action action; | 604 | enum ibmvfc_target_action action; |
590 | int need_login; | 605 | int need_login; |
606 | int add_rport; | ||
591 | int init_retries; | 607 | int init_retries; |
608 | int logo_rcvd; | ||
592 | u32 cancel_key; | 609 | u32 cancel_key; |
593 | struct ibmvfc_service_parms service_parms; | 610 | struct ibmvfc_service_parms service_parms; |
594 | struct ibmvfc_service_parms service_parms_change; | 611 | struct ibmvfc_service_parms service_parms_change; |
@@ -627,6 +644,8 @@ struct ibmvfc_event_pool { | |||
627 | 644 | ||
628 | enum ibmvfc_host_action { | 645 | enum ibmvfc_host_action { |
629 | IBMVFC_HOST_ACTION_NONE = 0, | 646 | IBMVFC_HOST_ACTION_NONE = 0, |
647 | IBMVFC_HOST_ACTION_LOGO, | ||
648 | IBMVFC_HOST_ACTION_LOGO_WAIT, | ||
630 | IBMVFC_HOST_ACTION_INIT, | 649 | IBMVFC_HOST_ACTION_INIT, |
631 | IBMVFC_HOST_ACTION_INIT_WAIT, | 650 | IBMVFC_HOST_ACTION_INIT_WAIT, |
632 | IBMVFC_HOST_ACTION_QUERY, | 651 | IBMVFC_HOST_ACTION_QUERY, |
@@ -635,7 +654,6 @@ enum ibmvfc_host_action { | |||
635 | IBMVFC_HOST_ACTION_ALLOC_TGTS, | 654 | IBMVFC_HOST_ACTION_ALLOC_TGTS, |
636 | IBMVFC_HOST_ACTION_TGT_INIT, | 655 | IBMVFC_HOST_ACTION_TGT_INIT, |
637 | IBMVFC_HOST_ACTION_TGT_DEL_FAILED, | 656 | IBMVFC_HOST_ACTION_TGT_DEL_FAILED, |
638 | IBMVFC_HOST_ACTION_TGT_ADD, | ||
639 | }; | 657 | }; |
640 | 658 | ||
641 | enum ibmvfc_host_state { | 659 | enum ibmvfc_host_state { |
@@ -682,6 +700,8 @@ struct ibmvfc_host { | |||
682 | int client_migrated; | 700 | int client_migrated; |
683 | int reinit; | 701 | int reinit; |
684 | int delay_init; | 702 | int delay_init; |
703 | int scan_complete; | ||
704 | int logged_in; | ||
685 | int events_to_log; | 705 | int events_to_log; |
686 | #define IBMVFC_AE_LINKUP 0x0001 | 706 | #define IBMVFC_AE_LINKUP 0x0001 |
687 | #define IBMVFC_AE_LINKDOWN 0x0002 | 707 | #define IBMVFC_AE_LINKDOWN 0x0002 |
@@ -692,6 +712,7 @@ struct ibmvfc_host { | |||
692 | void (*job_step) (struct ibmvfc_host *); | 712 | void (*job_step) (struct ibmvfc_host *); |
693 | struct task_struct *work_thread; | 713 | struct task_struct *work_thread; |
694 | struct tasklet_struct tasklet; | 714 | struct tasklet_struct tasklet; |
715 | struct work_struct rport_add_work_q; | ||
695 | wait_queue_head_t init_wait_q; | 716 | wait_queue_head_t init_wait_q; |
696 | wait_queue_head_t work_wait_q; | 717 | wait_queue_head_t work_wait_q; |
697 | }; | 718 | }; |
@@ -707,6 +728,12 @@ struct ibmvfc_host { | |||
707 | #define tgt_err(t, fmt, ...) \ | 728 | #define tgt_err(t, fmt, ...) \ |
708 | dev_err((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__) | 729 | dev_err((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__) |
709 | 730 | ||
731 | #define tgt_log(t, level, fmt, ...) \ | ||
732 | do { \ | ||
733 | if ((t)->vhost->log_level >= level) \ | ||
734 | tgt_err(t, fmt, ##__VA_ARGS__); \ | ||
735 | } while (0) | ||
736 | |||
710 | #define ibmvfc_dbg(vhost, ...) \ | 737 | #define ibmvfc_dbg(vhost, ...) \ |
711 | DBG_CMD(dev_info((vhost)->dev, ##__VA_ARGS__)) | 738 | DBG_CMD(dev_info((vhost)->dev, ##__VA_ARGS__)) |
712 | 739 | ||
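The new tgt_log() macro wraps tgt_err() behind the host's log_level, so per-target chatter can be suppressed on quiet systems. A short usage sketch; the threshold of 2 and the message text are illustrative, not taken from the driver.

static void example_report_prli_status(struct ibmvfc_target *tgt, u32 status)
{
	/* printed only when tgt->vhost->log_level >= 2 */
	tgt_log(tgt, 2, "Process Login failed: status 0x%x\n", status);
}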
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index c9aa7611e408..869a11bdccbd 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c | |||
@@ -70,6 +70,7 @@ | |||
70 | #include <linux/moduleparam.h> | 70 | #include <linux/moduleparam.h> |
71 | #include <linux/dma-mapping.h> | 71 | #include <linux/dma-mapping.h> |
72 | #include <linux/delay.h> | 72 | #include <linux/delay.h> |
73 | #include <linux/of.h> | ||
73 | #include <asm/firmware.h> | 74 | #include <asm/firmware.h> |
74 | #include <asm/vio.h> | 75 | #include <asm/vio.h> |
75 | #include <asm/firmware.h> | 76 | #include <asm/firmware.h> |
@@ -87,9 +88,15 @@ | |||
87 | */ | 88 | */ |
88 | static int max_id = 64; | 89 | static int max_id = 64; |
89 | static int max_channel = 3; | 90 | static int max_channel = 3; |
90 | static int init_timeout = 5; | 91 | static int init_timeout = 300; |
92 | static int login_timeout = 60; | ||
93 | static int info_timeout = 30; | ||
94 | static int abort_timeout = 60; | ||
95 | static int reset_timeout = 60; | ||
91 | static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT; | 96 | static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT; |
92 | static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2; | 97 | static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2; |
98 | static int fast_fail = 1; | ||
99 | static int client_reserve = 1; | ||
93 | 100 | ||
94 | static struct scsi_transport_template *ibmvscsi_transport_template; | 101 | static struct scsi_transport_template *ibmvscsi_transport_template; |
95 | 102 | ||
@@ -110,6 +117,10 @@ module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR); | |||
110 | MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds"); | 117 | MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds"); |
111 | module_param_named(max_requests, max_requests, int, S_IRUGO); | 118 | module_param_named(max_requests, max_requests, int, S_IRUGO); |
112 | MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter"); | 119 | MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter"); |
120 | module_param_named(fast_fail, fast_fail, int, S_IRUGO | S_IWUSR); | ||
121 | MODULE_PARM_DESC(fast_fail, "Enable fast fail. [Default=1]"); | ||
122 | module_param_named(client_reserve, client_reserve, int, S_IRUGO); | ||
123 | MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release"); | ||
113 | 124 | ||
114 | /* ------------------------------------------------------------ | 125 | /* ------------------------------------------------------------ |
115 | * Routines for the event pool and event structs | 126 | * Routines for the event pool and event structs |
@@ -781,105 +792,53 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd, | |||
781 | /* ------------------------------------------------------------ | 792 | /* ------------------------------------------------------------ |
782 | * Routines for driver initialization | 793 | * Routines for driver initialization |
783 | */ | 794 | */ |
795 | |||
784 | /** | 796 | /** |
785 | * adapter_info_rsp: - Handle response to MAD adapter info request | 797 | * map_persist_bufs: - Pre-map persistent data for adapter logins |
786 | * @evt_struct: srp_event_struct with the response | 798 | * @hostdata: ibmvscsi_host_data of host |
787 | * | 799 | * |
788 | * Used as a "done" callback by when sending adapter_info. Gets called | 800 | * Map the capabilities and adapter info DMA buffers to avoid runtime failures. |
789 | * by ibmvscsi_handle_crq() | 801 | * Return 1 on error, 0 on success. |
790 | */ | 802 | */ |
791 | static void adapter_info_rsp(struct srp_event_struct *evt_struct) | 803 | static int map_persist_bufs(struct ibmvscsi_host_data *hostdata) |
792 | { | 804 | { |
793 | struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; | ||
794 | dma_unmap_single(hostdata->dev, | ||
795 | evt_struct->iu.mad.adapter_info.buffer, | ||
796 | evt_struct->iu.mad.adapter_info.common.length, | ||
797 | DMA_BIDIRECTIONAL); | ||
798 | 805 | ||
799 | if (evt_struct->xfer_iu->mad.adapter_info.common.status) { | 806 | hostdata->caps_addr = dma_map_single(hostdata->dev, &hostdata->caps, |
800 | dev_err(hostdata->dev, "error %d getting adapter info\n", | 807 | sizeof(hostdata->caps), DMA_BIDIRECTIONAL); |
801 | evt_struct->xfer_iu->mad.adapter_info.common.status); | 808 | |
802 | } else { | 809 | if (dma_mapping_error(hostdata->dev, hostdata->caps_addr)) { |
803 | dev_info(hostdata->dev, "host srp version: %s, " | 810 | dev_err(hostdata->dev, "Unable to map capabilities buffer!\n"); |
804 | "host partition %s (%d), OS %d, max io %u\n", | 811 | return 1; |
805 | hostdata->madapter_info.srp_version, | ||
806 | hostdata->madapter_info.partition_name, | ||
807 | hostdata->madapter_info.partition_number, | ||
808 | hostdata->madapter_info.os_type, | ||
809 | hostdata->madapter_info.port_max_txu[0]); | ||
810 | |||
811 | if (hostdata->madapter_info.port_max_txu[0]) | ||
812 | hostdata->host->max_sectors = | ||
813 | hostdata->madapter_info.port_max_txu[0] >> 9; | ||
814 | |||
815 | if (hostdata->madapter_info.os_type == 3 && | ||
816 | strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) { | ||
817 | dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n", | ||
818 | hostdata->madapter_info.srp_version); | ||
819 | dev_err(hostdata->dev, "limiting scatterlists to %d\n", | ||
820 | MAX_INDIRECT_BUFS); | ||
821 | hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS; | ||
822 | } | ||
823 | } | 812 | } |
813 | |||
814 | hostdata->adapter_info_addr = dma_map_single(hostdata->dev, | ||
815 | &hostdata->madapter_info, | ||
816 | sizeof(hostdata->madapter_info), | ||
817 | DMA_BIDIRECTIONAL); | ||
818 | if (dma_mapping_error(hostdata->dev, hostdata->adapter_info_addr)) { | ||
819 | dev_err(hostdata->dev, "Unable to map adapter info buffer!\n"); | ||
820 | dma_unmap_single(hostdata->dev, hostdata->caps_addr, | ||
821 | sizeof(hostdata->caps), DMA_BIDIRECTIONAL); | ||
822 | return 1; | ||
823 | } | ||
824 | |||
825 | return 0; | ||
824 | } | 826 | } |
825 | 827 | ||
826 | /** | 828 | /** |
827 | * send_mad_adapter_info: - Sends the mad adapter info request | 829 | * unmap_persist_bufs: - Unmap persistent data needed for adapter logins |
828 | * and stores the result so it can be retrieved with | 830 | * @hostdata: ibmvscsi_host_data of host |
829 | * sysfs. We COULD consider causing a failure if the | 831 | * |
830 | * returned SRP version doesn't match ours. | 832 | * Unmap the capabilities and adapter info DMA buffers |
831 | * @hostdata: ibmvscsi_host_data of host | 833 | */ |
832 | * | 834 | static void unmap_persist_bufs(struct ibmvscsi_host_data *hostdata) |
833 | * Returns zero if successful. | ||
834 | */ | ||
835 | static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) | ||
836 | { | 835 | { |
837 | struct viosrp_adapter_info *req; | 836 | dma_unmap_single(hostdata->dev, hostdata->caps_addr, |
838 | struct srp_event_struct *evt_struct; | 837 | sizeof(hostdata->caps), DMA_BIDIRECTIONAL); |
839 | unsigned long flags; | ||
840 | dma_addr_t addr; | ||
841 | |||
842 | evt_struct = get_event_struct(&hostdata->pool); | ||
843 | if (!evt_struct) { | ||
844 | dev_err(hostdata->dev, | ||
845 | "couldn't allocate an event for ADAPTER_INFO_REQ!\n"); | ||
846 | return; | ||
847 | } | ||
848 | |||
849 | init_event_struct(evt_struct, | ||
850 | adapter_info_rsp, | ||
851 | VIOSRP_MAD_FORMAT, | ||
852 | init_timeout); | ||
853 | |||
854 | req = &evt_struct->iu.mad.adapter_info; | ||
855 | memset(req, 0x00, sizeof(*req)); | ||
856 | |||
857 | req->common.type = VIOSRP_ADAPTER_INFO_TYPE; | ||
858 | req->common.length = sizeof(hostdata->madapter_info); | ||
859 | req->buffer = addr = dma_map_single(hostdata->dev, | ||
860 | &hostdata->madapter_info, | ||
861 | sizeof(hostdata->madapter_info), | ||
862 | DMA_BIDIRECTIONAL); | ||
863 | 838 | ||
864 | if (dma_mapping_error(hostdata->dev, req->buffer)) { | 839 | dma_unmap_single(hostdata->dev, hostdata->adapter_info_addr, |
865 | if (!firmware_has_feature(FW_FEATURE_CMO)) | 840 | sizeof(hostdata->madapter_info), DMA_BIDIRECTIONAL); |
866 | dev_err(hostdata->dev, | 841 | } |
867 | "Unable to map request_buffer for " | ||
868 | "adapter_info!\n"); | ||
869 | free_event_struct(&hostdata->pool, evt_struct); | ||
870 | return; | ||
871 | } | ||
872 | |||
873 | spin_lock_irqsave(hostdata->host->host_lock, flags); | ||
874 | if (ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2)) { | ||
875 | dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n"); | ||
876 | dma_unmap_single(hostdata->dev, | ||
877 | addr, | ||
878 | sizeof(hostdata->madapter_info), | ||
879 | DMA_BIDIRECTIONAL); | ||
880 | } | ||
881 | spin_unlock_irqrestore(hostdata->host->host_lock, flags); | ||
882 | }; | ||
883 | 842 | ||
884 | /** | 843 | /** |
885 | * login_rsp: - Handle response to SRP login request | 844 | * login_rsp: - Handle response to SRP login request |
@@ -909,9 +868,7 @@ static void login_rsp(struct srp_event_struct *evt_struct) | |||
909 | } | 868 | } |
910 | 869 | ||
911 | dev_info(hostdata->dev, "SRP_LOGIN succeeded\n"); | 870 | dev_info(hostdata->dev, "SRP_LOGIN succeeded\n"); |
912 | 871 | hostdata->client_migrated = 0; | |
913 | if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta < 0) | ||
914 | dev_err(hostdata->dev, "Invalid request_limit.\n"); | ||
915 | 872 | ||
916 | /* Now we know what the real request-limit is. | 873 | /* Now we know what the real request-limit is. |
917 | * This value is set rather than added to request_limit because | 874 | * This value is set rather than added to request_limit because |
@@ -922,15 +879,12 @@ static void login_rsp(struct srp_event_struct *evt_struct) | |||
922 | 879 | ||
923 | /* If we had any pending I/Os, kick them */ | 880 | /* If we had any pending I/Os, kick them */ |
924 | scsi_unblock_requests(hostdata->host); | 881 | scsi_unblock_requests(hostdata->host); |
925 | |||
926 | send_mad_adapter_info(hostdata); | ||
927 | return; | ||
928 | } | 882 | } |
929 | 883 | ||
930 | /** | 884 | /** |
931 | * send_srp_login: - Sends the srp login | 885 | * send_srp_login: - Sends the srp login |
932 | * @hostdata: ibmvscsi_host_data of host | 886 | * @hostdata: ibmvscsi_host_data of host |
933 | * | 887 | * |
934 | * Returns zero if successful. | 888 | * Returns zero if successful. |
935 | */ | 889 | */ |
936 | static int send_srp_login(struct ibmvscsi_host_data *hostdata) | 890 | static int send_srp_login(struct ibmvscsi_host_data *hostdata) |
@@ -939,22 +893,17 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata) | |||
939 | unsigned long flags; | 893 | unsigned long flags; |
940 | struct srp_login_req *login; | 894 | struct srp_login_req *login; |
941 | struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool); | 895 | struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool); |
942 | if (!evt_struct) { | ||
943 | dev_err(hostdata->dev, "couldn't allocate an event for login req!\n"); | ||
944 | return FAILED; | ||
945 | } | ||
946 | 896 | ||
947 | init_event_struct(evt_struct, | 897 | BUG_ON(!evt_struct); |
948 | login_rsp, | 898 | init_event_struct(evt_struct, login_rsp, |
949 | VIOSRP_SRP_FORMAT, | 899 | VIOSRP_SRP_FORMAT, login_timeout); |
950 | init_timeout); | ||
951 | 900 | ||
952 | login = &evt_struct->iu.srp.login_req; | 901 | login = &evt_struct->iu.srp.login_req; |
953 | memset(login, 0x00, sizeof(struct srp_login_req)); | 902 | memset(login, 0, sizeof(*login)); |
954 | login->opcode = SRP_LOGIN_REQ; | 903 | login->opcode = SRP_LOGIN_REQ; |
955 | login->req_it_iu_len = sizeof(union srp_iu); | 904 | login->req_it_iu_len = sizeof(union srp_iu); |
956 | login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT; | 905 | login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT; |
957 | 906 | ||
958 | spin_lock_irqsave(hostdata->host->host_lock, flags); | 907 | spin_lock_irqsave(hostdata->host->host_lock, flags); |
959 | /* Start out with a request limit of 0, since this is negotiated in | 908 | /* Start out with a request limit of 0, since this is negotiated in |
960 | * the login request we are just sending and login requests always | 909 | * the login request we are just sending and login requests always |
@@ -962,13 +911,241 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata) | |||
962 | */ | 911 | */ |
963 | atomic_set(&hostdata->request_limit, 0); | 912 | atomic_set(&hostdata->request_limit, 0); |
964 | 913 | ||
965 | rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2); | 914 | rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2); |
966 | spin_unlock_irqrestore(hostdata->host->host_lock, flags); | 915 | spin_unlock_irqrestore(hostdata->host->host_lock, flags); |
967 | dev_info(hostdata->dev, "sent SRP login\n"); | 916 | dev_info(hostdata->dev, "sent SRP login\n"); |
968 | return rc; | 917 | return rc; |
969 | }; | 918 | }; |
970 | 919 | ||
971 | /** | 920 | /** |
921 | * capabilities_rsp: - Handle response to MAD adapter capabilities request | ||
922 | * @evt_struct: srp_event_struct with the response | ||
923 | * | ||
924 | * Used as a "done" callback when sending the capabilities MAD. | ||
925 | */ | ||
926 | static void capabilities_rsp(struct srp_event_struct *evt_struct) | ||
927 | { | ||
928 | struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; | ||
929 | |||
930 | if (evt_struct->xfer_iu->mad.capabilities.common.status) { | ||
931 | dev_err(hostdata->dev, "error 0x%X getting capabilities info\n", | ||
932 | evt_struct->xfer_iu->mad.capabilities.common.status); | ||
933 | } else { | ||
934 | if (hostdata->caps.migration.common.server_support != SERVER_SUPPORTS_CAP) | ||
935 | dev_info(hostdata->dev, "Partition migration not supported\n"); | ||
936 | |||
937 | if (client_reserve) { | ||
938 | if (hostdata->caps.reserve.common.server_support == | ||
939 | SERVER_SUPPORTS_CAP) | ||
940 | dev_info(hostdata->dev, "Client reserve enabled\n"); | ||
941 | else | ||
942 | dev_info(hostdata->dev, "Client reserve not supported\n"); | ||
943 | } | ||
944 | } | ||
945 | |||
946 | send_srp_login(hostdata); | ||
947 | } | ||
948 | |||
949 | /** | ||
950 | * send_mad_capabilities: - Sends the mad capabilities request | ||
951 | * and stores the result in the pre-mapped capabilities buffer. | ||
952 | * @hostdata: ibmvscsi_host_data of host | ||
953 | */ | ||
954 | static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata) | ||
955 | { | ||
956 | struct viosrp_capabilities *req; | ||
957 | struct srp_event_struct *evt_struct; | ||
958 | unsigned long flags; | ||
959 | struct device_node *of_node = hostdata->dev->archdata.of_node; | ||
960 | const char *location; | ||
961 | |||
962 | evt_struct = get_event_struct(&hostdata->pool); | ||
963 | BUG_ON(!evt_struct); | ||
964 | |||
965 | init_event_struct(evt_struct, capabilities_rsp, | ||
966 | VIOSRP_MAD_FORMAT, info_timeout); | ||
967 | |||
968 | req = &evt_struct->iu.mad.capabilities; | ||
969 | memset(req, 0, sizeof(*req)); | ||
970 | |||
971 | hostdata->caps.flags = CAP_LIST_SUPPORTED; | ||
972 | if (hostdata->client_migrated) | ||
973 | hostdata->caps.flags |= CLIENT_MIGRATED; | ||
974 | |||
975 | strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev), | ||
976 | sizeof(hostdata->caps.name)); | ||
977 | hostdata->caps.name[sizeof(hostdata->caps.name) - 1] = '\0'; | ||
978 | |||
979 | location = of_get_property(of_node, "ibm,loc-code", NULL); | ||
980 | location = location ? location : dev_name(hostdata->dev); | ||
981 | strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc)); | ||
982 | hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0'; | ||
983 | |||
984 | req->common.type = VIOSRP_CAPABILITIES_TYPE; | ||
985 | req->buffer = hostdata->caps_addr; | ||
986 | |||
987 | hostdata->caps.migration.common.cap_type = MIGRATION_CAPABILITIES; | ||
988 | hostdata->caps.migration.common.length = sizeof(hostdata->caps.migration); | ||
989 | hostdata->caps.migration.common.server_support = SERVER_SUPPORTS_CAP; | ||
990 | hostdata->caps.migration.ecl = 1; | ||
991 | |||
992 | if (client_reserve) { | ||
993 | hostdata->caps.reserve.common.cap_type = RESERVATION_CAPABILITIES; | ||
994 | hostdata->caps.reserve.common.length = sizeof(hostdata->caps.reserve); | ||
995 | hostdata->caps.reserve.common.server_support = SERVER_SUPPORTS_CAP; | ||
996 | hostdata->caps.reserve.type = CLIENT_RESERVE_SCSI_2; | ||
997 | req->common.length = sizeof(hostdata->caps); | ||
998 | } else | ||
999 | req->common.length = sizeof(hostdata->caps) - sizeof(hostdata->caps.reserve); | ||
1000 | |||
1001 | spin_lock_irqsave(hostdata->host->host_lock, flags); | ||
1002 | if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2)) | ||
1003 | dev_err(hostdata->dev, "couldn't send CAPABILITIES_REQ!\n"); | ||
1004 | spin_unlock_irqrestore(hostdata->host->host_lock, flags); | ||
1005 | }; | ||
1006 | |||
1007 | /** | ||
1008 | * fast_fail_rsp: - Handle response to MAD enable fast fail | ||
1009 | * @evt_struct: srp_event_struct with the response | ||
1010 | * | ||
1011 | * Used as a "done" callback by when sending enable fast fail. Gets called | ||
1012 | * by ibmvscsi_handle_crq() | ||
1013 | */ | ||
1014 | static void fast_fail_rsp(struct srp_event_struct *evt_struct) | ||
1015 | { | ||
1016 | struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; | ||
1017 | u8 status = evt_struct->xfer_iu->mad.fast_fail.common.status; | ||
1018 | |||
1019 | if (status == VIOSRP_MAD_NOT_SUPPORTED) | ||
1020 | dev_err(hostdata->dev, "fast_fail not supported in server\n"); | ||
1021 | else if (status == VIOSRP_MAD_FAILED) | ||
1022 | dev_err(hostdata->dev, "fast_fail request failed\n"); | ||
1023 | else if (status != VIOSRP_MAD_SUCCESS) | ||
1024 | dev_err(hostdata->dev, "error 0x%X enabling fast_fail\n", status); | ||
1025 | |||
1026 | send_mad_capabilities(hostdata); | ||
1027 | } | ||
1028 | |||
1029 | /** | ||
1030 | * enable_fast_fail - Request fast SCSI error handling from the server | ||
1031 | * @hostdata: ibmvscsi_host_data of host | ||
1032 | * | ||
1033 | * Returns zero if successful. | ||
1034 | */ | ||
1035 | static int enable_fast_fail(struct ibmvscsi_host_data *hostdata) | ||
1036 | { | ||
1037 | int rc; | ||
1038 | unsigned long flags; | ||
1039 | struct viosrp_fast_fail *fast_fail_mad; | ||
1040 | struct srp_event_struct *evt_struct; | ||
1041 | |||
1042 | if (!fast_fail) { | ||
1043 | send_mad_capabilities(hostdata); | ||
1044 | return 0; | ||
1045 | } | ||
1046 | |||
1047 | evt_struct = get_event_struct(&hostdata->pool); | ||
1048 | BUG_ON(!evt_struct); | ||
1049 | |||
1050 | init_event_struct(evt_struct, fast_fail_rsp, VIOSRP_MAD_FORMAT, info_timeout); | ||
1051 | |||
1052 | fast_fail_mad = &evt_struct->iu.mad.fast_fail; | ||
1053 | memset(fast_fail_mad, 0, sizeof(*fast_fail_mad)); | ||
1054 | fast_fail_mad->common.type = VIOSRP_ENABLE_FAST_FAIL; | ||
1055 | fast_fail_mad->common.length = sizeof(*fast_fail_mad); | ||
1056 | |||
1057 | spin_lock_irqsave(hostdata->host->host_lock, flags); | ||
1058 | rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2); | ||
1059 | spin_unlock_irqrestore(hostdata->host->host_lock, flags); | ||
1060 | return rc; | ||
1061 | } | ||
1062 | |||
1063 | /** | ||
1064 | * adapter_info_rsp: - Handle response to MAD adapter info request | ||
1065 | * @evt_struct: srp_event_struct with the response | ||
1066 | * | ||
1067 | * Used as a "done" callback when sending adapter_info. Gets called | ||
1068 | * by ibmvscsi_handle_crq() | ||
1069 | */ | ||
1070 | static void adapter_info_rsp(struct srp_event_struct *evt_struct) | ||
1071 | { | ||
1072 | struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; | ||
1073 | |||
1074 | if (evt_struct->xfer_iu->mad.adapter_info.common.status) { | ||
1075 | dev_err(hostdata->dev, "error %d getting adapter info\n", | ||
1076 | evt_struct->xfer_iu->mad.adapter_info.common.status); | ||
1077 | } else { | ||
1078 | dev_info(hostdata->dev, "host srp version: %s, " | ||
1079 | "host partition %s (%d), OS %d, max io %u\n", | ||
1080 | hostdata->madapter_info.srp_version, | ||
1081 | hostdata->madapter_info.partition_name, | ||
1082 | hostdata->madapter_info.partition_number, | ||
1083 | hostdata->madapter_info.os_type, | ||
1084 | hostdata->madapter_info.port_max_txu[0]); | ||
1085 | |||
1086 | if (hostdata->madapter_info.port_max_txu[0]) | ||
1087 | hostdata->host->max_sectors = | ||
1088 | hostdata->madapter_info.port_max_txu[0] >> 9; | ||
1089 | |||
1090 | if (hostdata->madapter_info.os_type == 3 && | ||
1091 | strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) { | ||
1092 | dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n", | ||
1093 | hostdata->madapter_info.srp_version); | ||
1094 | dev_err(hostdata->dev, "limiting scatterlists to %d\n", | ||
1095 | MAX_INDIRECT_BUFS); | ||
1096 | hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS; | ||
1097 | } | ||
1098 | } | ||
1099 | |||
1100 | enable_fast_fail(hostdata); | ||
1101 | } | ||
1102 | |||
1103 | /** | ||
1104 | * send_mad_adapter_info: - Sends the mad adapter info request | ||
1105 | * and stores the result so it can be retrieved with | ||
1106 | * sysfs. We COULD consider causing a failure if the | ||
1107 | * returned SRP version doesn't match ours. | ||
1108 | * @hostdata: ibmvscsi_host_data of host | ||
1109 | * | ||
1110 | * Returns zero if successful. | ||
1111 | */ | ||
1112 | static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) | ||
1113 | { | ||
1114 | struct viosrp_adapter_info *req; | ||
1115 | struct srp_event_struct *evt_struct; | ||
1116 | unsigned long flags; | ||
1117 | |||
1118 | evt_struct = get_event_struct(&hostdata->pool); | ||
1119 | BUG_ON(!evt_struct); | ||
1120 | |||
1121 | init_event_struct(evt_struct, | ||
1122 | adapter_info_rsp, | ||
1123 | VIOSRP_MAD_FORMAT, | ||
1124 | info_timeout); | ||
1125 | |||
1126 | req = &evt_struct->iu.mad.adapter_info; | ||
1127 | memset(req, 0x00, sizeof(*req)); | ||
1128 | |||
1129 | req->common.type = VIOSRP_ADAPTER_INFO_TYPE; | ||
1130 | req->common.length = sizeof(hostdata->madapter_info); | ||
1131 | req->buffer = hostdata->adapter_info_addr; | ||
1132 | |||
1133 | spin_lock_irqsave(hostdata->host->host_lock, flags); | ||
1134 | if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2)) | ||
1135 | dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n"); | ||
1136 | spin_unlock_irqrestore(hostdata->host->host_lock, flags); | ||
1137 | }; | ||
1138 | |||
1139 | /** | ||
1140 | * init_adapter: Start virtual adapter initialization sequence | ||
1141 | * | ||
1142 | */ | ||
1143 | static void init_adapter(struct ibmvscsi_host_data *hostdata) | ||
1144 | { | ||
1145 | send_mad_adapter_info(hostdata); | ||
1146 | } | ||
1147 | |||
1148 | /** | ||
972 | * sync_completion: Signal that a synchronous command has completed | 1149 | * sync_completion: Signal that a synchronous command has completed |
973 | * Note that after returning from this call, the evt_struct is freed. | 1150 | * Note that after returning from this call, the evt_struct is freed. |
974 | * the caller waiting on this completion shouldn't touch the evt_struct | 1151 | * the caller waiting on this completion shouldn't touch the evt_struct |
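The functions added in this hunk replace the old login-then-adapter-info ordering with a chain of MAD exchanges, each step fired from the previous step's done callback: adapter info, then (optionally) fast fail, then capabilities, and only then the SRP login. The sketch below is documentation only; it lists the ordering linearly even though in the driver each later step actually runs from the response handlers.

static void example_login_sequence(struct ibmvscsi_host_data *hostdata)
{
	send_mad_adapter_info(hostdata);
	/* adapter_info_rsp()  -> enable_fast_fail() (skipped if fast_fail=0) */
	/* fast_fail_rsp()     -> send_mad_capabilities()                     */
	/* capabilities_rsp()  -> send_srp_login()                            */
	/* login_rsp()         -> request limit set, pending I/O unblocked    */
}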
@@ -1029,7 +1206,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd) | |||
1029 | init_event_struct(evt, | 1206 | init_event_struct(evt, |
1030 | sync_completion, | 1207 | sync_completion, |
1031 | VIOSRP_SRP_FORMAT, | 1208 | VIOSRP_SRP_FORMAT, |
1032 | init_timeout); | 1209 | abort_timeout); |
1033 | 1210 | ||
1034 | tsk_mgmt = &evt->iu.srp.tsk_mgmt; | 1211 | tsk_mgmt = &evt->iu.srp.tsk_mgmt; |
1035 | 1212 | ||
@@ -1043,7 +1220,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd) | |||
1043 | evt->sync_srp = &srp_rsp; | 1220 | evt->sync_srp = &srp_rsp; |
1044 | 1221 | ||
1045 | init_completion(&evt->comp); | 1222 | init_completion(&evt->comp); |
1046 | rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2); | 1223 | rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, abort_timeout * 2); |
1047 | 1224 | ||
1048 | if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY) | 1225 | if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY) |
1049 | break; | 1226 | break; |
@@ -1152,7 +1329,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd) | |||
1152 | init_event_struct(evt, | 1329 | init_event_struct(evt, |
1153 | sync_completion, | 1330 | sync_completion, |
1154 | VIOSRP_SRP_FORMAT, | 1331 | VIOSRP_SRP_FORMAT, |
1155 | init_timeout); | 1332 | reset_timeout); |
1156 | 1333 | ||
1157 | tsk_mgmt = &evt->iu.srp.tsk_mgmt; | 1334 | tsk_mgmt = &evt->iu.srp.tsk_mgmt; |
1158 | 1335 | ||
@@ -1165,7 +1342,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd) | |||
1165 | evt->sync_srp = &srp_rsp; | 1342 | evt->sync_srp = &srp_rsp; |
1166 | 1343 | ||
1167 | init_completion(&evt->comp); | 1344 | init_completion(&evt->comp); |
1168 | rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2); | 1345 | rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, reset_timeout * 2); |
1169 | 1346 | ||
1170 | if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY) | 1347 | if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY) |
1171 | break; | 1348 | break; |
@@ -1281,7 +1458,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq, | |||
1281 | if ((rc = ibmvscsi_ops->send_crq(hostdata, | 1458 | if ((rc = ibmvscsi_ops->send_crq(hostdata, |
1282 | 0xC002000000000000LL, 0)) == 0) { | 1459 | 0xC002000000000000LL, 0)) == 0) { |
1283 | /* Now login */ | 1460 | /* Now login */ |
1284 | send_srp_login(hostdata); | 1461 | init_adapter(hostdata); |
1285 | } else { | 1462 | } else { |
1286 | dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc); | 1463 | dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc); |
1287 | } | 1464 | } |
@@ -1291,7 +1468,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq, | |||
1291 | dev_info(hostdata->dev, "partner initialization complete\n"); | 1468 | dev_info(hostdata->dev, "partner initialization complete\n"); |
1292 | 1469 | ||
1293 | /* Now login */ | 1470 | /* Now login */ |
1294 | send_srp_login(hostdata); | 1471 | init_adapter(hostdata); |
1295 | break; | 1472 | break; |
1296 | default: | 1473 | default: |
1297 | dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format); | 1474 | dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format); |
@@ -1303,6 +1480,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq, | |||
1303 | if (crq->format == 0x06) { | 1480 | if (crq->format == 0x06) { |
1304 | /* We need to re-setup the interpartition connection */ | 1481 | /* We need to re-setup the interpartition connection */ |
1305 | dev_info(hostdata->dev, "Re-enabling adapter!\n"); | 1482 | dev_info(hostdata->dev, "Re-enabling adapter!\n"); |
1483 | hostdata->client_migrated = 1; | ||
1306 | purge_requests(hostdata, DID_REQUEUE); | 1484 | purge_requests(hostdata, DID_REQUEUE); |
1307 | if ((ibmvscsi_ops->reenable_crq_queue(&hostdata->queue, | 1485 | if ((ibmvscsi_ops->reenable_crq_queue(&hostdata->queue, |
1308 | hostdata)) || | 1486 | hostdata)) || |
@@ -1397,7 +1575,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, | |||
1397 | init_event_struct(evt_struct, | 1575 | init_event_struct(evt_struct, |
1398 | sync_completion, | 1576 | sync_completion, |
1399 | VIOSRP_MAD_FORMAT, | 1577 | VIOSRP_MAD_FORMAT, |
1400 | init_timeout); | 1578 | info_timeout); |
1401 | 1579 | ||
1402 | host_config = &evt_struct->iu.mad.host_config; | 1580 | host_config = &evt_struct->iu.mad.host_config; |
1403 | 1581 | ||
@@ -1419,7 +1597,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, | |||
1419 | 1597 | ||
1420 | init_completion(&evt_struct->comp); | 1598 | init_completion(&evt_struct->comp); |
1421 | spin_lock_irqsave(hostdata->host->host_lock, flags); | 1599 | spin_lock_irqsave(hostdata->host->host_lock, flags); |
1422 | rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2); | 1600 | rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2); |
1423 | spin_unlock_irqrestore(hostdata->host->host_lock, flags); | 1601 | spin_unlock_irqrestore(hostdata->host->host_lock, flags); |
1424 | if (rc == 0) | 1602 | if (rc == 0) |
1425 | wait_for_completion(&evt_struct->comp); | 1603 | wait_for_completion(&evt_struct->comp); |
@@ -1444,7 +1622,7 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev) | |||
1444 | spin_lock_irqsave(shost->host_lock, lock_flags); | 1622 | spin_lock_irqsave(shost->host_lock, lock_flags); |
1445 | if (sdev->type == TYPE_DISK) { | 1623 | if (sdev->type == TYPE_DISK) { |
1446 | sdev->allow_restart = 1; | 1624 | sdev->allow_restart = 1; |
1447 | blk_queue_rq_timeout(sdev->request_queue, 60 * HZ); | 1625 | blk_queue_rq_timeout(sdev->request_queue, 120 * HZ); |
1448 | } | 1626 | } |
1449 | scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun); | 1627 | scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun); |
1450 | spin_unlock_irqrestore(shost->host_lock, lock_flags); | 1628 | spin_unlock_irqrestore(shost->host_lock, lock_flags); |
@@ -1471,6 +1649,46 @@ static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth) | |||
1471 | /* ------------------------------------------------------------ | 1649 | /* ------------------------------------------------------------ |
1472 | * sysfs attributes | 1650 | * sysfs attributes |
1473 | */ | 1651 | */ |
1652 | static ssize_t show_host_vhost_loc(struct device *dev, | ||
1653 | struct device_attribute *attr, char *buf) | ||
1654 | { | ||
1655 | struct Scsi_Host *shost = class_to_shost(dev); | ||
1656 | struct ibmvscsi_host_data *hostdata = shost_priv(shost); | ||
1657 | int len; | ||
1658 | |||
1659 | len = snprintf(buf, sizeof(hostdata->caps.loc), "%s\n", | ||
1660 | hostdata->caps.loc); | ||
1661 | return len; | ||
1662 | } | ||
1663 | |||
1664 | static struct device_attribute ibmvscsi_host_vhost_loc = { | ||
1665 | .attr = { | ||
1666 | .name = "vhost_loc", | ||
1667 | .mode = S_IRUGO, | ||
1668 | }, | ||
1669 | .show = show_host_vhost_loc, | ||
1670 | }; | ||
1671 | |||
1672 | static ssize_t show_host_vhost_name(struct device *dev, | ||
1673 | struct device_attribute *attr, char *buf) | ||
1674 | { | ||
1675 | struct Scsi_Host *shost = class_to_shost(dev); | ||
1676 | struct ibmvscsi_host_data *hostdata = shost_priv(shost); | ||
1677 | int len; | ||
1678 | |||
1679 | len = snprintf(buf, sizeof(hostdata->caps.name), "%s\n", | ||
1680 | hostdata->caps.name); | ||
1681 | return len; | ||
1682 | } | ||
1683 | |||
1684 | static struct device_attribute ibmvscsi_host_vhost_name = { | ||
1685 | .attr = { | ||
1686 | .name = "vhost_name", | ||
1687 | .mode = S_IRUGO, | ||
1688 | }, | ||
1689 | .show = show_host_vhost_name, | ||
1690 | }; | ||
1691 | |||
1474 | static ssize_t show_host_srp_version(struct device *dev, | 1692 | static ssize_t show_host_srp_version(struct device *dev, |
1475 | struct device_attribute *attr, char *buf) | 1693 | struct device_attribute *attr, char *buf) |
1476 | { | 1694 | { |
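The two new attributes expose the names exchanged in the capabilities MAD under the scsi_host sysfs directory. A hedged userspace sketch of reading one of them; the host number in the path is illustrative.

#include <stdio.h>

int main(void)
{
	char loc[64];
	FILE *f = fopen("/sys/class/scsi_host/host0/vhost_loc", "r");

	if (!f)
		return 1;
	if (fgets(loc, sizeof(loc), f))
		printf("virtual host location: %s", loc);
	fclose(f);
	return 0;
}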
@@ -1594,6 +1812,8 @@ static struct device_attribute ibmvscsi_host_config = { | |||
1594 | }; | 1812 | }; |
1595 | 1813 | ||
1596 | static struct device_attribute *ibmvscsi_attrs[] = { | 1814 | static struct device_attribute *ibmvscsi_attrs[] = { |
1815 | &ibmvscsi_host_vhost_loc, | ||
1816 | &ibmvscsi_host_vhost_name, | ||
1597 | &ibmvscsi_host_srp_version, | 1817 | &ibmvscsi_host_srp_version, |
1598 | &ibmvscsi_host_partition_name, | 1818 | &ibmvscsi_host_partition_name, |
1599 | &ibmvscsi_host_partition_number, | 1819 | &ibmvscsi_host_partition_number, |
@@ -1657,7 +1877,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) | |||
1657 | unsigned long wait_switch = 0; | 1877 | unsigned long wait_switch = 0; |
1658 | int rc; | 1878 | int rc; |
1659 | 1879 | ||
1660 | vdev->dev.driver_data = NULL; | 1880 | dev_set_drvdata(&vdev->dev, NULL); |
1661 | 1881 | ||
1662 | host = scsi_host_alloc(&driver_template, sizeof(*hostdata)); | 1882 | host = scsi_host_alloc(&driver_template, sizeof(*hostdata)); |
1663 | if (!host) { | 1883 | if (!host) { |
@@ -1674,6 +1894,11 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) | |||
1674 | atomic_set(&hostdata->request_limit, -1); | 1894 | atomic_set(&hostdata->request_limit, -1); |
1675 | hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT; | 1895 | hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT; |
1676 | 1896 | ||
1897 | if (map_persist_bufs(hostdata)) { | ||
1898 | dev_err(&vdev->dev, "couldn't map persistent buffers\n"); | ||
1899 | goto persist_bufs_failed; | ||
1900 | } | ||
1901 | |||
1677 | rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events); | 1902 | rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events); |
1678 | if (rc != 0 && rc != H_RESOURCE) { | 1903 | if (rc != 0 && rc != H_RESOURCE) { |
1679 | dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc); | 1904 | dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc); |
@@ -1687,6 +1912,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) | |||
1687 | host->max_lun = 8; | 1912 | host->max_lun = 8; |
1688 | host->max_id = max_id; | 1913 | host->max_id = max_id; |
1689 | host->max_channel = max_channel; | 1914 | host->max_channel = max_channel; |
1915 | host->max_cmd_len = 16; | ||
1690 | 1916 | ||
1691 | if (scsi_add_host(hostdata->host, hostdata->dev)) | 1917 | if (scsi_add_host(hostdata->host, hostdata->dev)) |
1692 | goto add_host_failed; | 1918 | goto add_host_failed; |
@@ -1723,7 +1949,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) | |||
1723 | scsi_scan_host(host); | 1949 | scsi_scan_host(host); |
1724 | } | 1950 | } |
1725 | 1951 | ||
1726 | vdev->dev.driver_data = hostdata; | 1952 | dev_set_drvdata(&vdev->dev, hostdata); |
1727 | return 0; | 1953 | return 0; |
1728 | 1954 | ||
1729 | add_srp_port_failed: | 1955 | add_srp_port_failed: |
@@ -1733,6 +1959,8 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) | |||
1733 | init_pool_failed: | 1959 | init_pool_failed: |
1734 | ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events); | 1960 | ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events); |
1735 | init_crq_failed: | 1961 | init_crq_failed: |
1962 | unmap_persist_bufs(hostdata); | ||
1963 | persist_bufs_failed: | ||
1736 | scsi_host_put(host); | 1964 | scsi_host_put(host); |
1737 | scsi_host_alloc_failed: | 1965 | scsi_host_alloc_failed: |
1738 | return -1; | 1966 | return -1; |
@@ -1740,7 +1968,8 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) | |||
1740 | 1968 | ||
1741 | static int ibmvscsi_remove(struct vio_dev *vdev) | 1969 | static int ibmvscsi_remove(struct vio_dev *vdev) |
1742 | { | 1970 | { |
1743 | struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data; | 1971 | struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev); |
1972 | unmap_persist_bufs(hostdata); | ||
1744 | release_event_pool(&hostdata->pool, hostdata); | 1973 | release_event_pool(&hostdata->pool, hostdata); |
1745 | ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, | 1974 | ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, |
1746 | max_events); | 1975 | max_events); |
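Probe and remove now go through dev_set_drvdata()/dev_get_drvdata() instead of poking dev.driver_data directly, which keeps the driver working if that field is ever reorganized. A minimal sketch of the same pattern with a hypothetical private structure and callbacks.

struct example_priv {
	int dummy;
};

static int example_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct example_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;
	dev_set_drvdata(&vdev->dev, priv);   /* retrieved again in remove() */
	return 0;
}

static int example_remove(struct vio_dev *vdev)
{
	struct example_priv *priv = dev_get_drvdata(&vdev->dev);

	kfree(priv);
	return 0;
}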
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h index 2d4339d5e16e..76425303def0 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.h +++ b/drivers/scsi/ibmvscsi/ibmvscsi.h | |||
@@ -90,6 +90,7 @@ struct event_pool { | |||
90 | /* all driver data associated with a host adapter */ | 90 | /* all driver data associated with a host adapter */ |
91 | struct ibmvscsi_host_data { | 91 | struct ibmvscsi_host_data { |
92 | atomic_t request_limit; | 92 | atomic_t request_limit; |
93 | int client_migrated; | ||
93 | struct device *dev; | 94 | struct device *dev; |
94 | struct event_pool pool; | 95 | struct event_pool pool; |
95 | struct crq_queue queue; | 96 | struct crq_queue queue; |
@@ -97,6 +98,9 @@ struct ibmvscsi_host_data { | |||
97 | struct list_head sent; | 98 | struct list_head sent; |
98 | struct Scsi_Host *host; | 99 | struct Scsi_Host *host; |
99 | struct mad_adapter_info_data madapter_info; | 100 | struct mad_adapter_info_data madapter_info; |
101 | struct capabilities caps; | ||
102 | dma_addr_t caps_addr; | ||
103 | dma_addr_t adapter_info_addr; | ||
100 | }; | 104 | }; |
101 | 105 | ||
102 | /* routines for managing a command/response queue */ | 106 | /* routines for managing a command/response queue */ |
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c index e2dd6a45924a..d5eaf9727109 100644 --- a/drivers/scsi/ibmvscsi/ibmvstgt.c +++ b/drivers/scsi/ibmvscsi/ibmvstgt.c | |||
@@ -892,7 +892,7 @@ free_vport: | |||
892 | 892 | ||
893 | static int ibmvstgt_remove(struct vio_dev *dev) | 893 | static int ibmvstgt_remove(struct vio_dev *dev) |
894 | { | 894 | { |
895 | struct srp_target *target = (struct srp_target *) dev->dev.driver_data; | 895 | struct srp_target *target = dev_get_drvdata(&dev->dev); |
896 | struct Scsi_Host *shost = target->shost; | 896 | struct Scsi_Host *shost = target->shost; |
897 | struct vio_port *vport = target->ldata; | 897 | struct vio_port *vport = target->ldata; |
898 | 898 | ||
diff --git a/drivers/scsi/ibmvscsi/viosrp.h b/drivers/scsi/ibmvscsi/viosrp.h index 204604501ad8..2cd735d1d196 100644 --- a/drivers/scsi/ibmvscsi/viosrp.h +++ b/drivers/scsi/ibmvscsi/viosrp.h | |||
@@ -37,6 +37,7 @@ | |||
37 | 37 | ||
38 | #define SRP_VERSION "16.a" | 38 | #define SRP_VERSION "16.a" |
39 | #define SRP_MAX_IU_LEN 256 | 39 | #define SRP_MAX_IU_LEN 256 |
40 | #define SRP_MAX_LOC_LEN 32 | ||
40 | 41 | ||
41 | union srp_iu { | 42 | union srp_iu { |
42 | struct srp_login_req login_req; | 43 | struct srp_login_req login_req; |
@@ -86,7 +87,37 @@ enum viosrp_mad_types { | |||
86 | VIOSRP_EMPTY_IU_TYPE = 0x01, | 87 | VIOSRP_EMPTY_IU_TYPE = 0x01, |
87 | VIOSRP_ERROR_LOG_TYPE = 0x02, | 88 | VIOSRP_ERROR_LOG_TYPE = 0x02, |
88 | VIOSRP_ADAPTER_INFO_TYPE = 0x03, | 89 | VIOSRP_ADAPTER_INFO_TYPE = 0x03, |
89 | VIOSRP_HOST_CONFIG_TYPE = 0x04 | 90 | VIOSRP_HOST_CONFIG_TYPE = 0x04, |
91 | VIOSRP_CAPABILITIES_TYPE = 0x05, | ||
92 | VIOSRP_ENABLE_FAST_FAIL = 0x08, | ||
93 | }; | ||
94 | |||
95 | enum viosrp_mad_status { | ||
96 | VIOSRP_MAD_SUCCESS = 0x00, | ||
97 | VIOSRP_MAD_NOT_SUPPORTED = 0xF1, | ||
98 | VIOSRP_MAD_FAILED = 0xF7, | ||
99 | }; | ||
100 | |||
101 | enum viosrp_capability_type { | ||
102 | MIGRATION_CAPABILITIES = 0x01, | ||
103 | RESERVATION_CAPABILITIES = 0x02, | ||
104 | }; | ||
105 | |||
106 | enum viosrp_capability_support { | ||
107 | SERVER_DOES_NOT_SUPPORTS_CAP = 0x0, | ||
108 | SERVER_SUPPORTS_CAP = 0x01, | ||
109 | SERVER_CAP_DATA = 0x02, | ||
110 | }; | ||
111 | |||
112 | enum viosrp_reserve_type { | ||
113 | CLIENT_RESERVE_SCSI_2 = 0x01, | ||
114 | }; | ||
115 | |||
116 | enum viosrp_capability_flag { | ||
117 | CLIENT_MIGRATED = 0x01, | ||
118 | CLIENT_RECONNECT = 0x02, | ||
119 | CAP_LIST_SUPPORTED = 0x04, | ||
120 | CAP_LIST_DATA = 0x08, | ||
90 | }; | 121 | }; |
91 | 122 | ||
92 | /* | 123 | /* |
@@ -127,11 +158,46 @@ struct viosrp_host_config { | |||
127 | u64 buffer; | 158 | u64 buffer; |
128 | }; | 159 | }; |
129 | 160 | ||
161 | struct viosrp_fast_fail { | ||
162 | struct mad_common common; | ||
163 | }; | ||
164 | |||
165 | struct viosrp_capabilities { | ||
166 | struct mad_common common; | ||
167 | u64 buffer; | ||
168 | }; | ||
169 | |||
170 | struct mad_capability_common { | ||
171 | u32 cap_type; | ||
172 | u16 length; | ||
173 | u16 server_support; | ||
174 | }; | ||
175 | |||
176 | struct mad_reserve_cap { | ||
177 | struct mad_capability_common common; | ||
178 | u32 type; | ||
179 | }; | ||
180 | |||
181 | struct mad_migration_cap { | ||
182 | struct mad_capability_common common; | ||
183 | u32 ecl; | ||
184 | }; | ||
185 | |||
186 | struct capabilities { | ||
187 | u32 flags; | ||
188 | char name[SRP_MAX_LOC_LEN]; | ||
189 | char loc[SRP_MAX_LOC_LEN]; | ||
190 | struct mad_migration_cap migration; | ||
191 | struct mad_reserve_cap reserve; | ||
192 | }; | ||
193 | |||
130 | union mad_iu { | 194 | union mad_iu { |
131 | struct viosrp_empty_iu empty_iu; | 195 | struct viosrp_empty_iu empty_iu; |
132 | struct viosrp_error_log error_log; | 196 | struct viosrp_error_log error_log; |
133 | struct viosrp_adapter_info adapter_info; | 197 | struct viosrp_adapter_info adapter_info; |
134 | struct viosrp_host_config host_config; | 198 | struct viosrp_host_config host_config; |
199 | struct viosrp_fast_fail fast_fail; | ||
200 | struct viosrp_capabilities capabilities; | ||
135 | }; | 201 | }; |
136 | 202 | ||
137 | union viosrp_iu { | 203 | union viosrp_iu { |