Diffstat (limited to 'drivers')
43 files changed, 930 insertions, 1259 deletions
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 34b0da5cfa0a..1437d7ee3b19 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -378,21 +378,6 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
 	return iser_conn_set_full_featured_mode(conn);
 }
 
-static void
-iscsi_iser_conn_terminate(struct iscsi_conn *conn)
-{
-	struct iscsi_iser_conn *iser_conn = conn->dd_data;
-	struct iser_conn *ib_conn = iser_conn->ib_conn;
-
-	BUG_ON(!ib_conn);
-	/* starts conn teardown process, waits until all previously *
-	 * posted buffers get flushed, deallocates all conn resources */
-	iser_conn_terminate(ib_conn);
-	iser_conn->ib_conn = NULL;
-	conn->recv_lock = NULL;
-}
-
-
 static struct iscsi_transport iscsi_iser_transport;
 
 static struct iscsi_cls_session *
@@ -555,13 +540,13 @@ iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms)
 static void
 iscsi_iser_ep_disconnect(__u64 ep_handle)
 {
-	struct iser_conn *ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
+	struct iser_conn *ib_conn;
 
+	ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
 	if (!ib_conn)
 		return;
 
 	iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state);
-
 	iser_conn_terminate(ib_conn);
 }
 
@@ -614,9 +599,6 @@ static struct iscsi_transport iscsi_iser_transport = {
 	.get_session_param = iscsi_session_get_param,
 	.start_conn = iscsi_iser_conn_start,
 	.stop_conn = iscsi_conn_stop,
-	/* these are called as part of conn recovery */
-	.suspend_conn_recv = NULL, /* FIXME is/how this relvant to iser? */
-	.terminate_conn = iscsi_iser_conn_terminate,
 	/* IO */
 	.send_pdu = iscsi_conn_send_pdu,
 	.get_stats = iscsi_iser_conn_get_stats,
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index d4cb144ab402..c537d71c18e4 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -640,7 +640,6 @@ typedef struct _MPT_ADAPTER
 	struct work_struct fc_setup_reset_work;
 	struct list_head fc_rports;
 	spinlock_t fc_rescan_work_lock;
-	int fc_rescan_work_count;
 	struct work_struct fc_rescan_work;
 	char fc_rescan_work_q_name[KOBJ_NAME_LEN];
 	struct workqueue_struct *fc_rescan_work_q;
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 90da7d63b08e..85696f34c310 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -669,7 +669,10 @@ mptfc_GetFcPortPage0(MPT_ADAPTER *ioc, int portnum)
 	 * if still doing discovery,
 	 * hang loose a while until finished
 	 */
-	if (pp0dest->PortState == MPI_FCPORTPAGE0_PORTSTATE_UNKNOWN) {
+	if ((pp0dest->PortState == MPI_FCPORTPAGE0_PORTSTATE_UNKNOWN) ||
+	    (pp0dest->PortState == MPI_FCPORTPAGE0_PORTSTATE_ONLINE &&
+	     (pp0dest->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK)
+	      == MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT)) {
 		if (count-- > 0) {
 			msleep(100);
 			goto try_again;
@@ -895,59 +898,45 @@ mptfc_rescan_devices(void *arg)
 {
 	MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
 	int ii;
-	int work_to_do;
 	u64 pn;
-	unsigned long flags;
 	struct mptfc_rport_info *ri;
 
-	do {
-		/* start by tagging all ports as missing */
-		list_for_each_entry(ri, &ioc->fc_rports, list) {
-			if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) {
-				ri->flags |= MPT_RPORT_INFO_FLAGS_MISSING;
-			}
-		}
+	/* start by tagging all ports as missing */
+	list_for_each_entry(ri, &ioc->fc_rports, list) {
+		if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) {
+			ri->flags |= MPT_RPORT_INFO_FLAGS_MISSING;
+		}
+	}
 
 	/*
 	 * now rescan devices known to adapter,
 	 * will reregister existing rports
 	 */
 	for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) {
 		(void) mptfc_GetFcPortPage0(ioc, ii);
-		mptfc_init_host_attr(ioc,ii);	/* refresh */
-		mptfc_GetFcDevPage0(ioc,ii,mptfc_register_dev);
+		mptfc_init_host_attr(ioc, ii);	/* refresh */
+		mptfc_GetFcDevPage0(ioc, ii, mptfc_register_dev);
 	}
 
 	/* delete devices still missing */
 	list_for_each_entry(ri, &ioc->fc_rports, list) {
 		/* if newly missing, delete it */
 		if (ri->flags & MPT_RPORT_INFO_FLAGS_MISSING) {
 
 			ri->flags &= ~(MPT_RPORT_INFO_FLAGS_REGISTERED|
 				       MPT_RPORT_INFO_FLAGS_MISSING);
 			fc_remote_port_delete(ri->rport);	/* won't sleep */
 			ri->rport = NULL;
 
 			pn = (u64)ri->pg0.WWPN.High << 32 |
 			     (u64)ri->pg0.WWPN.Low;
 			dfcprintk ((MYIOC_s_INFO_FMT
 				"mptfc_rescan.%d: %llx deleted\n",
 				ioc->name,
 				ioc->sh->host_no,
 				(unsigned long long)pn));
 		}
 	}
-
-	/*
-	 * allow multiple passes as target state
-	 * might have changed during scan
-	 */
-	spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
-	if (ioc->fc_rescan_work_count > 2)	/* only need one more */
-		ioc->fc_rescan_work_count = 2;
-	work_to_do = --ioc->fc_rescan_work_count;
-	spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
-	} while (work_to_do);
 }
 
 static int
@@ -1159,7 +1148,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	 * by doing it via the workqueue, some locking is eliminated
 	 */
 
-	ioc->fc_rescan_work_count = 1;
 	queue_work(ioc->fc_rescan_work_q, &ioc->fc_rescan_work);
 	flush_workqueue(ioc->fc_rescan_work_q);
 
@@ -1202,10 +1190,8 @@ mptfc_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
 	case MPI_EVENT_RESCAN:
 		spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
 		if (ioc->fc_rescan_work_q) {
-			if (ioc->fc_rescan_work_count++ == 0) {
-				queue_work(ioc->fc_rescan_work_q,
-					&ioc->fc_rescan_work);
-			}
+			queue_work(ioc->fc_rescan_work_q,
+				   &ioc->fc_rescan_work);
 		}
 		spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
 		break;
@@ -1248,10 +1234,8 @@ mptfc_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
 		mptfc_SetFcPortPage1_defaults(ioc);
 		spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
 		if (ioc->fc_rescan_work_q) {
-			if (ioc->fc_rescan_work_count++ == 0) {
-				queue_work(ioc->fc_rescan_work_q,
-					&ioc->fc_rescan_work);
-			}
+			queue_work(ioc->fc_rescan_work_q,
+				   &ioc->fc_rescan_work);
 		}
 		spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
 	}
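Note (not part of the patch): the mptfc hunks above drop the hand-rolled fc_rescan_work_count and simply queue the rescan work whenever an event arrives; the workqueue itself coalesces requests, since queue_work() is a no-op while the work item is still pending. A minimal sketch of that pattern, using the current two-argument workqueue API and made-up names (rescan_ctx, rescan_fn, on_rescan_event are illustrative only, not from the driver):

#include <linux/workqueue.h>
#include <linux/spinlock.h>

struct rescan_ctx {
	spinlock_t lock;
	struct workqueue_struct *wq;	/* NULL once the queue is torn down */
	struct work_struct work;	/* INIT_WORK(&ctx->work, rescan_fn) at setup */
};

/* Single full rescan pass; no internal retry counter needed. */
static void rescan_fn(struct work_struct *work)
{
	struct rescan_ctx *ctx = container_of(work, struct rescan_ctx, work);

	/* ... tag ports missing, rescan, delete still-missing ports ... */
	(void)ctx;
}

/* Event side: queue unconditionally while the queue still exists;
 * queue_work() collapses the request if the item is already pending. */
static void on_rescan_event(struct rescan_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	if (ctx->wq)
		queue_work(ctx->wq, &ctx->work);
	spin_unlock_irqrestore(&ctx->lock, flags);
}

If an event fires while rescan_fn() is already running, the work item is queued again and one more full pass runs afterwards, which is the behaviour the removed counter was emulating.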
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 9cd789b8acd4..adc9d8f2c28f 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -112,6 +112,105 @@ _zfcp_hex_dump(char *addr, int count)
 	printk("\n");
 }
 
+
+/****************************************************************/
+/****** Functions to handle the request ID hash table    *******/
+/****************************************************************/
+
+#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
+
+static int zfcp_reqlist_init(struct zfcp_adapter *adapter)
+{
+	int i;
+
+	adapter->req_list = kcalloc(REQUEST_LIST_SIZE, sizeof(struct list_head),
+				    GFP_KERNEL);
+
+	if (!adapter->req_list)
+		return -ENOMEM;
+
+	for (i=0; i<REQUEST_LIST_SIZE; i++)
+		INIT_LIST_HEAD(&adapter->req_list[i]);
+
+	return 0;
+}
+
+static void zfcp_reqlist_free(struct zfcp_adapter *adapter)
+{
+	struct zfcp_fsf_req *request, *tmp;
+	unsigned int i;
+
+	for (i=0; i<REQUEST_LIST_SIZE; i++) {
+		if (list_empty(&adapter->req_list[i]))
+			continue;
+
+		list_for_each_entry_safe(request, tmp,
+					 &adapter->req_list[i], list)
+			list_del(&request->list);
+	}
+
+	kfree(adapter->req_list);
+}
+
+void zfcp_reqlist_add(struct zfcp_adapter *adapter,
+		      struct zfcp_fsf_req *fsf_req)
+{
+	unsigned int i;
+
+	i = fsf_req->req_id % REQUEST_LIST_SIZE;
+	list_add_tail(&fsf_req->list, &adapter->req_list[i]);
+}
+
+void zfcp_reqlist_remove(struct zfcp_adapter *adapter, unsigned long req_id)
+{
+	struct zfcp_fsf_req *request, *tmp;
+	unsigned int i, counter;
+	u64 dbg_tmp[2];
+
+	i = req_id % REQUEST_LIST_SIZE;
+	BUG_ON(list_empty(&adapter->req_list[i]));
+
+	counter = 0;
+	list_for_each_entry_safe(request, tmp, &adapter->req_list[i], list) {
+		if (request->req_id == req_id) {
+			dbg_tmp[0] = (u64) atomic_read(&adapter->reqs_active);
+			dbg_tmp[1] = (u64) counter;
+			debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16);
+			list_del(&request->list);
+			break;
+		}
+		counter++;
+	}
+}
+
+struct zfcp_fsf_req *zfcp_reqlist_ismember(struct zfcp_adapter *adapter,
+					   unsigned long req_id)
+{
+	struct zfcp_fsf_req *request, *tmp;
+	unsigned int i;
+
+	i = req_id % REQUEST_LIST_SIZE;
+
+	list_for_each_entry_safe(request, tmp, &adapter->req_list[i], list)
+		if (request->req_id == req_id)
+			return request;
+
+	return NULL;
+}
+
+int zfcp_reqlist_isempty(struct zfcp_adapter *adapter)
+{
+	unsigned int i;
+
+	for (i=0; i<REQUEST_LIST_SIZE; i++)
+		if (!list_empty(&adapter->req_list[i]))
+			return 0;
+
+	return 1;
+}
+
+#undef ZFCP_LOG_AREA
+
 /****************************************************************/
 /************** Uncategorised Functions *************************/
 /****************************************************************/
@@ -961,8 +1060,12 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
 	INIT_LIST_HEAD(&adapter->port_remove_lh);
 
 	/* initialize list of fsf requests */
-	spin_lock_init(&adapter->fsf_req_list_lock);
-	INIT_LIST_HEAD(&adapter->fsf_req_list_head);
+	spin_lock_init(&adapter->req_list_lock);
+	retval = zfcp_reqlist_init(adapter);
+	if (retval) {
+		ZFCP_LOG_INFO("request list initialization failed\n");
+		goto failed_low_mem_buffers;
+	}
 
 	/* initialize debug locks */
 
@@ -1041,8 +1144,6 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
  *	!0 - struct zfcp_adapter data structure could not be removed
  *	     (e.g. still used)
  * locks:	adapter list write lock is assumed to be held by caller
- *		adapter->fsf_req_list_lock is taken and released within this
- *		function and must not be held on entry
  */
 void
 zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
@@ -1054,14 +1155,14 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
 	zfcp_sysfs_adapter_remove_files(&adapter->ccw_device->dev);
 	dev_set_drvdata(&adapter->ccw_device->dev, NULL);
 	/* sanity check: no pending FSF requests */
-	spin_lock_irqsave(&adapter->fsf_req_list_lock, flags);
-	retval = !list_empty(&adapter->fsf_req_list_head);
-	spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
-	if (retval) {
+	spin_lock_irqsave(&adapter->req_list_lock, flags);
+	retval = zfcp_reqlist_isempty(adapter);
+	spin_unlock_irqrestore(&adapter->req_list_lock, flags);
+	if (!retval) {
 		ZFCP_LOG_NORMAL("bug: adapter %s (%p) still in use, "
 				"%i requests outstanding\n",
 				zfcp_get_busid_by_adapter(adapter), adapter,
-				atomic_read(&adapter->fsf_reqs_active));
+				atomic_read(&adapter->reqs_active));
 		retval = -EBUSY;
 		goto out;
 	}
@@ -1087,6 +1188,7 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
 	zfcp_free_low_mem_buffers(adapter);
 	/* free memory of adapter data structure and queues */
 	zfcp_qdio_free_queues(adapter);
+	zfcp_reqlist_free(adapter);
 	kfree(adapter->fc_stats);
 	kfree(adapter->stats_reset_data);
 	ZFCP_LOG_TRACE("freeing adapter structure\n");
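Note (not part of the patch): the zfcp_aux.c hunk above replaces the single fsf_req_list_head with an array of REQUEST_LIST_SIZE list heads indexed by req_id % REQUEST_LIST_SIZE, so an outstanding request can be found from its numeric ID without walking every pending request. A short usage sketch under the patch's own helper names; track_request() and find_request() are hypothetical wrappers written only for illustration, and the irqsave locking mirrors the allocation path rather than every caller:

/* Assumes the zfcp definitions added by this patch are in scope. */
static void track_request(struct zfcp_adapter *adapter,
			  struct zfcp_fsf_req *fsf_req)
{
	unsigned long flags;

	spin_lock_irqsave(&adapter->req_list_lock, flags);
	fsf_req->req_id = adapter->req_no++;	/* unique, monotonically growing ID */
	zfcp_reqlist_add(adapter, fsf_req);	/* bucket = req_id % REQUEST_LIST_SIZE */
	spin_unlock_irqrestore(&adapter->req_list_lock, flags);
}

static struct zfcp_fsf_req *find_request(struct zfcp_adapter *adapter,
					 unsigned long req_id)
{
	struct zfcp_fsf_req *req;
	unsigned long flags;

	spin_lock_irqsave(&adapter->req_list_lock, flags);
	req = zfcp_reqlist_ismember(adapter, req_id);	/* walks one bucket only */
	spin_unlock_irqrestore(&adapter->req_list_lock, flags);
	return req;
}

Add and lookup use the same modulo arithmetic, so a given request ID always maps to the same bucket.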
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 57d8e4bfb8d9..fdabadeaa9ee 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -164,6 +164,11 @@ zfcp_ccw_set_online(struct ccw_device *ccw_device)
 	retval = zfcp_adapter_scsi_register(adapter);
 	if (retval)
 		goto out_scsi_register;
+
+	/* initialize request counter */
+	BUG_ON(!zfcp_reqlist_isempty(adapter));
+	adapter->req_no = 0;
+
 	zfcp_erp_modify_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING,
 				       ZFCP_SET);
 	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 2df512a18e2c..94d1b74db356 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -52,7 +52,7 @@
 /********************* GENERAL DEFINES *********************************/
 
 /* zfcp version number, it consists of major, minor, and patch-level number */
-#define ZFCP_VERSION "4.7.0"
+#define ZFCP_VERSION "4.8.0"
 
 /**
  * zfcp_sg_to_address - determine kernel address from struct scatterlist
@@ -80,7 +80,7 @@ zfcp_address_to_sg(void *address, struct scatterlist *list)
 #define REQUEST_LIST_SIZE 128
 
 /********************* SCSI SPECIFIC DEFINES *********************************/
-#define ZFCP_SCSI_ER_TIMEOUT (100*HZ)
+#define ZFCP_SCSI_ER_TIMEOUT (10*HZ)
 
 /********************* CIO/QDIO SPECIFIC DEFINES *****************************/
 
@@ -886,11 +886,11 @@ struct zfcp_adapter {
 	struct list_head port_remove_lh;	/* head of ports to be
 						   removed */
 	u32 ports;				/* number of remote ports */
 	struct timer_list scsi_er_timer;	/* SCSI err recovery watch */
-	struct list_head fsf_req_list_head;	/* head of FSF req list */
-	spinlock_t fsf_req_list_lock;		/* lock for ops on list of
-						   FSF requests */
-	atomic_t fsf_reqs_active;		/* # active FSF reqs */
+	atomic_t reqs_active;			/* # active FSF reqs */
+	unsigned long req_no;			/* unique FSF req number */
+	struct list_head *req_list;		/* list of pending reqs */
+	spinlock_t req_list_lock;		/* request list lock */
 	struct zfcp_qdio_queue request_queue;	/* request queue */
 	u32 fsf_req_seq_no;			/* FSF cmnd seq number */
 	wait_queue_head_t request_wq;		/* can be used to wait for
@@ -986,6 +986,7 @@ struct zfcp_unit {
 /* FSF request */
 struct zfcp_fsf_req {
 	struct list_head list;			/* list of FSF requests */
+	unsigned long req_id;			/* unique request ID */
 	struct zfcp_adapter *adapter;		/* adapter request belongs to */
 	u8 sbal_number;				/* nr of SBALs free for use */
 	u8 sbal_first;				/* first SBAL for this request */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 8ec8da0beaa8..7f60b6fdf724 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -64,8 +64,8 @@ static int zfcp_erp_strategy_check_action(struct zfcp_erp_action *, int);
 static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *);
 static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *, int);
 static int zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *);
-static int zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *);
-static int zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *);
+static void zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *);
+static void zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *);
 static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *);
 static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *);
 static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *);
@@ -93,10 +93,9 @@ static int zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *);
 static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *);
 static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *);
 
-static int zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *);
-static int zfcp_erp_action_dismiss_port(struct zfcp_port *);
-static int zfcp_erp_action_dismiss_unit(struct zfcp_unit *);
-static int zfcp_erp_action_dismiss(struct zfcp_erp_action *);
+static void zfcp_erp_action_dismiss_port(struct zfcp_port *);
+static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *);
+static void zfcp_erp_action_dismiss(struct zfcp_erp_action *);
 
 static int zfcp_erp_action_enqueue(int, struct zfcp_adapter *,
 				   struct zfcp_port *, struct zfcp_unit *);
@@ -135,29 +134,39 @@ zfcp_fsf_request_timeout_handler(unsigned long data)
 	zfcp_erp_adapter_reopen(adapter, 0);
 }
 
-/*
- * function:    zfcp_fsf_scsi_er_timeout_handler
+/**
+ * zfcp_fsf_scsi_er_timeout_handler - timeout handler for scsi eh tasks
  *
- * purpose:     This function needs to be called whenever a SCSI error recovery
- * action (abort/reset) does not return.
- * Re-opening the adapter means that the command can be returned
- * by zfcp (it is guarranteed that it does not return via the
- * adapter anymore). The buffer can then be used again.
- *
- * returns:     sod all
+ * This function needs to be called whenever a SCSI error recovery
+ * action (abort/reset) does not return. Re-opening the adapter means
+ * that the abort/reset command can be returned by zfcp. It won't complete
+ * via the adapter anymore (because qdio queues are closed). If ERP is
+ * already running on this adapter it will be stopped.
 */
-void
-zfcp_fsf_scsi_er_timeout_handler(unsigned long data)
+void zfcp_fsf_scsi_er_timeout_handler(unsigned long data)
 {
 	struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
+	unsigned long flags;
 
 	ZFCP_LOG_NORMAL("warning: SCSI error recovery timed out. "
 			"Restarting all operations on the adapter %s\n",
 			zfcp_get_busid_by_adapter(adapter));
 	debug_text_event(adapter->erp_dbf, 1, "eh_lmem_tout");
-	zfcp_erp_adapter_reopen(adapter, 0);
 
-	return;
+	write_lock_irqsave(&adapter->erp_lock, flags);
+	if (atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
+			     &adapter->status)) {
+		zfcp_erp_modify_adapter_status(adapter,
+			ZFCP_STATUS_COMMON_UNBLOCKED|ZFCP_STATUS_COMMON_OPEN,
+			ZFCP_CLEAR);
+		zfcp_erp_action_dismiss_adapter(adapter);
+		write_unlock_irqrestore(&adapter->erp_lock, flags);
+		/* dismiss all pending requests including requests for ERP */
+		zfcp_fsf_req_dismiss_all(adapter);
+		adapter->fsf_req_seq_no = 0;
+	} else
+		write_unlock_irqrestore(&adapter->erp_lock, flags);
+	zfcp_erp_adapter_reopen(adapter, 0);
 }
 
 /*
@@ -670,17 +679,10 @@ zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear_mask)
 	return retval;
 }
 
-/*
- * function:
- *
- * purpose:	disable I/O,
- *		return any open requests and clean them up,
- *		aim: no pending and incoming I/O
- *
- * returns:
+/**
+ * zfcp_erp_adapter_block - mark adapter as blocked, block scsi requests
  */
-static void
-zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask)
+static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask)
 {
 	debug_text_event(adapter->erp_dbf, 6, "a_bl");
 	zfcp_erp_modify_adapter_status(adapter,
@@ -688,15 +690,10 @@ zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask)
 				       clear_mask, ZFCP_CLEAR);
 }
 
-/*
- * function:
- *
- * purpose:	enable I/O
- *
- * returns:
+/**
+ * zfcp_erp_adapter_unblock - mark adapter as unblocked, allow scsi requests
  */
-static void
-zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
+static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
 {
 	debug_text_event(adapter->erp_dbf, 6, "a_ubl");
 	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
@@ -848,18 +845,16 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
 	struct zfcp_adapter *adapter = erp_action->adapter;
 
 	if (erp_action->fsf_req) {
-		/* take lock to ensure that request is not being deleted meanwhile */
-		spin_lock(&adapter->fsf_req_list_lock);
-		/* check whether fsf req does still exist */
-		list_for_each_entry(fsf_req, &adapter->fsf_req_list_head, list)
-			if (fsf_req == erp_action->fsf_req)
-				break;
-		if (fsf_req && (fsf_req->erp_action == erp_action)) {
+		/* take lock to ensure that request is not deleted meanwhile */
+		spin_lock(&adapter->req_list_lock);
+		if ((!zfcp_reqlist_ismember(adapter,
+					    erp_action->fsf_req->req_id)) &&
+		    (fsf_req->erp_action == erp_action)) {
 			/* fsf_req still exists */
 			debug_text_event(adapter->erp_dbf, 3, "a_ca_req");
 			debug_event(adapter->erp_dbf, 3, &fsf_req,
 				    sizeof (unsigned long));
-			/* dismiss fsf_req of timed out or dismissed erp_action */
+			/* dismiss fsf_req of timed out/dismissed erp_action */
 			if (erp_action->status & (ZFCP_STATUS_ERP_DISMISSED |
 						  ZFCP_STATUS_ERP_TIMEDOUT)) {
 				debug_text_event(adapter->erp_dbf, 3,
@@ -892,30 +887,22 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
 			 */
 			erp_action->fsf_req = NULL;
 		}
-		spin_unlock(&adapter->fsf_req_list_lock);
+		spin_unlock(&adapter->req_list_lock);
 	} else
 		debug_text_event(adapter->erp_dbf, 3, "a_ca_noreq");
 
 	return retval;
 }
 
-/*
- * purpose:	generic handler for asynchronous events related to erp_action events
- *		(normal completion, time-out, dismissing, retry after
- *		low memory condition)
- *
- * note:	deletion of timer is not required (e.g. in case of a time-out),
- *		but a second try does no harm,
- *		we leave it in here to allow for greater simplification
+/**
+ * zfcp_erp_async_handler_nolock - complete erp_action
  *
- * returns:	0 - there was an action to handle
- *		!0 - otherwise
+ * Used for normal completion, time-out, dismissal and failure after
+ * low memory condition.
  */
-static int
-zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action,
-			      unsigned long set_mask)
+static void zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action,
					  unsigned long set_mask)
 {
-	int retval;
 	struct zfcp_adapter *adapter = erp_action->adapter;
 
 	if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) {
@@ -926,43 +913,26 @@ zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action,
 		del_timer(&erp_action->timer);
 		erp_action->status |= set_mask;
 		zfcp_erp_action_ready(erp_action);
-		retval = 0;
 	} else {
 		/* action is ready or gone - nothing to do */
 		debug_text_event(adapter->erp_dbf, 3, "a_asyh_gone");
 		debug_event(adapter->erp_dbf, 3, &erp_action->action,
 			    sizeof (int));
-		retval = 1;
 	}
-
-	return retval;
 }
 
-/*
- * purpose:	generic handler for asynchronous events related to erp_action
- *		events (normal completion, time-out, dismissing, retry after
- *		low memory condition)
- *
- * note:	deletion of timer is not required (e.g. in case of a time-out),
- *		but a second try does no harm,
- *		we leave it in here to allow for greater simplification
- *
- * returns:	0 - there was an action to handle
- *		!0 - otherwise
+/**
+ * zfcp_erp_async_handler - wrapper for erp_async_handler_nolock w/ locking
  */
-int
-zfcp_erp_async_handler(struct zfcp_erp_action *erp_action,
-		       unsigned long set_mask)
+void zfcp_erp_async_handler(struct zfcp_erp_action *erp_action,
+			    unsigned long set_mask)
 {
 	struct zfcp_adapter *adapter = erp_action->adapter;
 	unsigned long flags;
-	int retval;
 
 	write_lock_irqsave(&adapter->erp_lock, flags);
-	retval = zfcp_erp_async_handler_nolock(erp_action, set_mask);
+	zfcp_erp_async_handler_nolock(erp_action, set_mask);
 	write_unlock_irqrestore(&adapter->erp_lock, flags);
-
-	return retval;
 }
 
 /*
@@ -999,17 +969,15 @@ zfcp_erp_timeout_handler(unsigned long data)
 	zfcp_erp_async_handler(erp_action, ZFCP_STATUS_ERP_TIMEDOUT);
 }
 
-/*
- * purpose:	is called for an erp_action which needs to be ended
- *		though not being done,
- *		this is usually required if an higher is generated,
- *		action gets an appropriate flag and will be processed
- *		accordingly
+/**
+ * zfcp_erp_action_dismiss - dismiss an erp_action
  *
- * locks:	erp_lock held (thus we need to call another handler variant)
+ * adapter->erp_lock must be held
+ *
+ * Dismissal of an erp_action is usually required if an erp_action of
+ * higher priority is generated.
  */
-static int
-zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action)
+static void zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action)
 {
 	struct zfcp_adapter *adapter = erp_action->adapter;
 
@@ -1017,8 +985,6 @@ zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action)
 	debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int));
 
 	zfcp_erp_async_handler_nolock(erp_action, ZFCP_STATUS_ERP_DISMISSED);
-
-	return 0;
 }
 
 int
@@ -2074,18 +2040,12 @@ zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action)
 	return retval;
 }
 
-/*
- * function: zfcp_qdio_cleanup
- *
- * purpose:	cleans up QDIO operation for the specified adapter
- *
- * returns:	0 - successful cleanup
- *		!0 - failed cleanup
+/**
+ * zfcp_erp_adapter_strategy_close_qdio - close qdio queues for an adapter
 */
-int
+static void
 zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action)
 {
-	int retval = ZFCP_ERP_SUCCEEDED;
 	int first_used;
 	int used_count;
 	struct zfcp_adapter *adapter = erp_action->adapter;
@@ -2094,15 +2054,13 @@ zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action)
 		ZFCP_LOG_DEBUG("error: attempt to shut down inactive QDIO "
 			       "queues on adapter %s\n",
 			       zfcp_get_busid_by_adapter(adapter));
-		retval = ZFCP_ERP_FAILED;
-		goto out;
+		return;
 	}
 
 	/*
 	 * Get queue_lock and clear QDIOUP flag. Thus it's guaranteed that
 	 * do_QDIO won't be called while qdio_shutdown is in progress.
 	 */
-
 	write_lock_irq(&adapter->request_queue.queue_lock);
 	atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
 	write_unlock_irq(&adapter->request_queue.queue_lock);
@@ -2134,8 +2092,6 @@ zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action)
 	adapter->request_queue.free_index = 0;
 	atomic_set(&adapter->request_queue.free_count, 0);
 	adapter->request_queue.distance_from_int = 0;
- out:
-	return retval;
 }
 
 static int
@@ -2258,11 +2214,11 @@ zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action)
 			      "%s)\n", zfcp_get_busid_by_adapter(adapter));
 		ret = ZFCP_ERP_FAILED;
 	}
-	if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status)) {
-		ZFCP_LOG_INFO("error: exchange port data failed (adapter "
+
+	/* don't treat as error for the sake of compatibility */
+	if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status))
+		ZFCP_LOG_INFO("warning: exchange port data failed (adapter "
 			      "%s\n", zfcp_get_busid_by_adapter(adapter));
-		ret = ZFCP_ERP_FAILED;
-	}
 
 	return ret;
 }
@@ -2292,18 +2248,12 @@ zfcp_erp_adapter_strategy_open_fsf_statusread(struct zfcp_erp_action
 	return retval;
 }
 
-/*
- * function: zfcp_fsf_cleanup
- *
- * purpose:	cleanup FSF operation for specified adapter
- *
- * returns:	0 - FSF operation successfully cleaned up
- *		!0 - failed to cleanup FSF operation for this adapter
+/**
+ * zfcp_erp_adapter_strategy_close_fsf - stop FSF operations for an adapter
 */
-static int
+static void
 zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *erp_action)
 {
-	int retval = ZFCP_ERP_SUCCEEDED;
 	struct zfcp_adapter *adapter = erp_action->adapter;
 
 	/*
@@ -2317,8 +2267,6 @@ zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *erp_action)
 	/* all ports and units are closed */
 	zfcp_erp_modify_adapter_status(adapter,
 				       ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
-
-	return retval;
 }
 
 /*
@@ -3293,10 +3241,8 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter,
 }
 
 
-static int
-zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
+void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
 {
-	int retval = 0;
 	struct zfcp_port *port;
 
 	debug_text_event(adapter->erp_dbf, 5, "a_actab");
@@ -3305,14 +3251,10 @@ zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
 	else
 		list_for_each_entry(port, &adapter->port_list_head, list)
 			zfcp_erp_action_dismiss_port(port);
-
-	return retval;
 }
 
-static int
-zfcp_erp_action_dismiss_port(struct zfcp_port *port)
+static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
 {
-	int retval = 0;
 	struct zfcp_unit *unit;
 	struct zfcp_adapter *adapter = port->adapter;
 
@@ -3323,22 +3265,16 @@ zfcp_erp_action_dismiss_port(struct zfcp_port *port)
 	else
 		list_for_each_entry(unit, &port->unit_list_head, list)
 			zfcp_erp_action_dismiss_unit(unit);
-
-	return retval;
 }
 
-static int
-zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit)
+static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit)
 {
-	int retval = 0;
 	struct zfcp_adapter *adapter = unit->port->adapter;
 
 	debug_text_event(adapter->erp_dbf, 5, "u_actab");
 	debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, sizeof (fcp_lun_t));
 	if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status))
 		zfcp_erp_action_dismiss(&unit->erp_action);
-
-	return retval;
 }
 
 static inline void
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index d02366004cdd..146d7a2b4c4a 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -63,7 +63,6 @@ extern int zfcp_qdio_allocate_queues(struct zfcp_adapter *);
 extern void zfcp_qdio_free_queues(struct zfcp_adapter *);
 extern int zfcp_qdio_determine_pci(struct zfcp_qdio_queue *,
 				   struct zfcp_fsf_req *);
-extern int zfcp_qdio_reqid_check(struct zfcp_adapter *, void *);
 
 extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req
 	(struct zfcp_fsf_req *, int, int);
@@ -140,6 +139,7 @@ extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, u32, int);
 extern int zfcp_erp_adapter_reopen(struct zfcp_adapter *, int);
 extern int zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int);
 extern void zfcp_erp_adapter_failed(struct zfcp_adapter *);
+extern void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *);
 
 extern void zfcp_erp_modify_port_status(struct zfcp_port *, u32, int);
 extern int zfcp_erp_port_reopen(struct zfcp_port *, int);
@@ -156,7 +156,7 @@ extern void zfcp_erp_unit_failed(struct zfcp_unit *);
 extern int zfcp_erp_thread_setup(struct zfcp_adapter *);
 extern int zfcp_erp_thread_kill(struct zfcp_adapter *);
 extern int zfcp_erp_wait(struct zfcp_adapter *);
-extern int zfcp_erp_async_handler(struct zfcp_erp_action *, unsigned long);
+extern void zfcp_erp_async_handler(struct zfcp_erp_action *, unsigned long);
 
 extern int zfcp_test_link(struct zfcp_port *);
 
@@ -190,5 +190,10 @@ extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *,
 				      struct zfcp_fsf_req *);
 extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *,
 					 struct scsi_cmnd *);
+extern void zfcp_reqlist_add(struct zfcp_adapter *, struct zfcp_fsf_req *);
+extern void zfcp_reqlist_remove(struct zfcp_adapter *, unsigned long);
+extern struct zfcp_fsf_req *zfcp_reqlist_ismember(struct zfcp_adapter *,
+						  unsigned long);
+extern int zfcp_reqlist_isempty(struct zfcp_adapter *);
 
 #endif /* ZFCP_EXT_H */
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 31db2b06faba..ff2eacf5ec8c 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c | |||
@@ -49,7 +49,6 @@ static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *); | |||
49 | static void zfcp_fsf_link_down_info_eval(struct zfcp_adapter *, | 49 | static void zfcp_fsf_link_down_info_eval(struct zfcp_adapter *, |
50 | struct fsf_link_down_info *); | 50 | struct fsf_link_down_info *); |
51 | static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *); | 51 | static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *); |
52 | static void zfcp_fsf_req_dismiss(struct zfcp_fsf_req *); | ||
53 | 52 | ||
54 | /* association between FSF command and FSF QTCB type */ | 53 | /* association between FSF command and FSF QTCB type */ |
55 | static u32 fsf_qtcb_type[] = { | 54 | static u32 fsf_qtcb_type[] = { |
@@ -146,47 +145,48 @@ zfcp_fsf_req_free(struct zfcp_fsf_req *fsf_req) | |||
146 | kfree(fsf_req); | 145 | kfree(fsf_req); |
147 | } | 146 | } |
148 | 147 | ||
149 | /* | 148 | /** |
150 | * function: | 149 | * zfcp_fsf_req_dismiss - dismiss a single fsf request |
151 | * | ||
152 | * purpose: | ||
153 | * | ||
154 | * returns: | ||
155 | * | ||
156 | * note: qdio queues shall be down (no ongoing inbound processing) | ||
157 | */ | 150 | */ |
158 | int | 151 | static void zfcp_fsf_req_dismiss(struct zfcp_adapter *adapter, |
159 | zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) | 152 | struct zfcp_fsf_req *fsf_req, |
153 | unsigned int counter) | ||
160 | { | 154 | { |
161 | struct zfcp_fsf_req *fsf_req, *tmp; | 155 | u64 dbg_tmp[2]; |
162 | unsigned long flags; | ||
163 | LIST_HEAD(remove_queue); | ||
164 | 156 | ||
165 | spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); | 157 | dbg_tmp[0] = (u64) atomic_read(&adapter->reqs_active); |
166 | list_splice_init(&adapter->fsf_req_list_head, &remove_queue); | 158 | dbg_tmp[1] = (u64) counter; |
167 | atomic_set(&adapter->fsf_reqs_active, 0); | 159 | debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16); |
168 | spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); | 160 | list_del(&fsf_req->list); |
169 | 161 | fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; | |
170 | list_for_each_entry_safe(fsf_req, tmp, &remove_queue, list) { | 162 | zfcp_fsf_req_complete(fsf_req); |
171 | list_del(&fsf_req->list); | ||
172 | zfcp_fsf_req_dismiss(fsf_req); | ||
173 | } | ||
174 | |||
175 | return 0; | ||
176 | } | 163 | } |
177 | 164 | ||
178 | /* | 165 | /** |
179 | * function: | 166 | * zfcp_fsf_req_dismiss_all - dismiss all remaining fsf requests |
180 | * | ||
181 | * purpose: | ||
182 | * | ||
183 | * returns: | ||
184 | */ | 167 | */ |
185 | static void | 168 | int zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) |
186 | zfcp_fsf_req_dismiss(struct zfcp_fsf_req *fsf_req) | ||
187 | { | 169 | { |
188 | fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; | 170 | struct zfcp_fsf_req *request, *tmp; |
189 | zfcp_fsf_req_complete(fsf_req); | 171 | unsigned long flags; |
172 | unsigned int i, counter; | ||
173 | |||
174 | spin_lock_irqsave(&adapter->req_list_lock, flags); | ||
175 | atomic_set(&adapter->reqs_active, 0); | ||
176 | for (i=0; i<REQUEST_LIST_SIZE; i++) { | ||
177 | if (list_empty(&adapter->req_list[i])) | ||
178 | continue; | ||
179 | |||
180 | counter = 0; | ||
181 | list_for_each_entry_safe(request, tmp, | ||
182 | &adapter->req_list[i], list) { | ||
183 | zfcp_fsf_req_dismiss(adapter, request, counter); | ||
184 | counter++; | ||
185 | } | ||
186 | } | ||
187 | spin_unlock_irqrestore(&adapter->req_list_lock, flags); | ||
188 | |||
189 | return 0; | ||
190 | } | 190 | } |
191 | 191 | ||
192 | /* | 192 | /* |
@@ -4592,12 +4592,14 @@ static inline void | |||
4592 | zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req) | 4592 | zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req) |
4593 | { | 4593 | { |
4594 | if (likely(fsf_req->qtcb != NULL)) { | 4594 | if (likely(fsf_req->qtcb != NULL)) { |
4595 | fsf_req->qtcb->prefix.req_seq_no = fsf_req->adapter->fsf_req_seq_no; | 4595 | fsf_req->qtcb->prefix.req_seq_no = |
4596 | fsf_req->qtcb->prefix.req_id = (unsigned long)fsf_req; | 4596 | fsf_req->adapter->fsf_req_seq_no; |
4597 | fsf_req->qtcb->prefix.req_id = fsf_req->req_id; | ||
4597 | fsf_req->qtcb->prefix.ulp_info = ZFCP_ULP_INFO_VERSION; | 4598 | fsf_req->qtcb->prefix.ulp_info = ZFCP_ULP_INFO_VERSION; |
4598 | fsf_req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_req->fsf_command]; | 4599 | fsf_req->qtcb->prefix.qtcb_type = |
4600 | fsf_qtcb_type[fsf_req->fsf_command]; | ||
4599 | fsf_req->qtcb->prefix.qtcb_version = ZFCP_QTCB_VERSION; | 4601 | fsf_req->qtcb->prefix.qtcb_version = ZFCP_QTCB_VERSION; |
4600 | fsf_req->qtcb->header.req_handle = (unsigned long)fsf_req; | 4602 | fsf_req->qtcb->header.req_handle = fsf_req->req_id; |
4601 | fsf_req->qtcb->header.fsf_command = fsf_req->fsf_command; | 4603 | fsf_req->qtcb->header.fsf_command = fsf_req->fsf_command; |
4602 | } | 4604 | } |
4603 | } | 4605 | } |
@@ -4654,6 +4656,7 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags, | |||
4654 | { | 4656 | { |
4655 | volatile struct qdio_buffer_element *sbale; | 4657 | volatile struct qdio_buffer_element *sbale; |
4656 | struct zfcp_fsf_req *fsf_req = NULL; | 4658 | struct zfcp_fsf_req *fsf_req = NULL; |
4659 | unsigned long flags; | ||
4657 | int ret = 0; | 4660 | int ret = 0; |
4658 | struct zfcp_qdio_queue *req_queue = &adapter->request_queue; | 4661 | struct zfcp_qdio_queue *req_queue = &adapter->request_queue; |
4659 | 4662 | ||
@@ -4668,6 +4671,12 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags, | |||
4668 | 4671 | ||
4669 | fsf_req->adapter = adapter; | 4672 | fsf_req->adapter = adapter; |
4670 | fsf_req->fsf_command = fsf_cmd; | 4673 | fsf_req->fsf_command = fsf_cmd; |
4674 | INIT_LIST_HEAD(&fsf_req->list); | ||
4675 | |||
4676 | /* unique request id */ | ||
4677 | spin_lock_irqsave(&adapter->req_list_lock, flags); | ||
4678 | fsf_req->req_id = adapter->req_no++; | ||
4679 | spin_unlock_irqrestore(&adapter->req_list_lock, flags); | ||
4671 | 4680 | ||
4672 | zfcp_fsf_req_qtcb_init(fsf_req); | 4681 | zfcp_fsf_req_qtcb_init(fsf_req); |
4673 | 4682 | ||
@@ -4707,7 +4716,7 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags, | |||
4707 | sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); | 4716 | sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); |
4708 | 4717 | ||
4709 | /* setup common SBALE fields */ | 4718 | /* setup common SBALE fields */ |
4710 | sbale[0].addr = fsf_req; | 4719 | sbale[0].addr = (void *) fsf_req->req_id; |
4711 | sbale[0].flags |= SBAL_FLAGS0_COMMAND; | 4720 | sbale[0].flags |= SBAL_FLAGS0_COMMAND; |
4712 | if (likely(fsf_req->qtcb != NULL)) { | 4721 | if (likely(fsf_req->qtcb != NULL)) { |
4713 | sbale[1].addr = (void *) fsf_req->qtcb; | 4722 | sbale[1].addr = (void *) fsf_req->qtcb; |
@@ -4747,7 +4756,7 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer) | |||
4747 | volatile struct qdio_buffer_element *sbale; | 4756 | volatile struct qdio_buffer_element *sbale; |
4748 | int inc_seq_no; | 4757 | int inc_seq_no; |
4749 | int new_distance_from_int; | 4758 | int new_distance_from_int; |
4750 | unsigned long flags; | 4759 | u64 dbg_tmp[2]; |
4751 | int retval = 0; | 4760 | int retval = 0; |
4752 | 4761 | ||
4753 | adapter = fsf_req->adapter; | 4762 | adapter = fsf_req->adapter; |
@@ -4761,10 +4770,10 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer) | |||
4761 | ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) sbale[1].addr, | 4770 | ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) sbale[1].addr, |
4762 | sbale[1].length); | 4771 | sbale[1].length); |
4763 | 4772 | ||
4764 | /* put allocated FSF request at list tail */ | 4773 | /* put allocated FSF request into hash table */ |
4765 | spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); | 4774 | spin_lock(&adapter->req_list_lock); |
4766 | list_add_tail(&fsf_req->list, &adapter->fsf_req_list_head); | 4775 | zfcp_reqlist_add(adapter, fsf_req); |
4767 | spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); | 4776 | spin_unlock(&adapter->req_list_lock); |
4768 | 4777 | ||
4769 | inc_seq_no = (fsf_req->qtcb != NULL); | 4778 | inc_seq_no = (fsf_req->qtcb != NULL); |
4770 | 4779 | ||
@@ -4803,6 +4812,10 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer) | |||
4803 | QDIO_FLAG_SYNC_OUTPUT, | 4812 | QDIO_FLAG_SYNC_OUTPUT, |
4804 | 0, fsf_req->sbal_first, fsf_req->sbal_number, NULL); | 4813 | 0, fsf_req->sbal_first, fsf_req->sbal_number, NULL); |
4805 | 4814 | ||
4815 | dbg_tmp[0] = (unsigned long) sbale[0].addr; | ||
4816 | dbg_tmp[1] = (u64) retval; | ||
4817 | debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16); | ||
4818 | |||
4806 | if (unlikely(retval)) { | 4819 | if (unlikely(retval)) { |
4807 | /* Queues are down..... */ | 4820 | /* Queues are down..... */ |
4808 | retval = -EIO; | 4821 | retval = -EIO; |
@@ -4812,22 +4825,17 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer) | |||
4812 | */ | 4825 | */ |
4813 | if (timer) | 4826 | if (timer) |
4814 | del_timer(timer); | 4827 | del_timer(timer); |
4815 | spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); | 4828 | spin_lock(&adapter->req_list_lock); |
4816 | list_del(&fsf_req->list); | 4829 | zfcp_reqlist_remove(adapter, fsf_req->req_id); |
4817 | spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); | 4830 | spin_unlock(&adapter->req_list_lock); |
4818 | /* | 4831 | /* undo changes in request queue made for this request */ |
4819 | * adjust the number of free SBALs in request queue as well as | ||
4820 | * position of first one | ||
4821 | */ | ||
4822 | zfcp_qdio_zero_sbals(req_queue->buffer, | 4832 | zfcp_qdio_zero_sbals(req_queue->buffer, |
4823 | fsf_req->sbal_first, fsf_req->sbal_number); | 4833 | fsf_req->sbal_first, fsf_req->sbal_number); |
4824 | atomic_add(fsf_req->sbal_number, &req_queue->free_count); | 4834 | atomic_add(fsf_req->sbal_number, &req_queue->free_count); |
4825 | req_queue->free_index -= fsf_req->sbal_number; /* increase */ | 4835 | req_queue->free_index -= fsf_req->sbal_number; |
4826 | req_queue->free_index += QDIO_MAX_BUFFERS_PER_Q; | 4836 | req_queue->free_index += QDIO_MAX_BUFFERS_PER_Q; |
4827 | req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */ | 4837 | req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */ |
4828 | ZFCP_LOG_DEBUG | 4838 | zfcp_erp_adapter_reopen(adapter, 0); |
4829 | ("error: do_QDIO failed. Buffers could not be enqueued " | ||
4830 | "to request queue.\n"); | ||
4831 | } else { | 4839 | } else { |
4832 | req_queue->distance_from_int = new_distance_from_int; | 4840 | req_queue->distance_from_int = new_distance_from_int; |
4833 | /* | 4841 | /* |
@@ -4843,7 +4851,7 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer) | |||
4843 | adapter->fsf_req_seq_no++; | 4851 | adapter->fsf_req_seq_no++; |
4844 | 4852 | ||
4845 | /* count FSF requests pending */ | 4853 | /* count FSF requests pending */ |
4846 | atomic_inc(&adapter->fsf_reqs_active); | 4854 | atomic_inc(&adapter->reqs_active); |
4847 | } | 4855 | } |
4848 | return retval; | 4856 | return retval; |
4849 | } | 4857 | } |
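Note on the hunks above: zfcp stops stashing the fsf_req pointer in the SBALE and instead allocates a numeric req_id under adapter->req_list_lock and tracks the request via zfcp_reqlist_add()/zfcp_reqlist_remove(). Those helpers are defined outside this diff; the following is only a minimal sketch of what they might look like, assuming the adapter carries an array of list heads hashed by request id (bucket count and the req_list field are assumptions, not taken from this patch; fsf_req->req_id and fsf_req->list do appear above):

    /* Sketch only: ZFCP_REQ_HASH_BUCKETS and adapter->req_list[] are assumed. */
    #define ZFCP_REQ_HASH_BUCKETS	128

    static inline unsigned int zfcp_req_hash(unsigned long req_id)
    {
    	return req_id % ZFCP_REQ_HASH_BUCKETS;
    }

    /* caller holds adapter->req_list_lock */
    static void zfcp_reqlist_add(struct zfcp_adapter *adapter,
    			     struct zfcp_fsf_req *fsf_req)
    {
    	unsigned int i = zfcp_req_hash(fsf_req->req_id);

    	list_add_tail(&fsf_req->list, &adapter->req_list[i]);
    }

    /* caller holds adapter->req_list_lock */
    static void zfcp_reqlist_remove(struct zfcp_adapter *adapter,
    				unsigned long req_id)
    {
    	struct zfcp_fsf_req *fsf_req, *tmp;
    	unsigned int i = zfcp_req_hash(req_id);

    	list_for_each_entry_safe(fsf_req, tmp, &adapter->req_list[i], list) {
    		if (fsf_req->req_id == req_id) {
    			list_del(&fsf_req->list);
    			break;
    		}
    	}
    }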
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 49ea5add4abc..dbd9f48e863e 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c | |||
@@ -282,6 +282,37 @@ zfcp_qdio_request_handler(struct ccw_device *ccw_device, | |||
282 | return; | 282 | return; |
283 | } | 283 | } |
284 | 284 | ||
285 | /** | ||
286 | * zfcp_qdio_reqid_check - checks for valid reqids or unsolicited status | ||
287 | */ | ||
288 | static int zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, | ||
289 | unsigned long req_id) | ||
290 | { | ||
291 | struct zfcp_fsf_req *fsf_req; | ||
292 | unsigned long flags; | ||
293 | |||
294 | debug_long_event(adapter->erp_dbf, 4, req_id); | ||
295 | |||
296 | spin_lock_irqsave(&adapter->req_list_lock, flags); | ||
297 | fsf_req = zfcp_reqlist_ismember(adapter, req_id); | ||
298 | |||
299 | if (!fsf_req) { | ||
300 | spin_unlock_irqrestore(&adapter->req_list_lock, flags); | ||
301 | ZFCP_LOG_NORMAL("error: unknown request id (%ld).\n", req_id); | ||
302 | zfcp_erp_adapter_reopen(adapter, 0); | ||
303 | return -EINVAL; | ||
304 | } | ||
305 | |||
306 | zfcp_reqlist_remove(adapter, req_id); | ||
307 | atomic_dec(&adapter->reqs_active); | ||
308 | spin_unlock_irqrestore(&adapter->req_list_lock, flags); | ||
309 | |||
310 | /* finish the FSF request */ | ||
311 | zfcp_fsf_req_complete(fsf_req); | ||
312 | |||
313 | return 0; | ||
314 | } | ||
315 | |||
285 | /* | 316 | /* |
286 | * function: zfcp_qdio_response_handler | 317 | * function: zfcp_qdio_response_handler |
287 | * | 318 | * |
@@ -344,7 +375,7 @@ zfcp_qdio_response_handler(struct ccw_device *ccw_device, | |||
344 | /* look for QDIO request identifiers in SB */ | 375 | /* look for QDIO request identifiers in SB */ |
345 | buffere = &buffer->element[buffere_index]; | 376 | buffere = &buffer->element[buffere_index]; |
346 | retval = zfcp_qdio_reqid_check(adapter, | 377 | retval = zfcp_qdio_reqid_check(adapter, |
347 | (void *) buffere->addr); | 378 | (unsigned long) buffere->addr); |
348 | 379 | ||
349 | if (retval) { | 380 | if (retval) { |
350 | ZFCP_LOG_NORMAL("bug: unexpected inbound " | 381 | ZFCP_LOG_NORMAL("bug: unexpected inbound " |
@@ -415,52 +446,6 @@ zfcp_qdio_response_handler(struct ccw_device *ccw_device, | |||
415 | return; | 446 | return; |
416 | } | 447 | } |
417 | 448 | ||
418 | /* | ||
419 | * function: zfcp_qdio_reqid_check | ||
420 | * | ||
421 | * purpose: checks for valid reqids or unsolicited status | ||
422 | * | ||
423 | * returns: 0 - valid request id or unsolicited status | ||
424 | * !0 - otherwise | ||
425 | */ | ||
426 | int | ||
427 | zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, void *sbale_addr) | ||
428 | { | ||
429 | struct zfcp_fsf_req *fsf_req; | ||
430 | unsigned long flags; | ||
431 | |||
432 | /* invalid (per convention used in this driver) */ | ||
433 | if (unlikely(!sbale_addr)) { | ||
434 | ZFCP_LOG_NORMAL("bug: invalid reqid\n"); | ||
435 | return -EINVAL; | ||
436 | } | ||
437 | |||
438 | /* valid request id and thus (hopefully :) valid fsf_req address */ | ||
439 | fsf_req = (struct zfcp_fsf_req *) sbale_addr; | ||
440 | |||
441 | /* serialize with zfcp_fsf_req_dismiss_all */ | ||
442 | spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); | ||
443 | if (list_empty(&adapter->fsf_req_list_head)) { | ||
444 | spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); | ||
445 | return 0; | ||
446 | } | ||
447 | list_del(&fsf_req->list); | ||
448 | atomic_dec(&adapter->fsf_reqs_active); | ||
449 | spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); | ||
450 | |||
451 | if (unlikely(adapter != fsf_req->adapter)) { | ||
452 | ZFCP_LOG_NORMAL("bug: invalid reqid (fsf_req=%p, " | ||
453 | "fsf_req->adapter=%p, adapter=%p)\n", | ||
454 | fsf_req, fsf_req->adapter, adapter); | ||
455 | return -EINVAL; | ||
456 | } | ||
457 | |||
458 | /* finish the FSF request */ | ||
459 | zfcp_fsf_req_complete(fsf_req); | ||
460 | |||
461 | return 0; | ||
462 | } | ||
463 | |||
464 | /** | 449 | /** |
465 | * zfcp_qdio_sbale_get - return pointer to SBALE of qdio_queue | 450 | * zfcp_qdio_sbale_get - return pointer to SBALE of qdio_queue |
466 | * @queue: queue from which SBALE should be returned | 451 | * @queue: queue from which SBALE should be returned |
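On the response path above, zfcp_qdio_reqid_check() recovers the request id from the SBALE address and resolves it with zfcp_reqlist_ismember(), which is likewise not part of this diff. A hedged sketch of the lookup side, reusing the hashed list layout assumed in the previous sketch:

    /* Sketch only; assumes zfcp_req_hash()/adapter->req_list[] from the
     * previous sketch. Caller holds adapter->req_list_lock. */
    static struct zfcp_fsf_req *zfcp_reqlist_ismember(struct zfcp_adapter *adapter,
    						  unsigned long req_id)
    {
    	struct zfcp_fsf_req *fsf_req;
    	unsigned int i = zfcp_req_hash(req_id);

    	list_for_each_entry(fsf_req, &adapter->req_list[i], list)
    		if (fsf_req->req_id == req_id)
    			return fsf_req;
    	return NULL;
    }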
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 671f4a6a5d18..1bb55086db9f 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c | |||
@@ -30,7 +30,6 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *, | |||
30 | void (*done) (struct scsi_cmnd *)); | 30 | void (*done) (struct scsi_cmnd *)); |
31 | static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *); | 31 | static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *); |
32 | static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *); | 32 | static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *); |
33 | static int zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *); | ||
34 | static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *); | 33 | static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *); |
35 | static int zfcp_task_management_function(struct zfcp_unit *, u8, | 34 | static int zfcp_task_management_function(struct zfcp_unit *, u8, |
36 | struct scsi_cmnd *); | 35 | struct scsi_cmnd *); |
@@ -46,30 +45,22 @@ struct zfcp_data zfcp_data = { | |||
46 | .scsi_host_template = { | 45 | .scsi_host_template = { |
47 | .name = ZFCP_NAME, | 46 | .name = ZFCP_NAME, |
48 | .proc_name = "zfcp", | 47 | .proc_name = "zfcp", |
49 | .proc_info = NULL, | ||
50 | .detect = NULL, | ||
51 | .slave_alloc = zfcp_scsi_slave_alloc, | 48 | .slave_alloc = zfcp_scsi_slave_alloc, |
52 | .slave_configure = zfcp_scsi_slave_configure, | 49 | .slave_configure = zfcp_scsi_slave_configure, |
53 | .slave_destroy = zfcp_scsi_slave_destroy, | 50 | .slave_destroy = zfcp_scsi_slave_destroy, |
54 | .queuecommand = zfcp_scsi_queuecommand, | 51 | .queuecommand = zfcp_scsi_queuecommand, |
55 | .eh_abort_handler = zfcp_scsi_eh_abort_handler, | 52 | .eh_abort_handler = zfcp_scsi_eh_abort_handler, |
56 | .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler, | 53 | .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler, |
57 | .eh_bus_reset_handler = zfcp_scsi_eh_bus_reset_handler, | 54 | .eh_bus_reset_handler = zfcp_scsi_eh_host_reset_handler, |
58 | .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler, | 55 | .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler, |
59 | .can_queue = 4096, | 56 | .can_queue = 4096, |
60 | .this_id = -1, | 57 | .this_id = -1, |
61 | /* | ||
62 | * FIXME: | ||
63 | * one less? can zfcp_create_sbale cope with it? | ||
64 | */ | ||
65 | .sg_tablesize = ZFCP_MAX_SBALES_PER_REQ, | 58 | .sg_tablesize = ZFCP_MAX_SBALES_PER_REQ, |
66 | .cmd_per_lun = 1, | 59 | .cmd_per_lun = 1, |
67 | .unchecked_isa_dma = 0, | ||
68 | .use_clustering = 1, | 60 | .use_clustering = 1, |
69 | .sdev_attrs = zfcp_sysfs_sdev_attrs, | 61 | .sdev_attrs = zfcp_sysfs_sdev_attrs, |
70 | }, | 62 | }, |
71 | .driver_version = ZFCP_VERSION, | 63 | .driver_version = ZFCP_VERSION, |
72 | /* rest initialised with zeros */ | ||
73 | }; | 64 | }; |
74 | 65 | ||
75 | /* Find start of Response Information in FCP response unit*/ | 66 | /* Find start of Response Information in FCP response unit*/ |
@@ -176,8 +167,14 @@ zfcp_scsi_slave_alloc(struct scsi_device *sdp) | |||
176 | return retval; | 167 | return retval; |
177 | } | 168 | } |
178 | 169 | ||
179 | static void | 170 | /** |
180 | zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) | 171 | * zfcp_scsi_slave_destroy - called when scsi device is removed |
172 | * | ||
173 | * Remove reference to associated scsi device for an zfcp_unit. | ||
174 | * Mark zfcp_unit as failed. The scsi device might be deleted via sysfs | ||
175 | * or a scan for this device might have failed. | ||
176 | */ | ||
177 | static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) | ||
181 | { | 178 | { |
182 | struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; | 179 | struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; |
183 | 180 | ||
@@ -185,6 +182,7 @@ zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) | |||
185 | atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status); | 182 | atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status); |
186 | sdpnt->hostdata = NULL; | 183 | sdpnt->hostdata = NULL; |
187 | unit->device = NULL; | 184 | unit->device = NULL; |
185 | zfcp_erp_unit_failed(unit); | ||
188 | zfcp_unit_put(unit); | 186 | zfcp_unit_put(unit); |
189 | } else { | 187 | } else { |
190 | ZFCP_LOG_NORMAL("bug: no unit associated with SCSI device at " | 188 | ZFCP_LOG_NORMAL("bug: no unit associated with SCSI device at " |
@@ -549,35 +547,38 @@ zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags, | |||
549 | } | 547 | } |
550 | 548 | ||
551 | /** | 549 | /** |
552 | * zfcp_scsi_eh_bus_reset_handler - reset bus (reopen adapter) | 550 | * zfcp_scsi_eh_host_reset_handler - handler for host and bus reset |
551 | * | ||
552 | * If ERP is already running it will be stopped. | ||
553 | */ | 553 | */ |
554 | int | 554 | int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) |
555 | zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *scpnt) | ||
556 | { | 555 | { |
557 | struct zfcp_unit *unit = (struct zfcp_unit*) scpnt->device->hostdata; | 556 | struct zfcp_unit *unit; |
558 | struct zfcp_adapter *adapter = unit->port->adapter; | 557 | struct zfcp_adapter *adapter; |
559 | 558 | unsigned long flags; | |
560 | ZFCP_LOG_NORMAL("bus reset because of problems with " | ||
561 | "unit 0x%016Lx\n", unit->fcp_lun); | ||
562 | zfcp_erp_adapter_reopen(adapter, 0); | ||
563 | zfcp_erp_wait(adapter); | ||
564 | |||
565 | return SUCCESS; | ||
566 | } | ||
567 | 559 | ||
568 | /** | 560 | unit = (struct zfcp_unit*) scpnt->device->hostdata; |
569 | * zfcp_scsi_eh_host_reset_handler - reset host (reopen adapter) | 561 | adapter = unit->port->adapter; |
570 | */ | ||
571 | int | ||
572 | zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) | ||
573 | { | ||
574 | struct zfcp_unit *unit = (struct zfcp_unit*) scpnt->device->hostdata; | ||
575 | struct zfcp_adapter *adapter = unit->port->adapter; | ||
576 | 562 | ||
577 | ZFCP_LOG_NORMAL("host reset because of problems with " | 563 | ZFCP_LOG_NORMAL("host/bus reset because of problems with " |
578 | "unit 0x%016Lx\n", unit->fcp_lun); | 564 | "unit 0x%016Lx\n", unit->fcp_lun); |
579 | zfcp_erp_adapter_reopen(adapter, 0); | 565 | |
580 | zfcp_erp_wait(adapter); | 566 | write_lock_irqsave(&adapter->erp_lock, flags); |
567 | if (atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, | ||
568 | &adapter->status)) { | ||
569 | zfcp_erp_modify_adapter_status(adapter, | ||
570 | ZFCP_STATUS_COMMON_UNBLOCKED|ZFCP_STATUS_COMMON_OPEN, | ||
571 | ZFCP_CLEAR); | ||
572 | zfcp_erp_action_dismiss_adapter(adapter); | ||
573 | write_unlock_irqrestore(&adapter->erp_lock, flags); | ||
574 | zfcp_fsf_req_dismiss_all(adapter); | ||
575 | adapter->fsf_req_seq_no = 0; | ||
576 | zfcp_erp_adapter_reopen(adapter, 0); | ||
577 | } else { | ||
578 | write_unlock_irqrestore(&adapter->erp_lock, flags); | ||
579 | zfcp_erp_adapter_reopen(adapter, 0); | ||
580 | zfcp_erp_wait(adapter); | ||
581 | } | ||
581 | 582 | ||
582 | return SUCCESS; | 583 | return SUCCESS; |
583 | } | 584 | } |
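The template change above drops the separate bus reset handler and points .eh_bus_reset_handler at the host reset handler, so both escalation levels trigger the same adapter-wide recovery. As a generic illustration only (hypothetical names, not zfcp code), aliasing the two callbacks in a 2.6-era scsi_host_template looks like this:

    static int my_queuecommand(struct scsi_cmnd *scpnt,
    			   void (*done)(struct scsi_cmnd *))
    {
    	/* placeholder: complete everything as unreachable */
    	scpnt->result = DID_NO_CONNECT << 16;
    	done(scpnt);
    	return 0;
    }

    static int my_host_reset(struct scsi_cmnd *scpnt)
    {
    	/* reopen/reinitialize the whole adapter, then report success */
    	return SUCCESS;
    }

    static struct scsi_host_template my_template = {
    	.name			= "example",
    	.queuecommand		= my_queuecommand,
    	/* bus and host reset both map to adapter-wide recovery */
    	.eh_bus_reset_handler	= my_host_reset,
    	.eh_host_reset_handler	= my_host_reset,
    	.can_queue		= 1,
    	.this_id		= -1,
    	.sg_tablesize		= SG_ALL,
    	.cmd_per_lun		= 1,
    	.use_clustering		= ENABLE_CLUSTERING,
    };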
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c index ab2f8b267908..bcb3444f1dcf 100644 --- a/drivers/scsi/hptiop.c +++ b/drivers/scsi/hptiop.c | |||
@@ -45,10 +45,6 @@ static char driver_name[] = "hptiop"; | |||
45 | static const char driver_name_long[] = "RocketRAID 3xxx SATA Controller driver"; | 45 | static const char driver_name_long[] = "RocketRAID 3xxx SATA Controller driver"; |
46 | static const char driver_ver[] = "v1.0 (060426)"; | 46 | static const char driver_ver[] = "v1.0 (060426)"; |
47 | 47 | ||
48 | static DEFINE_SPINLOCK(hptiop_hba_list_lock); | ||
49 | static LIST_HEAD(hptiop_hba_list); | ||
50 | static int hptiop_cdev_major = -1; | ||
51 | |||
52 | static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag); | 48 | static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag); |
53 | static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag); | 49 | static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag); |
54 | static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg); | 50 | static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg); |
@@ -577,7 +573,7 @@ static int hptiop_reset_hba(struct hptiop_hba *hba) | |||
577 | if (atomic_xchg(&hba->resetting, 1) == 0) { | 573 | if (atomic_xchg(&hba->resetting, 1) == 0) { |
578 | atomic_inc(&hba->reset_count); | 574 | atomic_inc(&hba->reset_count); |
579 | writel(IOPMU_INBOUND_MSG0_RESET, | 575 | writel(IOPMU_INBOUND_MSG0_RESET, |
580 | &hba->iop->outbound_msgaddr0); | 576 | &hba->iop->inbound_msgaddr0); |
581 | hptiop_pci_posting_flush(hba->iop); | 577 | hptiop_pci_posting_flush(hba->iop); |
582 | } | 578 | } |
583 | 579 | ||
@@ -620,532 +616,11 @@ static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev, | |||
620 | return queue_depth; | 616 | return queue_depth; |
621 | } | 617 | } |
622 | 618 | ||
623 | struct hptiop_getinfo { | ||
624 | char __user *buffer; | ||
625 | loff_t buflength; | ||
626 | loff_t bufoffset; | ||
627 | loff_t buffillen; | ||
628 | loff_t filpos; | ||
629 | }; | ||
630 | |||
631 | static void hptiop_copy_mem_info(struct hptiop_getinfo *pinfo, | ||
632 | char *data, int datalen) | ||
633 | { | ||
634 | if (pinfo->filpos < pinfo->bufoffset) { | ||
635 | if (pinfo->filpos + datalen <= pinfo->bufoffset) { | ||
636 | pinfo->filpos += datalen; | ||
637 | return; | ||
638 | } else { | ||
639 | data += (pinfo->bufoffset - pinfo->filpos); | ||
640 | datalen -= (pinfo->bufoffset - pinfo->filpos); | ||
641 | pinfo->filpos = pinfo->bufoffset; | ||
642 | } | ||
643 | } | ||
644 | |||
645 | pinfo->filpos += datalen; | ||
646 | if (pinfo->buffillen == pinfo->buflength) | ||
647 | return; | ||
648 | |||
649 | if (pinfo->buflength - pinfo->buffillen < datalen) | ||
650 | datalen = pinfo->buflength - pinfo->buffillen; | ||
651 | |||
652 | if (copy_to_user(pinfo->buffer + pinfo->buffillen, data, datalen)) | ||
653 | return; | ||
654 | |||
655 | pinfo->buffillen += datalen; | ||
656 | } | ||
657 | |||
658 | static int hptiop_copy_info(struct hptiop_getinfo *pinfo, char *fmt, ...) | ||
659 | { | ||
660 | va_list args; | ||
661 | char buf[128]; | ||
662 | int len; | ||
663 | |||
664 | va_start(args, fmt); | ||
665 | len = vsnprintf(buf, sizeof(buf), fmt, args); | ||
666 | va_end(args); | ||
667 | hptiop_copy_mem_info(pinfo, buf, len); | ||
668 | return len; | ||
669 | } | ||
670 | |||
671 | static void hptiop_ioctl_done(struct hpt_ioctl_k *arg) | ||
672 | { | ||
673 | arg->done = NULL; | ||
674 | wake_up(&arg->hba->ioctl_wq); | ||
675 | } | ||
676 | |||
677 | static void hptiop_do_ioctl(struct hpt_ioctl_k *arg) | ||
678 | { | ||
679 | struct hptiop_hba *hba = arg->hba; | ||
680 | u32 val; | ||
681 | struct hpt_iop_request_ioctl_command __iomem *req; | ||
682 | int ioctl_retry = 0; | ||
683 | |||
684 | dprintk("scsi%d: hptiop_do_ioctl\n", hba->host->host_no); | ||
685 | |||
686 | /* | ||
687 | * check (in + out) buff size from application. | ||
688 | * outbuf must be dword aligned. | ||
689 | */ | ||
690 | if (((arg->inbuf_size + 3) & ~3) + arg->outbuf_size > | ||
691 | hba->max_request_size | ||
692 | - sizeof(struct hpt_iop_request_header) | ||
693 | - 4 * sizeof(u32)) { | ||
694 | dprintk("scsi%d: ioctl buf size (%d/%d) is too large\n", | ||
695 | hba->host->host_no, | ||
696 | arg->inbuf_size, arg->outbuf_size); | ||
697 | arg->result = HPT_IOCTL_RESULT_FAILED; | ||
698 | return; | ||
699 | } | ||
700 | |||
701 | retry: | ||
702 | spin_lock_irq(hba->host->host_lock); | ||
703 | |||
704 | val = readl(&hba->iop->inbound_queue); | ||
705 | if (val == IOPMU_QUEUE_EMPTY) { | ||
706 | spin_unlock_irq(hba->host->host_lock); | ||
707 | dprintk("scsi%d: no free req for ioctl\n", hba->host->host_no); | ||
708 | arg->result = -1; | ||
709 | return; | ||
710 | } | ||
711 | |||
712 | req = (struct hpt_iop_request_ioctl_command __iomem *) | ||
713 | ((unsigned long)hba->iop + val); | ||
714 | |||
715 | writel(HPT_CTL_CODE_LINUX_TO_IOP(arg->ioctl_code), | ||
716 | &req->ioctl_code); | ||
717 | writel(arg->inbuf_size, &req->inbuf_size); | ||
718 | writel(arg->outbuf_size, &req->outbuf_size); | ||
719 | |||
720 | /* | ||
721 | * use the buffer on the IOP local memory first, then copy it | ||
722 | * back to host. | ||
723 | * the caller's request buffer should be little-endian. | ||
724 | */ | ||
725 | if (arg->inbuf_size) | ||
726 | memcpy_toio(req->buf, arg->inbuf, arg->inbuf_size); | ||
727 | |||
728 | /* correct the controller ID for IOP */ | ||
729 | if ((arg->ioctl_code == HPT_IOCTL_GET_CHANNEL_INFO || | ||
730 | arg->ioctl_code == HPT_IOCTL_GET_CONTROLLER_INFO_V2 || | ||
731 | arg->ioctl_code == HPT_IOCTL_GET_CONTROLLER_INFO) | ||
732 | && arg->inbuf_size >= sizeof(u32)) | ||
733 | writel(0, req->buf); | ||
734 | |||
735 | writel(IOP_REQUEST_TYPE_IOCTL_COMMAND, &req->header.type); | ||
736 | writel(0, &req->header.flags); | ||
737 | writel(offsetof(struct hpt_iop_request_ioctl_command, buf) | ||
738 | + arg->inbuf_size, &req->header.size); | ||
739 | writel((u32)(unsigned long)arg, &req->header.context); | ||
740 | writel(BITS_PER_LONG > 32 ? (u32)((unsigned long)arg>>32) : 0, | ||
741 | &req->header.context_hi32); | ||
742 | writel(IOP_RESULT_PENDING, &req->header.result); | ||
743 | |||
744 | arg->result = HPT_IOCTL_RESULT_FAILED; | ||
745 | arg->done = hptiop_ioctl_done; | ||
746 | |||
747 | writel(val, &hba->iop->inbound_queue); | ||
748 | hptiop_pci_posting_flush(hba->iop); | ||
749 | |||
750 | spin_unlock_irq(hba->host->host_lock); | ||
751 | |||
752 | wait_event_timeout(hba->ioctl_wq, arg->done == NULL, 60 * HZ); | ||
753 | |||
754 | if (arg->done != NULL) { | ||
755 | hptiop_reset_hba(hba); | ||
756 | if (ioctl_retry++ < 3) | ||
757 | goto retry; | ||
758 | } | ||
759 | |||
760 | dprintk("hpt_iop_ioctl %x result %d\n", | ||
761 | arg->ioctl_code, arg->result); | ||
762 | } | ||
763 | |||
764 | static int __hpt_do_ioctl(struct hptiop_hba *hba, u32 code, void *inbuf, | ||
765 | u32 insize, void *outbuf, u32 outsize) | ||
766 | { | ||
767 | struct hpt_ioctl_k arg; | ||
768 | arg.hba = hba; | ||
769 | arg.ioctl_code = code; | ||
770 | arg.inbuf = inbuf; | ||
771 | arg.outbuf = outbuf; | ||
772 | arg.inbuf_size = insize; | ||
773 | arg.outbuf_size = outsize; | ||
774 | arg.bytes_returned = NULL; | ||
775 | hptiop_do_ioctl(&arg); | ||
776 | return arg.result; | ||
777 | } | ||
778 | |||
779 | static inline int hpt_id_valid(__le32 id) | ||
780 | { | ||
781 | return id != 0 && id != cpu_to_le32(0xffffffff); | ||
782 | } | ||
783 | |||
784 | static int hptiop_get_controller_info(struct hptiop_hba *hba, | ||
785 | struct hpt_controller_info *pinfo) | ||
786 | { | ||
787 | int id = 0; | ||
788 | |||
789 | return __hpt_do_ioctl(hba, HPT_IOCTL_GET_CONTROLLER_INFO, | ||
790 | &id, sizeof(int), pinfo, sizeof(*pinfo)); | ||
791 | } | ||
792 | |||
793 | |||
794 | static int hptiop_get_channel_info(struct hptiop_hba *hba, int bus, | ||
795 | struct hpt_channel_info *pinfo) | ||
796 | { | ||
797 | u32 ids[2]; | ||
798 | |||
799 | ids[0] = 0; | ||
800 | ids[1] = bus; | ||
801 | return __hpt_do_ioctl(hba, HPT_IOCTL_GET_CHANNEL_INFO, | ||
802 | ids, sizeof(ids), pinfo, sizeof(*pinfo)); | ||
803 | |||
804 | } | ||
805 | |||
806 | static int hptiop_get_logical_devices(struct hptiop_hba *hba, | ||
807 | __le32 *pids, int maxcount) | ||
808 | { | ||
809 | int i; | ||
810 | u32 count = maxcount - 1; | ||
811 | |||
812 | if (__hpt_do_ioctl(hba, HPT_IOCTL_GET_LOGICAL_DEVICES, | ||
813 | &count, sizeof(u32), | ||
814 | pids, sizeof(u32) * maxcount)) | ||
815 | return -1; | ||
816 | |||
817 | maxcount = le32_to_cpu(pids[0]); | ||
818 | for (i = 0; i < maxcount; i++) | ||
819 | pids[i] = pids[i+1]; | ||
820 | |||
821 | return maxcount; | ||
822 | } | ||
823 | |||
824 | static int hptiop_get_device_info_v3(struct hptiop_hba *hba, __le32 id, | ||
825 | struct hpt_logical_device_info_v3 *pinfo) | ||
826 | { | ||
827 | return __hpt_do_ioctl(hba, HPT_IOCTL_GET_DEVICE_INFO_V3, | ||
828 | &id, sizeof(u32), | ||
829 | pinfo, sizeof(*pinfo)); | ||
830 | } | ||
831 | |||
832 | static const char *get_array_status(struct hpt_logical_device_info_v3 *devinfo) | ||
833 | { | ||
834 | static char s[64]; | ||
835 | u32 flags = le32_to_cpu(devinfo->u.array.flags); | ||
836 | u32 trans_prog = le32_to_cpu(devinfo->u.array.transforming_progress); | ||
837 | u32 reb_prog = le32_to_cpu(devinfo->u.array.rebuilding_progress); | ||
838 | |||
839 | if (flags & ARRAY_FLAG_DISABLED) | ||
840 | return "Disabled"; | ||
841 | else if (flags & ARRAY_FLAG_TRANSFORMING) | ||
842 | sprintf(s, "Expanding/Migrating %d.%d%%%s%s", | ||
843 | trans_prog / 100, | ||
844 | trans_prog % 100, | ||
845 | (flags & (ARRAY_FLAG_NEEDBUILDING|ARRAY_FLAG_BROKEN))? | ||
846 | ", Critical" : "", | ||
847 | ((flags & ARRAY_FLAG_NEEDINITIALIZING) && | ||
848 | !(flags & ARRAY_FLAG_REBUILDING) && | ||
849 | !(flags & ARRAY_FLAG_INITIALIZING))? | ||
850 | ", Uninitialized" : ""); | ||
851 | else if ((flags & ARRAY_FLAG_BROKEN) && | ||
852 | devinfo->u.array.array_type != AT_RAID6) | ||
853 | return "Critical"; | ||
854 | else if (flags & ARRAY_FLAG_REBUILDING) | ||
855 | sprintf(s, | ||
856 | (flags & ARRAY_FLAG_NEEDINITIALIZING)? | ||
857 | "%sBackground initializing %d.%d%%" : | ||
858 | "%sRebuilding %d.%d%%", | ||
859 | (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "", | ||
860 | reb_prog / 100, | ||
861 | reb_prog % 100); | ||
862 | else if (flags & ARRAY_FLAG_VERIFYING) | ||
863 | sprintf(s, "%sVerifying %d.%d%%", | ||
864 | (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "", | ||
865 | reb_prog / 100, | ||
866 | reb_prog % 100); | ||
867 | else if (flags & ARRAY_FLAG_INITIALIZING) | ||
868 | sprintf(s, "%sForeground initializing %d.%d%%", | ||
869 | (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "", | ||
870 | reb_prog / 100, | ||
871 | reb_prog % 100); | ||
872 | else if (flags & ARRAY_FLAG_NEEDTRANSFORM) | ||
873 | sprintf(s,"%s%s%s", "Need Expanding/Migrating", | ||
874 | (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "", | ||
875 | ((flags & ARRAY_FLAG_NEEDINITIALIZING) && | ||
876 | !(flags & ARRAY_FLAG_REBUILDING) && | ||
877 | !(flags & ARRAY_FLAG_INITIALIZING))? | ||
878 | ", Uninitialized" : ""); | ||
879 | else if (flags & ARRAY_FLAG_NEEDINITIALIZING && | ||
880 | !(flags & ARRAY_FLAG_REBUILDING) && | ||
881 | !(flags & ARRAY_FLAG_INITIALIZING)) | ||
882 | sprintf(s,"%sUninitialized", | ||
883 | (flags & ARRAY_FLAG_BROKEN)? "Critical, " : ""); | ||
884 | else if ((flags & ARRAY_FLAG_NEEDBUILDING) || | ||
885 | (flags & ARRAY_FLAG_BROKEN)) | ||
886 | return "Critical"; | ||
887 | else | ||
888 | return "Normal"; | ||
889 | return s; | ||
890 | } | ||
891 | |||
892 | static void hptiop_dump_devinfo(struct hptiop_hba *hba, | ||
893 | struct hptiop_getinfo *pinfo, __le32 id, int indent) | ||
894 | { | ||
895 | struct hpt_logical_device_info_v3 devinfo; | ||
896 | int i; | ||
897 | u64 capacity; | ||
898 | |||
899 | for (i = 0; i < indent; i++) | ||
900 | hptiop_copy_info(pinfo, "\t"); | ||
901 | |||
902 | if (hptiop_get_device_info_v3(hba, id, &devinfo)) { | ||
903 | hptiop_copy_info(pinfo, "unknown\n"); | ||
904 | return; | ||
905 | } | ||
906 | |||
907 | switch (devinfo.type) { | ||
908 | |||
909 | case LDT_DEVICE: { | ||
910 | struct hd_driveid *driveid; | ||
911 | u32 flags = le32_to_cpu(devinfo.u.device.flags); | ||
912 | |||
913 | driveid = (struct hd_driveid *)devinfo.u.device.ident; | ||
914 | /* model[] is 40 chars long, but we just want 20 chars here */ | ||
915 | driveid->model[20] = 0; | ||
916 | |||
917 | if (indent) | ||
918 | if (flags & DEVICE_FLAG_DISABLED) | ||
919 | hptiop_copy_info(pinfo,"Missing\n"); | ||
920 | else | ||
921 | hptiop_copy_info(pinfo, "CH%d %s\n", | ||
922 | devinfo.u.device.path_id + 1, | ||
923 | driveid->model); | ||
924 | else { | ||
925 | capacity = le64_to_cpu(devinfo.capacity) * 512; | ||
926 | do_div(capacity, 1000000); | ||
927 | hptiop_copy_info(pinfo, | ||
928 | "CH%d %s, %lluMB, %s %s%s%s%s\n", | ||
929 | devinfo.u.device.path_id + 1, | ||
930 | driveid->model, | ||
931 | capacity, | ||
932 | (flags & DEVICE_FLAG_DISABLED)? | ||
933 | "Disabled" : "Normal", | ||
934 | devinfo.u.device.read_ahead_enabled? | ||
935 | "[RA]" : "", | ||
936 | devinfo.u.device.write_cache_enabled? | ||
937 | "[WC]" : "", | ||
938 | devinfo.u.device.TCQ_enabled? | ||
939 | "[TCQ]" : "", | ||
940 | devinfo.u.device.NCQ_enabled? | ||
941 | "[NCQ]" : "" | ||
942 | ); | ||
943 | } | ||
944 | break; | ||
945 | } | ||
946 | |||
947 | case LDT_ARRAY: | ||
948 | if (devinfo.target_id != INVALID_TARGET_ID) | ||
949 | hptiop_copy_info(pinfo, "[DISK %d_%d] ", | ||
950 | devinfo.vbus_id, devinfo.target_id); | ||
951 | |||
952 | capacity = le64_to_cpu(devinfo.capacity) * 512; | ||
953 | do_div(capacity, 1000000); | ||
954 | hptiop_copy_info(pinfo, "%s (%s), %lluMB, %s\n", | ||
955 | devinfo.u.array.name, | ||
956 | devinfo.u.array.array_type==AT_RAID0? "RAID0" : | ||
957 | devinfo.u.array.array_type==AT_RAID1? "RAID1" : | ||
958 | devinfo.u.array.array_type==AT_RAID5? "RAID5" : | ||
959 | devinfo.u.array.array_type==AT_RAID6? "RAID6" : | ||
960 | devinfo.u.array.array_type==AT_JBOD? "JBOD" : | ||
961 | "unknown", | ||
962 | capacity, | ||
963 | get_array_status(&devinfo)); | ||
964 | for (i = 0; i < devinfo.u.array.ndisk; i++) { | ||
965 | if (hpt_id_valid(devinfo.u.array.members[i])) { | ||
966 | if (cpu_to_le16(1<<i) & | ||
967 | devinfo.u.array.critical_members) | ||
968 | hptiop_copy_info(pinfo, "\t*"); | ||
969 | hptiop_dump_devinfo(hba, pinfo, | ||
970 | devinfo.u.array.members[i], indent+1); | ||
971 | } | ||
972 | else | ||
973 | hptiop_copy_info(pinfo, "\tMissing\n"); | ||
974 | } | ||
975 | if (id == devinfo.u.array.transform_source) { | ||
976 | hptiop_copy_info(pinfo, "\tExpanding/Migrating to:\n"); | ||
977 | hptiop_dump_devinfo(hba, pinfo, | ||
978 | devinfo.u.array.transform_target, indent+1); | ||
979 | } | ||
980 | break; | ||
981 | } | ||
982 | } | ||
983 | |||
984 | static ssize_t hptiop_show_version(struct class_device *class_dev, char *buf) | 619 | static ssize_t hptiop_show_version(struct class_device *class_dev, char *buf) |
985 | { | 620 | { |
986 | return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver); | 621 | return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver); |
987 | } | 622 | } |
988 | 623 | ||
989 | static ssize_t hptiop_cdev_read(struct file *filp, char __user *buf, | ||
990 | size_t count, loff_t *ppos) | ||
991 | { | ||
992 | struct hptiop_hba *hba = filp->private_data; | ||
993 | struct hptiop_getinfo info; | ||
994 | int i, j, ndev; | ||
995 | struct hpt_controller_info con_info; | ||
996 | struct hpt_channel_info chan_info; | ||
997 | __le32 ids[32]; | ||
998 | |||
999 | info.buffer = buf; | ||
1000 | info.buflength = count; | ||
1001 | info.bufoffset = ppos ? *ppos : 0; | ||
1002 | info.filpos = 0; | ||
1003 | info.buffillen = 0; | ||
1004 | |||
1005 | if (hptiop_get_controller_info(hba, &con_info)) | ||
1006 | return -EIO; | ||
1007 | |||
1008 | for (i = 0; i < con_info.num_buses; i++) { | ||
1009 | if (hptiop_get_channel_info(hba, i, &chan_info) == 0) { | ||
1010 | if (hpt_id_valid(chan_info.devices[0])) | ||
1011 | hptiop_dump_devinfo(hba, &info, | ||
1012 | chan_info.devices[0], 0); | ||
1013 | if (hpt_id_valid(chan_info.devices[1])) | ||
1014 | hptiop_dump_devinfo(hba, &info, | ||
1015 | chan_info.devices[1], 0); | ||
1016 | } | ||
1017 | } | ||
1018 | |||
1019 | ndev = hptiop_get_logical_devices(hba, ids, | ||
1020 | sizeof(ids) / sizeof(ids[0])); | ||
1021 | |||
1022 | /* | ||
1023 | * if hptiop_get_logical_devices fails, ndev==-1 and it just | ||
1024 | * output nothing here | ||
1025 | */ | ||
1026 | for (j = 0; j < ndev; j++) | ||
1027 | hptiop_dump_devinfo(hba, &info, ids[j], 0); | ||
1028 | |||
1029 | if (ppos) | ||
1030 | *ppos += info.buffillen; | ||
1031 | |||
1032 | return info.buffillen; | ||
1033 | } | ||
1034 | |||
1035 | static int hptiop_cdev_ioctl(struct inode *inode, struct file *file, | ||
1036 | unsigned int cmd, unsigned long arg) | ||
1037 | { | ||
1038 | struct hptiop_hba *hba = file->private_data; | ||
1039 | struct hpt_ioctl_u ioctl_u; | ||
1040 | struct hpt_ioctl_k ioctl_k; | ||
1041 | u32 bytes_returned; | ||
1042 | int err = -EINVAL; | ||
1043 | |||
1044 | if (copy_from_user(&ioctl_u, | ||
1045 | (void __user *)arg, sizeof(struct hpt_ioctl_u))) | ||
1046 | return -EINVAL; | ||
1047 | |||
1048 | if (ioctl_u.magic != HPT_IOCTL_MAGIC) | ||
1049 | return -EINVAL; | ||
1050 | |||
1051 | ioctl_k.ioctl_code = ioctl_u.ioctl_code; | ||
1052 | ioctl_k.inbuf = NULL; | ||
1053 | ioctl_k.inbuf_size = ioctl_u.inbuf_size; | ||
1054 | ioctl_k.outbuf = NULL; | ||
1055 | ioctl_k.outbuf_size = ioctl_u.outbuf_size; | ||
1056 | ioctl_k.hba = hba; | ||
1057 | ioctl_k.bytes_returned = &bytes_returned; | ||
1058 | |||
1059 | /* verify user buffer */ | ||
1060 | if ((ioctl_k.inbuf_size && !access_ok(VERIFY_READ, | ||
1061 | ioctl_u.inbuf, ioctl_k.inbuf_size)) || | ||
1062 | (ioctl_k.outbuf_size && !access_ok(VERIFY_WRITE, | ||
1063 | ioctl_u.outbuf, ioctl_k.outbuf_size)) || | ||
1064 | (ioctl_u.bytes_returned && !access_ok(VERIFY_WRITE, | ||
1065 | ioctl_u.bytes_returned, sizeof(u32))) || | ||
1066 | ioctl_k.inbuf_size + ioctl_k.outbuf_size > 0x10000) { | ||
1067 | |||
1068 | dprintk("scsi%d: got bad user address\n", hba->host->host_no); | ||
1069 | return -EINVAL; | ||
1070 | } | ||
1071 | |||
1072 | /* map buffer to kernel. */ | ||
1073 | if (ioctl_k.inbuf_size) { | ||
1074 | ioctl_k.inbuf = kmalloc(ioctl_k.inbuf_size, GFP_KERNEL); | ||
1075 | if (!ioctl_k.inbuf) { | ||
1076 | dprintk("scsi%d: fail to alloc inbuf\n", | ||
1077 | hba->host->host_no); | ||
1078 | err = -ENOMEM; | ||
1079 | goto err_exit; | ||
1080 | } | ||
1081 | |||
1082 | if (copy_from_user(ioctl_k.inbuf, | ||
1083 | ioctl_u.inbuf, ioctl_k.inbuf_size)) { | ||
1084 | goto err_exit; | ||
1085 | } | ||
1086 | } | ||
1087 | |||
1088 | if (ioctl_k.outbuf_size) { | ||
1089 | ioctl_k.outbuf = kmalloc(ioctl_k.outbuf_size, GFP_KERNEL); | ||
1090 | if (!ioctl_k.outbuf) { | ||
1091 | dprintk("scsi%d: fail to alloc outbuf\n", | ||
1092 | hba->host->host_no); | ||
1093 | err = -ENOMEM; | ||
1094 | goto err_exit; | ||
1095 | } | ||
1096 | } | ||
1097 | |||
1098 | hptiop_do_ioctl(&ioctl_k); | ||
1099 | |||
1100 | if (ioctl_k.result == HPT_IOCTL_RESULT_OK) { | ||
1101 | if (ioctl_k.outbuf_size && | ||
1102 | copy_to_user(ioctl_u.outbuf, | ||
1103 | ioctl_k.outbuf, ioctl_k.outbuf_size)) | ||
1104 | goto err_exit; | ||
1105 | |||
1106 | if (ioctl_u.bytes_returned && | ||
1107 | copy_to_user(ioctl_u.bytes_returned, | ||
1108 | &bytes_returned, sizeof(u32))) | ||
1109 | goto err_exit; | ||
1110 | |||
1111 | err = 0; | ||
1112 | } | ||
1113 | |||
1114 | err_exit: | ||
1115 | kfree(ioctl_k.inbuf); | ||
1116 | kfree(ioctl_k.outbuf); | ||
1117 | |||
1118 | return err; | ||
1119 | } | ||
1120 | |||
1121 | static int hptiop_cdev_open(struct inode *inode, struct file *file) | ||
1122 | { | ||
1123 | struct hptiop_hba *hba; | ||
1124 | unsigned i = 0, minor = iminor(inode); | ||
1125 | int ret = -ENODEV; | ||
1126 | |||
1127 | spin_lock(&hptiop_hba_list_lock); | ||
1128 | list_for_each_entry(hba, &hptiop_hba_list, link) { | ||
1129 | if (i == minor) { | ||
1130 | file->private_data = hba; | ||
1131 | ret = 0; | ||
1132 | goto out; | ||
1133 | } | ||
1134 | i++; | ||
1135 | } | ||
1136 | |||
1137 | out: | ||
1138 | spin_unlock(&hptiop_hba_list_lock); | ||
1139 | return ret; | ||
1140 | } | ||
1141 | |||
1142 | static struct file_operations hptiop_cdev_fops = { | ||
1143 | .owner = THIS_MODULE, | ||
1144 | .read = hptiop_cdev_read, | ||
1145 | .ioctl = hptiop_cdev_ioctl, | ||
1146 | .open = hptiop_cdev_open, | ||
1147 | }; | ||
1148 | |||
1149 | static ssize_t hptiop_show_fw_version(struct class_device *class_dev, char *buf) | 624 | static ssize_t hptiop_show_fw_version(struct class_device *class_dev, char *buf) |
1150 | { | 625 | { |
1151 | struct Scsi_Host *host = class_to_shost(class_dev); | 626 | struct Scsi_Host *host = class_to_shost(class_dev); |
@@ -1296,19 +771,13 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev, | |||
1296 | goto unmap_pci_bar; | 771 | goto unmap_pci_bar; |
1297 | } | 772 | } |
1298 | 773 | ||
1299 | if (scsi_add_host(host, &pcidev->dev)) { | ||
1300 | printk(KERN_ERR "scsi%d: scsi_add_host failed\n", | ||
1301 | hba->host->host_no); | ||
1302 | goto unmap_pci_bar; | ||
1303 | } | ||
1304 | |||
1305 | pci_set_drvdata(pcidev, host); | 774 | pci_set_drvdata(pcidev, host); |
1306 | 775 | ||
1307 | if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED, | 776 | if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED, |
1308 | driver_name, hba)) { | 777 | driver_name, hba)) { |
1309 | printk(KERN_ERR "scsi%d: request irq %d failed\n", | 778 | printk(KERN_ERR "scsi%d: request irq %d failed\n", |
1310 | hba->host->host_no, pcidev->irq); | 779 | hba->host->host_no, pcidev->irq); |
1311 | goto remove_scsi_host; | 780 | goto unmap_pci_bar; |
1312 | } | 781 | } |
1313 | 782 | ||
1314 | /* Allocate request mem */ | 783 | /* Allocate request mem */ |
@@ -1355,9 +824,12 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev, | |||
1355 | if (hptiop_initialize_iop(hba)) | 824 | if (hptiop_initialize_iop(hba)) |
1356 | goto free_request_mem; | 825 | goto free_request_mem; |
1357 | 826 | ||
1358 | spin_lock(&hptiop_hba_list_lock); | 827 | if (scsi_add_host(host, &pcidev->dev)) { |
1359 | list_add_tail(&hba->link, &hptiop_hba_list); | 828 | printk(KERN_ERR "scsi%d: scsi_add_host failed\n", |
1360 | spin_unlock(&hptiop_hba_list_lock); | 829 | hba->host->host_no); |
830 | goto free_request_mem; | ||
831 | } | ||
832 | |||
1361 | 833 | ||
1362 | scsi_scan_host(host); | 834 | scsi_scan_host(host); |
1363 | 835 | ||
@@ -1372,9 +844,6 @@ free_request_mem: | |||
1372 | free_request_irq: | 844 | free_request_irq: |
1373 | free_irq(hba->pcidev->irq, hba); | 845 | free_irq(hba->pcidev->irq, hba); |
1374 | 846 | ||
1375 | remove_scsi_host: | ||
1376 | scsi_remove_host(host); | ||
1377 | |||
1378 | unmap_pci_bar: | 847 | unmap_pci_bar: |
1379 | iounmap(hba->iop); | 848 | iounmap(hba->iop); |
1380 | 849 | ||
@@ -1422,10 +891,6 @@ static void hptiop_remove(struct pci_dev *pcidev) | |||
1422 | 891 | ||
1423 | scsi_remove_host(host); | 892 | scsi_remove_host(host); |
1424 | 893 | ||
1425 | spin_lock(&hptiop_hba_list_lock); | ||
1426 | list_del_init(&hba->link); | ||
1427 | spin_unlock(&hptiop_hba_list_lock); | ||
1428 | |||
1429 | hptiop_shutdown(pcidev); | 894 | hptiop_shutdown(pcidev); |
1430 | 895 | ||
1431 | free_irq(hba->pcidev->irq, hba); | 896 | free_irq(hba->pcidev->irq, hba); |
@@ -1462,27 +927,12 @@ static struct pci_driver hptiop_pci_driver = { | |||
1462 | 927 | ||
1463 | static int __init hptiop_module_init(void) | 928 | static int __init hptiop_module_init(void) |
1464 | { | 929 | { |
1465 | int error; | ||
1466 | |||
1467 | printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver); | 930 | printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver); |
1468 | 931 | return pci_register_driver(&hptiop_pci_driver); | |
1469 | error = pci_register_driver(&hptiop_pci_driver); | ||
1470 | if (error < 0) | ||
1471 | return error; | ||
1472 | |||
1473 | hptiop_cdev_major = register_chrdev(0, "hptiop", &hptiop_cdev_fops); | ||
1474 | if (hptiop_cdev_major < 0) { | ||
1475 | printk(KERN_WARNING "unable to register hptiop device.\n"); | ||
1476 | return hptiop_cdev_major; | ||
1477 | } | ||
1478 | |||
1479 | return 0; | ||
1480 | } | 932 | } |
1481 | 933 | ||
1482 | static void __exit hptiop_module_exit(void) | 934 | static void __exit hptiop_module_exit(void) |
1483 | { | 935 | { |
1484 | dprintk("hptiop_module_exit\n"); | ||
1485 | unregister_chrdev(hptiop_cdev_major, "hptiop"); | ||
1486 | pci_unregister_driver(&hptiop_pci_driver); | 936 | pci_unregister_driver(&hptiop_pci_driver); |
1487 | } | 937 | } |
1488 | 938 | ||
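Besides removing the private /dev interface, the hptiop hunks above reorder probe so that scsi_add_host() runs only after the IOP is initialized, which lets the error path drop the remove_scsi_host label entirely. A sketch of that ordering with hypothetical names (my_hba, my_intr, my_init_hw are placeholders; the kernel calls are real):

    /* Sketch of the reordered probe tail and its goto unwind. */
    static int example_probe_tail(struct pci_dev *pcidev, struct Scsi_Host *host,
    			      struct my_hba *hba)
    {
    	if (request_irq(pcidev->irq, my_intr, IRQF_SHARED, "example", hba))
    		goto unmap_pci_bar;

    	if (my_init_hw(hba))			/* bring up the IOP first */
    		goto out_free_irq;

    	/* publish the host only now: no error path after a failed
    	 * scsi_add_host() needs scsi_remove_host() to unwind */
    	if (scsi_add_host(host, &pcidev->dev))
    		goto out_free_irq;

    	scsi_scan_host(host);
    	return 0;

    out_free_irq:
    	free_irq(pcidev->irq, hba);
    unmap_pci_bar:
    	iounmap(hba->iop);
    	return -ENODEV;
    }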
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 848fb2aa4ca3..058f094f945a 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c | |||
@@ -43,13 +43,10 @@ | |||
43 | 43 | ||
44 | #include "iscsi_tcp.h" | 44 | #include "iscsi_tcp.h" |
45 | 45 | ||
46 | #define ISCSI_TCP_VERSION "1.0-595" | ||
47 | |||
48 | MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, " | 46 | MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, " |
49 | "Alex Aizman <itn780@yahoo.com>"); | 47 | "Alex Aizman <itn780@yahoo.com>"); |
50 | MODULE_DESCRIPTION("iSCSI/TCP data-path"); | 48 | MODULE_DESCRIPTION("iSCSI/TCP data-path"); |
51 | MODULE_LICENSE("GPL"); | 49 | MODULE_LICENSE("GPL"); |
52 | MODULE_VERSION(ISCSI_TCP_VERSION); | ||
53 | /* #define DEBUG_TCP */ | 50 | /* #define DEBUG_TCP */ |
54 | #define DEBUG_ASSERT | 51 | #define DEBUG_ASSERT |
55 | 52 | ||
@@ -185,11 +182,19 @@ iscsi_hdr_extract(struct iscsi_tcp_conn *tcp_conn) | |||
185 | * must be called with session lock | 182 | * must be called with session lock |
186 | */ | 183 | */ |
187 | static void | 184 | static void |
188 | __iscsi_ctask_cleanup(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | 185 | iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) |
189 | { | 186 | { |
190 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; | 187 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; |
188 | struct iscsi_r2t_info *r2t; | ||
191 | struct scsi_cmnd *sc; | 189 | struct scsi_cmnd *sc; |
192 | 190 | ||
191 | /* flush ctask's r2t queues */ | ||
192 | while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) { | ||
193 | __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, | ||
194 | sizeof(void*)); | ||
195 | debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n"); | ||
196 | } | ||
197 | |||
193 | sc = ctask->sc; | 198 | sc = ctask->sc; |
194 | if (unlikely(!sc)) | 199 | if (unlikely(!sc)) |
195 | return; | 200 | return; |
@@ -374,6 +379,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | |||
374 | spin_unlock(&session->lock); | 379 | spin_unlock(&session->lock); |
375 | return 0; | 380 | return 0; |
376 | } | 381 | } |
382 | |||
377 | rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*)); | 383 | rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*)); |
378 | BUG_ON(!rc); | 384 | BUG_ON(!rc); |
379 | 385 | ||
@@ -399,7 +405,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | |||
399 | tcp_ctask->exp_r2tsn = r2tsn + 1; | 405 | tcp_ctask->exp_r2tsn = r2tsn + 1; |
400 | tcp_ctask->xmstate |= XMSTATE_SOL_HDR; | 406 | tcp_ctask->xmstate |= XMSTATE_SOL_HDR; |
401 | __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*)); | 407 | __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*)); |
402 | __kfifo_put(conn->xmitqueue, (void*)&ctask, sizeof(void*)); | 408 | list_move_tail(&ctask->running, &conn->xmitqueue); |
403 | 409 | ||
404 | scsi_queue_work(session->host, &conn->xmitwork); | 410 | scsi_queue_work(session->host, &conn->xmitwork); |
405 | conn->r2t_pdus_cnt++; | 411 | conn->r2t_pdus_cnt++; |
@@ -477,6 +483,8 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn) | |||
477 | case ISCSI_OP_SCSI_DATA_IN: | 483 | case ISCSI_OP_SCSI_DATA_IN: |
478 | tcp_conn->in.ctask = session->cmds[itt]; | 484 | tcp_conn->in.ctask = session->cmds[itt]; |
479 | rc = iscsi_data_rsp(conn, tcp_conn->in.ctask); | 485 | rc = iscsi_data_rsp(conn, tcp_conn->in.ctask); |
486 | if (rc) | ||
487 | return rc; | ||
480 | /* fall through */ | 488 | /* fall through */ |
481 | case ISCSI_OP_SCSI_CMD_RSP: | 489 | case ISCSI_OP_SCSI_CMD_RSP: |
482 | tcp_conn->in.ctask = session->cmds[itt]; | 490 | tcp_conn->in.ctask = session->cmds[itt]; |
@@ -484,7 +492,7 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn) | |||
484 | goto copy_hdr; | 492 | goto copy_hdr; |
485 | 493 | ||
486 | spin_lock(&session->lock); | 494 | spin_lock(&session->lock); |
487 | __iscsi_ctask_cleanup(conn, tcp_conn->in.ctask); | 495 | iscsi_tcp_cleanup_ctask(conn, tcp_conn->in.ctask); |
488 | rc = __iscsi_complete_pdu(conn, hdr, NULL, 0); | 496 | rc = __iscsi_complete_pdu(conn, hdr, NULL, 0); |
489 | spin_unlock(&session->lock); | 497 | spin_unlock(&session->lock); |
490 | break; | 498 | break; |
@@ -500,13 +508,28 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn) | |||
500 | break; | 508 | break; |
501 | case ISCSI_OP_LOGIN_RSP: | 509 | case ISCSI_OP_LOGIN_RSP: |
502 | case ISCSI_OP_TEXT_RSP: | 510 | case ISCSI_OP_TEXT_RSP: |
503 | case ISCSI_OP_LOGOUT_RSP: | ||
504 | case ISCSI_OP_NOOP_IN: | ||
505 | case ISCSI_OP_REJECT: | 511 | case ISCSI_OP_REJECT: |
506 | case ISCSI_OP_ASYNC_EVENT: | 512 | case ISCSI_OP_ASYNC_EVENT: |
513 | /* | ||
514 | * It is possible that we could get a PDU with a buffer larger | ||
515 | * than 8K, but there are no targets that currently do this. | ||
516 | * For now we fail until we find a vendor that needs it | ||
517 | */ | ||
518 | if (DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH < | ||
519 | tcp_conn->in.datalen) { | ||
520 | printk(KERN_ERR "iscsi_tcp: received buffer of len %u " | ||
521 | "but conn buffer is only %u (opcode %0x)\n", | ||
522 | tcp_conn->in.datalen, | ||
523 | DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, opcode); | ||
524 | rc = ISCSI_ERR_PROTO; | ||
525 | break; | ||
526 | } | ||
527 | |||
507 | if (tcp_conn->in.datalen) | 528 | if (tcp_conn->in.datalen) |
508 | goto copy_hdr; | 529 | goto copy_hdr; |
509 | /* fall through */ | 530 | /* fall through */ |
531 | case ISCSI_OP_LOGOUT_RSP: | ||
532 | case ISCSI_OP_NOOP_IN: | ||
510 | case ISCSI_OP_SCSI_TMFUNC_RSP: | 533 | case ISCSI_OP_SCSI_TMFUNC_RSP: |
511 | rc = iscsi_complete_pdu(conn, hdr, NULL, 0); | 534 | rc = iscsi_complete_pdu(conn, hdr, NULL, 0); |
512 | break; | 535 | break; |
@@ -523,7 +546,7 @@ copy_hdr: | |||
523 | * skbs to complete the command then we have to copy the header | 546 | * skbs to complete the command then we have to copy the header |
524 | * for later use | 547 | * for later use |
525 | */ | 548 | */ |
526 | if (tcp_conn->in.zero_copy_hdr && tcp_conn->in.copy < | 549 | if (tcp_conn->in.zero_copy_hdr && tcp_conn->in.copy <= |
527 | (tcp_conn->in.datalen + tcp_conn->in.padding + | 550 | (tcp_conn->in.datalen + tcp_conn->in.padding + |
528 | (conn->datadgst_en ? 4 : 0))) { | 551 | (conn->datadgst_en ? 4 : 0))) { |
529 | debug_tcp("Copying header for later use. in.copy %d in.datalen" | 552 | debug_tcp("Copying header for later use. in.copy %d in.datalen" |
@@ -614,9 +637,9 @@ iscsi_ctask_copy(struct iscsi_tcp_conn *tcp_conn, struct iscsi_cmd_task *ctask, | |||
614 | * byte counters. | 637 | * byte counters. |
615 | **/ | 638 | **/ |
616 | static inline int | 639 | static inline int |
617 | iscsi_tcp_copy(struct iscsi_tcp_conn *tcp_conn) | 640 | iscsi_tcp_copy(struct iscsi_conn *conn) |
618 | { | 641 | { |
619 | void *buf = tcp_conn->data; | 642 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; |
620 | int buf_size = tcp_conn->in.datalen; | 643 | int buf_size = tcp_conn->in.datalen; |
621 | int buf_left = buf_size - tcp_conn->data_copied; | 644 | int buf_left = buf_size - tcp_conn->data_copied; |
622 | int size = min(tcp_conn->in.copy, buf_left); | 645 | int size = min(tcp_conn->in.copy, buf_left); |
@@ -627,7 +650,7 @@ iscsi_tcp_copy(struct iscsi_tcp_conn *tcp_conn) | |||
627 | BUG_ON(size <= 0); | 650 | BUG_ON(size <= 0); |
628 | 651 | ||
629 | rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset, | 652 | rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset, |
630 | (char*)buf + tcp_conn->data_copied, size); | 653 | (char*)conn->data + tcp_conn->data_copied, size); |
631 | BUG_ON(rc); | 654 | BUG_ON(rc); |
632 | 655 | ||
633 | tcp_conn->in.offset += size; | 656 | tcp_conn->in.offset += size; |
@@ -745,10 +768,11 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn) | |||
745 | done: | 768 | done: |
746 | /* check for non-exceptional status */ | 769 | /* check for non-exceptional status */ |
747 | if (tcp_conn->in.hdr->flags & ISCSI_FLAG_DATA_STATUS) { | 770 | if (tcp_conn->in.hdr->flags & ISCSI_FLAG_DATA_STATUS) { |
748 | debug_scsi("done [sc %lx res %d itt 0x%x]\n", | 771 | debug_scsi("done [sc %lx res %d itt 0x%x flags 0x%x]\n", |
749 | (long)sc, sc->result, ctask->itt); | 772 | (long)sc, sc->result, ctask->itt, |
773 | tcp_conn->in.hdr->flags); | ||
750 | spin_lock(&conn->session->lock); | 774 | spin_lock(&conn->session->lock); |
751 | __iscsi_ctask_cleanup(conn, ctask); | 775 | iscsi_tcp_cleanup_ctask(conn, ctask); |
752 | __iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0); | 776 | __iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0); |
753 | spin_unlock(&conn->session->lock); | 777 | spin_unlock(&conn->session->lock); |
754 | } | 778 | } |
@@ -769,26 +793,25 @@ iscsi_data_recv(struct iscsi_conn *conn) | |||
769 | break; | 793 | break; |
770 | case ISCSI_OP_SCSI_CMD_RSP: | 794 | case ISCSI_OP_SCSI_CMD_RSP: |
771 | spin_lock(&conn->session->lock); | 795 | spin_lock(&conn->session->lock); |
772 | __iscsi_ctask_cleanup(conn, tcp_conn->in.ctask); | 796 | iscsi_tcp_cleanup_ctask(conn, tcp_conn->in.ctask); |
773 | spin_unlock(&conn->session->lock); | 797 | spin_unlock(&conn->session->lock); |
774 | case ISCSI_OP_TEXT_RSP: | 798 | case ISCSI_OP_TEXT_RSP: |
775 | case ISCSI_OP_LOGIN_RSP: | 799 | case ISCSI_OP_LOGIN_RSP: |
776 | case ISCSI_OP_NOOP_IN: | ||
777 | case ISCSI_OP_ASYNC_EVENT: | 800 | case ISCSI_OP_ASYNC_EVENT: |
778 | case ISCSI_OP_REJECT: | 801 | case ISCSI_OP_REJECT: |
779 | /* | 802 | /* |
780 | * Collect data segment to the connection's data | 803 | * Collect data segment to the connection's data |
781 | * placeholder | 804 | * placeholder |
782 | */ | 805 | */ |
783 | if (iscsi_tcp_copy(tcp_conn)) { | 806 | if (iscsi_tcp_copy(conn)) { |
784 | rc = -EAGAIN; | 807 | rc = -EAGAIN; |
785 | goto exit; | 808 | goto exit; |
786 | } | 809 | } |
787 | 810 | ||
788 | rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, tcp_conn->data, | 811 | rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, conn->data, |
789 | tcp_conn->in.datalen); | 812 | tcp_conn->in.datalen); |
790 | if (!rc && conn->datadgst_en && opcode != ISCSI_OP_LOGIN_RSP) | 813 | if (!rc && conn->datadgst_en && opcode != ISCSI_OP_LOGIN_RSP) |
791 | iscsi_recv_digest_update(tcp_conn, tcp_conn->data, | 814 | iscsi_recv_digest_update(tcp_conn, conn->data, |
792 | tcp_conn->in.datalen); | 815 | tcp_conn->in.datalen); |
793 | break; | 816 | break; |
794 | default: | 817 | default: |
@@ -843,7 +866,7 @@ more: | |||
843 | if (rc == -EAGAIN) | 866 | if (rc == -EAGAIN) |
844 | goto nomore; | 867 | goto nomore; |
845 | else { | 868 | else { |
846 | iscsi_conn_failure(conn, rc); | 869 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); |
847 | return 0; | 870 | return 0; |
848 | } | 871 | } |
849 | } | 872 | } |
@@ -897,7 +920,7 @@ more: | |||
897 | if (rc) { | 920 | if (rc) { |
898 | if (rc == -EAGAIN) | 921 | if (rc == -EAGAIN) |
899 | goto again; | 922 | goto again; |
900 | iscsi_conn_failure(conn, rc); | 923 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); |
901 | return 0; | 924 | return 0; |
902 | } | 925 | } |
903 | tcp_conn->in.copy -= tcp_conn->in.padding; | 926 | tcp_conn->in.copy -= tcp_conn->in.padding; |
@@ -1028,9 +1051,8 @@ iscsi_conn_set_callbacks(struct iscsi_conn *conn) | |||
1028 | } | 1051 | } |
1029 | 1052 | ||
1030 | static void | 1053 | static void |
1031 | iscsi_conn_restore_callbacks(struct iscsi_conn *conn) | 1054 | iscsi_conn_restore_callbacks(struct iscsi_tcp_conn *tcp_conn) |
1032 | { | 1055 | { |
1033 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | ||
1034 | struct sock *sk = tcp_conn->sock->sk; | 1056 | struct sock *sk = tcp_conn->sock->sk; |
1035 | 1057 | ||
1036 | /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */ | 1058 | /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */ |
@@ -1308,7 +1330,7 @@ iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask) | |||
1308 | ctask->imm_count - | 1330 | ctask->imm_count - |
1309 | ctask->unsol_count; | 1331 | ctask->unsol_count; |
1310 | 1332 | ||
1311 | debug_scsi("cmd [itt %x total %d imm %d imm_data %d " | 1333 | debug_scsi("cmd [itt 0x%x total %d imm %d imm_data %d " |
1312 | "r2t_data %d]\n", | 1334 | "r2t_data %d]\n", |
1313 | ctask->itt, ctask->total_length, ctask->imm_count, | 1335 | ctask->itt, ctask->total_length, ctask->imm_count, |
1314 | ctask->unsol_count, tcp_ctask->r2t_data_count); | 1336 | ctask->unsol_count, tcp_ctask->r2t_data_count); |
@@ -1636,7 +1658,7 @@ handle_xmstate_sol_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | |||
1636 | } | 1658 | } |
1637 | solicit_again: | 1659 | solicit_again: |
1638 | /* | 1660 | /* |
1639 | * send Data-Out whitnin this R2T sequence. | 1661 | * send Data-Out within this R2T sequence. |
1640 | */ | 1662 | */ |
1641 | if (!r2t->data_count) | 1663 | if (!r2t->data_count) |
1642 | goto data_out_done; | 1664 | goto data_out_done; |
@@ -1731,7 +1753,7 @@ handle_xmstate_w_pad(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | |||
1731 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; | 1753 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; |
1732 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | 1754 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; |
1733 | struct iscsi_data_task *dtask = tcp_ctask->dtask; | 1755 | struct iscsi_data_task *dtask = tcp_ctask->dtask; |
1734 | int sent, rc; | 1756 | int sent = 0, rc; |
1735 | 1757 | ||
1736 | tcp_ctask->xmstate &= ~XMSTATE_W_PAD; | 1758 | tcp_ctask->xmstate &= ~XMSTATE_W_PAD; |
1737 | iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad, | 1759 | iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad, |
@@ -1900,27 +1922,32 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx) | |||
1900 | tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER; | 1922 | tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER; |
1901 | /* initial operational parameters */ | 1923 | /* initial operational parameters */ |
1902 | tcp_conn->hdr_size = sizeof(struct iscsi_hdr); | 1924 | tcp_conn->hdr_size = sizeof(struct iscsi_hdr); |
1903 | tcp_conn->data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH; | ||
1904 | |||
1905 | /* allocate initial PDU receive place holder */ | ||
1906 | if (tcp_conn->data_size <= PAGE_SIZE) | ||
1907 | tcp_conn->data = kmalloc(tcp_conn->data_size, GFP_KERNEL); | ||
1908 | else | ||
1909 | tcp_conn->data = (void*)__get_free_pages(GFP_KERNEL, | ||
1910 | get_order(tcp_conn->data_size)); | ||
1911 | if (!tcp_conn->data) | ||
1912 | goto max_recv_dlenght_alloc_fail; | ||
1913 | 1925 | ||
1914 | return cls_conn; | 1926 | return cls_conn; |
1915 | 1927 | ||
1916 | max_recv_dlenght_alloc_fail: | ||
1917 | kfree(tcp_conn); | ||
1918 | tcp_conn_alloc_fail: | 1928 | tcp_conn_alloc_fail: |
1919 | iscsi_conn_teardown(cls_conn); | 1929 | iscsi_conn_teardown(cls_conn); |
1920 | return NULL; | 1930 | return NULL; |
1921 | } | 1931 | } |
1922 | 1932 | ||
1923 | static void | 1933 | static void |
1934 | iscsi_tcp_release_conn(struct iscsi_conn *conn) | ||
1935 | { | ||
1936 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | ||
1937 | |||
1938 | if (!tcp_conn->sock) | ||
1939 | return; | ||
1940 | |||
1941 | sock_hold(tcp_conn->sock->sk); | ||
1942 | iscsi_conn_restore_callbacks(tcp_conn); | ||
1943 | sock_put(tcp_conn->sock->sk); | ||
1944 | |||
1945 | sock_release(tcp_conn->sock); | ||
1946 | tcp_conn->sock = NULL; | ||
1947 | conn->recv_lock = NULL; | ||
1948 | } | ||
1949 | |||
1950 | static void | ||
1924 | iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn) | 1951 | iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn) |
1925 | { | 1952 | { |
1926 | struct iscsi_conn *conn = cls_conn->dd_data; | 1953 | struct iscsi_conn *conn = cls_conn->dd_data; |
@@ -1930,6 +1957,7 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn) | |||
1930 | if (conn->hdrdgst_en || conn->datadgst_en) | 1957 | if (conn->hdrdgst_en || conn->datadgst_en) |
1931 | digest = 1; | 1958 | digest = 1; |
1932 | 1959 | ||
1960 | iscsi_tcp_release_conn(conn); | ||
1933 | iscsi_conn_teardown(cls_conn); | 1961 | iscsi_conn_teardown(cls_conn); |
1934 | 1962 | ||
1935 | /* now free tcp_conn */ | 1963 | /* now free tcp_conn */ |
@@ -1944,15 +1972,18 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn) | |||
1944 | crypto_free_tfm(tcp_conn->data_rx_tfm); | 1972 | crypto_free_tfm(tcp_conn->data_rx_tfm); |
1945 | } | 1973 | } |
1946 | 1974 | ||
1947 | /* free conn->data, size = MaxRecvDataSegmentLength */ | ||
1948 | if (tcp_conn->data_size <= PAGE_SIZE) | ||
1949 | kfree(tcp_conn->data); | ||
1950 | else | ||
1951 | free_pages((unsigned long)tcp_conn->data, | ||
1952 | get_order(tcp_conn->data_size)); | ||
1953 | kfree(tcp_conn); | 1975 | kfree(tcp_conn); |
1954 | } | 1976 | } |
1955 | 1977 | ||
1978 | static void | ||
1979 | iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) | ||
1980 | { | ||
1981 | struct iscsi_conn *conn = cls_conn->dd_data; | ||
1982 | |||
1983 | iscsi_conn_stop(cls_conn, flag); | ||
1984 | iscsi_tcp_release_conn(conn); | ||
1985 | } | ||
1986 | |||
1956 | static int | 1987 | static int |
1957 | iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session, | 1988 | iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session, |
1958 | struct iscsi_cls_conn *cls_conn, uint64_t transport_eph, | 1989 | struct iscsi_cls_conn *cls_conn, uint64_t transport_eph, |
@@ -2001,52 +2032,6 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session, | |||
2001 | return 0; | 2032 | return 0; |
2002 | } | 2033 | } |
2003 | 2034 | ||
2004 | static void | ||
2005 | iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | ||
2006 | { | ||
2007 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; | ||
2008 | struct iscsi_r2t_info *r2t; | ||
2009 | |||
2010 | /* flush ctask's r2t queues */ | ||
2011 | while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) | ||
2012 | __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, | ||
2013 | sizeof(void*)); | ||
2014 | |||
2015 | __iscsi_ctask_cleanup(conn, ctask); | ||
2016 | } | ||
2017 | |||
2018 | static void | ||
2019 | iscsi_tcp_suspend_conn_rx(struct iscsi_conn *conn) | ||
2020 | { | ||
2021 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | ||
2022 | struct sock *sk; | ||
2023 | |||
2024 | if (!tcp_conn->sock) | ||
2025 | return; | ||
2026 | |||
2027 | sk = tcp_conn->sock->sk; | ||
2028 | write_lock_bh(&sk->sk_callback_lock); | ||
2029 | set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); | ||
2030 | write_unlock_bh(&sk->sk_callback_lock); | ||
2031 | } | ||
2032 | |||
2033 | static void | ||
2034 | iscsi_tcp_terminate_conn(struct iscsi_conn *conn) | ||
2035 | { | ||
2036 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | ||
2037 | |||
2038 | if (!tcp_conn->sock) | ||
2039 | return; | ||
2040 | |||
2041 | sock_hold(tcp_conn->sock->sk); | ||
2042 | iscsi_conn_restore_callbacks(conn); | ||
2043 | sock_put(tcp_conn->sock->sk); | ||
2044 | |||
2045 | sock_release(tcp_conn->sock); | ||
2046 | tcp_conn->sock = NULL; | ||
2047 | conn->recv_lock = NULL; | ||
2048 | } | ||
2049 | |||
2050 | /* called with host lock */ | 2035 | /* called with host lock */ |
2051 | static void | 2036 | static void |
2052 | iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask, | 2037 | iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask, |
@@ -2057,6 +2042,7 @@ iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask, | |||
2057 | iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr, | 2042 | iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr, |
2058 | sizeof(struct iscsi_hdr)); | 2043 | sizeof(struct iscsi_hdr)); |
2059 | tcp_mtask->xmstate = XMSTATE_IMM_HDR; | 2044 | tcp_mtask->xmstate = XMSTATE_IMM_HDR; |
2045 | tcp_mtask->sent = 0; | ||
2060 | 2046 | ||
2061 | if (mtask->data_count) | 2047 | if (mtask->data_count) |
2062 | iscsi_buf_init_iov(&tcp_mtask->sendbuf, (char*)mtask->data, | 2048 | iscsi_buf_init_iov(&tcp_mtask->sendbuf, (char*)mtask->data, |
@@ -2138,39 +2124,6 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, | |||
2138 | int value; | 2124 | int value; |
2139 | 2125 | ||
2140 | switch(param) { | 2126 | switch(param) { |
2141 | case ISCSI_PARAM_MAX_RECV_DLENGTH: { | ||
2142 | char *saveptr = tcp_conn->data; | ||
2143 | gfp_t flags = GFP_KERNEL; | ||
2144 | |||
2145 | sscanf(buf, "%d", &value); | ||
2146 | if (tcp_conn->data_size >= value) { | ||
2147 | iscsi_set_param(cls_conn, param, buf, buflen); | ||
2148 | break; | ||
2149 | } | ||
2150 | |||
2151 | spin_lock_bh(&session->lock); | ||
2152 | if (conn->stop_stage == STOP_CONN_RECOVER) | ||
2153 | flags = GFP_ATOMIC; | ||
2154 | spin_unlock_bh(&session->lock); | ||
2155 | |||
2156 | if (value <= PAGE_SIZE) | ||
2157 | tcp_conn->data = kmalloc(value, flags); | ||
2158 | else | ||
2159 | tcp_conn->data = (void*)__get_free_pages(flags, | ||
2160 | get_order(value)); | ||
2161 | if (tcp_conn->data == NULL) { | ||
2162 | tcp_conn->data = saveptr; | ||
2163 | return -ENOMEM; | ||
2164 | } | ||
2165 | if (tcp_conn->data_size <= PAGE_SIZE) | ||
2166 | kfree(saveptr); | ||
2167 | else | ||
2168 | free_pages((unsigned long)saveptr, | ||
2169 | get_order(tcp_conn->data_size)); | ||
2170 | iscsi_set_param(cls_conn, param, buf, buflen); | ||
2171 | tcp_conn->data_size = value; | ||
2172 | break; | ||
2173 | } | ||
2174 | case ISCSI_PARAM_HDRDGST_EN: | 2127 | case ISCSI_PARAM_HDRDGST_EN: |
2175 | iscsi_set_param(cls_conn, param, buf, buflen); | 2128 | iscsi_set_param(cls_conn, param, buf, buflen); |
2176 | tcp_conn->hdr_size = sizeof(struct iscsi_hdr); | 2129 | tcp_conn->hdr_size = sizeof(struct iscsi_hdr); |
@@ -2361,8 +2314,7 @@ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session) | |||
2361 | } | 2314 | } |
2362 | 2315 | ||
2363 | static struct scsi_host_template iscsi_sht = { | 2316 | static struct scsi_host_template iscsi_sht = { |
2364 | .name = "iSCSI Initiator over TCP/IP, v" | 2317 | .name = "iSCSI Initiator over TCP/IP", |
2365 | ISCSI_TCP_VERSION, | ||
2366 | .queuecommand = iscsi_queuecommand, | 2318 | .queuecommand = iscsi_queuecommand, |
2367 | .change_queue_depth = iscsi_change_queue_depth, | 2319 | .change_queue_depth = iscsi_change_queue_depth, |
2368 | .can_queue = ISCSI_XMIT_CMDS_MAX - 1, | 2320 | .can_queue = ISCSI_XMIT_CMDS_MAX - 1, |
@@ -2414,10 +2366,7 @@ static struct iscsi_transport iscsi_tcp_transport = { | |||
2414 | .get_conn_param = iscsi_tcp_conn_get_param, | 2366 | .get_conn_param = iscsi_tcp_conn_get_param, |
2415 | .get_session_param = iscsi_session_get_param, | 2367 | .get_session_param = iscsi_session_get_param, |
2416 | .start_conn = iscsi_conn_start, | 2368 | .start_conn = iscsi_conn_start, |
2417 | .stop_conn = iscsi_conn_stop, | 2369 | .stop_conn = iscsi_tcp_conn_stop, |
2418 | /* these are called as part of conn recovery */ | ||
2419 | .suspend_conn_recv = iscsi_tcp_suspend_conn_rx, | ||
2420 | .terminate_conn = iscsi_tcp_terminate_conn, | ||
2421 | /* IO */ | 2370 | /* IO */ |
2422 | .send_pdu = iscsi_conn_send_pdu, | 2371 | .send_pdu = iscsi_conn_send_pdu, |
2423 | .get_stats = iscsi_conn_get_stats, | 2372 | .get_stats = iscsi_conn_get_stats, |
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h index 808302832e68..6a4ee704e46e 100644 --- a/drivers/scsi/iscsi_tcp.h +++ b/drivers/scsi/iscsi_tcp.h | |||
@@ -78,8 +78,6 @@ struct iscsi_tcp_conn { | |||
78 | char hdrext[4*sizeof(__u16) + | 78 | char hdrext[4*sizeof(__u16) + |
79 | sizeof(__u32)]; | 79 | sizeof(__u32)]; |
80 | int data_copied; | 80 | int data_copied; |
81 | char *data; /* data placeholder */ | ||
82 | int data_size; /* actual recv_dlength */ | ||
83 | int stop_stage; /* conn_stop() flag: * | 81 | int stop_stage; /* conn_stop() flag: * |
84 | * stop to recover, * | 82 | * stop to recover, * |
85 | * stop to terminate */ | 83 | * stop to terminate */ |
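The data/data_size pair removed here was iscsi_tcp's private MaxRecvDataSegmentLength staging buffer; the allocation in the conn create path, the free in conn destroy and the reallocation case in iscsi_conn_set_param() all go with it. The libiscsi changes further down take over the job with a single DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH buffer per connection, reachable as conn->data and shared with the login mtask. A minimal sketch of that ownership, condensed from the libiscsi hunks below (field and label names are the ones used in this patch):

    /* iscsi_conn_setup(): one buffer serves login PDUs and generic recv */
    data = kmalloc(DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, GFP_KERNEL);
    if (!data)
            goto login_mtask_data_alloc_fail;
    conn->login_mtask->data = conn->data = data;

    /* iscsi_conn_teardown(): freed once, through the conn pointer */
    kfree(conn->data);

Dropping the per-transport buffer is also what allows the ISCSI_PARAM_MAX_RECV_DLENGTH reallocation logic to be deleted from iscsi_conn_set_param() above.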
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 7e6e031cc41b..5884cd26d53a 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
@@ -189,6 +189,7 @@ static void iscsi_complete_command(struct iscsi_session *session, | |||
189 | { | 189 | { |
190 | struct scsi_cmnd *sc = ctask->sc; | 190 | struct scsi_cmnd *sc = ctask->sc; |
191 | 191 | ||
192 | ctask->state = ISCSI_TASK_COMPLETED; | ||
192 | ctask->sc = NULL; | 193 | ctask->sc = NULL; |
193 | list_del_init(&ctask->running); | 194 | list_del_init(&ctask->running); |
194 | __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*)); | 195 | __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*)); |
@@ -275,6 +276,25 @@ out: | |||
275 | return rc; | 276 | return rc; |
276 | } | 277 | } |
277 | 278 | ||
279 | static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr) | ||
280 | { | ||
281 | struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr; | ||
282 | |||
283 | conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; | ||
284 | conn->tmfrsp_pdus_cnt++; | ||
285 | |||
286 | if (conn->tmabort_state != TMABORT_INITIAL) | ||
287 | return; | ||
288 | |||
289 | if (tmf->response == ISCSI_TMF_RSP_COMPLETE) | ||
290 | conn->tmabort_state = TMABORT_SUCCESS; | ||
291 | else if (tmf->response == ISCSI_TMF_RSP_NO_TASK) | ||
292 | conn->tmabort_state = TMABORT_NOT_FOUND; | ||
293 | else | ||
294 | conn->tmabort_state = TMABORT_FAILED; | ||
295 | wake_up(&conn->ehwait); | ||
296 | } | ||
297 | |||
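iscsi_tmf_rsp() pulls the task-management response handling out of the big switch in __iscsi_complete_pdu() and, unlike the inline code it replaces, gives ISCSI_TMF_RSP_NO_TASK its own TMABORT_NOT_FOUND state instead of folding it into the failure case. The fragment below is a condensed restatement of that mapping with comments; it introduces nothing that is not in the function above.

    if (tmf->response == ISCSI_TMF_RSP_COMPLETE)
            conn->tmabort_state = TMABORT_SUCCESS;   /* target aborted the task */
    else if (tmf->response == ISCSI_TMF_RSP_NO_TASK)
            conn->tmabort_state = TMABORT_NOT_FOUND; /* task unknown; it may have
                                                      * completed on its own */
    else
            conn->tmabort_state = TMABORT_FAILED;    /* anything else is a failure */
    wake_up(&conn->ehwait);                          /* unblock iscsi_eh_abort() */

iscsi_eh_abort(), further down in this diff, keys off exactly these three outcomes: TMABORT_SUCCESS goes straight to cleanup, TMABORT_NOT_FOUND counts as success only when ctask->sc has already completed, and everything else escalates through iscsi_conn_failure().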
278 | /** | 298 | /** |
279 | * __iscsi_complete_pdu - complete pdu | 299 | * __iscsi_complete_pdu - complete pdu |
280 | * @conn: iscsi conn | 300 | * @conn: iscsi conn |
@@ -340,6 +360,10 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, | |||
340 | 360 | ||
341 | switch(opcode) { | 361 | switch(opcode) { |
342 | case ISCSI_OP_LOGOUT_RSP: | 362 | case ISCSI_OP_LOGOUT_RSP: |
363 | if (datalen) { | ||
364 | rc = ISCSI_ERR_PROTO; | ||
365 | break; | ||
366 | } | ||
343 | conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; | 367 | conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; |
344 | /* fall through */ | 368 | /* fall through */ |
345 | case ISCSI_OP_LOGIN_RSP: | 369 | case ISCSI_OP_LOGIN_RSP: |
@@ -348,7 +372,8 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, | |||
348 | * login related PDU's exp_statsn is handled in | 372 | * login related PDU's exp_statsn is handled in |
349 | * userspace | 373 | * userspace |
350 | */ | 374 | */ |
351 | rc = iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen); | 375 | if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen)) |
376 | rc = ISCSI_ERR_CONN_FAILED; | ||
352 | list_del(&mtask->running); | 377 | list_del(&mtask->running); |
353 | if (conn->login_mtask != mtask) | 378 | if (conn->login_mtask != mtask) |
354 | __kfifo_put(session->mgmtpool.queue, | 379 | __kfifo_put(session->mgmtpool.queue, |
@@ -360,25 +385,17 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, | |||
360 | break; | 385 | break; |
361 | } | 386 | } |
362 | 387 | ||
363 | conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; | 388 | iscsi_tmf_rsp(conn, hdr); |
364 | conn->tmfrsp_pdus_cnt++; | ||
365 | if (conn->tmabort_state == TMABORT_INITIAL) { | ||
366 | conn->tmabort_state = | ||
367 | ((struct iscsi_tm_rsp *)hdr)-> | ||
368 | response == ISCSI_TMF_RSP_COMPLETE ? | ||
369 | TMABORT_SUCCESS:TMABORT_FAILED; | ||
370 | /* unblock eh_abort() */ | ||
371 | wake_up(&conn->ehwait); | ||
372 | } | ||
373 | break; | 389 | break; |
374 | case ISCSI_OP_NOOP_IN: | 390 | case ISCSI_OP_NOOP_IN: |
375 | if (hdr->ttt != ISCSI_RESERVED_TAG) { | 391 | if (hdr->ttt != ISCSI_RESERVED_TAG || datalen) { |
376 | rc = ISCSI_ERR_PROTO; | 392 | rc = ISCSI_ERR_PROTO; |
377 | break; | 393 | break; |
378 | } | 394 | } |
379 | conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; | 395 | conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; |
380 | 396 | ||
381 | rc = iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen); | 397 | if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen)) |
398 | rc = ISCSI_ERR_CONN_FAILED; | ||
382 | list_del(&mtask->running); | 399 | list_del(&mtask->running); |
383 | if (conn->login_mtask != mtask) | 400 | if (conn->login_mtask != mtask) |
384 | __kfifo_put(session->mgmtpool.queue, | 401 | __kfifo_put(session->mgmtpool.queue, |
@@ -391,14 +408,21 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, | |||
391 | } else if (itt == ISCSI_RESERVED_TAG) { | 408 | } else if (itt == ISCSI_RESERVED_TAG) { |
392 | switch(opcode) { | 409 | switch(opcode) { |
393 | case ISCSI_OP_NOOP_IN: | 410 | case ISCSI_OP_NOOP_IN: |
394 | if (!datalen) { | 411 | if (datalen) { |
395 | rc = iscsi_check_assign_cmdsn(session, | ||
396 | (struct iscsi_nopin*)hdr); | ||
397 | if (!rc && hdr->ttt != ISCSI_RESERVED_TAG) | ||
398 | rc = iscsi_recv_pdu(conn->cls_conn, | ||
399 | hdr, NULL, 0); | ||
400 | } else | ||
401 | rc = ISCSI_ERR_PROTO; | 412 | rc = ISCSI_ERR_PROTO; |
413 | break; | ||
414 | } | ||
415 | |||
416 | rc = iscsi_check_assign_cmdsn(session, | ||
417 | (struct iscsi_nopin*)hdr); | ||
418 | if (rc) | ||
419 | break; | ||
420 | |||
421 | if (hdr->ttt == ISCSI_RESERVED_TAG) | ||
422 | break; | ||
423 | |||
424 | if (iscsi_recv_pdu(conn->cls_conn, hdr, NULL, 0)) | ||
425 | rc = ISCSI_ERR_CONN_FAILED; | ||
402 | break; | 426 | break; |
403 | case ISCSI_OP_REJECT: | 427 | case ISCSI_OP_REJECT: |
404 | /* we need sth like iscsi_reject_rsp()*/ | 428 | /* we need sth like iscsi_reject_rsp()*/ |
@@ -568,20 +592,24 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) | |||
568 | } | 592 | } |
569 | 593 | ||
570 | /* process command queue */ | 594 | /* process command queue */ |
571 | while (__kfifo_get(conn->xmitqueue, (void*)&conn->ctask, | 595 | spin_lock_bh(&conn->session->lock); |
572 | sizeof(void*))) { | 596 | while (!list_empty(&conn->xmitqueue)) { |
573 | /* | 597 | /* |
574 | * iscsi tcp may readd the task to the xmitqueue to send | 598 | * iscsi tcp may readd the task to the xmitqueue to send |
575 | * write data | 599 | * write data |
576 | */ | 600 | */ |
577 | spin_lock_bh(&conn->session->lock); | 601 | conn->ctask = list_entry(conn->xmitqueue.next, |
578 | if (list_empty(&conn->ctask->running)) | 602 | struct iscsi_cmd_task, running); |
579 | list_add_tail(&conn->ctask->running, &conn->run_list); | 603 | conn->ctask->state = ISCSI_TASK_RUNNING; |
604 | list_move_tail(conn->xmitqueue.next, &conn->run_list); | ||
580 | spin_unlock_bh(&conn->session->lock); | 605 | spin_unlock_bh(&conn->session->lock); |
606 | |||
581 | rc = tt->xmit_cmd_task(conn, conn->ctask); | 607 | rc = tt->xmit_cmd_task(conn, conn->ctask); |
582 | if (rc) | 608 | if (rc) |
583 | goto again; | 609 | goto again; |
610 | spin_lock_bh(&conn->session->lock); | ||
584 | } | 611 | } |
612 | spin_unlock_bh(&conn->session->lock); | ||
585 | /* done with this ctask */ | 613 | /* done with this ctask */ |
586 | conn->ctask = NULL; | 614 | conn->ctask = NULL; |
587 | 615 | ||
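This hunk is the centre of the queue rework: conn->xmitqueue stops being a kfifo of pointers and becomes a list threaded through ctask->running, so a pending command can later be found and removed in place (see the eh_abort and fail_all_commands changes below) instead of being hunted down by draining a fifo, and the PENDING to RUNNING transition is published under the session lock. A condensed sketch of both ends of the queue, using only fields this patch introduces or touches:

    /* enqueue side (iscsi_queuecommand) */
    ctask->state = ISCSI_TASK_PENDING;
    list_add_tail(&ctask->running, &conn->xmitqueue);

    /* dequeue side (iscsi_data_xmit) */
    spin_lock_bh(&conn->session->lock);
    while (!list_empty(&conn->xmitqueue)) {
            conn->ctask = list_entry(conn->xmitqueue.next,
                                     struct iscsi_cmd_task, running);
            conn->ctask->state = ISCSI_TASK_RUNNING;
            /* stays findable: it moves to run_list rather than off-list */
            list_move_tail(conn->xmitqueue.next, &conn->run_list);
            spin_unlock_bh(&conn->session->lock);

            if (tt->xmit_cmd_task(conn, conn->ctask))
                    goto again;     /* non-zero rc: bail to the function's
                                     * 'again' label (outside this hunk) */

            spin_lock_bh(&conn->session->lock);
    }
    spin_unlock_bh(&conn->session->lock);

The INIT_LIST_HEAD() calls added below for ctask->running, mtask->running and conn->xmitqueue make these lists usable before the first command or management PDU is queued.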
@@ -691,6 +719,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) | |||
691 | sc->SCp.phase = session->age; | 719 | sc->SCp.phase = session->age; |
692 | sc->SCp.ptr = (char *)ctask; | 720 | sc->SCp.ptr = (char *)ctask; |
693 | 721 | ||
722 | ctask->state = ISCSI_TASK_PENDING; | ||
694 | ctask->mtask = NULL; | 723 | ctask->mtask = NULL; |
695 | ctask->conn = conn; | 724 | ctask->conn = conn; |
696 | ctask->sc = sc; | 725 | ctask->sc = sc; |
@@ -700,7 +729,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) | |||
700 | 729 | ||
701 | session->tt->init_cmd_task(ctask); | 730 | session->tt->init_cmd_task(ctask); |
702 | 731 | ||
703 | __kfifo_put(conn->xmitqueue, (void*)&ctask, sizeof(void*)); | 732 | list_add_tail(&ctask->running, &conn->xmitqueue); |
704 | debug_scsi( | 733 | debug_scsi( |
705 | "ctask enq [%s cid %d sc %lx itt 0x%x len %d cmdsn %d win %d]\n", | 734 | "ctask enq [%s cid %d sc %lx itt 0x%x len %d cmdsn %d win %d]\n", |
706 | sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read", | 735 | sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read", |
@@ -977,31 +1006,27 @@ static int iscsi_exec_abort_task(struct scsi_cmnd *sc, | |||
977 | /* | 1006 | /* |
978 | * xmit mutex and session lock must be held | 1007 | * xmit mutex and session lock must be held |
979 | */ | 1008 | */ |
980 | #define iscsi_remove_task(tasktype) \ | 1009 | static struct iscsi_mgmt_task * |
981 | static struct iscsi_##tasktype * \ | 1010 | iscsi_remove_mgmt_task(struct kfifo *fifo, uint32_t itt) |
982 | iscsi_remove_##tasktype(struct kfifo *fifo, uint32_t itt) \ | 1011 | { |
983 | { \ | 1012 | int i, nr_tasks = __kfifo_len(fifo) / sizeof(void*); |
984 | int i, nr_tasks = __kfifo_len(fifo) / sizeof(void*); \ | 1013 | struct iscsi_mgmt_task *task; |
985 | struct iscsi_##tasktype *task; \ | ||
986 | \ | ||
987 | debug_scsi("searching %d tasks\n", nr_tasks); \ | ||
988 | \ | ||
989 | for (i = 0; i < nr_tasks; i++) { \ | ||
990 | __kfifo_get(fifo, (void*)&task, sizeof(void*)); \ | ||
991 | debug_scsi("check task %u\n", task->itt); \ | ||
992 | \ | ||
993 | if (task->itt == itt) { \ | ||
994 | debug_scsi("matched task\n"); \ | ||
995 | return task; \ | ||
996 | } \ | ||
997 | \ | ||
998 | __kfifo_put(fifo, (void*)&task, sizeof(void*)); \ | ||
999 | } \ | ||
1000 | return NULL; \ | ||
1001 | } | ||
1002 | 1014 | ||
1003 | iscsi_remove_task(mgmt_task); | 1015 | debug_scsi("searching %d tasks\n", nr_tasks); |
1004 | iscsi_remove_task(cmd_task); | 1016 | |
1017 | for (i = 0; i < nr_tasks; i++) { | ||
1018 | __kfifo_get(fifo, (void*)&task, sizeof(void*)); | ||
1019 | debug_scsi("check task %u\n", task->itt); | ||
1020 | |||
1021 | if (task->itt == itt) { | ||
1022 | debug_scsi("matched task\n"); | ||
1023 | return task; | ||
1024 | } | ||
1025 | |||
1026 | __kfifo_put(fifo, (void*)&task, sizeof(void*)); | ||
1027 | } | ||
1028 | return NULL; | ||
1029 | } | ||
1005 | 1030 | ||
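With command tasks now tracked through ctask->state and the xmitqueue list, the iscsi_remove_task() macro that stamped out both iscsi_remove_cmd_task() and iscsi_remove_mgmt_task() shrinks to one plain function for the management fifo. The scan is the usual rotate-and-filter idiom for a kfifo, restated below with comments and without the debug output; as the comment above the function says, the caller must hold the xmit mutex and session lock so nothing else touches the fifo mid-scan.

    static struct iscsi_mgmt_task *
    iscsi_remove_mgmt_task(struct kfifo *fifo, uint32_t itt)
    {
            int i, nr_tasks = __kfifo_len(fifo) / sizeof(void*);
            struct iscsi_mgmt_task *task;

            /* pop every element once; re-push the ones that do not match,
             * so the fifo keeps its contents minus (at most) the match */
            for (i = 0; i < nr_tasks; i++) {
                    __kfifo_get(fifo, (void*)&task, sizeof(void*));
                    if (task->itt == itt)
                            return task;    /* ownership passes to the caller */
                    __kfifo_put(fifo, (void*)&task, sizeof(void*));
            }
            return NULL;
    }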
1006 | static int iscsi_ctask_mtask_cleanup(struct iscsi_cmd_task *ctask) | 1031 | static int iscsi_ctask_mtask_cleanup(struct iscsi_cmd_task *ctask) |
1007 | { | 1032 | { |
@@ -1027,12 +1052,13 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, | |||
1027 | { | 1052 | { |
1028 | struct scsi_cmnd *sc; | 1053 | struct scsi_cmnd *sc; |
1029 | 1054 | ||
1030 | conn->session->tt->cleanup_cmd_task(conn, ctask); | ||
1031 | iscsi_ctask_mtask_cleanup(ctask); | ||
1032 | |||
1033 | sc = ctask->sc; | 1055 | sc = ctask->sc; |
1034 | if (!sc) | 1056 | if (!sc) |
1035 | return; | 1057 | return; |
1058 | |||
1059 | conn->session->tt->cleanup_cmd_task(conn, ctask); | ||
1060 | iscsi_ctask_mtask_cleanup(ctask); | ||
1061 | |||
1036 | sc->result = err; | 1062 | sc->result = err; |
1037 | sc->resid = sc->request_bufflen; | 1063 | sc->resid = sc->request_bufflen; |
1038 | iscsi_complete_command(conn->session, ctask); | 1064 | iscsi_complete_command(conn->session, ctask); |
@@ -1043,7 +1069,6 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) | |||
1043 | struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)sc->SCp.ptr; | 1069 | struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)sc->SCp.ptr; |
1044 | struct iscsi_conn *conn = ctask->conn; | 1070 | struct iscsi_conn *conn = ctask->conn; |
1045 | struct iscsi_session *session = conn->session; | 1071 | struct iscsi_session *session = conn->session; |
1046 | struct iscsi_cmd_task *pending_ctask; | ||
1047 | int rc; | 1072 | int rc; |
1048 | 1073 | ||
1049 | conn->eh_abort_cnt++; | 1074 | conn->eh_abort_cnt++; |
@@ -1061,8 +1086,11 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) | |||
1061 | goto failed; | 1086 | goto failed; |
1062 | 1087 | ||
1063 | /* ctask completed before time out */ | 1088 | /* ctask completed before time out */ |
1064 | if (!ctask->sc) | 1089 | if (!ctask->sc) { |
1065 | goto success; | 1090 | spin_unlock_bh(&session->lock); |
1091 | debug_scsi("sc completed while abort in progress\n"); | ||
1092 | goto success_rel_mutex; | ||
1093 | } | ||
1066 | 1094 | ||
1067 | /* what should we do here ? */ | 1095 | /* what should we do here ? */ |
1068 | if (conn->ctask == ctask) { | 1096 | if (conn->ctask == ctask) { |
@@ -1071,17 +1099,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) | |||
1071 | goto failed; | 1099 | goto failed; |
1072 | } | 1100 | } |
1073 | 1101 | ||
1074 | /* check for the easy pending cmd abort */ | 1102 | if (ctask->state == ISCSI_TASK_PENDING) |
1075 | pending_ctask = iscsi_remove_cmd_task(conn->xmitqueue, ctask->itt); | 1103 | goto success_cleanup; |
1076 | if (pending_ctask) { | ||
1077 | /* iscsi_tcp queues write transfers on the xmitqueue */ | ||
1078 | if (list_empty(&pending_ctask->running)) { | ||
1079 | debug_scsi("found pending task\n"); | ||
1080 | goto success; | ||
1081 | } else | ||
1082 | __kfifo_put(conn->xmitqueue, (void*)&pending_ctask, | ||
1083 | sizeof(void*)); | ||
1084 | } | ||
1085 | 1104 | ||
1086 | conn->tmabort_state = TMABORT_INITIAL; | 1105 | conn->tmabort_state = TMABORT_INITIAL; |
1087 | 1106 | ||
@@ -1089,25 +1108,31 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) | |||
1089 | rc = iscsi_exec_abort_task(sc, ctask); | 1108 | rc = iscsi_exec_abort_task(sc, ctask); |
1090 | spin_lock_bh(&session->lock); | 1109 | spin_lock_bh(&session->lock); |
1091 | 1110 | ||
1092 | iscsi_ctask_mtask_cleanup(ctask); | ||
1093 | if (rc || sc->SCp.phase != session->age || | 1111 | if (rc || sc->SCp.phase != session->age || |
1094 | session->state != ISCSI_STATE_LOGGED_IN) | 1112 | session->state != ISCSI_STATE_LOGGED_IN) |
1095 | goto failed; | 1113 | goto failed; |
1114 | iscsi_ctask_mtask_cleanup(ctask); | ||
1096 | 1115 | ||
1097 | /* ctask completed before tmf abort response */ | 1116 | switch (conn->tmabort_state) { |
1098 | if (!ctask->sc) { | 1117 | case TMABORT_SUCCESS: |
1099 | debug_scsi("sc completed while abort in progress\n"); | 1118 | goto success_cleanup; |
1100 | goto success; | 1119 | case TMABORT_NOT_FOUND: |
1101 | } | 1120 | if (!ctask->sc) { |
1102 | 1121 | /* ctask completed before tmf abort response */ | |
1103 | if (conn->tmabort_state != TMABORT_SUCCESS) { | 1122 | spin_unlock_bh(&session->lock); |
1123 | debug_scsi("sc completed while abort in progress\n"); | ||
1124 | goto success_rel_mutex; | ||
1125 | } | ||
1126 | /* fall through */ | ||
1127 | default: | ||
1128 | /* timedout or failed */ | ||
1104 | spin_unlock_bh(&session->lock); | 1129 | spin_unlock_bh(&session->lock); |
1105 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); | 1130 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); |
1106 | spin_lock_bh(&session->lock); | 1131 | spin_lock_bh(&session->lock); |
1107 | goto failed; | 1132 | goto failed; |
1108 | } | 1133 | } |
1109 | 1134 | ||
1110 | success: | 1135 | success_cleanup: |
1111 | debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt); | 1136 | debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt); |
1112 | spin_unlock_bh(&session->lock); | 1137 | spin_unlock_bh(&session->lock); |
1113 | 1138 | ||
@@ -1121,6 +1146,7 @@ success: | |||
1121 | spin_unlock(&session->lock); | 1146 | spin_unlock(&session->lock); |
1122 | write_unlock_bh(conn->recv_lock); | 1147 | write_unlock_bh(conn->recv_lock); |
1123 | 1148 | ||
1149 | success_rel_mutex: | ||
1124 | mutex_unlock(&conn->xmitmutex); | 1150 | mutex_unlock(&conn->xmitmutex); |
1125 | return SUCCESS; | 1151 | return SUCCESS; |
1126 | 1152 | ||
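Taken together, the eh_abort changes replace the old "search the xmitqueue fifo" heuristic with the ctask->state field this patch introduces: a task still in ISCSI_TASK_PENDING has never been handed to the transport and is cleaned up locally (success_cleanup), a running task goes through the TMF exchange and the tmabort_state switch, and a task whose scsi command pointer was already cleared by iscsi_complete_command() (which also marks it ISCSI_TASK_COMPLETED) is reported as success without further work. A minimal sketch of the lifecycle the new code relies on, with the transitions exactly where this patch places them:

    /* iscsi_queuecommand() */
    ctask->state = ISCSI_TASK_PENDING;      /* sits on conn->xmitqueue only */

    /* iscsi_data_xmit() */
    ctask->state = ISCSI_TASK_RUNNING;      /* handed to the transport and
                                             * moved to conn->run_list */

    /* iscsi_complete_command() */
    ctask->state = ISCSI_TASK_COMPLETED;    /* sc detached, task returned to
                                             * the command pool */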
@@ -1263,6 +1289,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, | |||
1263 | if (cmd_task_size) | 1289 | if (cmd_task_size) |
1264 | ctask->dd_data = &ctask[1]; | 1290 | ctask->dd_data = &ctask[1]; |
1265 | ctask->itt = cmd_i; | 1291 | ctask->itt = cmd_i; |
1292 | INIT_LIST_HEAD(&ctask->running); | ||
1266 | } | 1293 | } |
1267 | 1294 | ||
1268 | spin_lock_init(&session->lock); | 1295 | spin_lock_init(&session->lock); |
@@ -1282,6 +1309,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, | |||
1282 | if (mgmt_task_size) | 1309 | if (mgmt_task_size) |
1283 | mtask->dd_data = &mtask[1]; | 1310 | mtask->dd_data = &mtask[1]; |
1284 | mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i; | 1311 | mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i; |
1312 | INIT_LIST_HEAD(&mtask->running); | ||
1285 | } | 1313 | } |
1286 | 1314 | ||
1287 | if (scsi_add_host(shost, NULL)) | 1315 | if (scsi_add_host(shost, NULL)) |
@@ -1322,15 +1350,18 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session) | |||
1322 | { | 1350 | { |
1323 | struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); | 1351 | struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); |
1324 | struct iscsi_session *session = iscsi_hostdata(shost->hostdata); | 1352 | struct iscsi_session *session = iscsi_hostdata(shost->hostdata); |
1353 | struct module *owner = cls_session->transport->owner; | ||
1325 | 1354 | ||
1326 | scsi_remove_host(shost); | 1355 | scsi_remove_host(shost); |
1327 | 1356 | ||
1328 | iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds); | 1357 | iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds); |
1329 | iscsi_pool_free(&session->cmdpool, (void**)session->cmds); | 1358 | iscsi_pool_free(&session->cmdpool, (void**)session->cmds); |
1330 | 1359 | ||
1360 | kfree(session->targetname); | ||
1361 | |||
1331 | iscsi_destroy_session(cls_session); | 1362 | iscsi_destroy_session(cls_session); |
1332 | scsi_host_put(shost); | 1363 | scsi_host_put(shost); |
1333 | module_put(cls_session->transport->owner); | 1364 | module_put(owner); |
1334 | } | 1365 | } |
1335 | EXPORT_SYMBOL_GPL(iscsi_session_teardown); | 1366 | EXPORT_SYMBOL_GPL(iscsi_session_teardown); |
1336 | 1367 | ||
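The reordering in iscsi_session_teardown() reads as a use-after-free fix: the module owner is cached before iscsi_destroy_session() runs, presumably because destroying the class session can free it and the cls_session->transport pointer with it. The pattern, restated with comments (names as in the function above):

    struct module *owner = cls_session->transport->owner;  /* read before
                                                             * the free */
    /* scsi_remove_host() and the pool teardown, shown in the hunk above,
     * sit here */
    iscsi_destroy_session(cls_session);     /* cls_session may be gone after
                                             * this point */
    scsi_host_put(shost);
    module_put(owner);                      /* drop the module ref last, via
                                             * the cached pointer */

The kfree(session->targetname) added in the same hunk frees the target name string, which was previously not released on teardown.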
@@ -1361,12 +1392,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx) | |||
1361 | conn->tmabort_state = TMABORT_INITIAL; | 1392 | conn->tmabort_state = TMABORT_INITIAL; |
1362 | INIT_LIST_HEAD(&conn->run_list); | 1393 | INIT_LIST_HEAD(&conn->run_list); |
1363 | INIT_LIST_HEAD(&conn->mgmt_run_list); | 1394 | INIT_LIST_HEAD(&conn->mgmt_run_list); |
1364 | 1395 | INIT_LIST_HEAD(&conn->xmitqueue); | |
1365 | /* initialize general xmit PDU commands queue */ | ||
1366 | conn->xmitqueue = kfifo_alloc(session->cmds_max * sizeof(void*), | ||
1367 | GFP_KERNEL, NULL); | ||
1368 | if (conn->xmitqueue == ERR_PTR(-ENOMEM)) | ||
1369 | goto xmitqueue_alloc_fail; | ||
1370 | 1396 | ||
1371 | /* initialize general immediate & non-immediate PDU commands queue */ | 1397 | /* initialize general immediate & non-immediate PDU commands queue */ |
1372 | conn->immqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*), | 1398 | conn->immqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*), |
@@ -1394,7 +1420,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx) | |||
1394 | data = kmalloc(DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, GFP_KERNEL); | 1420 | data = kmalloc(DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, GFP_KERNEL); |
1395 | if (!data) | 1421 | if (!data) |
1396 | goto login_mtask_data_alloc_fail; | 1422 | goto login_mtask_data_alloc_fail; |
1397 | conn->login_mtask->data = data; | 1423 | conn->login_mtask->data = conn->data = data; |
1398 | 1424 | ||
1399 | init_timer(&conn->tmabort_timer); | 1425 | init_timer(&conn->tmabort_timer); |
1400 | mutex_init(&conn->xmitmutex); | 1426 | mutex_init(&conn->xmitmutex); |
@@ -1410,8 +1436,6 @@ login_mtask_alloc_fail: | |||
1410 | mgmtqueue_alloc_fail: | 1436 | mgmtqueue_alloc_fail: |
1411 | kfifo_free(conn->immqueue); | 1437 | kfifo_free(conn->immqueue); |
1412 | immqueue_alloc_fail: | 1438 | immqueue_alloc_fail: |
1413 | kfifo_free(conn->xmitqueue); | ||
1414 | xmitqueue_alloc_fail: | ||
1415 | iscsi_destroy_conn(cls_conn); | 1439 | iscsi_destroy_conn(cls_conn); |
1416 | return NULL; | 1440 | return NULL; |
1417 | } | 1441 | } |
@@ -1432,12 +1456,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) | |||
1432 | 1456 | ||
1433 | set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); | 1457 | set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); |
1434 | mutex_lock(&conn->xmitmutex); | 1458 | mutex_lock(&conn->xmitmutex); |
1435 | if (conn->c_stage == ISCSI_CONN_INITIAL_STAGE) { | ||
1436 | if (session->tt->suspend_conn_recv) | ||
1437 | session->tt->suspend_conn_recv(conn); | ||
1438 | |||
1439 | session->tt->terminate_conn(conn); | ||
1440 | } | ||
1441 | 1459 | ||
1442 | spin_lock_bh(&session->lock); | 1460 | spin_lock_bh(&session->lock); |
1443 | conn->c_stage = ISCSI_CONN_CLEANUP_WAIT; | 1461 | conn->c_stage = ISCSI_CONN_CLEANUP_WAIT; |
@@ -1474,7 +1492,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) | |||
1474 | } | 1492 | } |
1475 | 1493 | ||
1476 | spin_lock_bh(&session->lock); | 1494 | spin_lock_bh(&session->lock); |
1477 | kfree(conn->login_mtask->data); | 1495 | kfree(conn->data); |
1496 | kfree(conn->persistent_address); | ||
1478 | __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask, | 1497 | __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask, |
1479 | sizeof(void*)); | 1498 | sizeof(void*)); |
1480 | list_del(&conn->item); | 1499 | list_del(&conn->item); |
@@ -1489,7 +1508,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) | |||
1489 | session->cmdsn = session->max_cmdsn = session->exp_cmdsn = 1; | 1508 | session->cmdsn = session->max_cmdsn = session->exp_cmdsn = 1; |
1490 | spin_unlock_bh(&session->lock); | 1509 | spin_unlock_bh(&session->lock); |
1491 | 1510 | ||
1492 | kfifo_free(conn->xmitqueue); | ||
1493 | kfifo_free(conn->immqueue); | 1511 | kfifo_free(conn->immqueue); |
1494 | kfifo_free(conn->mgmtqueue); | 1512 | kfifo_free(conn->mgmtqueue); |
1495 | 1513 | ||
@@ -1572,7 +1590,7 @@ static void fail_all_commands(struct iscsi_conn *conn) | |||
1572 | struct iscsi_cmd_task *ctask, *tmp; | 1590 | struct iscsi_cmd_task *ctask, *tmp; |
1573 | 1591 | ||
1574 | /* flush pending */ | 1592 | /* flush pending */ |
1575 | while (__kfifo_get(conn->xmitqueue, (void*)&ctask, sizeof(void*))) { | 1593 | list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) { |
1576 | debug_scsi("failing pending sc %p itt 0x%x\n", ctask->sc, | 1594 | debug_scsi("failing pending sc %p itt 0x%x\n", ctask->sc, |
1577 | ctask->itt); | 1595 | ctask->itt); |
1578 | fail_command(conn, ctask, DID_BUS_BUSY << 16); | 1596 | fail_command(conn, ctask, DID_BUS_BUSY << 16); |
@@ -1615,8 +1633,9 @@ static void iscsi_start_session_recovery(struct iscsi_session *session, | |||
1615 | set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); | 1633 | set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); |
1616 | spin_unlock_bh(&session->lock); | 1634 | spin_unlock_bh(&session->lock); |
1617 | 1635 | ||
1618 | if (session->tt->suspend_conn_recv) | 1636 | write_lock_bh(conn->recv_lock); |
1619 | session->tt->suspend_conn_recv(conn); | 1637 | set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); |
1638 | write_unlock_bh(conn->recv_lock); | ||
1620 | 1639 | ||
1621 | mutex_lock(&conn->xmitmutex); | 1640 | mutex_lock(&conn->xmitmutex); |
1622 | /* | 1641 | /* |
@@ -1635,7 +1654,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session, | |||
1635 | } | 1654 | } |
1636 | } | 1655 | } |
1637 | 1656 | ||
1638 | session->tt->terminate_conn(conn); | ||
1639 | /* | 1657 | /* |
1640 | * flush queues. | 1658 | * flush queues. |
1641 | */ | 1659 | */ |
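With the suspend_conn_recv and terminate_conn hooks gone from struct iscsi_transport, libiscsi now suspends the receive side itself during recovery: it takes conn->recv_lock (which the TCP transport presumably points at its socket callback lock, given the iscsi_tcp_suspend_conn_rx() code this replaces) and sets ISCSI_SUSPEND_BIT in conn->suspend_rx. Releasing the socket itself now happens in the transport's stop_conn hook (iscsi_tcp_conn_stop() above). A condensed restatement of the recovery-side fence:

    /* iscsi_start_session_recovery(): stop TX, then fence off RX */
    set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
    spin_unlock_bh(&session->lock);

    write_lock_bh(conn->recv_lock);                 /* serialise against the
                                                     * receive callbacks */
    set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
    write_unlock_bh(conn->recv_lock);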
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 5c68cdd8736f..d384c16f4a87 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c | |||
@@ -222,7 +222,7 @@ lpfc_issue_lip(struct Scsi_Host *host) | |||
222 | pmboxq->mb.mbxCommand = MBX_DOWN_LINK; | 222 | pmboxq->mb.mbxCommand = MBX_DOWN_LINK; |
223 | pmboxq->mb.mbxOwner = OWN_HOST; | 223 | pmboxq->mb.mbxOwner = OWN_HOST; |
224 | 224 | ||
225 | mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); | 225 | mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2); |
226 | 226 | ||
227 | if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) { | 227 | if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) { |
228 | memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); | 228 | memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); |
@@ -884,7 +884,7 @@ sysfs_mbox_write(struct kobject *kobj, char *buf, loff_t off, size_t count) | |||
884 | phba->sysfs_mbox.mbox == NULL ) { | 884 | phba->sysfs_mbox.mbox == NULL ) { |
885 | sysfs_mbox_idle(phba); | 885 | sysfs_mbox_idle(phba); |
886 | spin_unlock_irq(host->host_lock); | 886 | spin_unlock_irq(host->host_lock); |
887 | return -EINVAL; | 887 | return -EAGAIN; |
888 | } | 888 | } |
889 | } | 889 | } |
890 | 890 | ||
@@ -1000,14 +1000,15 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count) | |||
1000 | spin_unlock_irq(phba->host->host_lock); | 1000 | spin_unlock_irq(phba->host->host_lock); |
1001 | rc = lpfc_sli_issue_mbox_wait (phba, | 1001 | rc = lpfc_sli_issue_mbox_wait (phba, |
1002 | phba->sysfs_mbox.mbox, | 1002 | phba->sysfs_mbox.mbox, |
1003 | phba->fc_ratov * 2); | 1003 | lpfc_mbox_tmo_val(phba, |
1004 | phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ); | ||
1004 | spin_lock_irq(phba->host->host_lock); | 1005 | spin_lock_irq(phba->host->host_lock); |
1005 | } | 1006 | } |
1006 | 1007 | ||
1007 | if (rc != MBX_SUCCESS) { | 1008 | if (rc != MBX_SUCCESS) { |
1008 | sysfs_mbox_idle(phba); | 1009 | sysfs_mbox_idle(phba); |
1009 | spin_unlock_irq(host->host_lock); | 1010 | spin_unlock_irq(host->host_lock); |
1010 | return -ENODEV; | 1011 | return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV; |
1011 | } | 1012 | } |
1012 | phba->sysfs_mbox.state = SMBOX_READING; | 1013 | phba->sysfs_mbox.state = SMBOX_READING; |
1013 | } | 1014 | } |
@@ -1016,7 +1017,7 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count) | |||
1016 | printk(KERN_WARNING "mbox_read: Bad State\n"); | 1017 | printk(KERN_WARNING "mbox_read: Bad State\n"); |
1017 | sysfs_mbox_idle(phba); | 1018 | sysfs_mbox_idle(phba); |
1018 | spin_unlock_irq(host->host_lock); | 1019 | spin_unlock_irq(host->host_lock); |
1019 | return -EINVAL; | 1020 | return -EAGAIN; |
1020 | } | 1021 | } |
1021 | 1022 | ||
1022 | memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count); | 1023 | memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count); |
@@ -1210,8 +1211,10 @@ lpfc_get_stats(struct Scsi_Host *shost) | |||
1210 | struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; | 1211 | struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; |
1211 | struct lpfc_sli *psli = &phba->sli; | 1212 | struct lpfc_sli *psli = &phba->sli; |
1212 | struct fc_host_statistics *hs = &phba->link_stats; | 1213 | struct fc_host_statistics *hs = &phba->link_stats; |
1214 | struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets; | ||
1213 | LPFC_MBOXQ_t *pmboxq; | 1215 | LPFC_MBOXQ_t *pmboxq; |
1214 | MAILBOX_t *pmb; | 1216 | MAILBOX_t *pmb; |
1217 | unsigned long seconds; | ||
1215 | int rc = 0; | 1218 | int rc = 0; |
1216 | 1219 | ||
1217 | pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 1220 | pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
@@ -1272,22 +1275,103 @@ lpfc_get_stats(struct Scsi_Host *shost) | |||
1272 | hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt; | 1275 | hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt; |
1273 | hs->error_frames = pmb->un.varRdLnk.crcCnt; | 1276 | hs->error_frames = pmb->un.varRdLnk.crcCnt; |
1274 | 1277 | ||
1278 | hs->link_failure_count -= lso->link_failure_count; | ||
1279 | hs->loss_of_sync_count -= lso->loss_of_sync_count; | ||
1280 | hs->loss_of_signal_count -= lso->loss_of_signal_count; | ||
1281 | hs->prim_seq_protocol_err_count -= lso->prim_seq_protocol_err_count; | ||
1282 | hs->invalid_tx_word_count -= lso->invalid_tx_word_count; | ||
1283 | hs->invalid_crc_count -= lso->invalid_crc_count; | ||
1284 | hs->error_frames -= lso->error_frames; | ||
1285 | |||
1275 | if (phba->fc_topology == TOPOLOGY_LOOP) { | 1286 | if (phba->fc_topology == TOPOLOGY_LOOP) { |
1276 | hs->lip_count = (phba->fc_eventTag >> 1); | 1287 | hs->lip_count = (phba->fc_eventTag >> 1); |
1288 | hs->lip_count -= lso->link_events; | ||
1277 | hs->nos_count = -1; | 1289 | hs->nos_count = -1; |
1278 | } else { | 1290 | } else { |
1279 | hs->lip_count = -1; | 1291 | hs->lip_count = -1; |
1280 | hs->nos_count = (phba->fc_eventTag >> 1); | 1292 | hs->nos_count = (phba->fc_eventTag >> 1); |
1293 | hs->nos_count -= lso->link_events; | ||
1281 | } | 1294 | } |
1282 | 1295 | ||
1283 | hs->dumped_frames = -1; | 1296 | hs->dumped_frames = -1; |
1284 | 1297 | ||
1285 | /* FIX ME */ | 1298 | seconds = get_seconds(); |
1286 | /*hs->SecondsSinceLastReset = (jiffies - lpfc_loadtime) / HZ;*/ | 1299 | if (seconds < psli->stats_start) |
1300 | hs->seconds_since_last_reset = seconds + | ||
1301 | ((unsigned long)-1 - psli->stats_start); | ||
1302 | else | ||
1303 | hs->seconds_since_last_reset = seconds - psli->stats_start; | ||
1287 | 1304 | ||
1288 | return hs; | 1305 | return hs; |
1289 | } | 1306 | } |
1290 | 1307 | ||
1308 | static void | ||
1309 | lpfc_reset_stats(struct Scsi_Host *shost) | ||
1310 | { | ||
1311 | struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; | ||
1312 | struct lpfc_sli *psli = &phba->sli; | ||
1313 | struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets; | ||
1314 | LPFC_MBOXQ_t *pmboxq; | ||
1315 | MAILBOX_t *pmb; | ||
1316 | int rc = 0; | ||
1317 | |||
1318 | pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
1319 | if (!pmboxq) | ||
1320 | return; | ||
1321 | memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); | ||
1322 | |||
1323 | pmb = &pmboxq->mb; | ||
1324 | pmb->mbxCommand = MBX_READ_STATUS; | ||
1325 | pmb->mbxOwner = OWN_HOST; | ||
1326 | pmb->un.varWords[0] = 0x1; /* reset request */ | ||
1327 | pmboxq->context1 = NULL; | ||
1328 | |||
1329 | if ((phba->fc_flag & FC_OFFLINE_MODE) || | ||
1330 | (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) | ||
1331 | rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); | ||
1332 | else | ||
1333 | rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); | ||
1334 | |||
1335 | if (rc != MBX_SUCCESS) { | ||
1336 | if (rc == MBX_TIMEOUT) | ||
1337 | pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; | ||
1338 | else | ||
1339 | mempool_free(pmboxq, phba->mbox_mem_pool); | ||
1340 | return; | ||
1341 | } | ||
1342 | |||
1343 | memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); | ||
1344 | pmb->mbxCommand = MBX_READ_LNK_STAT; | ||
1345 | pmb->mbxOwner = OWN_HOST; | ||
1346 | pmboxq->context1 = NULL; | ||
1347 | |||
1348 | if ((phba->fc_flag & FC_OFFLINE_MODE) || | ||
1349 | (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) | ||
1350 | rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); | ||
1351 | else | ||
1352 | rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); | ||
1353 | |||
1354 | if (rc != MBX_SUCCESS) { | ||
1355 | if (rc == MBX_TIMEOUT) | ||
1356 | pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; | ||
1357 | else | ||
1358 | mempool_free( pmboxq, phba->mbox_mem_pool); | ||
1359 | return; | ||
1360 | } | ||
1361 | |||
1362 | lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt; | ||
1363 | lso->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt; | ||
1364 | lso->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt; | ||
1365 | lso->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt; | ||
1366 | lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord; | ||
1367 | lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt; | ||
1368 | lso->error_frames = pmb->un.varRdLnk.crcCnt; | ||
1369 | lso->link_events = (phba->fc_eventTag >> 1); | ||
1370 | |||
1371 | psli->stats_start = get_seconds(); | ||
1372 | |||
1373 | return; | ||
1374 | } | ||
1291 | 1375 | ||
1292 | /* | 1376 | /* |
1293 | * The LPFC driver treats linkdown handling as target loss events so there | 1377 | * The LPFC driver treats linkdown handling as target loss events so there |
@@ -1431,8 +1515,7 @@ struct fc_function_template lpfc_transport_functions = { | |||
1431 | */ | 1515 | */ |
1432 | 1516 | ||
1433 | .get_fc_host_stats = lpfc_get_stats, | 1517 | .get_fc_host_stats = lpfc_get_stats, |
1434 | 1518 | .reset_fc_host_stats = lpfc_reset_stats, | |
1435 | /* the LPFC driver doesn't support resetting stats yet */ | ||
1436 | 1519 | ||
1437 | .dd_fcrport_size = sizeof(struct lpfc_rport_data), | 1520 | .dd_fcrport_size = sizeof(struct lpfc_rport_data), |
1438 | .show_rport_maxframe_size = 1, | 1521 | .show_rport_maxframe_size = 1, |
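The statistics changes let the FC transport's reset_fc_host_stats work without any way to clear the counters in the adapter: lpfc_reset_stats() snapshots the current READ_LNK_STAT values into psli->lnk_stat_offsets and stamps psli->stats_start, and lpfc_get_stats() subtracts those offsets before reporting. The seconds_since_last_reset computation also tolerates the unsigned seconds counter wrapping. A small self-contained illustration of that wrap-safe subtraction; the helper name is invented for the example, the arithmetic mirrors the hunk above.

    #include <limits.h>

    /* Mirrors lpfc_get_stats(): elapsed = now - start, tolerating one
     * wrap of the unsigned seconds counter since the last reset. */
    static unsigned long elapsed_seconds(unsigned long now, unsigned long start)
    {
            if (now < start)        /* counter wrapped after the reset */
                    return now + (ULONG_MAX - start);
            return now - start;
    }

In the driver the two inputs are get_seconds() and psli->stats_start, the latter being re-stamped at the end of lpfc_reset_stats().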
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 517e9e4dd461..2a176467f71b 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h | |||
@@ -127,6 +127,7 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *); | |||
127 | void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *); | 127 | void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *); |
128 | void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *); | 128 | void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *); |
129 | LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *); | 129 | LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *); |
130 | int lpfc_mbox_tmo_val(struct lpfc_hba *, int); | ||
130 | 131 | ||
131 | int lpfc_mem_alloc(struct lpfc_hba *); | 132 | int lpfc_mem_alloc(struct lpfc_hba *); |
132 | void lpfc_mem_free(struct lpfc_hba *); | 133 | void lpfc_mem_free(struct lpfc_hba *); |
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index b65ee57af53e..bbb7310210b0 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c | |||
@@ -131,6 +131,7 @@ lpfc_ct_unsol_event(struct lpfc_hba * phba, | |||
131 | } | 131 | } |
132 | 132 | ||
133 | ct_unsol_event_exit_piocbq: | 133 | ct_unsol_event_exit_piocbq: |
134 | list_del(&head); | ||
134 | if (pmbuf) { | 135 | if (pmbuf) { |
135 | list_for_each_entry_safe(matp, next_matp, &pmbuf->list, list) { | 136 | list_for_each_entry_safe(matp, next_matp, &pmbuf->list, list) { |
136 | lpfc_mbuf_free(phba, matp->virt, matp->phys); | 137 | lpfc_mbuf_free(phba, matp->virt, matp->phys); |
@@ -481,7 +482,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, | |||
481 | if (CTrsp->CommandResponse.bits.CmdRsp == | 482 | if (CTrsp->CommandResponse.bits.CmdRsp == |
482 | be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) { | 483 | be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) { |
483 | lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, | 484 | lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, |
484 | "%d:0239 NameServer Rsp " | 485 | "%d:0208 NameServer Rsp " |
485 | "Data: x%x\n", | 486 | "Data: x%x\n", |
486 | phba->brd_no, | 487 | phba->brd_no, |
487 | phba->fc_flag); | 488 | phba->fc_flag); |
@@ -588,13 +589,9 @@ lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp) | |||
588 | 589 | ||
589 | lpfc_decode_firmware_rev(phba, fwrev, 0); | 590 | lpfc_decode_firmware_rev(phba, fwrev, 0); |
590 | 591 | ||
591 | if (phba->Port[0]) { | 592 | sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName, |
592 | sprintf(symbp, "Emulex %s Port %s FV%s DV%s", phba->ModelName, | 593 | fwrev, lpfc_release_version); |
593 | phba->Port, fwrev, lpfc_release_version); | 594 | return; |
594 | } else { | ||
595 | sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName, | ||
596 | fwrev, lpfc_release_version); | ||
597 | } | ||
598 | } | 595 | } |
599 | 596 | ||
600 | /* | 597 | /* |
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index b89f6cb641e6..3567de613162 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c | |||
@@ -1848,9 +1848,12 @@ static void | |||
1848 | lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, | 1848 | lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, |
1849 | struct lpfc_iocbq * rspiocb) | 1849 | struct lpfc_iocbq * rspiocb) |
1850 | { | 1850 | { |
1851 | IOCB_t *irsp; | ||
1851 | struct lpfc_nodelist *ndlp; | 1852 | struct lpfc_nodelist *ndlp; |
1852 | LPFC_MBOXQ_t *mbox = NULL; | 1853 | LPFC_MBOXQ_t *mbox = NULL; |
1853 | 1854 | ||
1855 | irsp = &rspiocb->iocb; | ||
1856 | |||
1854 | ndlp = (struct lpfc_nodelist *) cmdiocb->context1; | 1857 | ndlp = (struct lpfc_nodelist *) cmdiocb->context1; |
1855 | if (cmdiocb->context_un.mbox) | 1858 | if (cmdiocb->context_un.mbox) |
1856 | mbox = cmdiocb->context_un.mbox; | 1859 | mbox = cmdiocb->context_un.mbox; |
@@ -1893,9 +1896,15 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, | |||
1893 | mempool_free( mbox, phba->mbox_mem_pool); | 1896 | mempool_free( mbox, phba->mbox_mem_pool); |
1894 | } else { | 1897 | } else { |
1895 | mempool_free( mbox, phba->mbox_mem_pool); | 1898 | mempool_free( mbox, phba->mbox_mem_pool); |
1896 | if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) { | 1899 | /* Do not call NO_LIST for lpfc_els_abort'ed ELS cmds */ |
1897 | lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); | 1900 | if (!((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && |
1898 | ndlp = NULL; | 1901 | ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) || |
1902 | (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) || | ||
1903 | (irsp->un.ulpWord[4] == IOERR_SLI_DOWN)))) { | ||
1904 | if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) { | ||
1905 | lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); | ||
1906 | ndlp = NULL; | ||
1907 | } | ||
1899 | } | 1908 | } |
1900 | } | 1909 | } |
1901 | } | 1910 | } |
@@ -2839,7 +2848,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | |||
2839 | 2848 | ||
2840 | /* Xmit ELS RPS ACC response tag <ulpIoTag> */ | 2849 | /* Xmit ELS RPS ACC response tag <ulpIoTag> */ |
2841 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, | 2850 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, |
2842 | "%d:0128 Xmit ELS RPS ACC response tag x%x " | 2851 | "%d:0118 Xmit ELS RPS ACC response tag x%x " |
2843 | "Data: x%x x%x x%x x%x x%x\n", | 2852 | "Data: x%x x%x x%x x%x x%x\n", |
2844 | phba->brd_no, | 2853 | phba->brd_no, |
2845 | elsiocb->iocb.ulpIoTag, | 2854 | elsiocb->iocb.ulpIoTag, |
@@ -2948,7 +2957,7 @@ lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize, | |||
2948 | 2957 | ||
2949 | /* Xmit ELS RPL ACC response tag <ulpIoTag> */ | 2958 | /* Xmit ELS RPL ACC response tag <ulpIoTag> */ |
2950 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, | 2959 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, |
2951 | "%d:0128 Xmit ELS RPL ACC response tag x%x " | 2960 | "%d:0120 Xmit ELS RPL ACC response tag x%x " |
2952 | "Data: x%x x%x x%x x%x x%x\n", | 2961 | "Data: x%x x%x x%x x%x x%x\n", |
2953 | phba->brd_no, | 2962 | phba->brd_no, |
2954 | elsiocb->iocb.ulpIoTag, | 2963 | elsiocb->iocb.ulpIoTag, |
@@ -3109,7 +3118,7 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, | |||
3109 | struct lpfc_nodelist *ndlp, *next_ndlp; | 3118 | struct lpfc_nodelist *ndlp, *next_ndlp; |
3110 | 3119 | ||
3111 | /* FAN received */ | 3120 | /* FAN received */ |
3112 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "%d:265 FAN received\n", | 3121 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "%d:0265 FAN received\n", |
3113 | phba->brd_no); | 3122 | phba->brd_no); |
3114 | 3123 | ||
3115 | icmd = &cmdiocb->iocb; | 3124 | icmd = &cmdiocb->iocb; |
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 4d6cf990c4fc..b2f1552f1848 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c | |||
@@ -1557,6 +1557,8 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) | |||
1557 | mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; | 1557 | mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
1558 | } | 1558 | } |
1559 | } | 1559 | } |
1560 | |||
1561 | spin_lock_irq(phba->host->host_lock); | ||
1560 | list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { | 1562 | list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { |
1561 | if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && | 1563 | if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && |
1562 | (ndlp == (struct lpfc_nodelist *) mb->context2)) { | 1564 | (ndlp == (struct lpfc_nodelist *) mb->context2)) { |
@@ -1569,6 +1571,7 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) | |||
1569 | mempool_free(mb, phba->mbox_mem_pool); | 1571 | mempool_free(mb, phba->mbox_mem_pool); |
1570 | } | 1572 | } |
1571 | } | 1573 | } |
1574 | spin_unlock_irq(phba->host->host_lock); | ||
1572 | 1575 | ||
1573 | lpfc_els_abort(phba,ndlp,0); | 1576 | lpfc_els_abort(phba,ndlp,0); |
1574 | spin_lock_irq(phba->host->host_lock); | 1577 | spin_lock_irq(phba->host->host_lock); |
@@ -1782,7 +1785,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did) | |||
1782 | /* LOG change to REGLOGIN */ | 1785 | /* LOG change to REGLOGIN */ |
1783 | /* FIND node DID reglogin */ | 1786 | /* FIND node DID reglogin */ |
1784 | lpfc_printf_log(phba, KERN_INFO, LOG_NODE, | 1787 | lpfc_printf_log(phba, KERN_INFO, LOG_NODE, |
1785 | "%d:0931 FIND node DID reglogin" | 1788 | "%d:0901 FIND node DID reglogin" |
1786 | " Data: x%p x%x x%x x%x\n", | 1789 | " Data: x%p x%x x%x x%x\n", |
1787 | phba->brd_no, | 1790 | phba->brd_no, |
1788 | ndlp, ndlp->nlp_DID, | 1791 | ndlp, ndlp->nlp_DID, |
@@ -1805,7 +1808,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did) | |||
1805 | /* LOG change to PRLI */ | 1808 | /* LOG change to PRLI */ |
1806 | /* FIND node DID prli */ | 1809 | /* FIND node DID prli */ |
1807 | lpfc_printf_log(phba, KERN_INFO, LOG_NODE, | 1810 | lpfc_printf_log(phba, KERN_INFO, LOG_NODE, |
1808 | "%d:0931 FIND node DID prli " | 1811 | "%d:0902 FIND node DID prli " |
1809 | "Data: x%p x%x x%x x%x\n", | 1812 | "Data: x%p x%x x%x x%x\n", |
1810 | phba->brd_no, | 1813 | phba->brd_no, |
1811 | ndlp, ndlp->nlp_DID, | 1814 | ndlp, ndlp->nlp_DID, |
@@ -1828,7 +1831,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did) | |||
1828 | /* LOG change to NPR */ | 1831 | /* LOG change to NPR */ |
1829 | /* FIND node DID npr */ | 1832 | /* FIND node DID npr */ |
1830 | lpfc_printf_log(phba, KERN_INFO, LOG_NODE, | 1833 | lpfc_printf_log(phba, KERN_INFO, LOG_NODE, |
1831 | "%d:0931 FIND node DID npr " | 1834 | "%d:0903 FIND node DID npr " |
1832 | "Data: x%p x%x x%x x%x\n", | 1835 | "Data: x%p x%x x%x x%x\n", |
1833 | phba->brd_no, | 1836 | phba->brd_no, |
1834 | ndlp, ndlp->nlp_DID, | 1837 | ndlp, ndlp->nlp_DID, |
@@ -1851,7 +1854,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did) | |||
1851 | /* LOG change to UNUSED */ | 1854 | /* LOG change to UNUSED */ |
1852 | /* FIND node DID unused */ | 1855 | /* FIND node DID unused */ |
1853 | lpfc_printf_log(phba, KERN_INFO, LOG_NODE, | 1856 | lpfc_printf_log(phba, KERN_INFO, LOG_NODE, |
1854 | "%d:0931 FIND node DID unused " | 1857 | "%d:0905 FIND node DID unused " |
1855 | "Data: x%p x%x x%x x%x\n", | 1858 | "Data: x%p x%x x%x x%x\n", |
1856 | phba->brd_no, | 1859 | phba->brd_no, |
1857 | ndlp, ndlp->nlp_DID, | 1860 | ndlp, ndlp->nlp_DID, |
@@ -2335,7 +2338,7 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba) | |||
2335 | initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 2338 | initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
2336 | if (!initlinkmbox) { | 2339 | if (!initlinkmbox) { |
2337 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, | 2340 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, |
2338 | "%d:0226 Device Discovery " | 2341 | "%d:0206 Device Discovery " |
2339 | "completion error\n", | 2342 | "completion error\n", |
2340 | phba->brd_no); | 2343 | phba->brd_no); |
2341 | phba->hba_state = LPFC_HBA_ERROR; | 2344 | phba->hba_state = LPFC_HBA_ERROR; |
@@ -2365,7 +2368,7 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba) | |||
2365 | if (!clearlambox) { | 2368 | if (!clearlambox) { |
2366 | clrlaerr = 1; | 2369 | clrlaerr = 1; |
2367 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, | 2370 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, |
2368 | "%d:0226 Device Discovery " | 2371 | "%d:0207 Device Discovery " |
2369 | "completion error\n", | 2372 | "completion error\n", |
2370 | phba->brd_no); | 2373 | phba->brd_no); |
2371 | phba->hba_state = LPFC_HBA_ERROR; | 2374 | phba->hba_state = LPFC_HBA_ERROR; |
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index ef47b824cbed..f6948ffe689a 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c | |||
@@ -1379,6 +1379,7 @@ lpfc_offline(struct lpfc_hba * phba) | |||
1379 | /* stop all timers associated with this hba */ | 1379 | /* stop all timers associated with this hba */ |
1380 | lpfc_stop_timer(phba); | 1380 | lpfc_stop_timer(phba); |
1381 | phba->work_hba_events = 0; | 1381 | phba->work_hba_events = 0; |
1382 | phba->work_ha = 0; | ||
1382 | 1383 | ||
1383 | lpfc_printf_log(phba, | 1384 | lpfc_printf_log(phba, |
1384 | KERN_WARNING, | 1385 | KERN_WARNING, |
@@ -1616,7 +1617,11 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
1616 | goto out_free_iocbq; | 1617 | goto out_free_iocbq; |
1617 | } | 1618 | } |
1618 | 1619 | ||
1619 | /* We can rely on a queue depth attribute only after SLI HBA setup */ | 1620 | /* |
1621 | * Set initial can_queue value since 0 is no longer supported and | ||
1622 | * scsi_add_host will fail. This will be adjusted later based on the | ||
1623 | * max xri value determined in hba setup. | ||
1624 | */ | ||
1620 | host->can_queue = phba->cfg_hba_queue_depth - 10; | 1625 | host->can_queue = phba->cfg_hba_queue_depth - 10; |
1621 | 1626 | ||
1622 | /* Tell the midlayer we support 16 byte commands */ | 1627 | /* Tell the midlayer we support 16 byte commands */ |
@@ -1656,6 +1661,12 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
1656 | goto out_free_irq; | 1661 | goto out_free_irq; |
1657 | } | 1662 | } |
1658 | 1663 | ||
1664 | /* | ||
1665 | * hba setup may have changed the hba_queue_depth so we need to adjust | ||
1666 | * the value of can_queue. | ||
1667 | */ | ||
1668 | host->can_queue = phba->cfg_hba_queue_depth - 10; | ||
1669 | |||
1659 | lpfc_discovery_wait(phba); | 1670 | lpfc_discovery_wait(phba); |
1660 | 1671 | ||
1661 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { | 1672 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { |
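The duplicated can_queue assignment in lpfc_pci_probe_one() is intentional: scsi_add_host() no longer accepts a zero can_queue, so a provisional value derived from cfg_hba_queue_depth has to be in place before registration, and the same expression is applied again once HBA setup has settled the real queue depth from the adapter's max XRI count. A sketch of the ordering constraint, condensed from this hunk (the setup step in the middle stands in for probe code the diff does not show):

    /* before scsi_add_host(): must be non-zero or registration fails */
    host->can_queue = phba->cfg_hba_queue_depth - 10;

    /* ... scsi_add_host(), interrupt setup, HBA setup (not shown) ... */

    /* after HBA setup: cfg_hba_queue_depth may have changed, so recompute
     * before discovery and I/O start */
    host->can_queue = phba->cfg_hba_queue_depth - 10;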
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index e42f22aaf71b..4d016c2a1b26 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c | |||
@@ -651,3 +651,19 @@ lpfc_mbox_get(struct lpfc_hba * phba) | |||
651 | 651 | ||
652 | return mbq; | 652 | return mbq; |
653 | } | 653 | } |
654 | |||
655 | int | ||
656 | lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd) | ||
657 | { | ||
658 | switch (cmd) { | ||
659 | case MBX_WRITE_NV: /* 0x03 */ | ||
660 | case MBX_UPDATE_CFG: /* 0x1B */ | ||
661 | case MBX_DOWN_LOAD: /* 0x1C */ | ||
662 | case MBX_DEL_LD_ENTRY: /* 0x1D */ | ||
663 | case MBX_LOAD_AREA: /* 0x81 */ | ||
664 | case MBX_FLASH_WR_ULA: /* 0x98 */ | ||
665 | case MBX_LOAD_EXP_ROM: /* 0x9C */ | ||
666 | return LPFC_MBOX_TMO_FLASH_CMD; | ||
667 | } | ||
668 | return LPFC_MBOX_TMO; | ||
669 | } | ||
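lpfc_mbox_tmo_val() gives the flash-related mailbox commands (WRITE_NV, DOWN_LOAD, LOAD_AREA and friends) the longer LPFC_MBOX_TMO_FLASH_CMD timeout while everything else keeps LPFC_MBOX_TMO; the sysfs mailbox path in lpfc_attr.c above is the caller converted to it in this patch. A sketch of the intended call pattern, mirroring that caller (pmboxq stands for whatever mailbox is being issued):

    /* per-command mailbox timeout, as used by sysfs_mbox_read() */
    rc = lpfc_sli_issue_mbox_wait(phba, pmboxq,
                    lpfc_mbox_tmo_val(phba, pmboxq->mb.mbxCommand) * HZ);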
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index bd0b0e293d63..20449a8dd53d 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c | |||
@@ -179,7 +179,7 @@ lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, | |||
179 | 179 | ||
180 | /* Abort outstanding I/O on NPort <nlp_DID> */ | 180 | /* Abort outstanding I/O on NPort <nlp_DID> */ |
181 | lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, | 181 | lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, |
182 | "%d:0201 Abort outstanding I/O on NPort x%x " | 182 | "%d:0205 Abort outstanding I/O on NPort x%x " |
183 | "Data: x%x x%x x%x\n", | 183 | "Data: x%x x%x x%x\n", |
184 | phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag, | 184 | phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag, |
185 | ndlp->nlp_state, ndlp->nlp_rpi); | 185 | ndlp->nlp_state, ndlp->nlp_rpi); |
@@ -393,6 +393,20 @@ lpfc_rcv_plogi(struct lpfc_hba * phba, | |||
393 | mbox->context2 = ndlp; | 393 | mbox->context2 = ndlp; |
394 | ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI); | 394 | ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI); |
395 | 395 | ||
396 | /* | ||
397 | * If there is an outstanding PLOGI issued, abort it before | ||
398 | * sending ACC rsp for received PLOGI. If pending plogi | ||
399 | * is not canceled here, the plogi will be rejected by | ||
400 | * remote port and will be retried. On a configuration with | ||
401 | * single discovery thread, this will cause a huge delay in | ||
402 | * discovery. Also this will cause multiple state machines | ||
403 | * running in parallel for this node. | ||
404 | */ | ||
405 | if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) { | ||
406 | /* software abort outstanding PLOGI */ | ||
407 | lpfc_els_abort(phba, ndlp, 1); | ||
408 | } | ||
409 | |||
396 | lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0); | 410 | lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0); |
397 | return 1; | 411 | return 1; |
398 | 412 | ||
@@ -1601,7 +1615,13 @@ lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba, | |||
1601 | 1615 | ||
1602 | lpfc_rcv_padisc(phba, ndlp, cmdiocb); | 1616 | lpfc_rcv_padisc(phba, ndlp, cmdiocb); |
1603 | 1617 | ||
1604 | if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { | 1618 | /* |
1619 | * Do not start discovery if discovery is about to start | ||
1620 | * or discovery in progress for this node. Starting discovery | ||
1621 | * here will affect the counting of discovery threads. | ||
1622 | */ | ||
1623 | if ((!(ndlp->nlp_flag & NLP_DELAY_TMO)) && | ||
1624 | (ndlp->nlp_flag & NLP_NPR_2B_DISC)){ | ||
1605 | if (ndlp->nlp_flag & NLP_NPR_ADISC) { | 1625 | if (ndlp->nlp_flag & NLP_NPR_ADISC) { |
1606 | ndlp->nlp_prev_state = NLP_STE_NPR_NODE; | 1626 | ndlp->nlp_prev_state = NLP_STE_NPR_NODE; |
1607 | ndlp->nlp_state = NLP_STE_ADISC_ISSUE; | 1627 | ndlp->nlp_state = NLP_STE_ADISC_ISSUE; |
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index a760a44173df..a8816a8738f8 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c | |||
@@ -21,6 +21,7 @@ | |||
21 | 21 | ||
22 | #include <linux/pci.h> | 22 | #include <linux/pci.h> |
23 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
24 | #include <linux/delay.h> | ||
24 | 25 | ||
25 | #include <scsi/scsi.h> | 26 | #include <scsi/scsi.h> |
26 | #include <scsi/scsi_device.h> | 27 | #include <scsi/scsi_device.h> |
@@ -841,6 +842,21 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | |||
841 | return 0; | 842 | return 0; |
842 | } | 843 | } |
843 | 844 | ||
845 | static void | ||
846 | lpfc_block_error_handler(struct scsi_cmnd *cmnd) | ||
847 | { | ||
848 | struct Scsi_Host *shost = cmnd->device->host; | ||
849 | struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); | ||
850 | |||
851 | spin_lock_irq(shost->host_lock); | ||
852 | while (rport->port_state == FC_PORTSTATE_BLOCKED) { | ||
853 | spin_unlock_irq(shost->host_lock); | ||
854 | msleep(1000); | ||
855 | spin_lock_irq(shost->host_lock); | ||
856 | } | ||
857 | spin_unlock_irq(shost->host_lock); | ||
858 | return; | ||
859 | } | ||
844 | 860 | ||
845 | static int | 861 | static int |
846 | lpfc_abort_handler(struct scsi_cmnd *cmnd) | 862 | lpfc_abort_handler(struct scsi_cmnd *cmnd) |
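The wait loop introduced above exists because the FC transport marks an rport FC_PORTSTATE_BLOCKED while its link is bouncing; entering the abort or reset handlers against a blocked port would fail spuriously and escalate recovery for no reason. Each lpfc error handler therefore sleeps a second at a time until the transport either unblocks the rport or tears it down when dev_loss_tmo expires. A minimal sketch of the same wait with the host_lock handling stripped out (the helper name is illustrative only; the real code re-takes the lock around each check because port_state is normally read under it):

static void example_wait_for_unblock(struct fc_rport *rport)
{
	/* FC_PORTSTATE_BLOCKED is transient: the transport either reopens
	 * the rport or deletes it when dev_loss_tmo fires, so the loop
	 * always terminates. */
	while (rport->port_state == FC_PORTSTATE_BLOCKED)
		msleep(1000);
}

Later kernels fold this pattern into the transport class itself as fc_block_scsi_eh().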
@@ -855,6 +871,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) | |||
855 | unsigned int loop_count = 0; | 871 | unsigned int loop_count = 0; |
856 | int ret = SUCCESS; | 872 | int ret = SUCCESS; |
857 | 873 | ||
874 | lpfc_block_error_handler(cmnd); | ||
858 | spin_lock_irq(shost->host_lock); | 875 | spin_lock_irq(shost->host_lock); |
859 | 876 | ||
860 | lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; | 877 | lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; |
@@ -957,6 +974,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd) | |||
957 | int ret = FAILED; | 974 | int ret = FAILED; |
958 | int cnt, loopcnt; | 975 | int cnt, loopcnt; |
959 | 976 | ||
977 | lpfc_block_error_handler(cmnd); | ||
960 | spin_lock_irq(shost->host_lock); | 978 | spin_lock_irq(shost->host_lock); |
961 | /* | 979 | /* |
962 | * If target is not in a MAPPED state, delay the reset until | 980 | * If target is not in a MAPPED state, delay the reset until |
@@ -1073,6 +1091,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) | |||
1073 | int cnt, loopcnt; | 1091 | int cnt, loopcnt; |
1074 | struct lpfc_scsi_buf * lpfc_cmd; | 1092 | struct lpfc_scsi_buf * lpfc_cmd; |
1075 | 1093 | ||
1094 | lpfc_block_error_handler(cmnd); | ||
1076 | spin_lock_irq(shost->host_lock); | 1095 | spin_lock_irq(shost->host_lock); |
1077 | 1096 | ||
1078 | lpfc_cmd = lpfc_get_scsi_buf(phba); | 1097 | lpfc_cmd = lpfc_get_scsi_buf(phba); |
@@ -1104,7 +1123,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) | |||
1104 | ndlp->rport->dd_data); | 1123 | ndlp->rport->dd_data); |
1105 | if (ret != SUCCESS) { | 1124 | if (ret != SUCCESS) { |
1106 | lpfc_printf_log(phba, KERN_ERR, LOG_FCP, | 1125 | lpfc_printf_log(phba, KERN_ERR, LOG_FCP, |
1107 | "%d:0713 Bus Reset on target %d failed\n", | 1126 | "%d:0700 Bus Reset on target %d failed\n", |
1108 | phba->brd_no, i); | 1127 | phba->brd_no, i); |
1109 | err_count++; | 1128 | err_count++; |
1110 | } | 1129 | } |
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 350a625fa224..70f4d5a1348e 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
@@ -320,7 +320,8 @@ lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq) | |||
320 | kfree(old_arr); | 320 | kfree(old_arr); |
321 | return iotag; | 321 | return iotag; |
322 | } | 322 | } |
323 | } | 323 | } else |
324 | spin_unlock_irq(phba->host->host_lock); | ||
324 | 325 | ||
325 | lpfc_printf_log(phba, KERN_ERR,LOG_SLI, | 326 | lpfc_printf_log(phba, KERN_ERR,LOG_SLI, |
326 | "%d:0318 Failed to allocate IOTAG.last IOTAG is %d\n", | 327 | "%d:0318 Failed to allocate IOTAG.last IOTAG is %d\n", |
@@ -969,9 +970,11 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba) | |||
969 | * resources need to be recovered. | 970 | * resources need to be recovered. |
970 | */ | 971 | */ |
971 | if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { | 972 | if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { |
972 | printk(KERN_INFO "%s: IOCB cmd 0x%x processed." | 973 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
973 | " Skipping completion\n", __FUNCTION__, | 974 | "%d:0314 IOCB cmd 0x%x" |
974 | irsp->ulpCommand); | 975 | " processed. Skipping" |
976 | " completion", phba->brd_no, | ||
977 | irsp->ulpCommand); | ||
975 | break; | 978 | break; |
976 | } | 979 | } |
977 | 980 | ||
@@ -1104,7 +1107,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, | |||
1104 | if (unlikely(irsp->ulpStatus)) { | 1107 | if (unlikely(irsp->ulpStatus)) { |
1105 | /* Rsp ring <ringno> error: IOCB */ | 1108 | /* Rsp ring <ringno> error: IOCB */ |
1106 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, | 1109 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, |
1107 | "%d:0326 Rsp Ring %d error: IOCB Data: " | 1110 | "%d:0336 Rsp Ring %d error: IOCB Data: " |
1108 | "x%x x%x x%x x%x x%x x%x x%x x%x\n", | 1111 | "x%x x%x x%x x%x x%x x%x x%x x%x\n", |
1109 | phba->brd_no, pring->ringno, | 1112 | phba->brd_no, pring->ringno, |
1110 | irsp->un.ulpWord[0], irsp->un.ulpWord[1], | 1113 | irsp->un.ulpWord[0], irsp->un.ulpWord[1], |
@@ -1122,9 +1125,11 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, | |||
1122 | * resources need to be recovered. | 1125 | * resources need to be recovered. |
1123 | */ | 1126 | */ |
1124 | if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { | 1127 | if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { |
1125 | printk(KERN_INFO "%s: IOCB cmd 0x%x processed. " | 1128 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
1126 | "Skipping completion\n", __FUNCTION__, | 1129 | "%d:0333 IOCB cmd 0x%x" |
1127 | irsp->ulpCommand); | 1130 | " processed. Skipping" |
1131 | " completion\n", phba->brd_no, | ||
1132 | irsp->ulpCommand); | ||
1128 | break; | 1133 | break; |
1129 | } | 1134 | } |
1130 | 1135 | ||
@@ -1155,7 +1160,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, | |||
1155 | } else { | 1160 | } else { |
1156 | /* Unknown IOCB command */ | 1161 | /* Unknown IOCB command */ |
1157 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 1162 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
1158 | "%d:0321 Unknown IOCB command " | 1163 | "%d:0334 Unknown IOCB command " |
1159 | "Data: x%x, x%x x%x x%x x%x\n", | 1164 | "Data: x%x, x%x x%x x%x x%x\n", |
1160 | phba->brd_no, type, irsp->ulpCommand, | 1165 | phba->brd_no, type, irsp->ulpCommand, |
1161 | irsp->ulpStatus, irsp->ulpIoTag, | 1166 | irsp->ulpStatus, irsp->ulpIoTag, |
@@ -1238,7 +1243,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba, | |||
1238 | lpfc_printf_log(phba, | 1243 | lpfc_printf_log(phba, |
1239 | KERN_ERR, | 1244 | KERN_ERR, |
1240 | LOG_SLI, | 1245 | LOG_SLI, |
1241 | "%d:0312 Ring %d handler: portRspPut %d " | 1246 | "%d:0303 Ring %d handler: portRspPut %d " |
1242 | "is bigger then rsp ring %d\n", | 1247 | "is bigger then rsp ring %d\n", |
1243 | phba->brd_no, | 1248 | phba->brd_no, |
1244 | pring->ringno, portRspPut, portRspMax); | 1249 | pring->ringno, portRspPut, portRspMax); |
@@ -1383,7 +1388,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba, | |||
1383 | lpfc_printf_log(phba, | 1388 | lpfc_printf_log(phba, |
1384 | KERN_ERR, | 1389 | KERN_ERR, |
1385 | LOG_SLI, | 1390 | LOG_SLI, |
1386 | "%d:0321 Unknown IOCB command " | 1391 | "%d:0335 Unknown IOCB command " |
1387 | "Data: x%x x%x x%x x%x\n", | 1392 | "Data: x%x x%x x%x x%x\n", |
1388 | phba->brd_no, | 1393 | phba->brd_no, |
1389 | irsp->ulpCommand, | 1394 | irsp->ulpCommand, |
@@ -1399,11 +1404,11 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba, | |||
1399 | next_iocb, | 1404 | next_iocb, |
1400 | &saveq->list, | 1405 | &saveq->list, |
1401 | list) { | 1406 | list) { |
1407 | list_del(&rspiocbp->list); | ||
1402 | lpfc_sli_release_iocbq(phba, | 1408 | lpfc_sli_release_iocbq(phba, |
1403 | rspiocbp); | 1409 | rspiocbp); |
1404 | } | 1410 | } |
1405 | } | 1411 | } |
1406 | |||
1407 | lpfc_sli_release_iocbq(phba, saveq); | 1412 | lpfc_sli_release_iocbq(phba, saveq); |
1408 | } | 1413 | } |
1409 | } | 1414 | } |
@@ -1711,15 +1716,13 @@ lpfc_sli_brdreset(struct lpfc_hba * phba) | |||
1711 | phba->fc_myDID = 0; | 1716 | phba->fc_myDID = 0; |
1712 | phba->fc_prevDID = 0; | 1717 | phba->fc_prevDID = 0; |
1713 | 1718 | ||
1714 | psli->sli_flag = 0; | ||
1715 | |||
1716 | /* Turn off parity checking and serr during the physical reset */ | 1719 | /* Turn off parity checking and serr during the physical reset */ |
1717 | pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); | 1720 | pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); |
1718 | pci_write_config_word(phba->pcidev, PCI_COMMAND, | 1721 | pci_write_config_word(phba->pcidev, PCI_COMMAND, |
1719 | (cfg_value & | 1722 | (cfg_value & |
1720 | ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); | 1723 | ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); |
1721 | 1724 | ||
1722 | psli->sli_flag &= ~LPFC_SLI2_ACTIVE; | 1725 | psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA); |
1723 | /* Now toggle INITFF bit in the Host Control Register */ | 1726 | /* Now toggle INITFF bit in the Host Control Register */ |
1724 | writel(HC_INITFF, phba->HCregaddr); | 1727 | writel(HC_INITFF, phba->HCregaddr); |
1725 | mdelay(1); | 1728 | mdelay(1); |
@@ -1760,7 +1763,7 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba) | |||
1760 | 1763 | ||
1761 | /* Restart HBA */ | 1764 | /* Restart HBA */ |
1762 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, | 1765 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
1763 | "%d:0328 Restart HBA Data: x%x x%x\n", phba->brd_no, | 1766 | "%d:0337 Restart HBA Data: x%x x%x\n", phba->brd_no, |
1764 | phba->hba_state, psli->sli_flag); | 1767 | phba->hba_state, psli->sli_flag); |
1765 | 1768 | ||
1766 | word0 = 0; | 1769 | word0 = 0; |
@@ -1792,6 +1795,9 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba) | |||
1792 | 1795 | ||
1793 | spin_unlock_irq(phba->host->host_lock); | 1796 | spin_unlock_irq(phba->host->host_lock); |
1794 | 1797 | ||
1798 | memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); | ||
1799 | psli->stats_start = get_seconds(); | ||
1800 | |||
1795 | if (skip_post) | 1801 | if (skip_post) |
1796 | mdelay(100); | 1802 | mdelay(100); |
1797 | else | 1803 | else |
@@ -1902,6 +1908,9 @@ lpfc_sli_hba_setup(struct lpfc_hba * phba) | |||
1902 | } | 1908 | } |
1903 | 1909 | ||
1904 | while (resetcount < 2 && !done) { | 1910 | while (resetcount < 2 && !done) { |
1911 | spin_lock_irq(phba->host->host_lock); | ||
1912 | phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; | ||
1913 | spin_unlock_irq(phba->host->host_lock); | ||
1905 | phba->hba_state = LPFC_STATE_UNKNOWN; | 1914 | phba->hba_state = LPFC_STATE_UNKNOWN; |
1906 | lpfc_sli_brdrestart(phba); | 1915 | lpfc_sli_brdrestart(phba); |
1907 | msleep(2500); | 1916 | msleep(2500); |
@@ -1909,6 +1918,9 @@ lpfc_sli_hba_setup(struct lpfc_hba * phba) | |||
1909 | if (rc) | 1918 | if (rc) |
1910 | break; | 1919 | break; |
1911 | 1920 | ||
1921 | spin_lock_irq(phba->host->host_lock); | ||
1922 | phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; | ||
1923 | spin_unlock_irq(phba->host->host_lock); | ||
1912 | resetcount++; | 1924 | resetcount++; |
1913 | 1925 | ||
1914 | /* Call pre CONFIG_PORT mailbox command initialization. A value of 0 | 1926 | /* Call pre CONFIG_PORT mailbox command initialization. A value of 0 |
@@ -2194,7 +2206,8 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) | |||
2194 | return (MBX_NOT_FINISHED); | 2206 | return (MBX_NOT_FINISHED); |
2195 | } | 2207 | } |
2196 | /* timeout active mbox command */ | 2208 | /* timeout active mbox command */ |
2197 | mod_timer(&psli->mbox_tmo, jiffies + HZ * LPFC_MBOX_TMO); | 2209 | mod_timer(&psli->mbox_tmo, (jiffies + |
2210 | (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand)))); | ||
2198 | } | 2211 | } |
2199 | 2212 | ||
2200 | /* Mailbox cmd <cmd> issue */ | 2213 | /* Mailbox cmd <cmd> issue */ |
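lpfc_mbox_tmo_val() is new with this patch, but its body lands in lpfc_mbox.c and is not part of the hunks shown here. Presumably it returns the long LPFC_MBOX_TMO_FLASH_CMD timeout for mailbox commands that write or erase flash and the stock LPFC_MBOX_TMO for everything else; a hedged sketch along those lines (the exact list of command codes is an assumption):

int
lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
{
	switch (cmd) {
	case MBX_WRITE_NV:			/* flash-backed NVRAM write */
	case MBX_DOWN_LOAD:			/* firmware download        */
	case MBX_LOAD_EXP_ROM:			/* expansion ROM update     */
		return LPFC_MBOX_TMO_FLASH_CMD;	/* 300 s, see lpfc_sli.h    */
	default:
		return LPFC_MBOX_TMO;		/* 30 s default             */
	}
}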
@@ -2254,7 +2267,6 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) | |||
2254 | break; | 2267 | break; |
2255 | 2268 | ||
2256 | case MBX_POLL: | 2269 | case MBX_POLL: |
2257 | i = 0; | ||
2258 | psli->mbox_active = NULL; | 2270 | psli->mbox_active = NULL; |
2259 | if (psli->sli_flag & LPFC_SLI2_ACTIVE) { | 2271 | if (psli->sli_flag & LPFC_SLI2_ACTIVE) { |
2260 | /* First read mbox status word */ | 2272 | /* First read mbox status word */ |
@@ -2268,11 +2280,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) | |||
2268 | /* Read the HBA Host Attention Register */ | 2280 | /* Read the HBA Host Attention Register */ |
2269 | ha_copy = readl(phba->HAregaddr); | 2281 | ha_copy = readl(phba->HAregaddr); |
2270 | 2282 | ||
2283 | i = lpfc_mbox_tmo_val(phba, mb->mbxCommand); | ||
2284 | i *= 1000; /* Convert to ms */ | ||
2285 | |||
2271 | /* Wait for command to complete */ | 2286 | /* Wait for command to complete */ |
2272 | while (((word0 & OWN_CHIP) == OWN_CHIP) || | 2287 | while (((word0 & OWN_CHIP) == OWN_CHIP) || |
2273 | (!(ha_copy & HA_MBATT) && | 2288 | (!(ha_copy & HA_MBATT) && |
2274 | (phba->hba_state > LPFC_WARM_START))) { | 2289 | (phba->hba_state > LPFC_WARM_START))) { |
2275 | if (i++ >= 100) { | 2290 | if (i-- <= 0) { |
2276 | psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; | 2291 | psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; |
2277 | spin_unlock_irqrestore(phba->host->host_lock, | 2292 | spin_unlock_irqrestore(phba->host->host_lock, |
2278 | drvr_flag); | 2293 | drvr_flag); |
@@ -2290,7 +2305,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) | |||
2290 | 2305 | ||
2291 | /* Can be in interrupt context, do not sleep */ | 2306 | /* Can be in interrupt context, do not sleep */ |
2292 | /* (or might be called with interrupts disabled) */ | 2307 | /* (or might be called with interrupts disabled) */ |
2293 | mdelay(i); | 2308 | mdelay(1); |
2294 | 2309 | ||
2295 | spin_lock_irqsave(phba->host->host_lock, drvr_flag); | 2310 | spin_lock_irqsave(phba->host->host_lock, drvr_flag); |
2296 | 2311 | ||
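The MBX_POLL branch above replaces a fixed 100-iteration spin, whose mdelay(i) grew with each pass, by a millisecond countdown derived from the same per-command timeout used on the interrupt-driven path. A hedged sketch of the resulting loop shape; mailbox_done() is a hypothetical stand-in for the OWN_CHIP/HA_MBATT test, and the host_lock drop/retake around the delay is omitted:

static int example_poll_mailbox(struct lpfc_hba *phba, MAILBOX_t *mb)
{
	int ms_left = lpfc_mbox_tmo_val(phba, mb->mbxCommand) * 1000;

	while (!mailbox_done(phba)) {		/* hypothetical predicate   */
		if (ms_left-- <= 0)
			return MBX_NOT_FINISHED;/* same budget as mbox_tmo  */
		mdelay(1);			/* IRQs may be off; no sleep */
	}
	return MBX_SUCCESS;
}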
@@ -3005,7 +3020,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba, | |||
3005 | 3020 | ||
3006 | if (timeleft == 0) { | 3021 | if (timeleft == 0) { |
3007 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 3022 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
3008 | "%d:0329 IOCB wait timeout error - no " | 3023 | "%d:0338 IOCB wait timeout error - no " |
3009 | "wake response Data x%x\n", | 3024 | "wake response Data x%x\n", |
3010 | phba->brd_no, timeout); | 3025 | phba->brd_no, timeout); |
3011 | retval = IOCB_TIMEDOUT; | 3026 | retval = IOCB_TIMEDOUT; |
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index d8ef0d2894d4..e26de6809358 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h | |||
@@ -172,6 +172,18 @@ struct lpfc_sli_stat { | |||
172 | uint32_t mbox_busy; /* Mailbox cmd busy */ | 172 | uint32_t mbox_busy; /* Mailbox cmd busy */ |
173 | }; | 173 | }; |
174 | 174 | ||
175 | /* Structure to store link status values when port stats are reset */ | ||
176 | struct lpfc_lnk_stat { | ||
177 | uint32_t link_failure_count; | ||
178 | uint32_t loss_of_sync_count; | ||
179 | uint32_t loss_of_signal_count; | ||
180 | uint32_t prim_seq_protocol_err_count; | ||
181 | uint32_t invalid_tx_word_count; | ||
182 | uint32_t invalid_crc_count; | ||
183 | uint32_t error_frames; | ||
184 | uint32_t link_events; | ||
185 | }; | ||
186 | |||
175 | /* Structure used to hold SLI information */ | 187 | /* Structure used to hold SLI information */ |
176 | struct lpfc_sli { | 188 | struct lpfc_sli { |
177 | uint32_t num_rings; | 189 | uint32_t num_rings; |
@@ -201,6 +213,8 @@ struct lpfc_sli { | |||
201 | struct lpfc_iocbq ** iocbq_lookup; /* array to lookup IOCB by IOTAG */ | 213 | struct lpfc_iocbq ** iocbq_lookup; /* array to lookup IOCB by IOTAG */ |
202 | size_t iocbq_lookup_len; /* current length of the array */ | 214 | size_t iocbq_lookup_len; /* current length of the array */ |
203 | uint16_t last_iotag; /* last allocated IOTAG */ | 215 | uint16_t last_iotag; /* last allocated IOTAG */ |
216 | unsigned long stats_start; /* in seconds */ | ||
217 | struct lpfc_lnk_stat lnk_stat_offsets; | ||
204 | }; | 218 | }; |
205 | 219 | ||
206 | /* Given a pointer to the start of the ring, and the slot number of | 220 | /* Given a pointer to the start of the ring, and the slot number of |
@@ -211,3 +225,9 @@ struct lpfc_sli { | |||
211 | 225 | ||
212 | #define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox | 226 | #define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox |
213 | command */ | 227 | command */ |
228 | #define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write | ||
229 | * or erase cmds. This is especially | ||
230 | * long because a single command may | ||
231 | * spawn multiple flash erase | ||
232 | * operations. | ||
233 | */ | ||
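The new stats_start and lnk_stat_offsets fields only make sense alongside their consumers in lpfc_attr.c, which sit outside the hunks shown here. The presumed scheme: resetting port statistics snapshots the current HBA link counters into lnk_stat_offsets (and the time into stats_start, as lpfc_sli_brdrestart() now does), and later reads report hardware counter minus snapshot, so counters appear to restart from zero without touching the HBA. A hedged sketch; read_link_status() stands in for the real link-status mailbox query:

static void example_reset_link_stats(struct lpfc_sli *psli)
{
	struct lpfc_lnk_stat now;

	read_link_status(&now);			/* hypothetical HBA query */
	psli->lnk_stat_offsets = now;		/* new zero point         */
	psli->stats_start = get_seconds();
}

static uint32_t example_link_failures(struct lpfc_sli *psli)
{
	struct lpfc_lnk_stat now;

	read_link_status(&now);			/* hypothetical HBA query */
	return now.link_failure_count -
	       psli->lnk_stat_offsets.link_failure_count;
}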
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 10e89c6ae823..c7091ea29f3f 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h | |||
@@ -18,7 +18,7 @@ | |||
18 | * included with this package. * | 18 | * included with this package. * |
19 | *******************************************************************/ | 19 | *******************************************************************/ |
20 | 20 | ||
21 | #define LPFC_DRIVER_VERSION "8.1.7" | 21 | #define LPFC_DRIVER_VERSION "8.1.9" |
22 | 22 | ||
23 | #define LPFC_DRIVER_NAME "lpfc" | 23 | #define LPFC_DRIVER_NAME "lpfc" |
24 | 24 | ||
diff --git a/drivers/scsi/megaraid/mega_common.h b/drivers/scsi/megaraid/mega_common.h index 4675343228ad..8cd0bd1d0f7c 100644 --- a/drivers/scsi/megaraid/mega_common.h +++ b/drivers/scsi/megaraid/mega_common.h | |||
@@ -37,6 +37,12 @@ | |||
37 | #define LSI_MAX_CHANNELS 16 | 37 | #define LSI_MAX_CHANNELS 16 |
38 | #define LSI_MAX_LOGICAL_DRIVES_64LD (64+1) | 38 | #define LSI_MAX_LOGICAL_DRIVES_64LD (64+1) |
39 | 39 | ||
40 | #define HBA_SIGNATURE_64_BIT 0x299 | ||
41 | #define PCI_CONF_AMISIG64 0xa4 | ||
42 | |||
43 | #define MEGA_SCSI_INQ_EVPD 1 | ||
44 | #define MEGA_INVALID_FIELD_IN_CDB 0x24 | ||
45 | |||
40 | 46 | ||
41 | /** | 47 | /** |
42 | * scb_t - scsi command control block | 48 | * scb_t - scsi command control block |
diff --git a/drivers/scsi/megaraid/megaraid_ioctl.h b/drivers/scsi/megaraid/megaraid_ioctl.h index bdaee144a1c3..b8aa34202ec3 100644 --- a/drivers/scsi/megaraid/megaraid_ioctl.h +++ b/drivers/scsi/megaraid/megaraid_ioctl.h | |||
@@ -132,6 +132,10 @@ typedef struct uioc { | |||
132 | /* Driver Data: */ | 132 | /* Driver Data: */ |
133 | void __user * user_data; | 133 | void __user * user_data; |
134 | uint32_t user_data_len; | 134 | uint32_t user_data_len; |
135 | |||
136 | /* 64bit alignment */ | ||
137 | uint32_t pad_for_64bit_align; | ||
138 | |||
135 | mraid_passthru_t __user *user_pthru; | 139 | mraid_passthru_t __user *user_pthru; |
136 | 140 | ||
137 | mraid_passthru_t *pthru32; | 141 | mraid_passthru_t *pthru32; |
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index 92715130ac09..cd982c877da0 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c | |||
@@ -10,7 +10,7 @@ | |||
10 | * 2 of the License, or (at your option) any later version. | 10 | * 2 of the License, or (at your option) any later version. |
11 | * | 11 | * |
12 | * FILE : megaraid_mbox.c | 12 | * FILE : megaraid_mbox.c |
13 | * Version : v2.20.4.8 (Apr 11 2006) | 13 | * Version : v2.20.4.9 (Jul 16 2006) |
14 | * | 14 | * |
15 | * Authors: | 15 | * Authors: |
16 | * Atul Mukker <Atul.Mukker@lsil.com> | 16 | * Atul Mukker <Atul.Mukker@lsil.com> |
@@ -720,6 +720,7 @@ megaraid_init_mbox(adapter_t *adapter) | |||
720 | struct pci_dev *pdev; | 720 | struct pci_dev *pdev; |
721 | mraid_device_t *raid_dev; | 721 | mraid_device_t *raid_dev; |
722 | int i; | 722 | int i; |
723 | uint32_t magic64; | ||
723 | 724 | ||
724 | 725 | ||
725 | adapter->ito = MBOX_TIMEOUT; | 726 | adapter->ito = MBOX_TIMEOUT; |
@@ -863,12 +864,33 @@ megaraid_init_mbox(adapter_t *adapter) | |||
863 | 864 | ||
864 | // Set the DMA mask to 64-bit. All supported controllers are capable of | 865 | // Set the DMA mask to 64-bit. All supported controllers are capable of |
865 | // DMA in this range | 866 | // DMA in this range |
866 | if (pci_set_dma_mask(adapter->pdev, DMA_64BIT_MASK) != 0) { | 867 | pci_read_config_dword(adapter->pdev, PCI_CONF_AMISIG64, &magic64); |
867 | 868 | ||
868 | con_log(CL_ANN, (KERN_WARNING | 869 | if (((magic64 == HBA_SIGNATURE_64_BIT) && |
869 | "megaraid: could not set DMA mask for 64-bit.\n")); | 870 | ((adapter->pdev->subsystem_device != |
871 | PCI_SUBSYS_ID_MEGARAID_SATA_150_6) && | ||
872 | (adapter->pdev->subsystem_device != | ||
873 | PCI_SUBSYS_ID_MEGARAID_SATA_150_4))) || | ||
874 | (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC && | ||
875 | adapter->pdev->device == PCI_DEVICE_ID_VERDE) || | ||
876 | (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC && | ||
877 | adapter->pdev->device == PCI_DEVICE_ID_DOBSON) || | ||
878 | (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC && | ||
879 | adapter->pdev->device == PCI_DEVICE_ID_LINDSAY) || | ||
880 | (adapter->pdev->vendor == PCI_VENDOR_ID_DELL && | ||
881 | adapter->pdev->device == PCI_DEVICE_ID_PERC4_DI_EVERGLADES) || | ||
882 | (adapter->pdev->vendor == PCI_VENDOR_ID_DELL && | ||
883 | adapter->pdev->device == PCI_DEVICE_ID_PERC4E_DI_KOBUK)) { | ||
884 | if (pci_set_dma_mask(adapter->pdev, DMA_64BIT_MASK)) { | ||
885 | con_log(CL_ANN, (KERN_WARNING | ||
886 | "megaraid: DMA mask for 64-bit failed\n")); | ||
870 | 887 | ||
871 | goto out_free_sysfs_res; | 888 | if (pci_set_dma_mask(adapter->pdev, DMA_32BIT_MASK)) {
889 | con_log(CL_ANN, (KERN_WARNING | ||
890 | "megaraid: 32-bit DMA mask failed\n")); | ||
891 | goto out_free_sysfs_res; | ||
892 | } | ||
893 | } | ||
872 | } | 894 | } |
873 | 895 | ||
874 | // setup tasklet for DPC | 896 | // setup tasklet for DPC |
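The reworked block above turns an unconditional 64-bit DMA mask into a qualified one: only adapters that expose the 64-bit signature at config offset PCI_CONF_AMISIG64, or that appear on an explicit device list, request a 64-bit mask, and a failed 64-bit request now degrades to 32-bit instead of aborting initialization. A condensed, hedged sketch of that decision with the capability test folded into one flag:

static int example_set_dma_mask(struct pci_dev *pdev, int known_64bit_capable)
{
	if (!known_64bit_capable)
		return 0;			/* keep the default 32-bit mask */
	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0)
		return 0;			/* 64-bit DMA accepted          */
	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) == 0)
		return 0;			/* degrade to 32-bit DMA        */
	return -EIO;				/* no usable DMA mask           */
}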
@@ -1622,6 +1644,14 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy) | |||
1622 | rdev->last_disp |= (1L << SCP2CHANNEL(scp)); | 1644 | rdev->last_disp |= (1L << SCP2CHANNEL(scp)); |
1623 | } | 1645 | } |
1624 | 1646 | ||
1647 | if (scp->cmnd[1] & MEGA_SCSI_INQ_EVPD) { | ||
1648 | scp->sense_buffer[0] = 0x70; | ||
1649 | scp->sense_buffer[2] = ILLEGAL_REQUEST; | ||
1650 | scp->sense_buffer[12] = MEGA_INVALID_FIELD_IN_CDB; | ||
1651 | scp->result = CHECK_CONDITION << 1; | ||
1652 | return NULL; | ||
1653 | } | ||
1654 | |||
1625 | /* Fall through */ | 1655 | /* Fall through */ |
1626 | 1656 | ||
1627 | case READ_CAPACITY: | 1657 | case READ_CAPACITY: |
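The INQUIRY check added above fails EVPD requests with a proper CHECK CONDITION instead of handing them to firmware that does not support vital product data pages. The sense bytes follow the SPC fixed-format layout: 0x70 marks a current, fixed-format sense block, byte 2 carries the sense key (ILLEGAL REQUEST, 0x05) and byte 12 the additional sense code (0x24, INVALID FIELD IN CDB). The same response packaged as a stand-alone helper, for reference:

static void example_fail_evpd_inquiry(struct scsi_cmnd *scp)
{
	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	scp->sense_buffer[0]  = 0x70;			   /* current, fixed format */
	scp->sense_buffer[2]  = ILLEGAL_REQUEST;	   /* sense key 0x05        */
	scp->sense_buffer[12] = MEGA_INVALID_FIELD_IN_CDB; /* ASC 0x24              */
	scp->result = CHECK_CONDITION << 1;		   /* status_byte() format  */
}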
diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h index 868fb0ec93e7..2b5a3285f799 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.h +++ b/drivers/scsi/megaraid/megaraid_mbox.h | |||
@@ -21,8 +21,8 @@ | |||
21 | #include "megaraid_ioctl.h" | 21 | #include "megaraid_ioctl.h" |
22 | 22 | ||
23 | 23 | ||
24 | #define MEGARAID_VERSION "2.20.4.8" | 24 | #define MEGARAID_VERSION "2.20.4.9" |
25 | #define MEGARAID_EXT_VERSION "(Release Date: Mon Apr 11 12:27:22 EST 2006)" | 25 | #define MEGARAID_EXT_VERSION "(Release Date: Sun Jul 16 12:27:22 EST 2006)" |
26 | 26 | ||
27 | 27 | ||
28 | /* | 28 | /* |
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c index e8f534fb336b..d85b9a8f1b8d 100644 --- a/drivers/scsi/megaraid/megaraid_mm.c +++ b/drivers/scsi/megaraid/megaraid_mm.c | |||
@@ -10,7 +10,7 @@ | |||
10 | * 2 of the License, or (at your option) any later version. | 10 | * 2 of the License, or (at your option) any later version. |
11 | * | 11 | * |
12 | * FILE : megaraid_mm.c | 12 | * FILE : megaraid_mm.c |
13 | * Version : v2.20.2.6 (Mar 7 2005) | 13 | * Version : v2.20.2.7 (Jul 16 2006) |
14 | * | 14 | * |
15 | * Common management module | 15 | * Common management module |
16 | */ | 16 | */ |
diff --git a/drivers/scsi/megaraid/megaraid_mm.h b/drivers/scsi/megaraid/megaraid_mm.h index 3d9e67d6849d..c8762b2b8ed1 100644 --- a/drivers/scsi/megaraid/megaraid_mm.h +++ b/drivers/scsi/megaraid/megaraid_mm.h | |||
@@ -27,9 +27,9 @@ | |||
27 | #include "megaraid_ioctl.h" | 27 | #include "megaraid_ioctl.h" |
28 | 28 | ||
29 | 29 | ||
30 | #define LSI_COMMON_MOD_VERSION "2.20.2.6" | 30 | #define LSI_COMMON_MOD_VERSION "2.20.2.7" |
31 | #define LSI_COMMON_MOD_EXT_VERSION \ | 31 | #define LSI_COMMON_MOD_EXT_VERSION \ |
32 | "(Release Date: Mon Mar 7 00:01:03 EST 2005)" | 32 | "(Release Date: Sun Jul 16 00:01:03 EST 2006)" |
33 | 33 | ||
34 | 34 | ||
35 | #define LSI_DBGLVL dbglevel | 35 | #define LSI_DBGLVL dbglevel |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 139ea0e27fd7..0930260aec2c 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
@@ -487,6 +487,7 @@ typedef struct { | |||
487 | #define MBA_IP_RCV_BUFFER_EMPTY 0x8026 /* IP receive buffer queue empty. */ | 487 | #define MBA_IP_RCV_BUFFER_EMPTY 0x8026 /* IP receive buffer queue empty. */ |
488 | #define MBA_IP_HDR_DATA_SPLIT 0x8027 /* IP header/data splitting feature */ | 488 | #define MBA_IP_HDR_DATA_SPLIT 0x8027 /* IP header/data splitting feature */ |
489 | /* used. */ | 489 | /* used. */ |
490 | #define MBA_TRACE_NOTIFICATION 0x8028 /* Trace/Diagnostic notification. */ | ||
490 | #define MBA_POINT_TO_POINT 0x8030 /* Point to point mode. */ | 491 | #define MBA_POINT_TO_POINT 0x8030 /* Point to point mode. */ |
491 | #define MBA_CMPLT_1_16BIT 0x8031 /* Completion 1 16bit IOSB. */ | 492 | #define MBA_CMPLT_1_16BIT 0x8031 /* Completion 1 16bit IOSB. */ |
492 | #define MBA_CMPLT_2_16BIT 0x8032 /* Completion 2 16bit IOSB. */ | 493 | #define MBA_CMPLT_2_16BIT 0x8032 /* Completion 2 16bit IOSB. */ |
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 9758dba95542..859649160caa 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
@@ -3063,6 +3063,7 @@ qla2x00_update_fcports(scsi_qla_host_t *ha) | |||
3063 | int | 3063 | int |
3064 | qla2x00_abort_isp(scsi_qla_host_t *ha) | 3064 | qla2x00_abort_isp(scsi_qla_host_t *ha) |
3065 | { | 3065 | { |
3066 | int rval; | ||
3066 | unsigned long flags = 0; | 3067 | unsigned long flags = 0; |
3067 | uint16_t cnt; | 3068 | uint16_t cnt; |
3068 | srb_t *sp; | 3069 | srb_t *sp; |
@@ -3119,6 +3120,16 @@ qla2x00_abort_isp(scsi_qla_host_t *ha) | |||
3119 | 3120 | ||
3120 | ha->isp_abort_cnt = 0; | 3121 | ha->isp_abort_cnt = 0; |
3121 | clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags); | 3122 | clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags); |
3123 | |||
3124 | if (ha->eft) { | ||
3125 | rval = qla2x00_trace_control(ha, TC_ENABLE, | ||
3126 | ha->eft_dma, EFT_NUM_BUFFERS); | ||
3127 | if (rval) { | ||
3128 | qla_printk(KERN_WARNING, ha, | ||
3129 | "Unable to reinitialize EFT " | ||
3130 | "(%d).\n", rval); | ||
3131 | } | ||
3132 | } | ||
3122 | } else { /* failed the ISP abort */ | 3133 | } else { /* failed the ISP abort */ |
3123 | ha->flags.online = 1; | 3134 | ha->flags.online = 1; |
3124 | if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) { | 3135 | if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) { |
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 2b60a27eff0b..c5b3c610a32a 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c | |||
@@ -471,6 +471,7 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun, | |||
471 | mrk24->nport_handle = cpu_to_le16(loop_id); | 471 | mrk24->nport_handle = cpu_to_le16(loop_id); |
472 | mrk24->lun[1] = LSB(lun); | 472 | mrk24->lun[1] = LSB(lun); |
473 | mrk24->lun[2] = MSB(lun); | 473 | mrk24->lun[2] = MSB(lun); |
474 | host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun)); | ||
474 | } else { | 475 | } else { |
475 | SET_TARGET_ID(ha, mrk->target, loop_id); | 476 | SET_TARGET_ID(ha, mrk->target, loop_id); |
476 | mrk->lun = cpu_to_le16(lun); | 477 | mrk->lun = cpu_to_le16(lun); |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 795bf15b1b8f..de0613135f70 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
@@ -587,6 +587,11 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) | |||
587 | DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x " | 587 | DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x " |
588 | "%04x.\n", ha->host_no, mb[1], mb[2], mb[3])); | 588 | "%04x.\n", ha->host_no, mb[1], mb[2], mb[3])); |
589 | break; | 589 | break; |
590 | |||
591 | case MBA_TRACE_NOTIFICATION: | ||
592 | DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n", | ||
593 | ha->host_no, mb[1], mb[2])); | ||
594 | break; | ||
590 | } | 595 | } |
591 | } | 596 | } |
592 | 597 | ||
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index ec7ebb6037e6..65cbe2f5eea2 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -744,7 +744,6 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd) | |||
744 | { | 744 | { |
745 | scsi_qla_host_t *ha = to_qla_host(cmd->device->host); | 745 | scsi_qla_host_t *ha = to_qla_host(cmd->device->host); |
746 | fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; | 746 | fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; |
747 | srb_t *sp; | ||
748 | int ret; | 747 | int ret; |
749 | unsigned int id, lun; | 748 | unsigned int id, lun; |
750 | unsigned long serial; | 749 | unsigned long serial; |
@@ -755,8 +754,7 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd) | |||
755 | lun = cmd->device->lun; | 754 | lun = cmd->device->lun; |
756 | serial = cmd->serial_number; | 755 | serial = cmd->serial_number; |
757 | 756 | ||
758 | sp = (srb_t *) CMD_SP(cmd); | 757 | if (!fcport) |
759 | if (!sp || !fcport) | ||
760 | return ret; | 758 | return ret; |
761 | 759 | ||
762 | qla_printk(KERN_INFO, ha, | 760 | qla_printk(KERN_INFO, ha, |
@@ -875,7 +873,6 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) | |||
875 | { | 873 | { |
876 | scsi_qla_host_t *ha = to_qla_host(cmd->device->host); | 874 | scsi_qla_host_t *ha = to_qla_host(cmd->device->host); |
877 | fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; | 875 | fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; |
878 | srb_t *sp; | ||
879 | int ret; | 876 | int ret; |
880 | unsigned int id, lun; | 877 | unsigned int id, lun; |
881 | unsigned long serial; | 878 | unsigned long serial; |
@@ -886,8 +883,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) | |||
886 | lun = cmd->device->lun; | 883 | lun = cmd->device->lun; |
887 | serial = cmd->serial_number; | 884 | serial = cmd->serial_number; |
888 | 885 | ||
889 | sp = (srb_t *) CMD_SP(cmd); | 886 | if (!fcport) |
890 | if (!sp || !fcport) | ||
891 | return ret; | 887 | return ret; |
892 | 888 | ||
893 | qla_printk(KERN_INFO, ha, | 889 | qla_printk(KERN_INFO, ha, |
@@ -936,7 +932,6 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) | |||
936 | { | 932 | { |
937 | scsi_qla_host_t *ha = to_qla_host(cmd->device->host); | 933 | scsi_qla_host_t *ha = to_qla_host(cmd->device->host); |
938 | fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; | 934 | fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; |
939 | srb_t *sp; | ||
940 | int ret; | 935 | int ret; |
941 | unsigned int id, lun; | 936 | unsigned int id, lun; |
942 | unsigned long serial; | 937 | unsigned long serial; |
@@ -947,8 +942,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) | |||
947 | lun = cmd->device->lun; | 942 | lun = cmd->device->lun; |
948 | serial = cmd->serial_number; | 943 | serial = cmd->serial_number; |
949 | 944 | ||
950 | sp = (srb_t *) CMD_SP(cmd); | 945 | if (!fcport) |
951 | if (!sp || !fcport) | ||
952 | return ret; | 946 | return ret; |
953 | 947 | ||
954 | qla_printk(KERN_INFO, ha, | 948 | qla_printk(KERN_INFO, ha, |
@@ -2244,9 +2238,6 @@ qla2x00_do_dpc(void *data) | |||
2244 | 2238 | ||
2245 | next_loopid = 0; | 2239 | next_loopid = 0; |
2246 | list_for_each_entry(fcport, &ha->fcports, list) { | 2240 | list_for_each_entry(fcport, &ha->fcports, list) { |
2247 | if (fcport->port_type != FCT_TARGET) | ||
2248 | continue; | ||
2249 | |||
2250 | /* | 2241 | /* |
2251 | * If the port is not ONLINE then try to login | 2242 | * If the port is not ONLINE then try to login |
2252 | * to it if we haven't run out of retries. | 2243 | * to it if we haven't run out of retries. |
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index d2d683440659..971259032ef7 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h | |||
@@ -7,9 +7,9 @@ | |||
7 | /* | 7 | /* |
8 | * Driver version | 8 | * Driver version |
9 | */ | 9 | */ |
10 | #define QLA2XXX_VERSION "8.01.05-k3" | 10 | #define QLA2XXX_VERSION "8.01.07-k1" |
11 | 11 | ||
12 | #define QLA_DRIVER_MAJOR_VER 8 | 12 | #define QLA_DRIVER_MAJOR_VER 8 |
13 | #define QLA_DRIVER_MINOR_VER 1 | 13 | #define QLA_DRIVER_MINOR_VER 1 |
14 | #define QLA_DRIVER_PATCH_VER 5 | 14 | #define QLA_DRIVER_PATCH_VER 7 |
15 | #define QLA_DRIVER_BETA_VER 0 | 15 | #define QLA_DRIVER_BETA_VER 0 |
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 6a5b731bd5ba..a8ed5a22009d 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c | |||
@@ -460,7 +460,8 @@ static void scsi_eh_done(struct scsi_cmnd *scmd) | |||
460 | * Return value: | 460 | * Return value: |
461 | * SUCCESS or FAILED or NEEDS_RETRY | 461 | * SUCCESS or FAILED or NEEDS_RETRY |
462 | **/ | 462 | **/ |
463 | static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout, int copy_sense) | 463 | static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd, |
464 | int cmnd_size, int timeout, int copy_sense) | ||
464 | { | 465 | { |
465 | struct scsi_device *sdev = scmd->device; | 466 | struct scsi_device *sdev = scmd->device; |
466 | struct Scsi_Host *shost = sdev->host; | 467 | struct Scsi_Host *shost = sdev->host; |
@@ -490,6 +491,9 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout, int copy_sense | |||
490 | old_cmd_len = scmd->cmd_len; | 491 | old_cmd_len = scmd->cmd_len; |
491 | old_use_sg = scmd->use_sg; | 492 | old_use_sg = scmd->use_sg; |
492 | 493 | ||
494 | memset(scmd->cmnd, 0, sizeof(scmd->cmnd)); | ||
495 | memcpy(scmd->cmnd, cmnd, cmnd_size); | ||
496 | |||
493 | if (copy_sense) { | 497 | if (copy_sense) { |
494 | int gfp_mask = GFP_ATOMIC; | 498 | int gfp_mask = GFP_ATOMIC; |
495 | 499 | ||
@@ -610,8 +614,7 @@ static int scsi_request_sense(struct scsi_cmnd *scmd) | |||
610 | static unsigned char generic_sense[6] = | 614 | static unsigned char generic_sense[6] = |
611 | {REQUEST_SENSE, 0, 0, 0, 252, 0}; | 615 | {REQUEST_SENSE, 0, 0, 0, 252, 0}; |
612 | 616 | ||
613 | memcpy(scmd->cmnd, generic_sense, sizeof(generic_sense)); | 617 | return scsi_send_eh_cmnd(scmd, generic_sense, 6, SENSE_TIMEOUT, 1); |
614 | return scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT, 1); | ||
615 | } | 618 | } |
616 | 619 | ||
617 | /** | 620 | /** |
@@ -736,10 +739,7 @@ static int scsi_eh_tur(struct scsi_cmnd *scmd) | |||
736 | int retry_cnt = 1, rtn; | 739 | int retry_cnt = 1, rtn; |
737 | 740 | ||
738 | retry_tur: | 741 | retry_tur: |
739 | memcpy(scmd->cmnd, tur_command, sizeof(tur_command)); | 742 | rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, SENSE_TIMEOUT, 0); |
740 | |||
741 | |||
742 | rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT, 0); | ||
743 | 743 | ||
744 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n", | 744 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n", |
745 | __FUNCTION__, scmd, rtn)); | 745 | __FUNCTION__, scmd, rtn)); |
@@ -839,8 +839,8 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd) | |||
839 | if (scmd->device->allow_restart) { | 839 | if (scmd->device->allow_restart) { |
840 | int rtn; | 840 | int rtn; |
841 | 841 | ||
842 | memcpy(scmd->cmnd, stu_command, sizeof(stu_command)); | 842 | rtn = scsi_send_eh_cmnd(scmd, stu_command, 6, |
843 | rtn = scsi_send_eh_cmnd(scmd, START_UNIT_TIMEOUT, 0); | 843 | START_UNIT_TIMEOUT, 0); |
844 | if (rtn == SUCCESS) | 844 | if (rtn == SUCCESS) |
845 | return 0; | 845 | return 0; |
846 | } | 846 | } |
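After this refactor scsi_send_eh_cmnd() owns the command buffer: it zeroes scmd->cmnd and copies in exactly cmnd_size bytes, centralizing the buffer handling each caller previously did with its own memcpy(). A minimal sketch of the resulting calling pattern, mirroring scsi_eh_tur() above (SENSE_TIMEOUT is the local timeout already used in this file):

static int example_eh_test_unit_ready(struct scsi_cmnd *scmd)
{
	static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0};

	/* CDB, CDB length, timeout, copy_sense */
	return scsi_send_eh_cmnd(scmd, tur_command, 6, SENSE_TIMEOUT, 0);
}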
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 7b9e8fa1a4e0..2ecd14188574 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #define ISCSI_SESSION_ATTRS 11 | 34 | #define ISCSI_SESSION_ATTRS 11 |
35 | #define ISCSI_CONN_ATTRS 11 | 35 | #define ISCSI_CONN_ATTRS 11 |
36 | #define ISCSI_HOST_ATTRS 0 | 36 | #define ISCSI_HOST_ATTRS 0 |
37 | #define ISCSI_TRANSPORT_VERSION "1.1-646" | ||
37 | 38 | ||
38 | struct iscsi_internal { | 39 | struct iscsi_internal { |
39 | int daemon_pid; | 40 | int daemon_pid; |
@@ -634,13 +635,13 @@ mempool_zone_get_skb(struct mempool_zone *zone) | |||
634 | } | 635 | } |
635 | 636 | ||
636 | static int | 637 | static int |
637 | iscsi_broadcast_skb(struct mempool_zone *zone, struct sk_buff *skb) | 638 | iscsi_broadcast_skb(struct mempool_zone *zone, struct sk_buff *skb, gfp_t gfp) |
638 | { | 639 | { |
639 | unsigned long flags; | 640 | unsigned long flags; |
640 | int rc; | 641 | int rc; |
641 | 642 | ||
642 | skb_get(skb); | 643 | skb_get(skb); |
643 | rc = netlink_broadcast(nls, skb, 0, 1, GFP_KERNEL); | 644 | rc = netlink_broadcast(nls, skb, 0, 1, gfp); |
644 | if (rc < 0) { | 645 | if (rc < 0) { |
645 | mempool_free(skb, zone->pool); | 646 | mempool_free(skb, zone->pool); |
646 | printk(KERN_ERR "iscsi: can not broadcast skb (%d)\n", rc); | 647 | printk(KERN_ERR "iscsi: can not broadcast skb (%d)\n", rc); |
@@ -749,7 +750,7 @@ void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error) | |||
749 | ev->r.connerror.cid = conn->cid; | 750 | ev->r.connerror.cid = conn->cid; |
750 | ev->r.connerror.sid = iscsi_conn_get_sid(conn); | 751 | ev->r.connerror.sid = iscsi_conn_get_sid(conn); |
751 | 752 | ||
752 | iscsi_broadcast_skb(conn->z_error, skb); | 753 | iscsi_broadcast_skb(conn->z_error, skb, GFP_ATOMIC); |
753 | 754 | ||
754 | dev_printk(KERN_INFO, &conn->dev, "iscsi: detected conn error (%d)\n", | 755 | dev_printk(KERN_INFO, &conn->dev, "iscsi: detected conn error (%d)\n", |
755 | error); | 756 | error); |
@@ -895,7 +896,7 @@ int iscsi_if_destroy_session_done(struct iscsi_cls_conn *conn) | |||
895 | * this will occur if the daemon is not up, so we just warn | 896 | * this will occur if the daemon is not up, so we just warn |
896 | * the user and when the daemon is restarted it will handle it | 897 | * the user and when the daemon is restarted it will handle it |
897 | */ | 898 | */ |
898 | rc = iscsi_broadcast_skb(conn->z_pdu, skb); | 899 | rc = iscsi_broadcast_skb(conn->z_pdu, skb, GFP_KERNEL); |
899 | if (rc < 0) | 900 | if (rc < 0) |
900 | dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of " | 901 | dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of " |
901 | "session destruction event. Check iscsi daemon\n"); | 902 | "session destruction event. Check iscsi daemon\n"); |
@@ -958,7 +959,7 @@ int iscsi_if_create_session_done(struct iscsi_cls_conn *conn) | |||
958 | * this will occur if the daemon is not up, so we just warn | 959 | * this will occur if the daemon is not up, so we just warn |
959 | * the user and when the daemon is restarted it will handle it | 960 | * the user and when the daemon is restarted it will handle it |
960 | */ | 961 | */ |
961 | rc = iscsi_broadcast_skb(conn->z_pdu, skb); | 962 | rc = iscsi_broadcast_skb(conn->z_pdu, skb, GFP_KERNEL); |
962 | if (rc < 0) | 963 | if (rc < 0) |
963 | dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of " | 964 | dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of " |
964 | "session creation event. Check iscsi daemon\n"); | 965 | "session creation event. Check iscsi daemon\n"); |
@@ -1613,6 +1614,9 @@ static __init int iscsi_transport_init(void) | |||
1613 | { | 1614 | { |
1614 | int err; | 1615 | int err; |
1615 | 1616 | ||
1617 | printk(KERN_INFO "Loading iSCSI transport class v%s.", | ||
1618 | ISCSI_TRANSPORT_VERSION); | ||
1619 | |||
1616 | err = class_register(&iscsi_transport_class); | 1620 | err = class_register(&iscsi_transport_class); |
1617 | if (err) | 1621 | if (err) |
1618 | return err; | 1622 | return err; |
@@ -1678,3 +1682,4 @@ MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, " | |||
1678 | "Alex Aizman <itn780@yahoo.com>"); | 1682 | "Alex Aizman <itn780@yahoo.com>"); |
1679 | MODULE_DESCRIPTION("iSCSI Transport Interface"); | 1683 | MODULE_DESCRIPTION("iSCSI Transport Interface"); |
1680 | MODULE_LICENSE("GPL"); | 1684 | MODULE_LICENSE("GPL"); |
1685 | MODULE_VERSION(ISCSI_TRANSPORT_VERSION); | ||
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 65eef33846bb..34f9343ed0af 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c | |||
@@ -18,8 +18,8 @@ | |||
18 | * | 18 | * |
19 | */ | 19 | */ |
20 | 20 | ||
21 | static int sg_version_num = 30533; /* 2 digits for each component */ | 21 | static int sg_version_num = 30534; /* 2 digits for each component */ |
22 | #define SG_VERSION_STR "3.5.33" | 22 | #define SG_VERSION_STR "3.5.34" |
23 | 23 | ||
24 | /* | 24 | /* |
25 | * D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes: | 25 | * D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes: |
@@ -60,7 +60,7 @@ static int sg_version_num = 30533; /* 2 digits for each component */ | |||
60 | 60 | ||
61 | #ifdef CONFIG_SCSI_PROC_FS | 61 | #ifdef CONFIG_SCSI_PROC_FS |
62 | #include <linux/proc_fs.h> | 62 | #include <linux/proc_fs.h> |
63 | static char *sg_version_date = "20050908"; | 63 | static char *sg_version_date = "20060818"; |
64 | 64 | ||
65 | static int sg_proc_init(void); | 65 | static int sg_proc_init(void); |
66 | static void sg_proc_cleanup(void); | 66 | static void sg_proc_cleanup(void); |
@@ -1164,7 +1164,7 @@ sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type) | |||
1164 | len = vma->vm_end - sa; | 1164 | len = vma->vm_end - sa; |
1165 | len = (len < sg->length) ? len : sg->length; | 1165 | len = (len < sg->length) ? len : sg->length; |
1166 | if (offset < len) { | 1166 | if (offset < len) { |
1167 | page = sg->page; | 1167 | page = virt_to_page(page_address(sg->page) + offset); |
1168 | get_page(page); /* increment page count */ | 1168 | get_page(page); /* increment page count */ |
1169 | break; | 1169 | break; |
1170 | } | 1170 | } |
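The one-line sg change fixes mmap() faults into scatterlist segments larger than a page: the old code always returned the segment's first page, so any offset beyond PAGE_SIZE mapped the wrong memory. The fix steps offset bytes into the segment before converting back to a struct page. The arithmetic in isolation, as a hedged sketch (field names follow the 2.6.18-era struct scatterlist, which still has a ->page member):

static struct page *example_sg_page_at(struct scatterlist *sg,
				       unsigned long offset)
{
	/* sg->page is the first page of a physically contiguous buffer;
	 * offset selects the page that actually backs the faulting address. */
	return virt_to_page(page_address(sg->page) + offset);
}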
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c index 8c505076c0eb..739d3ef46a40 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c | |||
@@ -2084,7 +2084,7 @@ static struct pci_device_id sym2_id_table[] __devinitdata = { | |||
2084 | { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C860, | 2084 | { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C860, |
2085 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, | 2085 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, |
2086 | { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1510, | 2086 | { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1510, |
2087 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, | 2087 | PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SCSI<<8, 0xffff00, 0UL }, |
2088 | { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C896, | 2088 | { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C896, |
2089 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, | 2089 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, |
2090 | { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C895, | 2090 | { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C895, |