author		Swen Schillig <swen@vnet.ibm.com>	2009-08-18 09:43:19 -0400
committer	James Bottomley <James.Bottomley@suse.de>	2009-09-05 09:49:27 -0400
commit		564e1c86c810f9ccfe4300afa402815e3db4886d (patch)
tree		ecb88038c443d6486e9df352c79b3c78be5454ef /drivers/s390
parent		42428f747a8a0db9c6de03e105932316defad65d (diff)
[SCSI] zfcp: Move qdio related data out of zfcp_adapter
The zfcp_adapter structure was growing over time to a size of almost
one memory page. To reduce the size of the data structure and to
separate different layers, put all qdio related data in the new
zfcp_qdio data structure.
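In outline, the queue state now lives in its own structure and the adapter
only keeps a pointer to it (a condensed sketch based on the zfcp_def.h hunk
below, omitting the accounting fields; not the full kernel definitions):

	struct zfcp_qdio {
		struct zfcp_qdio_queue	resp_q;		/* response queue */
		struct zfcp_qdio_queue	req_q;		/* request queue */
		spinlock_t		req_q_lock;	/* serializes access to req_q */
		wait_queue_head_t	req_q_wq;	/* wait for free SBALs */
		struct zfcp_adapter	*adapter;	/* back-pointer for callers */
	};

	struct zfcp_adapter {
		/* ... */
		struct zfcp_qdio	*qdio;		/* allocated in zfcp_adapter_enqueue() */
		/* ... */
	};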
Signed-off-by: Swen Schillig <swen@vnet.ibm.com>
Signed-off-by: Christof Schmitt <christof.schmitt@de.ibm.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
Diffstat (limited to 'drivers/s390')

-rw-r--r--	drivers/s390/scsi/zfcp_aux.c	 20
-rw-r--r--	drivers/s390/scsi/zfcp_dbf.c	  6
-rw-r--r--	drivers/s390/scsi/zfcp_def.h	 34
-rw-r--r--	drivers/s390/scsi/zfcp_erp.c	  8
-rw-r--r--	drivers/s390/scsi/zfcp_ext.h	 26
-rw-r--r--	drivers/s390/scsi/zfcp_fsf.c	299
-rw-r--r--	drivers/s390/scsi/zfcp_qdio.c	224
-rw-r--r--	drivers/s390/scsi/zfcp_scsi.c	  6
-rw-r--r--	drivers/s390/scsi/zfcp_sysfs.c	 16

9 files changed, 343 insertions, 296 deletions
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index f785cbc7520d..ed7211ef04eb 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -428,7 +428,7 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
 int zfcp_status_read_refill(struct zfcp_adapter *adapter)
 {
 	while (atomic_read(&adapter->stat_miss) > 0)
-		if (zfcp_fsf_status_read(adapter)) {
+		if (zfcp_fsf_status_read(adapter->qdio)) {
 			if (atomic_read(&adapter->stat_miss) >= 16) {
 				zfcp_erp_adapter_reopen(adapter, 0, "axsref1",
 							NULL);
@@ -507,11 +507,16 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
 		return -ENOMEM;
 	}
 
+	adapter->qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL);
+	if (!adapter->qdio)
+		goto qdio_mem_failed;
+
+	adapter->qdio->adapter = adapter;
 	ccw_device->handler = NULL;
 	adapter->ccw_device = ccw_device;
 	atomic_set(&adapter->refcount, 0);
 
-	if (zfcp_qdio_allocate(adapter))
+	if (zfcp_qdio_allocate(adapter->qdio, ccw_device))
 		goto qdio_allocate_failed;
 
 	if (zfcp_allocate_low_mem_buffers(adapter))
@@ -536,8 +541,8 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
 
 	spin_lock_init(&adapter->req_list_lock);
 
-	spin_lock_init(&adapter->req_q_lock);
-	spin_lock_init(&adapter->qdio_stat_lock);
+	spin_lock_init(&adapter->qdio->req_q_lock);
+	spin_lock_init(&adapter->qdio->stat_lock);
 
 	rwlock_init(&adapter->erp_lock);
 	rwlock_init(&adapter->abort_lock);
@@ -574,7 +579,9 @@ debug_register_failed:
 failed_low_mem_buffers:
 	zfcp_free_low_mem_buffers(adapter);
 qdio_allocate_failed:
-	zfcp_qdio_free(adapter);
+	zfcp_qdio_free(adapter->qdio);
+	kfree(adapter->qdio);
+qdio_mem_failed:
 	kfree(adapter);
 	return -ENOMEM;
 }
@@ -605,12 +612,13 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
 
 	zfcp_destroy_adapter_work_queue(adapter);
 	zfcp_adapter_debug_unregister(adapter);
-	zfcp_qdio_free(adapter);
+	zfcp_qdio_free(adapter->qdio);
 	zfcp_free_low_mem_buffers(adapter);
 	kfree(adapter->req_list);
 	kfree(adapter->fc_stats);
 	kfree(adapter->stats_reset_data);
 	kfree(adapter->gs);
+	kfree(adapter->qdio);
 	kfree(adapter);
 }
 
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index fc7f3d66fe37..3179b08bda6a 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -274,16 +274,16 @@ void _zfcp_hba_dbf_event_fsf_unsol(const char *tag, int level,
 
 /**
  * zfcp_hba_dbf_event_qdio - trace event for QDIO related failure
- * @adapter: adapter affected by this QDIO related event
+ * @qdio: qdio structure affected by this QDIO related event
  * @qdio_error: as passed by qdio module
  * @sbal_index: first buffer with error condition, as passed by qdio module
  * @sbal_count: number of buffers affected, as passed by qdio module
  */
-void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter,
+void zfcp_hba_dbf_event_qdio(struct zfcp_qdio *qdio,
 			     unsigned int qdio_error, int sbal_index,
 			     int sbal_count)
 {
-	struct zfcp_dbf *dbf = adapter->dbf;
+	struct zfcp_dbf *dbf = qdio->adapter->dbf;
 	struct zfcp_hba_dbf_record *r = &dbf->hba_dbf_buf;
 	unsigned long flags;
 
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index a04bdfd4d2f6..bac5c497eab5 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -428,6 +428,29 @@ struct zfcp_latencies {
 	spinlock_t lock;
 };
 
+/** struct zfcp_qdio - basic QDIO data structure
+ * @resp_q: response queue
+ * @req_q: request queue
+ * @stat_lock: lock to protect req_q_util and req_q_time
+ * @req_q_lock; lock to serialize access to request queue
+ * @req_q_time: time of last fill level change
+ * @req_q_util: used for accounting
+ * @req_q_full: queue full incidents
+ * @req_q_wq: used to wait for SBAL availability
+ * @adapter: adapter used in conjunction with this QDIO structure
+ */
+struct zfcp_qdio {
+	struct zfcp_qdio_queue	resp_q;
+	struct zfcp_qdio_queue	req_q;
+	spinlock_t		stat_lock;
+	spinlock_t		req_q_lock;
+	ktime_t			req_q_time;
+	u64			req_q_util;
+	atomic_t		req_q_full;
+	wait_queue_head_t	req_q_wq;
+	struct zfcp_adapter	*adapter;
+};
+
 struct zfcp_adapter {
 	atomic_t		refcount;	/* reference count */
 	wait_queue_head_t	remove_wq;	/* can be used to wait for
@@ -436,6 +459,7 @@ struct zfcp_adapter {
 	u64			peer_wwpn;	/* P2P peer WWPN */
 	u32			peer_d_id;	/* P2P peer D_ID */
 	struct ccw_device	*ccw_device;	/* S/390 ccw device */
+	struct zfcp_qdio	*qdio;
 	u32			hydra_version;	/* Hydra version */
 	u32			fsf_lic_version;
 	u32			adapter_features; /* FCP channel features */
@@ -447,15 +471,7 @@ struct zfcp_adapter {
 	unsigned long		req_no;		/* unique FSF req number */
 	struct list_head	*req_list;	/* list of pending reqs */
 	spinlock_t		req_list_lock;	/* request list lock */
-	struct zfcp_qdio_queue	req_q;		/* request queue */
-	spinlock_t		req_q_lock;	/* for operations on queue */
-	ktime_t			req_q_time;	/* time of last fill level change */
-	u64			req_q_util;	/* for accounting */
-	spinlock_t		qdio_stat_lock;
 	u32			fsf_req_seq_no;	/* FSF cmnd seq number */
-	wait_queue_head_t	request_wq;	/* can be used to wait for
-						   more avaliable SBALs */
-	struct zfcp_qdio_queue	resp_q;		/* response queue */
 	rwlock_t		abort_lock;	/* Protects against SCSI
 						   stack abort/command
 						   completion races */
@@ -478,13 +494,11 @@ struct zfcp_adapter {
 	struct zfcp_wka_ports	*gs;		/* generic services */
 	struct zfcp_dbf		*dbf;		/* debug traces */
 	struct zfcp_adapter_mempool	pool;	/* Adapter memory pools */
-	struct qdio_initialize	qdio_init_data;	/* for qdio_establish */
 	struct fc_host_statistics *fc_stats;
 	struct fsf_qtcb_bottom_port *stats_reset_data;
 	unsigned long		stats_reset;
 	struct work_struct	scan_work;
 	struct service_level	service_level;
-	atomic_t		qdio_outb_full;	/* queue full incidents */
 	struct workqueue_struct	*work_queue;
 };
 
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 50e5fbe2252a..feda1db56b23 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -603,9 +603,11 @@ static void zfcp_erp_wakeup(struct zfcp_adapter *adapter)
 
 static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *act)
 {
-	if (zfcp_qdio_open(act->adapter))
+	struct zfcp_qdio *qdio = act->adapter->qdio;
+
+	if (zfcp_qdio_open(qdio))
 		return ZFCP_ERP_FAILED;
-	init_waitqueue_head(&act->adapter->request_wq);
+	init_waitqueue_head(&qdio->req_q_wq);
 	atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &act->adapter->status);
 	return ZFCP_ERP_SUCCEEDED;
 }
@@ -710,7 +712,7 @@ static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act)
 	struct zfcp_adapter *adapter = act->adapter;
 
 	/* close queues to ensure that buffers are not accessed by adapter */
-	zfcp_qdio_close(adapter);
+	zfcp_qdio_close(adapter->qdio);
 	zfcp_fsf_req_dismiss_all(adapter);
 	adapter->fsf_req_seq_no = 0;
 	zfcp_fc_wka_ports_force_offline(adapter->gs);
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index d11c0f44dad3..e97947d2f2ed 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -51,7 +51,7 @@ extern void _zfcp_hba_dbf_event_fsf_response(const char *, int level,
 extern void _zfcp_hba_dbf_event_fsf_unsol(const char *, int level,
 					   struct zfcp_adapter *,
 					   struct fsf_status_read_buffer *);
-extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *, unsigned int, int,
+extern void zfcp_hba_dbf_event_qdio(struct zfcp_qdio *, unsigned int, int,
 				    int);
 extern void zfcp_hba_dbf_event_berr(struct zfcp_adapter *,
 				    struct zfcp_fsf_req *);
@@ -118,15 +118,15 @@ extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *);
 extern int zfcp_fsf_open_unit(struct zfcp_erp_action *);
 extern int zfcp_fsf_close_unit(struct zfcp_erp_action *);
 extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *);
-extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *,
+extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *,
 					      struct fsf_qtcb_bottom_config *);
 extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *);
-extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *,
+extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *,
 					    struct fsf_qtcb_bottom_port *);
 extern struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *,
 						  struct zfcp_fsf_cfdc *);
 extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
-extern int zfcp_fsf_status_read(struct zfcp_adapter *);
+extern int zfcp_fsf_status_read(struct zfcp_qdio *);
 extern int zfcp_status_read_refill(struct zfcp_adapter *adapter);
 extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *,
 			    struct zfcp_erp_action *);
@@ -137,21 +137,21 @@ extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
 extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *, u8);
 extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long,
 						       struct zfcp_unit *);
-extern void zfcp_fsf_reqid_check(struct zfcp_adapter *, int);
+extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int);
 
 /* zfcp_qdio.c */
-extern int zfcp_qdio_allocate(struct zfcp_adapter *);
-extern void zfcp_qdio_free(struct zfcp_adapter *);
-extern int zfcp_qdio_send(struct zfcp_adapter *, struct zfcp_queue_req *);
+extern int zfcp_qdio_allocate(struct zfcp_qdio *, struct ccw_device *);
+extern void zfcp_qdio_free(struct zfcp_qdio *);
+extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_queue_req *);
 extern struct qdio_buffer_element
-	*zfcp_qdio_sbale_req(struct zfcp_adapter *, struct zfcp_queue_req *);
+	*zfcp_qdio_sbale_req(struct zfcp_qdio *, struct zfcp_queue_req *);
 extern struct qdio_buffer_element
-	*zfcp_qdio_sbale_curr(struct zfcp_adapter *, struct zfcp_queue_req *);
-extern int zfcp_qdio_sbals_from_sg(struct zfcp_adapter *,
+	*zfcp_qdio_sbale_curr(struct zfcp_qdio *, struct zfcp_queue_req *);
+extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *,
 				   struct zfcp_queue_req *, unsigned long,
 				   struct scatterlist *, int);
-extern int zfcp_qdio_open(struct zfcp_adapter *);
-extern void zfcp_qdio_close(struct zfcp_adapter *);
+extern int zfcp_qdio_open(struct zfcp_qdio *);
+extern void zfcp_qdio_close(struct zfcp_qdio *);
 
 /* zfcp_scsi.c */
 extern struct zfcp_data zfcp_data;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index e88b7804780b..b9a16e4b48b4 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -637,33 +637,34 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
 	}
 }
 
-static int zfcp_fsf_sbal_check(struct zfcp_adapter *adapter)
+static int zfcp_fsf_sbal_check(struct zfcp_qdio *qdio)
 {
-	struct zfcp_qdio_queue *req_q = &adapter->req_q;
+	struct zfcp_qdio_queue *req_q = &qdio->req_q;
 
-	spin_lock_bh(&adapter->req_q_lock);
+	spin_lock_bh(&qdio->req_q_lock);
 	if (atomic_read(&req_q->count))
 		return 1;
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return 0;
 }
 
-static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter)
+static int zfcp_fsf_req_sbal_get(struct zfcp_qdio *qdio)
 {
+	struct zfcp_adapter *adapter = qdio->adapter;
 	long ret;
 
-	spin_unlock_bh(&adapter->req_q_lock);
-	ret = wait_event_interruptible_timeout(adapter->request_wq,
-			zfcp_fsf_sbal_check(adapter), 5 * HZ);
+	spin_unlock_bh(&qdio->req_q_lock);
+	ret = wait_event_interruptible_timeout(qdio->req_q_wq,
+			zfcp_fsf_sbal_check(qdio), 5 * HZ);
 	if (ret > 0)
 		return 0;
 	if (!ret) {
-		atomic_inc(&adapter->qdio_outb_full);
+		atomic_inc(&qdio->req_q_full);
 		/* assume hanging outbound queue, try queue recovery */
 		zfcp_erp_adapter_reopen(adapter, 0, "fsrsg_1", NULL);
 	}
 
-	spin_lock_bh(&adapter->req_q_lock);
+	spin_lock_bh(&qdio->req_q_lock);
 	return -EIO;
 }
 
@@ -700,11 +701,12 @@ static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
 	return qtcb;
 }
 
-static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter,
+static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
 						u32 fsf_cmd, mempool_t *pool)
 {
 	struct qdio_buffer_element *sbale;
-	struct zfcp_qdio_queue *req_q = &adapter->req_q;
+	struct zfcp_qdio_queue *req_q = &qdio->req_q;
+	struct zfcp_adapter *adapter = qdio->adapter;
 	struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
 
 	if (unlikely(!req))
@@ -725,7 +727,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter,
 	req->queue_req.sbal_last = req_q->first;
 	req->queue_req.sbale_curr = 1;
 
-	sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].addr = (void *) req->req_id;
 	sbale[0].flags |= SBAL_FLAGS0_COMMAND;
 
@@ -740,7 +742,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter,
 		return ERR_PTR(-ENOMEM);
 	}
 
-	req->qtcb->prefix.req_seq_no = req->adapter->fsf_req_seq_no;
+	req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
 	req->qtcb->prefix.req_id = req->req_id;
 	req->qtcb->prefix.ulp_info = 26;
 	req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command];
@@ -764,6 +766,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter,
 static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
 {
 	struct zfcp_adapter *adapter = req->adapter;
+	struct zfcp_qdio *qdio = adapter->qdio;
 	unsigned long flags;
 	int idx;
 	int with_qtcb = (req->qtcb != NULL);
@@ -774,9 +777,9 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
 	list_add_tail(&req->list, &adapter->req_list[idx]);
 	spin_unlock_irqrestore(&adapter->req_list_lock, flags);
 
-	req->queue_req.qdio_outb_usage = atomic_read(&adapter->req_q.count);
+	req->queue_req.qdio_outb_usage = atomic_read(&qdio->req_q.count);
 	req->issued = get_clock();
-	if (zfcp_qdio_send(adapter, &req->queue_req)) {
+	if (zfcp_qdio_send(qdio, &req->queue_req)) {
 		del_timer(&req->timer);
 		spin_lock_irqsave(&adapter->req_list_lock, flags);
 		/* lookup request again, list might have changed */
@@ -801,25 +804,26 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
  * @req_flags: request flags
  * Returns: 0 on success, ERROR otherwise
  */
-int zfcp_fsf_status_read(struct zfcp_adapter *adapter)
+int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
 {
+	struct zfcp_adapter *adapter = qdio->adapter;
 	struct zfcp_fsf_req *req;
 	struct fsf_status_read_buffer *sr_buf;
 	struct qdio_buffer_element *sbale;
 	int retval = -EIO;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
 
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_UNSOLICITED_STATUS,
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS,
 				  adapter->pool.status_read_req);
 	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
 		goto out;
 	}
 
-	sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
 	req->queue_req.sbale_curr = 2;
 
@@ -830,7 +834,7 @@ int zfcp_fsf_status_read(struct zfcp_adapter *adapter)
 	}
 	memset(sr_buf, 0, sizeof(*sr_buf));
 	req->data = sr_buf;
-	sbale = zfcp_qdio_sbale_curr(adapter, &req->queue_req);
+	sbale = zfcp_qdio_sbale_curr(qdio, &req->queue_req);
 	sbale->addr = (void *) sr_buf;
 	sbale->length = sizeof(*sr_buf);
 
@@ -846,7 +850,7 @@ failed_buf:
 	zfcp_fsf_req_free(req);
 	zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL);
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return retval;
 }
 
@@ -913,13 +917,13 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
 {
 	struct qdio_buffer_element *sbale;
 	struct zfcp_fsf_req *req = NULL;
-	struct zfcp_adapter *adapter = unit->port->adapter;
+	struct zfcp_qdio *qdio = unit->port->adapter->qdio;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND,
-				  adapter->pool.scsi_abort);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
				  qdio->adapter->pool.scsi_abort);
 	if (IS_ERR(req)) {
 		req = NULL;
 		goto out;
@@ -929,7 +933,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
 		       ZFCP_STATUS_COMMON_UNBLOCKED)))
 		goto out_error_free;
 
-	sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -947,7 +951,7 @@ out_error_free:
 	zfcp_fsf_req_free(req);
 	req = NULL;
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return req;
 }
 
@@ -1024,7 +1028,7 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
 				       int max_sbals)
 {
 	struct zfcp_adapter *adapter = req->adapter;
-	struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter,
+	struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter->qdio,
 								&req->queue_req);
 	u32 feat = adapter->adapter_features;
 	int bytes;
@@ -1043,7 +1047,7 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
 		return 0;
 	}
 
-	bytes = zfcp_qdio_sbals_from_sg(adapter, &req->queue_req,
+	bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req,
 					SBAL_FLAGS0_TYPE_WRITE_READ,
 					sg_req, max_sbals);
 	if (bytes <= 0)
@@ -1051,7 +1055,7 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
 	req->qtcb->bottom.support.req_buf_length = bytes;
 	req->queue_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
 
-	bytes = zfcp_qdio_sbals_from_sg(adapter, &req->queue_req,
+	bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req,
 					SBAL_FLAGS0_TYPE_WRITE_READ,
 					sg_resp, max_sbals);
 	if (bytes <= 0)
@@ -1071,15 +1075,15 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
 		     struct zfcp_erp_action *erp_action)
 {
 	struct zfcp_wka_port *wka_port = ct->wka_port;
-	struct zfcp_adapter *adapter = wka_port->adapter;
+	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
 	struct zfcp_fsf_req *req;
 	int ret = -EIO;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
 
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_GENERIC, pool);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC, pool);
 
 	if (IS_ERR(req)) {
 		ret = PTR_ERR(req);
@@ -1118,7 +1122,7 @@ failed_send:
 	if (erp_action)
 		erp_action->fsf_req = NULL;
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return ret;
 }
 
@@ -1181,15 +1185,15 @@ skip_fsfstatus:
 int zfcp_fsf_send_els(struct zfcp_send_els *els)
 {
 	struct zfcp_fsf_req *req;
-	struct zfcp_adapter *adapter = els->adapter;
+	struct zfcp_qdio *qdio = els->adapter->qdio;
 	struct fsf_qtcb_bottom_support *bottom;
 	int ret = -EIO;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
 
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS, NULL);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS, NULL);
 
 	if (IS_ERR(req)) {
 		ret = PTR_ERR(req);
@@ -1221,7 +1225,7 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els)
 failed_send:
 	zfcp_fsf_req_free(req);
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return ret;
 }
 
@@ -1229,15 +1233,15 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
 {
 	struct qdio_buffer_element *sbale;
 	struct zfcp_fsf_req *req;
-	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
 	int retval = -EIO;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
 
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA,
-				  adapter->pool.erp_req);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
+				  qdio->adapter->pool.erp_req);
 
 	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
@@ -1245,7 +1249,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1265,29 +1269,29 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
 		erp_action->fsf_req = NULL;
 	}
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return retval;
 }
 
-int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter,
+int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
 				       struct fsf_qtcb_bottom_config *data)
 {
 	struct qdio_buffer_element *sbale;
 	struct zfcp_fsf_req *req = NULL;
 	int retval = -EIO;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out_unlock;
 
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA, NULL);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, NULL);
 
 	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
 		goto out_unlock;
 	}
 
-	sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 	req->handler = zfcp_fsf_exchange_config_data_handler;
@@ -1303,7 +1307,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter,
 
 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
 	retval = zfcp_fsf_req_send(req);
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	if (!retval)
 		wait_for_completion(&req->completion);
 
@@ -1311,7 +1315,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter,
 	return retval;
 
 out_unlock:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return retval;
 }
 
@@ -1322,20 +1326,20 @@ out_unlock:
  */
 int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
 {
+	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
 	struct qdio_buffer_element *sbale;
 	struct zfcp_fsf_req *req;
-	struct zfcp_adapter *adapter = erp_action->adapter;
 	int retval = -EIO;
 
-	if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
+	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
 		return -EOPNOTSUPP;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
 
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA,
-				  adapter->pool.erp_req);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
+				  qdio->adapter->pool.erp_req);
 
 	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
@@ -1343,7 +1347,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1358,31 +1362,31 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
 		erp_action->fsf_req = NULL;
 	}
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return retval;
 }
 
 /**
  * zfcp_fsf_exchange_port_data_sync - request information about local port
- * @adapter: pointer to struct zfcp_adapter
+ * @qdio: pointer to struct zfcp_qdio
  * @data: pointer to struct fsf_qtcb_bottom_port
 * Returns: 0 on success, error otherwise
 */
-int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter,
+int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
 				     struct fsf_qtcb_bottom_port *data)
 {
 	struct qdio_buffer_element *sbale;
 	struct zfcp_fsf_req *req = NULL;
 	int retval = -EIO;
 
-	if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
+	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
 		return -EOPNOTSUPP;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out_unlock;
 
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, NULL);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, NULL);
 
 	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
@@ -1392,14 +1396,14 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter,
 	if (data)
 		req->data = data;
 
-	sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
 	req->handler = zfcp_fsf_exchange_port_data_handler;
 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
 	retval = zfcp_fsf_req_send(req);
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 
 	if (!retval)
 		wait_for_completion(&req->completion);
@@ -1409,7 +1413,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter,
 	return retval;
 
 out_unlock:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return retval;
 }
 
@@ -1495,17 +1499,17 @@ out:
 int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
 {
 	struct qdio_buffer_element *sbale;
-	struct zfcp_adapter *adapter = erp_action->adapter;
-	struct zfcp_fsf_req *req;
+	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
 	struct zfcp_port *port = erp_action->port;
+	struct zfcp_fsf_req *req;
 	int retval = -EIO;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
 
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_PORT_WITH_DID,
-				  adapter->pool.erp_req);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
+				  qdio->adapter->pool.erp_req);
 
 	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
@@ -1513,7 +1517,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1532,7 +1536,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
 		zfcp_port_put(port);
 	}
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return retval;
 }
 
@@ -1566,16 +1570,16 @@ static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
 int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
 {
 	struct qdio_buffer_element *sbale;
-	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
 	struct zfcp_fsf_req *req;
 	int retval = -EIO;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
 
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT,
-				  adapter->pool.erp_req);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
+				  qdio->adapter->pool.erp_req);
 
 	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
@@ -1583,7 +1587,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1600,7 +1604,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
 		erp_action->fsf_req = NULL;
 	}
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return retval;
 }
 
@@ -1643,16 +1647,16 @@ out:
 int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port)
 {
 	struct qdio_buffer_element *sbale;
-	struct zfcp_adapter *adapter = wka_port->adapter;
+	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
 	struct zfcp_fsf_req *req;
 	int retval = -EIO;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
 
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_PORT_WITH_DID,
-				  adapter->pool.erp_req);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
+				  qdio->adapter->pool.erp_req);
 
 	if (unlikely(IS_ERR(req))) {
 		retval = PTR_ERR(req);
@@ -1660,7 +1664,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port)
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1673,7 +1677,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port)
 	if (retval)
 		zfcp_fsf_req_free(req);
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return retval;
 }
 
@@ -1698,16 +1702,16 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
 int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port)
 {
 	struct qdio_buffer_element *sbale;
-	struct zfcp_adapter *adapter = wka_port->adapter;
+	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
 	struct zfcp_fsf_req *req;
 	int retval = -EIO;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
 
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT,
-				  adapter->pool.erp_req);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
+				  qdio->adapter->pool.erp_req);
 
 	if (unlikely(IS_ERR(req))) {
 		retval = PTR_ERR(req);
@@ -1715,7 +1719,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port)
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1728,7 +1732,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port)
 	if (retval)
 		zfcp_fsf_req_free(req);
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return retval;
 }
 
@@ -1790,16 +1794,16 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
 int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
 {
 	struct qdio_buffer_element *sbale;
-	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
 	struct zfcp_fsf_req *req;
 	int retval = -EIO;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
 
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PHYSICAL_PORT,
-				  adapter->pool.erp_req);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
+				  qdio->adapter->pool.erp_req);
 
 	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
@@ -1807,7 +1811,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1824,7 +1828,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
 		erp_action->fsf_req = NULL;
 	}
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return retval;
 }
 
@@ -1964,14 +1968,15 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) | |||
1964 | { | 1968 | { |
1965 | struct qdio_buffer_element *sbale; | 1969 | struct qdio_buffer_element *sbale; |
1966 | struct zfcp_adapter *adapter = erp_action->adapter; | 1970 | struct zfcp_adapter *adapter = erp_action->adapter; |
1971 | struct zfcp_qdio *qdio = adapter->qdio; | ||
1967 | struct zfcp_fsf_req *req; | 1972 | struct zfcp_fsf_req *req; |
1968 | int retval = -EIO; | 1973 | int retval = -EIO; |
1969 | 1974 | ||
1970 | spin_lock_bh(&adapter->req_q_lock); | 1975 | spin_lock_bh(&qdio->req_q_lock); |
1971 | if (zfcp_fsf_req_sbal_get(adapter)) | 1976 | if (zfcp_fsf_req_sbal_get(qdio)) |
1972 | goto out; | 1977 | goto out; |
1973 | 1978 | ||
1974 | req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_LUN, | 1979 | req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN, |
1975 | adapter->pool.erp_req); | 1980 | adapter->pool.erp_req); |
1976 | 1981 | ||
1977 | if (IS_ERR(req)) { | 1982 | if (IS_ERR(req)) { |
@@ -1980,7 +1985,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) | |||
1980 | } | 1985 | } |
1981 | 1986 | ||
1982 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; | 1987 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; |
1983 | sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); | 1988 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); |
1984 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 1989 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
1985 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 1990 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
1986 | 1991 | ||
@@ -2001,7 +2006,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) | |||
2001 | erp_action->fsf_req = NULL; | 2006 | erp_action->fsf_req = NULL; |
2002 | } | 2007 | } |
2003 | out: | 2008 | out: |
2004 | spin_unlock_bh(&adapter->req_q_lock); | 2009 | spin_unlock_bh(&qdio->req_q_lock); |
2005 | return retval; | 2010 | return retval; |
2006 | } | 2011 | } |
2007 | 2012 | ||
@@ -2050,16 +2055,16 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req) | |||
2050 | int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) | 2055 | int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) |
2051 | { | 2056 | { |
2052 | struct qdio_buffer_element *sbale; | 2057 | struct qdio_buffer_element *sbale; |
2053 | struct zfcp_adapter *adapter = erp_action->adapter; | 2058 | struct zfcp_qdio *qdio = erp_action->adapter->qdio; |
2054 | struct zfcp_fsf_req *req; | 2059 | struct zfcp_fsf_req *req; |
2055 | int retval = -EIO; | 2060 | int retval = -EIO; |
2056 | 2061 | ||
2057 | spin_lock_bh(&adapter->req_q_lock); | 2062 | spin_lock_bh(&qdio->req_q_lock); |
2058 | if (zfcp_fsf_req_sbal_get(adapter)) | 2063 | if (zfcp_fsf_req_sbal_get(qdio)) |
2059 | goto out; | 2064 | goto out; |
2060 | 2065 | ||
2061 | req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_LUN, | 2066 | req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN, |
2062 | adapter->pool.erp_req); | 2067 | qdio->adapter->pool.erp_req); |
2063 | 2068 | ||
2064 | if (IS_ERR(req)) { | 2069 | if (IS_ERR(req)) { |
2065 | retval = PTR_ERR(req); | 2070 | retval = PTR_ERR(req); |
@@ -2067,7 +2072,7 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) | |||
2067 | } | 2072 | } |
2068 | 2073 | ||
2069 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; | 2074 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; |
2070 | sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); | 2075 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); |
2071 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 2076 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
2072 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 2077 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
2073 | 2078 | ||
@@ -2085,7 +2090,7 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) | |||
2085 | erp_action->fsf_req = NULL; | 2090 | erp_action->fsf_req = NULL; |
2086 | } | 2091 | } |
2087 | out: | 2092 | out: |
2088 | spin_unlock_bh(&adapter->req_q_lock); | 2093 | spin_unlock_bh(&qdio->req_q_lock); |
2089 | return retval; | 2094 | return retval; |
2090 | } | 2095 | } |
2091 | 2096 | ||
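The two hunks above (open and close LUN) reduce to one recurring pattern: every ERP-driven FSF request now works against the zfcp_qdio object instead of the adapter. A condensed sketch of that pattern, with names taken from the hunks above and the QTCB setup and timer handling elided:

    struct zfcp_qdio *qdio = erp_action->adapter->qdio;
    struct zfcp_fsf_req *req;
    struct qdio_buffer_element *sbale;
    int retval = -EIO;

    spin_lock_bh(&qdio->req_q_lock);        /* request queue lock now lives in zfcp_qdio */
    if (zfcp_fsf_req_sbal_get(qdio))        /* wait for a free SBAL on this queue */
        goto out;

    req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
                              qdio->adapter->pool.erp_req);
    if (IS_ERR(req)) {
        retval = PTR_ERR(req);
        goto out;
    }

    req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
    sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
    sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;   /* these requests carry no data payload */
    sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
    /* ... fill QTCB, start the ERP timer, zfcp_fsf_req_send(req) ... */
out:
    spin_unlock_bh(&qdio->req_q_lock);
    return retval;

The only data still fetched from struct zfcp_adapter is the mempool, reached through the qdio->adapter back-pointer.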
@@ -2353,18 +2358,19 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, | |||
2353 | unsigned int sbtype = SBAL_FLAGS0_TYPE_READ; | 2358 | unsigned int sbtype = SBAL_FLAGS0_TYPE_READ; |
2354 | int real_bytes, retval = -EIO; | 2359 | int real_bytes, retval = -EIO; |
2355 | struct zfcp_adapter *adapter = unit->port->adapter; | 2360 | struct zfcp_adapter *adapter = unit->port->adapter; |
2361 | struct zfcp_qdio *qdio = adapter->qdio; | ||
2356 | 2362 | ||
2357 | if (unlikely(!(atomic_read(&unit->status) & | 2363 | if (unlikely(!(atomic_read(&unit->status) & |
2358 | ZFCP_STATUS_COMMON_UNBLOCKED))) | 2364 | ZFCP_STATUS_COMMON_UNBLOCKED))) |
2359 | return -EBUSY; | 2365 | return -EBUSY; |
2360 | 2366 | ||
2361 | spin_lock(&adapter->req_q_lock); | 2367 | spin_lock(&qdio->req_q_lock); |
2362 | if (atomic_read(&adapter->req_q.count) <= 0) { | 2368 | if (atomic_read(&qdio->req_q.count) <= 0) { |
2363 | atomic_inc(&adapter->qdio_outb_full); | 2369 | atomic_inc(&qdio->req_q_full); |
2364 | goto out; | 2370 | goto out; |
2365 | } | 2371 | } |
2366 | 2372 | ||
2367 | req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, | 2373 | req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND, |
2368 | adapter->pool.scsi_req); | 2374 | adapter->pool.scsi_req); |
2369 | 2375 | ||
2370 | if (IS_ERR(req)) { | 2376 | if (IS_ERR(req)) { |
@@ -2424,7 +2430,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, | |||
2424 | req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) + | 2430 | req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) + |
2425 | fcp_cmnd_iu->add_fcp_cdb_length + sizeof(u32); | 2431 | fcp_cmnd_iu->add_fcp_cdb_length + sizeof(u32); |
2426 | 2432 | ||
2427 | real_bytes = zfcp_qdio_sbals_from_sg(adapter, &req->queue_req, sbtype, | 2433 | real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req, sbtype, |
2428 | scsi_sglist(scsi_cmnd), | 2434 | scsi_sglist(scsi_cmnd), |
2429 | FSF_MAX_SBALS_PER_REQ); | 2435 | FSF_MAX_SBALS_PER_REQ); |
2430 | if (unlikely(real_bytes < 0)) { | 2436 | if (unlikely(real_bytes < 0)) { |
@@ -2453,7 +2459,7 @@ failed_scsi_cmnd: | |||
2453 | zfcp_fsf_req_free(req); | 2459 | zfcp_fsf_req_free(req); |
2454 | scsi_cmnd->host_scribble = NULL; | 2460 | scsi_cmnd->host_scribble = NULL; |
2455 | out: | 2461 | out: |
2456 | spin_unlock(&adapter->req_q_lock); | 2462 | spin_unlock(&qdio->req_q_lock); |
2457 | return retval; | 2463 | return retval; |
2458 | } | 2464 | } |
2459 | 2465 | ||
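Unlike the ERP paths, the SCSI command path above must not sleep waiting for queue space, so it only checks the request queue counter and records a full queue in the per-qdio counter before bailing out. A minimal sketch of that check, using only the fields visible in the hunk:

    spin_lock(&qdio->req_q_lock);
    if (atomic_read(&qdio->req_q.count) <= 0) {
        /* no free SBAL: count the incident and let the SCSI midlayer retry */
        atomic_inc(&qdio->req_q_full);
        goto out;
    }

The same counter (formerly adapter->qdio_outb_full, now qdio->req_q_full) is also bumped in zfcp_qdio_fill_sbals when a request runs out of SBALs mid-build, and it is exported through the queue_full sysfs attribute at the end of this diff.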
@@ -2468,18 +2474,18 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags) | |||
2468 | struct qdio_buffer_element *sbale; | 2474 | struct qdio_buffer_element *sbale; |
2469 | struct zfcp_fsf_req *req = NULL; | 2475 | struct zfcp_fsf_req *req = NULL; |
2470 | struct fcp_cmnd_iu *fcp_cmnd_iu; | 2476 | struct fcp_cmnd_iu *fcp_cmnd_iu; |
2471 | struct zfcp_adapter *adapter = unit->port->adapter; | 2477 | struct zfcp_qdio *qdio = unit->port->adapter->qdio; |
2472 | 2478 | ||
2473 | if (unlikely(!(atomic_read(&unit->status) & | 2479 | if (unlikely(!(atomic_read(&unit->status) & |
2474 | ZFCP_STATUS_COMMON_UNBLOCKED))) | 2480 | ZFCP_STATUS_COMMON_UNBLOCKED))) |
2475 | return NULL; | 2481 | return NULL; |
2476 | 2482 | ||
2477 | spin_lock_bh(&adapter->req_q_lock); | 2483 | spin_lock_bh(&qdio->req_q_lock); |
2478 | if (zfcp_fsf_req_sbal_get(adapter)) | 2484 | if (zfcp_fsf_req_sbal_get(qdio)) |
2479 | goto out; | 2485 | goto out; |
2480 | 2486 | ||
2481 | req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, | 2487 | req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND, |
2482 | adapter->pool.scsi_req); | 2488 | qdio->adapter->pool.scsi_req); |
2483 | 2489 | ||
2484 | if (IS_ERR(req)) { | 2490 | if (IS_ERR(req)) { |
2485 | req = NULL; | 2491 | req = NULL; |
@@ -2496,7 +2502,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags) | |||
2496 | req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) + | 2502 | req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) + |
2497 | sizeof(u32); | 2503 | sizeof(u32); |
2498 | 2504 | ||
2499 | sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); | 2505 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); |
2500 | sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE; | 2506 | sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE; |
2501 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 2507 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
2502 | 2508 | ||
@@ -2511,7 +2517,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags) | |||
2511 | zfcp_fsf_req_free(req); | 2517 | zfcp_fsf_req_free(req); |
2512 | req = NULL; | 2518 | req = NULL; |
2513 | out: | 2519 | out: |
2514 | spin_unlock_bh(&adapter->req_q_lock); | 2520 | spin_unlock_bh(&qdio->req_q_lock); |
2515 | return req; | 2521 | return req; |
2516 | } | 2522 | } |
2517 | 2523 | ||
@@ -2529,6 +2535,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, | |||
2529 | struct zfcp_fsf_cfdc *fsf_cfdc) | 2535 | struct zfcp_fsf_cfdc *fsf_cfdc) |
2530 | { | 2536 | { |
2531 | struct qdio_buffer_element *sbale; | 2537 | struct qdio_buffer_element *sbale; |
2538 | struct zfcp_qdio *qdio = adapter->qdio; | ||
2532 | struct zfcp_fsf_req *req = NULL; | 2539 | struct zfcp_fsf_req *req = NULL; |
2533 | struct fsf_qtcb_bottom_support *bottom; | 2540 | struct fsf_qtcb_bottom_support *bottom; |
2534 | int direction, retval = -EIO, bytes; | 2541 | int direction, retval = -EIO, bytes; |
@@ -2547,11 +2554,11 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, | |||
2547 | return ERR_PTR(-EINVAL); | 2554 | return ERR_PTR(-EINVAL); |
2548 | } | 2555 | } |
2549 | 2556 | ||
2550 | spin_lock_bh(&adapter->req_q_lock); | 2557 | spin_lock_bh(&qdio->req_q_lock); |
2551 | if (zfcp_fsf_req_sbal_get(adapter)) | 2558 | if (zfcp_fsf_req_sbal_get(qdio)) |
2552 | goto out; | 2559 | goto out; |
2553 | 2560 | ||
2554 | req = zfcp_fsf_req_create(adapter, fsf_cfdc->command, NULL); | 2561 | req = zfcp_fsf_req_create(qdio, fsf_cfdc->command, NULL); |
2555 | if (IS_ERR(req)) { | 2562 | if (IS_ERR(req)) { |
2556 | retval = -EPERM; | 2563 | retval = -EPERM; |
2557 | goto out; | 2564 | goto out; |
@@ -2559,15 +2566,16 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, | |||
2559 | 2566 | ||
2560 | req->handler = zfcp_fsf_control_file_handler; | 2567 | req->handler = zfcp_fsf_control_file_handler; |
2561 | 2568 | ||
2562 | sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); | 2569 | sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); |
2563 | sbale[0].flags |= direction; | 2570 | sbale[0].flags |= direction; |
2564 | 2571 | ||
2565 | bottom = &req->qtcb->bottom.support; | 2572 | bottom = &req->qtcb->bottom.support; |
2566 | bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE; | 2573 | bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE; |
2567 | bottom->option = fsf_cfdc->option; | 2574 | bottom->option = fsf_cfdc->option; |
2568 | 2575 | ||
2569 | bytes = zfcp_qdio_sbals_from_sg(adapter, &req->queue_req, direction, | 2576 | bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req, |
2570 | fsf_cfdc->sg, FSF_MAX_SBALS_PER_REQ); | 2577 | direction, fsf_cfdc->sg, |
2578 | FSF_MAX_SBALS_PER_REQ); | ||
2571 | if (bytes != ZFCP_CFDC_MAX_SIZE) { | 2579 | if (bytes != ZFCP_CFDC_MAX_SIZE) { |
2572 | zfcp_fsf_req_free(req); | 2580 | zfcp_fsf_req_free(req); |
2573 | goto out; | 2581 | goto out; |
@@ -2576,7 +2584,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, | |||
2576 | zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); | 2584 | zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); |
2577 | retval = zfcp_fsf_req_send(req); | 2585 | retval = zfcp_fsf_req_send(req); |
2578 | out: | 2586 | out: |
2579 | spin_unlock_bh(&adapter->req_q_lock); | 2587 | spin_unlock_bh(&qdio->req_q_lock); |
2580 | 2588 | ||
2581 | if (!retval) { | 2589 | if (!retval) { |
2582 | wait_for_completion(&req->completion); | 2590 | wait_for_completion(&req->completion); |
@@ -2590,9 +2598,10 @@ out: | |||
2590 | * @adapter: pointer to struct zfcp_adapter | 2598 | * @adapter: pointer to struct zfcp_adapter |
2591 | * @sbal_idx: response queue index of SBAL to be processed | 2599 | * @sbal_idx: response queue index of SBAL to be processed |
2592 | */ | 2600 | */ |
2593 | void zfcp_fsf_reqid_check(struct zfcp_adapter *adapter, int sbal_idx) | 2601 | void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx) |
2594 | { | 2602 | { |
2595 | struct qdio_buffer *sbal = adapter->resp_q.sbal[sbal_idx]; | 2603 | struct zfcp_adapter *adapter = qdio->adapter; |
2604 | struct qdio_buffer *sbal = qdio->resp_q.sbal[sbal_idx]; | ||
2596 | struct qdio_buffer_element *sbale; | 2605 | struct qdio_buffer_element *sbale; |
2597 | struct zfcp_fsf_req *fsf_req; | 2606 | struct zfcp_fsf_req *fsf_req; |
2598 | unsigned long flags, req_id; | 2607 | unsigned long flags, req_id; |
@@ -2618,7 +2627,7 @@ void zfcp_fsf_reqid_check(struct zfcp_adapter *adapter, int sbal_idx) | |||
2618 | 2627 | ||
2619 | fsf_req->queue_req.sbal_response = sbal_idx; | 2628 | fsf_req->queue_req.sbal_response = sbal_idx; |
2620 | fsf_req->queue_req.qdio_inb_usage = | 2629 | fsf_req->queue_req.qdio_inb_usage = |
2621 | atomic_read(&adapter->resp_q.count); | 2630 | atomic_read(&qdio->resp_q.count); |
2622 | zfcp_fsf_req_complete(fsf_req); | 2631 | zfcp_fsf_req_complete(fsf_req); |
2623 | 2632 | ||
2624 | if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY)) | 2633 | if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY)) |
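The zfcp_def.h hunk that introduces struct zfcp_qdio is not part of this excerpt, but the fields referenced throughout the zfcp_fsf.c and zfcp_qdio.c changes imply a layout roughly like the following. This is a reconstruction for orientation only, not the literal definition, and the field order is a guess:

    struct zfcp_qdio {
        struct zfcp_qdio_queue  req_q;       /* outbound (request) queue */
        spinlock_t              req_q_lock;  /* serializes request queue access */
        ktime_t                 req_q_time;  /* time of last fill-level change */
        u64                     req_q_util;  /* accumulated queue utilization */
        atomic_t                req_q_full;  /* "request queue full" incidents */
        wait_queue_head_t       req_q_wq;    /* waiters for free request SBALs */
        struct zfcp_qdio_queue  resp_q;      /* inbound (response) queue */
        spinlock_t              stat_lock;   /* protects req_q_time and req_q_util */
        struct zfcp_adapter     *adapter;    /* back-pointer to the owning adapter */
    };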
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index e118874976f0..0b3f634509bf 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c | |||
@@ -36,18 +36,18 @@ zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx) | |||
36 | 36 | ||
37 | /** | 37 | /** |
38 | * zfcp_qdio_free - free memory used by request- and response queue | 38 | |
39 | * @adapter: pointer to the zfcp_adapter structure | 39 | * @qdio: pointer to the zfcp_qdio structure |
40 | */ | 40 | */ |
41 | void zfcp_qdio_free(struct zfcp_adapter *adapter) | 41 | void zfcp_qdio_free(struct zfcp_qdio *qdio) |
42 | { | 42 | { |
43 | struct qdio_buffer **sbal_req, **sbal_resp; | 43 | struct qdio_buffer **sbal_req, **sbal_resp; |
44 | int p; | 44 | int p; |
45 | 45 | ||
46 | if (adapter->ccw_device) | 46 | if (qdio->adapter->ccw_device) |
47 | qdio_free(adapter->ccw_device); | 47 | qdio_free(qdio->adapter->ccw_device); |
48 | 48 | ||
49 | sbal_req = adapter->req_q.sbal; | 49 | sbal_req = qdio->req_q.sbal; |
50 | sbal_resp = adapter->resp_q.sbal; | 50 | sbal_resp = qdio->resp_q.sbal; |
51 | 51 | ||
52 | for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) { | 52 | for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) { |
53 | free_page((unsigned long) sbal_req[p]); | 53 | free_page((unsigned long) sbal_req[p]); |
@@ -55,8 +55,10 @@ void zfcp_qdio_free(struct zfcp_adapter *adapter) | |||
55 | } | 55 | } |
56 | } | 56 | } |
57 | 57 | ||
58 | static void zfcp_qdio_handler_error(struct zfcp_adapter *adapter, char *id) | 58 | static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id) |
59 | { | 59 | { |
60 | struct zfcp_adapter *adapter = qdio->adapter; | ||
61 | |||
60 | dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n"); | 62 | dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n"); |
61 | 63 | ||
62 | zfcp_erp_adapter_reopen(adapter, | 64 | zfcp_erp_adapter_reopen(adapter, |
@@ -75,47 +77,47 @@ static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt) | |||
75 | } | 77 | } |
76 | 78 | ||
77 | /* this needs to be called prior to updating the queue fill level */ | 79 | /* this needs to be called prior to updating the queue fill level */ |
78 | static void zfcp_qdio_account(struct zfcp_adapter *adapter) | 80 | static void zfcp_qdio_account(struct zfcp_qdio *qdio) |
79 | { | 81 | { |
80 | ktime_t now; | 82 | ktime_t now; |
81 | s64 span; | 83 | s64 span; |
82 | int free, used; | 84 | int free, used; |
83 | 85 | ||
84 | spin_lock(&adapter->qdio_stat_lock); | 86 | spin_lock(&qdio->stat_lock); |
85 | now = ktime_get(); | 87 | now = ktime_get(); |
86 | span = ktime_us_delta(now, adapter->req_q_time); | 88 | span = ktime_us_delta(now, qdio->req_q_time); |
87 | free = max(0, atomic_read(&adapter->req_q.count)); | 89 | free = max(0, atomic_read(&qdio->req_q.count)); |
88 | used = QDIO_MAX_BUFFERS_PER_Q - free; | 90 | used = QDIO_MAX_BUFFERS_PER_Q - free; |
89 | adapter->req_q_util += used * span; | 91 | qdio->req_q_util += used * span; |
90 | adapter->req_q_time = now; | 92 | qdio->req_q_time = now; |
91 | spin_unlock(&adapter->qdio_stat_lock); | 93 | spin_unlock(&qdio->stat_lock); |
92 | } | 94 | } |
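zfcp_qdio_account keeps a time-weighted fill-level statistic: on every change of the request queue fill level it adds used * span to req_q_util, where used is the number of occupied SBALs (out of QDIO_MAX_BUFFERS_PER_Q = 128) and span is the time in microseconds since the previous update. For example, 32 SBALs in use for 500 microseconds grow req_q_util by 32 * 500 = 16000. The new stat_lock protects only this bookkeeping and is separate from req_q_lock.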
93 | 95 | ||
94 | static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err, | 96 | static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err, |
95 | int queue_no, int first, int count, | 97 | int queue_no, int first, int count, |
96 | unsigned long parm) | 98 | unsigned long parm) |
97 | { | 99 | { |
98 | struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm; | 100 | struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm; |
99 | struct zfcp_qdio_queue *queue = &adapter->req_q; | 101 | struct zfcp_qdio_queue *queue = &qdio->req_q; |
100 | 102 | ||
101 | if (unlikely(qdio_err)) { | 103 | if (unlikely(qdio_err)) { |
102 | zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count); | 104 | zfcp_hba_dbf_event_qdio(qdio, qdio_err, first, count); |
103 | zfcp_qdio_handler_error(adapter, "qdireq1"); | 105 | zfcp_qdio_handler_error(qdio, "qdireq1"); |
104 | return; | 106 | return; |
105 | } | 107 | } |
106 | 108 | ||
107 | /* cleanup all SBALs being program-owned now */ | 109 | /* cleanup all SBALs being program-owned now */ |
108 | zfcp_qdio_zero_sbals(queue->sbal, first, count); | 110 | zfcp_qdio_zero_sbals(queue->sbal, first, count); |
109 | 111 | ||
110 | zfcp_qdio_account(adapter); | 112 | zfcp_qdio_account(qdio); |
111 | atomic_add(count, &queue->count); | 113 | atomic_add(count, &queue->count); |
112 | wake_up(&adapter->request_wq); | 114 | wake_up(&qdio->req_q_wq); |
113 | } | 115 | } |
114 | 116 | ||
115 | static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed) | 117 | static void zfcp_qdio_resp_put_back(struct zfcp_qdio *qdio, int processed) |
116 | { | 118 | { |
117 | struct zfcp_qdio_queue *queue = &adapter->resp_q; | 119 | struct zfcp_qdio_queue *queue = &qdio->resp_q; |
118 | struct ccw_device *cdev = adapter->ccw_device; | 120 | struct ccw_device *cdev = qdio->adapter->ccw_device; |
119 | u8 count, start = queue->first; | 121 | u8 count, start = queue->first; |
120 | unsigned int retval; | 122 | unsigned int retval; |
121 | 123 | ||
@@ -137,12 +139,12 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, | |||
137 | int queue_no, int first, int count, | 139 | int queue_no, int first, int count, |
138 | unsigned long parm) | 140 | unsigned long parm) |
139 | { | 141 | { |
140 | struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm; | 142 | struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm; |
141 | int sbal_idx, sbal_no; | 143 | int sbal_idx, sbal_no; |
142 | 144 | ||
143 | if (unlikely(qdio_err)) { | 145 | if (unlikely(qdio_err)) { |
144 | zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count); | 146 | zfcp_hba_dbf_event_qdio(qdio, qdio_err, first, count); |
145 | zfcp_qdio_handler_error(adapter, "qdires1"); | 147 | zfcp_qdio_handler_error(qdio, "qdires1"); |
146 | return; | 148 | return; |
147 | } | 149 | } |
148 | 150 | ||
@@ -153,26 +155,26 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, | |||
153 | for (sbal_no = 0; sbal_no < count; sbal_no++) { | 155 | for (sbal_no = 0; sbal_no < count; sbal_no++) { |
154 | sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q; | 156 | sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q; |
155 | /* go through all SBALEs of SBAL */ | 157 | /* go through all SBALEs of SBAL */ |
156 | zfcp_fsf_reqid_check(adapter, sbal_idx); | 158 | zfcp_fsf_reqid_check(qdio, sbal_idx); |
157 | } | 159 | } |
158 | 160 | ||
159 | /* | 161 | /* |
160 | * put range of SBALs back to response queue | 162 | * put range of SBALs back to response queue |
161 | * (including SBALs which have already been freed before) | 163 | |

162 | */ | 164 | */ |
163 | zfcp_qdio_resp_put_back(adapter, count); | 165 | zfcp_qdio_resp_put_back(qdio, count); |
164 | } | 166 | } |
165 | 167 | ||
166 | /** | 168 | /** |
167 | * zfcp_qdio_sbale_req - return ptr to SBALE of req_q for a struct zfcp_fsf_req | 169 | * zfcp_qdio_sbale_req - return ptr to SBALE of req_q for a struct zfcp_fsf_req |
168 | * @adapter: pointer to struct zfcp_adapter | 170 | * @qdio: pointer to struct zfcp_qdio |
169 | * @q_rec: pointer to struct zfcp_queue_rec | 171 | * @q_rec: pointer to struct zfcp_queue_rec |
170 | * Returns: pointer to qdio_buffer_element (SBALE) structure | 172 | * Returns: pointer to qdio_buffer_element (SBALE) structure |
171 | */ | 173 | */ |
172 | struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_adapter *adapter, | 174 | struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, |
173 | struct zfcp_queue_req *q_req) | 175 | struct zfcp_queue_req *q_req) |
174 | { | 176 | { |
175 | return zfcp_qdio_sbale(&adapter->req_q, q_req->sbal_last, 0); | 177 | return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, 0); |
176 | } | 178 | } |
177 | 179 | ||
178 | /** | 180 | /** |
@@ -180,30 +182,30 @@ struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_adapter *adapter, | |||
180 | * @fsf_req: pointer to struct fsf_req | 182 | * @fsf_req: pointer to struct fsf_req |
181 | * Returns: pointer to qdio_buffer_element (SBALE) structure | 183 | * Returns: pointer to qdio_buffer_element (SBALE) structure |
182 | */ | 184 | */ |
183 | struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_adapter *adapter, | 185 | struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, |
184 | struct zfcp_queue_req *q_req) | 186 | struct zfcp_queue_req *q_req) |
185 | { | 187 | { |
186 | return zfcp_qdio_sbale(&adapter->req_q, q_req->sbal_last, | 188 | return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, |
187 | q_req->sbale_curr); | 189 | q_req->sbale_curr); |
188 | } | 190 | } |
189 | 191 | ||
190 | static void zfcp_qdio_sbal_limit(struct zfcp_adapter *adapter, | 192 | static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio, |
191 | struct zfcp_queue_req *q_req, int max_sbals) | 193 | struct zfcp_queue_req *q_req, int max_sbals) |
192 | { | 194 | { |
193 | int count = atomic_read(&adapter->req_q.count); | 195 | int count = atomic_read(&qdio->req_q.count); |
194 | count = min(count, max_sbals); | 196 | count = min(count, max_sbals); |
195 | q_req->sbal_limit = (q_req->sbal_first + count - 1) | 197 | q_req->sbal_limit = (q_req->sbal_first + count - 1) |
196 | % QDIO_MAX_BUFFERS_PER_Q; | 198 | % QDIO_MAX_BUFFERS_PER_Q; |
197 | } | 199 | } |
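zfcp_qdio_sbal_limit works on a ring of QDIO_MAX_BUFFERS_PER_Q (128) SBALs, so the last allowed SBAL index wraps around: with sbal_first = 126 and count = 5 free SBALs, sbal_limit becomes (126 + 5 - 1) % 128 = 2.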
198 | 200 | ||
199 | static struct qdio_buffer_element * | 201 | static struct qdio_buffer_element * |
200 | zfcp_qdio_sbal_chain(struct zfcp_adapter *adapter, struct zfcp_queue_req *q_req, | 202 | zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req, |
201 | unsigned long sbtype) | 203 | unsigned long sbtype) |
202 | { | 204 | { |
203 | struct qdio_buffer_element *sbale; | 205 | struct qdio_buffer_element *sbale; |
204 | 206 | ||
205 | /* set last entry flag in current SBALE of current SBAL */ | 207 | /* set last entry flag in current SBALE of current SBAL */ |
206 | sbale = zfcp_qdio_sbale_curr(adapter, q_req); | 208 | sbale = zfcp_qdio_sbale_curr(qdio, q_req); |
207 | sbale->flags |= SBAL_FLAGS_LAST_ENTRY; | 209 | sbale->flags |= SBAL_FLAGS_LAST_ENTRY; |
208 | 210 | ||
209 | /* don't exceed last allowed SBAL */ | 211 | /* don't exceed last allowed SBAL */ |
@@ -211,7 +213,7 @@ zfcp_qdio_sbal_chain(struct zfcp_adapter *adapter, struct zfcp_queue_req *q_req, | |||
211 | return NULL; | 213 | return NULL; |
212 | 214 | ||
213 | /* set chaining flag in first SBALE of current SBAL */ | 215 | /* set chaining flag in first SBALE of current SBAL */ |
214 | sbale = zfcp_qdio_sbale_req(adapter, q_req); | 216 | sbale = zfcp_qdio_sbale_req(qdio, q_req); |
215 | sbale->flags |= SBAL_FLAGS0_MORE_SBALS; | 217 | sbale->flags |= SBAL_FLAGS0_MORE_SBALS; |
216 | 218 | ||
217 | /* calculate index of next SBAL */ | 219 | /* calculate index of next SBAL */ |
@@ -225,26 +227,26 @@ zfcp_qdio_sbal_chain(struct zfcp_adapter *adapter, struct zfcp_queue_req *q_req, | |||
225 | q_req->sbale_curr = 0; | 227 | q_req->sbale_curr = 0; |
226 | 228 | ||
227 | /* set storage-block type for new SBAL */ | 229 | /* set storage-block type for new SBAL */ |
228 | sbale = zfcp_qdio_sbale_curr(adapter, q_req); | 230 | sbale = zfcp_qdio_sbale_curr(qdio, q_req); |
229 | sbale->flags |= sbtype; | 231 | sbale->flags |= sbtype; |
230 | 232 | ||
231 | return sbale; | 233 | return sbale; |
232 | } | 234 | } |
233 | 235 | ||
234 | static struct qdio_buffer_element * | 236 | static struct qdio_buffer_element * |
235 | zfcp_qdio_sbale_next(struct zfcp_adapter *adapter, struct zfcp_queue_req *q_req, | 237 | zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req, |
236 | unsigned int sbtype) | 238 | unsigned int sbtype) |
237 | { | 239 | { |
238 | if (q_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) | 240 | if (q_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) |
239 | return zfcp_qdio_sbal_chain(adapter, q_req, sbtype); | 241 | return zfcp_qdio_sbal_chain(qdio, q_req, sbtype); |
240 | q_req->sbale_curr++; | 242 | q_req->sbale_curr++; |
241 | return zfcp_qdio_sbale_curr(adapter, q_req); | 243 | return zfcp_qdio_sbale_curr(qdio, q_req); |
242 | } | 244 | } |
243 | 245 | ||
244 | static void zfcp_qdio_undo_sbals(struct zfcp_adapter *adapter, | 246 | static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio, |
245 | struct zfcp_queue_req *q_req) | 247 | struct zfcp_queue_req *q_req) |
246 | { | 248 | { |
247 | struct qdio_buffer **sbal = adapter->req_q.sbal; | 249 | struct qdio_buffer **sbal = qdio->req_q.sbal; |
248 | int first = q_req->sbal_first; | 250 | int first = q_req->sbal_first; |
249 | int last = q_req->sbal_last; | 251 | int last = q_req->sbal_last; |
250 | int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) % | 252 | int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) % |
@@ -252,7 +254,7 @@ static void zfcp_qdio_undo_sbals(struct zfcp_adapter *adapter, | |||
252 | zfcp_qdio_zero_sbals(sbal, first, count); | 254 | zfcp_qdio_zero_sbals(sbal, first, count); |
253 | } | 255 | } |
254 | 256 | ||
255 | static int zfcp_qdio_fill_sbals(struct zfcp_adapter *adapter, | 257 | static int zfcp_qdio_fill_sbals(struct zfcp_qdio *qdio, |
256 | struct zfcp_queue_req *q_req, | 258 | struct zfcp_queue_req *q_req, |
257 | unsigned int sbtype, void *start_addr, | 259 | unsigned int sbtype, void *start_addr, |
258 | unsigned int total_length) | 260 | unsigned int total_length) |
@@ -264,10 +266,10 @@ static int zfcp_qdio_fill_sbals(struct zfcp_adapter *adapter, | |||
264 | /* split segment up */ | 266 | /* split segment up */ |
265 | for (addr = start_addr, remaining = total_length; remaining > 0; | 267 | for (addr = start_addr, remaining = total_length; remaining > 0; |
266 | addr += length, remaining -= length) { | 268 | addr += length, remaining -= length) { |
267 | sbale = zfcp_qdio_sbale_next(adapter, q_req, sbtype); | 269 | sbale = zfcp_qdio_sbale_next(qdio, q_req, sbtype); |
268 | if (!sbale) { | 270 | if (!sbale) { |
269 | atomic_inc(&adapter->qdio_outb_full); | 271 | atomic_inc(&qdio->req_q_full); |
270 | zfcp_qdio_undo_sbals(adapter, q_req); | 272 | zfcp_qdio_undo_sbals(qdio, q_req); |
271 | return -EINVAL; | 273 | return -EINVAL; |
272 | } | 274 | } |
273 | 275 | ||
@@ -289,7 +291,7 @@ static int zfcp_qdio_fill_sbals(struct zfcp_adapter *adapter, | |||
289 | * @max_sbals: upper bound for number of SBALs to be used | 291 | * @max_sbals: upper bound for number of SBALs to be used |
290 | * Returns: number of bytes, or error (negative) | 292 | |
291 | */ | 293 | */ |
292 | int zfcp_qdio_sbals_from_sg(struct zfcp_adapter *adapter, | 294 | int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, |
293 | struct zfcp_queue_req *q_req, | 295 | struct zfcp_queue_req *q_req, |
294 | unsigned long sbtype, struct scatterlist *sg, | 296 | unsigned long sbtype, struct scatterlist *sg, |
295 | int max_sbals) | 297 | int max_sbals) |
@@ -298,14 +300,14 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_adapter *adapter, | |||
298 | int retval, bytes = 0; | 300 | int retval, bytes = 0; |
299 | 301 | ||
300 | /* figure out last allowed SBAL */ | 302 | /* figure out last allowed SBAL */ |
301 | zfcp_qdio_sbal_limit(adapter, q_req, max_sbals); | 303 | zfcp_qdio_sbal_limit(qdio, q_req, max_sbals); |
302 | 304 | ||
303 | /* set storage-block type for this request */ | 305 | /* set storage-block type for this request */ |
304 | sbale = zfcp_qdio_sbale_req(adapter, q_req); | 306 | sbale = zfcp_qdio_sbale_req(qdio, q_req); |
305 | sbale->flags |= sbtype; | 307 | sbale->flags |= sbtype; |
306 | 308 | ||
307 | for (; sg; sg = sg_next(sg)) { | 309 | for (; sg; sg = sg_next(sg)) { |
308 | retval = zfcp_qdio_fill_sbals(adapter, q_req, sbtype, | 310 | retval = zfcp_qdio_fill_sbals(qdio, q_req, sbtype, |
309 | sg_virt(sg), sg->length); | 311 | sg_virt(sg), sg->length); |
310 | if (retval < 0) | 312 | if (retval < 0) |
311 | return retval; | 313 | return retval; |
@@ -313,7 +315,7 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_adapter *adapter, | |||
313 | } | 315 | } |
314 | 316 | ||
315 | /* assume that no other SBALEs are to follow in the same SBAL */ | 317 | /* assume that no other SBALEs are to follow in the same SBAL */ |
316 | sbale = zfcp_qdio_sbale_curr(adapter, q_req); | 318 | sbale = zfcp_qdio_sbale_curr(qdio, q_req); |
317 | sbale->flags |= SBAL_FLAGS_LAST_ENTRY; | 319 | sbale->flags |= SBAL_FLAGS_LAST_ENTRY; |
318 | 320 | ||
319 | return bytes; | 321 | return bytes; |
@@ -321,20 +323,22 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_adapter *adapter, | |||
321 | 323 | ||
322 | /** | 324 | /** |
323 | * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO | 325 | * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO |
324 | * @fsf_req: pointer to struct zfcp_fsf_req | 326 | * @qdio: pointer to struct zfcp_qdio |
327 | * @q_req: pointer to struct zfcp_queue_req | ||
325 | * Returns: 0 on success, error otherwise | 328 | * Returns: 0 on success, error otherwise |
326 | */ | 329 | */ |
327 | int zfcp_qdio_send(struct zfcp_adapter *adapter, struct zfcp_queue_req *q_req) | 330 | int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req) |
328 | { | 331 | { |
329 | struct zfcp_qdio_queue *req_q = &adapter->req_q; | 332 | struct zfcp_qdio_queue *req_q = &qdio->req_q; |
330 | int first = q_req->sbal_first; | 333 | int first = q_req->sbal_first; |
331 | int count = q_req->sbal_number; | 334 | int count = q_req->sbal_number; |
332 | int retval; | 335 | int retval; |
333 | unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT; | 336 | unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT; |
334 | 337 | ||
335 | zfcp_qdio_account(adapter); | 338 | zfcp_qdio_account(qdio); |
336 | 339 | ||
337 | retval = do_QDIO(adapter->ccw_device, qdio_flags, 0, first, count); | 340 | retval = do_QDIO(qdio->adapter->ccw_device, qdio_flags, 0, first, |
341 | count); | ||
338 | if (unlikely(retval)) { | 342 | if (unlikely(retval)) { |
339 | zfcp_qdio_zero_sbals(req_q->sbal, first, count); | 343 | zfcp_qdio_zero_sbals(req_q->sbal, first, count); |
340 | return retval; | 344 | return retval; |
@@ -347,63 +351,69 @@ int zfcp_qdio_send(struct zfcp_adapter *adapter, struct zfcp_queue_req *q_req) | |||
347 | return 0; | 351 | return 0; |
348 | } | 352 | } |
349 | 353 | ||
354 | |||
355 | static void zfcp_qdio_setup_init_data(struct qdio_initialize *id, | ||
356 | struct zfcp_qdio *qdio) | ||
357 | { | ||
358 | |||
359 | id->cdev = qdio->adapter->ccw_device; | ||
360 | id->q_format = QDIO_ZFCP_QFMT; | ||
361 | memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8); | ||
362 | ASCEBC(id->adapter_name, 8); | ||
363 | id->qib_param_field_format = 0; | ||
364 | id->qib_param_field = NULL; | ||
365 | id->input_slib_elements = NULL; | ||
366 | id->output_slib_elements = NULL; | ||
367 | id->no_input_qs = 1; | ||
368 | id->no_output_qs = 1; | ||
369 | id->input_handler = zfcp_qdio_int_resp; | ||
370 | id->output_handler = zfcp_qdio_int_req; | ||
371 | id->int_parm = (unsigned long) qdio; | ||
372 | id->flags = QDIO_INBOUND_0COPY_SBALS | | ||
373 | QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS; | ||
374 | id->input_sbal_addr_array = (void **) (qdio->resp_q.sbal); | ||
375 | id->output_sbal_addr_array = (void **) (qdio->req_q.sbal); | ||
376 | |||
377 | } | ||
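zfcp_qdio_setup_init_data is new with this patch: the qdio_initialize descriptor is no longer kept in a long-lived structure but is filled into an on-stack variable by the two callers below, zfcp_qdio_allocate and zfcp_qdio_open. A condensed view of the calling convention, as used in zfcp_qdio_allocate:

    struct qdio_initialize init_data;              /* on the stack, not in zfcp_adapter */

    zfcp_qdio_setup_init_data(&init_data, qdio);   /* fill the descriptor for this qdio */
    return qdio_allocate(&init_data);              /* zfcp_qdio_open does the same before qdio_establish */

Note that int_parm is now the struct zfcp_qdio pointer, which is exactly what the interrupt handlers zfcp_qdio_int_req and zfcp_qdio_int_resp cast parm back to.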
350 | /** | 378 | /** |
351 | * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data | 379 | * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data |
352 | * @adapter: pointer to struct zfcp_adapter | 380 | * @adapter: pointer to struct zfcp_adapter |
353 | * Returns: -ENOMEM on memory allocation error or return value from | 381 | * Returns: -ENOMEM on memory allocation error or return value from |
354 | * qdio_allocate | 382 | * qdio_allocate |
355 | */ | 383 | */ |
356 | int zfcp_qdio_allocate(struct zfcp_adapter *adapter) | 384 | int zfcp_qdio_allocate(struct zfcp_qdio *qdio, struct ccw_device *ccw_dev) |
357 | { | 385 | { |
358 | struct qdio_initialize *init_data; | 386 | struct qdio_initialize init_data; |
359 | 387 | ||
360 | if (zfcp_qdio_buffers_enqueue(adapter->req_q.sbal) || | 388 | if (zfcp_qdio_buffers_enqueue(qdio->req_q.sbal) || |
361 | zfcp_qdio_buffers_enqueue(adapter->resp_q.sbal)) | 389 | zfcp_qdio_buffers_enqueue(qdio->resp_q.sbal)) |
362 | return -ENOMEM; | 390 | return -ENOMEM; |
363 | 391 | ||
364 | init_data = &adapter->qdio_init_data; | 392 | zfcp_qdio_setup_init_data(&init_data, qdio); |
365 | 393 | ||
366 | init_data->cdev = adapter->ccw_device; | 394 | return qdio_allocate(&init_data); |
367 | init_data->q_format = QDIO_ZFCP_QFMT; | ||
368 | memcpy(init_data->adapter_name, dev_name(&adapter->ccw_device->dev), 8); | ||
369 | ASCEBC(init_data->adapter_name, 8); | ||
370 | init_data->qib_param_field_format = 0; | ||
371 | init_data->qib_param_field = NULL; | ||
372 | init_data->input_slib_elements = NULL; | ||
373 | init_data->output_slib_elements = NULL; | ||
374 | init_data->no_input_qs = 1; | ||
375 | init_data->no_output_qs = 1; | ||
376 | init_data->input_handler = zfcp_qdio_int_resp; | ||
377 | init_data->output_handler = zfcp_qdio_int_req; | ||
378 | init_data->int_parm = (unsigned long) adapter; | ||
379 | init_data->flags = QDIO_INBOUND_0COPY_SBALS | | ||
380 | QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS; | ||
381 | init_data->input_sbal_addr_array = | ||
382 | (void **) (adapter->resp_q.sbal); | ||
383 | init_data->output_sbal_addr_array = | ||
384 | (void **) (adapter->req_q.sbal); | ||
385 | |||
386 | return qdio_allocate(init_data); | ||
387 | } | 395 | } |
388 | 396 | ||
389 | /** | 397 | /** |
390 | * zfcp_close_qdio - close qdio queues for an adapter | 398 | * zfcp_close_qdio - close qdio queues for an adapter |
399 | * @qdio: pointer to structure zfcp_qdio | ||
391 | */ | 400 | */ |
392 | void zfcp_qdio_close(struct zfcp_adapter *adapter) | 401 | void zfcp_qdio_close(struct zfcp_qdio *qdio) |
393 | { | 402 | { |
394 | struct zfcp_qdio_queue *req_q; | 403 | struct zfcp_qdio_queue *req_q; |
395 | int first, count; | 404 | int first, count; |
396 | 405 | ||
397 | if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) | 406 | if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) |
398 | return; | 407 | return; |
399 | 408 | ||
400 | /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */ | 409 | /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */ |
401 | req_q = &adapter->req_q; | 410 | req_q = &qdio->req_q; |
402 | spin_lock_bh(&adapter->req_q_lock); | 411 | spin_lock_bh(&qdio->req_q_lock); |
403 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); | 412 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status); |
404 | spin_unlock_bh(&adapter->req_q_lock); | 413 | spin_unlock_bh(&qdio->req_q_lock); |
405 | 414 | ||
406 | qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR); | 415 | qdio_shutdown(qdio->adapter->ccw_device, |
416 | QDIO_FLAG_CLEANUP_USING_CLEAR); | ||
407 | 417 | ||
408 | /* cleanup used outbound sbals */ | 418 | /* cleanup used outbound sbals */ |
409 | count = atomic_read(&req_q->count); | 419 | count = atomic_read(&req_q->count); |
@@ -414,50 +424,54 @@ void zfcp_qdio_close(struct zfcp_adapter *adapter) | |||
414 | } | 424 | } |
415 | req_q->first = 0; | 425 | req_q->first = 0; |
416 | atomic_set(&req_q->count, 0); | 426 | atomic_set(&req_q->count, 0); |
417 | adapter->resp_q.first = 0; | 427 | qdio->resp_q.first = 0; |
418 | atomic_set(&adapter->resp_q.count, 0); | 428 | atomic_set(&qdio->resp_q.count, 0); |
419 | } | 429 | } |
420 | 430 | ||
421 | /** | 431 | /** |
422 | * zfcp_qdio_open - prepare and initialize response queue | 432 | * zfcp_qdio_open - prepare and initialize response queue |
423 | * @adapter: pointer to struct zfcp_adapter | 433 | * @qdio: pointer to struct zfcp_qdio |
424 | * Returns: 0 on success, otherwise -EIO | 434 | * Returns: 0 on success, otherwise -EIO |
425 | */ | 435 | */ |
426 | int zfcp_qdio_open(struct zfcp_adapter *adapter) | 436 | int zfcp_qdio_open(struct zfcp_qdio *qdio) |
427 | { | 437 | { |
428 | struct qdio_buffer_element *sbale; | 438 | struct qdio_buffer_element *sbale; |
439 | struct qdio_initialize init_data; | ||
440 | struct ccw_device *cdev = qdio->adapter->ccw_device; | ||
429 | int cc; | 441 | int cc; |
430 | 442 | ||
431 | if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP) | 443 | if (atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP) |
432 | return -EIO; | 444 | return -EIO; |
433 | 445 | ||
434 | if (qdio_establish(&adapter->qdio_init_data)) | 446 | zfcp_qdio_setup_init_data(&init_data, qdio); |
447 | |||
448 | if (qdio_establish(&init_data)) | ||
435 | goto failed_establish; | 449 | goto failed_establish; |
436 | 450 | ||
437 | if (qdio_activate(adapter->ccw_device)) | 451 | if (qdio_activate(cdev)) |
438 | goto failed_qdio; | 452 | goto failed_qdio; |
439 | 453 | ||
440 | for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) { | 454 | for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) { |
441 | sbale = &(adapter->resp_q.sbal[cc]->element[0]); | 455 | sbale = &(qdio->resp_q.sbal[cc]->element[0]); |
442 | sbale->length = 0; | 456 | sbale->length = 0; |
443 | sbale->flags = SBAL_FLAGS_LAST_ENTRY; | 457 | sbale->flags = SBAL_FLAGS_LAST_ENTRY; |
444 | sbale->addr = NULL; | 458 | sbale->addr = NULL; |
445 | } | 459 | } |
446 | 460 | ||
447 | if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0, | 461 | if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, |
448 | QDIO_MAX_BUFFERS_PER_Q)) | 462 | QDIO_MAX_BUFFERS_PER_Q)) |
449 | goto failed_qdio; | 463 | goto failed_qdio; |
450 | 464 | ||
451 | /* set index of first available SBAL / number of available SBALs */ | 465 | |
452 | adapter->req_q.first = 0; | 466 | qdio->req_q.first = 0; |
453 | atomic_set(&adapter->req_q.count, QDIO_MAX_BUFFERS_PER_Q); | 467 | atomic_set(&qdio->req_q.count, QDIO_MAX_BUFFERS_PER_Q); |
454 | 468 | ||
455 | return 0; | 469 | return 0; |
456 | 470 | ||
457 | failed_qdio: | 471 | failed_qdio: |
458 | qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR); | 472 | qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); |
459 | failed_establish: | 473 | failed_establish: |
460 | dev_err(&adapter->ccw_device->dev, | 474 | dev_err(&cdev->dev, |
461 | "Setting up the QDIO connection to the FCP adapter failed\n"); | 475 | "Setting up the QDIO connection to the FCP adapter failed\n"); |
462 | return -EIO; | 476 | return -EIO; |
463 | } | 477 | } |
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 2e13d41269a4..4414720c87f6 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c | |||
@@ -225,7 +225,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) | |||
225 | { | 225 | { |
226 | struct zfcp_unit *unit = scpnt->device->hostdata; | 226 | struct zfcp_unit *unit = scpnt->device->hostdata; |
227 | struct zfcp_adapter *adapter = unit->port->adapter; | 227 | struct zfcp_adapter *adapter = unit->port->adapter; |
228 | struct zfcp_fsf_req *fsf_req; | 228 | struct zfcp_fsf_req *fsf_req = NULL; |
229 | int retval = SUCCESS; | 229 | int retval = SUCCESS; |
230 | int retry = 3; | 230 | int retry = 3; |
231 | 231 | ||
@@ -429,7 +429,7 @@ static struct fc_host_statistics *zfcp_get_fc_host_stats(struct Scsi_Host *host) | |||
429 | if (!data) | 429 | if (!data) |
430 | return NULL; | 430 | return NULL; |
431 | 431 | ||
432 | ret = zfcp_fsf_exchange_port_data_sync(adapter, data); | 432 | ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data); |
433 | if (ret) { | 433 | if (ret) { |
434 | kfree(data); | 434 | kfree(data); |
435 | return NULL; | 435 | return NULL; |
@@ -458,7 +458,7 @@ static void zfcp_reset_fc_host_stats(struct Scsi_Host *shost) | |||
458 | if (!data) | 458 | if (!data) |
459 | return; | 459 | return; |
460 | 460 | ||
461 | ret = zfcp_fsf_exchange_port_data_sync(adapter, data); | 461 | ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data); |
462 | if (ret) | 462 | if (ret) |
463 | kfree(data); | 463 | kfree(data); |
464 | else { | 464 | else { |
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index 0fe5cce818cb..a6cf62636834 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c | |||
@@ -425,7 +425,7 @@ static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev, | |||
425 | if (!qtcb_port) | 425 | if (!qtcb_port) |
426 | return -ENOMEM; | 426 | return -ENOMEM; |
427 | 427 | ||
428 | retval = zfcp_fsf_exchange_port_data_sync(adapter, qtcb_port); | 428 | retval = zfcp_fsf_exchange_port_data_sync(adapter->qdio, qtcb_port); |
429 | if (!retval) | 429 | if (!retval) |
430 | retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util, | 430 | retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util, |
431 | qtcb_port->cb_util, qtcb_port->a_util); | 431 | qtcb_port->cb_util, qtcb_port->a_util); |
@@ -451,7 +451,7 @@ static int zfcp_sysfs_adapter_ex_config(struct device *dev, | |||
451 | if (!qtcb_config) | 451 | if (!qtcb_config) |
452 | return -ENOMEM; | 452 | return -ENOMEM; |
453 | 453 | ||
454 | retval = zfcp_fsf_exchange_config_data_sync(adapter, qtcb_config); | 454 | retval = zfcp_fsf_exchange_config_data_sync(adapter->qdio, qtcb_config); |
455 | if (!retval) | 455 | if (!retval) |
456 | *stat_inf = qtcb_config->stat_info; | 456 | *stat_inf = qtcb_config->stat_info; |
457 | 457 | ||
@@ -492,15 +492,15 @@ static ssize_t zfcp_sysfs_adapter_q_full_show(struct device *dev, | |||
492 | char *buf) | 492 | char *buf) |
493 | { | 493 | { |
494 | struct Scsi_Host *scsi_host = class_to_shost(dev); | 494 | struct Scsi_Host *scsi_host = class_to_shost(dev); |
495 | struct zfcp_adapter *adapter = | 495 | struct zfcp_qdio *qdio = |
496 | (struct zfcp_adapter *) scsi_host->hostdata[0]; | 496 | ((struct zfcp_adapter *) scsi_host->hostdata[0])->qdio; |
497 | u64 util; | 497 | u64 util; |
498 | 498 | ||
499 | spin_lock_bh(&adapter->qdio_stat_lock); | 499 | spin_lock_bh(&qdio->stat_lock); |
500 | util = adapter->req_q_util; | 500 | util = qdio->req_q_util; |
501 | spin_unlock_bh(&adapter->qdio_stat_lock); | 501 | spin_unlock_bh(&qdio->stat_lock); |
502 | 502 | ||
503 | return sprintf(buf, "%d %llu\n", atomic_read(&adapter->qdio_outb_full), | 503 | return sprintf(buf, "%d %llu\n", atomic_read(&qdio->req_q_full), |
504 | (unsigned long long)util); | 504 | (unsigned long long)util); |
505 | } | 505 | } |
506 | static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL); | 506 | static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL); |
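The queue_full attribute now reads both statistics from the qdio structure: the first number printed is the count of request-queue-full incidents (req_q_full), the second the accumulated SBAL-microseconds figure (req_q_util) maintained by zfcp_qdio_account.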