author | Swen Schillig <swen@vnet.ibm.com> | 2008-06-10 12:20:57 -0400 |
---|---|---|
committer | James Bottomley <James.Bottomley@HansenPartnership.com> | 2008-07-12 09:22:25 -0400 |
commit | 00bab91066a49468bfa4f6d5c8ad5e9ec53b7ea3 (patch) | |
tree | a5ce7bfe5ad290c339f669b3596b75f5238157c6 /drivers/s390 | |
parent | fa04c2816883a49ec518514f6c19767d54be20b5 (diff) | |
[SCSI] zfcp: Cleanup qdio code
Clean up the interface code between zfcp and qdio. Also move code that
belongs to the qdio interface from the erp file into the qdio file.
Signed-off-by: Swen Schillig <swen@vnet.ibm.com>
Signed-off-by: Christof Schmitt <christof.schmitt@de.ibm.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Diffstat (limited to 'drivers/s390')
-rw-r--r-- | drivers/s390/scsi/zfcp_aux.c | 45 |
-rw-r--r-- | drivers/s390/scsi/zfcp_dbf.c | 3 |
-rw-r--r-- | drivers/s390/scsi/zfcp_def.h | 26 |
-rw-r--r-- | drivers/s390/scsi/zfcp_erp.c | 114 |
-rw-r--r-- | drivers/s390/scsi/zfcp_ext.h | 18 |
-rw-r--r-- | drivers/s390/scsi/zfcp_fsf.c | 178 |
-rw-r--r-- | drivers/s390/scsi/zfcp_qdio.c | 815 |
7 files changed, 412 insertions, 787 deletions
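
For orientation, the qdio interface that this patch leaves exported can be summarized as below. This is a sketch reassembled from the zfcp_ext.h hunk further down (post-patch right-hand column), not the complete header:

```c
/* Post-patch qdio interface of the zfcp driver, as declared in zfcp_ext.h
 * after this commit (reassembled from the diff below). */
extern void zfcp_qdio_zero_sbals(struct qdio_buffer **, int, int);
extern int  zfcp_qdio_allocate(struct zfcp_adapter *);
extern void zfcp_qdio_free(struct zfcp_adapter *);
extern int  zfcp_qdio_send(struct zfcp_fsf_req *fsf_req);
extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_fsf_req *);
extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_fsf_req *);
extern int  zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *, unsigned long,
				    struct scatterlist *, int);
extern int  zfcp_qdio_open(struct zfcp_adapter *adapter);
extern void zfcp_qdio_close(struct zfcp_adapter *adapter);
```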
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 735f7af43d6a..7084a6ae1096 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -606,7 +606,6 @@ static void _zfcp_status_read_scheduler(struct work_struct *work) | |||
606 | struct zfcp_adapter * | 606 | struct zfcp_adapter * |
607 | zfcp_adapter_enqueue(struct ccw_device *ccw_device) | 607 | zfcp_adapter_enqueue(struct ccw_device *ccw_device) |
608 | { | 608 | { |
609 | int retval = 0; | ||
610 | struct zfcp_adapter *adapter; | 609 | struct zfcp_adapter *adapter; |
611 | 610 | ||
612 | /* | 611 | /* |
@@ -627,19 +626,11 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device) | |||
627 | /* save ccw_device pointer */ | 626 | /* save ccw_device pointer */ |
628 | adapter->ccw_device = ccw_device; | 627 | adapter->ccw_device = ccw_device; |
629 | 628 | ||
630 | retval = zfcp_qdio_allocate_queues(adapter); | 629 | if (zfcp_qdio_allocate(adapter)) |
631 | if (retval) | ||
632 | goto queues_alloc_failed; | ||
633 | |||
634 | retval = zfcp_qdio_allocate(adapter); | ||
635 | if (retval) | ||
636 | goto qdio_allocate_failed; | 630 | goto qdio_allocate_failed; |
637 | 631 | ||
638 | retval = zfcp_allocate_low_mem_buffers(adapter); | 632 | if (zfcp_allocate_low_mem_buffers(adapter)) |
639 | if (retval) { | ||
640 | ZFCP_LOG_INFO("error: pool allocation failed\n"); | ||
641 | goto failed_low_mem_buffers; | 633 | goto failed_low_mem_buffers; |
642 | } | ||
643 | 634 | ||
644 | /* initialise reference count stuff */ | 635 | /* initialise reference count stuff */ |
645 | atomic_set(&adapter->refcount, 0); | 636 | atomic_set(&adapter->refcount, 0); |
@@ -653,11 +644,8 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device) | |||
653 | 644 | ||
654 | /* initialize list of fsf requests */ | 645 | /* initialize list of fsf requests */ |
655 | spin_lock_init(&adapter->req_list_lock); | 646 | spin_lock_init(&adapter->req_list_lock); |
656 | retval = zfcp_reqlist_alloc(adapter); | 647 | if (zfcp_reqlist_alloc(adapter)) |
657 | if (retval) { | ||
658 | ZFCP_LOG_INFO("request list initialization failed\n"); | ||
659 | goto failed_low_mem_buffers; | 648 | goto failed_low_mem_buffers; |
660 | } | ||
661 | 649 | ||
662 | /* initialize debug locks */ | 650 | /* initialize debug locks */ |
663 | 651 | ||
@@ -666,8 +654,7 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device) | |||
666 | spin_lock_init(&adapter->scsi_dbf_lock); | 654 | spin_lock_init(&adapter->scsi_dbf_lock); |
667 | spin_lock_init(&adapter->rec_dbf_lock); | 655 | spin_lock_init(&adapter->rec_dbf_lock); |
668 | 656 | ||
669 | retval = zfcp_adapter_debug_register(adapter); | 657 | if (zfcp_adapter_debug_register(adapter)) |
670 | if (retval) | ||
671 | goto debug_register_failed; | 658 | goto debug_register_failed; |
672 | 659 | ||
673 | /* initialize error recovery stuff */ | 660 | /* initialize error recovery stuff */ |
@@ -685,7 +672,7 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device) | |||
685 | init_waitqueue_head(&adapter->erp_done_wqh); | 672 | init_waitqueue_head(&adapter->erp_done_wqh); |
686 | 673 | ||
687 | /* initialize lock of associated request queue */ | 674 | /* initialize lock of associated request queue */ |
688 | rwlock_init(&adapter->request_queue.queue_lock); | 675 | rwlock_init(&adapter->req_q.lock); |
689 | INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler); | 676 | INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler); |
690 | 677 | ||
691 | /* mark adapter unusable as long as sysfs registration is not complete */ | 678 | /* mark adapter unusable as long as sysfs registration is not complete */ |
@@ -723,12 +710,8 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device) | |||
723 | zfcp_reqlist_free(adapter); | 710 | zfcp_reqlist_free(adapter); |
724 | failed_low_mem_buffers: | 711 | failed_low_mem_buffers: |
725 | zfcp_free_low_mem_buffers(adapter); | 712 | zfcp_free_low_mem_buffers(adapter); |
726 | if (qdio_free(ccw_device) != 0) | ||
727 | ZFCP_LOG_NORMAL("bug: qdio_free for adapter %s failed\n", | ||
728 | zfcp_get_busid_by_adapter(adapter)); | ||
729 | qdio_allocate_failed: | 713 | qdio_allocate_failed: |
730 | zfcp_qdio_free_queues(adapter); | 714 | zfcp_qdio_free(adapter); |
731 | queues_alloc_failed: | ||
732 | kfree(adapter); | 715 | kfree(adapter); |
733 | adapter = NULL; | 716 | adapter = NULL; |
734 | out: | 717 | out: |
@@ -757,10 +740,6 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter) | |||
757 | retval = zfcp_reqlist_isempty(adapter); | 740 | retval = zfcp_reqlist_isempty(adapter); |
758 | spin_unlock_irqrestore(&adapter->req_list_lock, flags); | 741 | spin_unlock_irqrestore(&adapter->req_list_lock, flags); |
759 | if (!retval) { | 742 | if (!retval) { |
760 | ZFCP_LOG_NORMAL("bug: adapter %s (%p) still in use, " | ||
761 | "%i requests outstanding\n", | ||
762 | zfcp_get_busid_by_adapter(adapter), adapter, | ||
763 | atomic_read(&adapter->reqs_active)); | ||
764 | retval = -EBUSY; | 743 | retval = -EBUSY; |
765 | goto out; | 744 | goto out; |
766 | } | 745 | } |
@@ -775,19 +754,9 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter) | |||
775 | /* decrease number of adapters in list */ | 754 | /* decrease number of adapters in list */ |
776 | zfcp_data.adapters--; | 755 | zfcp_data.adapters--; |
777 | 756 | ||
778 | ZFCP_LOG_TRACE("adapter %s (%p) removed from list, " | 757 | zfcp_qdio_free(adapter); |
779 | "%i adapters still in list\n", | ||
780 | zfcp_get_busid_by_adapter(adapter), | ||
781 | adapter, zfcp_data.adapters); | ||
782 | |||
783 | retval = qdio_free(adapter->ccw_device); | ||
784 | if (retval) | ||
785 | ZFCP_LOG_NORMAL("bug: qdio_free for adapter %s failed\n", | ||
786 | zfcp_get_busid_by_adapter(adapter)); | ||
787 | 758 | ||
788 | zfcp_free_low_mem_buffers(adapter); | 759 | zfcp_free_low_mem_buffers(adapter); |
789 | /* free memory of adapter data structure and queues */ | ||
790 | zfcp_qdio_free_queues(adapter); | ||
791 | zfcp_reqlist_free(adapter); | 760 | zfcp_reqlist_free(adapter); |
792 | kfree(adapter->fc_stats); | 761 | kfree(adapter->fc_stats); |
793 | kfree(adapter->stats_reset_data); | 762 | kfree(adapter->stats_reset_data); |
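
The net effect of the zfcp_aux.c hunks is a shorter setup/teardown path in zfcp_adapter_enqueue(): the retval bookkeeping and log messages are gone, and the qdio queues are allocated and freed through the new single-call interface. A condensed sketch of the post-patch error path, reassembled from the right-hand column above:

```c
	if (zfcp_qdio_allocate(adapter))
		goto qdio_allocate_failed;

	if (zfcp_allocate_low_mem_buffers(adapter))
		goto failed_low_mem_buffers;
	/* ... further setup ... */

failed_low_mem_buffers:
	zfcp_free_low_mem_buffers(adapter);
qdio_allocate_failed:
	zfcp_qdio_free(adapter);
	kfree(adapter);
	adapter = NULL;
```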
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 01e817abe0a5..c47c23a01c7f 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -603,13 +603,14 @@ static const char *zfcp_rec_dbf_ids[] = { | |||
603 | [137] = "hbaapi port open", | 603 | [137] = "hbaapi port open", |
604 | [138] = "hbaapi unit open", | 604 | [138] = "hbaapi unit open", |
605 | [139] = "hbaapi unit shutdown", | 605 | [139] = "hbaapi unit shutdown", |
606 | [140] = "qdio error", | 606 | [140] = "qdio error outbound", |
607 | [141] = "scsi host reset", | 607 | [141] = "scsi host reset", |
608 | [142] = "dismissing fsf request for recovery action", | 608 | [142] = "dismissing fsf request for recovery action", |
609 | [143] = "recovery action timed out", | 609 | [143] = "recovery action timed out", |
610 | [144] = "recovery action gone", | 610 | [144] = "recovery action gone", |
611 | [145] = "recovery action being processed", | 611 | [145] = "recovery action being processed", |
612 | [146] = "recovery action ready for next step", | 612 | [146] = "recovery action ready for next step", |
613 | [147] = "qdio error inbound", | ||
613 | }; | 614 | }; |
614 | 615 | ||
615 | static int zfcp_rec_dbf_view_format(debug_info_t *id, struct debug_view *view, | 616 | static int zfcp_rec_dbf_view_format(debug_info_t *id, struct debug_view *view, |
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 72f225817ebd..5fcb555e1484 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -112,21 +112,10 @@ zfcp_address_to_sg(void *address, struct scatterlist *list, unsigned int size) | |||
112 | /* max. number of (data buffer) SBALEs in largest SBAL chain | 112 | /* max. number of (data buffer) SBALEs in largest SBAL chain |
113 | multiplied with number of sectors per 4k block */ | 113 | multiplied with number of sectors per 4k block */ |
114 | 114 | ||
115 | /* FIXME(tune): free space should be one max. SBAL chain plus what? */ | ||
116 | #define ZFCP_QDIO_PCI_INTERVAL (QDIO_MAX_BUFFERS_PER_Q \ | ||
117 | - (ZFCP_MAX_SBALS_PER_REQ + 4)) | ||
118 | |||
119 | #define ZFCP_SBAL_TIMEOUT (5*HZ) | 115 | #define ZFCP_SBAL_TIMEOUT (5*HZ) |
120 | 116 | ||
121 | #define ZFCP_TYPE2_RECOVERY_TIME 8 /* seconds */ | 117 | #define ZFCP_TYPE2_RECOVERY_TIME 8 /* seconds */ |
122 | 118 | ||
123 | /* queue polling (values in microseconds) */ | ||
124 | #define ZFCP_MAX_INPUT_THRESHOLD 5000 /* FIXME: tune */ | ||
125 | #define ZFCP_MAX_OUTPUT_THRESHOLD 1000 /* FIXME: tune */ | ||
126 | #define ZFCP_MIN_INPUT_THRESHOLD 1 /* ignored by QDIO layer */ | ||
127 | #define ZFCP_MIN_OUTPUT_THRESHOLD 1 /* ignored by QDIO layer */ | ||
128 | |||
129 | #define QDIO_SCSI_QFMT 1 /* 1 for FSF */ | ||
130 | #define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer)) | 119 | #define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer)) |
131 | 120 | ||
132 | /********************* FSF SPECIFIC DEFINES *********************************/ | 121 | /********************* FSF SPECIFIC DEFINES *********************************/ |
@@ -649,13 +638,13 @@ struct zfcp_send_els { | |||
649 | }; | 638 | }; |
650 | 639 | ||
651 | struct zfcp_qdio_queue { | 640 | struct zfcp_qdio_queue { |
652 | struct qdio_buffer *buffer[QDIO_MAX_BUFFERS_PER_Q]; /* SBALs */ | 641 | struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q]; /* SBALs */ |
653 | u8 free_index; /* index of next free bfr | 642 | u8 first; /* index of next free bfr |
654 | in queue (free_count>0) */ | 643 | in queue (free_count>0) */ |
655 | atomic_t free_count; /* number of free buffers | 644 | atomic_t count; /* number of free buffers |
656 | in queue */ | 645 | in queue */ |
657 | rwlock_t queue_lock; /* lock for operations on queue */ | 646 | rwlock_t lock; /* lock for operations on queue */ |
658 | int distance_from_int; /* SBALs used since PCI indication | 647 | int pci_batch; /* SBALs since PCI indication |
659 | was last set */ | 648 | was last set */ |
660 | }; | 649 | }; |
661 | 650 | ||
@@ -711,15 +700,14 @@ struct zfcp_adapter { | |||
711 | struct list_head port_remove_lh; /* head of ports to be | 700 | struct list_head port_remove_lh; /* head of ports to be |
712 | removed */ | 701 | removed */ |
713 | u32 ports; /* number of remote ports */ | 702 | u32 ports; /* number of remote ports */ |
714 | atomic_t reqs_active; /* # active FSF reqs */ | ||
715 | unsigned long req_no; /* unique FSF req number */ | 703 | unsigned long req_no; /* unique FSF req number */ |
716 | struct list_head *req_list; /* list of pending reqs */ | 704 | struct list_head *req_list; /* list of pending reqs */ |
717 | spinlock_t req_list_lock; /* request list lock */ | 705 | spinlock_t req_list_lock; /* request list lock */ |
718 | struct zfcp_qdio_queue request_queue; /* request queue */ | 706 | struct zfcp_qdio_queue req_q; /* request queue */ |
719 | u32 fsf_req_seq_no; /* FSF cmnd seq number */ | 707 | u32 fsf_req_seq_no; /* FSF cmnd seq number */ |
720 | wait_queue_head_t request_wq; /* can be used to wait for | 708 | wait_queue_head_t request_wq; /* can be used to wait for |
721 | more avaliable SBALs */ | 709 | more avaliable SBALs */ |
722 | struct zfcp_qdio_queue response_queue; /* response queue */ | 710 | struct zfcp_qdio_queue resp_q; /* response queue */ |
723 | rwlock_t abort_lock; /* Protects against SCSI | 711 | rwlock_t abort_lock; /* Protects against SCSI |
724 | stack abort/command | 712 | stack abort/command |
725 | completion races */ | 713 | completion races */ |
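
After the zfcp_def.h hunk, struct zfcp_qdio_queue carries the shorter member names used throughout the rest of the patch. For readability, the renamed structure reassembled from the right-hand column above:

```c
struct zfcp_qdio_queue {
	struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q]; /* SBALs */
	u8 first;        /* index of next free buffer in queue */
	atomic_t count;  /* number of free buffers in queue */
	rwlock_t lock;   /* lock for operations on queue */
	int pci_batch;   /* SBALs since PCI indication was last set */
};
```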
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index ee19be13e708..4a6d08363d4b 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -114,41 +114,6 @@ static void zfcp_erp_action_to_running(struct zfcp_erp_action *); | |||
114 | static void zfcp_erp_memwait_handler(unsigned long); | 114 | static void zfcp_erp_memwait_handler(unsigned long); |
115 | 115 | ||
116 | /** | 116 | /** |
117 | * zfcp_close_qdio - close qdio queues for an adapter | ||
118 | */ | ||
119 | static void zfcp_close_qdio(struct zfcp_adapter *adapter) | ||
120 | { | ||
121 | struct zfcp_qdio_queue *req_queue; | ||
122 | int first, count; | ||
123 | |||
124 | if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) | ||
125 | return; | ||
126 | |||
127 | /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */ | ||
128 | req_queue = &adapter->request_queue; | ||
129 | write_lock_irq(&req_queue->queue_lock); | ||
130 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); | ||
131 | write_unlock_irq(&req_queue->queue_lock); | ||
132 | |||
133 | while (qdio_shutdown(adapter->ccw_device, | ||
134 | QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS) | ||
135 | ssleep(1); | ||
136 | |||
137 | /* cleanup used outbound sbals */ | ||
138 | count = atomic_read(&req_queue->free_count); | ||
139 | if (count < QDIO_MAX_BUFFERS_PER_Q) { | ||
140 | first = (req_queue->free_index+count) % QDIO_MAX_BUFFERS_PER_Q; | ||
141 | count = QDIO_MAX_BUFFERS_PER_Q - count; | ||
142 | zfcp_qdio_zero_sbals(req_queue->buffer, first, count); | ||
143 | } | ||
144 | req_queue->free_index = 0; | ||
145 | atomic_set(&req_queue->free_count, 0); | ||
146 | req_queue->distance_from_int = 0; | ||
147 | adapter->response_queue.free_index = 0; | ||
148 | atomic_set(&adapter->response_queue.free_count, 0); | ||
149 | } | ||
150 | |||
151 | /** | ||
152 | * zfcp_close_fsf - stop FSF operations for an adapter | 117 | * zfcp_close_fsf - stop FSF operations for an adapter |
153 | * | 118 | * |
154 | * Dismiss and cleanup all pending fsf_reqs (this wakes up all initiators of | 119 | * Dismiss and cleanup all pending fsf_reqs (this wakes up all initiators of |
@@ -158,7 +123,7 @@ static void zfcp_close_qdio(struct zfcp_adapter *adapter) | |||
158 | static void zfcp_close_fsf(struct zfcp_adapter *adapter) | 123 | static void zfcp_close_fsf(struct zfcp_adapter *adapter) |
159 | { | 124 | { |
160 | /* close queues to ensure that buffers are not accessed by adapter */ | 125 | /* close queues to ensure that buffers are not accessed by adapter */ |
161 | zfcp_close_qdio(adapter); | 126 | zfcp_qdio_close(adapter); |
162 | zfcp_fsf_req_dismiss_all(adapter); | 127 | zfcp_fsf_req_dismiss_all(adapter); |
163 | /* reset FSF request sequence number */ | 128 | /* reset FSF request sequence number */ |
164 | adapter->fsf_req_seq_no = 0; | 129 | adapter->fsf_req_seq_no = 0; |
@@ -1735,88 +1700,17 @@ zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *erp_action, int close) | |||
1735 | static int | 1700 | static int |
1736 | zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action) | 1701 | zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action) |
1737 | { | 1702 | { |
1738 | int retval; | ||
1739 | int i; | ||
1740 | volatile struct qdio_buffer_element *sbale; | ||
1741 | struct zfcp_adapter *adapter = erp_action->adapter; | 1703 | struct zfcp_adapter *adapter = erp_action->adapter; |
1742 | 1704 | ||
1743 | if (atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) { | 1705 | if (zfcp_qdio_open(adapter)) |
1744 | ZFCP_LOG_NORMAL("bug: second attempt to set up QDIO on " | 1706 | return ZFCP_ERP_FAILED; |
1745 | "adapter %s\n", | ||
1746 | zfcp_get_busid_by_adapter(adapter)); | ||
1747 | goto failed_sanity; | ||
1748 | } | ||
1749 | |||
1750 | if (qdio_establish(&adapter->qdio_init_data) != 0) { | ||
1751 | ZFCP_LOG_INFO("error: establishment of QDIO queues failed " | ||
1752 | "on adapter %s\n", | ||
1753 | zfcp_get_busid_by_adapter(adapter)); | ||
1754 | goto failed_qdio_establish; | ||
1755 | } | ||
1756 | |||
1757 | if (qdio_activate(adapter->ccw_device, 0) != 0) { | ||
1758 | ZFCP_LOG_INFO("error: activation of QDIO queues failed " | ||
1759 | "on adapter %s\n", | ||
1760 | zfcp_get_busid_by_adapter(adapter)); | ||
1761 | goto failed_qdio_activate; | ||
1762 | } | ||
1763 | |||
1764 | /* | ||
1765 | * put buffers into response queue, | ||
1766 | */ | ||
1767 | for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) { | ||
1768 | sbale = &(adapter->response_queue.buffer[i]->element[0]); | ||
1769 | sbale->length = 0; | ||
1770 | sbale->flags = SBAL_FLAGS_LAST_ENTRY; | ||
1771 | sbale->addr = NULL; | ||
1772 | } | ||
1773 | |||
1774 | ZFCP_LOG_TRACE("calling do_QDIO on adapter %s (flags=0x%x, " | ||
1775 | "queue_no=%i, index_in_queue=%i, count=%i)\n", | ||
1776 | zfcp_get_busid_by_adapter(adapter), | ||
1777 | QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q); | ||
1778 | |||
1779 | retval = do_QDIO(adapter->ccw_device, | ||
1780 | QDIO_FLAG_SYNC_INPUT, | ||
1781 | 0, 0, QDIO_MAX_BUFFERS_PER_Q, NULL); | ||
1782 | |||
1783 | if (retval) { | ||
1784 | ZFCP_LOG_NORMAL("bug: setup of QDIO failed (retval=%d)\n", | ||
1785 | retval); | ||
1786 | goto failed_do_qdio; | ||
1787 | } else { | ||
1788 | adapter->response_queue.free_index = 0; | ||
1789 | atomic_set(&adapter->response_queue.free_count, 0); | ||
1790 | ZFCP_LOG_DEBUG("%i buffers successfully enqueued to " | ||
1791 | "response queue\n", QDIO_MAX_BUFFERS_PER_Q); | ||
1792 | } | ||
1793 | /* set index of first avalable SBALS / number of available SBALS */ | ||
1794 | adapter->request_queue.free_index = 0; | ||
1795 | atomic_set(&adapter->request_queue.free_count, QDIO_MAX_BUFFERS_PER_Q); | ||
1796 | adapter->request_queue.distance_from_int = 0; | ||
1797 | 1707 | ||
1798 | /* initialize waitqueue used to wait for free SBALs in requests queue */ | 1708 | /* initialize waitqueue used to wait for free SBALs in requests queue */ |
1799 | init_waitqueue_head(&adapter->request_wq); | 1709 | init_waitqueue_head(&adapter->request_wq); |
1800 | 1710 | ||
1801 | /* ok, we did it - skip all cleanups for different failures */ | 1711 | /* ok, we did it - skip all cleanups for different failures */ |
1802 | atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); | 1712 | atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); |
1803 | retval = ZFCP_ERP_SUCCEEDED; | 1713 | return ZFCP_ERP_SUCCEEDED; |
1804 | goto out; | ||
1805 | |||
1806 | failed_do_qdio: | ||
1807 | /* NOP */ | ||
1808 | |||
1809 | failed_qdio_activate: | ||
1810 | while (qdio_shutdown(adapter->ccw_device, | ||
1811 | QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS) | ||
1812 | ssleep(1); | ||
1813 | |||
1814 | failed_qdio_establish: | ||
1815 | failed_sanity: | ||
1816 | retval = ZFCP_ERP_FAILED; | ||
1817 | |||
1818 | out: | ||
1819 | return retval; | ||
1820 | } | 1714 | } |
1821 | 1715 | ||
1822 | 1716 | ||
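
With the qdio setup moved into zfcp_qdio_open(), the erp strategy step collapses to a thin wrapper. The post-patch function, reassembled from the right-hand column above:

```c
static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action)
{
	struct zfcp_adapter *adapter = erp_action->adapter;

	if (zfcp_qdio_open(adapter))
		return ZFCP_ERP_FAILED;

	/* initialize waitqueue used to wait for free SBALs in requests queue */
	init_waitqueue_head(&adapter->request_wq);

	atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
	return ZFCP_ERP_SUCCEEDED;
}
```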
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 867972898cb1..6ffe2068ba8a 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -57,21 +57,17 @@ extern int zfcp_ccw_register(void); | |||
57 | 57 | ||
58 | extern void zfcp_qdio_zero_sbals(struct qdio_buffer **, int, int); | 58 | extern void zfcp_qdio_zero_sbals(struct qdio_buffer **, int, int); |
59 | extern int zfcp_qdio_allocate(struct zfcp_adapter *); | 59 | extern int zfcp_qdio_allocate(struct zfcp_adapter *); |
60 | extern int zfcp_qdio_allocate_queues(struct zfcp_adapter *); | 60 | extern void zfcp_qdio_free(struct zfcp_adapter *); |
61 | extern void zfcp_qdio_free_queues(struct zfcp_adapter *); | 61 | extern int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req); |
62 | extern int zfcp_qdio_determine_pci(struct zfcp_qdio_queue *, | ||
63 | struct zfcp_fsf_req *); | ||
64 | 62 | ||
65 | extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req | 63 | extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req |
66 | (struct zfcp_fsf_req *, int, int); | 64 | (struct zfcp_fsf_req *); |
67 | extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_curr | 65 | extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_curr |
68 | (struct zfcp_fsf_req *); | 66 | (struct zfcp_fsf_req *); |
69 | extern int zfcp_qdio_sbals_from_sg | 67 | extern int zfcp_qdio_sbals_from_sg |
70 | (struct zfcp_fsf_req *, unsigned long, struct scatterlist *, int, int); | 68 | (struct zfcp_fsf_req *, unsigned long, struct scatterlist *, int); |
71 | extern int zfcp_qdio_sbals_from_scsicmnd | 69 | extern int zfcp_qdio_open(struct zfcp_adapter *adapter); |
72 | (struct zfcp_fsf_req *, unsigned long, struct scsi_cmnd *); | 70 | extern void zfcp_qdio_close(struct zfcp_adapter *adapter); |
73 | |||
74 | |||
75 | /******************************** FSF ****************************************/ | 71 | /******************************** FSF ****************************************/ |
76 | extern int zfcp_fsf_open_port(struct zfcp_erp_action *); | 72 | extern int zfcp_fsf_open_port(struct zfcp_erp_action *); |
77 | extern int zfcp_fsf_close_port(struct zfcp_erp_action *); | 73 | extern int zfcp_fsf_close_port(struct zfcp_erp_action *); |
@@ -95,7 +91,7 @@ extern int zfcp_fsf_status_read(struct zfcp_adapter *, int); | |||
95 | extern int zfcp_status_read_refill(struct zfcp_adapter *adapter); | 91 | extern int zfcp_status_read_refill(struct zfcp_adapter *adapter); |
96 | extern int zfcp_fsf_req_create(struct zfcp_adapter *, u32, int, mempool_t *, | 92 | extern int zfcp_fsf_req_create(struct zfcp_adapter *, u32, int, mempool_t *, |
97 | unsigned long *, struct zfcp_fsf_req **) | 93 | unsigned long *, struct zfcp_fsf_req **) |
98 | __acquires(adapter->request_queue.queue_lock); | 94 | __acquires(adapter->req_q.lock); |
99 | extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *, | 95 | extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *, |
100 | struct zfcp_erp_action *); | 96 | struct zfcp_erp_action *); |
101 | extern int zfcp_fsf_send_els(struct zfcp_send_els *); | 97 | extern int zfcp_fsf_send_els(struct zfcp_send_els *); |
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index de42a01fc4b1..cc48a6462e6c 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -171,7 +171,6 @@ void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) | |||
171 | 171 | ||
172 | BUG_ON(atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)); | 172 | BUG_ON(atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)); |
173 | spin_lock_irqsave(&adapter->req_list_lock, flags); | 173 | spin_lock_irqsave(&adapter->req_list_lock, flags); |
174 | atomic_set(&adapter->reqs_active, 0); | ||
175 | for (i = 0; i < REQUEST_LIST_SIZE; i++) | 174 | for (i = 0; i < REQUEST_LIST_SIZE; i++) |
176 | list_splice_init(&adapter->req_list[i], &remove_queue); | 175 | list_splice_init(&adapter->req_list[i], &remove_queue); |
177 | spin_unlock_irqrestore(&adapter->req_list_lock, flags); | 176 | spin_unlock_irqrestore(&adapter->req_list_lock, flags); |
@@ -726,7 +725,7 @@ zfcp_fsf_status_read(struct zfcp_adapter *adapter, int req_flags) | |||
726 | goto failed_req_create; | 725 | goto failed_req_create; |
727 | } | 726 | } |
728 | 727 | ||
729 | sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0); | 728 | sbale = zfcp_qdio_sbale_req(fsf_req); |
730 | sbale[0].flags |= SBAL_FLAGS0_TYPE_STATUS; | 729 | sbale[0].flags |= SBAL_FLAGS0_TYPE_STATUS; |
731 | sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY; | 730 | sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY; |
732 | fsf_req->sbale_curr = 2; | 731 | fsf_req->sbale_curr = 2; |
@@ -763,7 +762,7 @@ zfcp_fsf_status_read(struct zfcp_adapter *adapter, int req_flags) | |||
763 | failed_req_create: | 762 | failed_req_create: |
764 | zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL); | 763 | zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL); |
765 | out: | 764 | out: |
766 | write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags); | 765 | write_unlock_irqrestore(&adapter->req_q.lock, lock_flags); |
767 | return retval; | 766 | return retval; |
768 | } | 767 | } |
769 | 768 | ||
@@ -1075,7 +1074,7 @@ zfcp_fsf_abort_fcp_command(unsigned long old_req_id, | |||
1075 | &unit->status))) | 1074 | &unit->status))) |
1076 | goto unit_blocked; | 1075 | goto unit_blocked; |
1077 | 1076 | ||
1078 | sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0); | 1077 | sbale = zfcp_qdio_sbale_req(fsf_req); |
1079 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 1078 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
1080 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 1079 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
1081 | 1080 | ||
@@ -1098,7 +1097,7 @@ zfcp_fsf_abort_fcp_command(unsigned long old_req_id, | |||
1098 | fsf_req = NULL; | 1097 | fsf_req = NULL; |
1099 | 1098 | ||
1100 | out: | 1099 | out: |
1101 | write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags); | 1100 | write_unlock_irqrestore(&adapter->req_q.lock, lock_flags); |
1102 | return fsf_req; | 1101 | return fsf_req; |
1103 | } | 1102 | } |
1104 | 1103 | ||
@@ -1295,7 +1294,7 @@ zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool, | |||
1295 | goto failed_req; | 1294 | goto failed_req; |
1296 | } | 1295 | } |
1297 | 1296 | ||
1298 | sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0); | 1297 | sbale = zfcp_qdio_sbale_req(fsf_req); |
1299 | if (zfcp_use_one_sbal(ct->req, ct->req_count, | 1298 | if (zfcp_use_one_sbal(ct->req, ct->req_count, |
1300 | ct->resp, ct->resp_count)){ | 1299 | ct->resp, ct->resp_count)){ |
1301 | /* both request buffer and response buffer | 1300 | /* both request buffer and response buffer |
@@ -1311,7 +1310,7 @@ zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool, | |||
1311 | /* try to use chained SBALs */ | 1310 | /* try to use chained SBALs */ |
1312 | bytes = zfcp_qdio_sbals_from_sg(fsf_req, | 1311 | bytes = zfcp_qdio_sbals_from_sg(fsf_req, |
1313 | SBAL_FLAGS0_TYPE_WRITE_READ, | 1312 | SBAL_FLAGS0_TYPE_WRITE_READ, |
1314 | ct->req, ct->req_count, | 1313 | ct->req, |
1315 | ZFCP_MAX_SBALS_PER_CT_REQ); | 1314 | ZFCP_MAX_SBALS_PER_CT_REQ); |
1316 | if (bytes <= 0) { | 1315 | if (bytes <= 0) { |
1317 | ZFCP_LOG_INFO("error: creation of CT request failed " | 1316 | ZFCP_LOG_INFO("error: creation of CT request failed " |
@@ -1328,7 +1327,7 @@ zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool, | |||
1328 | fsf_req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL; | 1327 | fsf_req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL; |
1329 | bytes = zfcp_qdio_sbals_from_sg(fsf_req, | 1328 | bytes = zfcp_qdio_sbals_from_sg(fsf_req, |
1330 | SBAL_FLAGS0_TYPE_WRITE_READ, | 1329 | SBAL_FLAGS0_TYPE_WRITE_READ, |
1331 | ct->resp, ct->resp_count, | 1330 | ct->resp, |
1332 | ZFCP_MAX_SBALS_PER_CT_REQ); | 1331 | ZFCP_MAX_SBALS_PER_CT_REQ); |
1333 | if (bytes <= 0) { | 1332 | if (bytes <= 0) { |
1334 | ZFCP_LOG_INFO("error: creation of CT request failed " | 1333 | ZFCP_LOG_INFO("error: creation of CT request failed " |
@@ -1387,8 +1386,7 @@ zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool, | |||
1387 | } | 1386 | } |
1388 | failed_req: | 1387 | failed_req: |
1389 | out: | 1388 | out: |
1390 | write_unlock_irqrestore(&adapter->request_queue.queue_lock, | 1389 | write_unlock_irqrestore(&adapter->req_q.lock, lock_flags); |
1391 | lock_flags); | ||
1392 | return ret; | 1390 | return ret; |
1393 | } | 1391 | } |
1394 | 1392 | ||
@@ -1593,7 +1591,7 @@ zfcp_fsf_send_els(struct zfcp_send_els *els) | |||
1593 | goto port_blocked; | 1591 | goto port_blocked; |
1594 | } | 1592 | } |
1595 | 1593 | ||
1596 | sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0); | 1594 | sbale = zfcp_qdio_sbale_req(fsf_req); |
1597 | if (zfcp_use_one_sbal(els->req, els->req_count, | 1595 | if (zfcp_use_one_sbal(els->req, els->req_count, |
1598 | els->resp, els->resp_count)){ | 1596 | els->resp, els->resp_count)){ |
1599 | /* both request buffer and response buffer | 1597 | /* both request buffer and response buffer |
@@ -1609,7 +1607,7 @@ zfcp_fsf_send_els(struct zfcp_send_els *els) | |||
1609 | /* try to use chained SBALs */ | 1607 | /* try to use chained SBALs */ |
1610 | bytes = zfcp_qdio_sbals_from_sg(fsf_req, | 1608 | bytes = zfcp_qdio_sbals_from_sg(fsf_req, |
1611 | SBAL_FLAGS0_TYPE_WRITE_READ, | 1609 | SBAL_FLAGS0_TYPE_WRITE_READ, |
1612 | els->req, els->req_count, | 1610 | els->req, |
1613 | ZFCP_MAX_SBALS_PER_ELS_REQ); | 1611 | ZFCP_MAX_SBALS_PER_ELS_REQ); |
1614 | if (bytes <= 0) { | 1612 | if (bytes <= 0) { |
1615 | ZFCP_LOG_INFO("error: creation of ELS request failed " | 1613 | ZFCP_LOG_INFO("error: creation of ELS request failed " |
@@ -1626,7 +1624,7 @@ zfcp_fsf_send_els(struct zfcp_send_els *els) | |||
1626 | fsf_req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL; | 1624 | fsf_req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL; |
1627 | bytes = zfcp_qdio_sbals_from_sg(fsf_req, | 1625 | bytes = zfcp_qdio_sbals_from_sg(fsf_req, |
1628 | SBAL_FLAGS0_TYPE_WRITE_READ, | 1626 | SBAL_FLAGS0_TYPE_WRITE_READ, |
1629 | els->resp, els->resp_count, | 1627 | els->resp, |
1630 | ZFCP_MAX_SBALS_PER_ELS_REQ); | 1628 | ZFCP_MAX_SBALS_PER_ELS_REQ); |
1631 | if (bytes <= 0) { | 1629 | if (bytes <= 0) { |
1632 | ZFCP_LOG_INFO("error: creation of ELS request failed " | 1630 | ZFCP_LOG_INFO("error: creation of ELS request failed " |
@@ -1657,7 +1655,7 @@ zfcp_fsf_send_els(struct zfcp_send_els *els) | |||
1657 | fsf_req->qtcb->bottom.support.timeout = ZFCP_ELS_TIMEOUT; | 1655 | fsf_req->qtcb->bottom.support.timeout = ZFCP_ELS_TIMEOUT; |
1658 | fsf_req->data = (unsigned long) els; | 1656 | fsf_req->data = (unsigned long) els; |
1659 | 1657 | ||
1660 | sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0); | 1658 | sbale = zfcp_qdio_sbale_req(fsf_req); |
1661 | 1659 | ||
1662 | zfcp_san_dbf_event_els_request(fsf_req); | 1660 | zfcp_san_dbf_event_els_request(fsf_req); |
1663 | 1661 | ||
@@ -1680,8 +1678,7 @@ zfcp_fsf_send_els(struct zfcp_send_els *els) | |||
1680 | 1678 | ||
1681 | failed_req: | 1679 | failed_req: |
1682 | out: | 1680 | out: |
1683 | write_unlock_irqrestore(&adapter->request_queue.queue_lock, | 1681 | write_unlock_irqrestore(&adapter->req_q.lock, lock_flags); |
1684 | lock_flags); | ||
1685 | 1682 | ||
1686 | return ret; | 1683 | return ret; |
1687 | } | 1684 | } |
@@ -1863,12 +1860,11 @@ zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) | |||
1863 | ZFCP_LOG_INFO("error: Could not create exchange configuration " | 1860 | ZFCP_LOG_INFO("error: Could not create exchange configuration " |
1864 | "data request for adapter %s.\n", | 1861 | "data request for adapter %s.\n", |
1865 | zfcp_get_busid_by_adapter(adapter)); | 1862 | zfcp_get_busid_by_adapter(adapter)); |
1866 | write_unlock_irqrestore(&adapter->request_queue.queue_lock, | 1863 | write_unlock_irqrestore(&adapter->req_q.lock, lock_flags); |
1867 | lock_flags); | ||
1868 | return retval; | 1864 | return retval; |
1869 | } | 1865 | } |
1870 | 1866 | ||
1871 | sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0); | 1867 | sbale = zfcp_qdio_sbale_req(fsf_req); |
1872 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 1868 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
1873 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 1869 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
1874 | 1870 | ||
@@ -1882,8 +1878,7 @@ zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) | |||
1882 | 1878 | ||
1883 | zfcp_erp_start_timer(fsf_req); | 1879 | zfcp_erp_start_timer(fsf_req); |
1884 | retval = zfcp_fsf_req_send(fsf_req); | 1880 | retval = zfcp_fsf_req_send(fsf_req); |
1885 | write_unlock_irqrestore(&adapter->request_queue.queue_lock, | 1881 | write_unlock_irqrestore(&adapter->req_q.lock, lock_flags); |
1886 | lock_flags); | ||
1887 | if (retval) { | 1882 | if (retval) { |
1888 | ZFCP_LOG_INFO("error: Could not send exchange configuration " | 1883 | ZFCP_LOG_INFO("error: Could not send exchange configuration " |
1889 | "data command on the adapter %s\n", | 1884 | "data command on the adapter %s\n", |
@@ -1916,12 +1911,11 @@ zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter, | |||
1916 | ZFCP_LOG_INFO("error: Could not create exchange configuration " | 1911 | ZFCP_LOG_INFO("error: Could not create exchange configuration " |
1917 | "data request for adapter %s.\n", | 1912 | "data request for adapter %s.\n", |
1918 | zfcp_get_busid_by_adapter(adapter)); | 1913 | zfcp_get_busid_by_adapter(adapter)); |
1919 | write_unlock_irqrestore(&adapter->request_queue.queue_lock, | 1914 | write_unlock_irqrestore(&adapter->req_q.lock, lock_flags); |
1920 | lock_flags); | ||
1921 | return retval; | 1915 | return retval; |
1922 | } | 1916 | } |
1923 | 1917 | ||
1924 | sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0); | 1918 | sbale = zfcp_qdio_sbale_req(fsf_req); |
1925 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 1919 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
1926 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 1920 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
1927 | 1921 | ||
@@ -1936,8 +1930,7 @@ zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter, | |||
1936 | 1930 | ||
1937 | zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT); | 1931 | zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT); |
1938 | retval = zfcp_fsf_req_send(fsf_req); | 1932 | retval = zfcp_fsf_req_send(fsf_req); |
1939 | write_unlock_irqrestore(&adapter->request_queue.queue_lock, | 1933 | write_unlock_irqrestore(&adapter->req_q.lock, lock_flags); |
1940 | lock_flags); | ||
1941 | if (retval) | 1934 | if (retval) |
1942 | ZFCP_LOG_INFO("error: Could not send exchange configuration " | 1935 | ZFCP_LOG_INFO("error: Could not send exchange configuration " |
1943 | "data command on the adapter %s\n", | 1936 | "data command on the adapter %s\n", |
@@ -2178,12 +2171,11 @@ zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) | |||
2178 | "exchange port data request for " | 2171 | "exchange port data request for " |
2179 | "the adapter %s.\n", | 2172 | "the adapter %s.\n", |
2180 | zfcp_get_busid_by_adapter(adapter)); | 2173 | zfcp_get_busid_by_adapter(adapter)); |
2181 | write_unlock_irqrestore(&adapter->request_queue.queue_lock, | 2174 | write_unlock_irqrestore(&adapter->req_q.lock, lock_flags); |
2182 | lock_flags); | ||
2183 | return retval; | 2175 | return retval; |
2184 | } | 2176 | } |
2185 | 2177 | ||
2186 | sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0); | 2178 | sbale = zfcp_qdio_sbale_req(fsf_req); |
2187 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 2179 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
2188 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 2180 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
2189 | 2181 | ||
@@ -2192,7 +2184,7 @@ zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) | |||
2192 | zfcp_erp_start_timer(fsf_req); | 2184 | zfcp_erp_start_timer(fsf_req); |
2193 | 2185 | ||
2194 | retval = zfcp_fsf_req_send(fsf_req); | 2186 | retval = zfcp_fsf_req_send(fsf_req); |
2195 | write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags); | 2187 | write_unlock_irqrestore(&adapter->req_q.lock, lock_flags); |
2196 | 2188 | ||
2197 | if (retval) { | 2189 | if (retval) { |
2198 | ZFCP_LOG_INFO("error: Could not send an exchange port data " | 2190 | ZFCP_LOG_INFO("error: Could not send an exchange port data " |
@@ -2237,21 +2229,20 @@ zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter, | |||
2237 | "exchange port data request for " | 2229 | "exchange port data request for " |
2238 | "the adapter %s.\n", | 2230 | "the adapter %s.\n", |
2239 | zfcp_get_busid_by_adapter(adapter)); | 2231 | zfcp_get_busid_by_adapter(adapter)); |
2240 | write_unlock_irqrestore(&adapter->request_queue.queue_lock, | 2232 | write_unlock_irqrestore(&adapter->req_q.lock, lock_flags); |
2241 | lock_flags); | ||
2242 | return retval; | 2233 | return retval; |
2243 | } | 2234 | } |
2244 | 2235 | ||
2245 | if (data) | 2236 | if (data) |
2246 | fsf_req->data = (unsigned long) data; | 2237 | fsf_req->data = (unsigned long) data; |
2247 | 2238 | ||
2248 | sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0); | 2239 | sbale = zfcp_qdio_sbale_req(fsf_req); |
2249 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 2240 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
2250 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 2241 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
2251 | 2242 | ||
2252 | zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT); | 2243 | zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT); |
2253 | retval = zfcp_fsf_req_send(fsf_req); | 2244 | retval = zfcp_fsf_req_send(fsf_req); |
2254 | write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags); | 2245 | write_unlock_irqrestore(&adapter->req_q.lock, lock_flags); |
2255 | 2246 | ||
2256 | if (retval) | 2247 | if (retval) |
2257 | ZFCP_LOG_INFO("error: Could not send an exchange port data " | 2248 | ZFCP_LOG_INFO("error: Could not send an exchange port data " |
@@ -2355,7 +2346,7 @@ zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) | |||
2355 | goto out; | 2346 | goto out; |
2356 | } | 2347 | } |
2357 | 2348 | ||
2358 | sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0); | 2349 | sbale = zfcp_qdio_sbale_req(fsf_req); |
2359 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 2350 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
2360 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 2351 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
2361 | 2352 | ||
@@ -2382,8 +2373,7 @@ zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) | |||
2382 | zfcp_get_busid_by_adapter(erp_action->adapter), | 2373 | zfcp_get_busid_by_adapter(erp_action->adapter), |
2383 | erp_action->port->wwpn); | 2374 | erp_action->port->wwpn); |
2384 | out: | 2375 | out: |
2385 | write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock, | 2376 | write_unlock_irqrestore(&erp_action->adapter->req_q.lock, lock_flags); |
2386 | lock_flags); | ||
2387 | return retval; | 2377 | return retval; |
2388 | } | 2378 | } |
2389 | 2379 | ||
@@ -2587,7 +2577,7 @@ zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) | |||
2587 | goto out; | 2577 | goto out; |
2588 | } | 2578 | } |
2589 | 2579 | ||
2590 | sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0); | 2580 | sbale = zfcp_qdio_sbale_req(fsf_req); |
2591 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 2581 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
2592 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 2582 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
2593 | 2583 | ||
@@ -2615,8 +2605,7 @@ zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) | |||
2615 | zfcp_get_busid_by_adapter(erp_action->adapter), | 2605 | zfcp_get_busid_by_adapter(erp_action->adapter), |
2616 | erp_action->port->wwpn); | 2606 | erp_action->port->wwpn); |
2617 | out: | 2607 | out: |
2618 | write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock, | 2608 | write_unlock_irqrestore(&erp_action->adapter->req_q.lock, lock_flags); |
2619 | lock_flags); | ||
2620 | return retval; | 2609 | return retval; |
2621 | } | 2610 | } |
2622 | 2611 | ||
@@ -2716,7 +2705,7 @@ zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) | |||
2716 | goto out; | 2705 | goto out; |
2717 | } | 2706 | } |
2718 | 2707 | ||
2719 | sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0); | 2708 | sbale = zfcp_qdio_sbale_req(fsf_req); |
2720 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 2709 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
2721 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 2710 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
2722 | 2711 | ||
@@ -2746,8 +2735,7 @@ zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) | |||
2746 | zfcp_get_busid_by_adapter(erp_action->adapter), | 2735 | zfcp_get_busid_by_adapter(erp_action->adapter), |
2747 | erp_action->port->wwpn); | 2736 | erp_action->port->wwpn); |
2748 | out: | 2737 | out: |
2749 | write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock, | 2738 | write_unlock_irqrestore(&erp_action->adapter->req_q.lock, lock_flags); |
2750 | lock_flags); | ||
2751 | return retval; | 2739 | return retval; |
2752 | } | 2740 | } |
2753 | 2741 | ||
@@ -2911,7 +2899,7 @@ zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) | |||
2911 | goto out; | 2899 | goto out; |
2912 | } | 2900 | } |
2913 | 2901 | ||
2914 | sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0); | 2902 | sbale = zfcp_qdio_sbale_req(fsf_req); |
2915 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 2903 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
2916 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 2904 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
2917 | 2905 | ||
@@ -2944,8 +2932,7 @@ zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) | |||
2944 | zfcp_get_busid_by_adapter(erp_action->adapter), | 2932 | zfcp_get_busid_by_adapter(erp_action->adapter), |
2945 | erp_action->port->wwpn, erp_action->unit->fcp_lun); | 2933 | erp_action->port->wwpn, erp_action->unit->fcp_lun); |
2946 | out: | 2934 | out: |
2947 | write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock, | 2935 | write_unlock_irqrestore(&erp_action->adapter->req_q.lock, lock_flags); |
2948 | lock_flags); | ||
2949 | return retval; | 2936 | return retval; |
2950 | } | 2937 | } |
2951 | 2938 | ||
@@ -3226,7 +3213,7 @@ zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) | |||
3226 | goto out; | 3213 | goto out; |
3227 | } | 3214 | } |
3228 | 3215 | ||
3229 | sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0); | 3216 | sbale = zfcp_qdio_sbale_req(fsf_req); |
3230 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; | 3217 | sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; |
3231 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 3218 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
3232 | 3219 | ||
@@ -3255,8 +3242,7 @@ zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) | |||
3255 | zfcp_get_busid_by_adapter(erp_action->adapter), | 3242 | zfcp_get_busid_by_adapter(erp_action->adapter), |
3256 | erp_action->port->wwpn, erp_action->unit->fcp_lun); | 3243 | erp_action->port->wwpn, erp_action->unit->fcp_lun); |
3257 | out: | 3244 | out: |
3258 | write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock, | 3245 | write_unlock_irqrestore(&erp_action->adapter->req_q.lock, lock_flags); |
3259 | lock_flags); | ||
3260 | return retval; | 3246 | return retval; |
3261 | } | 3247 | } |
3262 | 3248 | ||
@@ -3498,7 +3484,9 @@ zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter, | |||
3498 | fcp_cmnd_iu->add_fcp_cdb_length + sizeof (fcp_dl_t); | 3484 | fcp_cmnd_iu->add_fcp_cdb_length + sizeof (fcp_dl_t); |
3499 | 3485 | ||
3500 | /* generate SBALEs from data buffer */ | 3486 | /* generate SBALEs from data buffer */ |
3501 | real_bytes = zfcp_qdio_sbals_from_scsicmnd(fsf_req, sbtype, scsi_cmnd); | 3487 | real_bytes = zfcp_qdio_sbals_from_sg(fsf_req, sbtype, |
3488 | scsi_sglist(scsi_cmnd), | ||
3489 | ZFCP_MAX_SBALS_PER_REQ); | ||
3502 | if (unlikely(real_bytes < 0)) { | 3490 | if (unlikely(real_bytes < 0)) { |
3503 | if (fsf_req->sbal_number < ZFCP_MAX_SBALS_PER_REQ) { | 3491 | if (fsf_req->sbal_number < ZFCP_MAX_SBALS_PER_REQ) { |
3504 | ZFCP_LOG_DEBUG( | 3492 | ZFCP_LOG_DEBUG( |
@@ -3556,7 +3544,7 @@ zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter, | |||
3556 | scsi_cmnd->host_scribble = NULL; | 3544 | scsi_cmnd->host_scribble = NULL; |
3557 | success: | 3545 | success: |
3558 | failed_req_create: | 3546 | failed_req_create: |
3559 | write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags); | 3547 | write_unlock_irqrestore(&adapter->req_q.lock, lock_flags); |
3560 | return retval; | 3548 | return retval; |
3561 | } | 3549 | } |
3562 | 3550 | ||
@@ -3609,7 +3597,7 @@ zfcp_fsf_send_fcp_command_task_management(struct zfcp_adapter *adapter, | |||
3609 | fsf_req->qtcb->bottom.io.fcp_cmnd_length = | 3597 | fsf_req->qtcb->bottom.io.fcp_cmnd_length = |
3610 | sizeof (struct fcp_cmnd_iu) + sizeof (fcp_dl_t); | 3598 | sizeof (struct fcp_cmnd_iu) + sizeof (fcp_dl_t); |
3611 | 3599 | ||
3612 | sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0); | 3600 | sbale = zfcp_qdio_sbale_req(fsf_req); |
3613 | sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE; | 3601 | sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE; |
3614 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; | 3602 | sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; |
3615 | 3603 | ||
@@ -3629,7 +3617,7 @@ zfcp_fsf_send_fcp_command_task_management(struct zfcp_adapter *adapter, | |||
3629 | fsf_req = NULL; | 3617 | fsf_req = NULL; |
3630 | 3618 | ||
3631 | out: | 3619 | out: |
3632 | write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags); | 3620 | write_unlock_irqrestore(&adapter->req_q.lock, lock_flags); |
3633 | return fsf_req; | 3621 | return fsf_req; |
3634 | } | 3622 | } |
3635 | 3623 | ||
@@ -4216,7 +4204,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, | |||
4216 | goto unlock_queue_lock; | 4204 | goto unlock_queue_lock; |
4217 | } | 4205 | } |
4218 | 4206 | ||
4219 | sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0); | 4207 | sbale = zfcp_qdio_sbale_req(fsf_req); |
4220 | sbale[0].flags |= direction; | 4208 | sbale[0].flags |= direction; |
4221 | 4209 | ||
4222 | bottom = &fsf_req->qtcb->bottom.support; | 4210 | bottom = &fsf_req->qtcb->bottom.support; |
@@ -4224,7 +4212,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, | |||
4224 | bottom->option = fsf_cfdc->option; | 4212 | bottom->option = fsf_cfdc->option; |
4225 | 4213 | ||
4226 | bytes = zfcp_qdio_sbals_from_sg(fsf_req, direction, | 4214 | bytes = zfcp_qdio_sbals_from_sg(fsf_req, direction, |
4227 | fsf_cfdc->sg, ZFCP_CFDC_PAGES, | 4215 | fsf_cfdc->sg, |
4228 | ZFCP_MAX_SBALS_PER_REQ); | 4216 | ZFCP_MAX_SBALS_PER_REQ); |
4229 | if (bytes != ZFCP_CFDC_MAX_SIZE) { | 4217 | if (bytes != ZFCP_CFDC_MAX_SIZE) { |
4230 | retval = -ENOMEM; | 4218 | retval = -ENOMEM; |
@@ -4237,7 +4225,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, | |||
4237 | retval = -EPERM; | 4225 | retval = -EPERM; |
4238 | goto free_fsf_req; | 4226 | goto free_fsf_req; |
4239 | } | 4227 | } |
4240 | write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags); | 4228 | write_unlock_irqrestore(&adapter->req_q.lock, lock_flags); |
4241 | 4229 | ||
4242 | wait_event(fsf_req->completion_wq, | 4230 | wait_event(fsf_req->completion_wq, |
4243 | fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED); | 4231 | fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED); |
@@ -4247,7 +4235,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, | |||
4247 | free_fsf_req: | 4235 | free_fsf_req: |
4248 | zfcp_fsf_req_free(fsf_req); | 4236 | zfcp_fsf_req_free(fsf_req); |
4249 | unlock_queue_lock: | 4237 | unlock_queue_lock: |
4250 | write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags); | 4238 | write_unlock_irqrestore(&adapter->req_q.lock, lock_flags); |
4251 | return ERR_PTR(retval); | 4239 | return ERR_PTR(retval); |
4252 | } | 4240 | } |
4253 | 4241 | ||
@@ -4261,10 +4249,10 @@ static inline int | |||
4261 | zfcp_fsf_req_sbal_check(unsigned long *flags, | 4249 | zfcp_fsf_req_sbal_check(unsigned long *flags, |
4262 | struct zfcp_qdio_queue *queue, int needed) | 4250 | struct zfcp_qdio_queue *queue, int needed) |
4263 | { | 4251 | { |
4264 | write_lock_irqsave(&queue->queue_lock, *flags); | 4252 | write_lock_irqsave(&queue->lock, *flags); |
4265 | if (likely(atomic_read(&queue->free_count) >= needed)) | 4253 | if (likely(atomic_read(&queue->count) >= needed)) |
4266 | return 1; | 4254 | return 1; |
4267 | write_unlock_irqrestore(&queue->queue_lock, *flags); | 4255 | write_unlock_irqrestore(&queue->lock, *flags); |
4268 | return 0; | 4256 | return 0; |
4269 | } | 4257 | } |
4270 | 4258 | ||
@@ -4293,24 +4281,24 @@ zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req) | |||
4293 | * @req_flags: flags indicating whether to wait for needed SBAL or not | 4281 | * @req_flags: flags indicating whether to wait for needed SBAL or not |
4294 | * @lock_flags: lock_flags if queue_lock is taken | 4282 | * @lock_flags: lock_flags if queue_lock is taken |
4295 | * Return: 0 on success, otherwise -EIO, or -ERESTARTSYS | 4283 | * Return: 0 on success, otherwise -EIO, or -ERESTARTSYS |
4296 | * Locks: lock adapter->request_queue->queue_lock on success | 4284 | * Locks: lock adapter->req_q->lock on success |
4297 | */ | 4285 | */ |
4298 | static int | 4286 | static int |
4299 | zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter, int req_flags, | 4287 | zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter, int req_flags, |
4300 | unsigned long *lock_flags) | 4288 | unsigned long *lock_flags) |
4301 | { | 4289 | { |
4302 | long ret; | 4290 | long ret; |
4303 | struct zfcp_qdio_queue *req_queue = &adapter->request_queue; | 4291 | struct zfcp_qdio_queue *req_q = &adapter->req_q; |
4304 | 4292 | ||
4305 | if (unlikely(req_flags & ZFCP_WAIT_FOR_SBAL)) { | 4293 | if (unlikely(req_flags & ZFCP_WAIT_FOR_SBAL)) { |
4306 | ret = wait_event_interruptible_timeout(adapter->request_wq, | 4294 | ret = wait_event_interruptible_timeout(adapter->request_wq, |
4307 | zfcp_fsf_req_sbal_check(lock_flags, req_queue, 1), | 4295 | zfcp_fsf_req_sbal_check(lock_flags, req_q, 1), |
4308 | ZFCP_SBAL_TIMEOUT); | 4296 | ZFCP_SBAL_TIMEOUT); |
4309 | if (ret < 0) | 4297 | if (ret < 0) |
4310 | return ret; | 4298 | return ret; |
4311 | if (!ret) | 4299 | if (!ret) |
4312 | return -EIO; | 4300 | return -EIO; |
4313 | } else if (!zfcp_fsf_req_sbal_check(lock_flags, req_queue, 1)) | 4301 | } else if (!zfcp_fsf_req_sbal_check(lock_flags, req_q, 1)) |
4314 | return -EIO; | 4302 | return -EIO; |
4315 | 4303 | ||
4316 | return 0; | 4304 | return 0; |
@@ -4340,7 +4328,7 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags, | |||
4340 | volatile struct qdio_buffer_element *sbale; | 4328 | volatile struct qdio_buffer_element *sbale; |
4341 | struct zfcp_fsf_req *fsf_req = NULL; | 4329 | struct zfcp_fsf_req *fsf_req = NULL; |
4342 | int ret = 0; | 4330 | int ret = 0; |
4343 | struct zfcp_qdio_queue *req_queue = &adapter->request_queue; | 4331 | struct zfcp_qdio_queue *req_q = &adapter->req_q; |
4344 | 4332 | ||
4345 | /* allocate new FSF request */ | 4333 | /* allocate new FSF request */ |
4346 | fsf_req = zfcp_fsf_req_alloc(pool, req_flags); | 4334 | fsf_req = zfcp_fsf_req_alloc(pool, req_flags); |
@@ -4377,7 +4365,7 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags, | |||
4377 | */ | 4365 | */ |
4378 | 4366 | ||
4379 | if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) { | 4367 | if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) { |
4380 | write_unlock_irqrestore(&req_queue->queue_lock, *lock_flags); | 4368 | write_unlock_irqrestore(&req_q->lock, *lock_flags); |
4381 | ret = -EIO; | 4369 | ret = -EIO; |
4382 | goto failed_sbals; | 4370 | goto failed_sbals; |
4383 | } | 4371 | } |
@@ -4387,15 +4375,15 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags, | |||
4387 | fsf_req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no; | 4375 | fsf_req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no; |
4388 | } | 4376 | } |
4389 | fsf_req->sbal_number = 1; | 4377 | fsf_req->sbal_number = 1; |
4390 | fsf_req->sbal_first = req_queue->free_index; | 4378 | fsf_req->sbal_first = req_q->first; |
4391 | fsf_req->sbal_last = req_queue->free_index; | 4379 | fsf_req->sbal_last = req_q->first; |
4392 | fsf_req->sbale_curr = 1; | 4380 | fsf_req->sbale_curr = 1; |
4393 | 4381 | ||
4394 | if (likely(req_flags & ZFCP_REQ_AUTO_CLEANUP)) { | 4382 | if (likely(req_flags & ZFCP_REQ_AUTO_CLEANUP)) { |
4395 | fsf_req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; | 4383 | fsf_req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; |
4396 | } | 4384 | } |
4397 | 4385 | ||
4398 | sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0); | 4386 | sbale = zfcp_qdio_sbale_req(fsf_req); |
4399 | 4387 | ||
4400 | /* setup common SBALE fields */ | 4388 | /* setup common SBALE fields */ |
4401 | sbale[0].addr = (void *) fsf_req->req_id; | 4389 | sbale[0].addr = (void *) fsf_req->req_id; |
@@ -4416,7 +4404,7 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags, | |||
4416 | fsf_req = NULL; | 4404 | fsf_req = NULL; |
4417 | 4405 | ||
4418 | failed_fsf_req: | 4406 | failed_fsf_req: |
4419 | write_lock_irqsave(&req_queue->queue_lock, *lock_flags); | 4407 | write_lock_irqsave(&req_q->lock, *lock_flags); |
4420 | success: | 4408 | success: |
4421 | *fsf_req_p = fsf_req; | 4409 | *fsf_req_p = fsf_req; |
4422 | return ret; | 4410 | return ret; |
@@ -4433,18 +4421,17 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags, | |||
4433 | static int zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req) | 4421 | static int zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req) |
4434 | { | 4422 | { |
4435 | struct zfcp_adapter *adapter; | 4423 | struct zfcp_adapter *adapter; |
4436 | struct zfcp_qdio_queue *req_queue; | 4424 | struct zfcp_qdio_queue *req_q; |
4437 | volatile struct qdio_buffer_element *sbale; | 4425 | volatile struct qdio_buffer_element *sbale; |
4438 | int inc_seq_no; | 4426 | int inc_seq_no; |
4439 | int new_distance_from_int; | ||
4440 | int retval = 0; | 4427 | int retval = 0; |
4441 | 4428 | ||
4442 | adapter = fsf_req->adapter; | 4429 | adapter = fsf_req->adapter; |
4443 | req_queue = &adapter->request_queue, | 4430 | req_q = &adapter->req_q; |
4444 | 4431 | ||
4445 | 4432 | ||
4446 | /* FIXME(debug): remove it later */ | 4433 | /* FIXME(debug): remove it later */ |
4447 | sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_first, 0); | 4434 | sbale = zfcp_qdio_sbale_req(fsf_req); |
4448 | ZFCP_LOG_DEBUG("SBALE0 flags=0x%x\n", sbale[0].flags); | 4435 | ZFCP_LOG_DEBUG("SBALE0 flags=0x%x\n", sbale[0].flags); |
4449 | ZFCP_LOG_TRACE("HEX DUMP OF SBALE1 PAYLOAD:\n"); | 4436 | ZFCP_LOG_TRACE("HEX DUMP OF SBALE1 PAYLOAD:\n"); |
4450 | ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) sbale[1].addr, | 4437 | ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) sbale[1].addr, |
@@ -4457,52 +4444,24 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req) | |||
4457 | 4444 | ||
4458 | inc_seq_no = (fsf_req->qtcb != NULL); | 4445 | inc_seq_no = (fsf_req->qtcb != NULL); |
4459 | 4446 | ||
4460 | ZFCP_LOG_TRACE("request queue of adapter %s: " | ||
4461 | "next free SBAL is %i, %i free SBALs\n", | ||
4462 | zfcp_get_busid_by_adapter(adapter), | ||
4463 | req_queue->free_index, | ||
4464 | atomic_read(&req_queue->free_count)); | ||
4465 | |||
4466 | ZFCP_LOG_DEBUG("calling do_QDIO adapter %s, flags=0x%x, queue_no=%i, " | ||
4467 | "index_in_queue=%i, count=%i, buffers=%p\n", | ||
4468 | zfcp_get_busid_by_adapter(adapter), | ||
4469 | QDIO_FLAG_SYNC_OUTPUT, | ||
4470 | 0, fsf_req->sbal_first, fsf_req->sbal_number, | ||
4471 | &req_queue->buffer[fsf_req->sbal_first]); | ||
4472 | |||
4473 | /* | ||
4474 | * adjust the number of free SBALs in request queue as well as | ||
4475 | * position of first one | ||
4476 | */ | ||
4477 | atomic_sub(fsf_req->sbal_number, &req_queue->free_count); | ||
4478 | ZFCP_LOG_TRACE("free_count=%d\n", atomic_read(&req_queue->free_count)); | ||
4479 | req_queue->free_index += fsf_req->sbal_number; /* increase */ | ||
4480 | req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap if needed */ | ||
4481 | new_distance_from_int = zfcp_qdio_determine_pci(req_queue, fsf_req); | ||
4482 | |||
4483 | fsf_req->issued = get_clock(); | 4447 | fsf_req->issued = get_clock(); |
4484 | 4448 | ||
4485 | retval = do_QDIO(adapter->ccw_device, | 4449 | retval = zfcp_qdio_send(fsf_req); |
4486 | QDIO_FLAG_SYNC_OUTPUT, | ||
4487 | 0, fsf_req->sbal_first, fsf_req->sbal_number, NULL); | ||
4488 | 4450 | ||
4489 | if (unlikely(retval)) { | 4451 | if (unlikely(retval)) { |
4490 | /* Queues are down..... */ | 4452 | /* Queues are down..... */ |
4491 | retval = -EIO; | ||
4492 | del_timer(&fsf_req->timer); | 4453 | del_timer(&fsf_req->timer); |
4493 | spin_lock(&adapter->req_list_lock); | 4454 | spin_lock(&adapter->req_list_lock); |
4494 | zfcp_reqlist_remove(adapter, fsf_req); | 4455 | zfcp_reqlist_remove(adapter, fsf_req); |
4495 | spin_unlock(&adapter->req_list_lock); | 4456 | spin_unlock(&adapter->req_list_lock); |
4496 | /* undo changes in request queue made for this request */ | 4457 | /* undo changes in request queue made for this request */ |
4497 | zfcp_qdio_zero_sbals(req_queue->buffer, | 4458 | atomic_add(fsf_req->sbal_number, &req_q->count); |
4498 | fsf_req->sbal_first, fsf_req->sbal_number); | 4459 | req_q->first -= fsf_req->sbal_number; |
4499 | atomic_add(fsf_req->sbal_number, &req_queue->free_count); | 4460 | req_q->first += QDIO_MAX_BUFFERS_PER_Q; |
4500 | req_queue->free_index -= fsf_req->sbal_number; | 4461 | req_q->first %= QDIO_MAX_BUFFERS_PER_Q; |
4501 | req_queue->free_index += QDIO_MAX_BUFFERS_PER_Q; | ||
4502 | req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */ | ||
4503 | zfcp_erp_adapter_reopen(adapter, 0, 116, fsf_req); | 4462 | zfcp_erp_adapter_reopen(adapter, 0, 116, fsf_req); |
4463 | retval = -EIO; | ||
4504 | } else { | 4464 | } else { |
4505 | req_queue->distance_from_int = new_distance_from_int; | ||
4506 | /* | 4465 | /* |
4507 | * increase FSF sequence counter - | 4466 | * increase FSF sequence counter - |
4508 | * this must only be done for request successfully enqueued to | 4467 | * this must only be done for request successfully enqueued to |
@@ -4514,9 +4473,6 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req) | |||
4514 | /* Don't increase for unsolicited status */ | 4473 | /* Don't increase for unsolicited status */ |
4515 | if (inc_seq_no) | 4474 | if (inc_seq_no) |
4516 | adapter->fsf_req_seq_no++; | 4475 | adapter->fsf_req_seq_no++; |
4517 | |||
4518 | /* count FSF requests pending */ | ||
4519 | atomic_inc(&adapter->reqs_active); | ||
4520 | } | 4476 | } |
4521 | return retval; | 4477 | return retval; |
4522 | } | 4478 | } |
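The error branch above rolls the request queue's first index back by the number of SBALs the failed request had reserved; adding QDIO_MAX_BUFFERS_PER_Q before the modulo keeps the index non-negative when the subtraction steps past slot 0. A minimal user-space sketch of that wrap-around arithmetic (ring_rollback is a hypothetical helper, the example values are illustrative, not taken from the patch):

    #include <stdio.h>

    #define QDIO_MAX_BUFFERS_PER_Q 128    /* ring size used by QDIO */

    /* Step a ring index back by n slots; adding the ring size first keeps
     * the left operand of % non-negative (n never exceeds the ring size). */
    static int ring_rollback(int first, int n)
    {
        first -= n;
        first += QDIO_MAX_BUFFERS_PER_Q;
        first %= QDIO_MAX_BUFFERS_PER_Q;
        return first;
    }

    int main(void)
    {
        /* a request that had reserved 3 SBALs starting at slot 1 */
        printf("%d\n", ring_rollback(1, 3));    /* prints 126 */
        return 0;
    }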
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index e71547357f62..bd6561d53589 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c | |||
@@ -1,241 +1,92 @@ | |||
1 | /* | 1 | /* |
2 | * This file is part of the zfcp device driver for | 2 | * zfcp device driver |
3 | * FCP adapters for IBM System z9 and zSeries. | ||
4 | * | 3 | * |
5 | * (C) Copyright IBM Corp. 2002, 2006 | 4 | * Setup and helper functions to access QDIO. |
6 | * | 5 | * |
7 | * This program is free software; you can redistribute it and/or modify | 6 | * Copyright IBM Corporation 2002, 2008 |
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2, or (at your option) | ||
10 | * any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
20 | */ | 7 | */ |
21 | 8 | ||
22 | #include "zfcp_ext.h" | 9 | #include "zfcp_ext.h" |
23 | 10 | ||
24 | static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *, int); | 11 | /* FIXME(tune): free space should be one max. SBAL chain plus what? */ |
25 | static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_get | 12 | #define ZFCP_QDIO_PCI_INTERVAL (QDIO_MAX_BUFFERS_PER_Q \ |
26 | (struct zfcp_qdio_queue *, int, int); | 13 | - (ZFCP_MAX_SBALS_PER_REQ + 4)) |
27 | static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_resp | ||
28 | (struct zfcp_fsf_req *, int, int); | ||
29 | static volatile struct qdio_buffer_element *zfcp_qdio_sbal_chain | ||
30 | (struct zfcp_fsf_req *, unsigned long); | ||
31 | static volatile struct qdio_buffer_element *zfcp_qdio_sbale_next | ||
32 | (struct zfcp_fsf_req *, unsigned long); | ||
33 | static int zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *, int, int); | ||
34 | static inline int zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *); | ||
35 | static void zfcp_qdio_sbale_fill | ||
36 | (struct zfcp_fsf_req *, unsigned long, void *, int); | ||
37 | static int zfcp_qdio_sbals_from_segment | ||
38 | (struct zfcp_fsf_req *, unsigned long, void *, unsigned long); | ||
39 | |||
40 | static qdio_handler_t zfcp_qdio_request_handler; | ||
41 | static qdio_handler_t zfcp_qdio_response_handler; | ||
42 | static int zfcp_qdio_handler_error_check(struct zfcp_adapter *, | ||
43 | unsigned int, unsigned int, unsigned int, int, int); | ||
44 | |||
45 | #define ZFCP_LOG_AREA ZFCP_LOG_AREA_QDIO | ||
46 | 14 | ||
47 | /* | 15 | static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal) |
48 | * Frees BUFFER memory for each of the pointers of the struct qdio_buffer array | ||
49 | * in the adapter struct sbuf is the pointer array. | ||
50 | * | ||
51 | * locks: must only be called with zfcp_data.config_sema taken | ||
52 | */ | ||
53 | static void | ||
54 | zfcp_qdio_buffers_dequeue(struct qdio_buffer **sbuf) | ||
55 | { | ||
56 | int pos; | ||
57 | |||
58 | for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE) | ||
59 | free_page((unsigned long) sbuf[pos]); | ||
60 | } | ||
61 | |||
62 | /* | ||
63 | * Allocates BUFFER memory to each of the pointers of the qdio_buffer_t | ||
64 | * array in the adapter struct. | ||
65 | * Cur_buf is the pointer array | ||
66 | * | ||
67 | * returns: zero on success else -ENOMEM | ||
68 | * locks: must only be called with zfcp_data.config_sema taken | ||
69 | */ | ||
70 | static int | ||
71 | zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbuf) | ||
72 | { | 16 | { |
73 | int pos; | 17 | int pos; |
74 | 18 | ||
75 | for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE) { | 19 | for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE) { |
76 | sbuf[pos] = (struct qdio_buffer *) get_zeroed_page(GFP_KERNEL); | 20 | sbal[pos] = (struct qdio_buffer *) get_zeroed_page(GFP_KERNEL); |
77 | if (!sbuf[pos]) { | 21 | if (!sbal[pos]) |
78 | zfcp_qdio_buffers_dequeue(sbuf); | ||
79 | return -ENOMEM; | 22 | return -ENOMEM; |
80 | } | ||
81 | } | 23 | } |
82 | for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos++) | 24 | for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos++) |
83 | if (pos % QBUFF_PER_PAGE) | 25 | if (pos % QBUFF_PER_PAGE) |
84 | sbuf[pos] = sbuf[pos - 1] + 1; | 26 | sbal[pos] = sbal[pos - 1] + 1; |
85 | return 0; | 27 | return 0; |
86 | } | 28 | } |
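The rewritten zfcp_qdio_buffers_enqueue allocates one zeroed page per QBUFF_PER_PAGE queue slots and lets every in-between slot point into the page of its predecessor, so a single page backs several consecutive SBALs. A user-space sketch of the same layout with simplified stand-in types (the real struct qdio_buffer_element carries more fields, and the kernel uses get_zeroed_page rather than aligned_alloc):

    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 4096
    #define QDIO_MAX_BUFFERS_PER_Q 128
    #define QDIO_MAX_ELEMENTS_PER_BUFFER 16

    struct qdio_buffer_element { void *addr; unsigned int length; unsigned int flags; };
    struct qdio_buffer { struct qdio_buffer_element element[QDIO_MAX_ELEMENTS_PER_BUFFER]; };

    #define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))

    static int buffers_enqueue(struct qdio_buffer **sbal)
    {
        size_t pos;

        /* one zeroed page per QBUFF_PER_PAGE slots */
        for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE) {
            sbal[pos] = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
            if (!sbal[pos])
                return -1;
            memset(sbal[pos], 0, PAGE_SIZE);
        }
        /* the remaining slots share the page of the preceding slot */
        for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos++)
            if (pos % QBUFF_PER_PAGE)
                sbal[pos] = sbal[pos - 1] + 1;
        return 0;
    }

    int main(void)
    {
        static struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];

        return buffers_enqueue(sbal) ? 1 : 0;
    }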
87 | 29 | ||
88 | /* locks: must only be called with zfcp_data.config_sema taken */ | 30 | static volatile struct qdio_buffer_element * |
89 | int | 31 | zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx) |
90 | zfcp_qdio_allocate_queues(struct zfcp_adapter *adapter) | ||
91 | { | 32 | { |
92 | int ret; | 33 | return &q->sbal[sbal_idx]->element[sbale_idx]; |
93 | |||
94 | ret = zfcp_qdio_buffers_enqueue(adapter->request_queue.buffer); | ||
95 | if (ret) | ||
96 | return ret; | ||
97 | return zfcp_qdio_buffers_enqueue(adapter->response_queue.buffer); | ||
98 | } | 34 | } |
99 | 35 | ||
100 | /* locks: must only be called with zfcp_data.config_sema taken */ | 36 | /** |
101 | void | 37 | * zfcp_qdio_free - free memory used by request- and response queue |
102 | zfcp_qdio_free_queues(struct zfcp_adapter *adapter) | 38 | * @adapter: pointer to the zfcp_adapter structure |
39 | */ | ||
40 | void zfcp_qdio_free(struct zfcp_adapter *adapter) | ||
103 | { | 41 | { |
104 | ZFCP_LOG_TRACE("freeing request_queue buffers\n"); | 42 | struct qdio_buffer **sbal_req, **sbal_resp; |
105 | zfcp_qdio_buffers_dequeue(adapter->request_queue.buffer); | 43 | int p; |
106 | |||
107 | ZFCP_LOG_TRACE("freeing response_queue buffers\n"); | ||
108 | zfcp_qdio_buffers_dequeue(adapter->response_queue.buffer); | ||
109 | } | ||
110 | 44 | ||
111 | int | 45 | if (adapter->ccw_device) |
112 | zfcp_qdio_allocate(struct zfcp_adapter *adapter) | 46 | qdio_free(adapter->ccw_device); |
113 | { | ||
114 | struct qdio_initialize *init_data; | ||
115 | 47 | ||
116 | init_data = &adapter->qdio_init_data; | 48 | sbal_req = adapter->req_q.sbal; |
49 | sbal_resp = adapter->resp_q.sbal; | ||
117 | 50 | ||
118 | init_data->cdev = adapter->ccw_device; | 51 | for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) { |
119 | init_data->q_format = QDIO_SCSI_QFMT; | 52 | free_page((unsigned long) sbal_req[p]); |
120 | memcpy(init_data->adapter_name, zfcp_get_busid_by_adapter(adapter), 8); | 53 | free_page((unsigned long) sbal_resp[p]); |
121 | ASCEBC(init_data->adapter_name, 8); | 54 | } |
122 | init_data->qib_param_field_format = 0; | ||
123 | init_data->qib_param_field = NULL; | ||
124 | init_data->input_slib_elements = NULL; | ||
125 | init_data->output_slib_elements = NULL; | ||
126 | init_data->min_input_threshold = ZFCP_MIN_INPUT_THRESHOLD; | ||
127 | init_data->max_input_threshold = ZFCP_MAX_INPUT_THRESHOLD; | ||
128 | init_data->min_output_threshold = ZFCP_MIN_OUTPUT_THRESHOLD; | ||
129 | init_data->max_output_threshold = ZFCP_MAX_OUTPUT_THRESHOLD; | ||
130 | init_data->no_input_qs = 1; | ||
131 | init_data->no_output_qs = 1; | ||
132 | init_data->input_handler = zfcp_qdio_response_handler; | ||
133 | init_data->output_handler = zfcp_qdio_request_handler; | ||
134 | init_data->int_parm = (unsigned long) adapter; | ||
135 | init_data->flags = QDIO_INBOUND_0COPY_SBALS | | ||
136 | QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS; | ||
137 | init_data->input_sbal_addr_array = | ||
138 | (void **) (adapter->response_queue.buffer); | ||
139 | init_data->output_sbal_addr_array = | ||
140 | (void **) (adapter->request_queue.buffer); | ||
141 | |||
142 | return qdio_allocate(init_data); | ||
143 | } | 55 | } |
144 | 56 | ||
145 | /* | 57 | static void zfcp_qdio_handler_error(struct zfcp_adapter *adapter, u8 id) |
146 | * function: zfcp_qdio_handler_error_check | ||
147 | * | ||
148 | * purpose: called by the response handler to determine error condition | ||
149 | * | ||
150 | * returns: error flag | ||
151 | * | ||
152 | */ | ||
153 | static int | ||
154 | zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, unsigned int status, | ||
155 | unsigned int qdio_error, unsigned int siga_error, | ||
156 | int first_element, int elements_processed) | ||
157 | { | 58 | { |
158 | int retval = 0; | 59 | dev_warn(&adapter->ccw_device->dev, "QDIO problem occurred.\n"); |
159 | 60 | ||
160 | if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) { | 61 | zfcp_erp_adapter_reopen(adapter, |
161 | retval = -EIO; | 62 | ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | |
162 | 63 | ZFCP_STATUS_COMMON_ERP_FAILED, id, NULL); | |
163 | ZFCP_LOG_INFO("QDIO problem occurred (status=0x%x, " | ||
164 | "qdio_error=0x%x, siga_error=0x%x)\n", | ||
165 | status, qdio_error, siga_error); | ||
166 | |||
167 | zfcp_hba_dbf_event_qdio(adapter, status, qdio_error, siga_error, | ||
168 | first_element, elements_processed); | ||
169 | /* | ||
170 | * Restarting IO on the failed adapter from scratch. | ||
171 | * Since we have been using this adapter, it is safe to assume | ||
172 | * that it is not failed but recoverable. The card seems to | ||
173 | * report link-up events by self-initiated queue shutdown. | ||
174 | * That is why we need to clear the link-down flag | ||
175 | * which is set again in case we have missed by a mile. | ||
176 | */ | ||
177 | zfcp_erp_adapter_reopen(adapter, | ||
178 | ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | | ||
179 | ZFCP_STATUS_COMMON_ERP_FAILED, 140, | ||
180 | NULL); | ||
181 | } | ||
182 | return retval; | ||
183 | } | 64 | } |
184 | 65 | ||
185 | /* | 66 | static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int status, |
186 | * function: zfcp_qdio_request_handler | 67 | unsigned int qdio_err, unsigned int siga_err, |
187 | * | 68 | unsigned int queue_no, int first, int count, |
188 | * purpose: is called by QDIO layer for completed SBALs in request queue | 69 | unsigned long parm) |
189 | * | ||
190 | * returns: (void) | ||
191 | */ | ||
192 | static void | ||
193 | zfcp_qdio_request_handler(struct ccw_device *ccw_device, | ||
194 | unsigned int status, | ||
195 | unsigned int qdio_error, | ||
196 | unsigned int siga_error, | ||
197 | unsigned int queue_number, | ||
198 | int first_element, | ||
199 | int elements_processed, | ||
200 | unsigned long int_parm) | ||
201 | { | 70 | { |
202 | struct zfcp_adapter *adapter; | 71 | struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm; |
203 | struct zfcp_qdio_queue *queue; | 72 | struct zfcp_qdio_queue *queue = &adapter->req_q; |
204 | |||
205 | adapter = (struct zfcp_adapter *) int_parm; | ||
206 | queue = &adapter->request_queue; | ||
207 | 73 | ||
208 | ZFCP_LOG_DEBUG("adapter %s, first=%d, elements_processed=%d\n", | 74 | if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) { |
209 | zfcp_get_busid_by_adapter(adapter), | 75 | zfcp_hba_dbf_event_qdio(adapter, status, qdio_err, siga_err, |
210 | first_element, elements_processed); | 76 | first, count); |
211 | 77 | zfcp_qdio_handler_error(adapter, 140); | |
212 | if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error, | 78 | return; |
213 | siga_error, first_element, | 79 | } |
214 | elements_processed))) | ||
215 | goto out; | ||
216 | /* | ||
217 | * we stored address of struct zfcp_adapter data structure | ||
218 | * associated with irq in int_parm | ||
219 | */ | ||
220 | 80 | ||
221 | /* cleanup all SBALs being program-owned now */ | 81 | /* cleanup all SBALs being program-owned now */ |
222 | zfcp_qdio_zero_sbals(queue->buffer, first_element, elements_processed); | 82 | zfcp_qdio_zero_sbals(queue->sbal, first, count); |
223 | 83 | ||
224 | /* increase free space in outbound queue */ | 84 | atomic_add(count, &queue->count); |
225 | atomic_add(elements_processed, &queue->free_count); | ||
226 | ZFCP_LOG_DEBUG("free_count=%d\n", atomic_read(&queue->free_count)); | ||
227 | wake_up(&adapter->request_wq); | 85 | wake_up(&adapter->request_wq); |
228 | ZFCP_LOG_DEBUG("elements_processed=%d, free count=%d\n", | ||
229 | elements_processed, atomic_read(&queue->free_count)); | ||
230 | out: | ||
231 | return; | ||
232 | } | 86 | } |
233 | 87 | ||
234 | /** | ||
235 | * zfcp_qdio_reqid_check - checks for valid reqids. | ||
236 | */ | ||
237 | static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, | 88 | static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, |
238 | unsigned long req_id, int sbal) | 89 | unsigned long req_id, int sbal_idx) |
239 | { | 90 | { |
240 | struct zfcp_fsf_req *fsf_req; | 91 | struct zfcp_fsf_req *fsf_req; |
241 | unsigned long flags; | 92 | unsigned long flags; |
@@ -248,204 +99,117 @@ static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, | |||
248 | * Unknown request means that we have potentially memory | 99 | * Unknown request means that we have potentially memory |
249 | * corruption and must stop the machine immediately. | 100 | * corruption and must stop the machine immediately. |
250 | */ | 101 | */ |
251 | panic("error: unknown request id (%ld) on adapter %s.\n", | 102 | panic("error: unknown request id (%lx) on adapter %s.\n", |
252 | req_id, zfcp_get_busid_by_adapter(adapter)); | 103 | req_id, zfcp_get_busid_by_adapter(adapter)); |
253 | 104 | ||
254 | zfcp_reqlist_remove(adapter, fsf_req); | 105 | zfcp_reqlist_remove(adapter, fsf_req); |
255 | atomic_dec(&adapter->reqs_active); | ||
256 | spin_unlock_irqrestore(&adapter->req_list_lock, flags); | 106 | spin_unlock_irqrestore(&adapter->req_list_lock, flags); |
257 | 107 | ||
258 | fsf_req->sbal_response = sbal; | 108 | fsf_req->sbal_response = sbal_idx; |
259 | /* finish the FSF request */ | ||
260 | zfcp_fsf_req_complete(fsf_req); | 109 | zfcp_fsf_req_complete(fsf_req); |
261 | } | 110 | } |
262 | 111 | ||
263 | /* | 112 | static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed) |
264 | * function: zfcp_qdio_response_handler | ||
265 | * | ||
266 | * purpose: is called by QDIO layer for completed SBALs in response queue | ||
267 | * | ||
268 | * returns: (void) | ||
269 | */ | ||
270 | static void | ||
271 | zfcp_qdio_response_handler(struct ccw_device *ccw_device, | ||
272 | unsigned int status, | ||
273 | unsigned int qdio_error, | ||
274 | unsigned int siga_error, | ||
275 | unsigned int queue_number, | ||
276 | int first_element, | ||
277 | int elements_processed, | ||
278 | unsigned long int_parm) | ||
279 | { | 113 | { |
280 | struct zfcp_adapter *adapter; | 114 | struct zfcp_qdio_queue *queue = &adapter->resp_q; |
281 | struct zfcp_qdio_queue *queue; | 115 | struct ccw_device *cdev = adapter->ccw_device; |
282 | int buffer_index; | 116 | u8 count, start = queue->first; |
283 | int i; | 117 | unsigned int retval; |
284 | struct qdio_buffer *buffer; | ||
285 | int retval = 0; | ||
286 | u8 count; | ||
287 | u8 start; | ||
288 | volatile struct qdio_buffer_element *buffere = NULL; | ||
289 | int buffere_index; | ||
290 | |||
291 | adapter = (struct zfcp_adapter *) int_parm; | ||
292 | queue = &adapter->response_queue; | ||
293 | |||
294 | if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error, | ||
295 | siga_error, first_element, | ||
296 | elements_processed))) | ||
297 | goto out; | ||
298 | 118 | ||
299 | /* | 119 | count = atomic_read(&queue->count) + processed; |
300 | * we stored address of struct zfcp_adapter data structure | 120 | |
301 | * associated with irq in int_parm | 121 | retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT, |
302 | */ | 122 | 0, start, count, NULL); |
123 | |||
124 | if (unlikely(retval)) { | ||
125 | atomic_set(&queue->count, count); | ||
126 | /* FIXME: Recover this with an adapter reopen? */ | ||
127 | } else { | ||
128 | queue->first += count; | ||
129 | queue->first %= QDIO_MAX_BUFFERS_PER_Q; | ||
130 | atomic_set(&queue->count, 0); | ||
131 | } | ||
132 | } | ||
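zfcp_qdio_resp_put_back folds the buffers processed in this interrupt together with any leftovers from an earlier failed attempt into one batch: on success the queue's first index advances by the whole batch and the pending count drops back to zero, on failure the batch size is remembered so the next call retries it. A sketch of that accounting, where hand_back_to_hw is a hypothetical stand-in for the do_QDIO call:

    #include <stdio.h>

    #define QDIO_MAX_BUFFERS_PER_Q 128

    struct resp_q {
        int first;    /* next slot to hand back to the adapter */
        int count;    /* slots processed but not yet handed back */
    };

    /* stand-in for do_QDIO: 0 = buffers accepted, nonzero = refused */
    static int hand_back_to_hw(int start, int count)
    {
        (void)start; (void)count;
        return 0;
    }

    static void resp_put_back(struct resp_q *q, int processed)
    {
        int count = q->count + processed;

        if (hand_back_to_hw(q->first, count)) {
            q->count = count;    /* keep the whole batch for a retry */
        } else {
            q->first = (q->first + count) % QDIO_MAX_BUFFERS_PER_Q;
            q->count = 0;        /* everything is adapter-owned again */
        }
    }

    int main(void)
    {
        struct resp_q q = { .first = 120, .count = 0 };

        resp_put_back(&q, 10);
        printf("first=%d count=%d\n", q.first, q.count);    /* first=2 count=0 */
        return 0;
    }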
133 | |||
134 | static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int status, | ||
135 | unsigned int qdio_err, unsigned int siga_err, | ||
136 | unsigned int queue_no, int first, int count, | ||
137 | unsigned long parm) | ||
138 | { | ||
139 | struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm; | ||
140 | struct zfcp_qdio_queue *queue = &adapter->resp_q; | ||
141 | volatile struct qdio_buffer_element *sbale; | ||
142 | int sbal_idx, sbale_idx, sbal_no; | ||
143 | |||
144 | if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) { | ||
145 | zfcp_hba_dbf_event_qdio(adapter, status, qdio_err, siga_err, | ||
146 | first, count); | ||
147 | zfcp_qdio_handler_error(adapter, 147); | ||
148 | return; | ||
149 | } | ||
303 | 150 | ||
304 | buffere = &(queue->buffer[first_element]->element[0]); | ||
305 | ZFCP_LOG_DEBUG("first BUFFERE flags=0x%x\n", buffere->flags); | ||
306 | /* | 151 | /* |
307 | * go through all SBALs from input queue currently | 152 | * go through all SBALs from input queue currently |
308 | * returned by QDIO layer | 153 | * returned by QDIO layer |
309 | */ | 154 | */ |
310 | 155 | for (sbal_no = 0; sbal_no < count; sbal_no++) { | |
311 | for (i = 0; i < elements_processed; i++) { | 156 | sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q; |
312 | |||
313 | buffer_index = first_element + i; | ||
314 | buffer_index %= QDIO_MAX_BUFFERS_PER_Q; | ||
315 | buffer = queue->buffer[buffer_index]; | ||
316 | 157 | ||
317 | /* go through all SBALEs of SBAL */ | 158 | /* go through all SBALEs of SBAL */ |
318 | for (buffere_index = 0; | 159 | for (sbale_idx = 0; sbale_idx < QDIO_MAX_ELEMENTS_PER_BUFFER; |
319 | buffere_index < QDIO_MAX_ELEMENTS_PER_BUFFER; | 160 | sbale_idx++) { |
320 | buffere_index++) { | 161 | sbale = zfcp_qdio_sbale(queue, sbal_idx, sbale_idx); |
321 | |||
322 | /* look for QDIO request identifiers in SB */ | ||
323 | buffere = &buffer->element[buffere_index]; | ||
324 | zfcp_qdio_reqid_check(adapter, | 162 | zfcp_qdio_reqid_check(adapter, |
325 | (unsigned long) buffere->addr, i); | 163 | (unsigned long) sbale->addr, |
326 | 164 | sbal_idx); | |
327 | /* | 165 | if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY)) |
328 | * A single used SBALE per inbound SBALE has been | ||
329 | * implemented by QDIO so far. Hope they will | ||
330 | * do some optimisation. Will need to change to | ||
331 | * unlikely() then. | ||
332 | */ | ||
333 | if (likely(buffere->flags & SBAL_FLAGS_LAST_ENTRY)) | ||
334 | break; | 166 | break; |
335 | }; | 167 | }; |
336 | 168 | ||
337 | if (unlikely(!(buffere->flags & SBAL_FLAGS_LAST_ENTRY))) { | 169 | if (unlikely(!(sbale->flags & SBAL_FLAGS_LAST_ENTRY))) |
338 | ZFCP_LOG_NORMAL("bug: End of inbound data " | 170 | dev_warn(&adapter->ccw_device->dev, |
339 | "not marked!\n"); | 171 | "Protocol violation by adapter. " |
340 | } | 172 | "Continuing operations.\n"); |
341 | } | 173 | } |
342 | 174 | ||
343 | /* | 175 | /* |
344 | * put range of SBALs back to response queue | 176 | * put range of SBALs back to response queue |
345 | * (including SBALs which have already been free before) | 177 | * (including SBALs which have already been free before) |
346 | */ | 178 | */ |
347 | count = atomic_read(&queue->free_count) + elements_processed; | 179 | zfcp_qdio_resp_put_back(adapter, count); |
348 | start = queue->free_index; | ||
349 | |||
350 | ZFCP_LOG_TRACE("calling do_QDIO on adapter %s (flags=0x%x, " | ||
351 | "queue_no=%i, index_in_queue=%i, count=%i, " | ||
352 | "buffers=0x%lx\n", | ||
353 | zfcp_get_busid_by_adapter(adapter), | ||
354 | QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT, | ||
355 | 0, start, count, (unsigned long) &queue->buffer[start]); | ||
356 | |||
357 | retval = do_QDIO(ccw_device, | ||
358 | QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT, | ||
359 | 0, start, count, NULL); | ||
360 | |||
361 | if (unlikely(retval)) { | ||
362 | atomic_set(&queue->free_count, count); | ||
363 | ZFCP_LOG_DEBUG("clearing of inbound data regions failed, " | ||
364 | "queues may be down " | ||
365 | "(count=%d, start=%d, retval=%d)\n", | ||
366 | count, start, retval); | ||
367 | } else { | ||
368 | queue->free_index += count; | ||
369 | queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; | ||
370 | atomic_set(&queue->free_count, 0); | ||
371 | ZFCP_LOG_TRACE("%i buffers enqueued to response " | ||
372 | "queue at position %i\n", count, start); | ||
373 | } | ||
374 | out: | ||
375 | return; | ||
376 | } | 180 | } |
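Each inbound SBAL delivered to zfcp_qdio_int_resp carries one request id per used SBALE, and the element flagged SBAL_FLAGS_LAST_ENTRY closes that SBAL. A simplified sketch of the walk, using stand-in structures and an illustrative flag value (in the driver the id comes from sbale->addr and is completed through zfcp_qdio_reqid_check):

    #include <stdio.h>

    #define QDIO_MAX_BUFFERS_PER_Q       128
    #define QDIO_MAX_ELEMENTS_PER_BUFFER 16
    #define SBAL_FLAGS_LAST_ENTRY        0x40    /* illustrative value */

    struct elem { unsigned long req_id; unsigned char flags; };
    struct sbal { struct elem element[QDIO_MAX_ELEMENTS_PER_BUFFER]; };

    /* Walk 'count' inbound SBALs starting at 'first' and report every
     * request id up to the element marked as the last entry of its SBAL. */
    static void walk_inbound(struct sbal *ring, int first, int count)
    {
        int sbal_no, idx, e;

        for (sbal_no = 0; sbal_no < count; sbal_no++) {
            idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
            for (e = 0; e < QDIO_MAX_ELEMENTS_PER_BUFFER; e++) {
                printf("SBAL %d: request id %lx\n",
                       idx, ring[idx].element[e].req_id);
                if (ring[idx].element[e].flags & SBAL_FLAGS_LAST_ENTRY)
                    break;
            }
        }
    }

    int main(void)
    {
        static struct sbal ring[QDIO_MAX_BUFFERS_PER_Q];

        ring[126].element[0].req_id = 0x1234;
        ring[126].element[0].flags = SBAL_FLAGS_LAST_ENTRY;
        walk_inbound(ring, 126, 1);
        return 0;
    }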
377 | 181 | ||
378 | /** | 182 | /** |
379 | * zfcp_qdio_sbale_get - return pointer to SBALE of qdio_queue | 183 | * zfcp_qdio_sbale_req - return ptr to SBALE of req_q for a struct zfcp_fsf_req |
380 | * @queue: queue from which SBALE should be returned | 184 | * @fsf_req: pointer to struct fsf_req |
381 | * @sbal: specifies number of SBAL in queue | 185 | * Returns: pointer to qdio_buffer_element (SBALE) structure |
382 | * @sbale: specifes number of SBALE in SBAL | ||
383 | */ | ||
384 | static inline volatile struct qdio_buffer_element * | ||
385 | zfcp_qdio_sbale_get(struct zfcp_qdio_queue *queue, int sbal, int sbale) | ||
386 | { | ||
387 | return &queue->buffer[sbal]->element[sbale]; | ||
388 | } | ||
389 | |||
390 | /** | ||
391 | * zfcp_qdio_sbale_req - return pointer to SBALE of request_queue for | ||
392 | * a struct zfcp_fsf_req | ||
393 | */ | 186 | */ |
394 | volatile struct qdio_buffer_element * | 187 | volatile struct qdio_buffer_element * |
395 | zfcp_qdio_sbale_req(struct zfcp_fsf_req *fsf_req, int sbal, int sbale) | 188 | zfcp_qdio_sbale_req(struct zfcp_fsf_req *req) |
396 | { | ||
397 | return zfcp_qdio_sbale_get(&fsf_req->adapter->request_queue, | ||
398 | sbal, sbale); | ||
399 | } | ||
400 | |||
401 | /** | ||
402 | * zfcp_qdio_sbale_resp - return pointer to SBALE of response_queue for | ||
403 | * a struct zfcp_fsf_req | ||
404 | */ | ||
405 | static inline volatile struct qdio_buffer_element * | ||
406 | zfcp_qdio_sbale_resp(struct zfcp_fsf_req *fsf_req, int sbal, int sbale) | ||
407 | { | 189 | { |
408 | return zfcp_qdio_sbale_get(&fsf_req->adapter->response_queue, | 190 | return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, 0); |
409 | sbal, sbale); | ||
410 | } | 191 | } |
411 | 192 | ||
412 | /** | 193 | /** |
413 | * zfcp_qdio_sbale_curr - return current SBALE on request_queue for | 194 | * zfcp_qdio_sbale_curr - return curr SBALE on req_q for a struct zfcp_fsf_req |
414 | * a struct zfcp_fsf_req | 195 | * @fsf_req: pointer to struct fsf_req |
196 | * Returns: pointer to qdio_buffer_element (SBALE) structure | ||
415 | */ | 197 | */ |
416 | volatile struct qdio_buffer_element * | 198 | volatile struct qdio_buffer_element * |
417 | zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req) | 199 | zfcp_qdio_sbale_curr(struct zfcp_fsf_req *req) |
418 | { | 200 | { |
419 | return zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, | 201 | return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, |
420 | fsf_req->sbale_curr); | 202 | req->sbale_curr); |
421 | } | 203 | } |
422 | 204 | ||
423 | /** | 205 | static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals) |
424 | * zfcp_qdio_sbal_limit - determine maximum number of SBALs that can be used | ||
425 | * on the request_queue for a struct zfcp_fsf_req | ||
426 | * @fsf_req: the number of the last SBAL that can be used is stored herein | ||
427 | * @max_sbals: used to pass an upper limit for the number of SBALs | ||
428 | * | ||
429 | * Note: We can assume at least one free SBAL in the request_queue when called. | ||
430 | */ | ||
431 | static void | ||
432 | zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals) | ||
433 | { | 206 | { |
434 | int count = atomic_read(&fsf_req->adapter->request_queue.free_count); | 207 | int count = atomic_read(&fsf_req->adapter->req_q.count); |
435 | count = min(count, max_sbals); | 208 | count = min(count, max_sbals); |
436 | fsf_req->sbal_limit = fsf_req->sbal_first; | 209 | fsf_req->sbal_limit = (fsf_req->sbal_first + count - 1) |
437 | fsf_req->sbal_limit += (count - 1); | 210 | % QDIO_MAX_BUFFERS_PER_Q; |
438 | fsf_req->sbal_limit %= QDIO_MAX_BUFFERS_PER_Q; | ||
439 | } | 211 | } |
440 | 212 | ||
441 | /** | ||
442 | * zfcp_qdio_sbal_chain - chain SBALs if more than one SBAL is needed for a | ||
443 | * request | ||
444 | * @fsf_req: zfcp_fsf_req to be processed | ||
445 | * @sbtype: SBAL flags which have to be set in first SBALE of new SBAL | ||
446 | * | ||
447 | * This function changes sbal_last, sbale_curr, sbal_number of fsf_req. | ||
448 | */ | ||
449 | static volatile struct qdio_buffer_element * | 213 | static volatile struct qdio_buffer_element * |
450 | zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) | 214 | zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) |
451 | { | 215 | { |
@@ -460,7 +224,7 @@ zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) | |||
460 | return NULL; | 224 | return NULL; |
461 | 225 | ||
462 | /* set chaining flag in first SBALE of current SBAL */ | 226 | /* set chaining flag in first SBALE of current SBAL */ |
463 | sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0); | 227 | sbale = zfcp_qdio_sbale_req(fsf_req); |
464 | sbale->flags |= SBAL_FLAGS0_MORE_SBALS; | 228 | sbale->flags |= SBAL_FLAGS0_MORE_SBALS; |
465 | 229 | ||
466 | /* calculate index of next SBAL */ | 230 | /* calculate index of next SBAL */ |
@@ -480,214 +244,271 @@ zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) | |||
480 | return sbale; | 244 | return sbale; |
481 | } | 245 | } |
482 | 246 | ||
483 | /** | ||
484 | * zfcp_qdio_sbale_next - switch to next SBALE, chain SBALs if needed | ||
485 | */ | ||
486 | static volatile struct qdio_buffer_element * | 247 | static volatile struct qdio_buffer_element * |
487 | zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) | 248 | zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) |
488 | { | 249 | { |
489 | if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) | 250 | if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) |
490 | return zfcp_qdio_sbal_chain(fsf_req, sbtype); | 251 | return zfcp_qdio_sbal_chain(fsf_req, sbtype); |
491 | |||
492 | fsf_req->sbale_curr++; | 252 | fsf_req->sbale_curr++; |
493 | |||
494 | return zfcp_qdio_sbale_curr(fsf_req); | 253 | return zfcp_qdio_sbale_curr(fsf_req); |
495 | } | 254 | } |
496 | 255 | ||
497 | /** | 256 | static void zfcp_qdio_undo_sbals(struct zfcp_fsf_req *fsf_req) |
498 | * zfcp_qdio_sbals_zero - initialize SBALs between first and last in queue | ||
499 | * with zero from | ||
500 | */ | ||
501 | static int | ||
502 | zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *queue, int first, int last) | ||
503 | { | 257 | { |
504 | struct qdio_buffer **buf = queue->buffer; | 258 | struct qdio_buffer **sbal = fsf_req->adapter->req_q.sbal; |
505 | int curr = first; | 259 | int first = fsf_req->sbal_first; |
506 | int count = 0; | 260 | int last = fsf_req->sbal_last; |
507 | 261 | int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) % | |
508 | for(;;) { | 262 | QDIO_MAX_BUFFERS_PER_Q + 1; |
509 | curr %= QDIO_MAX_BUFFERS_PER_Q; | 263 | zfcp_qdio_zero_sbals(sbal, first, count); |
510 | count++; | ||
511 | memset(buf[curr], 0, sizeof(struct qdio_buffer)); | ||
512 | if (curr == last) | ||
513 | break; | ||
514 | curr++; | ||
515 | } | ||
516 | return count; | ||
517 | } | 264 | } |
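zfcp_qdio_undo_sbals has to zero every SBAL between sbal_first and sbal_last even when the range wraps around the end of the ring; the expression (last - first + QDIO_MAX_BUFFERS_PER_Q) % QDIO_MAX_BUFFERS_PER_Q + 1 yields the inclusive length of such a span. A small sketch of the calculation (example values are illustrative):

    #include <stdio.h>

    #define QDIO_MAX_BUFFERS_PER_Q 128

    /* Inclusive number of slots from 'first' to 'last', allowing the
     * range to wrap around the end of the 128-slot ring. */
    static int span_len(int first, int last)
    {
        return (last - first + QDIO_MAX_BUFFERS_PER_Q)
               % QDIO_MAX_BUFFERS_PER_Q + 1;
    }

    int main(void)
    {
        printf("%d\n", span_len(5, 7));      /* 3: slots 5, 6, 7 */
        printf("%d\n", span_len(126, 1));    /* 4: slots 126, 127, 0, 1 */
        return 0;
    }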
518 | 265 | ||
519 | 266 | static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req, | |
520 | /** | 267 | unsigned int sbtype, void *start_addr, |
521 | * zfcp_qdio_sbals_wipe - reset all changes in SBALs for an fsf_req | 268 | unsigned int total_length) |
522 | */ | ||
523 | static inline int | ||
524 | zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *fsf_req) | ||
525 | { | ||
526 | return zfcp_qdio_sbals_zero(&fsf_req->adapter->request_queue, | ||
527 | fsf_req->sbal_first, fsf_req->sbal_last); | ||
528 | } | ||
529 | |||
530 | |||
531 | /** | ||
532 | * zfcp_qdio_sbale_fill - set address and length in current SBALE | ||
533 | * on request_queue | ||
534 | */ | ||
535 | static void | ||
536 | zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, | ||
537 | void *addr, int length) | ||
538 | { | 269 | { |
539 | volatile struct qdio_buffer_element *sbale; | 270 | volatile struct qdio_buffer_element *sbale; |
540 | |||
541 | sbale = zfcp_qdio_sbale_curr(fsf_req); | ||
542 | sbale->addr = addr; | ||
543 | sbale->length = length; | ||
544 | } | ||
545 | |||
546 | /** | ||
547 | * zfcp_qdio_sbals_from_segment - map memory segment to SBALE(s) | ||
548 | * @fsf_req: request to be processed | ||
549 | * @sbtype: SBALE flags | ||
550 | * @start_addr: address of memory segment | ||
551 | * @total_length: length of memory segment | ||
552 | * | ||
553 | * Alignment and length of the segment determine how many SBALEs are needed | ||
554 | * for the memory segment. | ||
555 | */ | ||
556 | static int | ||
557 | zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, | ||
558 | void *start_addr, unsigned long total_length) | ||
559 | { | ||
560 | unsigned long remaining, length; | 271 | unsigned long remaining, length; |
561 | void *addr; | 272 | void *addr; |
562 | 273 | ||
563 | /* split segment up heeding page boundaries */ | 274 | /* split segment up */ |
564 | for (addr = start_addr, remaining = total_length; remaining > 0; | 275 | for (addr = start_addr, remaining = total_length; remaining > 0; |
565 | addr += length, remaining -= length) { | 276 | addr += length, remaining -= length) { |
566 | /* get next free SBALE for new piece */ | 277 | sbale = zfcp_qdio_sbale_next(fsf_req, sbtype); |
567 | if (NULL == zfcp_qdio_sbale_next(fsf_req, sbtype)) { | 278 | if (!sbale) { |
568 | /* no SBALE left, clean up and leave */ | 279 | zfcp_qdio_undo_sbals(fsf_req); |
569 | zfcp_qdio_sbals_wipe(fsf_req); | ||
570 | return -EINVAL; | 280 | return -EINVAL; |
571 | } | 281 | } |
572 | /* calculate length of new piece */ | 282 | |
283 | /* new piece must not exceed next page boundary */ | ||
573 | length = min(remaining, | 284 | length = min(remaining, |
574 | (PAGE_SIZE - ((unsigned long) addr & | 285 | (PAGE_SIZE - ((unsigned long)addr & |
575 | (PAGE_SIZE - 1)))); | 286 | (PAGE_SIZE - 1)))); |
576 | /* fill current SBALE with calculated piece */ | 287 | sbale->addr = addr; |
577 | zfcp_qdio_sbale_fill(fsf_req, sbtype, addr, length); | 288 | sbale->length = length; |
578 | } | 289 | } |
579 | return total_length; | 290 | return 0; |
580 | } | 291 | } |
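zfcp_qdio_fill_sbals cuts a memory segment into pieces that never cross a page boundary, taking for each piece the smaller of what remains and the distance to the next boundary, one SBALE per piece. A user-space sketch of that length calculation (addresses and sizes are illustrative):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Split a buffer into pieces that never cross a page boundary,
     * mirroring the length calculation in zfcp_qdio_fill_sbals. */
    static void split_at_page_boundaries(unsigned long addr, unsigned long total)
    {
        unsigned long remaining, length;

        for (remaining = total; remaining > 0;
             addr += length, remaining -= length) {
            length = PAGE_SIZE - (addr & (PAGE_SIZE - 1));
            if (length > remaining)
                length = remaining;
            printf("piece at 0x%lx, %lu bytes\n", addr, length);
        }
    }

    int main(void)
    {
        /* 0x1f00 lies 256 bytes below a page boundary, so 6000 bytes
         * become pieces of 256, 4096 and 1648 bytes: three SBALEs. */
        split_at_page_boundaries(0x1f00, 6000);
        return 0;
    }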
581 | 292 | ||
582 | |||
583 | /** | 293 | /** |
584 | * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list | 294 | * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list |
585 | * @fsf_req: request to be processed | 295 | * @fsf_req: request to be processed |
586 | * @sbtype: SBALE flags | 296 | * @sbtype: SBALE flags |
587 | * @sg: scatter-gather list | 297 | * @sg: scatter-gather list |
588 | * @sg_count: number of elements in scatter-gather list | ||
589 | * @max_sbals: upper bound for number of SBALs to be used | 298 | * @max_sbals: upper bound for number of SBALs to be used |
299 | * Returns: number of bytes, or error (negative) | ||
590 | */ | 300 | */ |
591 | int | 301 | int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, |
592 | zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, | 302 | struct scatterlist *sg, int max_sbals) |
593 | struct scatterlist *sgl, int sg_count, int max_sbals) | ||
594 | { | 303 | { |
595 | int sg_index; | ||
596 | struct scatterlist *sg_segment; | ||
597 | int retval; | ||
598 | volatile struct qdio_buffer_element *sbale; | 304 | volatile struct qdio_buffer_element *sbale; |
599 | int bytes = 0; | 305 | int retval, bytes = 0; |
600 | 306 | ||
601 | /* figure out last allowed SBAL */ | 307 | /* figure out last allowed SBAL */ |
602 | zfcp_qdio_sbal_limit(fsf_req, max_sbals); | 308 | zfcp_qdio_sbal_limit(fsf_req, max_sbals); |
603 | 309 | ||
604 | /* set storage-block type for current SBAL */ | 310 | /* set storage-block type for this request */ |
605 | sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0); | 311 | sbale = zfcp_qdio_sbale_req(fsf_req); |
606 | sbale->flags |= sbtype; | 312 | sbale->flags |= sbtype; |
607 | 313 | ||
608 | /* process all segments of scatter-gather list */ | 314 | for (; sg; sg = sg_next(sg)) { |
609 | for_each_sg(sgl, sg_segment, sg_count, sg_index) { | 315 | retval = zfcp_qdio_fill_sbals(fsf_req, sbtype, sg_virt(sg), |
610 | retval = zfcp_qdio_sbals_from_segment( | 316 | sg->length); |
611 | fsf_req, | 317 | if (retval < 0) |
612 | sbtype, | 318 | return retval; |
613 | zfcp_sg_to_address(sg_segment), | 319 | bytes += sg->length; |
614 | sg_segment->length); | ||
615 | if (retval < 0) { | ||
616 | bytes = retval; | ||
617 | goto out; | ||
618 | } else | ||
619 | bytes += retval; | ||
620 | } | 320 | } |
321 | |||
621 | /* assume that no other SBALEs are to follow in the same SBAL */ | 322 | /* assume that no other SBALEs are to follow in the same SBAL */ |
622 | sbale = zfcp_qdio_sbale_curr(fsf_req); | 323 | sbale = zfcp_qdio_sbale_curr(fsf_req); |
623 | sbale->flags |= SBAL_FLAGS_LAST_ENTRY; | 324 | sbale->flags |= SBAL_FLAGS_LAST_ENTRY; |
624 | out: | 325 | |
625 | return bytes; | 326 | return bytes; |
626 | } | 327 | } |
627 | 328 | ||
628 | |||
629 | /** | 329 | /** |
630 | * zfcp_qdio_sbals_from_scsicmnd - fill SBALs from scsi command | 330 | * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO |
631 | * @fsf_req: request to be processed | 331 | * @fsf_req: pointer to struct zfcp_fsf_req |
632 | * @sbtype: SBALE flags | 332 | * Returns: 0 on success, error otherwise |
633 | * @scsi_cmnd: either scatter-gather list or buffer contained herein is used | ||
634 | * to fill SBALs | ||
635 | */ | 333 | */ |
636 | int | 334 | int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req) |
637 | zfcp_qdio_sbals_from_scsicmnd(struct zfcp_fsf_req *fsf_req, | ||
638 | unsigned long sbtype, struct scsi_cmnd *scsi_cmnd) | ||
639 | { | 335 | { |
640 | return zfcp_qdio_sbals_from_sg(fsf_req, sbtype, scsi_sglist(scsi_cmnd), | 336 | struct zfcp_adapter *adapter = fsf_req->adapter; |
641 | scsi_sg_count(scsi_cmnd), | 337 | struct zfcp_qdio_queue *req_q = &adapter->req_q; |
642 | ZFCP_MAX_SBALS_PER_REQ); | 338 | int first = fsf_req->sbal_first; |
339 | int count = fsf_req->sbal_number; | ||
340 | int retval, pci, pci_batch; | ||
341 | volatile struct qdio_buffer_element *sbale; | ||
342 | |||
343 | /* acknowledgements for transferred buffers */ | ||
344 | pci_batch = req_q->pci_batch + count; | ||
345 | if (unlikely(pci_batch >= ZFCP_QDIO_PCI_INTERVAL)) { | ||
346 | pci_batch %= ZFCP_QDIO_PCI_INTERVAL; | ||
347 | pci = first + count - (pci_batch + 1); | ||
348 | pci %= QDIO_MAX_BUFFERS_PER_Q; | ||
349 | sbale = zfcp_qdio_sbale(req_q, pci, 0); | ||
350 | sbale->flags |= SBAL_FLAGS0_PCI; | ||
351 | } | ||
352 | |||
353 | retval = do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0, first, | ||
354 | count, NULL); | ||
355 | if (unlikely(retval)) { | ||
356 | zfcp_qdio_zero_sbals(req_q->sbal, first, count); | ||
357 | return retval; | ||
358 | } | ||
359 | |||
360 | /* account for transferred buffers */ | ||
361 | atomic_sub(count, &req_q->count); | ||
362 | req_q->first += count; | ||
363 | req_q->first %= QDIO_MAX_BUFFERS_PER_Q; | ||
364 | req_q->pci_batch = pci_batch; | ||
365 | return 0; | ||
643 | } | 366 | } |
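zfcp_qdio_send keeps a running pci_batch counter and, once ZFCP_QDIO_PCI_INTERVAL or more SBALs have gone out since the last acknowledgement, sets SBAL_FLAGS0_PCI on the SBAL that crosses the interval so the adapter raises a program-controlled interrupt there. A sketch of that bookkeeping; PCI_INTERVAL is a stand-in for ZFCP_QDIO_PCI_INTERVAL and pci_slot is a hypothetical helper, not part of the patch:

    #include <stdio.h>

    #define QDIO_MAX_BUFFERS_PER_Q 128
    #define PCI_INTERVAL           88    /* stand-in for ZFCP_QDIO_PCI_INTERVAL */

    /* Given the SBALs sent since the last PCI (pci_batch) and a request of
     * 'count' SBALs starting at slot 'first', return the slot that should
     * carry the PCI flag, or -1 if no interrupt is due yet; the wrapped
     * batch counter is carried over to the next request. */
    static int pci_slot(int *pci_batch, int first, int count)
    {
        int batch = *pci_batch + count;
        int pci = -1;

        if (batch >= PCI_INTERVAL) {
            batch %= PCI_INTERVAL;
            pci = (first + count - (batch + 1)) % QDIO_MAX_BUFFERS_PER_Q;
        }
        *pci_batch = batch;
        return pci;
    }

    int main(void)
    {
        int pci_batch = 86;

        /* 5 SBALs at slots 10..14 push the batch from 86 to 91, which
         * wraps to 3; the PCI flag lands on slot 10 + 5 - 4 = 11. */
        printf("pci slot %d\n", pci_slot(&pci_batch, 10, 5));    /* 11 */
        printf("next batch %d\n", pci_batch);                    /* 3  */
        return 0;
    }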
644 | 367 | ||
645 | /** | 368 | /** |
646 | * zfcp_qdio_determine_pci - set PCI flag in first SBALE on qdio queue if needed | 369 | * zfcp_qdio_zero_sbals - zero all sbals of the specified area and queue |
370 | * @sbal: pointer to array of SBALs | ||
371 | * @first: integer specifying the SBAL number to start | ||
372 | * @count: integer specifying the number of SBALs to process | ||
647 | */ | 373 | */ |
648 | int | 374 | void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int count) |
649 | zfcp_qdio_determine_pci(struct zfcp_qdio_queue *req_queue, | ||
650 | struct zfcp_fsf_req *fsf_req) | ||
651 | { | 375 | { |
652 | int new_distance_from_int; | 376 | int i, sbal_idx; |
653 | int pci_pos; | ||
654 | volatile struct qdio_buffer_element *sbale; | ||
655 | 377 | ||
656 | new_distance_from_int = req_queue->distance_from_int + | 378 | for (i = first; i < first + count; i++) { |
657 | fsf_req->sbal_number; | 379 | sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q; |
658 | 380 | memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer)); | |
659 | if (unlikely(new_distance_from_int >= ZFCP_QDIO_PCI_INTERVAL)) { | ||
660 | new_distance_from_int %= ZFCP_QDIO_PCI_INTERVAL; | ||
661 | pci_pos = fsf_req->sbal_first; | ||
662 | pci_pos += fsf_req->sbal_number; | ||
663 | pci_pos -= new_distance_from_int; | ||
664 | pci_pos -= 1; | ||
665 | pci_pos %= QDIO_MAX_BUFFERS_PER_Q; | ||
666 | sbale = zfcp_qdio_sbale_req(fsf_req, pci_pos, 0); | ||
667 | sbale->flags |= SBAL_FLAGS0_PCI; | ||
668 | } | 381 | } |
669 | return new_distance_from_int; | ||
670 | } | 382 | } |
671 | 383 | ||
672 | /* | 384 | /** |
673 | * function: zfcp_zero_sbals | 385 | * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data |
674 | * | 386 | * @adapter: pointer to struct zfcp_adapter |
675 | * purpose: zeros specified range of SBALs | 387 | * Returns: -ENOMEM on memory allocation error or return value from |
676 | * | 388 | * qdio_allocate |
677 | * returns: | 389 | */ |
390 | int zfcp_qdio_allocate(struct zfcp_adapter *adapter) | ||
391 | { | ||
392 | struct qdio_initialize *init_data; | ||
393 | |||
394 | if (zfcp_qdio_buffers_enqueue(adapter->req_q.sbal) || | ||
395 | zfcp_qdio_buffers_enqueue(adapter->resp_q.sbal)) | ||
396 | return -ENOMEM; | ||
397 | |||
398 | init_data = &adapter->qdio_init_data; | ||
399 | |||
400 | init_data->cdev = adapter->ccw_device; | ||
401 | init_data->q_format = QDIO_ZFCP_QFMT; | ||
402 | memcpy(init_data->adapter_name, zfcp_get_busid_by_adapter(adapter), 8); | ||
403 | ASCEBC(init_data->adapter_name, 8); | ||
404 | init_data->qib_param_field_format = 0; | ||
405 | init_data->qib_param_field = NULL; | ||
406 | init_data->input_slib_elements = NULL; | ||
407 | init_data->output_slib_elements = NULL; | ||
408 | init_data->min_input_threshold = 1; | ||
409 | init_data->max_input_threshold = 5000; | ||
410 | init_data->min_output_threshold = 1; | ||
411 | init_data->max_output_threshold = 1000; | ||
412 | init_data->no_input_qs = 1; | ||
413 | init_data->no_output_qs = 1; | ||
414 | init_data->input_handler = zfcp_qdio_int_resp; | ||
415 | init_data->output_handler = zfcp_qdio_int_req; | ||
416 | init_data->int_parm = (unsigned long) adapter; | ||
417 | init_data->flags = QDIO_INBOUND_0COPY_SBALS | | ||
418 | QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS; | ||
419 | init_data->input_sbal_addr_array = | ||
420 | (void **) (adapter->resp_q.sbal); | ||
421 | init_data->output_sbal_addr_array = | ||
422 | (void **) (adapter->req_q.sbal); | ||
423 | |||
424 | return qdio_allocate(init_data); | ||
425 | } | ||
426 | |||
427 | /** | ||
428 | * zfcp_qdio_close - close qdio queues for an adapter | ||
678 | */ | 429 | */ |
679 | void | 430 | void zfcp_qdio_close(struct zfcp_adapter *adapter) |
680 | zfcp_qdio_zero_sbals(struct qdio_buffer *buf[], int first, int clean_count) | ||
681 | { | 431 | { |
682 | int cur_pos; | 432 | struct zfcp_qdio_queue *req_q; |
683 | int index; | 433 | int first, count; |
684 | 434 | ||
685 | for (cur_pos = first; cur_pos < (first + clean_count); cur_pos++) { | 435 | if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) |
686 | index = cur_pos % QDIO_MAX_BUFFERS_PER_Q; | 436 | return; |
687 | memset(buf[index], 0, sizeof (struct qdio_buffer)); | 437 | |
688 | ZFCP_LOG_TRACE("zeroing BUFFER %d at address %p\n", | 438 | /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */ |
689 | index, buf[index]); | 439 | req_q = &adapter->req_q; |
440 | write_lock_irq(&req_q->lock); | ||
441 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); | ||
442 | write_unlock_irq(&req_q->lock); | ||
443 | |||
444 | while (qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR) | ||
445 | == -EINPROGRESS) | ||
446 | ssleep(1); | ||
447 | |||
448 | /* cleanup used outbound sbals */ | ||
449 | count = atomic_read(&req_q->count); | ||
450 | if (count < QDIO_MAX_BUFFERS_PER_Q) { | ||
451 | first = (req_q->first + count) % QDIO_MAX_BUFFERS_PER_Q; | ||
452 | count = QDIO_MAX_BUFFERS_PER_Q - count; | ||
453 | zfcp_qdio_zero_sbals(req_q->sbal, first, count); | ||
690 | } | 454 | } |
455 | req_q->first = 0; | ||
456 | atomic_set(&req_q->count, 0); | ||
457 | req_q->pci_batch = 0; | ||
458 | adapter->resp_q.first = 0; | ||
459 | atomic_set(&adapter->resp_q.count, 0); | ||
691 | } | 460 | } |
692 | 461 | ||
693 | #undef ZFCP_LOG_AREA | 462 | /** |
463 | * zfcp_qdio_open - prepare and initialize response queue | ||
464 | * @adapter: pointer to struct zfcp_adapter | ||
465 | * Returns: 0 on success, otherwise -EIO | ||
466 | */ | ||
467 | int zfcp_qdio_open(struct zfcp_adapter *adapter) | ||
468 | { | ||
469 | volatile struct qdio_buffer_element *sbale; | ||
470 | int cc; | ||
471 | |||
472 | if (atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) | ||
473 | return -EIO; | ||
474 | |||
475 | if (qdio_establish(&adapter->qdio_init_data)) { | ||
476 | dev_err(&adapter->ccw_device->dev, | ||
477 | "Establish of QDIO queues failed.\n"); | ||
478 | return -EIO; | ||
479 | } | ||
480 | |||
481 | if (qdio_activate(adapter->ccw_device, 0)) { | ||
482 | dev_err(&adapter->ccw_device->dev, | ||
483 | "Activate of QDIO queues failed.\n"); | ||
484 | goto failed_qdio; | ||
485 | } | ||
486 | |||
487 | for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) { | ||
488 | sbale = &(adapter->resp_q.sbal[cc]->element[0]); | ||
489 | sbale->length = 0; | ||
490 | sbale->flags = SBAL_FLAGS_LAST_ENTRY; | ||
491 | sbale->addr = NULL; | ||
492 | } | ||
493 | |||
494 | if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0, | ||
495 | QDIO_MAX_BUFFERS_PER_Q, NULL)) { | ||
496 | dev_err(&adapter->ccw_device->dev, | ||
497 | "Init of QDIO response queue failed.\n"); | ||
498 | goto failed_qdio; | ||
499 | } | ||
500 | |||
501 | /* set index of first available SBAL / number of available SBALs */ | ||
502 | adapter->req_q.first = 0; | ||
503 | atomic_set(&adapter->req_q.count, QDIO_MAX_BUFFERS_PER_Q); | ||
504 | adapter->req_q.pci_batch = 0; | ||
505 | |||
506 | return 0; | ||
507 | |||
508 | failed_qdio: | ||
509 | while (qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR) | ||
510 | == -EINPROGRESS) | ||
511 | ssleep(1); | ||
512 | |||
513 | return -EIO; | ||
514 | } | ||