author		Linus Torvalds <torvalds@linux-foundation.org>	2008-07-15 21:58:04 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-15 21:58:04 -0400
commit		89a93f2f4834f8c126e8d9dd6b368d0b9e21ec3d (patch)
tree		e731456fec0cab1225ad3e806dc8d3efefa0a78b /drivers/s390/scsi/zfcp_qdio.c
parent		260eddf4391f162a69d1d163729249635fa7a78f (diff)
parent		fe9233fb6914a0eb20166c967e3020f7f0fba2c9 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (102 commits)
[SCSI] scsi_dh: fix kconfig related build errors
[SCSI] sym53c8xx: Fix bogus sym_que_entry re-implementation of container_of
[SCSI] scsi_cmnd.h: remove double inclusion of linux/blkdev.h
[SCSI] make struct scsi_{host,target}_type static
[SCSI] fix locking in host use of blk_plug_device()
[SCSI] zfcp: Cleanup external header file
[SCSI] zfcp: Cleanup code in zfcp_erp.c
[SCSI] zfcp: zfcp_fsf cleanup.
[SCSI] zfcp: consolidate sysfs things into one file.
[SCSI] zfcp: Cleanup of code in zfcp_aux.c
[SCSI] zfcp: Cleanup of code in zfcp_scsi.c
[SCSI] zfcp: Move status accessors from zfcp to SCSI include file.
[SCSI] zfcp: Small QDIO cleanups
[SCSI] zfcp: Adapter reopen for large number of unsolicited status
[SCSI] zfcp: Fix error checking for ELS ADISC requests
[SCSI] zfcp: wait until adapter is finished with ERP during auto-port
[SCSI] ibmvfc: IBM Power Virtual Fibre Channel Adapter Client Driver
[SCSI] sg: Add target reset support
[SCSI] lib: Add support for the T10 (SCSI) Data Integrity Field CRC
[SCSI] sd: Move scsi_disk() accessor function to sd.h
...
Diffstat (limited to 'drivers/s390/scsi/zfcp_qdio.c')
 -rw-r--r--	drivers/s390/scsi/zfcp_qdio.c	811
1 files changed, 314 insertions, 497 deletions
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 8ca5f074c687..72e3094796d4 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -1,241 +1,103 @@
 /*
- * This file is part of the zfcp device driver for
- * FCP adapters for IBM System z9 and zSeries.
+ * zfcp device driver
  *
- * (C) Copyright IBM Corp. 2002, 2006
+ * Setup and helper functions to access QDIO.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Copyright IBM Corporation 2002, 2008
  */

 #include "zfcp_ext.h"

-static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *, int);
-static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_get
-	(struct zfcp_qdio_queue *, int, int);
-static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_resp
-	(struct zfcp_fsf_req *, int, int);
-static volatile struct qdio_buffer_element *zfcp_qdio_sbal_chain
-	(struct zfcp_fsf_req *, unsigned long);
-static volatile struct qdio_buffer_element *zfcp_qdio_sbale_next
-	(struct zfcp_fsf_req *, unsigned long);
-static int zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *, int, int);
-static inline int zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *);
-static void zfcp_qdio_sbale_fill
-	(struct zfcp_fsf_req *, unsigned long, void *, int);
-static int zfcp_qdio_sbals_from_segment
-	(struct zfcp_fsf_req *, unsigned long, void *, unsigned long);
-
-static qdio_handler_t zfcp_qdio_request_handler;
-static qdio_handler_t zfcp_qdio_response_handler;
-static int zfcp_qdio_handler_error_check(struct zfcp_adapter *,
-	unsigned int, unsigned int, unsigned int, int, int);
-
-#define ZFCP_LOG_AREA ZFCP_LOG_AREA_QDIO
-
-/*
- * Frees BUFFER memory for each of the pointers of the struct qdio_buffer array
- * in the adapter struct sbuf is the pointer array.
- *
- * locks: must only be called with zfcp_data.config_sema taken
- */
-static void
-zfcp_qdio_buffers_dequeue(struct qdio_buffer **sbuf)
-{
-	int pos;
-
-	for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE)
-		free_page((unsigned long) sbuf[pos]);
-}
+/* FIXME(tune): free space should be one max. SBAL chain plus what? */
+#define ZFCP_QDIO_PCI_INTERVAL	(QDIO_MAX_BUFFERS_PER_Q \
+				 - (FSF_MAX_SBALS_PER_REQ + 4))
+#define QBUFF_PER_PAGE		(PAGE_SIZE / sizeof(struct qdio_buffer))

-/*
- * Allocates BUFFER memory to each of the pointers of the qdio_buffer_t
- * array in the adapter struct.
- * Cur_buf is the pointer array
- *
- * returns: zero on success else -ENOMEM
- * locks: must only be called with zfcp_data.config_sema taken
- */
-static int
-zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbuf)
+static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
 {
 	int pos;

 	for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE) {
-		sbuf[pos] = (struct qdio_buffer *) get_zeroed_page(GFP_KERNEL);
-		if (!sbuf[pos]) {
-			zfcp_qdio_buffers_dequeue(sbuf);
+		sbal[pos] = (struct qdio_buffer *) get_zeroed_page(GFP_KERNEL);
+		if (!sbal[pos])
 			return -ENOMEM;
-		}
 	}
 	for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos++)
 		if (pos % QBUFF_PER_PAGE)
-			sbuf[pos] = sbuf[pos - 1] + 1;
+			sbal[pos] = sbal[pos - 1] + 1;
 	return 0;
 }

-/* locks: must only be called with zfcp_data.config_sema taken */
-int
-zfcp_qdio_allocate_queues(struct zfcp_adapter *adapter)
+static volatile struct qdio_buffer_element *
+zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
 {
-	int ret;
-
-	ret = zfcp_qdio_buffers_enqueue(adapter->request_queue.buffer);
-	if (ret)
-		return ret;
-	return zfcp_qdio_buffers_enqueue(adapter->response_queue.buffer);
+	return &q->sbal[sbal_idx]->element[sbale_idx];
 }

-/* locks: must only be called with zfcp_data.config_sema taken */
-void
-zfcp_qdio_free_queues(struct zfcp_adapter *adapter)
+/**
+ * zfcp_qdio_free - free memory used by request- and resposne queue
+ * @adapter: pointer to the zfcp_adapter structure
+ */
+void zfcp_qdio_free(struct zfcp_adapter *adapter)
 {
-	ZFCP_LOG_TRACE("freeing request_queue buffers\n");
-	zfcp_qdio_buffers_dequeue(adapter->request_queue.buffer);
+	struct qdio_buffer **sbal_req, **sbal_resp;
+	int p;

-	ZFCP_LOG_TRACE("freeing response_queue buffers\n");
-	zfcp_qdio_buffers_dequeue(adapter->response_queue.buffer);
-}
+	if (adapter->ccw_device)
+		qdio_free(adapter->ccw_device);

-int
-zfcp_qdio_allocate(struct zfcp_adapter *adapter)
-{
-	struct qdio_initialize *init_data;
+	sbal_req = adapter->req_q.sbal;
+	sbal_resp = adapter->resp_q.sbal;

-	init_data = &adapter->qdio_init_data;
+	for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) {
+		free_page((unsigned long) sbal_req[p]);
+		free_page((unsigned long) sbal_resp[p]);
+	}
+}

-	init_data->cdev = adapter->ccw_device;
-	init_data->q_format = QDIO_SCSI_QFMT;
-	memcpy(init_data->adapter_name, zfcp_get_busid_by_adapter(adapter), 8);
-	ASCEBC(init_data->adapter_name, 8);
-	init_data->qib_param_field_format = 0;
-	init_data->qib_param_field = NULL;
-	init_data->input_slib_elements = NULL;
-	init_data->output_slib_elements = NULL;
-	init_data->min_input_threshold = ZFCP_MIN_INPUT_THRESHOLD;
-	init_data->max_input_threshold = ZFCP_MAX_INPUT_THRESHOLD;
-	init_data->min_output_threshold = ZFCP_MIN_OUTPUT_THRESHOLD;
-	init_data->max_output_threshold = ZFCP_MAX_OUTPUT_THRESHOLD;
-	init_data->no_input_qs = 1;
-	init_data->no_output_qs = 1;
-	init_data->input_handler = zfcp_qdio_response_handler;
-	init_data->output_handler = zfcp_qdio_request_handler;
-	init_data->int_parm = (unsigned long) adapter;
-	init_data->flags = QDIO_INBOUND_0COPY_SBALS |
-		QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS;
-	init_data->input_sbal_addr_array =
-		(void **) (adapter->response_queue.buffer);
-	init_data->output_sbal_addr_array =
-		(void **) (adapter->request_queue.buffer);
+static void zfcp_qdio_handler_error(struct zfcp_adapter *adapter, u8 id)
+{
+	dev_warn(&adapter->ccw_device->dev, "QDIO problem occurred.\n");

-	return qdio_allocate(init_data);
+	zfcp_erp_adapter_reopen(adapter,
+				ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
+				ZFCP_STATUS_COMMON_ERP_FAILED, id, NULL);
 }

-/*
- * function:	zfcp_qdio_handler_error_check
- *
- * purpose:	called by the response handler to determine error condition
- *
- * returns:	error flag
- *
- */
-static int
-zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, unsigned int status,
-			      unsigned int qdio_error, unsigned int siga_error,
-			      int first_element, int elements_processed)
+static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
 {
-	int retval = 0;
+	int i, sbal_idx;

-	if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) {
-		retval = -EIO;
-
-		ZFCP_LOG_INFO("QDIO problem occurred (status=0x%x, "
-			      "qdio_error=0x%x, siga_error=0x%x)\n",
-			      status, qdio_error, siga_error);
-
-		zfcp_hba_dbf_event_qdio(adapter, status, qdio_error, siga_error,
-					first_element, elements_processed);
-		/*
-		 * Restarting IO on the failed adapter from scratch.
-		 * Since we have been using this adapter, it is save to assume
-		 * that it is not failed but recoverable. The card seems to
-		 * report link-up events by self-initiated queue shutdown.
-		 * That is why we need to clear the link-down flag
-		 * which is set again in case we have missed by a mile.
-		 */
-		zfcp_erp_adapter_reopen(adapter,
-					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
-					ZFCP_STATUS_COMMON_ERP_FAILED, 140,
-					NULL);
+	for (i = first; i < first + cnt; i++) {
+		sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q;
+		memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer));
 	}
-	return retval;
 }

-/*
- * function:	zfcp_qdio_request_handler
- *
- * purpose:	is called by QDIO layer for completed SBALs in request queue
- *
- * returns:	(void)
- */
-static void
-zfcp_qdio_request_handler(struct ccw_device *ccw_device,
-			  unsigned int status,
-			  unsigned int qdio_error,
-			  unsigned int siga_error,
-			  unsigned int queue_number,
-			  int first_element,
-			  int elements_processed,
-			  unsigned long int_parm)
+static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int status,
+			      unsigned int qdio_err, unsigned int siga_err,
+			      unsigned int queue_no, int first, int count,
+			      unsigned long parm)
 {
-	struct zfcp_adapter *adapter;
-	struct zfcp_qdio_queue *queue;
-
-	adapter = (struct zfcp_adapter *) int_parm;
-	queue = &adapter->request_queue;
-
-	ZFCP_LOG_DEBUG("adapter %s, first=%d, elements_processed=%d\n",
-		       zfcp_get_busid_by_adapter(adapter),
-		       first_element, elements_processed);
+	struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
+	struct zfcp_qdio_queue *queue = &adapter->req_q;

-	if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error,
-						   siga_error, first_element,
-						   elements_processed)))
-		goto out;
-	/*
-	 * we stored address of struct zfcp_adapter data structure
-	 * associated with irq in int_parm
-	 */
+	if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) {
+		zfcp_hba_dbf_event_qdio(adapter, status, qdio_err, siga_err,
+					first, count);
+		zfcp_qdio_handler_error(adapter, 140);
+		return;
+	}

 	/* cleanup all SBALs being program-owned now */
-	zfcp_qdio_zero_sbals(queue->buffer, first_element, elements_processed);
+	zfcp_qdio_zero_sbals(queue->sbal, first, count);

-	/* increase free space in outbound queue */
-	atomic_add(elements_processed, &queue->free_count);
-	ZFCP_LOG_DEBUG("free_count=%d\n", atomic_read(&queue->free_count));
+	atomic_add(count, &queue->count);
 	wake_up(&adapter->request_wq);
-	ZFCP_LOG_DEBUG("elements_processed=%d, free count=%d\n",
-		       elements_processed, atomic_read(&queue->free_count));
- out:
-	return;
 }

-/**
- * zfcp_qdio_reqid_check - checks for valid reqids.
- */
 static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter,
-				  unsigned long req_id)
+				  unsigned long req_id, int sbal_idx)
 {
 	struct zfcp_fsf_req *fsf_req;
 	unsigned long flags;
@@ -248,203 +110,117 @@ static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter,
 		 * Unknown request means that we have potentially memory
 		 * corruption and must stop the machine immediatly.
 		 */
-		panic("error: unknown request id (%ld) on adapter %s.\n",
+		panic("error: unknown request id (%lx) on adapter %s.\n",
 		      req_id, zfcp_get_busid_by_adapter(adapter));

 	zfcp_reqlist_remove(adapter, fsf_req);
-	atomic_dec(&adapter->reqs_active);
 	spin_unlock_irqrestore(&adapter->req_list_lock, flags);

-	/* finish the FSF request */
+	fsf_req->sbal_response = sbal_idx;
 	zfcp_fsf_req_complete(fsf_req);
 }

-/*
- * function:	zfcp_qdio_response_handler
- *
- * purpose:	is called by QDIO layer for completed SBALs in response queue
- *
- * returns:	(void)
- */
-static void
-zfcp_qdio_response_handler(struct ccw_device *ccw_device,
-			   unsigned int status,
-			   unsigned int qdio_error,
-			   unsigned int siga_error,
-			   unsigned int queue_number,
-			   int first_element,
-			   int elements_processed,
-			   unsigned long int_parm)
+static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed)
 {
-	struct zfcp_adapter *adapter;
-	struct zfcp_qdio_queue *queue;
-	int buffer_index;
-	int i;
-	struct qdio_buffer *buffer;
-	int retval = 0;
-	u8 count;
-	u8 start;
-	volatile struct qdio_buffer_element *buffere = NULL;
-	int buffere_index;
-
-	adapter = (struct zfcp_adapter *) int_parm;
-	queue = &adapter->response_queue;
-
-	if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error,
-						   siga_error, first_element,
-						   elements_processed)))
-		goto out;
+	struct zfcp_qdio_queue *queue = &adapter->resp_q;
+	struct ccw_device *cdev = adapter->ccw_device;
+	u8 count, start = queue->first;
+	unsigned int retval;

-	/*
-	 * we stored address of struct zfcp_adapter data structure
-	 * associated with irq in int_parm
-	 */
+	count = atomic_read(&queue->count) + processed;
+
+	retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
+			 0, start, count, NULL);
+
+	if (unlikely(retval)) {
+		atomic_set(&queue->count, count);
+		/* FIXME: Recover this with an adapter reopen? */
+	} else {
+		queue->first += count;
+		queue->first %= QDIO_MAX_BUFFERS_PER_Q;
+		atomic_set(&queue->count, 0);
+	}
+}
+
+static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int status,
+			       unsigned int qdio_err, unsigned int siga_err,
+			       unsigned int queue_no, int first, int count,
+			       unsigned long parm)
+{
+	struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
+	struct zfcp_qdio_queue *queue = &adapter->resp_q;
+	volatile struct qdio_buffer_element *sbale;
+	int sbal_idx, sbale_idx, sbal_no;
+
+	if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) {
+		zfcp_hba_dbf_event_qdio(adapter, status, qdio_err, siga_err,
+					first, count);
+		zfcp_qdio_handler_error(adapter, 147);
+		return;
+	}

-	buffere = &(queue->buffer[first_element]->element[0]);
-	ZFCP_LOG_DEBUG("first BUFFERE flags=0x%x\n", buffere->flags);
 	/*
 	 * go through all SBALs from input queue currently
 	 * returned by QDIO layer
 	 */
-
-	for (i = 0; i < elements_processed; i++) {
-
-		buffer_index = first_element + i;
-		buffer_index %= QDIO_MAX_BUFFERS_PER_Q;
-		buffer = queue->buffer[buffer_index];
+	for (sbal_no = 0; sbal_no < count; sbal_no++) {
+		sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;

 		/* go through all SBALEs of SBAL */
-		for (buffere_index = 0;
-		     buffere_index < QDIO_MAX_ELEMENTS_PER_BUFFER;
-		     buffere_index++) {
-
-			/* look for QDIO request identifiers in SB */
-			buffere = &buffer->element[buffere_index];
+		for (sbale_idx = 0; sbale_idx < QDIO_MAX_ELEMENTS_PER_BUFFER;
+		     sbale_idx++) {
+			sbale = zfcp_qdio_sbale(queue, sbal_idx, sbale_idx);
 			zfcp_qdio_reqid_check(adapter,
-					      (unsigned long) buffere->addr);
-
-			/*
-			 * A single used SBALE per inbound SBALE has been
-			 * implemented by QDIO so far. Hope they will
-			 * do some optimisation. Will need to change to
-			 * unlikely() then.
-			 */
-			if (likely(buffere->flags & SBAL_FLAGS_LAST_ENTRY))
+					      (unsigned long) sbale->addr,
+					      sbal_idx);
+			if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY))
 				break;
 		};

-		if (unlikely(!(buffere->flags & SBAL_FLAGS_LAST_ENTRY))) {
-			ZFCP_LOG_NORMAL("bug: End of inbound data "
-					"not marked!\n");
-		}
+		if (unlikely(!(sbale->flags & SBAL_FLAGS_LAST_ENTRY)))
+			dev_warn(&adapter->ccw_device->dev,
+				 "Protocol violation by adapter. "
+				 "Continuing operations.\n");
 	}

 	/*
 	 * put range of SBALs back to response queue
 	 * (including SBALs which have already been free before)
 	 */
-	count = atomic_read(&queue->free_count) + elements_processed;
-	start = queue->free_index;
-
-	ZFCP_LOG_TRACE("calling do_QDIO on adapter %s (flags=0x%x, "
-		       "queue_no=%i, index_in_queue=%i, count=%i, "
-		       "buffers=0x%lx\n",
-		       zfcp_get_busid_by_adapter(adapter),
-		       QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
-		       0, start, count, (unsigned long) &queue->buffer[start]);
-
-	retval = do_QDIO(ccw_device,
-			 QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
-			 0, start, count, NULL);
-
-	if (unlikely(retval)) {
-		atomic_set(&queue->free_count, count);
-		ZFCP_LOG_DEBUG("clearing of inbound data regions failed, "
-			       "queues may be down "
-			       "(count=%d, start=%d, retval=%d)\n",
-			       count, start, retval);
-	} else {
-		queue->free_index += count;
-		queue->free_index %= QDIO_MAX_BUFFERS_PER_Q;
-		atomic_set(&queue->free_count, 0);
-		ZFCP_LOG_TRACE("%i buffers enqueued to response "
-			       "queue at position %i\n", count, start);
-	}
- out:
-	return;
-}
-
-/**
- * zfcp_qdio_sbale_get - return pointer to SBALE of qdio_queue
- * @queue: queue from which SBALE should be returned
- * @sbal: specifies number of SBAL in queue
- * @sbale: specifes number of SBALE in SBAL
- */
-static inline volatile struct qdio_buffer_element *
-zfcp_qdio_sbale_get(struct zfcp_qdio_queue *queue, int sbal, int sbale)
-{
-	return &queue->buffer[sbal]->element[sbale];
+	zfcp_qdio_resp_put_back(adapter, count);
 }

 /**
- * zfcp_qdio_sbale_req - return pointer to SBALE of request_queue for
- *	a struct zfcp_fsf_req
+ * zfcp_qdio_sbale_req - return ptr to SBALE of req_q for a struct zfcp_fsf_req
+ * @fsf_req: pointer to struct fsf_req
+ * Returns: pointer to qdio_buffer_element (SBALE) structure
  */
 volatile struct qdio_buffer_element *
-zfcp_qdio_sbale_req(struct zfcp_fsf_req *fsf_req, int sbal, int sbale)
+zfcp_qdio_sbale_req(struct zfcp_fsf_req *req)
 {
-	return zfcp_qdio_sbale_get(&fsf_req->adapter->request_queue,
-				   sbal, sbale);
+	return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, 0);
 }

 /**
- * zfcp_qdio_sbale_resp - return pointer to SBALE of response_queue for
- *	a struct zfcp_fsf_req
- */
-static inline volatile struct qdio_buffer_element *
-zfcp_qdio_sbale_resp(struct zfcp_fsf_req *fsf_req, int sbal, int sbale)
-{
-	return zfcp_qdio_sbale_get(&fsf_req->adapter->response_queue,
-				   sbal, sbale);
-}
-
-/**
- * zfcp_qdio_sbale_curr - return current SBALE on request_queue for
- *	a struct zfcp_fsf_req
+ * zfcp_qdio_sbale_curr - return curr SBALE on req_q for a struct zfcp_fsf_req
+ * @fsf_req: pointer to struct fsf_req
+ * Returns: pointer to qdio_buffer_element (SBALE) structure
  */
 volatile struct qdio_buffer_element *
-zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req)
+zfcp_qdio_sbale_curr(struct zfcp_fsf_req *req)
 {
-	return zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr,
-				   fsf_req->sbale_curr);
+	return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last,
+			       req->sbale_curr);
 }

-/**
- * zfcp_qdio_sbal_limit - determine maximum number of SBALs that can be used
- *	on the request_queue for a struct zfcp_fsf_req
- * @fsf_req: the number of the last SBAL that can be used is stored herein
- * @max_sbals: used to pass an upper limit for the number of SBALs
- *
- * Note: We can assume at least one free SBAL in the request_queue when called.
- */
-static void
-zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals)
+static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals)
 {
-	int count = atomic_read(&fsf_req->adapter->request_queue.free_count);
+	int count = atomic_read(&fsf_req->adapter->req_q.count);
 	count = min(count, max_sbals);
-	fsf_req->sbal_last = fsf_req->sbal_first;
-	fsf_req->sbal_last += (count - 1);
-	fsf_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;
+	fsf_req->sbal_limit = (fsf_req->sbal_first + count - 1)
+			      % QDIO_MAX_BUFFERS_PER_Q;
 }

-/**
- * zfcp_qdio_sbal_chain - chain SBALs if more than one SBAL is needed for a
- *	request
- * @fsf_req: zfcp_fsf_req to be processed
- * @sbtype: SBAL flags which have to be set in first SBALE of new SBAL
- *
- * This function changes sbal_curr, sbale_curr, sbal_number of fsf_req.
- */
 static volatile struct qdio_buffer_element *
 zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
 {
@@ -455,16 +231,16 @@ zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
 	sbale->flags |= SBAL_FLAGS_LAST_ENTRY;

 	/* don't exceed last allowed SBAL */
-	if (fsf_req->sbal_curr == fsf_req->sbal_last)
+	if (fsf_req->sbal_last == fsf_req->sbal_limit)
 		return NULL;

 	/* set chaining flag in first SBALE of current SBAL */
-	sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
+	sbale = zfcp_qdio_sbale_req(fsf_req);
 	sbale->flags |= SBAL_FLAGS0_MORE_SBALS;

 	/* calculate index of next SBAL */
-	fsf_req->sbal_curr++;
-	fsf_req->sbal_curr %= QDIO_MAX_BUFFERS_PER_Q;
+	fsf_req->sbal_last++;
+	fsf_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;

 	/* keep this requests number of SBALs up-to-date */
 	fsf_req->sbal_number++;
@@ -479,214 +255,255 @@ zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
 	return sbale;
 }

-/**
- * zfcp_qdio_sbale_next - switch to next SBALE, chain SBALs if needed
- */
 static volatile struct qdio_buffer_element *
 zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
 {
 	if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
 		return zfcp_qdio_sbal_chain(fsf_req, sbtype);
-
 	fsf_req->sbale_curr++;
-
 	return zfcp_qdio_sbale_curr(fsf_req);
 }

-/**
- * zfcp_qdio_sbals_zero - initialize SBALs between first and last in queue
- *	with zero from
- */
-static int
-zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *queue, int first, int last)
-{
-	struct qdio_buffer **buf = queue->buffer;
-	int curr = first;
-	int count = 0;
-
-	for(;;) {
-		curr %= QDIO_MAX_BUFFERS_PER_Q;
-		count++;
-		memset(buf[curr], 0, sizeof(struct qdio_buffer));
-		if (curr == last)
-			break;
-		curr++;
-	}
-	return count;
-}
-
-
-/**
- * zfcp_qdio_sbals_wipe - reset all changes in SBALs for an fsf_req
- */
-static inline int
-zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *fsf_req)
+static void zfcp_qdio_undo_sbals(struct zfcp_fsf_req *fsf_req)
 {
-	return zfcp_qdio_sbals_zero(&fsf_req->adapter->request_queue,
-				    fsf_req->sbal_first, fsf_req->sbal_curr);
+	struct qdio_buffer **sbal = fsf_req->adapter->req_q.sbal;
+	int first = fsf_req->sbal_first;
+	int last = fsf_req->sbal_last;
+	int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) %
+		    QDIO_MAX_BUFFERS_PER_Q + 1;
+	zfcp_qdio_zero_sbals(sbal, first, count);
 }

-
-/**
- * zfcp_qdio_sbale_fill - set address and length in current SBALE
- *	on request_queue
- */
-static void
-zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
-		     void *addr, int length)
+static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req,
+				unsigned int sbtype, void *start_addr,
+				unsigned int total_length)
 {
 	volatile struct qdio_buffer_element *sbale;
-
-	sbale = zfcp_qdio_sbale_curr(fsf_req);
-	sbale->addr = addr;
-	sbale->length = length;
-}
-
-/**
- * zfcp_qdio_sbals_from_segment - map memory segment to SBALE(s)
- * @fsf_req: request to be processed
- * @sbtype: SBALE flags
- * @start_addr: address of memory segment
- * @total_length: length of memory segment
- *
- * Alignment and length of the segment determine how many SBALEs are needed
- * for the memory segment.
- */
-static int
-zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
-			     void *start_addr, unsigned long total_length)
-{
 	unsigned long remaining, length;
 	void *addr;

-	/* split segment up heeding page boundaries */
+	/* split segment up */
 	for (addr = start_addr, remaining = total_length; remaining > 0;
 	     addr += length, remaining -= length) {
-		/* get next free SBALE for new piece */
-		if (NULL == zfcp_qdio_sbale_next(fsf_req, sbtype)) {
-			/* no SBALE left, clean up and leave */
-			zfcp_qdio_sbals_wipe(fsf_req);
+		sbale = zfcp_qdio_sbale_next(fsf_req, sbtype);
+		if (!sbale) {
+			zfcp_qdio_undo_sbals(fsf_req);
 			return -EINVAL;
 		}
-		/* calculate length of new piece */
+
+		/* new piece must not exceed next page boundary */
 		length = min(remaining,
-			     (PAGE_SIZE - ((unsigned long) addr &
+			     (PAGE_SIZE - ((unsigned long)addr &
 					   (PAGE_SIZE - 1))));
-		/* fill current SBALE with calculated piece */
-		zfcp_qdio_sbale_fill(fsf_req, sbtype, addr, length);
+		sbale->addr = addr;
+		sbale->length = length;
 	}
-	return total_length;
+	return 0;
 }

-
 /**
  * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
  * @fsf_req: request to be processed
  * @sbtype: SBALE flags
  * @sg: scatter-gather list
- * @sg_count: number of elements in scatter-gather list
  * @max_sbals: upper bound for number of SBALs to be used
+ * Returns: number of bytes, or error (negativ)
  */
-int
-zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
-			struct scatterlist *sgl, int sg_count, int max_sbals)
+int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
+			    struct scatterlist *sg, int max_sbals)
 {
-	int sg_index;
-	struct scatterlist *sg_segment;
-	int retval;
 	volatile struct qdio_buffer_element *sbale;
-	int bytes = 0;
+	int retval, bytes = 0;

 	/* figure out last allowed SBAL */
 	zfcp_qdio_sbal_limit(fsf_req, max_sbals);

-	/* set storage-block type for current SBAL */
-	sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
+	/* set storage-block type for this request */
+	sbale = zfcp_qdio_sbale_req(fsf_req);
 	sbale->flags |= sbtype;

-	/* process all segements of scatter-gather list */
-	for_each_sg(sgl, sg_segment, sg_count, sg_index) {
-		retval = zfcp_qdio_sbals_from_segment(
-				fsf_req,
-				sbtype,
-				zfcp_sg_to_address(sg_segment),
-				sg_segment->length);
-		if (retval < 0) {
-			bytes = retval;
-			goto out;
-		} else
-			bytes += retval;
+	for (; sg; sg = sg_next(sg)) {
+		retval = zfcp_qdio_fill_sbals(fsf_req, sbtype, sg_virt(sg),
+					      sg->length);
+		if (retval < 0)
+			return retval;
+		bytes += sg->length;
 	}
+
 	/* assume that no other SBALEs are to follow in the same SBAL */
 	sbale = zfcp_qdio_sbale_curr(fsf_req);
 	sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
- out:
+
 	return bytes;
 }

-
 /**
- * zfcp_qdio_sbals_from_scsicmnd - fill SBALs from scsi command
- * @fsf_req: request to be processed
- * @sbtype: SBALE flags
- * @scsi_cmnd: either scatter-gather list or buffer contained herein is used
- *	to fill SBALs
+ * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
+ * @fsf_req: pointer to struct zfcp_fsf_req
+ * Returns: 0 on success, error otherwise
  */
-int
-zfcp_qdio_sbals_from_scsicmnd(struct zfcp_fsf_req *fsf_req,
-			      unsigned long sbtype, struct scsi_cmnd *scsi_cmnd)
+int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req)
 {
-	return zfcp_qdio_sbals_from_sg(fsf_req, sbtype, scsi_sglist(scsi_cmnd),
-				       scsi_sg_count(scsi_cmnd),
-				       ZFCP_MAX_SBALS_PER_REQ);
+	struct zfcp_adapter *adapter = fsf_req->adapter;
+	struct zfcp_qdio_queue *req_q = &adapter->req_q;
+	int first = fsf_req->sbal_first;
+	int count = fsf_req->sbal_number;
+	int retval, pci, pci_batch;
+	volatile struct qdio_buffer_element *sbale;
+
+	/* acknowledgements for transferred buffers */
+	pci_batch = req_q->pci_batch + count;
+	if (unlikely(pci_batch >= ZFCP_QDIO_PCI_INTERVAL)) {
+		pci_batch %= ZFCP_QDIO_PCI_INTERVAL;
+		pci = first + count - (pci_batch + 1);
+		pci %= QDIO_MAX_BUFFERS_PER_Q;
+		sbale = zfcp_qdio_sbale(req_q, pci, 0);
+		sbale->flags |= SBAL_FLAGS0_PCI;
+	}
+
+	retval = do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0, first,
+			 count, NULL);
+	if (unlikely(retval)) {
+		zfcp_qdio_zero_sbals(req_q->sbal, first, count);
+		return retval;
+	}
+
+	/* account for transferred buffers */
+	atomic_sub(count, &req_q->count);
+	req_q->first += count;
+	req_q->first %= QDIO_MAX_BUFFERS_PER_Q;
+	req_q->pci_batch = pci_batch;
+	return 0;
 }

 /**
- * zfcp_qdio_determine_pci - set PCI flag in first SBALE on qdio queue if needed
+ * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
+ * @adapter: pointer to struct zfcp_adapter
+ * Returns: -ENOMEM on memory allocation error or return value from
+ *          qdio_allocate
  */
-int
-zfcp_qdio_determine_pci(struct zfcp_qdio_queue *req_queue,
-			struct zfcp_fsf_req *fsf_req)
+int zfcp_qdio_allocate(struct zfcp_adapter *adapter)
 {
-	int new_distance_from_int;
-	int pci_pos;
-	volatile struct qdio_buffer_element *sbale;
+	struct qdio_initialize *init_data;

-	new_distance_from_int = req_queue->distance_from_int +
-		fsf_req->sbal_number;
-
-	if (unlikely(new_distance_from_int >= ZFCP_QDIO_PCI_INTERVAL)) {
-		new_distance_from_int %= ZFCP_QDIO_PCI_INTERVAL;
-		pci_pos = fsf_req->sbal_first;
-		pci_pos += fsf_req->sbal_number;
-		pci_pos -= new_distance_from_int;
-		pci_pos -= 1;
-		pci_pos %= QDIO_MAX_BUFFERS_PER_Q;
-		sbale = zfcp_qdio_sbale_req(fsf_req, pci_pos, 0);
-		sbale->flags |= SBAL_FLAGS0_PCI;
-	}
-	return new_distance_from_int;
+	if (zfcp_qdio_buffers_enqueue(adapter->req_q.sbal) ||
+	    zfcp_qdio_buffers_enqueue(adapter->resp_q.sbal))
+		return -ENOMEM;
+
+	init_data = &adapter->qdio_init_data;
+
+	init_data->cdev = adapter->ccw_device;
+	init_data->q_format = QDIO_ZFCP_QFMT;
+	memcpy(init_data->adapter_name, zfcp_get_busid_by_adapter(adapter), 8);
+	ASCEBC(init_data->adapter_name, 8);
+	init_data->qib_param_field_format = 0;
+	init_data->qib_param_field = NULL;
+	init_data->input_slib_elements = NULL;
+	init_data->output_slib_elements = NULL;
+	init_data->min_input_threshold = 1;
+	init_data->max_input_threshold = 5000;
+	init_data->min_output_threshold = 1;
+	init_data->max_output_threshold = 1000;
+	init_data->no_input_qs = 1;
+	init_data->no_output_qs = 1;
+	init_data->input_handler = zfcp_qdio_int_resp;
+	init_data->output_handler = zfcp_qdio_int_req;
+	init_data->int_parm = (unsigned long) adapter;
+	init_data->flags = QDIO_INBOUND_0COPY_SBALS |
+			   QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS;
+	init_data->input_sbal_addr_array =
+			(void **) (adapter->resp_q.sbal);
+	init_data->output_sbal_addr_array =
+			(void **) (adapter->req_q.sbal);
+
+	return qdio_allocate(init_data);
 }

-/*
- * function:	zfcp_zero_sbals
- *
- * purpose:	zeros specified range of SBALs
- *
- * returns:
+/**
+ * zfcp_close_qdio - close qdio queues for an adapter
  */
-void
-zfcp_qdio_zero_sbals(struct qdio_buffer *buf[], int first, int clean_count)
+void zfcp_qdio_close(struct zfcp_adapter *adapter)
 {
-	int cur_pos;
-	int index;
+	struct zfcp_qdio_queue *req_q;
+	int first, count;

-	for (cur_pos = first; cur_pos < (first + clean_count); cur_pos++) {
-		index = cur_pos % QDIO_MAX_BUFFERS_PER_Q;
-		memset(buf[index], 0, sizeof (struct qdio_buffer));
-		ZFCP_LOG_TRACE("zeroing BUFFER %d at address %p\n",
-			       index, buf[index]);
+	if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status))
+		return;
+
+	/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
+	req_q = &adapter->req_q;
+	spin_lock(&req_q->lock);
+	atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
+	spin_unlock(&req_q->lock);
+
+	while (qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR)
+	       == -EINPROGRESS)
+		ssleep(1);
+
+	/* cleanup used outbound sbals */
+	count = atomic_read(&req_q->count);
+	if (count < QDIO_MAX_BUFFERS_PER_Q) {
+		first = (req_q->first + count) % QDIO_MAX_BUFFERS_PER_Q;
+		count = QDIO_MAX_BUFFERS_PER_Q - count;
+		zfcp_qdio_zero_sbals(req_q->sbal, first, count);
 	}
+	req_q->first = 0;
+	atomic_set(&req_q->count, 0);
+	req_q->pci_batch = 0;
+	adapter->resp_q.first = 0;
+	atomic_set(&adapter->resp_q.count, 0);
 }

-#undef ZFCP_LOG_AREA
+/**
+ * zfcp_qdio_open - prepare and initialize response queue
+ * @adapter: pointer to struct zfcp_adapter
+ * Returns: 0 on success, otherwise -EIO
+ */
+int zfcp_qdio_open(struct zfcp_adapter *adapter)
+{
+	volatile struct qdio_buffer_element *sbale;
+	int cc;
+
+	if (atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status))
+		return -EIO;
+
+	if (qdio_establish(&adapter->qdio_init_data)) {
+		dev_err(&adapter->ccw_device->dev,
+			"Establish of QDIO queues failed.\n");
+		return -EIO;
+	}
+
+	if (qdio_activate(adapter->ccw_device, 0)) {
+		dev_err(&adapter->ccw_device->dev,
+			"Activate of QDIO queues failed.\n");
+		goto failed_qdio;
+	}
+
+	for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
+		sbale = &(adapter->resp_q.sbal[cc]->element[0]);
+		sbale->length = 0;
+		sbale->flags = SBAL_FLAGS_LAST_ENTRY;
+		sbale->addr = NULL;
+	}
+
+	if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0,
+		    QDIO_MAX_BUFFERS_PER_Q, NULL)) {
+		dev_err(&adapter->ccw_device->dev,
+			"Init of QDIO response queue failed.\n");
+		goto failed_qdio;
+	}
+
+	/* set index of first avalable SBALS / number of available SBALS */
+	adapter->req_q.first = 0;
+	atomic_set(&adapter->req_q.count, QDIO_MAX_BUFFERS_PER_Q);
+	adapter->req_q.pci_batch = 0;
+
+	return 0;
+
+failed_qdio:
+	while (qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR)
+	       == -EINPROGRESS)
+		ssleep(1);
+
+	return -EIO;
+}