Diffstat (limited to 'drivers/s390/scsi/zfcp_qdio.c')
-rw-r--r-- | drivers/s390/scsi/zfcp_qdio.c | 815 |
1 file changed, 318 insertions, 497 deletions
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index e71547357f62..bd6561d53589 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -1,241 +1,92 @@ | |||
1 | /* | 1 | /* |
2 | * This file is part of the zfcp device driver for | 2 | * zfcp device driver |
3 | * FCP adapters for IBM System z9 and zSeries. | ||
4 | * | 3 | * |
5 | * (C) Copyright IBM Corp. 2002, 2006 | 4 | * Setup and helper functions to access QDIO. |
6 | * | 5 | * |
7 | * This program is free software; you can redistribute it and/or modify | 6 | * Copyright IBM Corporation 2002, 2008 |
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2, or (at your option) | ||
10 | * any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
20 | */ | 7 | */ |
21 | 8 | ||
22 | #include "zfcp_ext.h" | 9 | #include "zfcp_ext.h" |
23 | 10 | ||
24 | static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *, int); | 11 | /* FIXME(tune): free space should be one max. SBAL chain plus what? */ |
25 | static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_get | 12 | #define ZFCP_QDIO_PCI_INTERVAL (QDIO_MAX_BUFFERS_PER_Q \ |
26 | (struct zfcp_qdio_queue *, int, int); | 13 | - (ZFCP_MAX_SBALS_PER_REQ + 4)) |
27 | static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_resp | ||
28 | (struct zfcp_fsf_req *, int, int); | ||
29 | static volatile struct qdio_buffer_element *zfcp_qdio_sbal_chain | ||
30 | (struct zfcp_fsf_req *, unsigned long); | ||
31 | static volatile struct qdio_buffer_element *zfcp_qdio_sbale_next | ||
32 | (struct zfcp_fsf_req *, unsigned long); | ||
33 | static int zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *, int, int); | ||
34 | static inline int zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *); | ||
35 | static void zfcp_qdio_sbale_fill | ||
36 | (struct zfcp_fsf_req *, unsigned long, void *, int); | ||
37 | static int zfcp_qdio_sbals_from_segment | ||
38 | (struct zfcp_fsf_req *, unsigned long, void *, unsigned long); | ||
39 | |||
40 | static qdio_handler_t zfcp_qdio_request_handler; | ||
41 | static qdio_handler_t zfcp_qdio_response_handler; | ||
42 | static int zfcp_qdio_handler_error_check(struct zfcp_adapter *, | ||
43 | unsigned int, unsigned int, unsigned int, int, int); | ||
44 | |||
45 | #define ZFCP_LOG_AREA ZFCP_LOG_AREA_QDIO | ||
46 | 14 | ||
47 | /* | 15 | static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal) |
48 | * Frees BUFFER memory for each of the pointers of the struct qdio_buffer array | ||
49 | * in the adapter struct sbuf is the pointer array. | ||
50 | * | ||
51 | * locks: must only be called with zfcp_data.config_sema taken | ||
52 | */ | ||
53 | static void | ||
54 | zfcp_qdio_buffers_dequeue(struct qdio_buffer **sbuf) | ||
55 | { | ||
56 | int pos; | ||
57 | |||
58 | for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE) | ||
59 | free_page((unsigned long) sbuf[pos]); | ||
60 | } | ||
61 | |||
62 | /* | ||
63 | * Allocates BUFFER memory to each of the pointers of the qdio_buffer_t | ||
64 | * array in the adapter struct. | ||
65 | * Cur_buf is the pointer array | ||
66 | * | ||
67 | * returns: zero on success else -ENOMEM | ||
68 | * locks: must only be called with zfcp_data.config_sema taken | ||
69 | */ | ||
70 | static int | ||
71 | zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbuf) | ||
72 | { | 16 | { |
73 | int pos; | 17 | int pos; |
74 | 18 | ||
75 | for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE) { | 19 | for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE) { |
76 | sbuf[pos] = (struct qdio_buffer *) get_zeroed_page(GFP_KERNEL); | 20 | sbal[pos] = (struct qdio_buffer *) get_zeroed_page(GFP_KERNEL); |
77 | if (!sbuf[pos]) { | 21 | if (!sbal[pos]) |
78 | zfcp_qdio_buffers_dequeue(sbuf); | ||
79 | return -ENOMEM; | 22 | return -ENOMEM; |
80 | } | ||
81 | } | 23 | } |
82 | for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos++) | 24 | for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos++) |
83 | if (pos % QBUFF_PER_PAGE) | 25 | if (pos % QBUFF_PER_PAGE) |
84 | sbuf[pos] = sbuf[pos - 1] + 1; | 26 | sbal[pos] = sbal[pos - 1] + 1; |
85 | return 0; | 27 | return 0; |
86 | } | 28 | } |
87 | 29 | ||
88 | /* locks: must only be called with zfcp_data.config_sema taken */ | 30 | static volatile struct qdio_buffer_element * |
89 | int | 31 | zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx) |
90 | zfcp_qdio_allocate_queues(struct zfcp_adapter *adapter) | ||
91 | { | 32 | { |
92 | int ret; | 33 | return &q->sbal[sbal_idx]->element[sbale_idx]; |
93 | |||
94 | ret = zfcp_qdio_buffers_enqueue(adapter->request_queue.buffer); | ||
95 | if (ret) | ||
96 | return ret; | ||
97 | return zfcp_qdio_buffers_enqueue(adapter->response_queue.buffer); | ||
98 | } | 34 | } |
99 | 35 | ||
100 | /* locks: must only be called with zfcp_data.config_sema taken */ | 36 | /** |
101 | void | 37 | * zfcp_qdio_free - free memory used by request- and response queue |
102 | zfcp_qdio_free_queues(struct zfcp_adapter *adapter) | 38 | * @adapter: pointer to the zfcp_adapter structure |
39 | */ | ||
40 | void zfcp_qdio_free(struct zfcp_adapter *adapter) | ||
103 | { | 41 | { |
104 | ZFCP_LOG_TRACE("freeing request_queue buffers\n"); | 42 | struct qdio_buffer **sbal_req, **sbal_resp; |
105 | zfcp_qdio_buffers_dequeue(adapter->request_queue.buffer); | 43 | int p; |
106 | |||
107 | ZFCP_LOG_TRACE("freeing response_queue buffers\n"); | ||
108 | zfcp_qdio_buffers_dequeue(adapter->response_queue.buffer); | ||
109 | } | ||
110 | 44 | ||
111 | int | 45 | if (adapter->ccw_device) |
112 | zfcp_qdio_allocate(struct zfcp_adapter *adapter) | 46 | qdio_free(adapter->ccw_device); |
113 | { | ||
114 | struct qdio_initialize *init_data; | ||
115 | 47 | ||
116 | init_data = &adapter->qdio_init_data; | 48 | sbal_req = adapter->req_q.sbal; |
49 | sbal_resp = adapter->resp_q.sbal; | ||
117 | 50 | ||
118 | init_data->cdev = adapter->ccw_device; | 51 | for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) { |
119 | init_data->q_format = QDIO_SCSI_QFMT; | 52 | free_page((unsigned long) sbal_req[p]); |
120 | memcpy(init_data->adapter_name, zfcp_get_busid_by_adapter(adapter), 8); | 53 | free_page((unsigned long) sbal_resp[p]); |
121 | ASCEBC(init_data->adapter_name, 8); | 54 | } |
122 | init_data->qib_param_field_format = 0; | ||
123 | init_data->qib_param_field = NULL; | ||
124 | init_data->input_slib_elements = NULL; | ||
125 | init_data->output_slib_elements = NULL; | ||
126 | init_data->min_input_threshold = ZFCP_MIN_INPUT_THRESHOLD; | ||
127 | init_data->max_input_threshold = ZFCP_MAX_INPUT_THRESHOLD; | ||
128 | init_data->min_output_threshold = ZFCP_MIN_OUTPUT_THRESHOLD; | ||
129 | init_data->max_output_threshold = ZFCP_MAX_OUTPUT_THRESHOLD; | ||
130 | init_data->no_input_qs = 1; | ||
131 | init_data->no_output_qs = 1; | ||
132 | init_data->input_handler = zfcp_qdio_response_handler; | ||
133 | init_data->output_handler = zfcp_qdio_request_handler; | ||
134 | init_data->int_parm = (unsigned long) adapter; | ||
135 | init_data->flags = QDIO_INBOUND_0COPY_SBALS | | ||
136 | QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS; | ||
137 | init_data->input_sbal_addr_array = | ||
138 | (void **) (adapter->response_queue.buffer); | ||
139 | init_data->output_sbal_addr_array = | ||
140 | (void **) (adapter->request_queue.buffer); | ||
141 | |||
142 | return qdio_allocate(init_data); | ||
143 | } | 55 | } |
144 | 56 | ||
145 | /* | 57 | static void zfcp_qdio_handler_error(struct zfcp_adapter *adapter, u8 id) |
146 | * function: zfcp_qdio_handler_error_check | ||
147 | * | ||
148 | * purpose: called by the response handler to determine error condition | ||
149 | * | ||
150 | * returns: error flag | ||
151 | * | ||
152 | */ | ||
153 | static int | ||
154 | zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, unsigned int status, | ||
155 | unsigned int qdio_error, unsigned int siga_error, | ||
156 | int first_element, int elements_processed) | ||
157 | { | 58 | { |
158 | int retval = 0; | 59 | dev_warn(&adapter->ccw_device->dev, "QDIO problem occurred.\n"); |
159 | 60 | ||
160 | if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) { | 61 | zfcp_erp_adapter_reopen(adapter, |
161 | retval = -EIO; | 62 | ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | |
162 | 63 | ZFCP_STATUS_COMMON_ERP_FAILED, id, NULL); | |
163 | ZFCP_LOG_INFO("QDIO problem occurred (status=0x%x, " | ||
164 | "qdio_error=0x%x, siga_error=0x%x)\n", | ||
165 | status, qdio_error, siga_error); | ||
166 | |||
167 | zfcp_hba_dbf_event_qdio(adapter, status, qdio_error, siga_error, | ||
168 | first_element, elements_processed); | ||
169 | /* | ||
170 | * Restarting IO on the failed adapter from scratch. | ||
171 | * Since we have been using this adapter, it is safe to assume | ||
172 | * that it is not failed but recoverable. The card seems to | ||
173 | * report link-up events by self-initiated queue shutdown. | ||
174 | * That is why we need to clear the link-down flag | ||
175 | * which is set again in case we have missed by a mile. | ||
176 | */ | ||
177 | zfcp_erp_adapter_reopen(adapter, | ||
178 | ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | | ||
179 | ZFCP_STATUS_COMMON_ERP_FAILED, 140, | ||
180 | NULL); | ||
181 | } | ||
182 | return retval; | ||
183 | } | 64 | } |
184 | 65 | ||
185 | /* | 66 | static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int status, |
186 | * function: zfcp_qdio_request_handler | 67 | unsigned int qdio_err, unsigned int siga_err, |
187 | * | 68 | unsigned int queue_no, int first, int count, |
188 | * purpose: is called by QDIO layer for completed SBALs in request queue | 69 | unsigned long parm) |
189 | * | ||
190 | * returns: (void) | ||
191 | */ | ||
192 | static void | ||
193 | zfcp_qdio_request_handler(struct ccw_device *ccw_device, | ||
194 | unsigned int status, | ||
195 | unsigned int qdio_error, | ||
196 | unsigned int siga_error, | ||
197 | unsigned int queue_number, | ||
198 | int first_element, | ||
199 | int elements_processed, | ||
200 | unsigned long int_parm) | ||
201 | { | 70 | { |
202 | struct zfcp_adapter *adapter; | 71 | struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm; |
203 | struct zfcp_qdio_queue *queue; | 72 | struct zfcp_qdio_queue *queue = &adapter->req_q; |
204 | |||
205 | adapter = (struct zfcp_adapter *) int_parm; | ||
206 | queue = &adapter->request_queue; | ||
207 | 73 | ||
208 | ZFCP_LOG_DEBUG("adapter %s, first=%d, elements_processed=%d\n", | 74 | if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) { |
209 | zfcp_get_busid_by_adapter(adapter), | 75 | zfcp_hba_dbf_event_qdio(adapter, status, qdio_err, siga_err, |
210 | first_element, elements_processed); | 76 | first, count); |
211 | 77 | zfcp_qdio_handler_error(adapter, 140); | |
212 | if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error, | 78 | return; |
213 | siga_error, first_element, | 79 | } |
214 | elements_processed))) | ||
215 | goto out; | ||
216 | /* | ||
217 | * we stored address of struct zfcp_adapter data structure | ||
218 | * associated with irq in int_parm | ||
219 | */ | ||
220 | 80 | ||
221 | /* cleanup all SBALs being program-owned now */ | 81 | /* cleanup all SBALs being program-owned now */ |
222 | zfcp_qdio_zero_sbals(queue->buffer, first_element, elements_processed); | 82 | zfcp_qdio_zero_sbals(queue->sbal, first, count); |
223 | 83 | ||
224 | /* increase free space in outbound queue */ | 84 | atomic_add(count, &queue->count); |
225 | atomic_add(elements_processed, &queue->free_count); | ||
226 | ZFCP_LOG_DEBUG("free_count=%d\n", atomic_read(&queue->free_count)); | ||
227 | wake_up(&adapter->request_wq); | 85 | wake_up(&adapter->request_wq); |
228 | ZFCP_LOG_DEBUG("elements_processed=%d, free count=%d\n", | ||
229 | elements_processed, atomic_read(&queue->free_count)); | ||
230 | out: | ||
231 | return; | ||
232 | } | 86 | } |
233 | 87 | ||
234 | /** | ||
235 | * zfcp_qdio_reqid_check - checks for valid reqids. | ||
236 | */ | ||
237 | static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, | 88 | static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, |
238 | unsigned long req_id, int sbal) | 89 | unsigned long req_id, int sbal_idx) |
239 | { | 90 | { |
240 | struct zfcp_fsf_req *fsf_req; | 91 | struct zfcp_fsf_req *fsf_req; |
241 | unsigned long flags; | 92 | unsigned long flags; |
@@ -248,204 +99,117 @@ static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, | |||
248 | * Unknown request means that we have potentially memory | 99 | * Unknown request means that we have potentially memory |
249 | * corruption and must stop the machine immediately. | 100 | * corruption and must stop the machine immediately. |
250 | */ | 101 | */ |
251 | panic("error: unknown request id (%ld) on adapter %s.\n", | 102 | panic("error: unknown request id (%lx) on adapter %s.\n", |
252 | req_id, zfcp_get_busid_by_adapter(adapter)); | 103 | req_id, zfcp_get_busid_by_adapter(adapter)); |
253 | 104 | ||
254 | zfcp_reqlist_remove(adapter, fsf_req); | 105 | zfcp_reqlist_remove(adapter, fsf_req); |
255 | atomic_dec(&adapter->reqs_active); | ||
256 | spin_unlock_irqrestore(&adapter->req_list_lock, flags); | 106 | spin_unlock_irqrestore(&adapter->req_list_lock, flags); |
257 | 107 | ||
258 | fsf_req->sbal_response = sbal; | 108 | fsf_req->sbal_response = sbal_idx; |
259 | /* finish the FSF request */ | ||
260 | zfcp_fsf_req_complete(fsf_req); | 109 | zfcp_fsf_req_complete(fsf_req); |
261 | } | 110 | } |
262 | 111 | ||
263 | /* | 112 | static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed) |
264 | * function: zfcp_qdio_response_handler | ||
265 | * | ||
266 | * purpose: is called by QDIO layer for completed SBALs in response queue | ||
267 | * | ||
268 | * returns: (void) | ||
269 | */ | ||
270 | static void | ||
271 | zfcp_qdio_response_handler(struct ccw_device *ccw_device, | ||
272 | unsigned int status, | ||
273 | unsigned int qdio_error, | ||
274 | unsigned int siga_error, | ||
275 | unsigned int queue_number, | ||
276 | int first_element, | ||
277 | int elements_processed, | ||
278 | unsigned long int_parm) | ||
279 | { | 113 | { |
280 | struct zfcp_adapter *adapter; | 114 | struct zfcp_qdio_queue *queue = &adapter->resp_q; |
281 | struct zfcp_qdio_queue *queue; | 115 | struct ccw_device *cdev = adapter->ccw_device; |
282 | int buffer_index; | 116 | u8 count, start = queue->first; |
283 | int i; | 117 | unsigned int retval; |
284 | struct qdio_buffer *buffer; | ||
285 | int retval = 0; | ||
286 | u8 count; | ||
287 | u8 start; | ||
288 | volatile struct qdio_buffer_element *buffere = NULL; | ||
289 | int buffere_index; | ||
290 | |||
291 | adapter = (struct zfcp_adapter *) int_parm; | ||
292 | queue = &adapter->response_queue; | ||
293 | |||
294 | if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error, | ||
295 | siga_error, first_element, | ||
296 | elements_processed))) | ||
297 | goto out; | ||
298 | 118 | ||
299 | /* | 119 | count = atomic_read(&queue->count) + processed; |
300 | * we stored address of struct zfcp_adapter data structure | 120 | |
301 | * associated with irq in int_parm | 121 | retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT, |
302 | */ | 122 | 0, start, count, NULL); |
123 | |||
124 | if (unlikely(retval)) { | ||
125 | atomic_set(&queue->count, count); | ||
126 | /* FIXME: Recover this with an adapter reopen? */ | ||
127 | } else { | ||
128 | queue->first += count; | ||
129 | queue->first %= QDIO_MAX_BUFFERS_PER_Q; | ||
130 | atomic_set(&queue->count, 0); | ||
131 | } | ||
132 | } | ||
133 | |||
134 | static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int status, | ||
135 | unsigned int qdio_err, unsigned int siga_err, | ||
136 | unsigned int queue_no, int first, int count, | ||
137 | unsigned long parm) | ||
138 | { | ||
139 | struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm; | ||
140 | struct zfcp_qdio_queue *queue = &adapter->resp_q; | ||
141 | volatile struct qdio_buffer_element *sbale; | ||
142 | int sbal_idx, sbale_idx, sbal_no; | ||
143 | |||
144 | if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) { | ||
145 | zfcp_hba_dbf_event_qdio(adapter, status, qdio_err, siga_err, | ||
146 | first, count); | ||
147 | zfcp_qdio_handler_error(adapter, 147); | ||
148 | return; | ||
149 | } | ||
303 | 150 | ||
304 | buffere = &(queue->buffer[first_element]->element[0]); | ||
305 | ZFCP_LOG_DEBUG("first BUFFERE flags=0x%x\n", buffere->flags); | ||
306 | /* | 151 | /* |
307 | * go through all SBALs from input queue currently | 152 | * go through all SBALs from input queue currently |
308 | * returned by QDIO layer | 153 | * returned by QDIO layer |
309 | */ | 154 | */ |
310 | 155 | for (sbal_no = 0; sbal_no < count; sbal_no++) { | |
311 | for (i = 0; i < elements_processed; i++) { | 156 | sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q; |
312 | |||
313 | buffer_index = first_element + i; | ||
314 | buffer_index %= QDIO_MAX_BUFFERS_PER_Q; | ||
315 | buffer = queue->buffer[buffer_index]; | ||
316 | 157 | ||
317 | /* go through all SBALEs of SBAL */ | 158 | /* go through all SBALEs of SBAL */ |
318 | for (buffere_index = 0; | 159 | for (sbale_idx = 0; sbale_idx < QDIO_MAX_ELEMENTS_PER_BUFFER; |
319 | buffere_index < QDIO_MAX_ELEMENTS_PER_BUFFER; | 160 | sbale_idx++) { |
320 | buffere_index++) { | 161 | sbale = zfcp_qdio_sbale(queue, sbal_idx, sbale_idx); |
321 | |||
322 | /* look for QDIO request identifiers in SB */ | ||
323 | buffere = &buffer->element[buffere_index]; | ||
324 | zfcp_qdio_reqid_check(adapter, | 162 | zfcp_qdio_reqid_check(adapter, |
325 | (unsigned long) buffere->addr, i); | 163 | (unsigned long) sbale->addr, |
326 | 164 | sbal_idx); | |
327 | /* | 165 | if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY)) |
328 | * A single used SBALE per inbound SBALE has been | ||
329 | * implemented by QDIO so far. Hope they will | ||
330 | * do some optimisation. Will need to change to | ||
331 | * unlikely() then. | ||
332 | */ | ||
333 | if (likely(buffere->flags & SBAL_FLAGS_LAST_ENTRY)) | ||
334 | break; | 166 | break; |
335 | }; | 167 | }; |
336 | 168 | ||
337 | if (unlikely(!(buffere->flags & SBAL_FLAGS_LAST_ENTRY))) { | 169 | if (unlikely(!(sbale->flags & SBAL_FLAGS_LAST_ENTRY))) |
338 | ZFCP_LOG_NORMAL("bug: End of inbound data " | 170 | dev_warn(&adapter->ccw_device->dev, |
339 | "not marked!\n"); | 171 | "Protocol violation by adapter. " |
340 | } | 172 | "Continuing operations.\n"); |
341 | } | 173 | } |
342 | 174 | ||
343 | /* | 175 | /* |
344 | * put range of SBALs back to response queue | 176 | * put range of SBALs back to response queue |
346 | * (including SBALs which have already been freed before) | 178 | * (including SBALs which have already been freed before) |
346 | */ | 178 | */ |
347 | count = atomic_read(&queue->free_count) + elements_processed; | 179 | zfcp_qdio_resp_put_back(adapter, count); |
348 | start = queue->free_index; | ||
349 | |||
350 | ZFCP_LOG_TRACE("calling do_QDIO on adapter %s (flags=0x%x, " | ||
351 | "queue_no=%i, index_in_queue=%i, count=%i, " | ||
352 | "buffers=0x%lx\n", | ||
353 | zfcp_get_busid_by_adapter(adapter), | ||
354 | QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT, | ||
355 | 0, start, count, (unsigned long) &queue->buffer[start]); | ||
356 | |||
357 | retval = do_QDIO(ccw_device, | ||
358 | QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT, | ||
359 | 0, start, count, NULL); | ||
360 | |||
361 | if (unlikely(retval)) { | ||
362 | atomic_set(&queue->free_count, count); | ||
363 | ZFCP_LOG_DEBUG("clearing of inbound data regions failed, " | ||
364 | "queues may be down " | ||
365 | "(count=%d, start=%d, retval=%d)\n", | ||
366 | count, start, retval); | ||
367 | } else { | ||
368 | queue->free_index += count; | ||
369 | queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; | ||
370 | atomic_set(&queue->free_count, 0); | ||
371 | ZFCP_LOG_TRACE("%i buffers enqueued to response " | ||
372 | "queue at position %i\n", count, start); | ||
373 | } | ||
374 | out: | ||
375 | return; | ||
376 | } | 180 | } |
377 | 181 | ||
378 | /** | 182 | /** |
379 | * zfcp_qdio_sbale_get - return pointer to SBALE of qdio_queue | 183 | * zfcp_qdio_sbale_req - return ptr to SBALE of req_q for a struct zfcp_fsf_req |
380 | * @queue: queue from which SBALE should be returned | 184 | * @fsf_req: pointer to struct fsf_req |
381 | * @sbal: specifies number of SBAL in queue | 185 | * Returns: pointer to qdio_buffer_element (SBALE) structure |
382 | * @sbale: specifes number of SBALE in SBAL | ||
383 | */ | ||
384 | static inline volatile struct qdio_buffer_element * | ||
385 | zfcp_qdio_sbale_get(struct zfcp_qdio_queue *queue, int sbal, int sbale) | ||
386 | { | ||
387 | return &queue->buffer[sbal]->element[sbale]; | ||
388 | } | ||
389 | |||
390 | /** | ||
391 | * zfcp_qdio_sbale_req - return pointer to SBALE of request_queue for | ||
392 | * a struct zfcp_fsf_req | ||
393 | */ | 186 | */ |
394 | volatile struct qdio_buffer_element * | 187 | volatile struct qdio_buffer_element * |
395 | zfcp_qdio_sbale_req(struct zfcp_fsf_req *fsf_req, int sbal, int sbale) | 188 | zfcp_qdio_sbale_req(struct zfcp_fsf_req *req) |
396 | { | ||
397 | return zfcp_qdio_sbale_get(&fsf_req->adapter->request_queue, | ||
398 | sbal, sbale); | ||
399 | } | ||
400 | |||
401 | /** | ||
402 | * zfcp_qdio_sbale_resp - return pointer to SBALE of response_queue for | ||
403 | * a struct zfcp_fsf_req | ||
404 | */ | ||
405 | static inline volatile struct qdio_buffer_element * | ||
406 | zfcp_qdio_sbale_resp(struct zfcp_fsf_req *fsf_req, int sbal, int sbale) | ||
407 | { | 189 | { |
408 | return zfcp_qdio_sbale_get(&fsf_req->adapter->response_queue, | 190 | return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, 0); |
409 | sbal, sbale); | ||
410 | } | 191 | } |
411 | 192 | ||
412 | /** | 193 | /** |
413 | * zfcp_qdio_sbale_curr - return current SBALE on request_queue for | 194 | * zfcp_qdio_sbale_curr - return curr SBALE on req_q for a struct zfcp_fsf_req |
414 | * a struct zfcp_fsf_req | 195 | * @fsf_req: pointer to struct fsf_req |
196 | * Returns: pointer to qdio_buffer_element (SBALE) structure | ||
415 | */ | 197 | */ |
416 | volatile struct qdio_buffer_element * | 198 | volatile struct qdio_buffer_element * |
417 | zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req) | 199 | zfcp_qdio_sbale_curr(struct zfcp_fsf_req *req) |
418 | { | 200 | { |
419 | return zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, | 201 | return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, |
420 | fsf_req->sbale_curr); | 202 | req->sbale_curr); |
421 | } | 203 | } |
422 | 204 | ||
423 | /** | 205 | static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals) |
424 | * zfcp_qdio_sbal_limit - determine maximum number of SBALs that can be used | ||
425 | * on the request_queue for a struct zfcp_fsf_req | ||
426 | * @fsf_req: the number of the last SBAL that can be used is stored herein | ||
427 | * @max_sbals: used to pass an upper limit for the number of SBALs | ||
428 | * | ||
429 | * Note: We can assume at least one free SBAL in the request_queue when called. | ||
430 | */ | ||
431 | static void | ||
432 | zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals) | ||
433 | { | 206 | { |
434 | int count = atomic_read(&fsf_req->adapter->request_queue.free_count); | 207 | int count = atomic_read(&fsf_req->adapter->req_q.count); |
435 | count = min(count, max_sbals); | 208 | count = min(count, max_sbals); |
436 | fsf_req->sbal_limit = fsf_req->sbal_first; | 209 | fsf_req->sbal_limit = (fsf_req->sbal_first + count - 1) |
437 | fsf_req->sbal_limit += (count - 1); | 210 | % QDIO_MAX_BUFFERS_PER_Q; |
438 | fsf_req->sbal_limit %= QDIO_MAX_BUFFERS_PER_Q; | ||
439 | } | 211 | } |
440 | 212 | ||
441 | /** | ||
442 | * zfcp_qdio_sbal_chain - chain SBALs if more than one SBAL is needed for a | ||
443 | * request | ||
444 | * @fsf_req: zfcp_fsf_req to be processed | ||
445 | * @sbtype: SBAL flags which have to be set in first SBALE of new SBAL | ||
446 | * | ||
447 | * This function changes sbal_last, sbale_curr, sbal_number of fsf_req. | ||
448 | */ | ||
449 | static volatile struct qdio_buffer_element * | 213 | static volatile struct qdio_buffer_element * |
450 | zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) | 214 | zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) |
451 | { | 215 | { |
@@ -460,7 +224,7 @@ zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) | |||
460 | return NULL; | 224 | return NULL; |
461 | 225 | ||
462 | /* set chaining flag in first SBALE of current SBAL */ | 226 | /* set chaining flag in first SBALE of current SBAL */ |
463 | sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0); | 227 | sbale = zfcp_qdio_sbale_req(fsf_req); |
464 | sbale->flags |= SBAL_FLAGS0_MORE_SBALS; | 228 | sbale->flags |= SBAL_FLAGS0_MORE_SBALS; |
465 | 229 | ||
466 | /* calculate index of next SBAL */ | 230 | /* calculate index of next SBAL */ |
@@ -480,214 +244,271 @@ zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) | |||
480 | return sbale; | 244 | return sbale; |
481 | } | 245 | } |
482 | 246 | ||
483 | /** | ||
484 | * zfcp_qdio_sbale_next - switch to next SBALE, chain SBALs if needed | ||
485 | */ | ||
486 | static volatile struct qdio_buffer_element * | 247 | static volatile struct qdio_buffer_element * |
487 | zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) | 248 | zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) |
488 | { | 249 | { |
489 | if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) | 250 | if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) |
490 | return zfcp_qdio_sbal_chain(fsf_req, sbtype); | 251 | return zfcp_qdio_sbal_chain(fsf_req, sbtype); |
491 | |||
492 | fsf_req->sbale_curr++; | 252 | fsf_req->sbale_curr++; |
493 | |||
494 | return zfcp_qdio_sbale_curr(fsf_req); | 253 | return zfcp_qdio_sbale_curr(fsf_req); |
495 | } | 254 | } |
496 | 255 | ||
497 | /** | 256 | static void zfcp_qdio_undo_sbals(struct zfcp_fsf_req *fsf_req) |
498 | * zfcp_qdio_sbals_zero - initialize SBALs between first and last in queue | ||
499 | * with zero from | ||
500 | */ | ||
501 | static int | ||
502 | zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *queue, int first, int last) | ||
503 | { | 257 | { |
504 | struct qdio_buffer **buf = queue->buffer; | 258 | struct qdio_buffer **sbal = fsf_req->adapter->req_q.sbal; |
505 | int curr = first; | 259 | int first = fsf_req->sbal_first; |
506 | int count = 0; | 260 | int last = fsf_req->sbal_last; |
507 | 261 | int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) % | |
508 | for(;;) { | 262 | QDIO_MAX_BUFFERS_PER_Q + 1; |
509 | curr %= QDIO_MAX_BUFFERS_PER_Q; | 263 | zfcp_qdio_zero_sbals(sbal, first, count); |
510 | count++; | ||
511 | memset(buf[curr], 0, sizeof(struct qdio_buffer)); | ||
512 | if (curr == last) | ||
513 | break; | ||
514 | curr++; | ||
515 | } | ||
516 | return count; | ||
517 | } | 264 | } |
518 | 265 | ||
519 | 266 | static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req, | |
520 | /** | 267 | unsigned int sbtype, void *start_addr, |
521 | * zfcp_qdio_sbals_wipe - reset all changes in SBALs for an fsf_req | 268 | unsigned int total_length) |
522 | */ | ||
523 | static inline int | ||
524 | zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *fsf_req) | ||
525 | { | ||
526 | return zfcp_qdio_sbals_zero(&fsf_req->adapter->request_queue, | ||
527 | fsf_req->sbal_first, fsf_req->sbal_last); | ||
528 | } | ||
529 | |||
530 | |||
531 | /** | ||
532 | * zfcp_qdio_sbale_fill - set address and length in current SBALE | ||
533 | * on request_queue | ||
534 | */ | ||
535 | static void | ||
536 | zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, | ||
537 | void *addr, int length) | ||
538 | { | 269 | { |
539 | volatile struct qdio_buffer_element *sbale; | 270 | volatile struct qdio_buffer_element *sbale; |
540 | |||
541 | sbale = zfcp_qdio_sbale_curr(fsf_req); | ||
542 | sbale->addr = addr; | ||
543 | sbale->length = length; | ||
544 | } | ||
545 | |||
546 | /** | ||
547 | * zfcp_qdio_sbals_from_segment - map memory segment to SBALE(s) | ||
548 | * @fsf_req: request to be processed | ||
549 | * @sbtype: SBALE flags | ||
550 | * @start_addr: address of memory segment | ||
551 | * @total_length: length of memory segment | ||
552 | * | ||
553 | * Alignment and length of the segment determine how many SBALEs are needed | ||
554 | * for the memory segment. | ||
555 | */ | ||
556 | static int | ||
557 | zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, | ||
558 | void *start_addr, unsigned long total_length) | ||
559 | { | ||
560 | unsigned long remaining, length; | 271 | unsigned long remaining, length; |
561 | void *addr; | 272 | void *addr; |
562 | 273 | ||
563 | /* split segment up heeding page boundaries */ | 274 | /* split segment up */ |
564 | for (addr = start_addr, remaining = total_length; remaining > 0; | 275 | for (addr = start_addr, remaining = total_length; remaining > 0; |
565 | addr += length, remaining -= length) { | 276 | addr += length, remaining -= length) { |
566 | /* get next free SBALE for new piece */ | 277 | sbale = zfcp_qdio_sbale_next(fsf_req, sbtype); |
567 | if (NULL == zfcp_qdio_sbale_next(fsf_req, sbtype)) { | 278 | if (!sbale) { |
568 | /* no SBALE left, clean up and leave */ | 279 | zfcp_qdio_undo_sbals(fsf_req); |
569 | zfcp_qdio_sbals_wipe(fsf_req); | ||
570 | return -EINVAL; | 280 | return -EINVAL; |
571 | } | 281 | } |
572 | /* calculate length of new piece */ | 282 | |
283 | /* new piece must not exceed next page boundary */ | ||
573 | length = min(remaining, | 284 | length = min(remaining, |
574 | (PAGE_SIZE - ((unsigned long) addr & | 285 | (PAGE_SIZE - ((unsigned long)addr & |
575 | (PAGE_SIZE - 1)))); | 286 | (PAGE_SIZE - 1)))); |
576 | /* fill current SBALE with calculated piece */ | 287 | sbale->addr = addr; |
577 | zfcp_qdio_sbale_fill(fsf_req, sbtype, addr, length); | 288 | sbale->length = length; |
578 | } | 289 | } |
579 | return total_length; | 290 | return 0; |
580 | } | 291 | } |
581 | 292 | ||
582 | |||
583 | /** | 293 | /** |
584 | * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list | 294 | * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list |
585 | * @fsf_req: request to be processed | 295 | * @fsf_req: request to be processed |
586 | * @sbtype: SBALE flags | 296 | * @sbtype: SBALE flags |
587 | * @sg: scatter-gather list | 297 | * @sg: scatter-gather list |
588 | * @sg_count: number of elements in scatter-gather list | ||
589 | * @max_sbals: upper bound for number of SBALs to be used | 298 | * @max_sbals: upper bound for number of SBALs to be used |
299 | * Returns: number of bytes, or error (negative) | ||
590 | */ | 300 | */ |
591 | int | 301 | int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, |
592 | zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, | 302 | struct scatterlist *sg, int max_sbals) |
593 | struct scatterlist *sgl, int sg_count, int max_sbals) | ||
594 | { | 303 | { |
595 | int sg_index; | ||
596 | struct scatterlist *sg_segment; | ||
597 | int retval; | ||
598 | volatile struct qdio_buffer_element *sbale; | 304 | volatile struct qdio_buffer_element *sbale; |
599 | int bytes = 0; | 305 | int retval, bytes = 0; |
600 | 306 | ||
601 | /* figure out last allowed SBAL */ | 307 | /* figure out last allowed SBAL */ |
602 | zfcp_qdio_sbal_limit(fsf_req, max_sbals); | 308 | zfcp_qdio_sbal_limit(fsf_req, max_sbals); |
603 | 309 | ||
604 | /* set storage-block type for current SBAL */ | 310 | /* set storage-block type for this request */ |
605 | sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0); | 311 | sbale = zfcp_qdio_sbale_req(fsf_req); |
606 | sbale->flags |= sbtype; | 312 | sbale->flags |= sbtype; |
607 | 313 | ||
608 | /* process all segments of scatter-gather list */ | 314 | for (; sg; sg = sg_next(sg)) { |
609 | for_each_sg(sgl, sg_segment, sg_count, sg_index) { | 315 | retval = zfcp_qdio_fill_sbals(fsf_req, sbtype, sg_virt(sg), |
610 | retval = zfcp_qdio_sbals_from_segment( | 316 | sg->length); |
611 | fsf_req, | 317 | if (retval < 0) |
612 | sbtype, | 318 | return retval; |
613 | zfcp_sg_to_address(sg_segment), | 319 | bytes += sg->length; |
614 | sg_segment->length); | ||
615 | if (retval < 0) { | ||
616 | bytes = retval; | ||
617 | goto out; | ||
618 | } else | ||
619 | bytes += retval; | ||
620 | } | 320 | } |
321 | |||
621 | /* assume that no other SBALEs are to follow in the same SBAL */ | 322 | /* assume that no other SBALEs are to follow in the same SBAL */ |
622 | sbale = zfcp_qdio_sbale_curr(fsf_req); | 323 | sbale = zfcp_qdio_sbale_curr(fsf_req); |
623 | sbale->flags |= SBAL_FLAGS_LAST_ENTRY; | 324 | sbale->flags |= SBAL_FLAGS_LAST_ENTRY; |
624 | out: | 325 | |
625 | return bytes; | 326 | return bytes; |
626 | } | 327 | } |
627 | 328 | ||
628 | |||
629 | /** | 329 | /** |
630 | * zfcp_qdio_sbals_from_scsicmnd - fill SBALs from scsi command | 330 | * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO |
631 | * @fsf_req: request to be processed | 331 | * @fsf_req: pointer to struct zfcp_fsf_req |
632 | * @sbtype: SBALE flags | 332 | * Returns: 0 on success, error otherwise |
633 | * @scsi_cmnd: either scatter-gather list or buffer contained herein is used | ||
634 | * to fill SBALs | ||
635 | */ | 333 | */ |
636 | int | 334 | int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req) |
637 | zfcp_qdio_sbals_from_scsicmnd(struct zfcp_fsf_req *fsf_req, | ||
638 | unsigned long sbtype, struct scsi_cmnd *scsi_cmnd) | ||
639 | { | 335 | { |
640 | return zfcp_qdio_sbals_from_sg(fsf_req, sbtype, scsi_sglist(scsi_cmnd), | 336 | struct zfcp_adapter *adapter = fsf_req->adapter; |
641 | scsi_sg_count(scsi_cmnd), | 337 | struct zfcp_qdio_queue *req_q = &adapter->req_q; |
642 | ZFCP_MAX_SBALS_PER_REQ); | 338 | int first = fsf_req->sbal_first; |
339 | int count = fsf_req->sbal_number; | ||
340 | int retval, pci, pci_batch; | ||
341 | volatile struct qdio_buffer_element *sbale; | ||
342 | |||
343 | /* acknowledgements for transferred buffers */ | ||
344 | pci_batch = req_q->pci_batch + count; | ||
345 | if (unlikely(pci_batch >= ZFCP_QDIO_PCI_INTERVAL)) { | ||
346 | pci_batch %= ZFCP_QDIO_PCI_INTERVAL; | ||
347 | pci = first + count - (pci_batch + 1); | ||
348 | pci %= QDIO_MAX_BUFFERS_PER_Q; | ||
349 | sbale = zfcp_qdio_sbale(req_q, pci, 0); | ||
350 | sbale->flags |= SBAL_FLAGS0_PCI; | ||
351 | } | ||
352 | |||
353 | retval = do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0, first, | ||
354 | count, NULL); | ||
355 | if (unlikely(retval)) { | ||
356 | zfcp_qdio_zero_sbals(req_q->sbal, first, count); | ||
357 | return retval; | ||
358 | } | ||
359 | |||
360 | /* account for transferred buffers */ | ||
361 | atomic_sub(count, &req_q->count); | ||
362 | req_q->first += count; | ||
363 | req_q->first %= QDIO_MAX_BUFFERS_PER_Q; | ||
364 | req_q->pci_batch = pci_batch; | ||
365 | return 0; | ||
643 | } | 366 | } |
644 | 367 | ||
645 | /** | 368 | /** |
646 | * zfcp_qdio_determine_pci - set PCI flag in first SBALE on qdio queue if needed | 369 | * zfcp_qdio_zero_sbals - zero all sbals of the specified area and queue |
370 | * @buf: pointer to array of SBALS | ||
371 | * @first: integer specifying the SBAL number to start | ||
372 | * @count: integer specifying the number of SBALS to process | ||
647 | */ | 373 | */ |
648 | int | 374 | void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int count) |
649 | zfcp_qdio_determine_pci(struct zfcp_qdio_queue *req_queue, | ||
650 | struct zfcp_fsf_req *fsf_req) | ||
651 | { | 375 | { |
652 | int new_distance_from_int; | 376 | int i, sbal_idx; |
653 | int pci_pos; | ||
654 | volatile struct qdio_buffer_element *sbale; | ||
655 | 377 | ||
656 | new_distance_from_int = req_queue->distance_from_int + | 378 | for (i = first; i < first + count; i++) { |
657 | fsf_req->sbal_number; | 379 | sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q; |
658 | 380 | memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer)); | |
659 | if (unlikely(new_distance_from_int >= ZFCP_QDIO_PCI_INTERVAL)) { | ||
660 | new_distance_from_int %= ZFCP_QDIO_PCI_INTERVAL; | ||
661 | pci_pos = fsf_req->sbal_first; | ||
662 | pci_pos += fsf_req->sbal_number; | ||
663 | pci_pos -= new_distance_from_int; | ||
664 | pci_pos -= 1; | ||
665 | pci_pos %= QDIO_MAX_BUFFERS_PER_Q; | ||
666 | sbale = zfcp_qdio_sbale_req(fsf_req, pci_pos, 0); | ||
667 | sbale->flags |= SBAL_FLAGS0_PCI; | ||
668 | } | 381 | } |
669 | return new_distance_from_int; | ||
670 | } | 382 | } |
671 | 383 | ||
672 | /* | 384 | /** |
673 | * function: zfcp_zero_sbals | 385 | * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data |
674 | * | 386 | * @adapter: pointer to struct zfcp_adapter |
675 | * purpose: zeros specified range of SBALs | 387 | * Returns: -ENOMEM on memory allocation error or return value from |
676 | * | 388 | * qdio_allocate |
677 | * returns: | 389 | */ |
390 | int zfcp_qdio_allocate(struct zfcp_adapter *adapter) | ||
391 | { | ||
392 | struct qdio_initialize *init_data; | ||
393 | |||
394 | if (zfcp_qdio_buffers_enqueue(adapter->req_q.sbal) || | ||
395 | zfcp_qdio_buffers_enqueue(adapter->resp_q.sbal)) | ||
396 | return -ENOMEM; | ||
397 | |||
398 | init_data = &adapter->qdio_init_data; | ||
399 | |||
400 | init_data->cdev = adapter->ccw_device; | ||
401 | init_data->q_format = QDIO_ZFCP_QFMT; | ||
402 | memcpy(init_data->adapter_name, zfcp_get_busid_by_adapter(adapter), 8); | ||
403 | ASCEBC(init_data->adapter_name, 8); | ||
404 | init_data->qib_param_field_format = 0; | ||
405 | init_data->qib_param_field = NULL; | ||
406 | init_data->input_slib_elements = NULL; | ||
407 | init_data->output_slib_elements = NULL; | ||
408 | init_data->min_input_threshold = 1; | ||
409 | init_data->max_input_threshold = 5000; | ||
410 | init_data->min_output_threshold = 1; | ||
411 | init_data->max_output_threshold = 1000; | ||
412 | init_data->no_input_qs = 1; | ||
413 | init_data->no_output_qs = 1; | ||
414 | init_data->input_handler = zfcp_qdio_int_resp; | ||
415 | init_data->output_handler = zfcp_qdio_int_req; | ||
416 | init_data->int_parm = (unsigned long) adapter; | ||
417 | init_data->flags = QDIO_INBOUND_0COPY_SBALS | | ||
418 | QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS; | ||
419 | init_data->input_sbal_addr_array = | ||
420 | (void **) (adapter->resp_q.sbal); | ||
421 | init_data->output_sbal_addr_array = | ||
422 | (void **) (adapter->req_q.sbal); | ||
423 | |||
424 | return qdio_allocate(init_data); | ||
425 | } | ||
426 | |||
427 | /** | ||
428 | * zfcp_qdio_close - close qdio queues for an adapter | ||
678 | */ | 429 | */ |
679 | void | 430 | void zfcp_qdio_close(struct zfcp_adapter *adapter) |
680 | zfcp_qdio_zero_sbals(struct qdio_buffer *buf[], int first, int clean_count) | ||
681 | { | 431 | { |
682 | int cur_pos; | 432 | struct zfcp_qdio_queue *req_q; |
683 | int index; | 433 | int first, count; |
684 | 434 | ||
685 | for (cur_pos = first; cur_pos < (first + clean_count); cur_pos++) { | 435 | if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) |
686 | index = cur_pos % QDIO_MAX_BUFFERS_PER_Q; | 436 | return; |
687 | memset(buf[index], 0, sizeof (struct qdio_buffer)); | 437 | |
688 | ZFCP_LOG_TRACE("zeroing BUFFER %d at address %p\n", | 438 | /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */ |
689 | index, buf[index]); | 439 | req_q = &adapter->req_q; |
440 | write_lock_irq(&req_q->lock); | ||
441 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); | ||
442 | write_unlock_irq(&req_q->lock); | ||
443 | |||
444 | while (qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR) | ||
445 | == -EINPROGRESS) | ||
446 | ssleep(1); | ||
447 | |||
448 | /* cleanup used outbound sbals */ | ||
449 | count = atomic_read(&req_q->count); | ||
450 | if (count < QDIO_MAX_BUFFERS_PER_Q) { | ||
451 | first = (req_q->first + count) % QDIO_MAX_BUFFERS_PER_Q; | ||
452 | count = QDIO_MAX_BUFFERS_PER_Q - count; | ||
453 | zfcp_qdio_zero_sbals(req_q->sbal, first, count); | ||
690 | } | 454 | } |
455 | req_q->first = 0; | ||
456 | atomic_set(&req_q->count, 0); | ||
457 | req_q->pci_batch = 0; | ||
458 | adapter->resp_q.first = 0; | ||
459 | atomic_set(&adapter->resp_q.count, 0); | ||
691 | } | 460 | } |
692 | 461 | ||
693 | #undef ZFCP_LOG_AREA | 462 | /** |
463 | * zfcp_qdio_open - prepare and initialize response queue | ||
464 | * @adapter: pointer to struct zfcp_adapter | ||
465 | * Returns: 0 on success, otherwise -EIO | ||
466 | */ | ||
467 | int zfcp_qdio_open(struct zfcp_adapter *adapter) | ||
468 | { | ||
469 | volatile struct qdio_buffer_element *sbale; | ||
470 | int cc; | ||
471 | |||
472 | if (atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) | ||
473 | return -EIO; | ||
474 | |||
475 | if (qdio_establish(&adapter->qdio_init_data)) { | ||
476 | dev_err(&adapter->ccw_device->dev, | ||
477 | "Establish of QDIO queues failed.\n"); | ||
478 | return -EIO; | ||
479 | } | ||
480 | |||
481 | if (qdio_activate(adapter->ccw_device, 0)) { | ||
482 | dev_err(&adapter->ccw_device->dev, | ||
483 | "Activate of QDIO queues failed.\n"); | ||
484 | goto failed_qdio; | ||
485 | } | ||
486 | |||
487 | for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) { | ||
488 | sbale = &(adapter->resp_q.sbal[cc]->element[0]); | ||
489 | sbale->length = 0; | ||
490 | sbale->flags = SBAL_FLAGS_LAST_ENTRY; | ||
491 | sbale->addr = NULL; | ||
492 | } | ||
493 | |||
494 | if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0, | ||
495 | QDIO_MAX_BUFFERS_PER_Q, NULL)) { | ||
496 | dev_err(&adapter->ccw_device->dev, | ||
497 | "Init of QDIO response queue failed.\n"); | ||
498 | goto failed_qdio; | ||
499 | } | ||
500 | |||
501 | /* set index of first available SBALs / number of available SBALs */ | ||
502 | adapter->req_q.first = 0; | ||
503 | atomic_set(&adapter->req_q.count, QDIO_MAX_BUFFERS_PER_Q); | ||
504 | adapter->req_q.pci_batch = 0; | ||
505 | |||
506 | return 0; | ||
507 | |||
508 | failed_qdio: | ||
509 | while (qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR) | ||
510 | == -EINPROGRESS) | ||
511 | ssleep(1); | ||
512 | |||
513 | return -EIO; | ||
514 | } | ||
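
For reference, the PCI-flag placement in the new zfcp_qdio_send() above comes down to modular arithmetic over the outbound queue: a running batch counter is advanced by the number of SBALs submitted, and whenever it crosses ZFCP_QDIO_PCI_INTERVAL, SBAL_FLAGS0_PCI is set on the SBAL whose completion crosses that boundary. The following user-space sketch (not kernel code; the queue depth and interval values are illustrative assumptions) traces which SBAL index would receive the flag as requests are submitted:

    #include <stdio.h>

    #define QDIO_MAX_BUFFERS_PER_Q 128   /* assumed queue depth */
    #define PCI_INTERVAL            88   /* stand-in for ZFCP_QDIO_PCI_INTERVAL */

    int main(void)
    {
    	int first = 0, pci_batch = 0;
    	int counts[] = { 30, 30, 30, 5 };	/* SBALs used by each request */

    	for (unsigned int i = 0; i < sizeof(counts) / sizeof(counts[0]); i++) {
    		int count = counts[i];
    		int batch = pci_batch + count;

    		if (batch >= PCI_INTERVAL) {
    			batch %= PCI_INTERVAL;
    			/* SBAL whose completion crosses the interval boundary */
    			int pci = (first + count - (batch + 1))
    					% QDIO_MAX_BUFFERS_PER_Q;
    			printf("request %u: PCI flag on SBAL %d\n", i, pci);
    		}

    		first = (first + count) % QDIO_MAX_BUFFERS_PER_Q;
    		pci_batch = batch;
    	}
    	return 0;
    }

The effect (per the "acknowledgements for transferred buffers" comment in zfcp_qdio_send) is that roughly every ZFCP_QDIO_PCI_INTERVAL outbound SBALs an interrupt is requested, so zfcp_qdio_int_req can reclaim completed SBALs and wake waiters before the request queue fills up.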