Diffstat (limited to 'drivers/s390/scsi/zfcp_qdio.c')
-rw-r--r--   drivers/s390/scsi/zfcp_qdio.c   369
1 file changed, 188 insertions, 181 deletions
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index e0a215309df..6c5228b627f 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Setup and helper functions to access QDIO. | 4 | * Setup and helper functions to access QDIO. |
5 | * | 5 | * |
6 | * Copyright IBM Corporation 2002, 2008 | 6 | * Copyright IBM Corporation 2002, 2009 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "zfcp" | 9 | #define KMSG_COMPONENT "zfcp" |
@@ -34,29 +34,10 @@ zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx) | |||
34 | return &q->sbal[sbal_idx]->element[sbale_idx]; | 34 | return &q->sbal[sbal_idx]->element[sbale_idx]; |
35 | } | 35 | } |
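For orientation: zfcp_qdio_sbale() above resolves a (SBAL index, SBALE index) pair within one queue. A minimal, hedged sketch of that layout follows; the types are simplified stand-ins rather than the kernel's qdio structures, and only QDIO_MAX_BUFFERS_PER_Q (128) is the real constant.

/* Standalone illustration of the queue -> SBAL -> SBALE addressing used by
 * zfcp_qdio_sbale().  Types are simplified stand-ins, not the kernel structs. */
#include <stdio.h>

#define QDIO_MAX_BUFFERS_PER_Q	128	/* SBALs per queue (ring size) */
#define SBALES_PER_SBAL		16	/* simplified; the real count differs */

struct sbale { void *addr; unsigned int length; unsigned char flags; };
struct sbal  { struct sbale element[SBALES_PER_SBAL]; };
struct queue { struct sbal *sbal[QDIO_MAX_BUFFERS_PER_Q]; };

/* mirrors zfcp_qdio_sbale(): pick SBALE sbale_idx inside SBAL sbal_idx */
static struct sbale *sbale_at(struct queue *q, int sbal_idx, int sbale_idx)
{
	return &q->sbal[sbal_idx]->element[sbale_idx];
}

int main(void)
{
	static struct sbal backing[QDIO_MAX_BUFFERS_PER_Q];
	struct queue q;
	int i;

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++)
		q.sbal[i] = &backing[i];

	printf("SBAL 5, SBALE 3 sits at %p\n", (void *)sbale_at(&q, 5, 3));
	return 0;
}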
36 | 36 | ||
37 | /** | 37 | static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id) |
38 | * zfcp_qdio_free - free memory used by request- and resposne queue | ||
39 | * @adapter: pointer to the zfcp_adapter structure | ||
40 | */ | ||
41 | void zfcp_qdio_free(struct zfcp_adapter *adapter) | ||
42 | { | 38 | { |
43 | struct qdio_buffer **sbal_req, **sbal_resp; | 39 | struct zfcp_adapter *adapter = qdio->adapter; |
44 | int p; | ||
45 | |||
46 | if (adapter->ccw_device) | ||
47 | qdio_free(adapter->ccw_device); | ||
48 | |||
49 | sbal_req = adapter->req_q.sbal; | ||
50 | sbal_resp = adapter->resp_q.sbal; | ||
51 | |||
52 | for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) { | ||
53 | free_page((unsigned long) sbal_req[p]); | ||
54 | free_page((unsigned long) sbal_resp[p]); | ||
55 | } | ||
56 | } | ||
57 | 40 | ||
58 | static void zfcp_qdio_handler_error(struct zfcp_adapter *adapter, char *id) | ||
59 | { | ||
60 | dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n"); | 41 | dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n"); |
61 | 42 | ||
62 | zfcp_erp_adapter_reopen(adapter, | 43 | zfcp_erp_adapter_reopen(adapter, |
@@ -75,72 +56,47 @@ static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt) | |||
75 | } | 56 | } |
76 | 57 | ||
77 | /* this needs to be called prior to updating the queue fill level */ | 58 | /* this needs to be called prior to updating the queue fill level */ |
78 | static void zfcp_qdio_account(struct zfcp_adapter *adapter) | 59 | static inline void zfcp_qdio_account(struct zfcp_qdio *qdio) |
79 | { | 60 | { |
80 | ktime_t now; | 61 | unsigned long long now, span; |
81 | s64 span; | ||
82 | int free, used; | 62 | int free, used; |
83 | 63 | ||
84 | spin_lock(&adapter->qdio_stat_lock); | 64 | spin_lock(&qdio->stat_lock); |
85 | now = ktime_get(); | 65 | now = get_clock_monotonic(); |
86 | span = ktime_us_delta(now, adapter->req_q_time); | 66 | span = (now - qdio->req_q_time) >> 12; |
87 | free = max(0, atomic_read(&adapter->req_q.count)); | 67 | free = atomic_read(&qdio->req_q.count); |
88 | used = QDIO_MAX_BUFFERS_PER_Q - free; | 68 | used = QDIO_MAX_BUFFERS_PER_Q - free; |
89 | adapter->req_q_util += used * span; | 69 | qdio->req_q_util += used * span; |
90 | adapter->req_q_time = now; | 70 | qdio->req_q_time = now; |
91 | spin_unlock(&adapter->qdio_stat_lock); | 71 | spin_unlock(&qdio->stat_lock); |
92 | } | 72 | } |
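The reworked zfcp_qdio_account() drops ktime in favour of the raw monotonic TOD value; shifting the delta right by 12 converts s390 TOD clock units to (roughly) microseconds, since TOD bit 51 ticks once per microsecond. A hedged userspace sketch of the accumulation follows; the clock source is faked and the stat_lock serialisation is omitted, only the arithmetic mirrors the driver.

/* Sketch of the request-queue utilisation accounting: each update weights
 * the time since the last sample (in ~microseconds) by the number of SBALs
 * in use and adds it to req_q_util.  No locking here, unlike the driver. */
#include <stdio.h>

#define QDIO_MAX_BUFFERS_PER_Q	128

struct stats {
	unsigned long long req_q_time;	/* TOD value at last sample */
	unsigned long long req_q_util;	/* accumulated used * microseconds */
};

/* stand-in for get_clock_monotonic(): pretend ~250us pass per call */
static unsigned long long fake_tod(void)
{
	static unsigned long long tod;
	return tod += 4096ULL * 250;
}

static void account(struct stats *s, int free_sbals)
{
	unsigned long long now = fake_tod();
	unsigned long long span = (now - s->req_q_time) >> 12; /* TOD -> us */
	int used = QDIO_MAX_BUFFERS_PER_Q - free_sbals;

	s->req_q_util += (unsigned long long)used * span;
	s->req_q_time = now;
}

int main(void)
{
	struct stats s = { 0, 0 };

	account(&s, 100);	/* 28 SBALs were in use over the last span */
	account(&s, 64);	/* 64 SBALs were in use over the last span */
	printf("util = %llu SBAL-microseconds\n", s.req_q_util);
	return 0;
}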
93 | 73 | ||
94 | static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err, | 74 | static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err, |
95 | int queue_no, int first, int count, | 75 | int queue_no, int first, int count, |
96 | unsigned long parm) | 76 | unsigned long parm) |
97 | { | 77 | { |
98 | struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm; | 78 | struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm; |
99 | struct zfcp_qdio_queue *queue = &adapter->req_q; | 79 | struct zfcp_qdio_queue *queue = &qdio->req_q; |
100 | 80 | ||
101 | if (unlikely(qdio_err)) { | 81 | if (unlikely(qdio_err)) { |
102 | zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count); | 82 | zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first, |
103 | zfcp_qdio_handler_error(adapter, "qdireq1"); | 83 | count); |
84 | zfcp_qdio_handler_error(qdio, "qdireq1"); | ||
104 | return; | 85 | return; |
105 | } | 86 | } |
106 | 87 | ||
107 | /* cleanup all SBALs being program-owned now */ | 88 | /* cleanup all SBALs being program-owned now */ |
108 | zfcp_qdio_zero_sbals(queue->sbal, first, count); | 89 | zfcp_qdio_zero_sbals(queue->sbal, first, count); |
109 | 90 | ||
110 | zfcp_qdio_account(adapter); | 91 | zfcp_qdio_account(qdio); |
111 | atomic_add(count, &queue->count); | 92 | atomic_add(count, &queue->count); |
112 | wake_up(&adapter->request_wq); | 93 | wake_up(&qdio->req_q_wq); |
113 | } | ||
114 | |||
115 | static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, | ||
116 | unsigned long req_id, int sbal_idx) | ||
117 | { | ||
118 | struct zfcp_fsf_req *fsf_req; | ||
119 | unsigned long flags; | ||
120 | |||
121 | spin_lock_irqsave(&adapter->req_list_lock, flags); | ||
122 | fsf_req = zfcp_reqlist_find(adapter, req_id); | ||
123 | |||
124 | if (!fsf_req) | ||
125 | /* | ||
126 | * Unknown request means that we have potentially memory | ||
127 | * corruption and must stop the machine immediatly. | ||
128 | */ | ||
129 | panic("error: unknown request id (%lx) on adapter %s.\n", | ||
130 | req_id, dev_name(&adapter->ccw_device->dev)); | ||
131 | |||
132 | zfcp_reqlist_remove(adapter, fsf_req); | ||
133 | spin_unlock_irqrestore(&adapter->req_list_lock, flags); | ||
134 | |||
135 | fsf_req->sbal_response = sbal_idx; | ||
136 | fsf_req->qdio_inb_usage = atomic_read(&adapter->resp_q.count); | ||
137 | zfcp_fsf_req_complete(fsf_req); | ||
138 | } | 94 | } |
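The request-ID lookup that used to live here (find the pending request by the ID carried in the SBALE, detach it from the adapter's request list under the list lock, then complete it; an unknown ID was treated as fatal) moves out of the qdio layer, and the response handler below calls zfcp_fsf_reqid_check() once per SBAL instead. A rough, hedged sketch of the lookup-and-detach pattern follows, using a plain linked list and no locking rather than whatever structure the driver really keeps.

/* Minimal model of "find pending request by id, detach it, complete it".
 * The real driver protects its pending-request list with a spinlock and
 * completes the request afterwards; this only shows the shape. */
#include <stdio.h>

struct pending_req {
	unsigned long req_id;
	struct pending_req *next;
};

static struct pending_req *find_and_remove(struct pending_req **head,
					   unsigned long req_id)
{
	struct pending_req **pp, *req;

	for (pp = head; (req = *pp) != NULL; pp = &req->next)
		if (req->req_id == req_id) {
			*pp = req->next;	/* detach from the list */
			return req;
		}
	return NULL;	/* unknown id: the removed code treated this as fatal */
}

int main(void)
{
	struct pending_req a = { 0x42, NULL }, b = { 0x43, &a };
	struct pending_req *head = &b;
	struct pending_req *req = find_and_remove(&head, 0x42);

	printf("found id 0x%lx, remaining head id 0x%lx\n",
	       req ? req->req_id : 0UL, head->req_id);
	return 0;
}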
139 | 95 | ||
140 | static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed) | 96 | static void zfcp_qdio_resp_put_back(struct zfcp_qdio *qdio, int processed) |
141 | { | 97 | { |
142 | struct zfcp_qdio_queue *queue = &adapter->resp_q; | 98 | struct zfcp_qdio_queue *queue = &qdio->resp_q; |
143 | struct ccw_device *cdev = adapter->ccw_device; | 99 | struct ccw_device *cdev = qdio->adapter->ccw_device; |
144 | u8 count, start = queue->first; | 100 | u8 count, start = queue->first; |
145 | unsigned int retval; | 101 | unsigned int retval; |
146 | 102 | ||
@@ -162,14 +118,13 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, | |||
162 | int queue_no, int first, int count, | 118 | int queue_no, int first, int count, |
163 | unsigned long parm) | 119 | unsigned long parm) |
164 | { | 120 | { |
165 | struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm; | 121 | struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm; |
166 | struct zfcp_qdio_queue *queue = &adapter->resp_q; | 122 | int sbal_idx, sbal_no; |
167 | struct qdio_buffer_element *sbale; | ||
168 | int sbal_idx, sbale_idx, sbal_no; | ||
169 | 123 | ||
170 | if (unlikely(qdio_err)) { | 124 | if (unlikely(qdio_err)) { |
171 | zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count); | 125 | zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first, |
172 | zfcp_qdio_handler_error(adapter, "qdires1"); | 126 | count); |
127 | zfcp_qdio_handler_error(qdio, "qdires1"); | ||
173 | return; | 128 | return; |
174 | } | 129 | } |
175 | 130 | ||
@@ -179,39 +134,27 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, | |||
179 | */ | 134 | */ |
180 | for (sbal_no = 0; sbal_no < count; sbal_no++) { | 135 | for (sbal_no = 0; sbal_no < count; sbal_no++) { |
181 | sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q; | 136 | sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q; |
182 | |||
183 | /* go through all SBALEs of SBAL */ | 137 | /* go through all SBALEs of SBAL */ |
184 | for (sbale_idx = 0; sbale_idx < QDIO_MAX_ELEMENTS_PER_BUFFER; | 138 | zfcp_fsf_reqid_check(qdio, sbal_idx); |
185 | sbale_idx++) { | ||
186 | sbale = zfcp_qdio_sbale(queue, sbal_idx, sbale_idx); | ||
187 | zfcp_qdio_reqid_check(adapter, | ||
188 | (unsigned long) sbale->addr, | ||
189 | sbal_idx); | ||
190 | if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY)) | ||
191 | break; | ||
192 | }; | ||
193 | |||
194 | if (unlikely(!(sbale->flags & SBAL_FLAGS_LAST_ENTRY))) | ||
195 | dev_warn(&adapter->ccw_device->dev, | ||
196 | "A QDIO protocol error occurred, " | ||
197 | "operations continue\n"); | ||
198 | } | 139 | } |
199 | 140 | ||
200 | /* | 141 | /* |
201 | * put range of SBALs back to response queue | 142 | * put range of SBALs back to response queue |
202 | * (including SBALs which have already been free before) | 143 | * (including SBALs which have already been free before) |
203 | */ | 144 | */ |
204 | zfcp_qdio_resp_put_back(adapter, count); | 145 | zfcp_qdio_resp_put_back(qdio, count); |
205 | } | 146 | } |
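The response handler walks count inbound SBALs starting at first, wrapping around the 128-entry ring. The index arithmetic, as a tiny self-contained sketch:

/* Walk `count` buffers of a 128-entry ring starting at `first`, wrapping
 * with a modulo -- the same indexing as the loop in zfcp_qdio_int_resp(). */
#include <stdio.h>

#define QDIO_MAX_BUFFERS_PER_Q	128

int main(void)
{
	int first = 126, count = 4, sbal_no;

	for (sbal_no = 0; sbal_no < count; sbal_no++) {
		int sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
		printf("process SBAL %d\n", sbal_idx);	/* 126 127 0 1 */
	}
	return 0;
}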
206 | 147 | ||
207 | /** | 148 | /** |
208 | * zfcp_qdio_sbale_req - return ptr to SBALE of req_q for a struct zfcp_fsf_req | 149 | * zfcp_qdio_sbale_req - return ptr to SBALE of req_q for a struct zfcp_fsf_req |
209 | * @fsf_req: pointer to struct fsf_req | 150 | * @qdio: pointer to struct zfcp_qdio |
151 | * @q_rec: pointer to struct zfcp_queue_rec | ||
210 | * Returns: pointer to qdio_buffer_element (SBALE) structure | 152 | * Returns: pointer to qdio_buffer_element (SBALE) structure |
211 | */ | 153 | */ |
212 | struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_fsf_req *req) | 154 | struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, |
155 | struct zfcp_queue_req *q_req) | ||
213 | { | 156 | { |
214 | return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, 0); | 157 | return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, 0); |
215 | } | 158 | } |
216 | 159 | ||
217 | /** | 160 | /** |
@@ -219,74 +162,80 @@ struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_fsf_req *req) | |||
219 | * @fsf_req: pointer to struct fsf_req | 162 | * @fsf_req: pointer to struct fsf_req |
220 | * Returns: pointer to qdio_buffer_element (SBALE) structure | 163 | * Returns: pointer to qdio_buffer_element (SBALE) structure |
221 | */ | 164 | */ |
222 | struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_fsf_req *req) | 165 | struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, |
166 | struct zfcp_queue_req *q_req) | ||
223 | { | 167 | { |
224 | return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, | 168 | return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, |
225 | req->sbale_curr); | 169 | q_req->sbale_curr); |
226 | } | 170 | } |
227 | 171 | ||
228 | static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals) | 172 | static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio, |
173 | struct zfcp_queue_req *q_req, int max_sbals) | ||
229 | { | 174 | { |
230 | int count = atomic_read(&fsf_req->adapter->req_q.count); | 175 | int count = atomic_read(&qdio->req_q.count); |
231 | count = min(count, max_sbals); | 176 | count = min(count, max_sbals); |
232 | fsf_req->sbal_limit = (fsf_req->sbal_first + count - 1) | 177 | q_req->sbal_limit = (q_req->sbal_first + count - 1) |
233 | % QDIO_MAX_BUFFERS_PER_Q; | 178 | % QDIO_MAX_BUFFERS_PER_Q; |
234 | } | 179 | } |
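zfcp_qdio_sbal_limit() caps a request at min(free SBALs, max_sbals) and records the last ring slot the request may occupy. A small numeric check of that computation, with made-up input values:

/* Compute the last SBAL index a request may use, given the current ring
 * position, the free count and a per-request cap (mirrors the math above). */
#include <stdio.h>

#define QDIO_MAX_BUFFERS_PER_Q	128

static int sbal_limit(int sbal_first, int free_count, int max_sbals)
{
	int count = free_count < max_sbals ? free_count : max_sbals;

	return (sbal_first + count - 1) % QDIO_MAX_BUFFERS_PER_Q;
}

int main(void)
{
	/* request starts at slot 125, 40 SBALs free, capped at 5 per request:
	 * it may use slots 125, 126, 127, 0, 1 -> limit is 1 */
	printf("limit = %d\n", sbal_limit(125, 40, 5));
	return 0;
}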
235 | 180 | ||
236 | static struct qdio_buffer_element * | 181 | static struct qdio_buffer_element * |
237 | zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) | 182 | zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req, |
183 | unsigned long sbtype) | ||
238 | { | 184 | { |
239 | struct qdio_buffer_element *sbale; | 185 | struct qdio_buffer_element *sbale; |
240 | 186 | ||
241 | /* set last entry flag in current SBALE of current SBAL */ | 187 | /* set last entry flag in current SBALE of current SBAL */ |
242 | sbale = zfcp_qdio_sbale_curr(fsf_req); | 188 | sbale = zfcp_qdio_sbale_curr(qdio, q_req); |
243 | sbale->flags |= SBAL_FLAGS_LAST_ENTRY; | 189 | sbale->flags |= SBAL_FLAGS_LAST_ENTRY; |
244 | 190 | ||
245 | /* don't exceed last allowed SBAL */ | 191 | /* don't exceed last allowed SBAL */ |
246 | if (fsf_req->sbal_last == fsf_req->sbal_limit) | 192 | if (q_req->sbal_last == q_req->sbal_limit) |
247 | return NULL; | 193 | return NULL; |
248 | 194 | ||
249 | /* set chaining flag in first SBALE of current SBAL */ | 195 | /* set chaining flag in first SBALE of current SBAL */ |
250 | sbale = zfcp_qdio_sbale_req(fsf_req); | 196 | sbale = zfcp_qdio_sbale_req(qdio, q_req); |
251 | sbale->flags |= SBAL_FLAGS0_MORE_SBALS; | 197 | sbale->flags |= SBAL_FLAGS0_MORE_SBALS; |
252 | 198 | ||
253 | /* calculate index of next SBAL */ | 199 | /* calculate index of next SBAL */ |
254 | fsf_req->sbal_last++; | 200 | q_req->sbal_last++; |
255 | fsf_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q; | 201 | q_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q; |
256 | 202 | ||
257 | /* keep this requests number of SBALs up-to-date */ | 203 | /* keep this requests number of SBALs up-to-date */ |
258 | fsf_req->sbal_number++; | 204 | q_req->sbal_number++; |
259 | 205 | ||
260 | /* start at first SBALE of new SBAL */ | 206 | /* start at first SBALE of new SBAL */ |
261 | fsf_req->sbale_curr = 0; | 207 | q_req->sbale_curr = 0; |
262 | 208 | ||
263 | /* set storage-block type for new SBAL */ | 209 | /* set storage-block type for new SBAL */ |
264 | sbale = zfcp_qdio_sbale_curr(fsf_req); | 210 | sbale = zfcp_qdio_sbale_curr(qdio, q_req); |
265 | sbale->flags |= sbtype; | 211 | sbale->flags |= sbtype; |
266 | 212 | ||
267 | return sbale; | 213 | return sbale; |
268 | } | 214 | } |
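When one SBAL is exhausted, zfcp_qdio_sbal_chain() closes it (LAST_ENTRY on the current SBALE, MORE_SBALS on its first SBALE) and steps sbal_last forward with wraparound, refusing to pass sbal_limit. Just the index bookkeeping, as a hedged sketch with the flag handling left out:

/* Index bookkeeping for chaining into the next SBAL: advance sbal_last
 * around the ring, bump the per-request SBAL count, restart the SBALE
 * cursor at 0.  Returns -1 when the request would exceed its limit. */
#include <stdio.h>

#define QDIO_MAX_BUFFERS_PER_Q	128

struct q_req {
	int sbal_last;		/* SBAL currently being filled */
	int sbal_limit;		/* last SBAL this request may use */
	int sbal_number;	/* SBALs used by this request so far */
	int sbale_curr;		/* SBALE cursor inside sbal_last */
};

static int chain_to_next_sbal(struct q_req *q)
{
	if (q->sbal_last == q->sbal_limit)
		return -1;			/* no room left for this request */

	q->sbal_last = (q->sbal_last + 1) % QDIO_MAX_BUFFERS_PER_Q;
	q->sbal_number++;
	q->sbale_curr = 0;
	return q->sbal_last;
}

int main(void)
{
	struct q_req q = { .sbal_last = 127, .sbal_limit = 2,
			   .sbal_number = 1, .sbale_curr = 15 };

	printf("next SBAL: %d\n", chain_to_next_sbal(&q));	/* wraps to 0 */
	return 0;
}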
269 | 215 | ||
270 | static struct qdio_buffer_element * | 216 | static struct qdio_buffer_element * |
271 | zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) | 217 | zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req, |
218 | unsigned int sbtype) | ||
272 | { | 219 | { |
273 | if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) | 220 | if (q_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) |
274 | return zfcp_qdio_sbal_chain(fsf_req, sbtype); | 221 | return zfcp_qdio_sbal_chain(qdio, q_req, sbtype); |
275 | fsf_req->sbale_curr++; | 222 | q_req->sbale_curr++; |
276 | return zfcp_qdio_sbale_curr(fsf_req); | 223 | return zfcp_qdio_sbale_curr(qdio, q_req); |
277 | } | 224 | } |
278 | 225 | ||
279 | static void zfcp_qdio_undo_sbals(struct zfcp_fsf_req *fsf_req) | 226 | static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio, |
227 | struct zfcp_queue_req *q_req) | ||
280 | { | 228 | { |
281 | struct qdio_buffer **sbal = fsf_req->adapter->req_q.sbal; | 229 | struct qdio_buffer **sbal = qdio->req_q.sbal; |
282 | int first = fsf_req->sbal_first; | 230 | int first = q_req->sbal_first; |
283 | int last = fsf_req->sbal_last; | 231 | int last = q_req->sbal_last; |
284 | int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) % | 232 | int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) % |
285 | QDIO_MAX_BUFFERS_PER_Q + 1; | 233 | QDIO_MAX_BUFFERS_PER_Q + 1; |
286 | zfcp_qdio_zero_sbals(sbal, first, count); | 234 | zfcp_qdio_zero_sbals(sbal, first, count); |
287 | } | 235 | } |
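On failure, zfcp_qdio_undo_sbals() re-zeroes everything the request had already claimed; the count expression handles wraparound between sbal_first and sbal_last. A quick check of that formula with sample indices:

/* Number of SBALs from `first` through `last` inclusive on a 128-entry
 * ring -- the "(last - first + Q) % Q + 1" expression from the driver. */
#include <stdio.h>

#define QDIO_MAX_BUFFERS_PER_Q	128

static int span_count(int first, int last)
{
	return (last - first + QDIO_MAX_BUFFERS_PER_Q)
		% QDIO_MAX_BUFFERS_PER_Q + 1;
}

int main(void)
{
	printf("%d\n", span_count(10, 12));	/* 3: SBALs 10, 11, 12 */
	printf("%d\n", span_count(126, 1));	/* 4: 126, 127, 0, 1   */
	printf("%d\n", span_count(5, 5));	/* 1: a single SBAL    */
	return 0;
}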
288 | 236 | ||
289 | static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req, | 237 | static int zfcp_qdio_fill_sbals(struct zfcp_qdio *qdio, |
238 | struct zfcp_queue_req *q_req, | ||
290 | unsigned int sbtype, void *start_addr, | 239 | unsigned int sbtype, void *start_addr, |
291 | unsigned int total_length) | 240 | unsigned int total_length) |
292 | { | 241 | { |
@@ -297,10 +246,10 @@ static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req, | |||
297 | /* split segment up */ | 246 | /* split segment up */ |
298 | for (addr = start_addr, remaining = total_length; remaining > 0; | 247 | for (addr = start_addr, remaining = total_length; remaining > 0; |
299 | addr += length, remaining -= length) { | 248 | addr += length, remaining -= length) { |
300 | sbale = zfcp_qdio_sbale_next(fsf_req, sbtype); | 249 | sbale = zfcp_qdio_sbale_next(qdio, q_req, sbtype); |
301 | if (!sbale) { | 250 | if (!sbale) { |
302 | atomic_inc(&fsf_req->adapter->qdio_outb_full); | 251 | atomic_inc(&qdio->req_q_full); |
303 | zfcp_qdio_undo_sbals(fsf_req); | 252 | zfcp_qdio_undo_sbals(qdio, q_req); |
304 | return -EINVAL; | 253 | return -EINVAL; |
305 | } | 254 | } |
306 | 255 | ||
@@ -322,29 +271,31 @@ static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req, | |||
322 | * @max_sbals: upper bound for number of SBALs to be used | 271 | * @max_sbals: upper bound for number of SBALs to be used |
323 | * Returns: number of bytes, or error (negativ) | 272 | * Returns: number of bytes, or error (negativ) |
324 | */ | 273 | */ |
325 | int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, | 274 | int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, |
326 | struct scatterlist *sg, int max_sbals) | 275 | struct zfcp_queue_req *q_req, |
276 | unsigned long sbtype, struct scatterlist *sg, | ||
277 | int max_sbals) | ||
327 | { | 278 | { |
328 | struct qdio_buffer_element *sbale; | 279 | struct qdio_buffer_element *sbale; |
329 | int retval, bytes = 0; | 280 | int retval, bytes = 0; |
330 | 281 | ||
331 | /* figure out last allowed SBAL */ | 282 | /* figure out last allowed SBAL */ |
332 | zfcp_qdio_sbal_limit(fsf_req, max_sbals); | 283 | zfcp_qdio_sbal_limit(qdio, q_req, max_sbals); |
333 | 284 | ||
334 | /* set storage-block type for this request */ | 285 | /* set storage-block type for this request */ |
335 | sbale = zfcp_qdio_sbale_req(fsf_req); | 286 | sbale = zfcp_qdio_sbale_req(qdio, q_req); |
336 | sbale->flags |= sbtype; | 287 | sbale->flags |= sbtype; |
337 | 288 | ||
338 | for (; sg; sg = sg_next(sg)) { | 289 | for (; sg; sg = sg_next(sg)) { |
339 | retval = zfcp_qdio_fill_sbals(fsf_req, sbtype, sg_virt(sg), | 290 | retval = zfcp_qdio_fill_sbals(qdio, q_req, sbtype, |
340 | sg->length); | 291 | sg_virt(sg), sg->length); |
341 | if (retval < 0) | 292 | if (retval < 0) |
342 | return retval; | 293 | return retval; |
343 | bytes += sg->length; | 294 | bytes += sg->length; |
344 | } | 295 | } |
345 | 296 | ||
346 | /* assume that no other SBALEs are to follow in the same SBAL */ | 297 | /* assume that no other SBALEs are to follow in the same SBAL */ |
347 | sbale = zfcp_qdio_sbale_curr(fsf_req); | 298 | sbale = zfcp_qdio_sbale_curr(qdio, q_req); |
348 | sbale->flags |= SBAL_FLAGS_LAST_ENTRY; | 299 | sbale->flags |= SBAL_FLAGS_LAST_ENTRY; |
349 | 300 | ||
350 | return bytes; | 301 | return bytes; |
@@ -352,21 +303,22 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, | |||
352 | 303 | ||
353 | /** | 304 | /** |
354 | * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO | 305 | * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO |
355 | * @fsf_req: pointer to struct zfcp_fsf_req | 306 | * @qdio: pointer to struct zfcp_qdio |
307 | * @q_req: pointer to struct zfcp_queue_req | ||
356 | * Returns: 0 on success, error otherwise | 308 | * Returns: 0 on success, error otherwise |
357 | */ | 309 | */ |
358 | int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req) | 310 | int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req) |
359 | { | 311 | { |
360 | struct zfcp_adapter *adapter = fsf_req->adapter; | 312 | struct zfcp_qdio_queue *req_q = &qdio->req_q; |
361 | struct zfcp_qdio_queue *req_q = &adapter->req_q; | 313 | int first = q_req->sbal_first; |
362 | int first = fsf_req->sbal_first; | 314 | int count = q_req->sbal_number; |
363 | int count = fsf_req->sbal_number; | ||
364 | int retval; | 315 | int retval; |
365 | unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT; | 316 | unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT; |
366 | 317 | ||
367 | zfcp_qdio_account(adapter); | 318 | zfcp_qdio_account(qdio); |
368 | 319 | ||
369 | retval = do_QDIO(adapter->ccw_device, qdio_flags, 0, first, count); | 320 | retval = do_QDIO(qdio->adapter->ccw_device, qdio_flags, 0, first, |
321 | count); | ||
370 | if (unlikely(retval)) { | 322 | if (unlikely(retval)) { |
371 | zfcp_qdio_zero_sbals(req_q->sbal, first, count); | 323 | zfcp_qdio_zero_sbals(req_q->sbal, first, count); |
372 | return retval; | 324 | return retval; |
@@ -379,63 +331,69 @@ int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req) | |||
379 | return 0; | 331 | return 0; |
380 | } | 332 | } |
381 | 333 | ||
334 | |||
335 | static void zfcp_qdio_setup_init_data(struct qdio_initialize *id, | ||
336 | struct zfcp_qdio *qdio) | ||
337 | { | ||
338 | |||
339 | id->cdev = qdio->adapter->ccw_device; | ||
340 | id->q_format = QDIO_ZFCP_QFMT; | ||
341 | memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8); | ||
342 | ASCEBC(id->adapter_name, 8); | ||
343 | id->qib_param_field_format = 0; | ||
344 | id->qib_param_field = NULL; | ||
345 | id->input_slib_elements = NULL; | ||
346 | id->output_slib_elements = NULL; | ||
347 | id->no_input_qs = 1; | ||
348 | id->no_output_qs = 1; | ||
349 | id->input_handler = zfcp_qdio_int_resp; | ||
350 | id->output_handler = zfcp_qdio_int_req; | ||
351 | id->int_parm = (unsigned long) qdio; | ||
352 | id->flags = QDIO_INBOUND_0COPY_SBALS | | ||
353 | QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS; | ||
354 | id->input_sbal_addr_array = (void **) (qdio->resp_q.sbal); | ||
355 | id->output_sbal_addr_array = (void **) (qdio->req_q.sbal); | ||
356 | |||
357 | } | ||
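The new zfcp_qdio_setup_init_data() helper fills the qdio_initialize descriptor for both callers, zfcp_qdio_allocate() below and zfcp_qdio_open(), so the descriptor no longer has to be stored in the long-lived adapter structure; each caller keeps one on its stack. A hedged, generic sketch of that shape (invented names, not the QDIO API):

/* Generic illustration of the refactoring: one helper fills a short-lived,
 * on-stack descriptor that two separate paths consume. */
#include <stdio.h>
#include <string.h>

struct init_desc { const char *name; int no_input_qs; int no_output_qs; };
struct ctx { const char *dev_name; };

static void fill_init_desc(struct init_desc *id, const struct ctx *ctx)
{
	memset(id, 0, sizeof(*id));
	id->name = ctx->dev_name;
	id->no_input_qs = 1;
	id->no_output_qs = 1;
}

static void allocate_path(const struct ctx *ctx)
{
	struct init_desc id;	/* like the stack copy in zfcp_qdio_allocate() */

	fill_init_desc(&id, ctx);
	printf("allocate queues for %s\n", id.name);
}

static void establish_path(const struct ctx *ctx)
{
	struct init_desc id;	/* filled again in zfcp_qdio_open() */

	fill_init_desc(&id, ctx);
	printf("establish queues for %s\n", id.name);
}

int main(void)
{
	struct ctx ctx = { "0.0.1900" };	/* invented bus id */

	allocate_path(&ctx);
	establish_path(&ctx);
	return 0;
}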
382 | /** | 358 | /** |
383 | * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data | 359 | * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data |
384 | * @adapter: pointer to struct zfcp_adapter | 360 | * @adapter: pointer to struct zfcp_adapter |
385 | * Returns: -ENOMEM on memory allocation error or return value from | 361 | * Returns: -ENOMEM on memory allocation error or return value from |
386 | * qdio_allocate | 362 | * qdio_allocate |
387 | */ | 363 | */ |
388 | int zfcp_qdio_allocate(struct zfcp_adapter *adapter) | 364 | static int zfcp_qdio_allocate(struct zfcp_qdio *qdio) |
389 | { | 365 | { |
390 | struct qdio_initialize *init_data; | 366 | struct qdio_initialize init_data; |
391 | 367 | ||
392 | if (zfcp_qdio_buffers_enqueue(adapter->req_q.sbal) || | 368 | if (zfcp_qdio_buffers_enqueue(qdio->req_q.sbal) || |
393 | zfcp_qdio_buffers_enqueue(adapter->resp_q.sbal)) | 369 | zfcp_qdio_buffers_enqueue(qdio->resp_q.sbal)) |
394 | return -ENOMEM; | 370 | return -ENOMEM; |
395 | 371 | ||
396 | init_data = &adapter->qdio_init_data; | 372 | zfcp_qdio_setup_init_data(&init_data, qdio); |
397 | 373 | ||
398 | init_data->cdev = adapter->ccw_device; | 374 | return qdio_allocate(&init_data); |
399 | init_data->q_format = QDIO_ZFCP_QFMT; | ||
400 | memcpy(init_data->adapter_name, dev_name(&adapter->ccw_device->dev), 8); | ||
401 | ASCEBC(init_data->adapter_name, 8); | ||
402 | init_data->qib_param_field_format = 0; | ||
403 | init_data->qib_param_field = NULL; | ||
404 | init_data->input_slib_elements = NULL; | ||
405 | init_data->output_slib_elements = NULL; | ||
406 | init_data->no_input_qs = 1; | ||
407 | init_data->no_output_qs = 1; | ||
408 | init_data->input_handler = zfcp_qdio_int_resp; | ||
409 | init_data->output_handler = zfcp_qdio_int_req; | ||
410 | init_data->int_parm = (unsigned long) adapter; | ||
411 | init_data->flags = QDIO_INBOUND_0COPY_SBALS | | ||
412 | QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS; | ||
413 | init_data->input_sbal_addr_array = | ||
414 | (void **) (adapter->resp_q.sbal); | ||
415 | init_data->output_sbal_addr_array = | ||
416 | (void **) (adapter->req_q.sbal); | ||
417 | |||
418 | return qdio_allocate(init_data); | ||
419 | } | 375 | } |
420 | 376 | ||
421 | /** | 377 | /** |
422 | * zfcp_close_qdio - close qdio queues for an adapter | 378 | * zfcp_close_qdio - close qdio queues for an adapter |
379 | * @qdio: pointer to structure zfcp_qdio | ||
423 | */ | 380 | */ |
424 | void zfcp_qdio_close(struct zfcp_adapter *adapter) | 381 | void zfcp_qdio_close(struct zfcp_qdio *qdio) |
425 | { | 382 | { |
426 | struct zfcp_qdio_queue *req_q; | 383 | struct zfcp_qdio_queue *req_q; |
427 | int first, count; | 384 | int first, count; |
428 | 385 | ||
429 | if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) | 386 | if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) |
430 | return; | 387 | return; |
431 | 388 | ||
432 | /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */ | 389 | /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */ |
433 | req_q = &adapter->req_q; | 390 | req_q = &qdio->req_q; |
434 | spin_lock_bh(&adapter->req_q_lock); | 391 | spin_lock_bh(&qdio->req_q_lock); |
435 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); | 392 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status); |
436 | spin_unlock_bh(&adapter->req_q_lock); | 393 | spin_unlock_bh(&qdio->req_q_lock); |
437 | 394 | ||
438 | qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR); | 395 | qdio_shutdown(qdio->adapter->ccw_device, |
396 | QDIO_FLAG_CLEANUP_USING_CLEAR); | ||
439 | 397 | ||
440 | /* cleanup used outbound sbals */ | 398 | /* cleanup used outbound sbals */ |
441 | count = atomic_read(&req_q->count); | 399 | count = atomic_read(&req_q->count); |
@@ -446,50 +404,99 @@ void zfcp_qdio_close(struct zfcp_adapter *adapter) | |||
446 | } | 404 | } |
447 | req_q->first = 0; | 405 | req_q->first = 0; |
448 | atomic_set(&req_q->count, 0); | 406 | atomic_set(&req_q->count, 0); |
449 | adapter->resp_q.first = 0; | 407 | qdio->resp_q.first = 0; |
450 | atomic_set(&adapter->resp_q.count, 0); | 408 | atomic_set(&qdio->resp_q.count, 0); |
451 | } | 409 | } |
452 | 410 | ||
453 | /** | 411 | /** |
454 | * zfcp_qdio_open - prepare and initialize response queue | 412 | * zfcp_qdio_open - prepare and initialize response queue |
455 | * @adapter: pointer to struct zfcp_adapter | 413 | * @qdio: pointer to struct zfcp_qdio |
456 | * Returns: 0 on success, otherwise -EIO | 414 | * Returns: 0 on success, otherwise -EIO |
457 | */ | 415 | */ |
458 | int zfcp_qdio_open(struct zfcp_adapter *adapter) | 416 | int zfcp_qdio_open(struct zfcp_qdio *qdio) |
459 | { | 417 | { |
460 | struct qdio_buffer_element *sbale; | 418 | struct qdio_buffer_element *sbale; |
419 | struct qdio_initialize init_data; | ||
420 | struct ccw_device *cdev = qdio->adapter->ccw_device; | ||
461 | int cc; | 421 | int cc; |
462 | 422 | ||
463 | if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP) | 423 | if (atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP) |
464 | return -EIO; | 424 | return -EIO; |
465 | 425 | ||
466 | if (qdio_establish(&adapter->qdio_init_data)) | 426 | zfcp_qdio_setup_init_data(&init_data, qdio); |
427 | |||
428 | if (qdio_establish(&init_data)) | ||
467 | goto failed_establish; | 429 | goto failed_establish; |
468 | 430 | ||
469 | if (qdio_activate(adapter->ccw_device)) | 431 | if (qdio_activate(cdev)) |
470 | goto failed_qdio; | 432 | goto failed_qdio; |
471 | 433 | ||
472 | for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) { | 434 | for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) { |
473 | sbale = &(adapter->resp_q.sbal[cc]->element[0]); | 435 | sbale = &(qdio->resp_q.sbal[cc]->element[0]); |
474 | sbale->length = 0; | 436 | sbale->length = 0; |
475 | sbale->flags = SBAL_FLAGS_LAST_ENTRY; | 437 | sbale->flags = SBAL_FLAGS_LAST_ENTRY; |
476 | sbale->addr = NULL; | 438 | sbale->addr = NULL; |
477 | } | 439 | } |
478 | 440 | ||
479 | if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0, | 441 | if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, |
480 | QDIO_MAX_BUFFERS_PER_Q)) | 442 | QDIO_MAX_BUFFERS_PER_Q)) |
481 | goto failed_qdio; | 443 | goto failed_qdio; |
482 | 444 | ||
483 | /* set index of first avalable SBALS / number of available SBALS */ | 445 | /* set index of first avalable SBALS / number of available SBALS */ |
484 | adapter->req_q.first = 0; | 446 | qdio->req_q.first = 0; |
485 | atomic_set(&adapter->req_q.count, QDIO_MAX_BUFFERS_PER_Q); | 447 | atomic_set(&qdio->req_q.count, QDIO_MAX_BUFFERS_PER_Q); |
486 | 448 | ||
487 | return 0; | 449 | return 0; |
488 | 450 | ||
489 | failed_qdio: | 451 | failed_qdio: |
490 | qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR); | 452 | qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); |
491 | failed_establish: | 453 | failed_establish: |
492 | dev_err(&adapter->ccw_device->dev, | 454 | dev_err(&cdev->dev, |
493 | "Setting up the QDIO connection to the FCP adapter failed\n"); | 455 | "Setting up the QDIO connection to the FCP adapter failed\n"); |
494 | return -EIO; | 456 | return -EIO; |
495 | } | 457 | } |
458 | |||
459 | void zfcp_qdio_destroy(struct zfcp_qdio *qdio) | ||
460 | { | ||
461 | struct qdio_buffer **sbal_req, **sbal_resp; | ||
462 | int p; | ||
463 | |||
464 | if (!qdio) | ||
465 | return; | ||
466 | |||
467 | if (qdio->adapter->ccw_device) | ||
468 | qdio_free(qdio->adapter->ccw_device); | ||
469 | |||
470 | sbal_req = qdio->req_q.sbal; | ||
471 | sbal_resp = qdio->resp_q.sbal; | ||
472 | |||
473 | for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) { | ||
474 | free_page((unsigned long) sbal_req[p]); | ||
475 | free_page((unsigned long) sbal_resp[p]); | ||
476 | } | ||
477 | |||
478 | kfree(qdio); | ||
479 | } | ||
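zfcp_qdio_destroy() frees the SBAL arrays the way zfcp_qdio_buffers_enqueue() is assumed to have allocated them: several qdio_buffer structures are packed into each page, so only every QBUFF_PER_PAGE-th pointer marks the start of a page and gets handed to free_page(). A hedged userspace analogue (malloc instead of page allocation, invented buffer sizes):

/* Userspace analogue of page-wise buffer allocation: carve each "page"
 * into BUFS_PER_PAGE buffers, keep one pointer per buffer, and free only
 * the pointers that start a page.  Sizes here are made up. */
#include <stdio.h>
#include <stdlib.h>

#define NBUF		128	/* like QDIO_MAX_BUFFERS_PER_Q */
#define PAGE_BYTES	4096
#define BUF_BYTES	512	/* invented buffer size */
#define BUFS_PER_PAGE	(PAGE_BYTES / BUF_BYTES)

int main(void)
{
	void *buf[NBUF];
	int i, j;

	for (i = 0; i < NBUF; i += BUFS_PER_PAGE) {
		char *page = malloc(PAGE_BYTES);
		if (!page)
			return 1;
		for (j = 0; j < BUFS_PER_PAGE; j++)
			buf[i + j] = page + j * BUF_BYTES;
	}

	/* teardown mirrors zfcp_qdio_destroy(): free only page starts */
	for (i = 0; i < NBUF; i += BUFS_PER_PAGE)
		free(buf[i]);

	printf("allocated and freed %d buffers in %d pages\n",
	       NBUF, NBUF / BUFS_PER_PAGE);
	return 0;
}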
480 | |||
481 | int zfcp_qdio_setup(struct zfcp_adapter *adapter) | ||
482 | { | ||
483 | struct zfcp_qdio *qdio; | ||
484 | |||
485 | qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL); | ||
486 | if (!qdio) | ||
487 | return -ENOMEM; | ||
488 | |||
489 | qdio->adapter = adapter; | ||
490 | |||
491 | if (zfcp_qdio_allocate(qdio)) { | ||
492 | zfcp_qdio_destroy(qdio); | ||
493 | return -ENOMEM; | ||
494 | } | ||
495 | |||
496 | spin_lock_init(&qdio->req_q_lock); | ||
497 | spin_lock_init(&qdio->stat_lock); | ||
498 | |||
499 | adapter->qdio = qdio; | ||
500 | return 0; | ||
501 | } | ||
502 | |||