author     Swen Schillig <swen@vnet.ibm.com>         2010-07-16 09:37:38 -0400
committer  James Bottomley <James.Bottomley@suse.de> 2010-07-28 10:48:50 -0400
commit     706eca49a044a1ea89352dcc4b96ffc1631b2cb5 (patch)
tree       f36692c277a38c051536e06272ade7ed759ab4cc /drivers/s390
parent     01b047599ade30051bf6c14fbe64181d1fec3dfa (diff)
[SCSI] zfcp: Cleanup QDIO attachment and improve processing.
Some definitions and structures in the zfcp QDIO processing are improved by removing variables and processing steps that are no longer required. In addition, some variables are renamed to make their purpose clearer.

Signed-off-by: Swen Schillig <swen@vnet.ibm.com>
Signed-off-by: Christof Schmitt <christof.schmitt@de.ibm.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
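For reference, the consolidated queue bookkeeping introduced by this patch looks as follows (excerpted from the drivers/s390/scsi/zfcp_qdio.h hunk below; the comments are added here for illustration and the remaining members are elided):

struct zfcp_qdio {
	struct qdio_buffer *res_q[QDIO_MAX_BUFFERS_PER_Q]; /* response queue SBALs */
	struct qdio_buffer *req_q[QDIO_MAX_BUFFERS_PER_Q]; /* request queue SBALs */
	u8 req_q_idx;        /* index of next free buffer in the request queue */
	atomic_t req_q_free; /* number of free buffers in the request queue */
	/* ... locks and statistics members unchanged ... */
};

The former struct zfcp_qdio_queue wrapper (sbal/first/count) is gone: the SBAL arrays move directly into struct zfcp_qdio, and an index/free counter is kept only for the request queue, since response queue buffers are now returned to QDIO directly from the interrupt handler.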
Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c   |  10
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c  | 141
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.h  |  57
3 files changed, 69 insertions, 139 deletions
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 5f502c9cb067..0710c59b80ae 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -732,7 +732,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
 
 	zfcp_reqlist_add(adapter->req_list, req);
 
-	req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q.count);
+	req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
 	req->issued = get_clock();
 	if (zfcp_qdio_send(qdio, &req->qdio_req)) {
 		del_timer(&req->timer);
@@ -2025,7 +2025,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
 	blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
 		blktrc.flags |= ZFCP_BLK_REQ_ERROR;
-	blktrc.inb_usage = req->qdio_req.qdio_inb_usage;
+	blktrc.inb_usage = 0;
 	blktrc.outb_usage = req->qdio_req.qdio_outb_usage;
 
 	if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
@@ -2207,7 +2207,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
 		return -EBUSY;
 
 	spin_lock(&qdio->req_q_lock);
-	if (atomic_read(&qdio->req_q.count) <= 0) {
+	if (atomic_read(&qdio->req_q_free) <= 0) {
 		atomic_inc(&qdio->req_q_full);
 		goto out;
 	}
@@ -2407,7 +2407,7 @@ out:
 void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
 {
 	struct zfcp_adapter *adapter = qdio->adapter;
-	struct qdio_buffer *sbal = qdio->resp_q.sbal[sbal_idx];
+	struct qdio_buffer *sbal = qdio->res_q[sbal_idx];
 	struct qdio_buffer_element *sbale;
 	struct zfcp_fsf_req *fsf_req;
 	unsigned long req_id;
@@ -2428,8 +2428,6 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
 			      req_id, dev_name(&adapter->ccw_device->dev));
 
 		fsf_req->qdio_req.sbal_response = sbal_idx;
-		fsf_req->qdio_req.qdio_inb_usage =
-			atomic_read(&qdio->resp_q.count);
 		zfcp_fsf_req_complete(fsf_req);
 
 		if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY))
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 7ab1ac16a11f..a638278c6025 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -55,71 +55,46 @@ static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
 static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
 {
 	unsigned long long now, span;
-	int free, used;
+	int used;
 
 	spin_lock(&qdio->stat_lock);
 	now = get_clock_monotonic();
 	span = (now - qdio->req_q_time) >> 12;
-	free = atomic_read(&qdio->req_q.count);
-	used = QDIO_MAX_BUFFERS_PER_Q - free;
+	used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
 	qdio->req_q_util += used * span;
 	qdio->req_q_time = now;
 	spin_unlock(&qdio->stat_lock);
 }
 
 static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
-			      int queue_no, int first, int count,
+			      int queue_no, int idx, int count,
 			      unsigned long parm)
 {
 	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
-	struct zfcp_qdio_queue *queue = &qdio->req_q;
 
 	if (unlikely(qdio_err)) {
-		zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first,
-				  count);
+		zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, idx, count);
 		zfcp_qdio_handler_error(qdio, "qdireq1");
 		return;
 	}
 
 	/* cleanup all SBALs being program-owned now */
-	zfcp_qdio_zero_sbals(queue->sbal, first, count);
+	zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
 
 	zfcp_qdio_account(qdio);
-	atomic_add(count, &queue->count);
+	atomic_add(count, &qdio->req_q_free);
 	wake_up(&qdio->req_q_wq);
 }
 
-static void zfcp_qdio_resp_put_back(struct zfcp_qdio *qdio, int processed)
-{
-	struct zfcp_qdio_queue *queue = &qdio->resp_q;
-	struct ccw_device *cdev = qdio->adapter->ccw_device;
-	u8 count, start = queue->first;
-	unsigned int retval;
-
-	count = atomic_read(&queue->count) + processed;
-
-	retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, start, count);
-
-	if (unlikely(retval)) {
-		atomic_set(&queue->count, count);
-		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdrpb_1", NULL);
-	} else {
-		queue->first += count;
-		queue->first %= QDIO_MAX_BUFFERS_PER_Q;
-		atomic_set(&queue->count, 0);
-	}
-}
-
 static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
-			       int queue_no, int first, int count,
+			       int queue_no, int idx, int count,
 			       unsigned long parm)
 {
 	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
 	int sbal_idx, sbal_no;
 
 	if (unlikely(qdio_err)) {
-		zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first,
-				  count);
+		zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, idx, count);
 		zfcp_qdio_handler_error(qdio, "qdires1");
 		return;
 	}
@@ -129,16 +104,16 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
 	 * returned by QDIO layer
 	 */
 	for (sbal_no = 0; sbal_no < count; sbal_no++) {
-		sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
+		sbal_idx = (idx + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
 		/* go through all SBALEs of SBAL */
 		zfcp_fsf_reqid_check(qdio, sbal_idx);
 	}
 
 	/*
-	 * put range of SBALs back to response queue
-	 * (including SBALs which have already been free before)
+	 * put SBALs back to response queue
 	 */
-	zfcp_qdio_resp_put_back(qdio, count);
+	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count))
+		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2", NULL);
 }
 
 static struct qdio_buffer_element *
@@ -185,17 +160,6 @@ zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
 	return zfcp_qdio_sbale_curr(qdio, q_req);
 }
 
-static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio,
-				 struct zfcp_qdio_req *q_req)
-{
-	struct qdio_buffer **sbal = qdio->req_q.sbal;
-	int first = q_req->sbal_first;
-	int last = q_req->sbal_last;
-	int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) %
-		QDIO_MAX_BUFFERS_PER_Q + 1;
-	zfcp_qdio_zero_sbals(sbal, first, count);
-}
-
 /**
  * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
  * @qdio: pointer to struct zfcp_qdio
@@ -218,7 +182,8 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
 		sbale = zfcp_qdio_sbale_next(qdio, q_req);
 		if (!sbale) {
 			atomic_inc(&qdio->req_q_full);
-			zfcp_qdio_undo_sbals(qdio, q_req);
+			zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
+					     q_req->sbal_number);
 			return -EINVAL;
 		}
 
@@ -237,10 +202,8 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
 
 static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
 {
-	struct zfcp_qdio_queue *req_q = &qdio->req_q;
-
 	spin_lock_bh(&qdio->req_q_lock);
-	if (atomic_read(&req_q->count) ||
+	if (atomic_read(&qdio->req_q_free) ||
 	    !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
 		return 1;
 	spin_unlock_bh(&qdio->req_q_lock);
@@ -289,25 +252,25 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
  */
 int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
 {
-	struct zfcp_qdio_queue *req_q = &qdio->req_q;
-	int first = q_req->sbal_first;
-	int count = q_req->sbal_number;
 	int retval;
-	unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
+	u8 sbal_number = q_req->sbal_number;
 
 	zfcp_qdio_account(qdio);
 
-	retval = do_QDIO(qdio->adapter->ccw_device, qdio_flags, 0, first,
-			 count);
+	retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
+			 q_req->sbal_first, sbal_number);
+
 	if (unlikely(retval)) {
-		zfcp_qdio_zero_sbals(req_q->sbal, first, count);
+		zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
+				     sbal_number);
 		return retval;
 	}
 
 	/* account for transferred buffers */
-	atomic_sub(count, &req_q->count);
-	req_q->first += count;
-	req_q->first %= QDIO_MAX_BUFFERS_PER_Q;
+	atomic_sub(sbal_number, &qdio->req_q_free);
+	qdio->req_q_idx += sbal_number;
+	qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q;
+
 	return 0;
 }
 
@@ -329,8 +292,8 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
 	id->input_handler = zfcp_qdio_int_resp;
 	id->output_handler = zfcp_qdio_int_req;
 	id->int_parm = (unsigned long) qdio;
-	id->input_sbal_addr_array = (void **) (qdio->resp_q.sbal);
-	id->output_sbal_addr_array = (void **) (qdio->req_q.sbal);
+	id->input_sbal_addr_array = (void **) (qdio->res_q);
+	id->output_sbal_addr_array = (void **) (qdio->req_q);
 
 }
 /**
@@ -343,8 +306,8 @@ static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
 {
 	struct qdio_initialize init_data;
 
-	if (zfcp_qdio_buffers_enqueue(qdio->req_q.sbal) ||
-	    zfcp_qdio_buffers_enqueue(qdio->resp_q.sbal))
+	if (zfcp_qdio_buffers_enqueue(qdio->req_q) ||
+	    zfcp_qdio_buffers_enqueue(qdio->res_q))
 		return -ENOMEM;
 
 	zfcp_qdio_setup_init_data(&init_data, qdio);
@@ -358,34 +321,30 @@ static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
  */
 void zfcp_qdio_close(struct zfcp_qdio *qdio)
 {
-	struct zfcp_qdio_queue *req_q;
-	int first, count;
+	struct zfcp_adapter *adapter = qdio->adapter;
+	int idx, count;
 
-	if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
+	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
 		return;
 
 	/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
-	req_q = &qdio->req_q;
 	spin_lock_bh(&qdio->req_q_lock);
-	atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
+	atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
 	spin_unlock_bh(&qdio->req_q_lock);
 
 	wake_up(&qdio->req_q_wq);
 
-	qdio_shutdown(qdio->adapter->ccw_device,
-		      QDIO_FLAG_CLEANUP_USING_CLEAR);
+	qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
 
 	/* cleanup used outbound sbals */
-	count = atomic_read(&req_q->count);
+	count = atomic_read(&qdio->req_q_free);
 	if (count < QDIO_MAX_BUFFERS_PER_Q) {
-		first = (req_q->first + count) % QDIO_MAX_BUFFERS_PER_Q;
+		idx = (qdio->req_q_idx + count) % QDIO_MAX_BUFFERS_PER_Q;
 		count = QDIO_MAX_BUFFERS_PER_Q - count;
-		zfcp_qdio_zero_sbals(req_q->sbal, first, count);
+		zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
 	}
-	req_q->first = 0;
-	atomic_set(&req_q->count, 0);
-	qdio->resp_q.first = 0;
-	atomic_set(&qdio->resp_q.count, 0);
+	qdio->req_q_idx = 0;
+	atomic_set(&qdio->req_q_free, 0);
 }
 
 /**
@@ -397,10 +356,11 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
 {
 	struct qdio_buffer_element *sbale;
 	struct qdio_initialize init_data;
-	struct ccw_device *cdev = qdio->adapter->ccw_device;
+	struct zfcp_adapter *adapter = qdio->adapter;
+	struct ccw_device *cdev = adapter->ccw_device;
 	int cc;
 
-	if (atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
+	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
 		return -EIO;
 
 	zfcp_qdio_setup_init_data(&init_data, qdio);
@@ -412,19 +372,18 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
 		goto failed_qdio;
 
 	for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
-		sbale = &(qdio->resp_q.sbal[cc]->element[0]);
+		sbale = &(qdio->res_q[cc]->element[0]);
 		sbale->length = 0;
 		sbale->flags = SBAL_FLAGS_LAST_ENTRY;
 		sbale->addr = NULL;
 	}
 
-	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0,
-		    QDIO_MAX_BUFFERS_PER_Q))
+	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
 		goto failed_qdio;
 
 	/* set index of first avalable SBALS / number of available SBALS */
-	qdio->req_q.first = 0;
-	atomic_set(&qdio->req_q.count, QDIO_MAX_BUFFERS_PER_Q);
+	qdio->req_q_idx = 0;
+	atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
 
 	return 0;
 
@@ -438,7 +397,6 @@ failed_establish:
 
 void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
 {
-	struct qdio_buffer **sbal_req, **sbal_resp;
 	int p;
 
 	if (!qdio)
@@ -447,12 +405,9 @@ void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
 	if (qdio->adapter->ccw_device)
 		qdio_free(qdio->adapter->ccw_device);
 
-	sbal_req = qdio->req_q.sbal;
-	sbal_resp = qdio->resp_q.sbal;
-
 	for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) {
-		free_page((unsigned long) sbal_req[p]);
-		free_page((unsigned long) sbal_resp[p]);
+		free_page((unsigned long) qdio->req_q[p]);
+		free_page((unsigned long) qdio->res_q[p]);
 	}
 
 	kfree(qdio);
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
index 8bb00545f19c..10d0df99dbf4 100644
--- a/drivers/s390/scsi/zfcp_qdio.h
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -28,21 +28,11 @@
 	(ZFCP_QDIO_MAX_SBALS_PER_REQ * ZFCP_QDIO_MAX_SBALES_PER_SBAL - 2)
 
 /**
- * struct zfcp_qdio_queue - qdio queue buffer, zfcp index and free count
- * @sbal: qdio buffers
- * @first: index of next free buffer in queue
- * @count: number of free buffers in queue
- */
-struct zfcp_qdio_queue {
-	struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
-	u8 first;
-	atomic_t count;
-};
-
-/**
  * struct zfcp_qdio - basic qdio data structure
- * @resp_q: response queue
+ * @res_q: response queue
  * @req_q: request queue
+ * @req_q_idx: index of next free buffer
+ * @req_q_free: number of free buffers in queue
  * @stat_lock: lock to protect req_q_util and req_q_time
  * @req_q_lock: lock to serialize access to request queue
  * @req_q_time: time of last fill level change
@@ -52,8 +42,10 @@ struct zfcp_qdio_queue {
  * @adapter: adapter used in conjunction with this qdio structure
  */
 struct zfcp_qdio {
-	struct zfcp_qdio_queue resp_q;
-	struct zfcp_qdio_queue req_q;
+	struct qdio_buffer *res_q[QDIO_MAX_BUFFERS_PER_Q];
+	struct qdio_buffer *req_q[QDIO_MAX_BUFFERS_PER_Q];
+	u8 req_q_idx;
+	atomic_t req_q_free;
 	spinlock_t stat_lock;
 	spinlock_t req_q_lock;
 	unsigned long long req_q_time;
@@ -73,7 +65,6 @@ struct zfcp_qdio {
  * @sbale_curr: current sbale at creation of this request
  * @sbal_response: sbal used in interrupt
  * @qdio_outb_usage: usage of outbound queue
- * @qdio_inb_usage: usage of inbound queue
  */
 struct zfcp_qdio_req {
 	u32 sbtype;
@@ -84,22 +75,9 @@ struct zfcp_qdio_req {
 	u8 sbale_curr;
 	u8 sbal_response;
 	u16 qdio_outb_usage;
-	u16 qdio_inb_usage;
 };
 
 /**
- * zfcp_qdio_sbale - return pointer to sbale in qdio queue
- * @q: queue where to find sbal
- * @sbal_idx: sbal index in queue
- * @sbale_idx: sbale index in sbal
- */
-static inline struct qdio_buffer_element *
-zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
-{
-	return &q->sbal[sbal_idx]->element[sbale_idx];
-}
-
-/**
  * zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request
  * @qdio: pointer to struct zfcp_qdio
  * @q_rec: pointer to struct zfcp_qdio_req
@@ -108,7 +86,7 @@ zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
 static inline struct qdio_buffer_element *
 zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
 {
-	return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, 0);
+	return &qdio->req_q[q_req->sbal_last]->element[0];
 }
 
 /**
@@ -120,8 +98,7 @@ zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
 static inline struct qdio_buffer_element *
 zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
 {
-	return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last,
-			       q_req->sbale_curr);
+	return &qdio->req_q[q_req->sbal_last]->element[q_req->sbale_curr];
 }
 
 /**
@@ -142,25 +119,25 @@ void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
 			unsigned long req_id, u32 sbtype, void *data, u32 len)
 {
 	struct qdio_buffer_element *sbale;
-	int count = min(atomic_read(&qdio->req_q.count),
+	int count = min(atomic_read(&qdio->req_q_free),
 			ZFCP_QDIO_MAX_SBALS_PER_REQ);
 
-	q_req->sbal_first = q_req->sbal_last = qdio->req_q.first;
+	q_req->sbal_first = q_req->sbal_last = qdio->req_q_idx;
 	q_req->sbal_number = 1;
 	q_req->sbtype = sbtype;
+	q_req->sbale_curr = 1;
 	q_req->sbal_limit = (q_req->sbal_first + count - 1)
 			% QDIO_MAX_BUFFERS_PER_Q;
 
 	sbale = zfcp_qdio_sbale_req(qdio, q_req);
 	sbale->addr = (void *) req_id;
-	sbale->flags |= SBAL_FLAGS0_COMMAND;
-	sbale->flags |= sbtype;
+	sbale->flags = SBAL_FLAGS0_COMMAND | sbtype;
 
-	q_req->sbale_curr = 1;
+	if (unlikely(!data))
+		return;
 	sbale++;
 	sbale->addr = data;
-	if (likely(data))
-		sbale->length = len;
+	sbale->length = len;
 }
 
 /**
@@ -232,7 +209,7 @@ static inline
 void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
 			  struct zfcp_qdio_req *q_req, int max_sbals)
 {
-	int count = min(atomic_read(&qdio->req_q.count), max_sbals);
+	int count = min(atomic_read(&qdio->req_q_free), max_sbals);
 
 	q_req->sbal_limit = (q_req->sbal_first + count - 1) %
 			    QDIO_MAX_BUFFERS_PER_Q;