author    Sebastian Ott <sebott@linux.vnet.ibm.com>    2014-12-05 10:43:58 -0500
committer Martin Schwidefsky <schwidefsky@de.ibm.com>  2014-12-08 03:42:46 -0500
commit    bbc610a96524fbfa4ed38c4b1fc6348a1169f358 (patch)
tree      dddbd956b535c2672bfc19f4679dcffac095a77d /drivers/s390
parent    de88d0d28fe932637eb5b7ebf9e638256cf07979 (diff)
s390/scm_block: handle multiple requests in one HW request
Handle up to 8 block layer requests per HW request. These requests can be processed in parallel on the device, leading to better throughput (and fewer interrupts). The overhead for the additional requests is small, since we don't blindly allocate new aidaws but try to use what's left of the previous one.

Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
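For context on that aidaw reuse: each scm_request owns one AOB page, aidaws are placed in the unused msb slots at the end of that page, and a fresh page is only taken from the mempool when the leftover space cannot address the next request. Below is a minimal sketch of that fetch path. scm_aidaw_fetch() and aidaw_pool do appear in the diff, but the bodies shown here and the scm_aidaw_bytes() helper are an approximation based on the parent commit, not part of this patch.

/* Sketch only -- an approximation of the aidaw fetch/reuse logic. */
static unsigned long scm_aidaw_bytes(struct aidaw *aidaw)
{
	unsigned long _aidaw = (unsigned long) aidaw;
	unsigned long bytes = ALIGN(_aidaw, PAGE_SIZE) - _aidaw;

	/* Remaining aidaw slots on this page, expressed as addressable data bytes. */
	return (bytes / sizeof(*aidaw)) * PAGE_SIZE;
}

static struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes)
{
	struct aidaw *aidaw;
	struct page *page;

	/* Reuse what's left of the previous aidaw list if this request still fits. */
	if (scm_aidaw_bytes(scmrq->next_aidaw) >= bytes)
		return scmrq->next_aidaw;

	/* Otherwise fall back to a fresh page from the aidaw mempool. */
	page = mempool_alloc(aidaw_pool, GFP_ATOMIC);
	if (!page)
		return NULL;
	aidaw = page_address(page);
	memset(aidaw, 0, PAGE_SIZE);
	return aidaw;
}

After filling in an msb, scm_request_prepare() advances scmrq->next_aidaw past the entries it consumed (visible below as the added line "scmrq->next_aidaw = aidaw;"), so the next block layer request packed into the same AOB can pick up where the previous one left off.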
Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/block/scm_blk.c          145
-rw-r--r--  drivers/s390/block/scm_blk.h             3
-rw-r--r--  drivers/s390/block/scm_blk_cluster.c    47
3 files changed, 132 insertions(+), 63 deletions(-)
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index f5c369ce7e73..cd27cb92ac6d 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -117,13 +117,19 @@ out:
 
 static void scm_request_done(struct scm_request *scmrq)
 {
-	struct msb *msb = &scmrq->aob->msb[0];
-	u64 aidaw = msb->data_addr;
 	unsigned long flags;
+	struct msb *msb;
+	u64 aidaw;
+	int i;
 
-	if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
-	    IS_ALIGNED(aidaw, PAGE_SIZE))
-		mempool_free(virt_to_page(aidaw), aidaw_pool);
+	for (i = 0; i < SCM_RQ_PER_IO && scmrq->request[i]; i++) {
+		msb = &scmrq->aob->msb[i];
+		aidaw = msb->data_addr;
+
+		if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
+		    IS_ALIGNED(aidaw, PAGE_SIZE))
+			mempool_free(virt_to_page(aidaw), aidaw_pool);
+	}
 
 	spin_lock_irqsave(&list_lock, flags);
 	list_add(&scmrq->list, &inactive_requests);
@@ -167,51 +173,57 @@ static int scm_request_prepare(struct scm_request *scmrq)
 {
 	struct scm_blk_dev *bdev = scmrq->bdev;
 	struct scm_device *scmdev = bdev->gendisk->private_data;
-	struct msb *msb = &scmrq->aob->msb[0];
+	int pos = scmrq->aob->request.msb_count;
+	struct msb *msb = &scmrq->aob->msb[pos];
+	struct request *req = scmrq->request[pos];
 	struct req_iterator iter;
 	struct aidaw *aidaw;
 	struct bio_vec bv;
 
-	aidaw = scm_aidaw_fetch(scmrq, blk_rq_bytes(scmrq->request));
+	aidaw = scm_aidaw_fetch(scmrq, blk_rq_bytes(req));
 	if (!aidaw)
 		return -ENOMEM;
 
 	msb->bs = MSB_BS_4K;
-	scmrq->aob->request.msb_count = 1;
-	msb->scm_addr = scmdev->address +
-		((u64) blk_rq_pos(scmrq->request) << 9);
-	msb->oc = (rq_data_dir(scmrq->request) == READ) ?
-		  MSB_OC_READ : MSB_OC_WRITE;
+	scmrq->aob->request.msb_count++;
+	msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
+	msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
 	msb->flags |= MSB_FLAG_IDA;
 	msb->data_addr = (u64) aidaw;
 
-	rq_for_each_segment(bv, scmrq->request, iter) {
+	rq_for_each_segment(bv, req, iter) {
 		WARN_ON(bv.bv_offset);
 		msb->blk_count += bv.bv_len >> 12;
 		aidaw->data_addr = (u64) page_address(bv.bv_page);
 		aidaw++;
 	}
 
+	scmrq->next_aidaw = aidaw;
 	return 0;
 }
 
+static inline void scm_request_set(struct scm_request *scmrq,
+				   struct request *req)
+{
+	scmrq->request[scmrq->aob->request.msb_count] = req;
+}
+
 static inline void scm_request_init(struct scm_blk_dev *bdev,
-				    struct scm_request *scmrq,
-				    struct request *req)
+				    struct scm_request *scmrq)
 {
 	struct aob_rq_header *aobrq = to_aobrq(scmrq);
 	struct aob *aob = scmrq->aob;
 
+	memset(scmrq->request, 0, sizeof(scmrq->request));
 	memset(aob, 0, sizeof(*aob));
 	aobrq->scmdev = bdev->scmdev;
 	aob->request.cmd_code = ARQB_CMD_MOVE;
 	aob->request.data = (u64) aobrq;
-	scmrq->request = req;
 	scmrq->bdev = bdev;
 	scmrq->retries = 4;
 	scmrq->error = 0;
 	/* We don't use all msbs - place aidaws at the end of the aob page. */
-	scmrq->next_aidaw = (void *) &aob->msb[1];
+	scmrq->next_aidaw = (void *) &aob->msb[SCM_RQ_PER_IO];
 	scm_request_cluster_init(scmrq);
 }
 
@@ -227,9 +239,12 @@ static void scm_ensure_queue_restart(struct scm_blk_dev *bdev)
 void scm_request_requeue(struct scm_request *scmrq)
 {
 	struct scm_blk_dev *bdev = scmrq->bdev;
+	int i;
 
 	scm_release_cluster(scmrq);
-	blk_requeue_request(bdev->rq, scmrq->request);
+	for (i = 0; i < SCM_RQ_PER_IO && scmrq->request[i]; i++)
+		blk_requeue_request(bdev->rq, scmrq->request[i]);
+
 	atomic_dec(&bdev->queued_reqs);
 	scm_request_done(scmrq);
 	scm_ensure_queue_restart(bdev);
@@ -238,20 +253,41 @@ void scm_request_requeue(struct scm_request *scmrq)
 void scm_request_finish(struct scm_request *scmrq)
 {
 	struct scm_blk_dev *bdev = scmrq->bdev;
+	int i;
 
 	scm_release_cluster(scmrq);
-	blk_end_request_all(scmrq->request, scmrq->error);
+	for (i = 0; i < SCM_RQ_PER_IO && scmrq->request[i]; i++)
+		blk_end_request_all(scmrq->request[i], scmrq->error);
+
 	atomic_dec(&bdev->queued_reqs);
 	scm_request_done(scmrq);
 }
 
+static int scm_request_start(struct scm_request *scmrq)
+{
+	struct scm_blk_dev *bdev = scmrq->bdev;
+	int ret;
+
+	atomic_inc(&bdev->queued_reqs);
+	if (!scmrq->aob->request.msb_count) {
+		scm_request_requeue(scmrq);
+		return -EINVAL;
+	}
+
+	ret = eadm_start_aob(scmrq->aob);
+	if (ret) {
+		SCM_LOG(5, "no subchannel");
+		scm_request_requeue(scmrq);
+	}
+	return ret;
+}
+
 static void scm_blk_request(struct request_queue *rq)
 {
 	struct scm_device *scmdev = rq->queuedata;
 	struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
-	struct scm_request *scmrq;
+	struct scm_request *scmrq = NULL;
 	struct request *req;
-	int ret;
 
 	while ((req = blk_peek_request(rq))) {
 		if (req->cmd_type != REQ_TYPE_FS) {
@@ -261,47 +297,64 @@ static void scm_blk_request(struct request_queue *rq)
 			continue;
 		}
 
-		if (!scm_permit_request(bdev, req)) {
-			scm_ensure_queue_restart(bdev);
-			return;
-		}
-		scmrq = scm_request_fetch();
+		if (!scm_permit_request(bdev, req))
+			goto out;
+
 		if (!scmrq) {
-			SCM_LOG(5, "no request");
-			scm_ensure_queue_restart(bdev);
-			return;
+			scmrq = scm_request_fetch();
+			if (!scmrq) {
+				SCM_LOG(5, "no request");
+				goto out;
+			}
+			scm_request_init(bdev, scmrq);
 		}
-		scm_request_init(bdev, scmrq, req);
+		scm_request_set(scmrq, req);
+
 		if (!scm_reserve_cluster(scmrq)) {
 			SCM_LOG(5, "cluster busy");
+			scm_request_set(scmrq, NULL);
+			if (scmrq->aob->request.msb_count)
+				goto out;
+
 			scm_request_done(scmrq);
 			return;
 		}
+
 		if (scm_need_cluster_request(scmrq)) {
-			atomic_inc(&bdev->queued_reqs);
-			blk_start_request(req);
-			scm_initiate_cluster_request(scmrq);
-			return;
+			if (scmrq->aob->request.msb_count) {
+				/* Start cluster requests separately. */
+				scm_request_set(scmrq, NULL);
+				if (scm_request_start(scmrq))
+					return;
+			} else {
+				atomic_inc(&bdev->queued_reqs);
+				blk_start_request(req);
+				scm_initiate_cluster_request(scmrq);
+			}
+			scmrq = NULL;
+			continue;
 		}
 
 		if (scm_request_prepare(scmrq)) {
-			SCM_LOG(5, "no aidaw");
-			scm_release_cluster(scmrq);
-			scm_request_done(scmrq);
-			scm_ensure_queue_restart(bdev);
-			return;
+			SCM_LOG(5, "aidaw alloc failed");
+			scm_request_set(scmrq, NULL);
+			goto out;
 		}
-
-		atomic_inc(&bdev->queued_reqs);
 		blk_start_request(req);
 
-		ret = eadm_start_aob(scmrq->aob);
-		if (ret) {
-			SCM_LOG(5, "no subchannel");
-			scm_request_requeue(scmrq);
-			return;
-		}
+		if (scmrq->aob->request.msb_count < SCM_RQ_PER_IO)
+			continue;
+
+		if (scm_request_start(scmrq))
+			return;
+
+		scmrq = NULL;
 	}
+out:
+	if (scmrq)
+		scm_request_start(scmrq);
+	else
+		scm_ensure_queue_restart(bdev);
 }
 
 static void __scmrq_log_error(struct scm_request *scmrq)
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
index 6334e1609208..3dae0a3570ce 100644
--- a/drivers/s390/block/scm_blk.h
+++ b/drivers/s390/block/scm_blk.h
@@ -11,6 +11,7 @@
 #include <asm/eadm.h>
 
 #define SCM_NR_PARTS 8
+#define SCM_RQ_PER_IO 8
 #define SCM_QUEUE_DELAY 5
 
 struct scm_blk_dev {
@@ -31,7 +32,7 @@ struct scm_blk_dev {
 struct scm_request {
 	struct scm_blk_dev *bdev;
 	struct aidaw *next_aidaw;
-	struct request *request;
+	struct request *request[SCM_RQ_PER_IO];
 	struct aob *aob;
 	struct list_head list;
 	u8 retries;
diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c
index 2fd01320b978..09db45296eed 100644
--- a/drivers/s390/block/scm_blk_cluster.c
+++ b/drivers/s390/block/scm_blk_cluster.c
@@ -57,39 +57,52 @@ void scm_request_cluster_init(struct scm_request *scmrq)
 	scmrq->cluster.state = CLUSTER_NONE;
 }
 
-static bool clusters_intersect(struct scm_request *A, struct scm_request *B)
+static bool clusters_intersect(struct request *A, struct request *B)
 {
 	unsigned long firstA, lastA, firstB, lastB;
 
-	firstA = ((u64) blk_rq_pos(A->request) << 9) / CLUSTER_SIZE;
-	lastA = (((u64) blk_rq_pos(A->request) << 9) +
-		blk_rq_bytes(A->request) - 1) / CLUSTER_SIZE;
+	firstA = ((u64) blk_rq_pos(A) << 9) / CLUSTER_SIZE;
+	lastA = (((u64) blk_rq_pos(A) << 9) +
+		blk_rq_bytes(A) - 1) / CLUSTER_SIZE;
 
-	firstB = ((u64) blk_rq_pos(B->request) << 9) / CLUSTER_SIZE;
-	lastB = (((u64) blk_rq_pos(B->request) << 9) +
-		blk_rq_bytes(B->request) - 1) / CLUSTER_SIZE;
+	firstB = ((u64) blk_rq_pos(B) << 9) / CLUSTER_SIZE;
+	lastB = (((u64) blk_rq_pos(B) << 9) +
+		blk_rq_bytes(B) - 1) / CLUSTER_SIZE;
 
 	return (firstB <= lastA && firstA <= lastB);
 }
 
 bool scm_reserve_cluster(struct scm_request *scmrq)
 {
+	struct request *req = scmrq->request[scmrq->aob->request.msb_count];
 	struct scm_blk_dev *bdev = scmrq->bdev;
 	struct scm_request *iter;
+	int pos, add = 1;
 
 	if (write_cluster_size == 0)
 		return true;
 
 	spin_lock(&bdev->lock);
 	list_for_each_entry(iter, &bdev->cluster_list, cluster.list) {
-		if (clusters_intersect(scmrq, iter) &&
-		    (rq_data_dir(scmrq->request) == WRITE ||
-		     rq_data_dir(iter->request) == WRITE)) {
-			spin_unlock(&bdev->lock);
-			return false;
+		if (iter == scmrq) {
+			/*
+			 * We don't have to use clusters_intersect here, since
+			 * cluster requests are always started separately.
+			 */
+			add = 0;
+			continue;
+		}
+		for (pos = 0; pos <= iter->aob->request.msb_count; pos++) {
+			if (clusters_intersect(req, iter->request[pos]) &&
+			    (rq_data_dir(req) == WRITE ||
+			     rq_data_dir(iter->request[pos]) == WRITE)) {
+				spin_unlock(&bdev->lock);
+				return false;
+			}
 		}
 	}
-	list_add(&scmrq->cluster.list, &bdev->cluster_list);
+	if (add)
+		list_add(&scmrq->cluster.list, &bdev->cluster_list);
 	spin_unlock(&bdev->lock);
 
 	return true;
@@ -118,7 +131,7 @@ static int scm_prepare_cluster_request(struct scm_request *scmrq)
 {
 	struct scm_blk_dev *bdev = scmrq->bdev;
 	struct scm_device *scmdev = bdev->gendisk->private_data;
-	struct request *req = scmrq->request;
+	struct request *req = scmrq->request[0];
 	struct msb *msb = &scmrq->aob->msb[0];
 	struct req_iterator iter;
 	struct aidaw *aidaw;
@@ -183,10 +196,12 @@ static int scm_prepare_cluster_request(struct scm_request *scmrq)
 
 bool scm_need_cluster_request(struct scm_request *scmrq)
 {
-	if (rq_data_dir(scmrq->request) == READ)
+	int pos = scmrq->aob->request.msb_count;
+
+	if (rq_data_dir(scmrq->request[pos]) == READ)
 		return false;
 
-	return blk_rq_bytes(scmrq->request) < CLUSTER_SIZE;
+	return blk_rq_bytes(scmrq->request[pos]) < CLUSTER_SIZE;
 }
 
 /* Called with queue lock held. */