author     Sebastian Ott <sebott@linux.vnet.ibm.com>      2014-12-05 10:47:17 -0500
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>    2014-12-08 03:42:47 -0500
commit     8622384f138b786b9ae639e79ccfb84c7db82cbc (patch)
tree       79e1139bbc859c939a96df77a8cb78bd02755a5e /drivers/s390
parent     bbc610a96524fbfa4ed38c4b1fc6348a1169f358 (diff)
s390/scm_block: make the number of reqs per HW req configurable
Introduce a module parameter to specify the number of requests
we try to handle with one HW request.
Suggested-by: Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
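The pattern the patch introduces is a read-only module parameter whose value is range-checked once during module init. The sketch below is a minimal, hypothetical illustration of that pattern: the demo_* names and the pr_info message are invented; only nr_requests_per_io, its default of 8, and the 1..64 validation mirror the patch itself.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>

/* Default mirrors the old compile-time SCM_RQ_PER_IO value of 8. */
static unsigned int nr_requests_per_io = 8;
module_param(nr_requests_per_io, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests_per_io, "Number of requests per IO.");

/* Mirror the patch's init-time check: accept only 1..64 requests per HW request. */
static bool __init demo_params_valid(void)
{
	return nr_requests_per_io && nr_requests_per_io <= 64;
}

static int __init demo_init(void)
{
	if (!demo_params_valid())
		return -EINVAL;

	pr_info("handling up to %u requests per HW request\n",
		nr_requests_per_io);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Sketch of a range-checked, read-only module parameter.");

Because the parameter is declared S_IRUGO (read-only in sysfs), the value can only be chosen at load time, e.g. as a modprobe option or, assuming the driver is built as scm_block, via scm_block.nr_requests_per_io= on the kernel command line.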
Diffstat (limited to 'drivers/s390')
 -rw-r--r--  drivers/s390/block/scm_blk.c | 48
 -rw-r--r--  drivers/s390/block/scm_blk.h |  3
 2 files changed, 34 insertions(+), 17 deletions(-)
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index cd27cb92ac6d..75d9896deccb 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -25,10 +25,14 @@ static mempool_t *aidaw_pool;
 static DEFINE_SPINLOCK(list_lock);
 static LIST_HEAD(inactive_requests);
 static unsigned int nr_requests = 64;
+static unsigned int nr_requests_per_io = 8;
 static atomic_t nr_devices = ATOMIC_INIT(0);
 module_param(nr_requests, uint, S_IRUGO);
 MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");
 
+module_param(nr_requests_per_io, uint, S_IRUGO);
+MODULE_PARM_DESC(nr_requests_per_io, "Number of requests per IO.");
+
 MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("scm:scmdev*");
@@ -39,6 +43,7 @@ static void __scm_free_rq(struct scm_request *scmrq)
 
 	free_page((unsigned long) scmrq->aob);
 	__scm_free_rq_cluster(scmrq);
+	kfree(scmrq->request);
 	kfree(aobrq);
 }
 
@@ -69,15 +74,16 @@ static int __scm_alloc_rq(void)
 
 	scmrq = (void *) aobrq->data;
 	scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
-	if (!scmrq->aob) {
-		__scm_free_rq(scmrq);
-		return -ENOMEM;
-	}
+	if (!scmrq->aob)
+		goto free;
 
-	if (__scm_alloc_rq_cluster(scmrq)) {
-		__scm_free_rq(scmrq);
-		return -ENOMEM;
-	}
+	scmrq->request = kcalloc(nr_requests_per_io, sizeof(scmrq->request[0]),
+				 GFP_KERNEL);
+	if (!scmrq->request)
+		goto free;
+
+	if (__scm_alloc_rq_cluster(scmrq))
+		goto free;
 
 	INIT_LIST_HEAD(&scmrq->list);
 	spin_lock_irq(&list_lock);
@@ -85,6 +91,9 @@ static int __scm_alloc_rq(void)
 	spin_unlock_irq(&list_lock);
 
 	return 0;
+free:
+	__scm_free_rq(scmrq);
+	return -ENOMEM;
 }
 
 static int scm_alloc_rqs(unsigned int nrqs)
@@ -122,7 +131,7 @@ static void scm_request_done(struct scm_request *scmrq)
 	u64 aidaw;
 	int i;
 
-	for (i = 0; i < SCM_RQ_PER_IO && scmrq->request[i]; i++) {
+	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
 		msb = &scmrq->aob->msb[i];
 		aidaw = msb->data_addr;
 
@@ -214,7 +223,8 @@ static inline void scm_request_init(struct scm_blk_dev *bdev,
 	struct aob_rq_header *aobrq = to_aobrq(scmrq);
 	struct aob *aob = scmrq->aob;
 
-	memset(scmrq->request, 0, sizeof(scmrq->request));
+	memset(scmrq->request, 0,
+	       nr_requests_per_io * sizeof(scmrq->request[0]));
 	memset(aob, 0, sizeof(*aob));
 	aobrq->scmdev = bdev->scmdev;
 	aob->request.cmd_code = ARQB_CMD_MOVE;
@@ -223,7 +233,7 @@ static inline void scm_request_init(struct scm_blk_dev *bdev,
 	scmrq->retries = 4;
 	scmrq->error = 0;
 	/* We don't use all msbs - place aidaws at the end of the aob page. */
-	scmrq->next_aidaw = (void *) &aob->msb[SCM_RQ_PER_IO];
+	scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io];
 	scm_request_cluster_init(scmrq);
 }
 
@@ -242,7 +252,7 @@ void scm_request_requeue(struct scm_request *scmrq)
 	int i;
 
 	scm_release_cluster(scmrq);
-	for (i = 0; i < SCM_RQ_PER_IO && scmrq->request[i]; i++)
+	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
 		blk_requeue_request(bdev->rq, scmrq->request[i]);
 
 	atomic_dec(&bdev->queued_reqs);
@@ -256,7 +266,7 @@ void scm_request_finish(struct scm_request *scmrq)
 	int i;
 
 	scm_release_cluster(scmrq);
-	for (i = 0; i < SCM_RQ_PER_IO && scmrq->request[i]; i++)
+	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
 		blk_end_request_all(scmrq->request[i], scmrq->error);
 
 	atomic_dec(&bdev->queued_reqs);
@@ -342,7 +352,7 @@ static void scm_blk_request(struct request_queue *rq)
 		}
 		blk_start_request(req);
 
-		if (scmrq->aob->request.msb_count < SCM_RQ_PER_IO)
+		if (scmrq->aob->request.msb_count < nr_requests_per_io)
 			continue;
 
 		if (scm_request_start(scmrq))
@@ -551,11 +561,19 @@ void scm_blk_set_available(struct scm_blk_dev *bdev)
 	spin_unlock_irqrestore(&bdev->lock, flags);
 }
 
+static bool __init scm_blk_params_valid(void)
+{
+	if (!nr_requests_per_io || nr_requests_per_io > 64)
+		return false;
+
+	return scm_cluster_size_valid();
+}
+
 static int __init scm_blk_init(void)
 {
 	int ret = -EINVAL;
 
-	if (!scm_cluster_size_valid())
+	if (!scm_blk_params_valid())
 		goto out;
 
 	ret = register_blkdev(0, "scm");
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
index 3dae0a3570ce..09218cdc5129 100644
--- a/drivers/s390/block/scm_blk.h
+++ b/drivers/s390/block/scm_blk.h
@@ -11,7 +11,6 @@
 #include <asm/eadm.h>
 
 #define SCM_NR_PARTS 8
-#define SCM_RQ_PER_IO 8
 #define SCM_QUEUE_DELAY 5
 
 struct scm_blk_dev {
@@ -32,7 +31,7 @@ struct scm_blk_dev {
 struct scm_request {
 	struct scm_blk_dev *bdev;
 	struct aidaw *next_aidaw;
-	struct request *request[SCM_RQ_PER_IO];
+	struct request **request;
 	struct aob *aob;
 	struct list_head list;
 	u8 retries;