author    Sebastian Ott <sebott@linux.vnet.ibm.com>      2014-12-05 10:32:13 -0500
committer Martin Schwidefsky <schwidefsky@de.ibm.com>    2014-12-08 03:42:43 -0500
commit    9d4df77fab7347a74a9938521ffad8d8fab2671d (patch)
tree      fa58349acb12cab91c219940650dc03c1d6061a2 /drivers/s390
parent    e2578b82c4dfb0d339e25abc57ef6d6c3a932ff6 (diff)
s390/scm_block: use mempool to manage aidaw requests
We currently use one preallocated page per HW request to store aidaws.
With this patch we use a mempool to allocate an aidaw page whenever we
need it.

Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
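Background on the pattern: a mempool guarantees a minimum reserve of
preallocated elements, so allocations can fall back to that reserve when
the page allocator fails. Below is a minimal stand-alone sketch of the
page-pool usage this patch adopts. The example_* helpers are illustrative
only and not part of the patch; mempool_create_page_pool(),
mempool_alloc(), mempool_free(), page_address() and virt_to_page() are
the actual kernel interfaces used.

#include <linux/mempool.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/errno.h>

static mempool_t *example_pool;

/* Create a pool that keeps at least nr order-0 pages in reserve. */
static int example_pool_init(unsigned int nr)
{
	example_pool = mempool_create_page_pool(nr, 0);
	return example_pool ? 0 : -ENOMEM;
}

/* Take one page; GFP_ATOMIC because callers may hold the queue lock. */
static void *example_page_alloc(void)
{
	struct page *page = mempool_alloc(example_pool, GFP_ATOMIC);

	return page ? page_address(page) : NULL;
}

/* Return the page to the pool, refilling the reserve. */
static void example_page_free(void *addr)
{
	mempool_free(virt_to_page(addr), example_pool);
}

Note that mempool_alloc() with GFP_ATOMIC cannot enter direct reclaim
and may still return NULL once the reserve is exhausted; that is why
scm_request_prepare() and scm_prepare_cluster_request() below now return
-ENOMEM, and callers requeue the request or restart the queue.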
Diffstat (limited to 'drivers/s390')
-rw-r--r--    drivers/s390/block/scm_blk.c            45
-rw-r--r--    drivers/s390/block/scm_blk.h             3
-rw-r--r--    drivers/s390/block/scm_blk_cluster.c    19
3 files changed, 55 insertions(+), 12 deletions(-)
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 56046ab39629..5b2abadea094 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -10,6 +10,7 @@
 
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
+#include <linux/mempool.h>
 #include <linux/module.h>
 #include <linux/blkdev.h>
 #include <linux/genhd.h>
@@ -20,6 +21,7 @@
 
 debug_info_t *scm_debug;
 static int scm_major;
+static mempool_t *aidaw_pool;
 static DEFINE_SPINLOCK(list_lock);
 static LIST_HEAD(inactive_requests);
 static unsigned int nr_requests = 64;
@@ -36,7 +38,6 @@ static void __scm_free_rq(struct scm_request *scmrq)
 	struct aob_rq_header *aobrq = to_aobrq(scmrq);
 
 	free_page((unsigned long) scmrq->aob);
-	free_page((unsigned long) scmrq->aidaw);
 	__scm_free_rq_cluster(scmrq);
 	kfree(aobrq);
 }
@@ -53,6 +54,8 @@ static void scm_free_rqs(void)
 		__scm_free_rq(scmrq);
 	}
 	spin_unlock_irq(&list_lock);
+
+	mempool_destroy(aidaw_pool);
 }
 
 static int __scm_alloc_rq(void)
@@ -65,9 +68,8 @@ static int __scm_alloc_rq(void)
 		return -ENOMEM;
 
 	scmrq = (void *) aobrq->data;
-	scmrq->aidaw = (void *) get_zeroed_page(GFP_DMA);
 	scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
-	if (!scmrq->aob || !scmrq->aidaw) {
+	if (!scmrq->aob) {
 		__scm_free_rq(scmrq);
 		return -ENOMEM;
 	}
@@ -89,6 +91,10 @@ static int scm_alloc_rqs(unsigned int nrqs)
 {
 	int ret = 0;
 
+	aidaw_pool = mempool_create_page_pool(max(nrqs/8, 1U), 0);
+	if (!aidaw_pool)
+		return -ENOMEM;
+
 	while (nrqs-- && !ret)
 		ret = __scm_alloc_rq();
 
@@ -111,8 +117,13 @@ out:
 
 static void scm_request_done(struct scm_request *scmrq)
 {
+	struct msb *msb = &scmrq->aob->msb[0];
+	u64 aidaw = msb->data_addr;
 	unsigned long flags;
 
+	if ((msb->flags & MSB_FLAG_IDA) && aidaw)
+		mempool_free(virt_to_page(aidaw), aidaw_pool);
+
 	spin_lock_irqsave(&list_lock, flags);
 	list_add(&scmrq->list, &inactive_requests);
 	spin_unlock_irqrestore(&list_lock, flags);
@@ -123,15 +134,26 @@ static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
 	return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
 }
 
-static void scm_request_prepare(struct scm_request *scmrq)
+struct aidaw *scm_aidaw_alloc(void)
+{
+	struct page *page = mempool_alloc(aidaw_pool, GFP_ATOMIC);
+
+	return page ? page_address(page) : NULL;
+}
+
+static int scm_request_prepare(struct scm_request *scmrq)
 {
 	struct scm_blk_dev *bdev = scmrq->bdev;
 	struct scm_device *scmdev = bdev->gendisk->private_data;
-	struct aidaw *aidaw = scmrq->aidaw;
+	struct aidaw *aidaw = scm_aidaw_alloc();
 	struct msb *msb = &scmrq->aob->msb[0];
 	struct req_iterator iter;
 	struct bio_vec bv;
 
+	if (!aidaw)
+		return -ENOMEM;
+
+	memset(aidaw, 0, PAGE_SIZE);
 	msb->bs = MSB_BS_4K;
 	scmrq->aob->request.msb_count = 1;
 	msb->scm_addr = scmdev->address +
@@ -147,6 +169,8 @@ static void scm_request_prepare(struct scm_request *scmrq)
 		aidaw->data_addr = (u64) page_address(bv.bv_page);
 		aidaw++;
 	}
+
+	return 0;
 }
 
 static inline void scm_request_init(struct scm_blk_dev *bdev,
@@ -157,7 +181,6 @@ static inline void scm_request_init(struct scm_blk_dev *bdev,
 	struct aob *aob = scmrq->aob;
 
 	memset(aob, 0, sizeof(*aob));
-	memset(scmrq->aidaw, 0, PAGE_SIZE);
 	aobrq->scmdev = bdev->scmdev;
 	aob->request.cmd_code = ARQB_CMD_MOVE;
 	aob->request.data = (u64) aobrq;
@@ -236,7 +259,15 @@ static void scm_blk_request(struct request_queue *rq)
 		scm_initiate_cluster_request(scmrq);
 		return;
 	}
-	scm_request_prepare(scmrq);
+
+	if (scm_request_prepare(scmrq)) {
+		SCM_LOG(5, "no aidaw");
+		scm_release_cluster(scmrq);
+		scm_request_done(scmrq);
+		scm_ensure_queue_restart(bdev);
+		return;
+	}
+
 	atomic_inc(&bdev->queued_reqs);
 	blk_start_request(req);
 
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
index e59331e6c2e5..a315ef0e96f5 100644
--- a/drivers/s390/block/scm_blk.h
+++ b/drivers/s390/block/scm_blk.h
@@ -31,7 +31,6 @@ struct scm_blk_dev {
 struct scm_request {
 	struct scm_blk_dev *bdev;
 	struct request *request;
-	struct aidaw *aidaw;
 	struct aob *aob;
 	struct list_head list;
 	u8 retries;
@@ -55,6 +54,8 @@ void scm_blk_irq(struct scm_device *, void *, int);
 void scm_request_finish(struct scm_request *);
 void scm_request_requeue(struct scm_request *);
 
+struct aidaw *scm_aidaw_alloc(void);
+
 int scm_drv_init(void);
 void scm_drv_cleanup(void);
 
diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c
index 9aae909d47a5..4787f80e5537 100644
--- a/drivers/s390/block/scm_blk_cluster.c
+++ b/drivers/s390/block/scm_blk_cluster.c
@@ -114,14 +114,14 @@ void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev)
 	blk_queue_io_opt(bdev->rq, CLUSTER_SIZE);
 }
 
-static void scm_prepare_cluster_request(struct scm_request *scmrq)
+static int scm_prepare_cluster_request(struct scm_request *scmrq)
 {
 	struct scm_blk_dev *bdev = scmrq->bdev;
 	struct scm_device *scmdev = bdev->gendisk->private_data;
 	struct request *req = scmrq->request;
-	struct aidaw *aidaw = scmrq->aidaw;
 	struct msb *msb = &scmrq->aob->msb[0];
 	struct req_iterator iter;
+	struct aidaw *aidaw;
 	struct bio_vec bv;
 	int i = 0;
 	u64 addr;
@@ -131,6 +131,11 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
 		scmrq->cluster.state = CLUSTER_READ;
 		/* fall through */
 	case CLUSTER_READ:
+		aidaw = scm_aidaw_alloc();
+		if (!aidaw)
+			return -ENOMEM;
+
+		memset(aidaw, 0, PAGE_SIZE);
 		scmrq->aob->request.msb_count = 1;
 		msb->bs = MSB_BS_4K;
 		msb->oc = MSB_OC_READ;
@@ -153,6 +158,7 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
 
 		break;
 	case CLUSTER_WRITE:
+		aidaw = (void *) msb->data_addr;
 		msb->oc = MSB_OC_WRITE;
 
 		for (addr = msb->scm_addr;
@@ -173,6 +179,7 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
 		}
 		break;
 	}
+	return 0;
 }
 
 bool scm_need_cluster_request(struct scm_request *scmrq)
@@ -186,9 +193,13 @@ bool scm_need_cluster_request(struct scm_request *scmrq)
 /* Called with queue lock held. */
 void scm_initiate_cluster_request(struct scm_request *scmrq)
 {
-	scm_prepare_cluster_request(scmrq);
+	if (scm_prepare_cluster_request(scmrq))
+		goto requeue;
 	if (eadm_start_aob(scmrq->aob))
-		scm_request_requeue(scmrq);
+		goto requeue;
+	return;
+requeue:
+	scm_request_requeue(scmrq);
 }
 
 bool scm_test_cluster_request(struct scm_request *scmrq)