author		Sebastian Ott <sebott@linux.vnet.ibm.com>	2014-12-05 10:41:47 -0500
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2014-12-08 03:42:44 -0500
commit		de88d0d28fe932637eb5b7ebf9e638256cf07979 (patch)
tree		832876b01901d8c29c09ea170bc69f8dacfec25d /drivers/s390
parent		9d4df77fab7347a74a9938521ffad8d8fab2671d (diff)
s390/scm_block: allocate aidaw pages only when necessary
AOBs (the structure describing the HW request) need to be 4K aligned
but very little of that page is actually used. With this patch we place
aidaws at the end of the AOB page and only allocate a separate page for
aidaws when we have to (lists of aidaws must not cross page boundaries).

Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
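For a sense of scale, the following standalone sketch (user-space C, not part of the patch) reproduces the arithmetic behind the new scm_aidaw_bytes() helper. The structure sizes are assumptions based on the EADM layout in arch/s390/include/asm/eadm.h (64-byte arqb, 64-byte arsb, 32-byte msbs, 16-byte aidaws, one page-aligned 4K aob):

/*
 * Standalone sketch of the capacity check behind scm_aidaw_bytes() and
 * scm_aidaw_fetch(). All sizes below are assumptions taken from the
 * eadm.h layout, not guaranteed by this patch.
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define AIDAW_SIZE	16UL	/* assumed sizeof(struct aidaw) */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* Mirrors scm_aidaw_bytes(): how many bytes of request data the aidaws
 * between 'aidaw' and the end of its page can describe (one aidaw per
 * 4K block of data). */
static unsigned long aidaw_bytes(unsigned long aidaw)
{
	unsigned long bytes = ALIGN_UP(aidaw, PAGE_SIZE) - aidaw;

	return (bytes / AIDAW_SIZE) * PAGE_SIZE;
}

int main(void)
{
	/* next_aidaw = &aob->msb[1]: arqb (64) + arsb (64) + msb[0] (32) */
	unsigned long next_aidaw = 64 + 64 + 32;

	printf("%lu KiB covered by the AOB page tail\n",
	       aidaw_bytes(next_aidaw) / 1024);
	return 0;
}

Under those assumptions the tail of the AOB page behind msb[0] holds 246 aidaws, enough to describe roughly 984 KiB of request data per I/O, which is why the separate aidaw page is rarely needed.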
Diffstat (limited to 'drivers/s390')
-rw-r--r--	drivers/s390/block/scm_blk.c	32
-rw-r--r--	drivers/s390/block/scm_blk.h	3
-rw-r--r--	drivers/s390/block/scm_blk_cluster.c	13
3 files changed, 36 insertions(+), 12 deletions(-)
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 5b2abadea094..f5c369ce7e73 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -121,7 +121,8 @@ static void scm_request_done(struct scm_request *scmrq)
 	u64 aidaw = msb->data_addr;
 	unsigned long flags;
 
-	if ((msb->flags & MSB_FLAG_IDA) && aidaw)
+	if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
+	    IS_ALIGNED(aidaw, PAGE_SIZE))
 		mempool_free(virt_to_page(aidaw), aidaw_pool);
 
 	spin_lock_irqsave(&list_lock, flags);
@@ -134,26 +135,47 @@ static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
 	return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
 }
 
-struct aidaw *scm_aidaw_alloc(void)
+static inline struct aidaw *scm_aidaw_alloc(void)
 {
 	struct page *page = mempool_alloc(aidaw_pool, GFP_ATOMIC);
 
 	return page ? page_address(page) : NULL;
 }
 
+static inline unsigned long scm_aidaw_bytes(struct aidaw *aidaw)
+{
+	unsigned long _aidaw = (unsigned long) aidaw;
+	unsigned long bytes = ALIGN(_aidaw, PAGE_SIZE) - _aidaw;
+
+	return (bytes / sizeof(*aidaw)) * PAGE_SIZE;
+}
+
+struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes)
+{
+	struct aidaw *aidaw;
+
+	if (scm_aidaw_bytes(scmrq->next_aidaw) >= bytes)
+		return scmrq->next_aidaw;
+
+	aidaw = scm_aidaw_alloc();
+	if (aidaw)
+		memset(aidaw, 0, PAGE_SIZE);
+	return aidaw;
+}
+
 static int scm_request_prepare(struct scm_request *scmrq)
 {
 	struct scm_blk_dev *bdev = scmrq->bdev;
 	struct scm_device *scmdev = bdev->gendisk->private_data;
-	struct aidaw *aidaw = scm_aidaw_alloc();
 	struct msb *msb = &scmrq->aob->msb[0];
 	struct req_iterator iter;
+	struct aidaw *aidaw;
 	struct bio_vec bv;
 
+	aidaw = scm_aidaw_fetch(scmrq, blk_rq_bytes(scmrq->request));
 	if (!aidaw)
 		return -ENOMEM;
 
-	memset(aidaw, 0, PAGE_SIZE);
 	msb->bs = MSB_BS_4K;
 	scmrq->aob->request.msb_count = 1;
 	msb->scm_addr = scmdev->address +
@@ -188,6 +210,8 @@ static inline void scm_request_init(struct scm_blk_dev *bdev,
 	scmrq->bdev = bdev;
 	scmrq->retries = 4;
 	scmrq->error = 0;
+	/* We don't use all msbs - place aidaws at the end of the aob page. */
+	scmrq->next_aidaw = (void *) &aob->msb[1];
 	scm_request_cluster_init(scmrq);
 }
 
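The new IS_ALIGNED() test in scm_request_done() relies on an invariant the hunks above establish: aidaws carved from the AOB page start at &aob->msb[1] or later and are therefore never page aligned, while pages from aidaw_pool always are. A minimal sketch of that discrimination (the 160-byte offset is the same eadm.h layout assumption as above; the addresses are hypothetical):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Only separately allocated aidaw pages go back to the mempool; aidaws
 * in the AOB page tail are released together with the AOB itself. */
static bool needs_mempool_free(unsigned long aidaw)
{
	return aidaw != 0 && (aidaw % PAGE_SIZE) == 0;
}

int main(void)
{
	unsigned long aob = 0x10000;		/* hypothetical AOB page */
	unsigned long tail_aidaw = aob + 160;	/* &aob->msb[1], assumed */
	unsigned long pool_aidaw = 0x20000;	/* hypothetical pool page */

	printf("tail aidaw -> mempool_free: %d\n",
	       needs_mempool_free(tail_aidaw));	/* prints 0 */
	printf("pool aidaw -> mempool_free: %d\n",
	       needs_mempool_free(pool_aidaw));	/* prints 1 */
	return 0;
}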
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
index a315ef0e96f5..6334e1609208 100644
--- a/drivers/s390/block/scm_blk.h
+++ b/drivers/s390/block/scm_blk.h
@@ -30,6 +30,7 @@ struct scm_blk_dev {
 
 struct scm_request {
 	struct scm_blk_dev *bdev;
+	struct aidaw *next_aidaw;
 	struct request *request;
 	struct aob *aob;
 	struct list_head list;
@@ -54,7 +55,7 @@ void scm_blk_irq(struct scm_device *, void *, int);
 void scm_request_finish(struct scm_request *);
 void scm_request_requeue(struct scm_request *);
 
-struct aidaw *scm_aidaw_alloc(void);
+struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes);
 
 int scm_drv_init(void);
 void scm_drv_cleanup(void);
diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c
index 4787f80e5537..2fd01320b978 100644
--- a/drivers/s390/block/scm_blk_cluster.c
+++ b/drivers/s390/block/scm_blk_cluster.c
@@ -131,16 +131,9 @@ static int scm_prepare_cluster_request(struct scm_request *scmrq)
 		scmrq->cluster.state = CLUSTER_READ;
 		/* fall through */
 	case CLUSTER_READ:
-		aidaw = scm_aidaw_alloc();
-		if (!aidaw)
-			return -ENOMEM;
-
-		memset(aidaw, 0, PAGE_SIZE);
-		scmrq->aob->request.msb_count = 1;
 		msb->bs = MSB_BS_4K;
 		msb->oc = MSB_OC_READ;
 		msb->flags = MSB_FLAG_IDA;
-		msb->data_addr = (u64) aidaw;
 		msb->blk_count = write_cluster_size;
 
 		addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
@@ -151,6 +144,12 @@ static int scm_prepare_cluster_request(struct scm_request *scmrq)
 			  CLUSTER_SIZE))
 			msb->blk_count = 2 * write_cluster_size;
 
+		aidaw = scm_aidaw_fetch(scmrq, msb->blk_count * PAGE_SIZE);
+		if (!aidaw)
+			return -ENOMEM;
+
+		scmrq->aob->request.msb_count = 1;
+		msb->data_addr = (u64) aidaw;
 		for (i = 0; i < msb->blk_count; i++) {
 			aidaw->data_addr = (u64) scmrq->cluster.buf[i];
 			aidaw++;
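The cluster path requests msb->blk_count * PAGE_SIZE bytes from scm_aidaw_fetch(), i.e. one aidaw per 4K block. The following sketch shows when that still forces a fallback to a pool page (write_cluster_size is a module parameter; 128 below is a hypothetical value, and the 160-byte tail offset and 16-byte aidaw size are the same assumptions as above):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define AIDAW_SIZE	16UL	/* assumed sizeof(struct aidaw) */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* Same arithmetic as scm_aidaw_bytes() in the patch. */
static unsigned long aidaw_bytes(unsigned long aidaw)
{
	unsigned long bytes = ALIGN_UP(aidaw, PAGE_SIZE) - aidaw;

	return (bytes / AIDAW_SIZE) * PAGE_SIZE;
}

int main(void)
{
	unsigned long next_aidaw = 160;		/* &aob->msb[1], assumed */
	unsigned long write_cluster_size = 128;	/* hypothetical value */
	unsigned long blk_count;

	/* A write crossing a cluster boundary doubles blk_count, exactly
	 * as in scm_prepare_cluster_request(). */
	for (blk_count = write_cluster_size;
	     blk_count <= 2 * write_cluster_size;
	     blk_count += write_cluster_size)
		printf("blk_count %3lu: %s\n", blk_count,
		       aidaw_bytes(next_aidaw) >= blk_count * PAGE_SIZE ?
		       "aidaws fit in the AOB page tail" :
		       "separate aidaw page needed");
	return 0;
}

Note that on the fallback path scm_aidaw_fetch() hands out a single pool page without re-checking the size, so the caller is trusted to request at most one page worth of aidaws, consistent with the rule that aidaw lists must not cross page boundaries.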