author     Linus Walleij <linus.walleij@linaro.org>   2017-05-18 05:29:32 -0400
committer  Ulf Hansson <ulf.hansson@linaro.org>       2017-06-20 04:30:17 -0400
commit     304419d8a7e9204c5d19b704467b814df8c8f5b1 (patch)
tree       1cb76b5a60b0c505d69d4eb558b32a09725404c3 /drivers/mmc/core/queue.c
parent     c3dccb74be28a345a2ebcc224e41b774529b8b8f (diff)
mmc: core: Allocate per-request data using the block layer core
The mmc_queue_req is a per-request state container the MMC core uses to carry bounce buffers, pointers to asynchronous requests and so on. It is currently allocated as a static array of objects; as a request comes in, a mmc_queue_req is assigned to it and used for the lifetime of the request.

This is backwards compared to how other block layer drivers work: they usually let the block core provide a per-request struct that gets allocated right behind the struct request, and which can be obtained using the blk_mq_rq_to_pdu() helper. (The _mq_ infix in this function name is misleading: it is used by both the old and the MQ block layer.) The per-request struct is allocated with the size stored in the queue variable .cmd_size, initialized using the .init_rq_fn() callback and cleaned up using .exit_rq_fn().

This patch makes the MMC core rely on that block layer mechanism to allocate the per-request mmc_queue_req state container. Doing so makes a lot of complicated queue handling go away. We only need to keep .qcnt, which counts how many requests are currently being processed by the MMC layer; the MQ block layer will replace that as well once we transition to it.

This refactoring is necessary to move the ioctl() operations into custom block layer requests tagged with REQ_OP_DRV_[IN|OUT], instead of the custom code using the BigMMCHostLock that we have today: those require that per-request data be easily obtainable from a request after creating a custom request with e.g.:

    struct request *rq = blk_get_request(q, REQ_OP_DRV_IN, __GFP_RECLAIM);
    struct mmc_queue_req *mq_rq = req_to_mq_rq(rq);

That is not possible with the current construction: the request is not assigned its per-request state container until it finally enters the MMC queue, which is far too late for custom requests.

Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
[Ulf: Folded in the fix to drop a call to blk_cleanup_queue()]
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Tested-by: Heiner Kallweit <hkallweit1@gmail.com>
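For readers less familiar with the legacy (pre-blk-mq) hooks the message refers to, the following is a minimal sketch of the mechanism using a hypothetical driver. The foo_* names and the fixed 512-byte buffer are invented for illustration only; the actual MMC wiring is in the mmc_init_queue() hunk further down.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical per-request state, allocated by the block core right
 * behind every struct request on queues that set .cmd_size. */
struct foo_queue_req {
        void *bounce_buf;
};

/* Called by the block core whenever it allocates a request for this queue. */
static int foo_init_request(struct request_queue *q, struct request *req,
                            gfp_t gfp)
{
        struct foo_queue_req *fq = blk_mq_rq_to_pdu(req);

        fq->bounce_buf = kmalloc(512, gfp);
        return fq->bounce_buf ? 0 : -ENOMEM;
}

/* Called by the block core when the request is torn down. */
static void foo_exit_request(struct request_queue *q, struct request *req)
{
        struct foo_queue_req *fq = blk_mq_rq_to_pdu(req);

        kfree(fq->bounce_buf);
}

/* Wire the hooks on a queue obtained from blk_alloc_queue(), before
 * blk_init_allocated_queue(), the same pattern mmc_init_queue() adopts
 * in the diff below. */
static int foo_setup_queue(struct request_queue *q, request_fn_proc *rfn,
                           spinlock_t *lock)
{
        q->queue_lock = lock;
        q->request_fn = rfn;
        q->cmd_size = sizeof(struct foo_queue_req);
        q->init_rq_fn = foo_init_request;
        q->exit_rq_fn = foo_exit_request;

        return blk_init_allocated_queue(q);
}

The key point is that blk_mq_rq_to_pdu() simply returns the memory immediately following the struct request, so the per-request data exists as soon as the request itself does.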
Diffstat (limited to 'drivers/mmc/core/queue.c')
-rw-r--r--   drivers/mmc/core/queue.c   220
1 file changed, 57 insertions(+), 163 deletions(-)
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 70ba7f94c706..d6c7b4cde4db 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -40,35 +40,6 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
 	return BLKPREP_OK;
 }
 
-struct mmc_queue_req *mmc_queue_req_find(struct mmc_queue *mq,
-					 struct request *req)
-{
-	struct mmc_queue_req *mqrq;
-	int i = ffz(mq->qslots);
-
-	if (i >= mq->qdepth)
-		return NULL;
-
-	mqrq = &mq->mqrq[i];
-	WARN_ON(mqrq->req || mq->qcnt >= mq->qdepth ||
-		test_bit(mqrq->task_id, &mq->qslots));
-	mqrq->req = req;
-	mq->qcnt += 1;
-	__set_bit(mqrq->task_id, &mq->qslots);
-
-	return mqrq;
-}
-
-void mmc_queue_req_free(struct mmc_queue *mq,
-			struct mmc_queue_req *mqrq)
-{
-	WARN_ON(!mqrq->req || mq->qcnt < 1 ||
-		!test_bit(mqrq->task_id, &mq->qslots));
-	mqrq->req = NULL;
-	mq->qcnt -= 1;
-	__clear_bit(mqrq->task_id, &mq->qslots);
-}
-
 static int mmc_queue_thread(void *d)
 {
 	struct mmc_queue *mq = d;
@@ -149,11 +120,11 @@ static void mmc_request_fn(struct request_queue *q)
 		wake_up_process(mq->thread);
 }
 
-static struct scatterlist *mmc_alloc_sg(int sg_len)
+static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
 {
 	struct scatterlist *sg;
 
-	sg = kmalloc_array(sg_len, sizeof(*sg), GFP_KERNEL);
+	sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
 	if (sg)
 		sg_init_table(sg, sg_len);
 
@@ -179,80 +150,6 @@ static void mmc_queue_setup_discard(struct request_queue *q,
 		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
 }
 
-static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
-{
-	kfree(mqrq->bounce_sg);
-	mqrq->bounce_sg = NULL;
-
-	kfree(mqrq->sg);
-	mqrq->sg = NULL;
-
-	kfree(mqrq->bounce_buf);
-	mqrq->bounce_buf = NULL;
-}
-
-static void mmc_queue_reqs_free_bufs(struct mmc_queue_req *mqrq, int qdepth)
-{
-	int i;
-
-	for (i = 0; i < qdepth; i++)
-		mmc_queue_req_free_bufs(&mqrq[i]);
-}
-
-static void mmc_queue_free_mqrqs(struct mmc_queue_req *mqrq, int qdepth)
-{
-	mmc_queue_reqs_free_bufs(mqrq, qdepth);
-	kfree(mqrq);
-}
-
-static struct mmc_queue_req *mmc_queue_alloc_mqrqs(int qdepth)
-{
-	struct mmc_queue_req *mqrq;
-	int i;
-
-	mqrq = kcalloc(qdepth, sizeof(*mqrq), GFP_KERNEL);
-	if (mqrq) {
-		for (i = 0; i < qdepth; i++)
-			mqrq[i].task_id = i;
-	}
-
-	return mqrq;
-}
-
-static int mmc_queue_alloc_bounce_bufs(struct mmc_queue_req *mqrq, int qdepth,
-				       unsigned int bouncesz)
-{
-	int i;
-
-	for (i = 0; i < qdepth; i++) {
-		mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
-		if (!mqrq[i].bounce_buf)
-			return -ENOMEM;
-
-		mqrq[i].sg = mmc_alloc_sg(1);
-		if (!mqrq[i].sg)
-			return -ENOMEM;
-
-		mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512);
-		if (!mqrq[i].bounce_sg)
-			return -ENOMEM;
-	}
-
-	return 0;
-}
-
-static bool mmc_queue_alloc_bounce(struct mmc_queue_req *mqrq, int qdepth,
-				   unsigned int bouncesz)
-{
-	int ret;
-
-	ret = mmc_queue_alloc_bounce_bufs(mqrq, qdepth, bouncesz);
-	if (ret)
-		mmc_queue_reqs_free_bufs(mqrq, qdepth);
-
-	return !ret;
-}
-
 static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
 {
 	unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;
@@ -273,71 +170,61 @@ static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
 	return bouncesz;
 }
 
-static int mmc_queue_alloc_sgs(struct mmc_queue_req *mqrq, int qdepth,
-			       int max_segs)
+/**
+ * mmc_init_request() - initialize the MMC-specific per-request data
+ * @q: the request queue
+ * @req: the request
+ * @gfp: memory allocation policy
+ */
+static int mmc_init_request(struct request_queue *q, struct request *req,
+			    gfp_t gfp)
 {
-	int i;
+	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
+	struct mmc_queue *mq = q->queuedata;
+	struct mmc_card *card = mq->card;
+	struct mmc_host *host = card->host;
 
-	for (i = 0; i < qdepth; i++) {
-		mqrq[i].sg = mmc_alloc_sg(max_segs);
-		if (!mqrq[i].sg)
+	mq_rq->req = req;
+
+	if (card->bouncesz) {
+		mq_rq->bounce_buf = kmalloc(card->bouncesz, gfp);
+		if (!mq_rq->bounce_buf)
+			return -ENOMEM;
+		if (card->bouncesz > 512) {
+			mq_rq->sg = mmc_alloc_sg(1, gfp);
+			if (!mq_rq->sg)
+				return -ENOMEM;
+			mq_rq->bounce_sg = mmc_alloc_sg(card->bouncesz / 512,
+							gfp);
+			if (!mq_rq->bounce_sg)
+				return -ENOMEM;
+		}
+	} else {
+		mq_rq->bounce_buf = NULL;
+		mq_rq->bounce_sg = NULL;
+		mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
+		if (!mq_rq->sg)
 			return -ENOMEM;
 	}
 
 	return 0;
 }
 
-void mmc_queue_free_shared_queue(struct mmc_card *card)
-{
-	if (card->mqrq) {
-		mmc_queue_free_mqrqs(card->mqrq, card->qdepth);
-		card->mqrq = NULL;
-	}
-}
-
-static int __mmc_queue_alloc_shared_queue(struct mmc_card *card, int qdepth)
+static void mmc_exit_request(struct request_queue *q, struct request *req)
 {
-	struct mmc_host *host = card->host;
-	struct mmc_queue_req *mqrq;
-	unsigned int bouncesz;
-	int ret = 0;
-
-	if (card->mqrq)
-		return -EINVAL;
+	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
 
-	mqrq = mmc_queue_alloc_mqrqs(qdepth);
-	if (!mqrq)
-		return -ENOMEM;
-
-	card->mqrq = mqrq;
-	card->qdepth = qdepth;
+	/* It is OK to kfree(NULL) so this will be smooth */
+	kfree(mq_rq->bounce_sg);
+	mq_rq->bounce_sg = NULL;
 
-	bouncesz = mmc_queue_calc_bouncesz(host);
+	kfree(mq_rq->bounce_buf);
+	mq_rq->bounce_buf = NULL;
 
-	if (bouncesz && !mmc_queue_alloc_bounce(mqrq, qdepth, bouncesz)) {
-		bouncesz = 0;
-		pr_warn("%s: unable to allocate bounce buffers\n",
-			mmc_card_name(card));
-	}
+	kfree(mq_rq->sg);
+	mq_rq->sg = NULL;
 
-	card->bouncesz = bouncesz;
-
-	if (!bouncesz) {
-		ret = mmc_queue_alloc_sgs(mqrq, qdepth, host->max_segs);
-		if (ret)
-			goto out_err;
-	}
-
-	return ret;
-
-out_err:
-	mmc_queue_free_shared_queue(card);
-	return ret;
-}
-
-int mmc_queue_alloc_shared_queue(struct mmc_card *card)
-{
-	return __mmc_queue_alloc_shared_queue(card, 2);
-}
+	mq_rq->req = NULL;
 }
 
 /**
@@ -360,13 +247,21 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
 
 	mq->card = card;
-	mq->queue = blk_init_queue(mmc_request_fn, lock);
+	mq->queue = blk_alloc_queue(GFP_KERNEL);
 	if (!mq->queue)
 		return -ENOMEM;
-
-	mq->mqrq = card->mqrq;
-	mq->qdepth = card->qdepth;
+	mq->queue->queue_lock = lock;
+	mq->queue->request_fn = mmc_request_fn;
+	mq->queue->init_rq_fn = mmc_init_request;
+	mq->queue->exit_rq_fn = mmc_exit_request;
+	mq->queue->cmd_size = sizeof(struct mmc_queue_req);
 	mq->queue->queuedata = mq;
+	mq->qcnt = 0;
+	ret = blk_init_allocated_queue(mq->queue);
+	if (ret) {
+		blk_cleanup_queue(mq->queue);
+		return ret;
+	}
 
 	blk_queue_prep_rq(mq->queue, mmc_prep_request);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
@@ -374,6 +269,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	if (mmc_can_erase(card))
 		mmc_queue_setup_discard(mq->queue, card);
 
+	card->bouncesz = mmc_queue_calc_bouncesz(host);
 	if (card->bouncesz) {
 		blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
 		blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
@@ -400,7 +296,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	return 0;
 
 cleanup_queue:
-	mq->mqrq = NULL;
 	blk_cleanup_queue(mq->queue);
 	return ret;
 }
@@ -422,7 +317,6 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 	blk_start_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
-	mq->mqrq = NULL;
 	mq->card = NULL;
 }
 EXPORT_SYMBOL(mmc_cleanup_queue);
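As a closing usage note, here is a sketch (not part of this commit) of how a driver-private request, such as the planned ioctl() conversion, would reach its per-request data as soon as the request is allocated. The accessor mirrors req_to_mmc_queue_req() added to drivers/mmc/core/queue.h elsewhere in this patch (not shown in the diffstat above); foo_req_to_mqrq() and foo_issue_drv_request() are hypothetical names and the error handling is only indicative.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/err.h>

struct mmc_queue_req;	/* per-request container, defined in queue.h */

/* Equivalent of req_to_mmc_queue_req() from queue.h: the per-request
 * data lives directly behind the struct request. */
static inline struct mmc_queue_req *foo_req_to_mqrq(struct request *rq)
{
        return blk_mq_rq_to_pdu(rq);
}

/* Sketch of issuing a driver-private request on the MMC queue. */
static int foo_issue_drv_request(struct request_queue *q)
{
        struct request *rq;
        struct mmc_queue_req *mq_rq;

        rq = blk_get_request(q, REQ_OP_DRV_IN, __GFP_RECLAIM);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        /* Valid as soon as the request exists; no MMC queue slot needed. */
        mq_rq = foo_req_to_mqrq(rq);
        (void)mq_rq;	/* fill in command data, then blk_execute_rq(), etc. */

        blk_put_request(rq);
        return 0;
}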