Diffstat (limited to 'drivers/mmc/core/queue.c')
 -rw-r--r--  drivers/mmc/core/queue.c  489
 1 file changed, 489 insertions, 0 deletions
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
new file mode 100644
index 000000000000..f4e3d76792f3
--- /dev/null
+++ b/drivers/mmc/core/queue.c
@@ -0,0 +1,489 @@
/*
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"

#define MMC_QUEUE_BOUNCESZ	65536

/*
 * Prepare a MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD &&
	    req_op(req) != REQ_OP_SECURE_ERASE) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
		return BLKPREP_KILL;

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

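/*
 * Each queue carries two mmc_queue_req slots (mqrq_cur and mqrq_prev, see
 * mmc_init_queue() where qdepth is set to 2) so that the next request can
 * be prepared while the previous one is still in flight on the host; the
 * thread below swaps the two slots after each issued request.
 */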
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
	struct mmc_context_info *cntx = &mq->card->host->context_info;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->asleep = false;
		cntx->is_waiting_last_req = false;
		cntx->is_new_req = false;
		if (!req) {
			/*
			 * Dispatch queue is empty so set flags for
			 * mmc_request_fn() to wake us up.
			 */
			if (mq->mqrq_prev->req)
				cntx->is_waiting_last_req = true;
			else
				mq->asleep = true;
		}
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
			bool req_is_special = mmc_req_is_special(req);

			set_current_state(TASK_RUNNING);
			mmc_blk_issue_rq(mq, req);
			cond_resched();
			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
				mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
				continue; /* fetch again */
			}

			/*
			 * Current request becomes previous request
			 * and vice versa.
			 * In case of special requests, current request
			 * has been finished. Do not assign it to previous
			 * request.
			 */
			if (req_is_special)
				mq->mqrq_cur->req = NULL;

			mq->mqrq_prev->brq.mrq.data = NULL;
			mq->mqrq_prev->req = NULL;
			swap(mq->mqrq_prev, mq->mqrq_cur);
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	struct mmc_context_info *cntx;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	cntx = &mq->card->host->context_info;

	if (cntx->is_waiting_last_req) {
		cntx->is_new_req = true;
		wake_up_interruptible(&cntx->wait);
	}

	if (mq->asleep)
		wake_up_process(mq->thread);
}

static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
	if (!sg)
		*err = -ENOMEM;
	else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}

static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	if (card->erased_byte == 0 && !mmc_can_discard(card))
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}

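/*
 * Bounce buffers are used for hosts that can only handle a single segment
 * per request (host->max_segs == 1): each mmc_queue_req gets a contiguous
 * buffer of up to MMC_QUEUE_BOUNCESZ bytes that request data is copied
 * through, so a multi-segment block request can still be issued to the
 * host as one transfer.
 */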
#ifdef CONFIG_MMC_BLOCK_BOUNCE
static bool mmc_queue_alloc_bounce_bufs(struct mmc_queue *mq,
					unsigned int bouncesz)
{
	int i;

	for (i = 0; i < mq->qdepth; i++) {
		mq->mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
		if (!mq->mqrq[i].bounce_buf)
			goto out_err;
	}

	return true;

out_err:
	while (--i >= 0) {
		kfree(mq->mqrq[i].bounce_buf);
		mq->mqrq[i].bounce_buf = NULL;
	}
	pr_warn("%s: unable to allocate bounce buffers\n",
		mmc_card_name(mq->card));
	return false;
}

static int mmc_queue_alloc_bounce_sgs(struct mmc_queue *mq,
				      unsigned int bouncesz)
{
	int i, ret;

	for (i = 0; i < mq->qdepth; i++) {
		mq->mqrq[i].sg = mmc_alloc_sg(1, &ret);
		if (ret)
			return ret;

		mq->mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
		if (ret)
			return ret;
	}

	return 0;
}
#endif

static int mmc_queue_alloc_sgs(struct mmc_queue *mq, int max_segs)
{
	int i, ret;

	for (i = 0; i < mq->qdepth; i++) {
		mq->mqrq[i].sg = mmc_alloc_sg(max_segs, &ret);
		if (ret)
			return ret;
	}

	return 0;
}

static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
{
	kfree(mqrq->bounce_sg);
	mqrq->bounce_sg = NULL;

	kfree(mqrq->sg);
	mqrq->sg = NULL;

	kfree(mqrq->bounce_buf);
	mqrq->bounce_buf = NULL;
}

static void mmc_queue_reqs_free_bufs(struct mmc_queue *mq)
{
	int i;

	for (i = 0; i < mq->qdepth; i++)
		mmc_queue_req_free_bufs(&mq->mqrq[i]);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	bool bounce = false;
	int ret = -ENOMEM;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->qdepth = 2;
	mq->mqrq = kcalloc(mq->qdepth, sizeof(struct mmc_queue_req),
			   GFP_KERNEL);
	if (!mq->mqrq)
		goto blk_cleanup;
	mq->mqrq_cur = &mq->mqrq[0];
	mq->mqrq_prev = &mq->mqrq[1];
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512 &&
		    mmc_queue_alloc_bounce_bufs(mq, bouncesz)) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			ret = mmc_queue_alloc_bounce_sgs(mq, bouncesz);
			if (ret)
				goto cleanup_queue;
			bounce = true;
		}
	}
#endif

	if (!bounce) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		ret = mmc_queue_alloc_sgs(mq, host->max_segs);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto cleanup_queue;
	}

	return 0;

cleanup_queue:
	mmc_queue_reqs_free_bufs(mq);
	kfree(mq->mqrq);
	mq->mqrq = NULL;
blk_cleanup:
	blk_cleanup_queue(mq->queue);
	return ret;
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	mmc_queue_reqs_free_bufs(mq);
	kfree(mq->mqrq);
	mq->mqrq = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend a MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

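/*
 * When a bounce buffer is in use, the block request is first mapped into
 * mqrq->bounce_sg and then presented to the host as a single scatterlist
 * entry covering the bounce buffer; mmc_queue_bounce_pre() and
 * mmc_queue_bounce_post() copy the data between the two.
 */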
/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mqrq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);

	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}