Diffstat (limited to 'drivers/mmc/card/queue.c')
-rw-r--r--  drivers/mmc/card/queue.c | 223
1 file changed, 147 insertions, 76 deletions
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 6413afa318d..5db38cbcea6 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -52,14 +52,24 @@ static int mmc_queue_thread(void *d)
 	down(&mq->thread_sem);
 	do {
 		struct request *req = NULL;
+		struct mmc_queue_req *tmp;
 
 		spin_lock_irq(q->queue_lock);
 		set_current_state(TASK_INTERRUPTIBLE);
 		req = blk_fetch_request(q);
-		mq->req = req;
+		mq->mqrq_cur->req = req;
 		spin_unlock_irq(q->queue_lock);
 
-		if (!req) {
+		if (req || mq->mqrq_prev->req) {
+			set_current_state(TASK_RUNNING);
+			mq->issue_fn(mq, req);
+		} else {
+			/*
+			 * Since the queue is empty, start synchronous
+			 * background ops if there is a request for it.
+			 */
+			if (mmc_card_need_bkops(mq->card))
+				mmc_bkops_start(mq->card, true);
 			if (kthread_should_stop()) {
 				set_current_state(TASK_RUNNING);
 				break;
@@ -67,11 +77,14 @@ static int mmc_queue_thread(void *d)
 			up(&mq->thread_sem);
 			schedule();
 			down(&mq->thread_sem);
-			continue;
 		}
-		set_current_state(TASK_RUNNING);
 
-		mq->issue_fn(mq, req);
+		/* Current request becomes previous request and vice versa. */
+		mq->mqrq_prev->brq.mrq.data = NULL;
+		mq->mqrq_prev->req = NULL;
+		tmp = mq->mqrq_prev;
+		mq->mqrq_prev = mq->mqrq_cur;
+		mq->mqrq_cur = tmp;
 	} while (1);
 	up(&mq->thread_sem);
 
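The two hunks above change mmc_queue_thread() so that, instead of handling exactly one request per loop iteration, it keeps two mmc_queue_req slots and rotates them: the slot just issued becomes the "previous" request while the other slot is refilled from the block layer. A minimal standalone sketch of that rotation, using a simplified, hypothetical stand-in for the kernel's struct mmc_queue_req:

/*
 * Sketch only: "struct slot" is a simplified stand-in for struct
 * mmc_queue_req, not the kernel type.
 */
struct slot {
	void *req;			/* request currently owned by this slot */
};

struct two_slot_queue {
	struct slot slots[2];
	struct slot *cur;		/* slot being filled with the new request */
	struct slot *prev;		/* slot whose request was issued last */
};

static void rotate_slots(struct two_slot_queue *q)
{
	struct slot *tmp;

	q->prev->req = NULL;		/* the old "previous" request is finished */
	tmp = q->prev;			/* swap roles: cur becomes prev ... */
	q->prev = q->cur;
	q->cur = tmp;			/* ... and the freed slot becomes cur */
}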
@@ -97,10 +110,46 @@ static void mmc_request(struct request_queue *q)
 		return;
 	}
 
-	if (!mq->req)
+	if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
 		wake_up_process(mq->thread);
 }
 
+struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
+{
+	struct scatterlist *sg;
+
+	sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
+	if (!sg)
+		*err = -ENOMEM;
+	else {
+		*err = 0;
+		sg_init_table(sg, sg_len);
+	}
+
+	return sg;
+}
+
+static void mmc_queue_setup_discard(struct request_queue *q,
+				    struct mmc_card *card)
+{
+	unsigned max_discard;
+
+	max_discard = mmc_calc_max_discard(card);
+	if (!max_discard)
+		return;
+
+	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+	q->limits.max_discard_sectors = max_discard;
+	if (card->erased_byte == 0)
+		q->limits.discard_zeroes_data = 1;
+	q->limits.discard_granularity = card->pref_erase << 9;
+	/* granularity must not be greater than max. discard */
+	if (card->pref_erase > max_discard)
+		q->limits.discard_granularity = 0;
+	if (mmc_can_secure_erase_trim(card))
+		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
+}
+
 /**
  * mmc_init_queue - initialise a queue structure.
  * @mq: mmc queue
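Two helpers are introduced above. mmc_alloc_sg() allocates and initialises a scatterlist and reports failure through an int pointer rather than through its return value, and mmc_queue_setup_discard() pulls the discard setup out of mmc_init_queue() and caps it with mmc_calc_max_discard(); note that card->pref_erase is a sector count, so the shift by 9 converts it to the byte granularity the block layer expects. A hedged sketch of the mmc_alloc_sg() error convention as a caller uses it (mirroring the later hunks; host, ret and the cleanup_queue label are assumed to exist in the caller):

	struct scatterlist *sg;
	int ret;

	sg = mmc_alloc_sg(host->max_segs, &ret);	/* ret is 0 or -ENOMEM */
	if (ret)
		goto cleanup_queue;			/* sg is NULL on failure */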
@@ -116,6 +165,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	struct mmc_host *host = card->host;
 	u64 limit = BLK_BOUNCE_HIGH;
 	int ret;
+	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
 
 	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
 		limit = *mmc_dev(host)->dma_mask;
@@ -125,21 +176,16 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	if (!mq->queue)
 		return -ENOMEM;
 
+	memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
+	memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev));
+	mq->mqrq_cur = mqrq_cur;
+	mq->mqrq_prev = mqrq_prev;
 	mq->queue->queuedata = mq;
-	mq->req = NULL;
 
 	blk_queue_prep_rq(mq->queue, mmc_prep_request);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
-	if (mmc_can_erase(card)) {
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue);
-		mq->queue->limits.max_discard_sectors = UINT_MAX;
-		if (card->erased_byte == 0)
-			mq->queue->limits.discard_zeroes_data = 1;
-		mq->queue->limits.discard_granularity = card->pref_erase << 9;
-		if (mmc_can_secure_erase_trim(card))
-			queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
-						mq->queue);
-	}
+	if (mmc_can_erase(card))
+		mmc_queue_setup_discard(mq->queue, card);
 
 #ifdef CONFIG_MMC_BLOCK_BOUNCE
 	if (host->max_segs == 1) {
@@ -155,53 +201,64 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 				bouncesz = host->max_blk_count * 512;
 
 		if (bouncesz > 512) {
-			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
-			if (!mq->bounce_buf) {
+			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+			if (!mqrq_cur->bounce_buf) {
+				printk(KERN_WARNING "%s: unable to "
+					"allocate bounce cur buffer\n",
+					mmc_card_name(card));
+			}
+			mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+			if (!mqrq_prev->bounce_buf) {
 				printk(KERN_WARNING "%s: unable to "
-					"allocate bounce buffer\n",
+					"allocate bounce prev buffer\n",
 					mmc_card_name(card));
+				kfree(mqrq_cur->bounce_buf);
+				mqrq_cur->bounce_buf = NULL;
 			}
 		}
 
-		if (mq->bounce_buf) {
+		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
 			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
 			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
 			blk_queue_max_segments(mq->queue, bouncesz / 512);
 			blk_queue_max_segment_size(mq->queue, bouncesz);
 
-			mq->sg = kmalloc(sizeof(struct scatterlist),
-				GFP_KERNEL);
-			if (!mq->sg) {
-				ret = -ENOMEM;
+			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
+			if (ret)
 				goto cleanup_queue;
-			}
-			sg_init_table(mq->sg, 1);
 
-			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
-				bouncesz / 512, GFP_KERNEL);
-			if (!mq->bounce_sg) {
-				ret = -ENOMEM;
+			mqrq_cur->bounce_sg =
+				mmc_alloc_sg(bouncesz / 512, &ret);
+			if (ret)
+				goto cleanup_queue;
+
+			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
+			if (ret)
+				goto cleanup_queue;
+
+			mqrq_prev->bounce_sg =
+				mmc_alloc_sg(bouncesz / 512, &ret);
+			if (ret)
 				goto cleanup_queue;
-			}
-			sg_init_table(mq->bounce_sg, bouncesz / 512);
 		}
 	}
 #endif
 
-	if (!mq->bounce_buf) {
+	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
 		blk_queue_bounce_limit(mq->queue, limit);
 		blk_queue_max_hw_sectors(mq->queue,
 			min(host->max_blk_count, host->max_req_size / 512));
 		blk_queue_max_segments(mq->queue, host->max_segs);
 		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
 
-		mq->sg = kmalloc(sizeof(struct scatterlist) *
-			host->max_segs, GFP_KERNEL);
-		if (!mq->sg) {
-			ret = -ENOMEM;
+		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
+		if (ret)
+			goto cleanup_queue;
+
+
+		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
+		if (ret)
 			goto cleanup_queue;
-		}
-		sg_init_table(mq->sg, host->max_segs);
 	}
 
 	sema_init(&mq->thread_sem, 1);
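The allocation changes above duplicate the scatterlists and the optional bounce buffer, one set per mmc_queue_req slot, so two requests can be staged independently. Pieced together from the fields this patch touches (the real definition lives in drivers/mmc/card/queue.h and may hold more members), the per-slot state looks roughly like:

/* Approximate shape of struct mmc_queue_req as used by this patch; sketch only. */
struct mmc_queue_req {
	struct request		*req;		/* block layer request being served */
	struct mmc_blk_request	brq;		/* MMC command/data descriptors */
	struct scatterlist	*sg;		/* sg list handed to the host driver */
	char			*bounce_buf;	/* contiguous bounce buffer, if used */
	struct scatterlist	*bounce_sg;	/* sg list over the original request pages */
	unsigned int		bounce_sg_len;	/* number of entries in bounce_sg */
};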
@@ -216,16 +273,22 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 
 	return 0;
  free_bounce_sg:
-	if (mq->bounce_sg)
-		kfree(mq->bounce_sg);
-	mq->bounce_sg = NULL;
+	kfree(mqrq_cur->bounce_sg);
+	mqrq_cur->bounce_sg = NULL;
+	kfree(mqrq_prev->bounce_sg);
+	mqrq_prev->bounce_sg = NULL;
+
  cleanup_queue:
-	if (mq->sg)
-		kfree(mq->sg);
-	mq->sg = NULL;
-	if (mq->bounce_buf)
-		kfree(mq->bounce_buf);
-	mq->bounce_buf = NULL;
+	kfree(mqrq_cur->sg);
+	mqrq_cur->sg = NULL;
+	kfree(mqrq_cur->bounce_buf);
+	mqrq_cur->bounce_buf = NULL;
+
+	kfree(mqrq_prev->sg);
+	mqrq_prev->sg = NULL;
+	kfree(mqrq_prev->bounce_buf);
+	mqrq_prev->bounce_buf = NULL;
+
 	blk_cleanup_queue(mq->queue);
 	return ret;
 }
@@ -234,6 +297,8 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 {
 	struct request_queue *q = mq->queue;
 	unsigned long flags;
+	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
+	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
 
 	/* Make sure the queue isn't suspended, as that will deadlock */
 	mmc_queue_resume(mq);
@@ -247,16 +312,23 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 	blk_start_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
-	if (mq->bounce_sg)
-		kfree(mq->bounce_sg);
-	mq->bounce_sg = NULL;
+	kfree(mqrq_cur->bounce_sg);
+	mqrq_cur->bounce_sg = NULL;
 
-	kfree(mq->sg);
-	mq->sg = NULL;
+	kfree(mqrq_cur->sg);
+	mqrq_cur->sg = NULL;
 
-	if (mq->bounce_buf)
-		kfree(mq->bounce_buf);
-	mq->bounce_buf = NULL;
+	kfree(mqrq_cur->bounce_buf);
+	mqrq_cur->bounce_buf = NULL;
+
+	kfree(mqrq_prev->bounce_sg);
+	mqrq_prev->bounce_sg = NULL;
+
+	kfree(mqrq_prev->sg);
+	mqrq_prev->sg = NULL;
+
+	kfree(mqrq_prev->bounce_buf);
+	mqrq_prev->bounce_buf = NULL;
 
 	mq->card = NULL;
 }
@@ -309,27 +381,27 @@ void mmc_queue_resume(struct mmc_queue *mq)
 /*
  * Prepare the sg list(s) to be handed of to the host driver
  */
-unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
+unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
 {
 	unsigned int sg_len;
 	size_t buflen;
 	struct scatterlist *sg;
 	int i;
 
-	if (!mq->bounce_buf)
-		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);
+	if (!mqrq->bounce_buf)
+		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
 
-	BUG_ON(!mq->bounce_sg);
+	BUG_ON(!mqrq->bounce_sg);
 
-	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);
+	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
 
-	mq->bounce_sg_len = sg_len;
+	mqrq->bounce_sg_len = sg_len;
 
 	buflen = 0;
-	for_each_sg(mq->bounce_sg, sg, sg_len, i)
+	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
 		buflen += sg->length;
 
-	sg_init_one(mq->sg, mq->bounce_buf, buflen);
+	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
 
 	return 1;
 }
@@ -338,31 +410,30 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
  * If writing, bounce the data to the buffer before the request
  * is sent to the host driver
  */
-void mmc_queue_bounce_pre(struct mmc_queue *mq)
+void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
 {
-	if (!mq->bounce_buf)
+	if (!mqrq->bounce_buf)
 		return;
 
-	if (rq_data_dir(mq->req) != WRITE)
+	if (rq_data_dir(mqrq->req) != WRITE)
 		return;
 
-	sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
-		mq->bounce_buf, mq->sg[0].length);
+	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
+		mqrq->bounce_buf, mqrq->sg[0].length);
 }
 
 /*
  * If reading, bounce the data from the buffer after the request
  * has been handled by the host driver
  */
-void mmc_queue_bounce_post(struct mmc_queue *mq)
+void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
 {
-	if (!mq->bounce_buf)
+	if (!mqrq->bounce_buf)
 		return;
 
-	if (rq_data_dir(mq->req) != READ)
+	if (rq_data_dir(mqrq->req) != READ)
 		return;
 
-	sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
-		mq->bounce_buf, mq->sg[0].length);
+	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
+		mqrq->bounce_buf, mqrq->sg[0].length);
 }
-
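Since mmc_queue_map_sg(), mmc_queue_bounce_pre() and mmc_queue_bounce_post() now operate on a struct mmc_queue_req rather than on the queue itself, the issuing side (mmc_blk_issue_rw_rq() in block.c, which is not part of this diff) is expected to pass in the slot it is working on. A hedged sketch of that call sequence, with the surrounding block driver logic omitted:

	struct mmc_queue_req *mqrq = mq->mqrq_cur;

	/* Build the sg list, bouncing into mqrq->bounce_buf if the host needs it. */
	mqrq->brq.data.sg_len = mmc_queue_map_sg(mq, mqrq);

	/* For writes, copy the payload into the bounce buffer before starting I/O. */
	mmc_queue_bounce_pre(mqrq);

	mmc_wait_for_req(card->host, &mqrq->brq.mrq);

	/* For reads, copy the payload back out once the request has completed. */
	mmc_queue_bounce_post(mqrq);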