author     Adrian Hunter <adrian.hunter@intel.com>    2017-03-13 08:36:36 -0400
committer  Ulf Hansson <ulf.hansson@linaro.org>       2017-04-24 15:42:01 -0400
commit     7b410d074b253a44624497a18e73f666a9574f37
tree       50c8855b01206c4843e6d088acadca59f0ce2657   /drivers/mmc/core/queue.c
parent     cdf8a6fb48882651049e468e6b16956fb83db86c
mmc: queue: Share mmc request array between partitions
eMMC cards can have multiple internal partitions that are represented as
separate disks / queues. However, switching between partitions is only done
when the queue is empty. Consequently, the array of mmc requests that are
queued can be shared between partitions, saving memory.

Keep a pointer to the shared mmc request array on the card, and use that
instead of allocating a new one for each partition.
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
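
As a standalone illustration of the sharing described above (this is not
kernel code; the struct and function names below are invented for the sketch),
the card owns a single array of request slots and every partition's queue
borrows a pointer to it. That is safe because only one partition's queue is
ever active at a time:

/*
 * Standalone sketch (plain C, not kernel code) of the shared request array.
 * req_slot, card_ctx and part_queue are invented stand-ins for
 * struct mmc_queue_req, struct mmc_card and struct mmc_queue.
 */
#include <stdio.h>
#include <stdlib.h>

struct req_slot {               /* stands in for struct mmc_queue_req */
	char payload[512];
};

struct card_ctx {               /* stands in for struct mmc_card */
	struct req_slot *slots;     /* shared array, like card->mqrq */
	int qdepth;                 /* like card->qdepth */
};

struct part_queue {             /* stands in for struct mmc_queue */
	struct req_slot *slots;     /* borrowed pointer, not owned */
	int qdepth;
};

/* Allocate the shared array once per card. */
static int card_alloc_shared(struct card_ctx *card, int qdepth)
{
	card->slots = calloc(qdepth, sizeof(*card->slots));
	if (!card->slots)
		return -1;
	card->qdepth = qdepth;
	return 0;
}

/* Each partition's queue just borrows the card's array. */
static void part_queue_init(struct part_queue *pq, struct card_ctx *card)
{
	pq->slots = card->slots;
	pq->qdepth = card->qdepth;
}

int main(void)
{
	struct card_ctx card;
	struct part_queue user_part, boot_part;   /* e.g. user area + boot area */

	if (card_alloc_shared(&card, 2))
		return 1;

	/* Two partitions, one allocation: both queues reuse the same slots. */
	part_queue_init(&user_part, &card);
	part_queue_init(&boot_part, &card);

	printf("shared slots at %p used by both partition queues\n",
	       (void *)card.slots);

	free(card.slots);   /* freed once, by the card */
	return 0;
}

With per-partition allocation, a card exposing a user area plus two boot
partitions would carry three such arrays; with the shared array it carries
exactly one.
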
Diffstat (limited to 'drivers/mmc/core/queue.c')
-rw-r--r--   drivers/mmc/core/queue.c   234
1 file changed, 139 insertions(+), 95 deletions(-)
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 4a2045527b62..3423b7acf744 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -149,17 +149,13 @@ static void mmc_request_fn(struct request_queue *q)
 		wake_up_process(mq->thread);
 }
 
-static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
+static struct scatterlist *mmc_alloc_sg(int sg_len)
 {
 	struct scatterlist *sg;
 
 	sg = kmalloc_array(sg_len, sizeof(*sg), GFP_KERNEL);
-	if (!sg)
-		*err = -ENOMEM;
-	else {
-		*err = 0;
+	if (sg)
 		sg_init_table(sg, sg_len);
-	}
 
 	return sg;
 }
@@ -185,6 +181,32 @@ static void mmc_queue_setup_discard(struct request_queue *q,
 		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
 }
 
+static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
+{
+	kfree(mqrq->bounce_sg);
+	mqrq->bounce_sg = NULL;
+
+	kfree(mqrq->sg);
+	mqrq->sg = NULL;
+
+	kfree(mqrq->bounce_buf);
+	mqrq->bounce_buf = NULL;
+}
+
+static void mmc_queue_reqs_free_bufs(struct mmc_queue_req *mqrq, int qdepth)
+{
+	int i;
+
+	for (i = 0; i < qdepth; i++)
+		mmc_queue_req_free_bufs(&mqrq[i]);
+}
+
+static void mmc_queue_free_mqrqs(struct mmc_queue_req *mqrq, int qdepth)
+{
+	mmc_queue_reqs_free_bufs(mqrq, qdepth);
+	kfree(mqrq);
+}
+
 static struct mmc_queue_req *mmc_queue_alloc_mqrqs(int qdepth)
 {
 	struct mmc_queue_req *mqrq;
@@ -200,79 +222,137 @@ static struct mmc_queue_req *mmc_queue_alloc_mqrqs(int qdepth)
 }
 
 #ifdef CONFIG_MMC_BLOCK_BOUNCE
-static bool mmc_queue_alloc_bounce_bufs(struct mmc_queue *mq,
+static int mmc_queue_alloc_bounce_bufs(struct mmc_queue_req *mqrq, int qdepth,
 					unsigned int bouncesz)
 {
 	int i;
 
-	for (i = 0; i < mq->qdepth; i++) {
-		mq->mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
-		if (!mq->mqrq[i].bounce_buf)
-			goto out_err;
-	}
+	for (i = 0; i < qdepth; i++) {
+		mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+		if (!mqrq[i].bounce_buf)
+			return -ENOMEM;
 
-	return true;
+		mqrq[i].sg = mmc_alloc_sg(1);
+		if (!mqrq[i].sg)
+			return -ENOMEM;
 
-out_err:
-	while (--i >= 0) {
-		kfree(mq->mqrq[i].bounce_buf);
-		mq->mqrq[i].bounce_buf = NULL;
+		mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512);
+		if (!mqrq[i].bounce_sg)
+			return -ENOMEM;
 	}
-	pr_warn("%s: unable to allocate bounce buffers\n",
-		mmc_card_name(mq->card));
-	return false;
+
+	return 0;
 }
 
-static int mmc_queue_alloc_bounce_sgs(struct mmc_queue *mq,
-				      unsigned int bouncesz)
+static bool mmc_queue_alloc_bounce(struct mmc_queue_req *mqrq, int qdepth,
+				   unsigned int bouncesz)
 {
-	int i, ret;
+	int ret;
 
-	for (i = 0; i < mq->qdepth; i++) {
-		mq->mqrq[i].sg = mmc_alloc_sg(1, &ret);
-		if (ret)
-			return ret;
+	ret = mmc_queue_alloc_bounce_bufs(mqrq, qdepth, bouncesz);
+	if (ret)
+		mmc_queue_reqs_free_bufs(mqrq, qdepth);
 
-		mq->mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
-		if (ret)
-			return ret;
-	}
+	return !ret;
+}
+
+static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
+{
+	unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;
+
+	if (host->max_segs != 1)
+		return 0;
+
+	if (bouncesz > host->max_req_size)
+		bouncesz = host->max_req_size;
+	if (bouncesz > host->max_seg_size)
+		bouncesz = host->max_seg_size;
+	if (bouncesz > host->max_blk_count * 512)
+		bouncesz = host->max_blk_count * 512;
+
+	if (bouncesz <= 512)
+		return 0;
+
+	return bouncesz;
+}
+#else
+static inline bool mmc_queue_alloc_bounce(struct mmc_queue_req *mqrq,
+					  int qdepth, unsigned int bouncesz)
+{
+	return false;
+}
 
+static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
+{
 	return 0;
 }
 #endif
 
-static int mmc_queue_alloc_sgs(struct mmc_queue *mq, int max_segs)
+static int mmc_queue_alloc_sgs(struct mmc_queue_req *mqrq, int qdepth,
+			       int max_segs)
 {
-	int i, ret;
+	int i;
 
-	for (i = 0; i < mq->qdepth; i++) {
-		mq->mqrq[i].sg = mmc_alloc_sg(max_segs, &ret);
-		if (ret)
-			return ret;
+	for (i = 0; i < qdepth; i++) {
+		mqrq[i].sg = mmc_alloc_sg(max_segs);
+		if (!mqrq[i].sg)
+			return -ENOMEM;
 	}
 
 	return 0;
 }
 
-static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
+void mmc_queue_free_shared_queue(struct mmc_card *card)
 {
-	kfree(mqrq->bounce_sg);
-	mqrq->bounce_sg = NULL;
+	if (card->mqrq) {
+		mmc_queue_free_mqrqs(card->mqrq, card->qdepth);
+		card->mqrq = NULL;
+	}
+}
 
-	kfree(mqrq->sg);
-	mqrq->sg = NULL;
+static int __mmc_queue_alloc_shared_queue(struct mmc_card *card, int qdepth)
+{
+	struct mmc_host *host = card->host;
+	struct mmc_queue_req *mqrq;
+	unsigned int bouncesz;
+	int ret = 0;
 
-	kfree(mqrq->bounce_buf);
-	mqrq->bounce_buf = NULL;
+	if (card->mqrq)
+		return -EINVAL;
+
+	mqrq = mmc_queue_alloc_mqrqs(qdepth);
+	if (!mqrq)
+		return -ENOMEM;
+
+	card->mqrq = mqrq;
+	card->qdepth = qdepth;
+
+	bouncesz = mmc_queue_calc_bouncesz(host);
+
+	if (bouncesz && !mmc_queue_alloc_bounce(mqrq, qdepth, bouncesz)) {
+		bouncesz = 0;
+		pr_warn("%s: unable to allocate bounce buffers\n",
+			mmc_card_name(card));
+	}
+
+	card->bouncesz = bouncesz;
+
+	if (!bouncesz) {
+		ret = mmc_queue_alloc_sgs(mqrq, qdepth, host->max_segs);
+		if (ret)
+			goto out_err;
+	}
+
+	return ret;
+
+out_err:
+	mmc_queue_free_shared_queue(card);
+	return ret;
 }
 
-static void mmc_queue_reqs_free_bufs(struct mmc_queue *mq)
+int mmc_queue_alloc_shared_queue(struct mmc_card *card)
 {
-	int i;
-
-	for (i = 0; i < mq->qdepth; i++)
-		mmc_queue_req_free_bufs(&mq->mqrq[i]);
+	return __mmc_queue_alloc_shared_queue(card, 2);
 }
 
 /**
@@ -289,7 +369,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 {
 	struct mmc_host *host = card->host;
 	u64 limit = BLK_BOUNCE_HIGH;
-	bool bounce = false;
 	int ret = -ENOMEM;
 
 	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
@@ -300,10 +379,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	if (!mq->queue)
 		return -ENOMEM;
 
-	mq->qdepth = 2;
-	mq->mqrq = mmc_queue_alloc_mqrqs(mq->qdepth);
-	if (!mq->mqrq)
-		goto blk_cleanup;
+	mq->mqrq = card->mqrq;
+	mq->qdepth = card->qdepth;
 	mq->queue->queuedata = mq;
 
 	blk_queue_prep_rq(mq->queue, mmc_prep_request);
@@ -312,44 +389,17 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	if (mmc_can_erase(card))
 		mmc_queue_setup_discard(mq->queue, card);
 
-#ifdef CONFIG_MMC_BLOCK_BOUNCE
-	if (host->max_segs == 1) {
-		unsigned int bouncesz;
-
-		bouncesz = MMC_QUEUE_BOUNCESZ;
-
-		if (bouncesz > host->max_req_size)
-			bouncesz = host->max_req_size;
-		if (bouncesz > host->max_seg_size)
-			bouncesz = host->max_seg_size;
-		if (bouncesz > (host->max_blk_count * 512))
-			bouncesz = host->max_blk_count * 512;
-
-		if (bouncesz > 512 &&
-		    mmc_queue_alloc_bounce_bufs(mq, bouncesz)) {
-			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
-			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
-			blk_queue_max_segments(mq->queue, bouncesz / 512);
-			blk_queue_max_segment_size(mq->queue, bouncesz);
-
-			ret = mmc_queue_alloc_bounce_sgs(mq, bouncesz);
-			if (ret)
-				goto cleanup_queue;
-			bounce = true;
-		}
-	}
-#endif
-
-	if (!bounce) {
+	if (card->bouncesz) {
+		blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
+		blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
+		blk_queue_max_segments(mq->queue, card->bouncesz / 512);
+		blk_queue_max_segment_size(mq->queue, card->bouncesz);
+	} else {
 		blk_queue_bounce_limit(mq->queue, limit);
 		blk_queue_max_hw_sectors(mq->queue,
 			min(host->max_blk_count, host->max_req_size / 512));
 		blk_queue_max_segments(mq->queue, host->max_segs);
 		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
-
-		ret = mmc_queue_alloc_sgs(mq, host->max_segs);
-		if (ret)
-			goto cleanup_queue;
 	}
 
 	sema_init(&mq->thread_sem, 1);
@@ -364,11 +414,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 
 	return 0;
 
 cleanup_queue:
-	mmc_queue_reqs_free_bufs(mq);
-	kfree(mq->mqrq);
 	mq->mqrq = NULL;
-blk_cleanup:
 	blk_cleanup_queue(mq->queue);
 	return ret;
 }
@@ -390,10 +437,7 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 	blk_start_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
-	mmc_queue_reqs_free_bufs(mq);
-	kfree(mq->mqrq);
 	mq->mqrq = NULL;
-
 	mq->card = NULL;
 }
 EXPORT_SYMBOL(mmc_cleanup_queue);
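
For orientation, a rough sketch of the intended call order for the helpers
added by this patch. The real call sites live in the mmc block driver, which
is outside this diff, so the wrapper below and its error unwinding are
illustrative assumptions rather than code from the tree; mmc_init_queue()'s
trailing lock/subname parameters are taken from its signature in this kernel
version.

/*
 * Illustrative only: allocate the per-card shared mmc_queue_req array once,
 * then let each partition's request queue borrow it via card->mqrq.
 */
#include <linux/mmc/card.h>
#include <linux/spinlock.h>

#include "queue.h"

static int example_setup_partition_queues(struct mmc_card *card,
					  struct mmc_queue *queues,
					  int nr_parts, spinlock_t *lock)
{
	int i, ret;

	/* One shared mmc_queue_req array, allocated once per card. */
	ret = mmc_queue_alloc_shared_queue(card);
	if (ret)
		return ret;

	/* Each partition's queue borrows card->mqrq / card->qdepth. */
	for (i = 0; i < nr_parts; i++) {
		ret = mmc_init_queue(&queues[i], card, lock, NULL);
		if (ret)
			goto err;
	}

	return 0;

err:
	while (i--)
		mmc_cleanup_queue(&queues[i]);
	mmc_queue_free_shared_queue(card);
	return ret;
}

Note the ownership change this implies: mmc_cleanup_queue() no longer frees
the request array; it only drops the queue's borrowed pointer, and the array
is released once per card by mmc_queue_free_shared_queue().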