about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorAdrian Hunter <adrian.hunter@intel.com>2016-11-29 05:09:14 -0500
committerUlf Hansson <ulf.hansson@linaro.org>2016-12-05 04:31:06 -0500
commitc09949cff5eb408c30f154207ebdc706d94fe1f3 (patch)
tree7159e0870e289c8c7e734c2107b23a48689feb7e
parent64e29e42a61b8b531eb77f363ddb8e507dfd35ed (diff)
mmc: queue: Factor out mmc_queue_reqs_free_bufs()
In preparation for supporting a queue of requests, factor out
mmc_queue_reqs_free_bufs().

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Reviewed-by: Harjani Ritesh <riteshh@codeaurora.org>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
-rw-r--r--	drivers/mmc/card/queue.c	| 65
1 file changed, 26 insertions(+), 39 deletions(-)
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 280708d804b9..8ba82cf5feff 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -252,6 +252,27 @@ static int mmc_queue_alloc_sgs(struct mmc_queue *mq, int max_segs)
 	return ret;
 }
 
+static void mmc_queue_reqs_free_bufs(struct mmc_queue *mq)
+{
+	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
+	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
+
+	kfree(mqrq_cur->bounce_sg);
+	mqrq_cur->bounce_sg = NULL;
+	kfree(mqrq_prev->bounce_sg);
+	mqrq_prev->bounce_sg = NULL;
+
+	kfree(mqrq_cur->sg);
+	mqrq_cur->sg = NULL;
+	kfree(mqrq_cur->bounce_buf);
+	mqrq_cur->bounce_buf = NULL;
+
+	kfree(mqrq_prev->sg);
+	mqrq_prev->sg = NULL;
+	kfree(mqrq_prev->bounce_buf);
+	mqrq_prev->bounce_buf = NULL;
+}
+
 /**
  * mmc_init_queue - initialise a queue structure.
  * @mq: mmc queue
@@ -268,8 +289,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	u64 limit = BLK_BOUNCE_HIGH;
 	bool bounce = false;
 	int ret;
-	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
-	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
 
 	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
 		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
@@ -279,8 +298,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	if (!mq->queue)
 		return -ENOMEM;
 
-	mq->mqrq_cur = mqrq_cur;
-	mq->mqrq_prev = mqrq_prev;
+	mq->mqrq_cur = &mq->mqrq[0];
+	mq->mqrq_prev = &mq->mqrq[1];
 	mq->queue->queuedata = mq;
 
 	blk_queue_prep_rq(mq->queue, mmc_prep_request);
@@ -336,27 +355,13 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 
 	if (IS_ERR(mq->thread)) {
 		ret = PTR_ERR(mq->thread);
-		goto free_bounce_sg;
+		goto cleanup_queue;
 	}
 
 	return 0;
- free_bounce_sg:
-	kfree(mqrq_cur->bounce_sg);
-	mqrq_cur->bounce_sg = NULL;
-	kfree(mqrq_prev->bounce_sg);
-	mqrq_prev->bounce_sg = NULL;
 
  cleanup_queue:
-	kfree(mqrq_cur->sg);
-	mqrq_cur->sg = NULL;
-	kfree(mqrq_cur->bounce_buf);
-	mqrq_cur->bounce_buf = NULL;
-
-	kfree(mqrq_prev->sg);
-	mqrq_prev->sg = NULL;
-	kfree(mqrq_prev->bounce_buf);
-	mqrq_prev->bounce_buf = NULL;
-
+	mmc_queue_reqs_free_bufs(mq);
 	blk_cleanup_queue(mq->queue);
 	return ret;
 }
@@ -365,8 +370,6 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 {
 	struct request_queue *q = mq->queue;
 	unsigned long flags;
-	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
-	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
 
 	/* Make sure the queue isn't suspended, as that will deadlock */
 	mmc_queue_resume(mq);
@@ -380,23 +383,7 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 		blk_start_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
-	kfree(mqrq_cur->bounce_sg);
-	mqrq_cur->bounce_sg = NULL;
-
-	kfree(mqrq_cur->sg);
-	mqrq_cur->sg = NULL;
-
-	kfree(mqrq_cur->bounce_buf);
-	mqrq_cur->bounce_buf = NULL;
-
-	kfree(mqrq_prev->bounce_sg);
-	mqrq_prev->bounce_sg = NULL;
-
-	kfree(mqrq_prev->sg);
-	mqrq_prev->sg = NULL;
-
-	kfree(mqrq_prev->bounce_buf);
-	mqrq_prev->bounce_buf = NULL;
+	mmc_queue_reqs_free_bufs(mq);
 
 	mq->card = NULL;
 }