Diffstat (limited to 'drivers/mmc/core/queue.c')
-rw-r--r-- drivers/mmc/core/queue.c | 41
1 file changed, 25 insertions(+), 16 deletions(-)
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 0a4e77a5ba33..4f33d277b125 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -30,7 +30,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
 {
 	struct mmc_queue *mq = q->queuedata;
 
-	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
+	if (mq && mmc_card_removed(mq->card))
 		return BLKPREP_KILL;
 
 	req->rq_flags |= RQF_DONTPREP;
@@ -177,6 +177,29 @@ static void mmc_exit_request(struct request_queue *q, struct request *req)
 	mq_rq->sg = NULL;
 }
 
+static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
+{
+	struct mmc_host *host = card->host;
+	u64 limit = BLK_BOUNCE_HIGH;
+
+	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
+		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
+
+	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
+	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
+	if (mmc_can_erase(card))
+		mmc_queue_setup_discard(mq->queue, card);
+
+	blk_queue_bounce_limit(mq->queue, limit);
+	blk_queue_max_hw_sectors(mq->queue,
+		min(host->max_blk_count, host->max_req_size / 512));
+	blk_queue_max_segments(mq->queue, host->max_segs);
+	blk_queue_max_segment_size(mq->queue, host->max_seg_size);
+
+	/* Initialize thread_sem even if it is not used */
+	sema_init(&mq->thread_sem, 1);
+}
+
 /**
  * mmc_init_queue - initialise a queue structure.
  * @mq: mmc queue
@@ -190,12 +213,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 		   spinlock_t *lock, const char *subname)
 {
 	struct mmc_host *host = card->host;
-	u64 limit = BLK_BOUNCE_HIGH;
 	int ret = -ENOMEM;
 
-	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
-		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
-
 	mq->card = card;
 	mq->queue = blk_alloc_queue(GFP_KERNEL);
 	if (!mq->queue)
@@ -214,18 +233,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	}
 
 	blk_queue_prep_rq(mq->queue, mmc_prep_request);
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
-	if (mmc_can_erase(card))
-		mmc_queue_setup_discard(mq->queue, card);
 
-	blk_queue_bounce_limit(mq->queue, limit);
-	blk_queue_max_hw_sectors(mq->queue,
-		min(host->max_blk_count, host->max_req_size / 512));
-	blk_queue_max_segments(mq->queue, host->max_segs);
-	blk_queue_max_segment_size(mq->queue, host->max_seg_size);
-
-	sema_init(&mq->thread_sem, 1);
+	mmc_setup_queue(mq, card);
 
 	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
 		host->index, subname ? subname : "");
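
For readers skimming the new mmc_setup_queue() helper, the two derived limits are easy to misread: the bounce limit is the highest DMA-able page frame number widened into a byte address, and the hard sector cap is the smaller of the host's per-request block count and its per-request byte budget divided by the 512-byte sector size. The standalone userspace sketch below reproduces just that arithmetic; PAGE_SHIFT, min_u64 and all the numbers are illustrative stand-ins for the struct mmc_host fields, not values from any real controller.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12			/* assume 4 KiB pages */

static uint64_t min_u64(uint64_t a, uint64_t b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Hypothetical host capabilities, stand-ins for mmc_host fields. */
	uint64_t max_pfn       = 0xfffff;	/* highest DMA-able page frame */
	uint64_t max_blk_count = 65535;		/* like host->max_blk_count */
	uint64_t max_req_size  = 524288;	/* like host->max_req_size, bytes */

	/* Bounce limit: the highest addressable pfn scaled to a byte
	 * address, mirroring "limit = (u64)dma_max_pfn(...) << PAGE_SHIFT". */
	uint64_t bounce_limit = max_pfn << PAGE_SHIFT;

	/* Hard sector cap: per-request block count vs. byte budget in
	 * 512-byte sectors, whichever is smaller. */
	uint64_t max_hw_sectors = min_u64(max_blk_count, max_req_size / 512);

	printf("bounce limit   = %#" PRIx64 "\n", bounce_limit);
	printf("max_hw_sectors = %" PRIu64 "\n", max_hw_sectors);
	return 0;
}

With these made-up numbers the cap comes out as 1024 sectors (524288 / 512), so the byte budget rather than the block count is the binding limit; a host with a smaller max_blk_count would be capped by that instead.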