author     Adrian Hunter <adrian.hunter@intel.com>   2017-09-22 08:36:57 -0400
committer  Ulf Hansson <ulf.hansson@linaro.org>      2017-10-30 06:45:52 -0400
commit     c8b5fd031a3004dc382e201f69ea9a44ec62c04f (patch)
tree       0831f8df792c495f62469b7ee733418761a2dd25 /drivers/mmc/core/queue.c
parent     93482b3d70c2120aadb0f1d1281a59199866e70a (diff)
mmc: block: Factor out mmc_setup_queue()
Factor out some common code that will also be used with blk-mq.

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
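As a rough illustration of the reuse the commit message anticipates, here is a minimal sketch of a blk-mq initialisation path calling the new helper. The function mmc_mq_init_queue() and the mq->tag_set field are hypothetical, not part of this patch; the actual blk-mq conversion landed in later commits:

/*
 * Hypothetical blk-mq init path (illustration only): the name
 * mmc_mq_init_queue() and the tag_set member are assumptions.
 */
static int mmc_mq_init_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	mq->card = card;

	/* blk_mq_init_queue() allocates a request queue bound to the tag set */
	mq->queue = blk_mq_init_queue(&mq->tag_set);
	if (IS_ERR(mq->queue))
		return PTR_ERR(mq->queue);

	/* Apply the same queue limits and flags as the legacy path */
	mmc_setup_queue(mq, card);

	return 0;
}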
Diffstat (limited to 'drivers/mmc/core/queue.c')
-rw-r--r--   drivers/mmc/core/queue.c   39
1 file changed, 24 insertions(+), 15 deletions(-)
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index f74f9ef460cc..4f33d277b125 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -177,6 +177,29 @@ static void mmc_exit_request(struct request_queue *q, struct request *req)
 	mq_rq->sg = NULL;
 }
 
+static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
+{
+	struct mmc_host *host = card->host;
+	u64 limit = BLK_BOUNCE_HIGH;
+
+	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
+		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
+
+	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
+	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
+	if (mmc_can_erase(card))
+		mmc_queue_setup_discard(mq->queue, card);
+
+	blk_queue_bounce_limit(mq->queue, limit);
+	blk_queue_max_hw_sectors(mq->queue,
+		min(host->max_blk_count, host->max_req_size / 512));
+	blk_queue_max_segments(mq->queue, host->max_segs);
+	blk_queue_max_segment_size(mq->queue, host->max_seg_size);
+
+	/* Initialize thread_sem even if it is not used */
+	sema_init(&mq->thread_sem, 1);
+}
+
 /**
  * mmc_init_queue - initialise a queue structure.
  * @mq: mmc queue
@@ -190,12 +213,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	spinlock_t *lock, const char *subname)
 {
 	struct mmc_host *host = card->host;
-	u64 limit = BLK_BOUNCE_HIGH;
 	int ret = -ENOMEM;
 
-	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
-		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
-
 	mq->card = card;
 	mq->queue = blk_alloc_queue(GFP_KERNEL);
 	if (!mq->queue)
@@ -214,18 +233,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	}
 
 	blk_queue_prep_rq(mq->queue, mmc_prep_request);
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
-	if (mmc_can_erase(card))
-		mmc_queue_setup_discard(mq->queue, card);
 
-	blk_queue_bounce_limit(mq->queue, limit);
-	blk_queue_max_hw_sectors(mq->queue,
-		min(host->max_blk_count, host->max_req_size / 512));
-	blk_queue_max_segments(mq->queue, host->max_segs);
-	blk_queue_max_segment_size(mq->queue, host->max_seg_size);
-
-	sema_init(&mq->thread_sem, 1);
+	mmc_setup_queue(mq, card);
 
 	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
 			host->index, subname ? subname : "");