author     Linus Torvalds <torvalds@linux-foundation.org>  2017-07-04 14:11:56 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-07-04 14:11:56 -0400
commit     17ece345a042347224e50032e959ad3959638b21 (patch)
tree       fd9dd89cce014a4088fda9aee837f2ac6e47ade8 /drivers/mmc/core/queue.c
parent     650fc870a2ef35b83397eebd35b8c8df211bff78 (diff)
parent     e47c0b96678c5fd731c125dca677880e06d6394c (diff)
Merge tag 'mmc-v4.13' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc
Pull MMC updates from Ulf Hansson:
"MMC core:
- Add support to enable irq wake for slot gpio
- Remove MMC_CAP2_HC_ERASE_SZ and make it the default behaviour
- Improve R1 response error checks for stop commands
- Cleanup and clarify some MMC specific code
- Keep card runtime resumed while adding SDIO function devices
- Use device_property_read instead of of_property_read in mmc_of_parse()
- Move boot partition locking into a driver op to enable proper I/O scheduling
- Move multi/single-ioctl() to use block layer to enable proper I/O scheduling
- Delete bounce buffer Kconfig option
- Improve the eMMC HW reset support provided via the eMMC pwrseq
- Add host API to manage SDIO IRQs from a workqueue (see the sketch below this message)
MMC host:
- dw_mmc: Drop support for multiple slots
- dw_mmc: Use device_property_read instead of of_property_read
- dw_mmc-rockchip: Optional improved tuning to greatly decrease tuning time
- dw_mmc: Prevent rpm suspend for SDIO IRQs instead of always for SDIO cards
- dw_mmc: Convert to use MMC_CAP2_SDIO_IRQ_NOTHREAD for SDIO IRQs
- omap_hsmmc: Convert to mmc regulator APIs to consolidate code
- omap_hsmmc: Deprecate "vmmc_aux" in DT and use "vqmmc" instead
- tmio: make sure SDIO gets reinitialized after resume
- sdhi: add CMD23 support to R-Car Gen2 & Gen3
- tmio: add CMD23 support
- sdhi/tmio: Refactor code and rename files to simplify Kconfig options
- sdhci-pci: Enable card detect wake for Intel BYT-related SD controllers
- sdhci-pci: Add support for Intel CNP
- sdhci-esdhc-imx: Remove ENGcm07207 workaround - allow multi block transfers
- sdhci-esdhc-imx: Allow all supported prescaler values
- sdhci-esdhc-imx: Fix DAT line software reset
- sdhci-esdhc: Add SDHCI_QUIRK_32BIT_DMA_ADDR
- atmel-mci: Drop AVR32 support"
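
The workqueue-based SDIO IRQ handling mentioned in the MMC core list replaces the
old dedicated polling kthread. As a rough sketch of how a host driver opts in,
assuming the API names from this series (MMC_CAP2_SDIO_IRQ_NOTHREAD, sdio_signal_irq()
and the new ->ack_sdio_irq() host op, modelled on the dw_mmc conversion); all foo_*
names and register handling here are invented for illustration:

/*
 * Hypothetical host driver using the workqueue-based SDIO IRQ scheme.
 * The hard IRQ handler only masks the card interrupt and signals the
 * core; the core runs the function drivers' handlers from a workqueue
 * and then calls back into ->ack_sdio_irq() to unmask.
 */
static irqreturn_t foo_irq(int irq, void *dev_id)
{
        struct foo_host *host = dev_id;

        if (foo_read_int_status(host) & FOO_INT_SDIO) {
                foo_mask_sdio_irq(host, true);  /* quiesce until handled */
                sdio_signal_irq(host->mmc);     /* core schedules the work */
        }
        return IRQ_HANDLED;
}

static void foo_ack_sdio_irq(struct mmc_host *mmc)
{
        foo_mask_sdio_irq(mmc_priv(mmc), false);        /* re-arm card IRQ */
}

static const struct mmc_host_ops foo_ops = {
        /* ... */
        .ack_sdio_irq   = foo_ack_sdio_irq,
};

/* in probe: mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; */
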
* tag 'mmc-v4.13' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc: (86 commits)
mmc: dw_mmc: remove the unnecessary slot variable
mmc: dw_mmc: use the 'slot' instead of 'cur_slot'
mmc: dw_mmc: remove the 'id' arguments about functions relevant to slot
mmc: dw_mmc: change the array of slots
mmc: dw_mmc: remove the loop about finding slots
mmc: dw_mmc: deprecate the "num-slots" property
mmc: dw_mmc-rockchip: parse rockchip,desired-num-phases from DT
dt-bindings: rockchip-dw-mshc: add optional rockchip,desired-num-phases
mmc: renesas-sdhi: improve checkpatch cleanness
mmc: tmio: improve checkpatch cleanness
mmc: sdhci-pci: Enable card detect wake for Intel BYT-related SD controllers
mmc: slot-gpio: Add support to enable irq wake on cd_irq
mmc: core: Remove MMC_CAP2_HC_ERASE_SZ
mmc: core: for data errors, take response of stop cmd into account
mmc: core: check also R1 response for stop commands
mmc: core: Clarify code for sending CSD
mmc: core: Drop mmc_all_send_cid() and use mmc_send_cxd_native() instead
mmc: core: Re-factor code for sending CID
mmc: core: Remove redundant code in mmc_send_cid()
mmc: core: Make mmc_can_reset() static
...
Diffstat (limited to 'drivers/mmc/core/queue.c')
-rw-r--r--  drivers/mmc/core/queue.c | 242
1 file changed, 60 insertions(+), 182 deletions(-)
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index b659a28c8018..affa7370ba82 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -40,35 +40,6 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
         return BLKPREP_OK;
 }
 
-struct mmc_queue_req *mmc_queue_req_find(struct mmc_queue *mq,
-                                         struct request *req)
-{
-        struct mmc_queue_req *mqrq;
-        int i = ffz(mq->qslots);
-
-        if (i >= mq->qdepth)
-                return NULL;
-
-        mqrq = &mq->mqrq[i];
-        WARN_ON(mqrq->req || mq->qcnt >= mq->qdepth ||
-                test_bit(mqrq->task_id, &mq->qslots));
-        mqrq->req = req;
-        mq->qcnt += 1;
-        __set_bit(mqrq->task_id, &mq->qslots);
-
-        return mqrq;
-}
-
-void mmc_queue_req_free(struct mmc_queue *mq,
-                        struct mmc_queue_req *mqrq)
-{
-        WARN_ON(!mqrq->req || mq->qcnt < 1 ||
-                !test_bit(mqrq->task_id, &mq->qslots));
-        mqrq->req = NULL;
-        mq->qcnt -= 1;
-        __clear_bit(mqrq->task_id, &mq->qslots);
-}
-
 static int mmc_queue_thread(void *d)
 {
         struct mmc_queue *mq = d;
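
For context, the two helpers deleted above implemented a small slot allocator: a
bitmap (mq->qslots) tracked which of the statically allocated mmc_queue_req slots
were in flight, with ffz() picking the first free bit. The rest of this diff makes
that bookkeeping unnecessary by letting the block layer own per-request data. A
standalone sketch of the deleted pattern, with hypothetical foo_* names:

/* Slot-bitmap pattern being removed here; qslots/qdepth mirror the
 * mmc_queue fields. Non-atomic bitops assume the caller holds a lock. */
struct foo_queue {
        unsigned long qslots;   /* one bit per in-flight slot */
        int qdepth;             /* number of usable slots */
};

static int foo_claim_slot(struct foo_queue *q)
{
        int i = ffz(q->qslots);         /* index of first zero bit */

        if (i >= q->qdepth)
                return -1;              /* all slots busy */
        __set_bit(i, &q->qslots);
        return i;
}

static void foo_release_slot(struct foo_queue *q, int i)
{
        __clear_bit(i, &q->qslots);
}
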
@@ -149,11 +120,11 @@ static void mmc_request_fn(struct request_queue *q)
                 wake_up_process(mq->thread);
 }
 
-static struct scatterlist *mmc_alloc_sg(int sg_len)
+static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
 {
         struct scatterlist *sg;
 
-        sg = kmalloc_array(sg_len, sizeof(*sg), GFP_KERNEL);
+        sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
         if (sg)
                 sg_init_table(sg, sg_len);
 
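
The only change here threads a gfp_t through mmc_alloc_sg(), so the block layer's
init_rq_fn hook (added below) can dictate the allocation policy instead of
hard-coding GFP_KERNEL. As a reminder of the helper in use, kmalloc_array(n, size,
gfp) is an overflow-checked kmalloc(n * size, gfp), roughly:

/* Open-coded equivalent of kmalloc_array(sg_len, sizeof(*sg), gfp) */
if (sg_len > SIZE_MAX / sizeof(*sg))
        sg = NULL;                      /* n * size would overflow */
else
        sg = kmalloc(sg_len * sizeof(*sg), gfp);
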
@@ -179,86 +150,11 @@ static void mmc_queue_setup_discard(struct request_queue *q,
                 queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
 }
 
-static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
-{
-        kfree(mqrq->bounce_sg);
-        mqrq->bounce_sg = NULL;
-
-        kfree(mqrq->sg);
-        mqrq->sg = NULL;
-
-        kfree(mqrq->bounce_buf);
-        mqrq->bounce_buf = NULL;
-}
-
-static void mmc_queue_reqs_free_bufs(struct mmc_queue_req *mqrq, int qdepth)
-{
-        int i;
-
-        for (i = 0; i < qdepth; i++)
-                mmc_queue_req_free_bufs(&mqrq[i]);
-}
-
-static void mmc_queue_free_mqrqs(struct mmc_queue_req *mqrq, int qdepth)
-{
-        mmc_queue_reqs_free_bufs(mqrq, qdepth);
-        kfree(mqrq);
-}
-
-static struct mmc_queue_req *mmc_queue_alloc_mqrqs(int qdepth)
-{
-        struct mmc_queue_req *mqrq;
-        int i;
-
-        mqrq = kcalloc(qdepth, sizeof(*mqrq), GFP_KERNEL);
-        if (mqrq) {
-                for (i = 0; i < qdepth; i++)
-                        mqrq[i].task_id = i;
-        }
-
-        return mqrq;
-}
-
-#ifdef CONFIG_MMC_BLOCK_BOUNCE
-static int mmc_queue_alloc_bounce_bufs(struct mmc_queue_req *mqrq, int qdepth,
-                                       unsigned int bouncesz)
-{
-        int i;
-
-        for (i = 0; i < qdepth; i++) {
-                mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
-                if (!mqrq[i].bounce_buf)
-                        return -ENOMEM;
-
-                mqrq[i].sg = mmc_alloc_sg(1);
-                if (!mqrq[i].sg)
-                        return -ENOMEM;
-
-                mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512);
-                if (!mqrq[i].bounce_sg)
-                        return -ENOMEM;
-        }
-
-        return 0;
-}
-
-static bool mmc_queue_alloc_bounce(struct mmc_queue_req *mqrq, int qdepth,
-                                   unsigned int bouncesz)
-{
-        int ret;
-
-        ret = mmc_queue_alloc_bounce_bufs(mqrq, qdepth, bouncesz);
-        if (ret)
-                mmc_queue_reqs_free_bufs(mqrq, qdepth);
-
-        return !ret;
-}
-
 static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
 {
         unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;
 
-        if (host->max_segs != 1)
+        if (host->max_segs != 1 || (host->caps & MMC_CAP_NO_BOUNCE_BUFF))
                 return 0;
 
         if (bouncesz > host->max_req_size)
@@ -273,84 +169,58 @@ static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
 
         return bouncesz;
 }
-#else
-static inline bool mmc_queue_alloc_bounce(struct mmc_queue_req *mqrq,
-                                          int qdepth, unsigned int bouncesz)
-{
-        return false;
-}
 
-static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
-{
-        return 0;
-}
-#endif
-
-static int mmc_queue_alloc_sgs(struct mmc_queue_req *mqrq, int qdepth,
-                               int max_segs)
+/**
+ * mmc_init_request() - initialize the MMC-specific per-request data
+ * @q: the request queue
+ * @req: the request
+ * @gfp: memory allocation policy
+ */
+static int mmc_init_request(struct request_queue *q, struct request *req,
+                            gfp_t gfp)
 {
-        int i;
+        struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
+        struct mmc_queue *mq = q->queuedata;
+        struct mmc_card *card = mq->card;
+        struct mmc_host *host = card->host;
 
-        for (i = 0; i < qdepth; i++) {
-                mqrq[i].sg = mmc_alloc_sg(max_segs);
-                if (!mqrq[i].sg)
+        if (card->bouncesz) {
+                mq_rq->bounce_buf = kmalloc(card->bouncesz, gfp);
+                if (!mq_rq->bounce_buf)
+                        return -ENOMEM;
+                if (card->bouncesz > 512) {
+                        mq_rq->sg = mmc_alloc_sg(1, gfp);
+                        if (!mq_rq->sg)
+                                return -ENOMEM;
+                        mq_rq->bounce_sg = mmc_alloc_sg(card->bouncesz / 512,
+                                                        gfp);
+                        if (!mq_rq->bounce_sg)
+                                return -ENOMEM;
+                }
+        } else {
+                mq_rq->bounce_buf = NULL;
+                mq_rq->bounce_sg = NULL;
+                mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
+                if (!mq_rq->sg)
                         return -ENOMEM;
         }
 
         return 0;
 }
 
-void mmc_queue_free_shared_queue(struct mmc_card *card)
+static void mmc_exit_request(struct request_queue *q, struct request *req)
 {
-        if (card->mqrq) {
-                mmc_queue_free_mqrqs(card->mqrq, card->qdepth);
-                card->mqrq = NULL;
-        }
-}
-
-static int __mmc_queue_alloc_shared_queue(struct mmc_card *card, int qdepth)
-{
-        struct mmc_host *host = card->host;
-        struct mmc_queue_req *mqrq;
-        unsigned int bouncesz;
-        int ret = 0;
-
-        if (card->mqrq)
-                return -EINVAL;
+        struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
 
-        mqrq = mmc_queue_alloc_mqrqs(qdepth);
-        if (!mqrq)
-                return -ENOMEM;
-
-        card->mqrq = mqrq;
-        card->qdepth = qdepth;
-
-        bouncesz = mmc_queue_calc_bouncesz(host);
-
-        if (bouncesz && !mmc_queue_alloc_bounce(mqrq, qdepth, bouncesz)) {
-                bouncesz = 0;
-                pr_warn("%s: unable to allocate bounce buffers\n",
-                        mmc_card_name(card));
-        }
+        /* It is OK to kfree(NULL) so this will be smooth */
+        kfree(mq_rq->bounce_sg);
+        mq_rq->bounce_sg = NULL;
 
-        card->bouncesz = bouncesz;
+        kfree(mq_rq->bounce_buf);
+        mq_rq->bounce_buf = NULL;
 
-        if (!bouncesz) {
-                ret = mmc_queue_alloc_sgs(mqrq, qdepth, host->max_segs);
-                if (ret)
-                        goto out_err;
-        }
-
-        return ret;
-
-out_err:
-        mmc_queue_free_shared_queue(card);
-        return ret;
-}
-
-int mmc_queue_alloc_shared_queue(struct mmc_card *card)
-{
-        return __mmc_queue_alloc_shared_queue(card, 2);
-}
+        kfree(mq_rq->sg);
+        mq_rq->sg = NULL;
+}
 
 /**
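
The req_to_mmc_queue_req() accessor used in the new hooks comes from the queue.h
side of this series, which is not visible in this file's diff. To the best of my
understanding, it and its inverse (used further down) are thin wrappers around the
block layer's per-request payload helpers; roughly:

/* Assumed sketch of the queue.h accessors, not shown in this diff.
 * The payload lives immediately after struct request, sized by the
 * q->cmd_size field set below. */
static inline struct mmc_queue_req *req_to_mmc_queue_req(struct request *rq)
{
        return blk_mq_rq_to_pdu(rq);    /* effectively (void *)(rq + 1) */
}

static inline struct request *mmc_queue_req_to_req(struct mmc_queue_req *mqr)
{
        return blk_mq_rq_from_pdu(mqr);
}
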
@@ -373,13 +243,21 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
         limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
 
         mq->card = card;
-        mq->queue = blk_init_queue(mmc_request_fn, lock);
+        mq->queue = blk_alloc_queue(GFP_KERNEL);
         if (!mq->queue)
                 return -ENOMEM;
-
-        mq->mqrq = card->mqrq;
-        mq->qdepth = card->qdepth;
+        mq->queue->queue_lock = lock;
+        mq->queue->request_fn = mmc_request_fn;
+        mq->queue->init_rq_fn = mmc_init_request;
+        mq->queue->exit_rq_fn = mmc_exit_request;
+        mq->queue->cmd_size = sizeof(struct mmc_queue_req);
         mq->queue->queuedata = mq;
+        mq->qcnt = 0;
+        ret = blk_init_allocated_queue(mq->queue);
+        if (ret) {
+                blk_cleanup_queue(mq->queue);
+                return ret;
+        }
 
         blk_queue_prep_rq(mq->queue, mmc_prep_request);
         queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
@@ -387,6 +265,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
         if (mmc_can_erase(card))
                 mmc_queue_setup_discard(mq->queue, card);
 
+        card->bouncesz = mmc_queue_calc_bouncesz(host);
         if (card->bouncesz) {
                 blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
                 blk_queue_max_segments(mq->queue, card->bouncesz / 512);
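
Taken together, the two hunks above are the v4.13 legacy (non-blk-mq) recipe for
getting driver-private data allocated alongside every request: allocate the queue
unprepared, fill in cmd_size plus the init/exit hooks, then finish with
blk_init_allocated_queue(), which builds the request pool. Distilled, with
hypothetical foo_* names:

struct request_queue *q;

q = blk_alloc_queue(GFP_KERNEL);
if (!q)
        return -ENOMEM;
q->queue_lock = &foo_lock;
q->request_fn = foo_request_fn;
q->init_rq_fn = foo_init_request;       /* runs for each pooled request */
q->exit_rq_fn = foo_exit_request;
q->cmd_size = sizeof(struct foo_rq_priv);       /* appended to each request */
q->queuedata = foo;
ret = blk_init_allocated_queue(q);      /* allocates the request pool */
if (ret) {
        blk_cleanup_queue(q);
        return ret;
}
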
@@ -412,7 +291,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
         return 0;
 
 cleanup_queue:
-        mq->mqrq = NULL;
         blk_cleanup_queue(mq->queue);
         return ret;
 }
@@ -434,7 +312,6 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
         blk_start_queue(q);
         spin_unlock_irqrestore(q->queue_lock, flags);
 
-        mq->mqrq = NULL;
         mq->card = NULL;
 }
 EXPORT_SYMBOL(mmc_cleanup_queue);
@@ -491,12 +368,13 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
         unsigned int sg_len;
         size_t buflen;
         struct scatterlist *sg;
+        struct request *req = mmc_queue_req_to_req(mqrq);
         int i;
 
         if (!mqrq->bounce_buf)
-                return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
+                return blk_rq_map_sg(mq->queue, req, mqrq->sg);
 
-        sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
+        sg_len = blk_rq_map_sg(mq->queue, req, mqrq->bounce_sg);
 
         mqrq->bounce_sg_len = sg_len;
 
@@ -518,7 +396,7 @@ void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
         if (!mqrq->bounce_buf)
                 return;
 
-        if (rq_data_dir(mqrq->req) != WRITE)
+        if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != WRITE)
                 return;
 
         sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
@@ -534,7 +412,7 @@ void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
         if (!mqrq->bounce_buf)
                 return;
 
-        if (rq_data_dir(mqrq->req) != READ)
+        if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != READ)
                 return;
 
         sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
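
The two bounce helpers above exist for hosts that can only DMA a single segment
(max_segs == 1): before a write, the request's scattered pages are flattened into
the contiguous bounce buffer; after a read, the data is scattered back out. A
minimal illustration of the scatterlist copy helpers they rely on, with invented
foo_* names:

static void foo_bounce_pre_write(struct scatterlist *sg, unsigned int nents,
                                 void *bounce_buf, size_t bouncesz)
{
        /* gather scattered request pages into one linear, DMA-able buffer */
        sg_copy_to_buffer(sg, nents, bounce_buf, bouncesz);
}

static void foo_bounce_post_read(struct scatterlist *sg, unsigned int nents,
                                 void *bounce_buf, size_t bouncesz)
{
        /* scatter data read into the bounce buffer back to the real pages */
        sg_copy_from_buffer(sg, nents, bounce_buf, bouncesz);
}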