author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2017-10-09 03:02:35 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2017-10-09 03:02:35 -0400
commit     1236d6bb6e19fc72ffc6bbcdeb1bfefe450e54ee (patch)
tree       47da3feee8e263e8c9352c85cf518e624be3c211 /drivers/mmc/core/queue.c
parent     750b1a6894ecc9b178c6e3d0a1170122971b2036 (diff)
parent     8a5776a5f49812d29fe4b2d0a2d71675c3facf3f (diff)

Merge 4.14-rc4 into staging-next

We want the staging/iio fixes in here as well to handle merge issues.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/mmc/core/queue.c')
-rw-r--r--  drivers/mmc/core/queue.c | 120
1 file changed, 9 insertions(+), 111 deletions(-)
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index affa7370ba82..0a4e77a5ba33 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -23,8 +23,6 @@
 #include "core.h"
 #include "card.h"
 
-#define MMC_QUEUE_BOUNCESZ 65536
-
 /*
  * Prepare a MMC request. This just filters out odd stuff.
  */
@@ -150,26 +148,6 @@ static void mmc_queue_setup_discard(struct request_queue *q,
 		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
 }
 
-static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
-{
-	unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;
-
-	if (host->max_segs != 1 || (host->caps & MMC_CAP_NO_BOUNCE_BUFF))
-		return 0;
-
-	if (bouncesz > host->max_req_size)
-		bouncesz = host->max_req_size;
-	if (bouncesz > host->max_seg_size)
-		bouncesz = host->max_seg_size;
-	if (bouncesz > host->max_blk_count * 512)
-		bouncesz = host->max_blk_count * 512;
-
-	if (bouncesz <= 512)
-		return 0;
-
-	return bouncesz;
-}
-
 /**
  * mmc_init_request() - initialize the MMC-specific per-request data
  * @q: the request queue
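
For reference, the deleted helper simply clamped a 64 KiB default (MMC_QUEUE_BOUNCESZ) against the host's limits. The sketch below is plain userspace C with made-up host values, not part of the patch, tracing the arithmetic once:

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical host limits, chosen only to illustrate the clamping. */
		unsigned int bouncesz      = 65536;	/* MMC_QUEUE_BOUNCESZ */
		unsigned int max_req_size  = 32768;
		unsigned int max_seg_size  = 65536;
		unsigned int max_blk_count = 128;

		if (bouncesz > max_req_size)
			bouncesz = max_req_size;	/* 65536 -> 32768 */
		if (bouncesz > max_seg_size)
			bouncesz = max_seg_size;	/* unchanged */
		if (bouncesz > max_blk_count * 512)
			bouncesz = max_blk_count * 512;	/* 128 * 512 = 65536, unchanged */

		/* bouncesz > 512, so this host would have used a 32 KiB bounce buffer. */
		printf("bouncesz = %u\n", bouncesz);	/* prints 32768 */
		return 0;
	}
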
@@ -184,26 +162,9 @@ static int mmc_init_request(struct request_queue *q, struct request *req,
 	struct mmc_card *card = mq->card;
 	struct mmc_host *host = card->host;
 
-	if (card->bouncesz) {
-		mq_rq->bounce_buf = kmalloc(card->bouncesz, gfp);
-		if (!mq_rq->bounce_buf)
-			return -ENOMEM;
-		if (card->bouncesz > 512) {
-			mq_rq->sg = mmc_alloc_sg(1, gfp);
-			if (!mq_rq->sg)
-				return -ENOMEM;
-			mq_rq->bounce_sg = mmc_alloc_sg(card->bouncesz / 512,
-							gfp);
-			if (!mq_rq->bounce_sg)
-				return -ENOMEM;
-		}
-	} else {
-		mq_rq->bounce_buf = NULL;
-		mq_rq->bounce_sg = NULL;
-		mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
-		if (!mq_rq->sg)
-			return -ENOMEM;
-	}
+	mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
+	if (!mq_rq->sg)
+		return -ENOMEM;
 
 	return 0;
 }
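
With the bounce branches gone, the whole of mmc_init_request() collapses to a single scatterlist allocation. Reconstructed from the hunk above (the two leading declarations sit outside the context window, so they are restored from how the function uses them):

	static int mmc_init_request(struct request_queue *q, struct request *req,
				    gfp_t gfp)
	{
		struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
		struct mmc_queue *mq = q->queuedata;	/* restored; not shown in the hunk */
		struct mmc_card *card = mq->card;
		struct mmc_host *host = card->host;

		/* One sg table, sized for the host's real segment limit. */
		mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
		if (!mq_rq->sg)
			return -ENOMEM;

		return 0;
	}
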
@@ -212,13 +173,6 @@ static void mmc_exit_request(struct request_queue *q, struct request *req)
 {
 	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
 
-	/* It is OK to kfree(NULL) so this will be smooth */
-	kfree(mq_rq->bounce_sg);
-	mq_rq->bounce_sg = NULL;
-
-	kfree(mq_rq->bounce_buf);
-	mq_rq->bounce_buf = NULL;
-
 	kfree(mq_rq->sg);
 	mq_rq->sg = NULL;
 }
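
The matching teardown is equally short once the bounce_sg/bounce_buf frees disappear; the post-patch function, read directly off the hunk, is:

	static void mmc_exit_request(struct request_queue *q, struct request *req)
	{
		struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);

		kfree(mq_rq->sg);
		mq_rq->sg = NULL;
	}
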
@@ -265,18 +219,11 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	if (mmc_can_erase(card))
 		mmc_queue_setup_discard(mq->queue, card);
 
-	card->bouncesz = mmc_queue_calc_bouncesz(host);
-	if (card->bouncesz) {
-		blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
-		blk_queue_max_segments(mq->queue, card->bouncesz / 512);
-		blk_queue_max_segment_size(mq->queue, card->bouncesz);
-	} else {
-		blk_queue_bounce_limit(mq->queue, limit);
-		blk_queue_max_hw_sectors(mq->queue,
-			min(host->max_blk_count, host->max_req_size / 512));
-		blk_queue_max_segments(mq->queue, host->max_segs);
-		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
-	}
+	blk_queue_bounce_limit(mq->queue, limit);
+	blk_queue_max_hw_sectors(mq->queue,
+		min(host->max_blk_count, host->max_req_size / 512));
+	blk_queue_max_segments(mq->queue, host->max_segs);
+	blk_queue_max_segment_size(mq->queue, host->max_seg_size);
 
 	sema_init(&mq->thread_sem, 1);
 
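
Note that blk_queue_max_hw_sectors() takes its limit in 512-byte sectors, which is why host->max_req_size (bytes) is divided by 512 before the min(). A quick worked example with assumed host values, for illustration only:

	/* Assumed limits, not from this patch. */
	unsigned int max_blk_count = 65535;	/* host->max_blk_count, in sectors */
	unsigned int max_req_size  = 524288;	/* host->max_req_size, in bytes   */

	unsigned int max_hw_sectors = max_blk_count < max_req_size / 512 ?
				      max_blk_count : max_req_size / 512;
	/* min(65535, 1024) = 1024 sectors, i.e. at most 512 KiB per request. */
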
@@ -365,56 +312,7 @@ void mmc_queue_resume(struct mmc_queue *mq)
  */
 unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
 {
-	unsigned int sg_len;
-	size_t buflen;
-	struct scatterlist *sg;
 	struct request *req = mmc_queue_req_to_req(mqrq);
-	int i;
-
-	if (!mqrq->bounce_buf)
-		return blk_rq_map_sg(mq->queue, req, mqrq->sg);
-
-	sg_len = blk_rq_map_sg(mq->queue, req, mqrq->bounce_sg);
-
-	mqrq->bounce_sg_len = sg_len;
-
-	buflen = 0;
-	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
-		buflen += sg->length;
-
-	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
-
-	return 1;
-}
-
-/*
- * If writing, bounce the data to the buffer before the request
- * is sent to the host driver
- */
-void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
-{
-	if (!mqrq->bounce_buf)
-		return;
-
-	if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != WRITE)
-		return;
-
-	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
-			  mqrq->bounce_buf, mqrq->sg[0].length);
-}
-
-/*
- * If reading, bounce the data from the buffer after the request
- * has been handled by the host driver
- */
-void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
-{
-	if (!mqrq->bounce_buf)
-		return;
-
-	if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != READ)
-		return;
-
-	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
-			    mqrq->bounce_buf, mqrq->sg[0].length);
+
+	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
 }
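
After this last hunk, mapping a request is a single call into the block layer, and the mmc_queue_bounce_pre()/mmc_queue_bounce_post() copy helpers are deleted outright, since no caller copies through an intermediate buffer any more. The surviving function, read off the hunk, is:

	unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
	{
		struct request *req = mmc_queue_req_to_req(mqrq);

		return blk_rq_map_sg(mq->queue, req, mqrq->sg);
	}
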