Diffstat (limited to 'drivers/mmc/card')
-rw-r--r--  drivers/mmc/card/Kconfig   18
-rw-r--r--  drivers/mmc/card/block.c    7
-rw-r--r--  drivers/mmc/card/queue.c  191
-rw-r--r--  drivers/mmc/card/queue.h    7
4 files changed, 208 insertions, 15 deletions
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 9320a8c73239..a49cb9737cd8 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -14,3 +14,21 @@ config MMC_BLOCK
 	  mount the filesystem. Almost everyone wishing MMC support
 	  should say Y or M here.
 
+config MMC_BLOCK_BOUNCE
+	bool "Use bounce buffer for simple hosts"
+	depends on MMC_BLOCK
+	default y
+	help
+	  SD/MMC is a high latency protocol where it is crucial to
+	  send large requests in order to get high performance. Many
+	  controllers, however, are restricted to continuous memory
+	  (i.e. they can't do scatter-gather), something the kernel
+	  rarely can provide.
+
+	  Say Y here to help these restricted hosts by bouncing
+	  requests back and forth from a large buffer. You will get
+	  a big performance gain at the cost of up to 64 KiB of
+	  physical memory.
+
+	  If unsure, say Y here.
+
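
The "up to 64 KiB" cost quoted in the help text is MMC_QUEUE_BOUNCESZ (65536 bytes) from queue.c below, clamped to what the host can actually accept in a single request. A standalone sketch of that sizing step; the max_req_size and max_seg_size values here are made up and merely stand in for the fields of struct mmc_host:

#include <stdio.h>

#define MMC_QUEUE_BOUNCESZ 65536	/* same constant queue.c defines */

int main(void)
{
	/* hypothetical host limits; real values come from struct mmc_host */
	unsigned int max_req_size = 32768;
	unsigned int max_seg_size = 65536;

	unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;

	/* clamp exactly as mmc_init_queue() does below */
	if (bouncesz > max_req_size)
		bouncesz = max_req_size;
	if (bouncesz > max_seg_size)
		bouncesz = max_seg_size;

	/* the queue is then limited to single requests of this size */
	printf("bounce buffer: %u bytes (%u sectors)\n",
	       bouncesz, bouncesz / 512);
	return 0;
}

With the default of 65536 and a host that imposes no tighter limits, this yields a 64 KiB buffer, i.e. 128-sector requests.
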
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 540ff4bea54c..cbd4b6e3e17c 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -262,7 +262,9 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		}
 
 		brq.data.sg = mq->sg;
-		brq.data.sg_len = blk_rq_map_sg(req->q, req, brq.data.sg);
+		brq.data.sg_len = mmc_queue_map_sg(mq);
+
+		mmc_queue_bounce_pre(mq);
 
 		if (brq.data.blocks !=
 		    (req->nr_sectors >> (md->block_bits - 9))) {
@@ -279,6 +281,9 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		}
 
 		mmc_wait_for_req(card->host, &brq.mrq);
+
+		mmc_queue_bounce_post(mq);
+
 		if (brq.cmd.error) {
 			printk(KERN_ERR "%s: error %d sending read/write command\n",
 			       req->rq_disk->disk_name, brq.cmd.error);
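
The two new hooks bracket the actual transfer: mmc_queue_bounce_pre() runs after the request is mapped and before mmc_wait_for_req(), mmc_queue_bounce_post() after completion. A minimal userspace model of why each copy is only needed in one direction; every name and buffer here is a hypothetical stand-in, not the kernel API:

#include <stdio.h>
#include <string.h>

enum dir { DIR_READ, DIR_WRITE };

static char card[16] = "0123456789abcdef";	/* stand-in for the MMC card */
static char bounce[16];				/* contiguous bounce buffer */

/* mmc_queue_bounce_pre() analogue: write data must be staged beforehand */
static void bounce_pre(enum dir d, const char *data, unsigned int len)
{
	if (d == DIR_WRITE)
		memcpy(bounce, data, len);
}

/* the single-segment transfer a simple host is capable of */
static void transfer(enum dir d, unsigned int len)
{
	if (d == DIR_WRITE)
		memcpy(card, bounce, len);
	else
		memcpy(bounce, card, len);
}

/* mmc_queue_bounce_post() analogue: read data only exists afterwards */
static void bounce_post(enum dir d, char *data, unsigned int len)
{
	if (d == DIR_READ)
		memcpy(data, bounce, len);
}

int main(void)
{
	char data[8];

	bounce_pre(DIR_READ, data, 8);	/* no-op for a read */
	transfer(DIR_READ, 8);
	bounce_post(DIR_READ, data, 8);	/* copy results back out */
	printf("read: %.8s\n", data);
	return 0;
}
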
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index dd97bc798409..4fb2089dc690 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -17,6 +17,8 @@
 #include <linux/mmc/host.h>
 #include "queue.h"
 
+#define MMC_QUEUE_BOUNCESZ	65536
+
 #define MMC_QUEUE_SUSPENDED	(1 << 0)
 
 /*
@@ -118,6 +120,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 	struct mmc_host *host = card->host;
 	u64 limit = BLK_BOUNCE_HIGH;
 	int ret;
+	unsigned int bouncesz;
 
 	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
 		limit = *mmc_dev(host)->dma_mask;
@@ -127,21 +130,61 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 	if (!mq->queue)
 		return -ENOMEM;
 
-	blk_queue_prep_rq(mq->queue, mmc_prep_request);
-	blk_queue_bounce_limit(mq->queue, limit);
-	blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
-	blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
-	blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
-	blk_queue_max_segment_size(mq->queue, host->max_seg_size);
-
 	mq->queue->queuedata = mq;
 	mq->req = NULL;
 
-	mq->sg = kmalloc(sizeof(struct scatterlist) * host->max_phys_segs,
-			 GFP_KERNEL);
-	if (!mq->sg) {
-		ret = -ENOMEM;
-		goto cleanup_queue;
+	blk_queue_prep_rq(mq->queue, mmc_prep_request);
+
+#ifdef CONFIG_MMC_BLOCK_BOUNCE
+	if (host->max_hw_segs == 1) {
+		bouncesz = MMC_QUEUE_BOUNCESZ;
+
+		if (bouncesz > host->max_req_size)
+			bouncesz = host->max_req_size;
+		if (bouncesz > host->max_seg_size)
+			bouncesz = host->max_seg_size;
+
+		mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+		if (!mq->bounce_buf) {
+			printk(KERN_WARNING "%s: unable to allocate "
+				"bounce buffer\n", mmc_card_name(card));
+		} else {
+			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
+			blk_queue_max_sectors(mq->queue, bouncesz / 512);
+			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
+			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
+			blk_queue_max_segment_size(mq->queue, bouncesz);
+
+			mq->sg = kmalloc(sizeof(struct scatterlist),
+				GFP_KERNEL);
+			if (!mq->sg) {
+				ret = -ENOMEM;
+				goto free_bounce_buf;
+			}
+
+			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
+				bouncesz / 512, GFP_KERNEL);
+			if (!mq->bounce_sg) {
+				ret = -ENOMEM;
+				goto free_sg;
+			}
+		}
+	}
+#endif
+
+	if (!mq->bounce_buf) {
+		blk_queue_bounce_limit(mq->queue, limit);
+		blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
+		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
+		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
+		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
+
+		mq->sg = kmalloc(sizeof(struct scatterlist) *
+			host->max_phys_segs, GFP_KERNEL);
+		if (!mq->sg) {
+			ret = -ENOMEM;
+			goto cleanup_queue;
+		}
 	}
 
 	init_MUTEX(&mq->thread_sem);
@@ -149,14 +192,21 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
 	if (IS_ERR(mq->thread)) {
 		ret = PTR_ERR(mq->thread);
-		goto free_sg;
+		goto free_bounce_sg;
 	}
 
 	return 0;
-
+ free_bounce_sg:
+	if (mq->bounce_sg)
+		kfree(mq->bounce_sg);
+	mq->bounce_sg = NULL;
  free_sg:
 	kfree(mq->sg);
 	mq->sg = NULL;
+ free_bounce_buf:
+	if (mq->bounce_buf)
+		kfree(mq->bounce_buf);
+	mq->bounce_buf = NULL;
  cleanup_queue:
 	blk_cleanup_queue(mq->queue);
 	return ret;
@@ -178,9 +228,17 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 	/* Then terminate our worker thread */
 	kthread_stop(mq->thread);
 
+	if (mq->bounce_sg)
+		kfree(mq->bounce_sg);
+	mq->bounce_sg = NULL;
+
 	kfree(mq->sg);
 	mq->sg = NULL;
 
+	if (mq->bounce_buf)
+		kfree(mq->bounce_buf);
+	mq->bounce_buf = NULL;
+
 	blk_cleanup_queue(mq->queue);
 
 	mq->card = NULL;
@@ -231,3 +289,108 @@ void mmc_queue_resume(struct mmc_queue *mq)
 	}
 }
 
+static void copy_sg(struct scatterlist *dst, unsigned int dst_len,
+	struct scatterlist *src, unsigned int src_len)
+{
+	unsigned int chunk;
+	char *dst_buf, *src_buf;
+	unsigned int dst_size, src_size;
+
+	dst_buf = NULL;
+	src_buf = NULL;
+	dst_size = 0;
+	src_size = 0;
+
+	while (src_len) {
+		BUG_ON(dst_len == 0);
+
+		if (dst_size == 0) {
+			dst_buf = page_address(dst->page) + dst->offset;
+			dst_size = dst->length;
+		}
+
+		if (src_size == 0) {
+			src_buf = page_address(src->page) + src->offset;
+			src_size = src->length;
+		}
+
+		chunk = min(dst_size, src_size);
+
+		memcpy(dst_buf, src_buf, chunk);
+
+		dst_buf += chunk;
+		src_buf += chunk;
+		dst_size -= chunk;
+		src_size -= chunk;
+
+		if (dst_size == 0) {
+			dst++;
+			dst_len--;
+		}
+
+		if (src_size == 0) {
+			src++;
+			src_len--;
+		}
+	}
+}
+
+unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
+{
+	unsigned int sg_len;
+
+	if (!mq->bounce_buf)
+		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);
+
+	BUG_ON(!mq->bounce_sg);
+
+	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);
+
+	mq->bounce_sg_len = sg_len;
+
+	/*
+	 * Shortcut in the event we only get a single entry.
+	 */
+	if (sg_len == 1) {
+		memcpy(mq->sg, mq->bounce_sg, sizeof(struct scatterlist));
+		return 1;
+	}
+
+	mq->sg[0].page = virt_to_page(mq->bounce_buf);
+	mq->sg[0].offset = offset_in_page(mq->bounce_buf);
+	mq->sg[0].length = 0;
+
+	while (sg_len) {
+		mq->sg[0].length += mq->bounce_sg[sg_len - 1].length;
+		sg_len--;
+	}
+
+	return 1;
+}
+
+void mmc_queue_bounce_pre(struct mmc_queue *mq)
+{
+	if (!mq->bounce_buf)
+		return;
+
+	if (mq->bounce_sg_len == 1)
+		return;
+	if (rq_data_dir(mq->req) != WRITE)
+		return;
+
+	copy_sg(mq->sg, 1, mq->bounce_sg, mq->bounce_sg_len);
+}
+
+void mmc_queue_bounce_post(struct mmc_queue *mq)
+{
+	if (!mq->bounce_buf)
+		return;
+
+	if (mq->bounce_sg_len == 1)
+		return;
+	if (rq_data_dir(mq->req) != READ)
+		return;
+
+	copy_sg(mq->bounce_sg, mq->bounce_sg_len, mq->sg, 1);
+}
+
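
copy_sg() above has to cope with the fact that the request's segment boundaries and the bounce buffer's single segment need not coincide: each iteration copies min(dst_size, src_size) bytes and advances whichever side was exhausted. The same two-cursor walk as a self-contained userspace program; struct seg is a hypothetical stand-in for struct scatterlist:

#include <stdio.h>
#include <string.h>

struct seg { char *buf; unsigned int len; };

static void copy_segs(struct seg *dst, unsigned int dst_len,
		      struct seg *src, unsigned int src_len)
{
	char *dbuf = NULL, *sbuf = NULL;
	unsigned int dsize = 0, ssize = 0, chunk;

	while (src_len) {
		if (dsize == 0) { dbuf = dst->buf; dsize = dst->len; }
		if (ssize == 0) { sbuf = src->buf; ssize = src->len; }

		chunk = dsize < ssize ? dsize : ssize;	/* copy the overlap */
		memcpy(dbuf, sbuf, chunk);

		dbuf += chunk; sbuf += chunk;
		dsize -= chunk; ssize -= chunk;

		if (dsize == 0) { dst++; dst_len--; }	/* next dst segment */
		if (ssize == 0) { src++; src_len--; }	/* next src segment */
	}
}

int main(void)
{
	char s1[4] = "abcd", s2[6] = "efghij";
	char d1[7], d2[3];
	struct seg src[] = { { s1, 4 }, { s2, 6 } };
	struct seg dst[] = { { d1, 7 }, { d2, 3 } };

	copy_segs(dst, 2, src, 2);
	printf("%.7s%.3s\n", d1, d2);	/* prints abcdefghij */
	return 0;
}

Run against the mismatched lists in main(), a 4+6 byte split is copied cleanly into a 7+3 byte split, which is exactly the situation when a multi-segment request is bounced through the one-segment buffer.
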
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 1590b3f3f1f7..64e66e0d4994 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -14,6 +14,9 @@ struct mmc_queue {
 	void			*data;
 	struct request_queue	*queue;
 	struct scatterlist	*sg;
+	char			*bounce_buf;
+	struct scatterlist	*bounce_sg;
+	unsigned int		bounce_sg_len;
 };
 
 extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *);
@@ -21,4 +24,8 @@ extern void mmc_cleanup_queue(struct mmc_queue *);
 extern void mmc_queue_suspend(struct mmc_queue *);
 extern void mmc_queue_resume(struct mmc_queue *);
 
+extern unsigned int mmc_queue_map_sg(struct mmc_queue *);
+extern void mmc_queue_bounce_pre(struct mmc_queue *);
+extern void mmc_queue_bounce_post(struct mmc_queue *);
+
 #endif