author	Pierre Ossman <drzeus@drzeus.cx>	2007-05-11 18:26:16 -0400
committer	Pierre Ossman <drzeus@drzeus.cx>	2007-07-09 15:22:53 -0400
commit	98ccf14909ba02a41c5925b0b2c92aeeef23d3b9 (patch)
tree	331b645ee008f858305f7406f4639119b275ff82 /drivers/mmc/card/queue.c
parent	7dcca30a32aadb0520417521b0c44f42d09fe05c (diff)
mmc: bounce requests for simple hosts
Some hosts cannot do scatter/gather in hardware. Since not doing sg is such
a big performance hit, we (optionally) bounce the requests to a simple linear
buffer that we hand over to the driver.

Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>
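The idea, in a self-contained user-space sketch (my own illustration, not code from this patch): a request arrives as several scattered fragments, but a host limited to max_hw_segs == 1 can only take one contiguous buffer, so the fragments are gathered into a linear bounce buffer before a write and scattered back out after a read. The names gather/scatter and struct frag are invented for the sketch; in the patch itself this is what mmc_queue_bounce_pre() and mmc_queue_bounce_post() below do with real scatterlists.

#include <stdio.h>
#include <string.h>

struct frag {			/* stand-in for one scatterlist entry */
	char *buf;
	size_t len;
};

/* Gather scattered fragments into one linear buffer (the "pre" step for writes). */
static void gather(char *bounce, const struct frag *sg, int n)
{
	size_t off = 0;

	for (int i = 0; i < n; i++) {
		memcpy(bounce + off, sg[i].buf, sg[i].len);
		off += sg[i].len;
	}
}

/* Scatter the linear buffer back into the fragments (the "post" step for reads). */
static void scatter(const char *bounce, struct frag *sg, int n)
{
	size_t off = 0;

	for (int i = 0; i < n; i++) {
		memcpy(sg[i].buf, bounce + off, sg[i].len);
		off += sg[i].len;
	}
}

int main(void)
{
	char a[4] = "abc", b[6] = "defgh";
	struct frag sg[] = { { a, 3 }, { b, 5 } };
	char bounce[8];

	gather(bounce, sg, 2);		/* hand one contiguous buffer to the host */
	printf("%.8s\n", bounce);	/* prints "abcdefgh" */
	scatter(bounce, sg, 2);		/* copy data back after a read completes */
	return 0;
}
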
Diffstat (limited to 'drivers/mmc/card/queue.c')
-rw-r--r--	drivers/mmc/card/queue.c	191
1 file changed, 177 insertions(+), 14 deletions(-)
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index dd97bc798409..4fb2089dc690 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -17,6 +17,8 @@
 #include <linux/mmc/host.h>
 #include "queue.h"
 
+#define MMC_QUEUE_BOUNCESZ	65536
+
 #define MMC_QUEUE_SUSPENDED	(1 << 0)
 
 /*
@@ -118,6 +120,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
 	struct mmc_host *host = card->host;
 	u64 limit = BLK_BOUNCE_HIGH;
 	int ret;
+	unsigned int bouncesz;
 
 	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
 		limit = *mmc_dev(host)->dma_mask;
@@ -127,21 +130,61 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
 	if (!mq->queue)
 		return -ENOMEM;
 
-	blk_queue_prep_rq(mq->queue, mmc_prep_request);
-	blk_queue_bounce_limit(mq->queue, limit);
-	blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
-	blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
-	blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
-	blk_queue_max_segment_size(mq->queue, host->max_seg_size);
-
 	mq->queue->queuedata = mq;
 	mq->req = NULL;
 
-	mq->sg = kmalloc(sizeof(struct scatterlist) * host->max_phys_segs,
-			 GFP_KERNEL);
-	if (!mq->sg) {
-		ret = -ENOMEM;
-		goto cleanup_queue;
+	blk_queue_prep_rq(mq->queue, mmc_prep_request);
+
+#ifdef CONFIG_MMC_BLOCK_BOUNCE
+	if (host->max_hw_segs == 1) {
+		bouncesz = MMC_QUEUE_BOUNCESZ;
+
+		if (bouncesz > host->max_req_size)
+			bouncesz = host->max_req_size;
+		if (bouncesz > host->max_seg_size)
+			bouncesz = host->max_seg_size;
+
+		mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+		if (!mq->bounce_buf) {
+			printk(KERN_WARNING "%s: unable to allocate "
+				"bounce buffer\n", mmc_card_name(card));
+		} else {
+			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
+			blk_queue_max_sectors(mq->queue, bouncesz / 512);
+			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
+			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
+			blk_queue_max_segment_size(mq->queue, bouncesz);
+
+			mq->sg = kmalloc(sizeof(struct scatterlist),
+				GFP_KERNEL);
+			if (!mq->sg) {
+				ret = -ENOMEM;
+				goto free_bounce_buf;
+			}
+
+			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
+				bouncesz / 512, GFP_KERNEL);
+			if (!mq->bounce_sg) {
+				ret = -ENOMEM;
+				goto free_sg;
+			}
+		}
+	}
+#endif
+
+	if (!mq->bounce_buf) {
+		blk_queue_bounce_limit(mq->queue, limit);
+		blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
+		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
+		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
+		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
+
+		mq->sg = kmalloc(sizeof(struct scatterlist) *
+			host->max_phys_segs, GFP_KERNEL);
+		if (!mq->sg) {
+			ret = -ENOMEM;
+			goto cleanup_queue;
+		}
 	}
 
 	init_MUTEX(&mq->thread_sem);
@@ -149,14 +192,21 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
 	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
 	if (IS_ERR(mq->thread)) {
 		ret = PTR_ERR(mq->thread);
-		goto free_sg;
+		goto free_bounce_sg;
 	}
 
 	return 0;
-
+ free_bounce_sg:
+	if (mq->bounce_sg)
+		kfree(mq->bounce_sg);
+	mq->bounce_sg = NULL;
  free_sg:
 	kfree(mq->sg);
 	mq->sg = NULL;
+ free_bounce_buf:
+	if (mq->bounce_buf)
+		kfree(mq->bounce_buf);
+	mq->bounce_buf = NULL;
  cleanup_queue:
 	blk_cleanup_queue(mq->queue);
 	return ret;
@@ -178,9 +228,17 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 	/* Then terminate our worker thread */
 	kthread_stop(mq->thread);
 
+	if (mq->bounce_sg)
+		kfree(mq->bounce_sg);
+	mq->bounce_sg = NULL;
+
 	kfree(mq->sg);
 	mq->sg = NULL;
 
+	if (mq->bounce_buf)
+		kfree(mq->bounce_buf);
+	mq->bounce_buf = NULL;
+
 	blk_cleanup_queue(mq->queue);
 
 	mq->card = NULL;
@@ -231,3 +289,108 @@ void mmc_queue_resume(struct mmc_queue *mq)
 	}
 }
 
+static void copy_sg(struct scatterlist *dst, unsigned int dst_len,
+	struct scatterlist *src, unsigned int src_len)
+{
+	unsigned int chunk;
+	char *dst_buf, *src_buf;
+	unsigned int dst_size, src_size;
+
+	dst_buf = NULL;
+	src_buf = NULL;
+	dst_size = 0;
+	src_size = 0;
+
+	while (src_len) {
+		BUG_ON(dst_len == 0);
+
+		if (dst_size == 0) {
+			dst_buf = page_address(dst->page) + dst->offset;
+			dst_size = dst->length;
+		}
+
+		if (src_size == 0) {
+			src_buf = page_address(src->page) + src->offset;
+			src_size = src->length;
+		}
+
+		chunk = min(dst_size, src_size);
+
+		memcpy(dst_buf, src_buf, chunk);
+
+		dst_buf += chunk;
+		src_buf += chunk;
+		dst_size -= chunk;
+		src_size -= chunk;
+
+		if (dst_size == 0) {
+			dst++;
+			dst_len--;
+		}
+
+		if (src_size == 0) {
+			src++;
+			src_len--;
+		}
+	}
+}
+
+unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
+{
+	unsigned int sg_len;
+
+	if (!mq->bounce_buf)
+		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);
+
+	BUG_ON(!mq->bounce_sg);
+
+	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);
+
+	mq->bounce_sg_len = sg_len;
+
+	/*
+	 * Shortcut in the event we only get a single entry.
+	 */
+	if (sg_len == 1) {
+		memcpy(mq->sg, mq->bounce_sg, sizeof(struct scatterlist));
+		return 1;
+	}
+
+	mq->sg[0].page = virt_to_page(mq->bounce_buf);
+	mq->sg[0].offset = offset_in_page(mq->bounce_buf);
+	mq->sg[0].length = 0;
+
+	while (sg_len) {
+		mq->sg[0].length += mq->bounce_sg[sg_len - 1].length;
+		sg_len--;
+	}
+
+	return 1;
+}
+
+void mmc_queue_bounce_pre(struct mmc_queue *mq)
+{
+	if (!mq->bounce_buf)
+		return;
+
+	if (mq->bounce_sg_len == 1)
+		return;
+	if (rq_data_dir(mq->req) != WRITE)
+		return;
+
+	copy_sg(mq->sg, 1, mq->bounce_sg, mq->bounce_sg_len);
+}
+
+void mmc_queue_bounce_post(struct mmc_queue *mq)
+{
+	if (!mq->bounce_buf)
+		return;
+
+	if (mq->bounce_sg_len == 1)
+		return;
+	if (rq_data_dir(mq->req) != READ)
+		return;
+
+	copy_sg(mq->bounce_sg, mq->bounce_sg_len, mq->sg, 1);
+}
+
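For context, a hedged sketch of how a caller such as the MMC block driver (whose matching change is outside this diffstat-limited view) is expected to drive the new helpers; the function name and surrounding variables are illustrative, and only mmc_queue_map_sg(), mmc_queue_bounce_pre(), mmc_queue_bounce_post() and mmc_wait_for_req() are taken from the real API:

static void issue_one_request(struct mmc_queue *mq, struct mmc_card *card,
			      struct mmc_request *mrq, struct mmc_data *data)
{
	/* With a bounce buffer, the request collapses to a single sg entry. */
	data->sg = mq->sg;
	data->sg_len = mmc_queue_map_sg(mq);

	/* Writes: gather the scattered request data into mq->bounce_buf. */
	mmc_queue_bounce_pre(mq);

	/* The simple host only ever sees one linear segment. */
	mmc_wait_for_req(card->host, mrq);

	/* Reads: scatter mq->bounce_buf back into the original request pages. */
	mmc_queue_bounce_post(mq);
}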