author     Pierre Ossman <drzeus@drzeus.cx>   2008-07-22 08:35:42 -0400
committer  Pierre Ossman <drzeus@drzeus.cx>   2008-07-23 08:42:09 -0400
commit     2ff1fa679115e3c8c78ad74ad8fd2d7fd87ae4e7 (patch)
tree       441ea088fa64fa0d22e23b048731ad16a16bc3a6
parent     719a61b452ff74cf81a96e4212748d9d63bcc924 (diff)
mmc_block: bounce buffer highmem support
Support highmem pages in the bounce buffer code by using the
sg_copy_from/to_buffer() functions.
Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>
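
The helpers named in the message, sg_copy_to_buffer() and sg_copy_from_buffer(), copy between a scatterlist and a linear buffer while mapping each page internally, so the caller never dereferences page addresses itself and highmem pages work transparently. A minimal sketch of the call pattern, assuming a driver-private bounce buffer (the function and variable names below are illustrative, not part of this patch):

#include <linux/scatterlist.h>

/*
 * Illustrative sketch only: bounce a scatterlist through a linear
 * buffer. The sg_copy_*_buffer() helpers map each sg page internally
 * (kmap-based), which is what makes highmem pages safe to copy here.
 */
static void bounce_sketch(struct scatterlist *sgl, unsigned int nents,
                          void *bounce_buf, size_t buflen)
{
        /* scatterlist -> linear buffer: the write-to-device direction */
        sg_copy_to_buffer(sgl, nents, bounce_buf, buflen);

        /* linear buffer -> scatterlist: the read-from-device direction */
        sg_copy_from_buffer(sgl, nents, bounce_buf, buflen);
}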
-rw-r--r--   drivers/mmc/card/queue.c   97
1 file changed, 31 insertions(+), 66 deletions(-)
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 7731ddefdc1b..3dee97e7d165 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -148,7 +148,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 			printk(KERN_WARNING "%s: unable to allocate "
 				"bounce buffer\n", mmc_card_name(card));
 		} else {
-			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
+			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
 			blk_queue_max_sectors(mq->queue, bouncesz / 512);
 			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
 			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
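(BLK_BOUNCE_HIGH told the block layer to bounce highmem pages into lowmem itself before the driver ever saw a request; now that the driver-side bounce path can copy highmem pages directly, the limit is relaxed to BLK_BOUNCE_ANY and that extra block-layer copy is avoided.)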
@@ -290,55 +290,15 @@ void mmc_queue_resume(struct mmc_queue *mq)
 	}
 }
 
-static void copy_sg(struct scatterlist *dst, unsigned int dst_len,
-	struct scatterlist *src, unsigned int src_len)
-{
-	unsigned int chunk;
-	char *dst_buf, *src_buf;
-	unsigned int dst_size, src_size;
-
-	dst_buf = NULL;
-	src_buf = NULL;
-	dst_size = 0;
-	src_size = 0;
-
-	while (src_len) {
-		BUG_ON(dst_len == 0);
-
-		if (dst_size == 0) {
-			dst_buf = sg_virt(dst);
-			dst_size = dst->length;
-		}
-
-		if (src_size == 0) {
-			src_buf = sg_virt(src);
-			src_size = src->length;
-		}
-
-		chunk = min(dst_size, src_size);
-
-		memcpy(dst_buf, src_buf, chunk);
-
-		dst_buf += chunk;
-		src_buf += chunk;
-		dst_size -= chunk;
-		src_size -= chunk;
-
-		if (dst_size == 0) {
-			dst++;
-			dst_len--;
-		}
-
-		if (src_size == 0) {
-			src++;
-			src_len--;
-		}
-	}
-}
-
+/*
+ * Prepare the sg list(s) to be handed off to the host driver
+ */
 unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
 {
 	unsigned int sg_len;
+	size_t buflen;
+	struct scatterlist *sg;
+	int i;
 
 	if (!mq->bounce_buf)
 		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);
@@ -349,47 +309,52 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
 
 	mq->bounce_sg_len = sg_len;
 
-	/*
-	 * Shortcut in the event we only get a single entry.
-	 */
-	if (sg_len == 1) {
-		memcpy(mq->sg, mq->bounce_sg, sizeof(struct scatterlist));
-		return 1;
-	}
+	buflen = 0;
+	for_each_sg(mq->bounce_sg, sg, sg_len, i)
+		buflen += sg->length;
 
-	sg_init_one(mq->sg, mq->bounce_buf, 0);
-
-	while (sg_len) {
-		mq->sg[0].length += mq->bounce_sg[sg_len - 1].length;
-		sg_len--;
-	}
+	sg_init_one(mq->sg, mq->bounce_buf, buflen);
 
 	return 1;
 }
 
+/*
+ * If writing, bounce the data to the buffer before the request
+ * is sent to the host driver
+ */
 void mmc_queue_bounce_pre(struct mmc_queue *mq)
 {
+	unsigned long flags;
+
 	if (!mq->bounce_buf)
 		return;
 
-	if (mq->bounce_sg_len == 1)
-		return;
 	if (rq_data_dir(mq->req) != WRITE)
 		return;
 
-	copy_sg(mq->sg, 1, mq->bounce_sg, mq->bounce_sg_len);
+	local_irq_save(flags);
+	sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
+		mq->bounce_buf, mq->sg[0].length);
+	local_irq_restore(flags);
 }
 
+/*
+ * If reading, bounce the data from the buffer after the request
+ * has been handled by the host driver
+ */
 void mmc_queue_bounce_post(struct mmc_queue *mq)
 {
+	unsigned long flags;
+
 	if (!mq->bounce_buf)
 		return;
 
-	if (mq->bounce_sg_len == 1)
-		return;
 	if (rq_data_dir(mq->req) != READ)
 		return;
 
-	copy_sg(mq->bounce_sg, mq->bounce_sg_len, mq->sg, 1);
+	local_irq_save(flags);
+	sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
+		mq->bounce_buf, mq->sg[0].length);
+	local_irq_restore(flags);
 }
 
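
A note on the local_irq_save()/local_irq_restore() pairs added above: in kernels of this era, sg_copy_to/from_buffer() mapped each page with kmap_atomic() using the KM_BIO_SRC_IRQ per-CPU kmap slot, a slot shared with interrupt-time block I/O code; the helper even contained WARN_ON(!irqs_disabled()), so callers had to disable local interrupts around the copy. Roughly what the helper does internally, as a simplified sketch (partial-page splitting and the copy-direction flag are elided; this is not the exact lib/scatterlist.c code):

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>

/* Simplified, hypothetical rendering of sg_copy_to_buffer() internals. */
static size_t sg_copy_to_buffer_sketch(struct scatterlist *sgl,
                                       unsigned int nents, void *buf,
                                       size_t buflen)
{
        struct scatterlist *sg;
        size_t copied = 0;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                size_t len = min_t(size_t, sg->length, buflen - copied);
                /*
                 * kmap_atomic() gives a temporary mapping even for
                 * highmem pages; the IRQ kmap slot is why the callers
                 * above run with local interrupts disabled.
                 */
                void *kaddr = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ);

                memcpy(buf + copied, kaddr + sg->offset, len);
                kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);

                copied += len;
                if (copied >= buflen)
                        break;
        }

        return copied;
}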