diff options
author | John Ogness <john.ogness@linutronix.de> | 2011-03-25 04:47:37 -0400 |
---|---|---|
committer | Chris Ball <cjb@laptop.org> | 2011-05-24 20:59:17 -0400 |
commit | 0b38c4ebf037290eef27b31bdc37cacf804c7a48 (patch) | |
tree | 97a947537da85f161b2862d2d26221402d637194 /drivers/mmc/card | |
parent | 41e2a4893566ced3c46af15df5b727326881e47d (diff) |
mmc: remove redundant irq disabling
There is no need to disable IRQs when using the sg_copy_*_buffer()
functions because those functions do that already. There are also
no races on the mmc_queue struct here that would require IRQs
to be disabled before calling sg_copy_*_buffer().
Signed-off-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: Chris Ball <cjb@laptop.org>
Diffstat (limited to 'drivers/mmc/card')
-rw-r--r-- | drivers/mmc/card/queue.c | 8 |
1 file changed, 0 insertions, 8 deletions
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index 2ae727568df9..c07322c2658c 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c | |||
@@ -343,18 +343,14 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq) | |||
343 | */ | 343 | */ |
344 | void mmc_queue_bounce_pre(struct mmc_queue *mq) | 344 | void mmc_queue_bounce_pre(struct mmc_queue *mq) |
345 | { | 345 | { |
346 | unsigned long flags; | ||
347 | |||
348 | if (!mq->bounce_buf) | 346 | if (!mq->bounce_buf) |
349 | return; | 347 | return; |
350 | 348 | ||
351 | if (rq_data_dir(mq->req) != WRITE) | 349 | if (rq_data_dir(mq->req) != WRITE) |
352 | return; | 350 | return; |
353 | 351 | ||
354 | local_irq_save(flags); | ||
355 | sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len, | 352 | sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len, |
356 | mq->bounce_buf, mq->sg[0].length); | 353 | mq->bounce_buf, mq->sg[0].length); |
357 | local_irq_restore(flags); | ||
358 | } | 354 | } |
359 | 355 | ||
360 | /* | 356 | /* |
@@ -363,17 +359,13 @@ void mmc_queue_bounce_pre(struct mmc_queue *mq) | |||
363 | */ | 359 | */ |
364 | void mmc_queue_bounce_post(struct mmc_queue *mq) | 360 | void mmc_queue_bounce_post(struct mmc_queue *mq) |
365 | { | 361 | { |
366 | unsigned long flags; | ||
367 | |||
368 | if (!mq->bounce_buf) | 362 | if (!mq->bounce_buf) |
369 | return; | 363 | return; |
370 | 364 | ||
371 | if (rq_data_dir(mq->req) != READ) | 365 | if (rq_data_dir(mq->req) != READ) |
372 | return; | 366 | return; |
373 | 367 | ||
374 | local_irq_save(flags); | ||
375 | sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len, | 368 | sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len, |
376 | mq->bounce_buf, mq->sg[0].length); | 369 | mq->bounce_buf, mq->sg[0].length); |
377 | local_irq_restore(flags); | ||
378 | } | 370 | } |
379 | 371 | ||