path: root/drivers/mmc/card/queue.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2008-10-12 14:51:57 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-10-12 14:51:57 -0400
commit    46b5e34029fef7a042f3ff16e319e737257e5c7b (patch)
tree      c2e90b7a6d7c39c3a35eed1dfd0fd19077467c93 /drivers/mmc/card/queue.c
parent    94a9f8ad337aec011da2ca901ef89ae7e885f24c (diff)
parent    6ee6c6adf1cfebbf432b8d1f204c7f96e395933e (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/drzeus/mmc
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/drzeus/mmc: (24 commits)
  MMC: Use timeout values from CSR
  MMC: CSD and CID timeout values
  sdhci: 'scratch' may be used uninitialized
  mmc: explicitly mention SDIO support in Kconfig
  mmc: remove redundant "depends on"
  Fix comment in include/linux/mmc/host.h
  sdio: high-speed support
  mmc_block: hard code 512 byte block size
  sdhci: force high speed capability on some controllers
  mmc_block: filter out PC requests
  mmc_block: indicate strict ordering
  mmc_block: inform block layer about sector count restriction
  sdio: give sdio irq thread a host specific name
  sdio: make sleep on error interruptable
  sdhci: reduce card detection delay
  sdhci: let the controller wait for busy state to end
  atmel-mci: Add missing flush_dcache_page() in PIO transfer code
  atmel-mci: Don't overwrite error bits when NOTBUSY is set
  atmel-mci: Add experimental DMA support
  atmel-mci: support multiple mmc slots
  ...
Diffstat (limited to 'drivers/mmc/card/queue.c')
-rw-r--r--  drivers/mmc/card/queue.c  23
1 file changed, 16 insertions(+), 7 deletions(-)
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 3dee97e7d165..406989e992ba 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -31,7 +31,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
 	/*
 	 * We only like normal block requests.
 	 */
-	if (!blk_fs_request(req) && !blk_pc_request(req)) {
+	if (!blk_fs_request(req)) {
 		blk_dump_rq_flags(req, "MMC bad request");
 		return BLKPREP_KILL;
 	}
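
This hunk is evidently the "mmc_block: filter out PC requests" change from the list above: SCSI packet-command (blk_pc) requests were never meaningfully handled by the MMC block driver, so the prepare hook now kills anything that is not an ordinary filesystem request. Reconstructed from the context lines (the tail of the function lies outside the hunk), the resulting prepare hook looks roughly like this:

    static int mmc_prep_request(struct request_queue *q, struct request *req)
    {
    	/*
    	 * We only like normal block requests.
    	 */
    	if (!blk_fs_request(req)) {	/* blk_pc_request() no longer passes */
    		blk_dump_rq_flags(req, "MMC bad request");
    		return BLKPREP_KILL;
    	}
    	/* ... remainder of the function is outside this hunk ... */
    	return BLKPREP_OK;
    }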
@@ -131,6 +131,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 	mq->req = NULL;
 
 	blk_queue_prep_rq(mq->queue, mmc_prep_request);
+	blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL);
 
 #ifdef CONFIG_MMC_BLOCK_BOUNCE
 	if (host->max_hw_segs == 1) {
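
The new blk_queue_ordered() call matches "mmc_block: indicate strict ordering": with QUEUE_ORDERED_DRAIN the block layer satisfies barrier requests by draining the queue before issuing them, and the NULL third argument means no flush callback is needed, since MMC cards of this era have no volatile write cache to flush. For contrast, a sketch under the 2.6.27-era block API (since removed; my_prepare_flush is a made-up callback name):

    /* No write cache: draining the queue alone honours barriers. */
    blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL);

    /* A driver *with* a volatile cache would instead drain and flush, */
    /* supplying a prepare-flush callback:                             */
    blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, my_prepare_flush);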
@@ -142,12 +143,19 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 			bouncesz = host->max_req_size;
 		if (bouncesz > host->max_seg_size)
 			bouncesz = host->max_seg_size;
+		if (bouncesz > (host->max_blk_count * 512))
+			bouncesz = host->max_blk_count * 512;
+
+		if (bouncesz > 512) {
+			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+			if (!mq->bounce_buf) {
+				printk(KERN_WARNING "%s: unable to "
+					"allocate bounce buffer\n",
+					mmc_card_name(card));
+			}
+		}
 
-		mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
-		if (!mq->bounce_buf) {
-			printk(KERN_WARNING "%s: unable to allocate "
-				"bounce buffer\n", mmc_card_name(card));
-		} else {
+		if (mq->bounce_buf) {
 			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
 			blk_queue_max_sectors(mq->queue, bouncesz / 512);
 			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
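
Two things happen in this hunk. First, the bounce buffer is now also clamped to host->max_blk_count * 512: with the block size hard-coded to 512 bytes elsewhere in this series ("mmc_block: hard code 512 byte block size"), a single request must never carry more blocks than the controller can count. Second, allocation is skipped entirely when the clamped size is 512 bytes or less, where bouncing cannot merge anything. A worked example with hypothetical host limits (the 64 KiB starting value is assumed to be this era's default bounce size in queue.c; the define is outside this diff):

    unsigned int bouncesz = 65536;          /* assumed driver default, 64 KiB */
    unsigned int max_req_size  = 65536;     /* hypothetical host limits ...   */
    unsigned int max_seg_size  = 65536;
    unsigned int max_blk_count = 8;         /* ... 8 blocks per request       */

    if (bouncesz > max_req_size)        bouncesz = max_req_size;        /* 65536 */
    if (bouncesz > max_seg_size)        bouncesz = max_seg_size;        /* 65536 */
    if (bouncesz > max_blk_count * 512) bouncesz = max_blk_count * 512; /*  4096 */

    /* 4096 > 512, so a 4 KiB buffer is allocated and the queue advertises */
    /* bouncesz / 512 = 8 sectors per request.                             */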
@@ -175,7 +183,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 
 	if (!mq->bounce_buf) {
 		blk_queue_bounce_limit(mq->queue, limit);
-		blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
+		blk_queue_max_sectors(mq->queue,
+			min(host->max_blk_count, host->max_req_size / 512));
 		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
 		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
 		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
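
The same restriction is propagated on the non-bounce path: instead of deriving the sector limit purely from max_req_size, the queue now advertises the smaller of the byte-derived limit and the controller's block-count limit. This is the "mmc_block: inform block layer about sector count restriction" commit. With the same hypothetical host as above (min() is the kernel's min macro, as used in the hunk):

    unsigned int max_blk_count = 8, max_req_size = 65536;     /* hypothetical */

    unsigned int before = max_req_size / 512;                     /* 128 sectors */
    unsigned int after  = min(max_blk_count, max_req_size / 512); /*   8 sectors */

Without the clamp, the block layer could build a 128-sector request that the host controller cannot transfer in a single go.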