Diffstat (limited to 'drivers/mmc/card/queue.c')
-rw-r--r--	drivers/mmc/card/queue.c	23
1 file changed, 16 insertions(+), 7 deletions(-)
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 3dee97e7d165..406989e992ba 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -31,7 +31,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
 	/*
 	 * We only like normal block requests.
 	 */
-	if (!blk_fs_request(req) && !blk_pc_request(req)) {
+	if (!blk_fs_request(req)) {
 		blk_dump_rq_flags(req, "MMC bad request");
 		return BLKPREP_KILL;
 	}
@@ -131,6 +131,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 	mq->req = NULL;
 
 	blk_queue_prep_rq(mq->queue, mmc_prep_request);
+	blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL);
 
 #ifdef CONFIG_MMC_BLOCK_BOUNCE
 	if (host->max_hw_segs == 1) {
@@ -142,12 +143,19 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 			bouncesz = host->max_req_size;
 		if (bouncesz > host->max_seg_size)
 			bouncesz = host->max_seg_size;
+		if (bouncesz > (host->max_blk_count * 512))
+			bouncesz = host->max_blk_count * 512;
+
+		if (bouncesz > 512) {
+			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+			if (!mq->bounce_buf) {
+				printk(KERN_WARNING "%s: unable to "
+					"allocate bounce buffer\n",
+					mmc_card_name(card));
+			}
+		}
 
-		mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
-		if (!mq->bounce_buf) {
-			printk(KERN_WARNING "%s: unable to allocate "
-				"bounce buffer\n", mmc_card_name(card));
-		} else {
+		if (mq->bounce_buf) {
 			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
 			blk_queue_max_sectors(mq->queue, bouncesz / 512);
 			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
@@ -175,7 +183,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 
 	if (!mq->bounce_buf) {
 		blk_queue_bounce_limit(mq->queue, limit);
-		blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
+		blk_queue_max_sectors(mq->queue,
+			min(host->max_blk_count, host->max_req_size / 512));
 		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
 		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
 		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
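
Note: the sizing logic introduced in the third hunk is easiest to follow with concrete numbers. The fragment below is a minimal, self-contained sketch of that clamping cascade; the starting buffer size and the three host limits are hypothetical values chosen for illustration and are not taken from this commit.

#include <stdio.h>

int main(void)
{
	/* Hypothetical host limits; only the clamping order mirrors the patch. */
	unsigned int bouncesz      = 65536;	/* assumed starting buffer size */
	unsigned int max_req_size  = 131072;	/* host byte limit per request */
	unsigned int max_seg_size  = 65536;	/* host segment size limit */
	unsigned int max_blk_count = 64;	/* host limit on 512-byte blocks */

	if (bouncesz > max_req_size)
		bouncesz = max_req_size;
	if (bouncesz > max_seg_size)
		bouncesz = max_seg_size;
	if (bouncesz > (max_blk_count * 512))
		bouncesz = max_blk_count * 512;

	/* A buffer of one sector or less is not worth allocating. */
	if (bouncesz > 512)
		printf("bounce buffer: %u bytes\n", bouncesz);	/* prints 32768 */
	else
		printf("bounce buffer skipped\n");

	return 0;
}

With these example values the buffer ends up capped by the block-count limit rather than by the byte or segment limits, which is exactly the case the new host->max_blk_count * 512 clamp (and the min() in the last hunk) is there to handle.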