Diffstat (limited to 'drivers/mmc/card/queue.c')
 drivers/mmc/card/queue.c | 29 ++++++++++++++---------------
 1 file changed, 14 insertions(+), 15 deletions(-)
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 49e582356c65..d6ded247d941 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -9,6 +9,7 @@
  * published by the Free Software Foundation.
  *
  */
+#include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/blkdev.h>
 #include <linux/freezer.h>
@@ -90,9 +91,10 @@ static void mmc_request(struct request_queue *q)
 	struct request *req;
 
 	if (!mq) {
-		printk(KERN_ERR "MMC: killing requests for dead queue\n");
-		while ((req = blk_fetch_request(q)) != NULL)
-			__blk_end_request_all(req, -EIO);
+		while ((req = blk_fetch_request(q)) != NULL) {
+			req->cmd_flags |= REQ_QUIET;
+			__blk_end_request_all(req, -EIO);
+		}
 		return;
 	}
 
@@ -153,9 +155,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 
 	if (mq->bounce_buf) {
 		blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
-		blk_queue_max_sectors(mq->queue, bouncesz / 512);
-		blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
-		blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
+		blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
+		blk_queue_max_segments(mq->queue, bouncesz / 512);
 		blk_queue_max_segment_size(mq->queue, bouncesz);
 
 		mq->sg = kmalloc(sizeof(struct scatterlist),
@@ -179,10 +180,9 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 
 	if (!mq->bounce_buf) {
 		blk_queue_bounce_limit(mq->queue, limit);
-		blk_queue_max_sectors(mq->queue,
+		blk_queue_max_hw_sectors(mq->queue,
 			min(host->max_blk_count, host->max_req_size / 512));
-		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
-		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
+		blk_queue_max_segments(mq->queue, host->max_hw_segs);
 		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
 
 		mq->sg = kmalloc(sizeof(struct scatterlist) *
@@ -223,17 +223,18 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 	struct request_queue *q = mq->queue;
 	unsigned long flags;
 
-	/* Mark that we should start throwing out stragglers */
-	spin_lock_irqsave(q->queue_lock, flags);
-	q->queuedata = NULL;
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
 	/* Make sure the queue isn't suspended, as that will deadlock */
 	mmc_queue_resume(mq);
 
 	/* Then terminate our worker thread */
 	kthread_stop(mq->thread);
 
+	/* Empty the queue */
+	spin_lock_irqsave(q->queue_lock, flags);
+	q->queuedata = NULL;
+	blk_start_queue(q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
 	if (mq->bounce_sg)
 		kfree(mq->bounce_sg);
 	mq->bounce_sg = NULL;
@@ -245,8 +246,6 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 	kfree(mq->bounce_buf);
 	mq->bounce_buf = NULL;
 
-	blk_cleanup_queue(mq->queue);
-
 	mq->card = NULL;
 }
 EXPORT_SYMBOL(mmc_cleanup_queue);
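
Note on the mmc_init_queue() hunks above: they track the block layer's consolidation of its queue-limit helpers, in which blk_queue_max_sectors() was replaced by blk_queue_max_hw_sectors() and the separate blk_queue_max_phys_segments()/blk_queue_max_hw_segments() limits were merged into a single blk_queue_max_segments(). A minimal sketch of that before/after pattern for an arbitrary driver; the helper name and parameters are illustrative only, not from this patch:

#include <linux/blkdev.h>

/* Illustrative helper: old -> new queue-limit calls side by side. */
static void example_set_queue_limits(struct request_queue *q,
				     unsigned int max_segs,
				     unsigned int max_bytes)
{
	/*
	 * Old API:
	 *   blk_queue_max_sectors(q, max_bytes / 512);
	 *   blk_queue_max_phys_segments(q, max_segs);
	 *   blk_queue_max_hw_segments(q, max_segs);
	 */

	/* New API: one hardware sector limit, one segment limit. */
	blk_queue_max_hw_sectors(q, max_bytes / 512);
	blk_queue_max_segments(q, max_segs);
	blk_queue_max_segment_size(q, max_bytes);
}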
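
Note on the mmc_cleanup_queue() hunks above: the function no longer calls blk_cleanup_queue() itself. It stops the worker thread, then clears queuedata and restarts the queue so any straggling requests fall into the dead-queue path of mmc_request(), which now fails them with REQ_QUIET instead of logging an error. Final destruction of the request queue is therefore left to whoever owns the disk. A hedged sketch of that caller-side ordering; example_release() is a hypothetical stand-in for that owner, not code from this patch:

#include <linux/blkdev.h>
#include "queue.h"	/* struct mmc_queue, mmc_cleanup_queue() */

/* Hypothetical owner-side release path; names are placeholders. */
static void example_release(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;

	/*
	 * Detach and stop the MMC consumer first; after this point
	 * mmc_request() sees a NULL queuedata and quietly errors out
	 * any remaining or new requests.
	 */
	mmc_cleanup_queue(mq);

	/* Only then tear down the request queue itself. */
	blk_cleanup_queue(q);
}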