Diffstat (limited to 'drivers/mtd/mtd_blkdevs.c')
 -rw-r--r--  drivers/mtd/mtd_blkdevs.c  17
 1 file changed, 13 insertions(+), 4 deletions(-)
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 50c76a2ca76e..3e10442615d1 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -89,18 +89,22 @@ static int mtd_blktrans_thread(void *arg)
 {
 	struct mtd_blktrans_ops *tr = arg;
 	struct request_queue *rq = tr->blkcore_priv->rq;
+	struct request *req = NULL;
 
 	/* we might get involved when memory gets low, so use PF_MEMALLOC */
 	current->flags |= PF_MEMALLOC;
 
 	spin_lock_irq(rq->queue_lock);
+
 	while (!kthread_should_stop()) {
-		struct request *req;
 		struct mtd_blktrans_dev *dev;
 		int res;
 
-		req = elv_next_request(rq);
-
+		if (!req) {
+			req = elv_next_request(rq);
+			if (req)
+				blkdev_dequeue_request(req);
+		}
 		if (!req) {
 			set_current_state(TASK_INTERRUPTIBLE);
 			spin_unlock_irq(rq->queue_lock);
@@ -120,8 +124,13 @@ static int mtd_blktrans_thread(void *arg)
 
 		spin_lock_irq(rq->queue_lock);
 
-		__blk_end_request_cur(req, res);
+		if (!__blk_end_request_cur(req, res))
+			req = NULL;
 	}
+
+	if (req)
+		__blk_end_request_all(req, -EIO);
+
 	spin_unlock_irq(rq->queue_lock);
 
 	return 0;
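To make the two hunks easier to follow, below is a consolidated sketch of what mtd_blktrans_thread() looks like after the patch. Only the helpers visible in the hunks (elv_next_request(), blkdev_dequeue_request(), __blk_end_request_cur(), __blk_end_request_all()) are taken from the diff; the sleep path and the request-servicing body between the hunks are unchanged by this patch and are only approximated here (the dev/res placeholders are illustrative, not the real body).

static int mtd_blktrans_thread(void *arg)
{
	struct mtd_blktrans_ops *tr = arg;
	struct request_queue *rq = tr->blkcore_priv->rq;
	struct request *req = NULL;	/* in-flight request, carried across iterations */

	/* we might get involved when memory gets low, so use PF_MEMALLOC */
	current->flags |= PF_MEMALLOC;

	spin_lock_irq(rq->queue_lock);

	while (!kthread_should_stop()) {
		struct mtd_blktrans_dev *dev;
		int res;

		/* Peek and dequeue a new request only when none is in flight. */
		if (!req) {
			req = elv_next_request(rq);
			if (req)
				blkdev_dequeue_request(req);
		}
		if (!req) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(rq->queue_lock);
			schedule();			/* sleep until more work arrives */
			spin_lock_irq(rq->queue_lock);
			continue;
		}

		/* Elided, unchanged by this patch: the body drops rq->queue_lock,
		 * resolves dev from req, and lets the translation layer service
		 * the request; the outcome lands in res.  The placeholders below
		 * only keep the sketch self-contained. */
		dev = req->rq_disk->private_data;
		res = -EIO;

		spin_lock_irq(rq->queue_lock);

		/* __blk_end_request_cur() completes just the current chunk and
		 * returns false once the request is fully finished, so req is
		 * cleared only then; otherwise the same request stays in flight
		 * for the next iteration. */
		if (!__blk_end_request_cur(req, res))
			req = NULL;
	}

	/* Thread is stopping while a request is still in flight: fail it. */
	if (req)
		__blk_end_request_all(req, -EIO);

	spin_unlock_irq(rq->queue_lock);

	return 0;
}

The net effect visible in the diff: a request is dequeued as soon as it is fetched, held by the thread until __blk_end_request_cur() reports full completion, and, if the thread is told to stop while a request is still pending, that request is ended with -EIO instead of being left dangling on the queue.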