Diffstat (limited to 'drivers/mtd')
-rw-r--r--  drivers/mtd/mtd_blkdevs.c | 43
1 file changed, 24 insertions(+), 19 deletions(-)
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index a49a9c8f2cb1..aaac3b6800b7 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -47,40 +47,41 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
 	unsigned long block, nsect;
 	char *buf;
 
-	block = req->sector << 9 >> tr->blkshift;
-	nsect = req->current_nr_sectors << 9 >> tr->blkshift;
+	block = blk_rq_pos(req) << 9 >> tr->blkshift;
+	nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
 
 	buf = req->buffer;
 
 	if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
 	    req->cmd[0] == REQ_LB_OP_DISCARD)
-		return !tr->discard(dev, block, nsect);
+		return tr->discard(dev, block, nsect);
 
 	if (!blk_fs_request(req))
-		return 0;
+		return -EIO;
 
-	if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk))
-		return 0;
+	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
+	    get_capacity(req->rq_disk))
+		return -EIO;
 
 	switch(rq_data_dir(req)) {
 	case READ:
 		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
 			if (tr->readsect(dev, block, buf))
-				return 0;
-		return 1;
+				return -EIO;
+		return 0;
 
 	case WRITE:
 		if (!tr->writesect)
-			return 0;
+			return -EIO;
 
 		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
 			if (tr->writesect(dev, block, buf))
-				return 0;
-		return 1;
+				return -EIO;
+		return 0;
 
 	default:
 		printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
-		return 0;
+		return -EIO;
 	}
 }
 
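This hunk moves do_blktrans_request() off the raw struct request fields (req->sector, req->current_nr_sectors) onto the blk_rq_pos()/blk_rq_cur_bytes()/blk_rq_cur_sectors() accessors, and flips the return convention from 1-on-success/0-on-failure to 0/-EIO, which the completion path below can hand straight to the block layer. The sector-to-block arithmetic, as a standalone userspace sketch with made-up values (blkshift = 12, i.e. 4 KiB device blocks; not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned long pos = 24;         /* blk_rq_pos(): position in 512-byte sectors */
		unsigned long cur_bytes = 8192; /* blk_rq_cur_bytes(): bytes in current segment */
		unsigned int blkshift = 12;     /* tr->blkshift: log2 of device block size */

		unsigned long block = pos << 9 >> blkshift;  /* 24 * 512 = 12288 -> block 3 */
		unsigned long nsect = cur_bytes >> blkshift; /* 8192 bytes -> 2 blocks */

		printf("block=%lu nsect=%lu\n", block, nsect);
		return 0;
	}

Note that the old code derived nsect from a sector count (current_nr_sectors << 9), while blk_rq_cur_bytes() is already in bytes, so the << 9 drops out.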
@@ -88,19 +89,18 @@ static int mtd_blktrans_thread(void *arg)
 {
 	struct mtd_blktrans_ops *tr = arg;
 	struct request_queue *rq = tr->blkcore_priv->rq;
+	struct request *req = NULL;
 
 	/* we might get involved when memory gets low, so use PF_MEMALLOC */
 	current->flags |= PF_MEMALLOC;
 
 	spin_lock_irq(rq->queue_lock);
+
 	while (!kthread_should_stop()) {
-		struct request *req;
 		struct mtd_blktrans_dev *dev;
-		int res = 0;
-
-		req = elv_next_request(rq);
+		int res;
 
-		if (!req) {
+		if (!req && !(req = blk_fetch_request(rq))) {
 			set_current_state(TASK_INTERRUPTIBLE);
 			spin_unlock_irq(rq->queue_lock);
 			schedule();
@@ -119,8 +119,13 @@ static int mtd_blktrans_thread(void *arg)
 
 		spin_lock_irq(rq->queue_lock);
 
-		end_request(req, res);
+		if (!__blk_end_request_cur(req, res))
+			req = NULL;
 	}
+
+	if (req)
+		__blk_end_request_all(req, -EIO);
+
 	spin_unlock_irq(rq->queue_lock);
 
 	return 0;
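Together, the two hunks above convert the worker thread from elv_next_request()/end_request() to the blk_fetch_request() API: the request pointer is hoisted out of the loop, each request is fetched once and then completed one chunk at a time with __blk_end_request_cur() (which returns false once the whole request has been completed), and only then is req cleared so the next iteration fetches a fresh one. If the thread is stopped while a request is still in flight, the leftover is failed with __blk_end_request_all(req, -EIO). The control flow, as a simplified sketch (queue locking, the dev lookup, and the sleep/wakeup plumbing omitted; handle_one_chunk() is a hypothetical stand-in for the do_blktrans_request() call):

	struct request *req = NULL;
	int res;

	while (!kthread_should_stop()) {
		/* keep working on a partially completed request, else fetch one */
		if (!req && !(req = blk_fetch_request(rq)))
			continue;                       /* queue empty: the real code sleeps here */

		res = handle_one_chunk(req);            /* 0 or -EIO, matching the hunk above */

		/* complete the current chunk; false means the request is finished */
		if (!__blk_end_request_cur(req, res))
			req = NULL;
	}
	if (req)                                        /* stopped with a request in flight */
		__blk_end_request_all(req, -EIO);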
@@ -373,7 +378,7 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
 	}
 
 	tr->blkcore_priv->rq->queuedata = tr;
-	blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
+	blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize);
 	if (tr->discard)
 		blk_queue_set_discard(tr->blkcore_priv->rq,
 				      blktrans_discard_request);
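The final hunk only tracks a block-layer rename: blk_queue_hardsect_size() became blk_queue_logical_block_size(), with unchanged semantics, advertising the smallest unit the driver can address so the block layer never issues a smaller transfer. For a hypothetical translation layer with 4 KiB blocks (illustrative value, not from the patch), registration ends up doing the equivalent of:

	/* tr->blksize would be 4096 (and tr->blkshift 12) for this driver */
	blk_queue_logical_block_size(tr->blkcore_priv->rq, 4096);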