diff options
Diffstat (limited to 'drivers/mtd/mtd_blkdevs.c')
-rw-r--r-- | drivers/mtd/mtd_blkdevs.c | 51 |
1 file changed, 17 insertions(+), 34 deletions(-)
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index f1f06715d4e0..5ad39bb5ab4c 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c | |||
@@ -32,7 +32,6 @@ | |||
32 | #include <linux/hdreg.h> | 32 | #include <linux/hdreg.h> |
33 | #include <linux/init.h> | 33 | #include <linux/init.h> |
34 | #include <linux/mutex.h> | 34 | #include <linux/mutex.h> |
35 | #include <linux/kthread.h> | ||
36 | #include <asm/uaccess.h> | 35 | #include <asm/uaccess.h> |
37 | 36 | ||
38 | #include "mtdcore.h" | 37 | #include "mtdcore.h" |
@@ -121,16 +120,14 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr, | |||
121 | 120 | ||
122 | int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev) | 121 | int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev) |
123 | { | 122 | { |
124 | if (kthread_should_stop()) | ||
125 | return 1; | ||
126 | |||
127 | return dev->bg_stop; | 123 | return dev->bg_stop; |
128 | } | 124 | } |
129 | EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background); | 125 | EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background); |
130 | 126 | ||
131 | static int mtd_blktrans_thread(void *arg) | 127 | static void mtd_blktrans_work(struct work_struct *work) |
132 | { | 128 | { |
133 | struct mtd_blktrans_dev *dev = arg; | 129 | struct mtd_blktrans_dev *dev = |
130 | container_of(work, struct mtd_blktrans_dev, work); | ||
134 | struct mtd_blktrans_ops *tr = dev->tr; | 131 | struct mtd_blktrans_ops *tr = dev->tr; |
135 | struct request_queue *rq = dev->rq; | 132 | struct request_queue *rq = dev->rq; |
136 | struct request *req = NULL; | 133 | struct request *req = NULL; |
@@ -138,7 +135,7 @@ static int mtd_blktrans_thread(void *arg) | |||
138 | 135 | ||
139 | spin_lock_irq(rq->queue_lock); | 136 | spin_lock_irq(rq->queue_lock); |
140 | 137 | ||
141 | while (!kthread_should_stop()) { | 138 | while (1) { |
142 | int res; | 139 | int res; |
143 | 140 | ||
144 | dev->bg_stop = false; | 141 | dev->bg_stop = false; |
@@ -156,15 +153,7 @@ static int mtd_blktrans_thread(void *arg) | |||
156 | background_done = !dev->bg_stop; | 153 | background_done = !dev->bg_stop; |
157 | continue; | 154 | continue; |
158 | } | 155 | } |
159 | set_current_state(TASK_INTERRUPTIBLE); | 156 | break; |
160 | |||
161 | if (kthread_should_stop()) | ||
162 | set_current_state(TASK_RUNNING); | ||
163 | |||
164 | spin_unlock_irq(rq->queue_lock); | ||
165 | schedule(); | ||
166 | spin_lock_irq(rq->queue_lock); | ||
167 | continue; | ||
168 | } | 157 | } |
169 | 158 | ||
170 | spin_unlock_irq(rq->queue_lock); | 159 | spin_unlock_irq(rq->queue_lock); |
@@ -185,8 +174,6 @@ static int mtd_blktrans_thread(void *arg) | |||
185 | __blk_end_request_all(req, -EIO); | 174 | __blk_end_request_all(req, -EIO); |
186 | 175 | ||
187 | spin_unlock_irq(rq->queue_lock); | 176 | spin_unlock_irq(rq->queue_lock); |
188 | |||
189 | return 0; | ||
190 | } | 177 | } |
191 | 178 | ||
192 | static void mtd_blktrans_request(struct request_queue *rq) | 179 | static void mtd_blktrans_request(struct request_queue *rq) |
@@ -199,10 +186,8 @@ static void mtd_blktrans_request(struct request_queue *rq) | |||
199 | if (!dev) | 186 | if (!dev) |
200 | while ((req = blk_fetch_request(rq)) != NULL) | 187 | while ((req = blk_fetch_request(rq)) != NULL) |
201 | __blk_end_request_all(req, -ENODEV); | 188 | __blk_end_request_all(req, -ENODEV); |
202 | else { | 189 | else |
203 | dev->bg_stop = true; | 190 | queue_work(dev->wq, &dev->work); |
204 | wake_up_process(dev->thread); | ||
205 | } | ||
206 | } | 191 | } |
207 | 192 | ||
208 | static int blktrans_open(struct block_device *bdev, fmode_t mode) | 193 | static int blktrans_open(struct block_device *bdev, fmode_t mode) |
@@ -325,7 +310,7 @@ unlock: | |||
325 | return ret; | 310 | return ret; |
326 | } | 311 | } |
327 | 312 | ||
328 | static const struct block_device_operations mtd_blktrans_ops = { | 313 | static const struct block_device_operations mtd_block_ops = { |
329 | .owner = THIS_MODULE, | 314 | .owner = THIS_MODULE, |
330 | .open = blktrans_open, | 315 | .open = blktrans_open, |
331 | .release = blktrans_release, | 316 | .release = blktrans_release, |
@@ -401,7 +386,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new) | |||
401 | gd->private_data = new; | 386 | gd->private_data = new; |
402 | gd->major = tr->major; | 387 | gd->major = tr->major; |
403 | gd->first_minor = (new->devnum) << tr->part_bits; | 388 | gd->first_minor = (new->devnum) << tr->part_bits; |
404 | gd->fops = &mtd_blktrans_ops; | 389 | gd->fops = &mtd_block_ops; |
405 | 390 | ||
406 | if (tr->part_bits) | 391 | if (tr->part_bits) |
407 | if (new->devnum < 26) | 392 | if (new->devnum < 26) |
@@ -437,14 +422,13 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new) | |||
437 | 422 | ||
438 | gd->queue = new->rq; | 423 | gd->queue = new->rq; |
439 | 424 | ||
440 | /* Create processing thread */ | 425 | /* Create processing workqueue */ |
441 | /* TODO: workqueue ? */ | 426 | new->wq = alloc_workqueue("%s%d", 0, 0, |
442 | new->thread = kthread_run(mtd_blktrans_thread, new, | 427 | tr->name, new->mtd->index); |
443 | "%s%d", tr->name, new->mtd->index); | 428 | if (!new->wq) |
444 | if (IS_ERR(new->thread)) { | ||
445 | ret = PTR_ERR(new->thread); | ||
446 | goto error4; | 429 | goto error4; |
447 | } | 430 | INIT_WORK(&new->work, mtd_blktrans_work); |
431 | |||
448 | gd->driverfs_dev = &new->mtd->dev; | 432 | gd->driverfs_dev = &new->mtd->dev; |
449 | 433 | ||
450 | if (new->readonly) | 434 | if (new->readonly) |
@@ -484,9 +468,8 @@ int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old) | |||
484 | /* Stop new requests to arrive */ | 468 | /* Stop new requests to arrive */ |
485 | del_gendisk(old->disk); | 469 | del_gendisk(old->disk); |
486 | 470 | ||
487 | 471 | /* Stop workqueue. This will perform any pending request. */ | |
488 | /* Stop the thread */ | 472 | destroy_workqueue(old->wq); |
489 | kthread_stop(old->thread); | ||
490 | 473 | ||
491 | /* Kill current requests */ | 474 | /* Kill current requests */ |
492 | spin_lock_irqsave(&old->queue_lock, flags); | 475 | spin_lock_irqsave(&old->queue_lock, flags); |