author	Christoph Hellwig <hch@infradead.org>	2007-04-22 15:40:57 -0400
committer	David Woodhouse <dwmw2@infradead.org>	2007-04-22 15:48:29 -0400
commit	3e67fe4543333048e486d7f360a0e2ae5d76c053 (patch)
tree	77ecafd7bc47c64968a8fd5dd9541eda33680aac
parent	a491486a2087ac3dfc00efb4f838c8d684afaf54 (diff)
[MTD] Finish conversion mtd_blkdevs to use the kthread API
Remove waitqueue, 'exiting' flag and completion; use kthread APIs instead.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: David Woodhouse <dwmw2@infradead.org>
-rw-r--r--	drivers/mtd/mtd_blkdevs.c	32
1 file changed, 9 insertions(+), 23 deletions(-)
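For readers unfamiliar with the conversion target: the kthread API bundles thread startup, a stop flag, and shutdown synchronization into kthread_run()/kthread_should_stop()/kthread_stop(), which is exactly what replaces the open-coded waitqueue, 'exiting' flag, and completion in the diff below. A minimal, self-contained sketch of the pattern follows; it is not taken from this patch, and the demo_* names are hypothetical.

/*
 * Minimal sketch of the kthread pattern this patch adopts: a thread
 * that sleeps until woken, started with kthread_run() and torn down
 * with kthread_stop().  Hypothetical demo module, not patch code.
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>

static struct task_struct *demo_task;

static int demo_thread(void *data)
{
	/*
	 * Mark ourselves sleepy *before* testing the stop flag, so a
	 * concurrent kthread_stop() cannot slip between the test and
	 * schedule() and leave the thread asleep forever.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();	/* woken by wake_up_process() or kthread_stop() */
		/* ... service pending work here ... */
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;	/* return value is collected by kthread_stop() */
}

static int __init demo_init(void)
{
	demo_task = kthread_run(demo_thread, NULL, "demod");
	return IS_ERR(demo_task) ? PTR_ERR(demo_task) : 0;
}

static void __exit demo_exit(void)
{
	/* Sets the stop flag, wakes the thread, waits for it to return. */
	kthread_stop(demo_task);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");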
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 1aa018abd332..524b83b5ebf5 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -29,9 +29,7 @@ extern struct mutex mtd_table_mutex;
 extern struct mtd_info *mtd_table[];
 
 struct mtd_blkcore_priv {
-	struct completion thread_dead;
-	int exiting;
-	wait_queue_head_t thread_wq;
+	struct task_struct *thread;
 	struct request_queue *rq;
 	spinlock_t queue_lock;
 };
@@ -85,26 +83,18 @@ static int mtd_blktrans_thread(void *arg)
 	current->flags |= PF_MEMALLOC | PF_NOFREEZE;
 
 	spin_lock_irq(rq->queue_lock);
-
-	while (!tr->blkcore_priv->exiting) {
+	while (!kthread_should_stop()) {
 		struct request *req;
 		struct mtd_blktrans_dev *dev;
 		int res = 0;
-		DECLARE_WAITQUEUE(wait, current);
 
 		req = elv_next_request(rq);
 
 		if (!req) {
-			add_wait_queue(&tr->blkcore_priv->thread_wq, &wait);
 			set_current_state(TASK_INTERRUPTIBLE);
-
 			spin_unlock_irq(rq->queue_lock);
-
 			schedule();
-			remove_wait_queue(&tr->blkcore_priv->thread_wq, &wait);
-
 			spin_lock_irq(rq->queue_lock);
-
 			continue;
 		}
 
@@ -123,13 +113,13 @@ static int mtd_blktrans_thread(void *arg)
 	}
 	spin_unlock_irq(rq->queue_lock);
 
-	complete_and_exit(&tr->blkcore_priv->thread_dead, 0);
+	return 0;
 }
 
 static void mtd_blktrans_request(struct request_queue *rq)
 {
 	struct mtd_blktrans_ops *tr = rq->queuedata;
-	wake_up(&tr->blkcore_priv->thread_wq);
+	wake_up_process(tr->blkcore_priv->thread);
 }
 
 
@@ -355,7 +345,6 @@ static struct mtd_notifier blktrans_notifier = {
 
 int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
 {
-	struct task_struct *task;
 	int ret, i;
 
 	/* Register the notifier if/when the first device type is
@@ -379,8 +368,6 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
 		return ret;
 	}
 	spin_lock_init(&tr->blkcore_priv->queue_lock);
-	init_completion(&tr->blkcore_priv->thread_dead);
-	init_waitqueue_head(&tr->blkcore_priv->thread_wq);
 
 	tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request, &tr->blkcore_priv->queue_lock);
 	if (!tr->blkcore_priv->rq) {
@@ -394,13 +381,14 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
 	blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
 	tr->blkshift = ffs(tr->blksize) - 1;
 
-	task = kthread_run(mtd_blktrans_thread, tr, "%sd", tr->name);
-	if (IS_ERR(task)) {
+	tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr,
+			"%sd", tr->name);
+	if (IS_ERR(tr->blkcore_priv->thread)) {
 		blk_cleanup_queue(tr->blkcore_priv->rq);
 		unregister_blkdev(tr->major, tr->name);
 		kfree(tr->blkcore_priv);
 		mutex_unlock(&mtd_table_mutex);
-		return PTR_ERR(task);
+		return PTR_ERR(tr->blkcore_priv->thread);
 	}
 
 	INIT_LIST_HEAD(&tr->devs);
@@ -423,9 +411,7 @@ int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
 	mutex_lock(&mtd_table_mutex);
 
 	/* Clean up the kernel thread */
-	tr->blkcore_priv->exiting = 1;
-	wake_up(&tr->blkcore_priv->thread_wq);
-	wait_for_completion(&tr->blkcore_priv->thread_dead);
+	kthread_stop(tr->blkcore_priv->thread);
 
 	/* Remove it from the list of active majors */
 	list_del(&tr->list);
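Two details make the simplification in deregister_mtd_blktrans() safe: kthread_stop() itself sets the should-stop flag, wakes the target via wake_up_process(), and blocks until the thread function returns, so the 'exiting' flag, the explicit wake_up(), and the wait_for_completion() all collapse into a single call; and because kthread_stop() collects the thread function's return value, complete_and_exit() can become a plain return 0.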