diff options
author | Jarkko Lavinen <jarkko.lavinen@nokia.com> | 2011-02-14 09:16:09 -0500 |
---|---|---|
committer | David Woodhouse <David.Woodhouse@intel.com> | 2011-03-11 09:22:46 -0500 |
commit | c7519dbf6f4b4408229d279d799c938ffdd06f21 (patch) | |
tree | 77952c980c6a744b8f318a4514f30cfb26a9d4df /drivers/mtd/mtd_blkdevs.c | |
parent | 13ce77f46c79a3839e4c2ff9722c9416c165f498 (diff) |
mtd_blkdevs: Add background processing support
Add a new background method into mtd_blktrans_ops, add background support
into mtd_blktrans_thread(), and add mtd_blktrans_cease_background().
If the mtd blktrans dev has background support, the thread will
call the background function when the request queue becomes empty. The
background operation may run as long as it needs to, until
mtd_blktrans_cease_background() tells it to stop.
Signed-off-by: Jarkko Lavinen <jarkko.lavinen@nokia.com>
Tested-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Diffstat (limited to 'drivers/mtd/mtd_blkdevs.c')
-rw-r--r-- | drivers/mtd/mtd_blkdevs.c | 26 |
1 files changed, 26 insertions, 0 deletions
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index 344ac1037ee7..e0b5f6442171 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c | |||
@@ -119,11 +119,22 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr, | |||
119 | } | 119 | } |
120 | } | 120 | } |
121 | 121 | ||
122 | int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev) | ||
123 | { | ||
124 | if (kthread_should_stop()) | ||
125 | return 1; | ||
126 | |||
127 | return !elv_queue_empty(dev->rq); | ||
128 | } | ||
129 | EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background); | ||
130 | |||
122 | static int mtd_blktrans_thread(void *arg) | 131 | static int mtd_blktrans_thread(void *arg) |
123 | { | 132 | { |
124 | struct mtd_blktrans_dev *dev = arg; | 133 | struct mtd_blktrans_dev *dev = arg; |
134 | struct mtd_blktrans_ops *tr = dev->tr; | ||
125 | struct request_queue *rq = dev->rq; | 135 | struct request_queue *rq = dev->rq; |
126 | struct request *req = NULL; | 136 | struct request *req = NULL; |
137 | int background_done = 0; | ||
127 | 138 | ||
128 | spin_lock_irq(rq->queue_lock); | 139 | spin_lock_irq(rq->queue_lock); |
129 | 140 | ||
@@ -131,6 +142,19 @@ static int mtd_blktrans_thread(void *arg) | |||
131 | int res; | 142 | int res; |
132 | 143 | ||
133 | if (!req && !(req = blk_fetch_request(rq))) { | 144 | if (!req && !(req = blk_fetch_request(rq))) { |
145 | if (tr->background && !background_done) { | ||
146 | spin_unlock_irq(rq->queue_lock); | ||
147 | mutex_lock(&dev->lock); | ||
148 | tr->background(dev); | ||
149 | mutex_unlock(&dev->lock); | ||
150 | spin_lock_irq(rq->queue_lock); | ||
151 | /* | ||
152 | * Do background processing just once per idle | ||
153 | * period. | ||
154 | */ | ||
155 | background_done = 1; | ||
156 | continue; | ||
157 | } | ||
134 | set_current_state(TASK_INTERRUPTIBLE); | 158 | set_current_state(TASK_INTERRUPTIBLE); |
135 | 159 | ||
136 | if (kthread_should_stop()) | 160 | if (kthread_should_stop()) |
@@ -152,6 +176,8 @@ static int mtd_blktrans_thread(void *arg) | |||
152 | 176 | ||
153 | if (!__blk_end_request_cur(req, res)) | 177 | if (!__blk_end_request_cur(req, res)) |
154 | req = NULL; | 178 | req = NULL; |
179 | |||
180 | background_done = 0; | ||
155 | } | 181 | } |
156 | 182 | ||
157 | if (req) | 183 | if (req) |