author		Maxim Levitsky <maximlevitsky@gmail.com>	2010-02-22 13:39:30 -0500
committer	David Woodhouse <David.Woodhouse@intel.com>	2010-02-26 12:05:26 -0500
commit		048d87199566663e4edc4880df3703c04bcf41d9 (patch)
tree		da875d4f7802d6ca8a7db9f2cdba57b74d6c3672 /drivers/mtd/mtd_blkdevs.c
parent		a863862257b7dd08d855bafcb0aedd9ad848ed91 (diff)
mtd: blktrans: Hotplug fixes
* Add locking where it was missing.
* Don't do a get_mtd_device in blktrans_open because it would lead to a
deadlock; instead do that in add_mtd_blktrans_dev.
* Only free the mtd_blktrans_dev structure when the last user exits.
* Flush request queue on device removal.
* Track users, and call tr->release in del_mtd_blktrans_dev.
  As a result, ->open and ->release aren't called more than once.
Now it is safe to call del_mtd_blktrans_dev while the device is still in use;
a minimal sketch of the resulting refcounting scheme is shown below.
Signed-off-by: Maxim Levitsky <maximlevitsky@gmail.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
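
The heart of the change is the reference counting added at the top of
mtd_blkdevs.c: every entry point (open, release, getgeo, ioctl) pins the
mtd_blktrans_dev via blktrans_dev_get() and drops it again with
blktrans_dev_put(), so the structure is freed only when the last user is gone,
even if del_mtd_blktrans_dev() runs while the device is open. Below is a
minimal userspace model of that pattern, not the driver code itself; the plain
integer refcount, the pthread mutex and the names are stand-ins for the
kernel's struct kref, blktrans_ref_mutex, kref_get() and kref_put().

/* Userspace sketch of the get/put lifetime rule the patch introduces.
 * All names and the simplified refcount are illustrative only. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct blkdev {
	int refcount;			/* stands in for struct kref */
};

static pthread_mutex_t ref_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Last reference gone: free the structure (cf. blktrans_dev_release). */
static void dev_release(struct blkdev *dev)
{
	free(dev);
}

/* Take a reference, but only if the device still exists (cf. blktrans_dev_get). */
static struct blkdev *dev_get(struct blkdev *registered)
{
	struct blkdev *dev;

	pthread_mutex_lock(&ref_mutex);
	dev = registered;		/* would be NULL after removal */
	if (dev)
		dev->refcount++;
	pthread_mutex_unlock(&ref_mutex);
	return dev;
}

/* Drop a reference; free the device once the count reaches zero
 * (cf. blktrans_dev_put). */
static void dev_put(struct blkdev *dev)
{
	pthread_mutex_lock(&ref_mutex);
	if (--dev->refcount == 0)
		dev_release(dev);
	pthread_mutex_unlock(&ref_mutex);
}

int main(void)
{
	struct blkdev *dev = calloc(1, sizeof(*dev));

	if (!dev)
		return 1;
	dev->refcount = 1;			/* reference taken at "add" time */

	struct blkdev *user = dev_get(dev);	/* an open() pins the device */
	dev_put(dev);				/* device removal drops its own reference */

	/* The structure is still alive: "user" holds the last reference. */
	dev_put(user);				/* final release actually frees it */
	printf("freed only after the last user dropped its reference\n");
	return 0;
}

The property the patch relies on is visible in main(): removal drops only the
reference it owns, and the memory is released by whichever path, removal or
the last close, ends up dropping the final reference.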
Diffstat (limited to 'drivers/mtd/mtd_blkdevs.c')
-rw-r--r--	drivers/mtd/mtd_blkdevs.c	202
1 file changed, 148 insertions(+), 54 deletions(-)
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 6a572625bfc0..646cc84ae692 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -24,6 +24,40 @@
 #include "mtdcore.h"
 
 static LIST_HEAD(blktrans_majors);
+static DEFINE_MUTEX(blktrans_ref_mutex);
+
+void blktrans_dev_release(struct kref *kref)
+{
+	struct mtd_blktrans_dev *dev =
+		container_of(kref, struct mtd_blktrans_dev, ref);
+
+	dev->disk->private_data = NULL;
+	put_disk(dev->disk);
+	list_del(&dev->list);
+	kfree(dev);
+}
+
+static struct mtd_blktrans_dev *blktrans_dev_get(struct gendisk *disk)
+{
+	struct mtd_blktrans_dev *dev;
+
+	mutex_lock(&blktrans_ref_mutex);
+	dev = disk->private_data;
+
+	if (!dev)
+		goto unlock;
+	kref_get(&dev->ref);
+unlock:
+	mutex_unlock(&blktrans_ref_mutex);
+	return dev;
+}
+
+void blktrans_dev_put(struct mtd_blktrans_dev *dev)
+{
+	mutex_lock(&blktrans_ref_mutex);
+	kref_put(&dev->ref, blktrans_dev_release);
+	mutex_unlock(&blktrans_ref_mutex);
+}
 
 
 static int do_blktrans_request(struct mtd_blktrans_ops *tr,
@@ -111,81 +145,112 @@ static int mtd_blktrans_thread(void *arg)
 
 static void mtd_blktrans_request(struct request_queue *rq)
 {
-	struct mtd_blktrans_dev *dev = rq->queuedata;
-	wake_up_process(dev->thread);
-}
+	struct mtd_blktrans_dev *dev;
+	struct request *req = NULL;
+
+	dev = rq->queuedata;
 
+	if (!dev)
+		while ((req = blk_fetch_request(rq)) != NULL)
+			__blk_end_request_all(req, -ENODEV);
+	else
+		wake_up_process(dev->thread);
+}
 
 static int blktrans_open(struct block_device *bdev, fmode_t mode)
 {
-	struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
-	struct mtd_blktrans_ops *tr = dev->tr;
-	int ret = -ENODEV;
-
-	if (!get_mtd_device(NULL, dev->mtd->index))
-		goto out;
-
-	if (!try_module_get(tr->owner))
-		goto out_tr;
-
-	/* FIXME: Locking. A hot pluggable device can go away
-	   (del_mtd_device can be called for it) without its module
-	   being unloaded. */
-	dev->mtd->usecount++;
-
-	ret = 0;
-	if (tr->open && (ret = tr->open(dev))) {
-		dev->mtd->usecount--;
-		put_mtd_device(dev->mtd);
-	out_tr:
-		module_put(tr->owner);
+	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
+	int ret;
+
+	if (!dev)
+		return -ERESTARTSYS;
+
+	mutex_lock(&dev->lock);
+
+	if (!dev->mtd) {
+		ret = -ENXIO;
+		goto unlock;
 	}
- out:
+
+	ret = !dev->open++ && dev->tr->open ? dev->tr->open(dev) : 0;
+
+	/* Take another reference on the device so it won't go away till
+		last release */
+	if (!ret)
+		kref_get(&dev->ref);
+unlock:
+	mutex_unlock(&dev->lock);
+	blktrans_dev_put(dev);
 	return ret;
 }
 
 static int blktrans_release(struct gendisk *disk, fmode_t mode)
 {
-	struct mtd_blktrans_dev *dev = disk->private_data;
-	struct mtd_blktrans_ops *tr = dev->tr;
-	int ret = 0;
+	struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);
+	int ret = -ENXIO;
 
-	if (tr->release)
-		ret = tr->release(dev);
+	if (!dev)
+		return ret;
 
-	if (!ret) {
-		dev->mtd->usecount--;
-		put_mtd_device(dev->mtd);
-		module_put(tr->owner);
-	}
+	mutex_lock(&dev->lock);
+
+	/* Release one reference, we sure its not the last one here*/
+	kref_put(&dev->ref, blktrans_dev_release);
 
+	if (!dev->mtd)
+		goto unlock;
+
+	ret = !--dev->open && dev->tr->release ? dev->tr->release(dev) : 0;
+unlock:
+	mutex_unlock(&dev->lock);
+	blktrans_dev_put(dev);
 	return ret;
 }
 
 static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 {
-	struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
+	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
+	int ret = -ENXIO;
+
+	if (!dev)
+		return ret;
+
+	mutex_lock(&dev->lock);
 
-	if (dev->tr->getgeo)
-		return dev->tr->getgeo(dev, geo);
-	return -ENOTTY;
+	if (!dev->mtd)
+		goto unlock;
+
+	ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : 0;
+unlock:
+	mutex_unlock(&dev->lock);
+	blktrans_dev_put(dev);
+	return ret;
 }
 
 static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
 			unsigned int cmd, unsigned long arg)
 {
-	struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
-	struct mtd_blktrans_ops *tr = dev->tr;
+	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
+	int ret = -ENXIO;
+
+	if (!dev)
+		return ret;
+
+	mutex_lock(&dev->lock);
+
+	if (!dev->mtd)
+		goto unlock;
 
 	switch (cmd) {
 	case BLKFLSBUF:
-		if (tr->flush)
-			return tr->flush(dev);
-		/* The core code did the work, we had nothing to do. */
-		return 0;
+		ret = dev->tr->flush ? dev->tr->flush(dev) : 0;
 	default:
-		return -ENOTTY;
+		ret = -ENOTTY;
 	}
+unlock:
+	mutex_unlock(&dev->lock);
+	blktrans_dev_put(dev);
+	return ret;
 }
 
 static const struct block_device_operations mtd_blktrans_ops = {
@@ -209,6 +274,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
 		BUG();
 	}
 
+	mutex_lock(&blktrans_ref_mutex);
 	list_for_each_entry(d, &tr->devs, list) {
 		if (new->devnum == -1) {
 			/* Use first free number */
@@ -220,6 +286,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
 			}
 		} else if (d->devnum == new->devnum) {
 			/* Required number taken */
+			mutex_unlock(&blktrans_ref_mutex);
 			return -EBUSY;
 		} else if (d->devnum > new->devnum) {
 			/* Required number was free */
@@ -237,16 +304,20 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
 	 * minor numbers and that the disk naming code below can cope
 	 * with this number. */
 	if (new->devnum > (MINORMASK >> tr->part_bits) ||
-	    (tr->part_bits && new->devnum >= 27 * 26))
+	    (tr->part_bits && new->devnum >= 27 * 26)) {
+		mutex_unlock(&blktrans_ref_mutex);
 		goto error1;
+	}
 
 	list_add_tail(&new->list, &tr->devs);
  added:
+	mutex_unlock(&blktrans_ref_mutex);
+
 	mutex_init(&new->lock);
+	kref_init(&new->ref);
 	if (!tr->writesect)
 		new->readonly = 1;
 
-
 	/* Create gendisk */
 	ret = -ENOMEM;
 	gd = alloc_disk(1 << tr->part_bits);
@@ -275,7 +346,6 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
 
 	set_capacity(gd, (new->size * tr->blksize) >> 9);
 
-
 	/* Create the request queue */
 	spin_lock_init(&new->queue_lock);
 	new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock);
@@ -292,6 +362,9 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
 
 	gd->queue = new->rq;
 
+	__get_mtd_device(new->mtd);
+	__module_get(tr->owner);
+
 	/* Create processing thread */
 	/* TODO: workqueue ? */
 	new->thread = kthread_run(mtd_blktrans_thread, new,
@@ -308,6 +381,8 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
 	add_disk(gd);
 	return 0;
 error4:
+	module_put(tr->owner);
+	__put_mtd_device(new->mtd);
 	blk_cleanup_queue(new->rq);
 error3:
 	put_disk(new->disk);
@@ -320,20 +395,41 @@ error1:
 
 int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
 {
+	unsigned long flags;
+
 	if (mutex_trylock(&mtd_table_mutex)) {
 		mutex_unlock(&mtd_table_mutex);
 		BUG();
 	}
 
-	list_del(&old->list);
-
-	/* stop new requests to arrive */
+	/* Stop new requests to arrive */
 	del_gendisk(old->disk);
 
 	/* Stop the thread */
 	kthread_stop(old->thread);
 
+	/* Kill current requests */
+	spin_lock_irqsave(&old->queue_lock, flags);
+	old->rq->queuedata = NULL;
+	blk_start_queue(old->rq);
+	spin_unlock_irqrestore(&old->queue_lock, flags);
 	blk_cleanup_queue(old->rq);
+
+	/* Ask trans driver for release to the mtd device */
+	mutex_lock(&old->lock);
+	if (old->open && old->tr->release) {
+		old->tr->release(old);
+		old->open = 0;
+	}
+
+	__put_mtd_device(old->mtd);
+	module_put(old->tr->owner);
+
+	/* At that point, we don't touch the mtd anymore */
+	old->mtd = NULL;
+
+	mutex_unlock(&old->lock);
+	blktrans_dev_put(old);
 	return 0;
 }
 
@@ -396,7 +492,6 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
 			tr->add_mtd(tr, mtd);
 
 	mutex_unlock(&mtd_table_mutex);
-
 	return 0;
 }
 
@@ -406,7 +501,6 @@ int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
 
 	mutex_lock(&mtd_table_mutex);
 
-
 	/* Remove it from the list of active majors */
 	list_del(&tr->list);
 