author     Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/mtd/mtd_blkdevs.c
Linux-2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'drivers/mtd/mtd_blkdevs.c')
-rw-r--r--   drivers/mtd/mtd_blkdevs.c   478
1 files changed, 478 insertions, 0 deletions
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
new file mode 100644
index 000000000000..f8d2185819e7
--- /dev/null
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -0,0 +1,478 @@
/*
 * $Id: mtd_blkdevs.c,v 1.24 2004/11/16 18:28:59 dwmw2 Exp $
 *
 * (C) 2003 David Woodhouse <dwmw2@infradead.org>
 *
 * Interface to Linux 2.5 block layer for MTD 'translation layers'.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>
#include <linux/devfs_fs_kernel.h>

static LIST_HEAD(blktrans_majors);

extern struct semaphore mtd_table_mutex;
extern struct mtd_info *mtd_table[];

struct mtd_blkcore_priv {
        struct completion thread_dead;
        int exiting;
        wait_queue_head_t thread_wq;
        struct request_queue *rq;
        spinlock_t queue_lock;
};

static int do_blktrans_request(struct mtd_blktrans_ops *tr,
                               struct mtd_blktrans_dev *dev,
                               struct request *req)
{
        unsigned long block, nsect;
        char *buf;

        block = req->sector;
        nsect = req->current_nr_sectors;
        buf = req->buffer;

        if (!(req->flags & REQ_CMD))
                return 0;

        if (block + nsect > get_capacity(req->rq_disk))
                return 0;

        switch(rq_data_dir(req)) {
        case READ:
                for (; nsect > 0; nsect--, block++, buf += 512)
                        if (tr->readsect(dev, block, buf))
                                return 0;
                return 1;

        case WRITE:
                if (!tr->writesect)
                        return 0;

                for (; nsect > 0; nsect--, block++, buf += 512)
                        if (tr->writesect(dev, block, buf))
                                return 0;
                return 1;

        default:
                printk(KERN_NOTICE "Unknown request %ld\n", rq_data_dir(req));
                return 0;
        }
}
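
The per-sector contract that do_blktrans_request() places on a translation layer is worth spelling out: readsect()/writesect() move exactly one 512-byte sector per call and return non-zero on failure, which the loop above turns into a failed request. A minimal sketch of a readsect() backed directly by the 2.6-era mtd->read() callback might look like the following; the function name is hypothetical and real translation layers (mtdblock, FTL, NFTL) do their own block mapping rather than a 1:1 read.

/* Hypothetical sketch, not part of this commit: one 512-byte sector per
 * call, 0 on success, non-zero on error -- which do_blktrans_request()
 * above reports back to the block layer as a failed request. */
static int example_readsect(struct mtd_blktrans_dev *dev,
                            unsigned long block, char *buf)
{
        size_t retlen;

        if (dev->mtd->read(dev->mtd, (loff_t)block << 9, 512, &retlen,
                           (u_char *)buf))
                return 1;

        return retlen == 512 ? 0 : 1;
}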

static int mtd_blktrans_thread(void *arg)
{
        struct mtd_blktrans_ops *tr = arg;
        struct request_queue *rq = tr->blkcore_priv->rq;

        /* we might get involved when memory gets low, so use PF_MEMALLOC */
        current->flags |= PF_MEMALLOC | PF_NOFREEZE;

        daemonize("%sd", tr->name);

        /* daemonize() doesn't do this for us since some kernel threads
           actually want to deal with signals. We can't just call
           exit_sighand() since that'll cause an oops when we finally
           do exit. */
        spin_lock_irq(&current->sighand->siglock);
        sigfillset(&current->blocked);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        spin_lock_irq(rq->queue_lock);

        while (!tr->blkcore_priv->exiting) {
                struct request *req;
                struct mtd_blktrans_dev *dev;
                int res = 0;
                DECLARE_WAITQUEUE(wait, current);

                req = elv_next_request(rq);

                if (!req) {
                        add_wait_queue(&tr->blkcore_priv->thread_wq, &wait);
                        set_current_state(TASK_INTERRUPTIBLE);

                        spin_unlock_irq(rq->queue_lock);

                        schedule();
                        remove_wait_queue(&tr->blkcore_priv->thread_wq, &wait);

                        spin_lock_irq(rq->queue_lock);

                        continue;
                }

                dev = req->rq_disk->private_data;
                tr = dev->tr;

                spin_unlock_irq(rq->queue_lock);

                down(&dev->sem);
                res = do_blktrans_request(tr, dev, req);
                up(&dev->sem);

                spin_lock_irq(rq->queue_lock);

                end_request(req, res);
        }
        spin_unlock_irq(rq->queue_lock);

        complete_and_exit(&tr->blkcore_priv->thread_dead, 0);
}

static void mtd_blktrans_request(struct request_queue *rq)
{
        struct mtd_blktrans_ops *tr = rq->queuedata;
        wake_up(&tr->blkcore_priv->thread_wq);
}


static int blktrans_open(struct inode *i, struct file *f)
{
        struct mtd_blktrans_dev *dev;
        struct mtd_blktrans_ops *tr;
        int ret = -ENODEV;

        dev = i->i_bdev->bd_disk->private_data;
        tr = dev->tr;

        if (!try_module_get(dev->mtd->owner))
                goto out;

        if (!try_module_get(tr->owner))
                goto out_tr;

        /* FIXME: Locking. A hot pluggable device can go away
           (del_mtd_device can be called for it) without its module
           being unloaded. */
        dev->mtd->usecount++;

        ret = 0;
        if (tr->open && (ret = tr->open(dev))) {
                dev->mtd->usecount--;
                module_put(dev->mtd->owner);
        out_tr:
                module_put(tr->owner);
        }
 out:
        return ret;
}

static int blktrans_release(struct inode *i, struct file *f)
{
        struct mtd_blktrans_dev *dev;
        struct mtd_blktrans_ops *tr;
        int ret = 0;

        dev = i->i_bdev->bd_disk->private_data;
        tr = dev->tr;

        if (tr->release)
                ret = tr->release(dev);

        if (!ret) {
                dev->mtd->usecount--;
                module_put(dev->mtd->owner);
                module_put(tr->owner);
        }

        return ret;
}


static int blktrans_ioctl(struct inode *inode, struct file *file,
                          unsigned int cmd, unsigned long arg)
{
        struct mtd_blktrans_dev *dev = inode->i_bdev->bd_disk->private_data;
        struct mtd_blktrans_ops *tr = dev->tr;

        switch (cmd) {
        case BLKFLSBUF:
                if (tr->flush)
                        return tr->flush(dev);
                /* The core code did the work, we had nothing to do. */
                return 0;

        case HDIO_GETGEO:
                if (tr->getgeo) {
                        struct hd_geometry g;
                        int ret;

                        memset(&g, 0, sizeof(g));
                        ret = tr->getgeo(dev, &g);
                        if (ret)
                                return ret;

                        g.start = get_start_sect(inode->i_bdev);
                        if (copy_to_user((void __user *)arg, &g, sizeof(g)))
                                return -EFAULT;
                        return 0;
                } /* else */
        default:
                return -ENOTTY;
        }
}

struct block_device_operations mtd_blktrans_ops = {
        .owner          = THIS_MODULE,
        .open           = blktrans_open,
        .release        = blktrans_release,
        .ioctl          = blktrans_ioctl,
};
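
For illustration, the two commands handled above can be exercised from userspace with an ordinary ioctl() on the block node a translation layer creates. The snippet below is a hedged sketch, not part of this commit: the device node name is an assumption that depends on which translation layer is loaded, and HDIO_GETGEO only succeeds when that layer provides a getgeo() hook.

/* Hypothetical userspace sketch, not part of this commit. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>        /* BLKFLSBUF */
#include <linux/hdreg.h>     /* HDIO_GETGEO, struct hd_geometry */

int main(void)
{
        struct hd_geometry geo;
        int fd = open("/dev/mtdblock0", O_RDONLY);   /* assumed node name */

        if (fd < 0)
                return 1;

        ioctl(fd, BLKFLSBUF);                        /* calls tr->flush(), if present */

        if (ioctl(fd, HDIO_GETGEO, &geo) == 0)       /* only if tr->getgeo() is set */
                printf("heads=%u sectors=%u cylinders=%u start=%lu\n",
                       geo.heads, geo.sectors, geo.cylinders, geo.start);

        close(fd);
        return 0;
}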

int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
        struct mtd_blktrans_ops *tr = new->tr;
        struct list_head *this;
        int last_devnum = -1;
        struct gendisk *gd;

        if (!down_trylock(&mtd_table_mutex)) {
                up(&mtd_table_mutex);
                BUG();
        }

        list_for_each(this, &tr->devs) {
                struct mtd_blktrans_dev *d = list_entry(this, struct mtd_blktrans_dev, list);
                if (new->devnum == -1) {
                        /* Use first free number */
                        if (d->devnum != last_devnum+1) {
                                /* Found a free devnum. Plug it in here */
                                new->devnum = last_devnum+1;
                                list_add_tail(&new->list, &d->list);
                                goto added;
                        }
                } else if (d->devnum == new->devnum) {
                        /* Required number taken */
                        return -EBUSY;
                } else if (d->devnum > new->devnum) {
                        /* Required number was free */
                        list_add_tail(&new->list, &d->list);
                        goto added;
                }
                last_devnum = d->devnum;
        }
        if (new->devnum == -1)
                new->devnum = last_devnum+1;

        if ((new->devnum << tr->part_bits) > 256) {
                return -EBUSY;
        }

        init_MUTEX(&new->sem);
        list_add_tail(&new->list, &tr->devs);
 added:
        if (!tr->writesect)
                new->readonly = 1;

        gd = alloc_disk(1 << tr->part_bits);
        if (!gd) {
                list_del(&new->list);
                return -ENOMEM;
        }
        gd->major = tr->major;
        gd->first_minor = (new->devnum) << tr->part_bits;
        gd->fops = &mtd_blktrans_ops;

        snprintf(gd->disk_name, sizeof(gd->disk_name),
                 "%s%c", tr->name, (tr->part_bits?'a':'0') + new->devnum);
        snprintf(gd->devfs_name, sizeof(gd->devfs_name),
                 "%s/%c", tr->name, (tr->part_bits?'a':'0') + new->devnum);

        /* 2.5 has capacity in units of 512 bytes while still
           having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
        set_capacity(gd, (new->size * new->blksize) >> 9);

        gd->private_data = new;
        new->blkcore_priv = gd;
        gd->queue = tr->blkcore_priv->rq;

        if (new->readonly)
                set_disk_ro(gd, 1);

        add_disk(gd);

        return 0;
}
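
How a translation layer feeds devices into this allocator can be shown with a short sketch of an add_mtd() callback. It is hypothetical and not part of this commit; the field semantics follow from the code above: devnum == -1 requests the first free slot, and the resulting disk capacity is size * blksize bytes.

/* Hypothetical sketch, not part of this commit. */
static void example_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
        struct mtd_blktrans_dev *dev;

        dev = kmalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return;
        memset(dev, 0, sizeof(*dev));

        dev->mtd = mtd;
        dev->tr = tr;
        dev->devnum = -1;               /* take the first free device number */
        dev->blksize = 512;
        dev->size = mtd->size >> 9;     /* size is in units of blksize */

        if (add_mtd_blktrans_dev(dev))
                kfree(dev);
}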

int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
        if (!down_trylock(&mtd_table_mutex)) {
                up(&mtd_table_mutex);
                BUG();
        }

        list_del(&old->list);

        del_gendisk(old->blkcore_priv);
        put_disk(old->blkcore_priv);

        return 0;
}

static void blktrans_notify_remove(struct mtd_info *mtd)
{
        struct list_head *this, *this2, *next;

        list_for_each(this, &blktrans_majors) {
                struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);

                list_for_each_safe(this2, next, &tr->devs) {
                        struct mtd_blktrans_dev *dev = list_entry(this2, struct mtd_blktrans_dev, list);

                        if (dev->mtd == mtd)
                                tr->remove_dev(dev);
                }
        }
}

static void blktrans_notify_add(struct mtd_info *mtd)
{
        struct list_head *this;

        if (mtd->type == MTD_ABSENT)
                return;

        list_for_each(this, &blktrans_majors) {
                struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);

                tr->add_mtd(tr, mtd);
        }

}

static struct mtd_notifier blktrans_notifier = {
        .add = blktrans_notify_add,
        .remove = blktrans_notify_remove,
};

int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
        int ret, i;

        /* Register the notifier if/when the first device type is
           registered, to prevent the link/init ordering from fucking
           us over. */
        if (!blktrans_notifier.list.next)
                register_mtd_user(&blktrans_notifier);

        tr->blkcore_priv = kmalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
        if (!tr->blkcore_priv)
                return -ENOMEM;

        memset(tr->blkcore_priv, 0, sizeof(*tr->blkcore_priv));

        down(&mtd_table_mutex);

        ret = register_blkdev(tr->major, tr->name);
        if (ret) {
                printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
                       tr->name, tr->major, ret);
                kfree(tr->blkcore_priv);
                up(&mtd_table_mutex);
                return ret;
        }
        spin_lock_init(&tr->blkcore_priv->queue_lock);
        init_completion(&tr->blkcore_priv->thread_dead);
        init_waitqueue_head(&tr->blkcore_priv->thread_wq);

        tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request, &tr->blkcore_priv->queue_lock);
        if (!tr->blkcore_priv->rq) {
                unregister_blkdev(tr->major, tr->name);
                kfree(tr->blkcore_priv);
                up(&mtd_table_mutex);
                return -ENOMEM;
        }

        tr->blkcore_priv->rq->queuedata = tr;

        ret = kernel_thread(mtd_blktrans_thread, tr, CLONE_KERNEL);
        if (ret < 0) {
                blk_cleanup_queue(tr->blkcore_priv->rq);
                unregister_blkdev(tr->major, tr->name);
                kfree(tr->blkcore_priv);
                up(&mtd_table_mutex);
                return ret;
        }

        devfs_mk_dir(tr->name);

        INIT_LIST_HEAD(&tr->devs);
        list_add(&tr->list, &blktrans_majors);

        for (i=0; i<MAX_MTD_DEVICES; i++) {
                if (mtd_table[i] && mtd_table[i]->type != MTD_ABSENT)
                        tr->add_mtd(tr, mtd_table[i]);
        }

        up(&mtd_table_mutex);

        return 0;
}

int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
        struct list_head *this, *next;

        down(&mtd_table_mutex);

        /* Clean up the kernel thread */
        tr->blkcore_priv->exiting = 1;
        wake_up(&tr->blkcore_priv->thread_wq);
        wait_for_completion(&tr->blkcore_priv->thread_dead);

        /* Remove it from the list of active majors */
        list_del(&tr->list);

        list_for_each_safe(this, next, &tr->devs) {
                struct mtd_blktrans_dev *dev = list_entry(this, struct mtd_blktrans_dev, list);
                tr->remove_dev(dev);
        }

        devfs_remove(tr->name);
        blk_cleanup_queue(tr->blkcore_priv->rq);
        unregister_blkdev(tr->major, tr->name);

        up(&mtd_table_mutex);

        kfree(tr->blkcore_priv);

        if (!list_empty(&tr->devs))
                BUG();
        return 0;
}

static void __exit mtd_blktrans_exit(void)
{
        /* No race here -- if someone's currently in register_mtd_blktrans
           we're screwed anyway. */
        if (blktrans_notifier.list.next)
                unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");
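
Putting the four exported entry points together, a minimal translation-layer module could register itself as sketched below. This is a hedged sketch in a hypothetical separate driver file, not part of this commit: the "exflash" name and the major number 240 (from the local/experimental range) are made up, example_readsect() and example_add_mtd() refer to the hypothetical sketches shown earlier, and a fixed, non-zero major is assumed because register_mtd_blktrans() above treats any non-zero return from register_blkdev() as an error.

/* Hypothetical sketch, not part of this commit; assumes the example_readsect()
 * and example_add_mtd() sketches above are compiled in the same driver file. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/blktrans.h>

static void example_remove_dev(struct mtd_blktrans_dev *dev)
{
        del_mtd_blktrans_dev(dev);
        kfree(dev);
}

static struct mtd_blktrans_ops example_tr = {
        .name           = "exflash",            /* made-up name */
        .major          = 240,                  /* assumed local/experimental major */
        .part_bits      = 0,                    /* no partitions on the disk */
        .readsect       = example_readsect,     /* sketched after do_blktrans_request() */
        .add_mtd        = example_add_mtd,      /* sketched after add_mtd_blktrans_dev() */
        .remove_dev     = example_remove_dev,
        .owner          = THIS_MODULE,
};

static int __init example_init(void)
{
        return register_mtd_blktrans(&example_tr);
}

static void __exit example_exit(void)
{
        deregister_mtd_blktrans(&example_tr);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");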