author     Linus Torvalds <torvalds@ppc970.osdl.org>    2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>    2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/mtd/devices/blkmtd.c
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'drivers/mtd/devices/blkmtd.c')
-rw-r--r--  drivers/mtd/devices/blkmtd.c  823
1 files changed, 823 insertions, 0 deletions
diff --git a/drivers/mtd/devices/blkmtd.c b/drivers/mtd/devices/blkmtd.c
new file mode 100644
index 000000000000..662e807801ed
--- /dev/null
+++ b/drivers/mtd/devices/blkmtd.c
@@ -0,0 +1,823 @@
/*
 * $Id: blkmtd.c,v 1.24 2004/11/16 18:29:01 dwmw2 Exp $
 *
 * blkmtd.c - use a block device as a fake MTD
 *
 * Author: Simon Evans <spse@secret.org.uk>
 *
 * Copyright (C) 2001,2002 Simon Evans
 *
 * Licence: GPL
 *
 * How it works:
 *	The driver uses raw/io to read/write the device and the page
 *	cache to cache access. Writes update the page cache with the
 *	new data and mark it dirty and add the page into a BIO which
 *	is then written out.
 *
 *	It can be loaded Read-Only to prevent erases and writes to the
 *	medium.
 *
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>


#define err(format, arg...) printk(KERN_ERR "blkmtd: " format "\n" , ## arg)
#define info(format, arg...) printk(KERN_INFO "blkmtd: " format "\n" , ## arg)
#define warn(format, arg...) printk(KERN_WARNING "blkmtd: " format "\n" , ## arg)
#define crit(format, arg...) printk(KERN_CRIT "blkmtd: " format "\n" , ## arg)


/* Default erase size in K, always make it a multiple of PAGE_SIZE */
#define CONFIG_MTD_BLKDEV_ERASESIZE (128 << 10)	/* 128KiB */
#define VERSION "$Revision: 1.24 $"

/* Info for the block device */
struct blkmtd_dev {
	struct list_head list;
	struct block_device *blkdev;
	struct mtd_info mtd_info;
	struct semaphore wrbuf_mutex;
};


/* Static info about the MTD, used in cleanup_module */
static LIST_HEAD(blkmtd_device_list);


static void blkmtd_sync(struct mtd_info *mtd);

#define MAX_DEVICES 4

/* Module parameters passed by insmod/modprobe */
static char *device[MAX_DEVICES];    /* the block device to use */
static int erasesz[MAX_DEVICES];     /* optional default erase size */
static int ro[MAX_DEVICES];          /* optional read only flag */
static int sync;


MODULE_LICENSE("GPL");
MODULE_AUTHOR("Simon Evans <spse@secret.org.uk>");
MODULE_DESCRIPTION("Emulate an MTD using a block device");
module_param_array(device, charp, NULL, 0);
MODULE_PARM_DESC(device, "block device to use");
module_param_array(erasesz, int, NULL, 0);
MODULE_PARM_DESC(erasesz, "optional erase size to use in KiB. eg 4=4KiB.");
module_param_array(ro, bool, NULL, 0);
MODULE_PARM_DESC(ro, "1=Read only, writes and erases cause errors");
module_param(sync, bool, 0);
MODULE_PARM_DESC(sync, "1=Synchronous writes");

/* completion handler for BIO reads */
static int bi_read_complete(struct bio *bio, unsigned int bytes_done, int error)
{
	if (bio->bi_size)
		return 1;

	complete((struct completion*)bio->bi_private);
	return 0;
}


/* completion handler for BIO writes */
static int bi_write_complete(struct bio *bio, unsigned int bytes_done, int error)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	if (bio->bi_size)
		return 1;

	if(!uptodate)
		err("bi_write_complete: not uptodate\n");

	do {
		struct page *page = bvec->bv_page;
		DEBUG(3, "Cleaning up page %ld\n", page->index);
		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		ClearPageDirty(page);
		unlock_page(page);
		page_cache_release(page);
	} while (bvec >= bio->bi_io_vec);

	complete((struct completion*)bio->bi_private);
	return 0;
}


/* read one page from the block device */
static int blkmtd_readpage(struct blkmtd_dev *dev, struct page *page)
{
	struct bio *bio;
	struct completion event;
	int err = -ENOMEM;

	if(PageUptodate(page)) {
		DEBUG(2, "blkmtd: readpage page %ld is already upto date\n", page->index);
		unlock_page(page);
		return 0;
	}

	ClearPageUptodate(page);
	ClearPageError(page);

	bio = bio_alloc(GFP_KERNEL, 1);
	if(bio) {
		init_completion(&event);
		bio->bi_bdev = dev->blkdev;
		bio->bi_sector = page->index << (PAGE_SHIFT-9);
		bio->bi_private = &event;
		bio->bi_end_io = bi_read_complete;
		if(bio_add_page(bio, page, PAGE_SIZE, 0) == PAGE_SIZE) {
			submit_bio(READ_SYNC, bio);
			wait_for_completion(&event);
			err = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : -EIO;
			bio_put(bio);
		}
	}

	if(err)
		SetPageError(page);
	else
		SetPageUptodate(page);
	flush_dcache_page(page);
	unlock_page(page);
	return err;
}

/* write out the current BIO and wait for it to finish */
static int blkmtd_write_out(struct bio *bio)
{
	struct completion event;
	int err;

	if(!bio->bi_vcnt) {
		bio_put(bio);
		return 0;
	}

	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = bi_write_complete;
	submit_bio(WRITE_SYNC, bio);
	wait_for_completion(&event);
	DEBUG(3, "submit_bio completed, bi_vcnt = %d\n", bio->bi_vcnt);
	err = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : -EIO;
	bio_put(bio);
	return err;
}


/**
 * blkmtd_add_page - add a page to the current BIO
 * @bio: bio to add to (NULL to alloc initial bio)
 * @blkdev: block device
 * @page: page to add
 * @pagecnt: pages left to add
 *
 * Adds a page to the current bio, allocating it if necessary. If it cannot be
 * added, the current bio is written out and a new one is allocated. Returns
 * the new bio to add or NULL on error
 */
static struct bio *blkmtd_add_page(struct bio *bio, struct block_device *blkdev,
				   struct page *page, int pagecnt)
{

 retry:
	if(!bio) {
		bio = bio_alloc(GFP_KERNEL, pagecnt);
		if(!bio)
			return NULL;
		bio->bi_sector = page->index << (PAGE_SHIFT-9);
		bio->bi_bdev = blkdev;
	}

	if(bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) {
		blkmtd_write_out(bio);
		bio = NULL;
		goto retry;
	}
	return bio;
}


/**
 * write_pages - write block of data to device via the page cache
 * @dev: device to write to
 * @buf: data source or NULL if erase (output is set to 0xff)
 * @to: offset into output device
 * @len: amount of data to write
 * @retlen: amount of data written
 *
 * Grab pages from the page cache and fill them with the source data.
 * A non page aligned start or end results in a read-in of the page and
 * only part of the page being modified. Pages are added to the bio and then
 * written out.
 */
static int write_pages(struct blkmtd_dev *dev, const u_char *buf, loff_t to,
		       size_t len, size_t *retlen)
{
	int pagenr, offset;
	size_t start_len = 0, end_len;
	int pagecnt = 0;
	int err = 0;
	struct bio *bio = NULL;
	size_t thislen = 0;

	pagenr = to >> PAGE_SHIFT;
	offset = to & ~PAGE_MASK;

	DEBUG(2, "blkmtd: write_pages: buf = %p to = %ld len = %zd pagenr = %d offset = %d\n",
	      buf, (long)to, len, pagenr, offset);

	/* see if we have to do a partial write at the start */
	if(offset) {
		start_len = ((offset + len) > PAGE_SIZE) ? PAGE_SIZE - offset : len;
		len -= start_len;
	}

	/* calculate the length of the other two regions */
	end_len = len & ~PAGE_MASK;
	len -= end_len;

	if(start_len)
		pagecnt++;

	if(len)
		pagecnt += len >> PAGE_SHIFT;

	if(end_len)
		pagecnt++;

	down(&dev->wrbuf_mutex);

	DEBUG(3, "blkmtd: write: start_len = %zd len = %zd end_len = %zd pagecnt = %d\n",
	      start_len, len, end_len, pagecnt);

	if(start_len) {
		/* do partial start region */
		struct page *page;

		DEBUG(3, "blkmtd: write: doing partial start, page = %d len = %zd offset = %d\n",
		      pagenr, start_len, offset);

		BUG_ON(!buf);
		page = read_cache_page(dev->blkdev->bd_inode->i_mapping, pagenr, (filler_t *)blkmtd_readpage, dev);
		lock_page(page);
		if(PageDirty(page)) {
			err("to = %lld start_len = %zd len = %zd end_len = %zd pagenr = %d\n",
			    to, start_len, len, end_len, pagenr);
			BUG();
		}
		memcpy(page_address(page)+offset, buf, start_len);
		SetPageDirty(page);
		SetPageUptodate(page);
		buf += start_len;
		thislen = start_len;
		bio = blkmtd_add_page(bio, dev->blkdev, page, pagecnt);
		if(!bio) {
			err = -ENOMEM;
			err("bio_add_page failed\n");
			goto write_err;
		}
		pagecnt--;
		pagenr++;
	}

	/* Now do the main loop to a page aligned, n page sized output */
	if(len) {
		int pagesc = len >> PAGE_SHIFT;
		DEBUG(3, "blkmtd: write: whole pages start = %d, count = %d\n",
		      pagenr, pagesc);
		while(pagesc) {
			struct page *page;

			/* see if page is in the page cache */
			DEBUG(3, "blkmtd: write: grabbing page %d from page cache\n", pagenr);
			page = grab_cache_page(dev->blkdev->bd_inode->i_mapping, pagenr);
			if(!page) {
				warn("write: cannot grab cache page %d", pagenr);
				err = -ENOMEM;
				goto write_err;
			}
			if(PageDirty(page)) {
				BUG();
			}
			if(!buf) {
				memset(page_address(page), 0xff, PAGE_SIZE);
			} else {
				memcpy(page_address(page), buf, PAGE_SIZE);
				buf += PAGE_SIZE;
			}
			bio = blkmtd_add_page(bio, dev->blkdev, page, pagecnt);
			if(!bio) {
				err = -ENOMEM;
				err("bio_add_page failed\n");
				goto write_err;
			}
			pagenr++;
			pagecnt--;
			SetPageDirty(page);
			SetPageUptodate(page);
			pagesc--;
			thislen += PAGE_SIZE;
		}
	}

	if(end_len) {
		/* do the third region */
		struct page *page;
		DEBUG(3, "blkmtd: write: doing partial end, page = %d len = %zd\n",
		      pagenr, end_len);
		BUG_ON(!buf);
		page = read_cache_page(dev->blkdev->bd_inode->i_mapping, pagenr, (filler_t *)blkmtd_readpage, dev);
		lock_page(page);
		if(PageDirty(page)) {
			err("to = %lld start_len = %zd len = %zd end_len = %zd pagenr = %d\n",
			    to, start_len, len, end_len, pagenr);
			BUG();
		}
		memcpy(page_address(page), buf, end_len);
		SetPageDirty(page);
		SetPageUptodate(page);
		DEBUG(3, "blkmtd: write: writing out partial end\n");
		thislen += end_len;
		bio = blkmtd_add_page(bio, dev->blkdev, page, pagecnt);
		if(!bio) {
			err = -ENOMEM;
			err("bio_add_page failed\n");
			goto write_err;
		}
		pagenr++;
	}

	DEBUG(3, "blkmtd: write: got %d vectors to write\n", bio->bi_vcnt);
 write_err:
	if(bio)
		blkmtd_write_out(bio);

	DEBUG(2, "blkmtd: write: end, retlen = %zd, err = %d\n", *retlen, err);
	up(&dev->wrbuf_mutex);

	if(retlen)
		*retlen = thislen;
	return err;
}

/* erase a specified part of the device */
static int blkmtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct blkmtd_dev *dev = mtd->priv;
	struct mtd_erase_region_info *einfo = mtd->eraseregions;
	int numregions = mtd->numeraseregions;
	size_t from;
	u_long len;
	int err = -EIO;
	size_t retlen;

	instr->state = MTD_ERASING;
	from = instr->addr;
	len = instr->len;

	/* check erase region has valid start and length */
	DEBUG(2, "blkmtd: erase: dev = `%s' from = 0x%zx len = 0x%lx\n",
	      mtd->name+9, from, len);
	while(numregions) {
		DEBUG(3, "blkmtd: checking erase region = 0x%08X size = 0x%X num = 0x%x\n",
		      einfo->offset, einfo->erasesize, einfo->numblocks);
		if(from >= einfo->offset
		   && from < einfo->offset + (einfo->erasesize * einfo->numblocks)) {
			if(len == einfo->erasesize
			   && ( (from - einfo->offset) % einfo->erasesize == 0))
				break;
		}
		numregions--;
		einfo++;
	}

	if(!numregions) {
		/* Not a valid erase block */
		err("erase: invalid erase request 0x%lX @ 0x%08zX", len, from);
		instr->state = MTD_ERASE_FAILED;
		err = -EIO;
	}

	if(instr->state != MTD_ERASE_FAILED) {
		/* do the erase */
		DEBUG(3, "Doing erase from = %zd len = %ld\n", from, len);
		err = write_pages(dev, NULL, from, len, &retlen);
		if(err || retlen != len) {
			err("erase failed err = %d", err);
			instr->state = MTD_ERASE_FAILED;
		} else {
			instr->state = MTD_ERASE_DONE;
		}
	}

	DEBUG(3, "blkmtd: erase: checking callback\n");
	mtd_erase_callback(instr);
	DEBUG(2, "blkmtd: erase: finished (err = %d)\n", err);
	return err;
}


/* read a range of the data via the page cache */
static int blkmtd_read(struct mtd_info *mtd, loff_t from, size_t len,
		       size_t *retlen, u_char *buf)
{
	struct blkmtd_dev *dev = mtd->priv;
	int err = 0;
	int offset;
	int pagenr, pages;
	size_t thislen = 0;

	DEBUG(2, "blkmtd: read: dev = `%s' from = %lld len = %zd buf = %p\n",
	      mtd->name+9, from, len, buf);

	if(from > mtd->size)
		return -EINVAL;
	if(from + len > mtd->size)
		len = mtd->size - from;

	pagenr = from >> PAGE_SHIFT;
	offset = from - (pagenr << PAGE_SHIFT);

	pages = (offset+len+PAGE_SIZE-1) >> PAGE_SHIFT;
	DEBUG(3, "blkmtd: read: pagenr = %d offset = %d, pages = %d\n",
	      pagenr, offset, pages);

	while(pages) {
		struct page *page;
		int cpylen;

		DEBUG(3, "blkmtd: read: looking for page: %d\n", pagenr);
		page = read_cache_page(dev->blkdev->bd_inode->i_mapping, pagenr, (filler_t *)blkmtd_readpage, dev);
		if(IS_ERR(page)) {
			err = -EIO;
			goto readerr;
		}

		cpylen = (PAGE_SIZE > len) ? len : PAGE_SIZE;
		if(offset+cpylen > PAGE_SIZE)
			cpylen = PAGE_SIZE-offset;

		memcpy(buf + thislen, page_address(page) + offset, cpylen);
		offset = 0;
		len -= cpylen;
		thislen += cpylen;
		pagenr++;
		pages--;
		if(!PageDirty(page))
			page_cache_release(page);
	}

 readerr:
	if(retlen)
		*retlen = thislen;
	DEBUG(2, "blkmtd: end read: retlen = %zd, err = %d\n", thislen, err);
	return err;
}

/* write data to the underlying device */
static int blkmtd_write(struct mtd_info *mtd, loff_t to, size_t len,
			size_t *retlen, const u_char *buf)
{
	struct blkmtd_dev *dev = mtd->priv;
	int err;

	if(!len)
		return 0;

	DEBUG(2, "blkmtd: write: dev = `%s' to = %lld len = %zd buf = %p\n",
	      mtd->name+9, to, len, buf);

	if(to >= mtd->size) {
		return -ENOSPC;
	}

	if(to + len > mtd->size) {
		len = mtd->size - to;
	}

	err = write_pages(dev, buf, to, len, retlen);
	if(err > 0)
		err = 0;
	DEBUG(2, "blkmtd: write: end, err = %d\n", err);
	return err;
}


/* sync the device - wait until the write queue is empty */
static void blkmtd_sync(struct mtd_info *mtd)
{
	/* Currently all writes are synchronous */
}


static void free_device(struct blkmtd_dev *dev)
{
	DEBUG(2, "blkmtd: free_device() dev = %p\n", dev);
	if(dev) {
		if(dev->mtd_info.eraseregions)
			kfree(dev->mtd_info.eraseregions);
		if(dev->mtd_info.name)
			kfree(dev->mtd_info.name);

		if(dev->blkdev) {
			invalidate_inode_pages(dev->blkdev->bd_inode->i_mapping);
			close_bdev_excl(dev->blkdev);
		}
		kfree(dev);
	}
}


/* For a given size and initial erase size, calculate the number
 * and size of each erase region. Goes round the loop twice,
 * once to find out how many regions, then allocates space,
 * then round the loop again to fill it in.
 */
static struct mtd_erase_region_info *calc_erase_regions(
	size_t erase_size, size_t total_size, int *regions)
{
	struct mtd_erase_region_info *info = NULL;

	DEBUG(2, "calc_erase_regions, es = %zd size = %zd regions = %d\n",
	      erase_size, total_size, *regions);
	/* Make any user specified erasesize be a power of 2
	   and at least PAGE_SIZE */
	if(erase_size) {
		int es = erase_size;
		erase_size = 1;
		while(es != 1) {
			es >>= 1;
			erase_size <<= 1;
		}
		if(erase_size < PAGE_SIZE)
			erase_size = PAGE_SIZE;
	} else {
		erase_size = CONFIG_MTD_BLKDEV_ERASESIZE;
	}

	*regions = 0;

	do {
		int tot_size = total_size;
		int er_size = erase_size;
		int count = 0, offset = 0, regcnt = 0;

		while(tot_size) {
			count = tot_size / er_size;
			if(count) {
				tot_size = tot_size % er_size;
				if(info) {
					DEBUG(2, "adding to erase info off=%d er=%d cnt=%d\n",
					      offset, er_size, count);
					(info+regcnt)->offset = offset;
					(info+regcnt)->erasesize = er_size;
					(info+regcnt)->numblocks = count;
					(*regions)++;
				}
				regcnt++;
				offset += (count * er_size);
			}
			while(er_size > tot_size)
				er_size >>= 1;
		}
		if(info == NULL) {
			info = kmalloc(regcnt * sizeof(struct mtd_erase_region_info), GFP_KERNEL);
			if(!info)
				break;
		}
	} while(!(*regions));
	DEBUG(2, "calc_erase_regions done, es = %zd size = %zd regions = %d\n",
	      erase_size, total_size, *regions);
	return info;
}

619 | |||
620 | extern dev_t __init name_to_dev_t(const char *line); | ||
621 | |||
622 | static struct blkmtd_dev *add_device(char *devname, int readonly, int erase_size) | ||
623 | { | ||
624 | struct block_device *bdev; | ||
625 | int mode; | ||
626 | struct blkmtd_dev *dev; | ||
627 | |||
628 | if(!devname) | ||
629 | return NULL; | ||
630 | |||
631 | /* Get a handle on the device */ | ||
632 | |||
633 | |||
634 | #ifdef MODULE | ||
635 | mode = (readonly) ? O_RDONLY : O_RDWR; | ||
636 | bdev = open_bdev_excl(devname, mode, NULL); | ||
637 | #else | ||
638 | mode = (readonly) ? FMODE_READ : FMODE_WRITE; | ||
639 | bdev = open_by_devnum(name_to_dev_t(devname), mode); | ||
640 | #endif | ||
641 | if(IS_ERR(bdev)) { | ||
642 | err("error: cannot open device %s", devname); | ||
643 | DEBUG(2, "blkmtd: opening bdev returned %ld\n", PTR_ERR(bdev)); | ||
644 | return NULL; | ||
645 | } | ||
646 | |||
647 | DEBUG(1, "blkmtd: found a block device major = %d, minor = %d\n", | ||
648 | MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev)); | ||
649 | |||
650 | if(MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) { | ||
651 | err("attempting to use an MTD device as a block device"); | ||
652 | blkdev_put(bdev); | ||
653 | return NULL; | ||
654 | } | ||
655 | |||
656 | dev = kmalloc(sizeof(struct blkmtd_dev), GFP_KERNEL); | ||
657 | if(dev == NULL) { | ||
658 | blkdev_put(bdev); | ||
659 | return NULL; | ||
660 | } | ||
661 | |||
662 | memset(dev, 0, sizeof(struct blkmtd_dev)); | ||
663 | dev->blkdev = bdev; | ||
664 | if(!readonly) { | ||
665 | init_MUTEX(&dev->wrbuf_mutex); | ||
666 | } | ||
667 | |||
668 | dev->mtd_info.size = dev->blkdev->bd_inode->i_size & PAGE_MASK; | ||
669 | |||
670 | /* Setup the MTD structure */ | ||
671 | /* make the name contain the block device in */ | ||
672 | dev->mtd_info.name = kmalloc(sizeof("blkmtd: ") + strlen(devname), GFP_KERNEL); | ||
673 | if(dev->mtd_info.name == NULL) | ||
674 | goto devinit_err; | ||
675 | |||
676 | sprintf(dev->mtd_info.name, "blkmtd: %s", devname); | ||
677 | dev->mtd_info.eraseregions = calc_erase_regions(erase_size, dev->mtd_info.size, | ||
678 | &dev->mtd_info.numeraseregions); | ||
679 | if(dev->mtd_info.eraseregions == NULL) | ||
680 | goto devinit_err; | ||
681 | |||
682 | dev->mtd_info.erasesize = dev->mtd_info.eraseregions->erasesize; | ||
683 | DEBUG(1, "blkmtd: init: found %d erase regions\n", | ||
684 | dev->mtd_info.numeraseregions); | ||
685 | |||
686 | if(readonly) { | ||
687 | dev->mtd_info.type = MTD_ROM; | ||
688 | dev->mtd_info.flags = MTD_CAP_ROM; | ||
689 | } else { | ||
690 | dev->mtd_info.type = MTD_RAM; | ||
691 | dev->mtd_info.flags = MTD_CAP_RAM; | ||
692 | dev->mtd_info.erase = blkmtd_erase; | ||
693 | dev->mtd_info.write = blkmtd_write; | ||
694 | dev->mtd_info.writev = default_mtd_writev; | ||
695 | dev->mtd_info.sync = blkmtd_sync; | ||
696 | } | ||
697 | dev->mtd_info.read = blkmtd_read; | ||
698 | dev->mtd_info.readv = default_mtd_readv; | ||
699 | dev->mtd_info.priv = dev; | ||
700 | dev->mtd_info.owner = THIS_MODULE; | ||
701 | |||
702 | list_add(&dev->list, &blkmtd_device_list); | ||
703 | if (add_mtd_device(&dev->mtd_info)) { | ||
		/* Device didn't get added, so free the entry */
		list_del(&dev->list);
		goto devinit_err;
	} else {
		info("mtd%d: [%s] erase_size = %dKiB %s",
		     dev->mtd_info.index, dev->mtd_info.name + strlen("blkmtd: "),
		     dev->mtd_info.erasesize >> 10,
		     readonly ? "(read-only)" : "");
	}

	return dev;

 devinit_err:
	free_device(dev);
	return NULL;
}

/* Cleanup and exit - sync the device and kill off the kernel thread */
static void __devexit cleanup_blkmtd(void)
{
	struct list_head *temp1, *temp2;

	/* Remove the MTD devices */
	list_for_each_safe(temp1, temp2, &blkmtd_device_list) {
		struct blkmtd_dev *dev = list_entry(temp1, struct blkmtd_dev,
						    list);
		blkmtd_sync(&dev->mtd_info);
		del_mtd_device(&dev->mtd_info);
		info("mtd%d: [%s] removed", dev->mtd_info.index,
		     dev->mtd_info.name + strlen("blkmtd: "));
		list_del(&dev->list);
		free_device(dev);
	}
}

#ifndef MODULE

/* Handle kernel boot params */


static int __init param_blkmtd_device(char *str)
{
	int i;

	for(i = 0; i < MAX_DEVICES; i++) {
		device[i] = str;
		DEBUG(2, "blkmtd: device setup: %d = %s\n", i, device[i]);
		strsep(&str, ",");
	}
	return 1;
}


static int __init param_blkmtd_erasesz(char *str)
{
	int i;
	for(i = 0; i < MAX_DEVICES; i++) {
		char *val = strsep(&str, ",");
		if(val)
			erasesz[i] = simple_strtoul(val, NULL, 0);
		DEBUG(2, "blkmtd: erasesz setup: %d = %d\n", i, erasesz[i]);
	}

	return 1;
}


static int __init param_blkmtd_ro(char *str)
{
	int i;
	for(i = 0; i < MAX_DEVICES; i++) {
		char *val = strsep(&str, ",");
		if(val)
			ro[i] = simple_strtoul(val, NULL, 0);
		DEBUG(2, "blkmtd: ro setup: %d = %d\n", i, ro[i]);
	}

	return 1;
}


static int __init param_blkmtd_sync(char *str)
{
	if(str[0] == '1')
		sync = 1;
	return 1;
}

__setup("blkmtd_device=", param_blkmtd_device);
__setup("blkmtd_erasesz=", param_blkmtd_erasesz);
__setup("blkmtd_ro=", param_blkmtd_ro);
__setup("blkmtd_sync=", param_blkmtd_sync);

#endif


/* Startup */
static int __init init_blkmtd(void)
{
	int i;

	info("version " VERSION);
	/* Check args - device[0] is the bare minimum */
	if(!device[0]) {
		err("error: missing `device' name\n");
		return -EINVAL;
	}

	for(i = 0; i < MAX_DEVICES; i++)
		add_device(device[i], ro[i], erasesz[i] << 10);

	if(list_empty(&blkmtd_device_list))
		return -EINVAL;

	return 0;
}

module_init(init_blkmtd);
module_exit(cleanup_blkmtd);
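
For reference, a brief usage sketch of the module parameters declared above (the backing device path and the values are illustrative, not part of this commit): load the driver read/write over an existing block device with a 128KiB erase size and synchronous writes.

	modprobe blkmtd device=/dev/loop0 erasesz=128 sync=1

When the driver is built into the kernel rather than loaded as a module, the equivalent settings are passed through the __setup() hooks as the boot parameters blkmtd_device=, blkmtd_erasesz=, blkmtd_ro= and blkmtd_sync=.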