author		Joern Engel <joern@wohnheim.fh-wedel.de>	2006-04-11 01:54:17 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>		2006-04-11 09:18:43 -0400
commit		acc8dadc0b3f007e6e60da77feb2efe2a19c5cda (patch)
tree		df1d7ba842f9b944a0445f65e82a590c3e768dd4 /drivers
parent		f5e902817fee1589badca1284f49eecc0ef0c200 (diff)
[PATCH] Remove blkmtd
Remove the blkmtd driver.
- An alternative exists (block2mtd) that hasn't had a bug report for more
  than a year (see the usage sketch below).
- Most embedded people tend to use ancient kernels with custom patches from
  MTD CVS and elsewhere, so the one-year warning period neither helps nor
  hurts them much.
- It's in the way of klibc. The problems caused by pulling blkmtd support
  are fairly small, while the problems caused by delaying klibc can be
  fairly substantial. At best, the delay would place a severe burden on
  hpa's time.
Signed-off-by: Joern Engel <joern@wohnheim.fh-wedel.de>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
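
For anyone migrating, the removed driver's module parameters (device=, erasesz=
in KiB, ro=, sync=) collapse into block2mtd's single parameter. The lines below
are only an illustrative sketch; they assume the block2mtd parameter format of
this era, block2mtd=<dev>[,<erasesize>], so check
drivers/mtd/devices/block2mtd.c for the authoritative syntax. /dev/sda2 is a
placeholder device.

    # blkmtd (removed here): emulate an MTD on /dev/sda2 with a 128KiB erase size
    modprobe blkmtd device=/dev/sda2 erasesz=128
    # block2mtd replacement, same device and erase size (assumed parameter syntax)
    modprobe block2mtd block2mtd=/dev/sda2,128KiB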
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/mtd/devices/Kconfig	13
-rw-r--r--	drivers/mtd/devices/Makefile	1
-rw-r--r--	drivers/mtd/devices/blkmtd.c	819
3 files changed, 2 insertions, 831 deletions
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index dd628cb51e31..7fac438b5c32 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -129,8 +129,8 @@ config MTDRAM_ABS_POS
 	  allocating space from Linux's available memory. Otherwise, leave
 	  this set to zero. Most people will want to leave this as zero.
 
-config MTD_BLKMTD
-	tristate "MTD emulation using block device"
+config MTD_BLOCK2MTD
+	tristate "MTD using block device"
 	depends on MTD
 	help
 	  This driver allows a block device to appear as an MTD. It would
@@ -141,15 +141,6 @@ config MTD_BLKMTD
 	  Testing MTD users (eg JFFS2) on large media and media that might
 	  be removed during a write (using the floppy drive).
 
-config MTD_BLOCK2MTD
-	tristate "MTD using block device (rewrite)"
-	depends on MTD && EXPERIMENTAL
-	help
-	  This driver is basically the same at MTD_BLKMTD above, but
-	  experienced some interface changes plus serious speedups. In
-	  the long term, it should replace MTD_BLKMTD. Right now, you
-	  shouldn't entrust important data to it yet.
-
 comment "Disk-On-Chip Device Drivers"
 
 config MTD_DOC2000
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index 7c5ed2178380..b6573670316f 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -21,7 +21,6 @@ obj-$(CONFIG_MTD_PMC551)	+= pmc551.o
 obj-$(CONFIG_MTD_MS02NV)	+= ms02-nv.o
 obj-$(CONFIG_MTD_MTDRAM)	+= mtdram.o
 obj-$(CONFIG_MTD_LART)		+= lart.o
-obj-$(CONFIG_MTD_BLKMTD)	+= blkmtd.o
 obj-$(CONFIG_MTD_BLOCK2MTD)	+= block2mtd.o
 obj-$(CONFIG_MTD_DATAFLASH)	+= mtd_dataflash.o
 obj-$(CONFIG_MTD_M25P80)	+= m25p80.o
diff --git a/drivers/mtd/devices/blkmtd.c b/drivers/mtd/devices/blkmtd.c
deleted file mode 100644
index 79f2e1f23ebd..000000000000
--- a/drivers/mtd/devices/blkmtd.c
+++ /dev/null
@@ -1,819 +0,0 @@
-/*
- * $Id: blkmtd.c,v 1.27 2005/11/07 11:14:24 gleixner Exp $
- *
- * blkmtd.c - use a block device as a fake MTD
- *
- * Author: Simon Evans <spse@secret.org.uk>
- *
- * Copyright (C) 2001,2002 Simon Evans
- *
- * Licence: GPL
- *
- * How it works:
- *	The driver uses raw/io to read/write the device and the page
- *	cache to cache access. Writes update the page cache with the
- *	new data and mark it dirty and add the page into a BIO which
- *	is then written out.
- *
- *	It can be loaded Read-Only to prevent erases and writes to the
- *	medium.
- *
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/blkdev.h>
-#include <linux/bio.h>
-#include <linux/pagemap.h>
-#include <linux/list.h>
-#include <linux/init.h>
-#include <linux/mount.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mutex.h>
-
-#define err(format, arg...) printk(KERN_ERR "blkmtd: " format "\n" , ## arg)
-#define info(format, arg...) printk(KERN_INFO "blkmtd: " format "\n" , ## arg)
-#define warn(format, arg...) printk(KERN_WARNING "blkmtd: " format "\n" , ## arg)
-#define crit(format, arg...) printk(KERN_CRIT "blkmtd: " format "\n" , ## arg)
-
-
-/* Default erase size in K, always make it a multiple of PAGE_SIZE */
-#define CONFIG_MTD_BLKDEV_ERASESIZE (128 << 10)	/* 128KiB */
-#define VERSION "$Revision: 1.27 $"
-
-/* Info for the block device */
-struct blkmtd_dev {
-	struct list_head list;
-	struct block_device *blkdev;
-	struct mtd_info mtd_info;
-	struct mutex wrbuf_mutex;
-};
-
-
-/* Static info about the MTD, used in cleanup_module */
-static LIST_HEAD(blkmtd_device_list);
-
-
-static void blkmtd_sync(struct mtd_info *mtd);
-
-#define MAX_DEVICES 4
-
-/* Module parameters passed by insmod/modprobe */
-static char *device[MAX_DEVICES];    /* the block device to use */
-static int erasesz[MAX_DEVICES];     /* optional default erase size */
-static int ro[MAX_DEVICES];          /* optional read only flag */
-static int sync;
-
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Simon Evans <spse@secret.org.uk>");
-MODULE_DESCRIPTION("Emulate an MTD using a block device");
-module_param_array(device, charp, NULL, 0);
-MODULE_PARM_DESC(device, "block device to use");
-module_param_array(erasesz, int, NULL, 0);
-MODULE_PARM_DESC(erasesz, "optional erase size to use in KiB. eg 4=4KiB.");
-module_param_array(ro, bool, NULL, 0);
-MODULE_PARM_DESC(ro, "1=Read only, writes and erases cause errors");
-module_param(sync, bool, 0);
-MODULE_PARM_DESC(sync, "1=Synchronous writes");
-
-
-/* completion handler for BIO reads */
-static int bi_read_complete(struct bio *bio, unsigned int bytes_done, int error)
-{
-	if (bio->bi_size)
-		return 1;
-
-	complete((struct completion*)bio->bi_private);
-	return 0;
-}
-
-
-/* completion handler for BIO writes */
-static int bi_write_complete(struct bio *bio, unsigned int bytes_done, int error)
-{
-	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
-
-	if (bio->bi_size)
-		return 1;
-
-	if(!uptodate)
-		err("bi_write_complete: not uptodate\n");
-
-	do {
-		struct page *page = bvec->bv_page;
-		DEBUG(3, "Cleaning up page %ld\n", page->index);
-		if (--bvec >= bio->bi_io_vec)
-			prefetchw(&bvec->bv_page->flags);
-
-		if (uptodate) {
-			SetPageUptodate(page);
-		} else {
-			ClearPageUptodate(page);
-			SetPageError(page);
-		}
-		clear_page_dirty(page);
-		unlock_page(page);
-		page_cache_release(page);
-	} while (bvec >= bio->bi_io_vec);
-
-	complete((struct completion*)bio->bi_private);
-	return 0;
-}
-
-
-/* read one page from the block device */
-static int blkmtd_readpage(struct blkmtd_dev *dev, struct page *page)
-{
-	struct bio *bio;
-	struct completion event;
-	int err = -ENOMEM;
-
-	if(PageUptodate(page)) {
-		DEBUG(2, "blkmtd: readpage page %ld is already upto date\n", page->index);
-		unlock_page(page);
-		return 0;
-	}
-
-	ClearPageUptodate(page);
-	ClearPageError(page);
-
-	bio = bio_alloc(GFP_KERNEL, 1);
-	if(bio) {
-		init_completion(&event);
-		bio->bi_bdev = dev->blkdev;
-		bio->bi_sector = page->index << (PAGE_SHIFT-9);
-		bio->bi_private = &event;
-		bio->bi_end_io = bi_read_complete;
-		if(bio_add_page(bio, page, PAGE_SIZE, 0) == PAGE_SIZE) {
-			submit_bio(READ_SYNC, bio);
-			wait_for_completion(&event);
-			err = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : -EIO;
-			bio_put(bio);
-		}
-	}
-
-	if(err)
-		SetPageError(page);
-	else
-		SetPageUptodate(page);
-	flush_dcache_page(page);
-	unlock_page(page);
-	return err;
-}
-
-
-/* write out the current BIO and wait for it to finish */
-static int blkmtd_write_out(struct bio *bio)
-{
-	struct completion event;
-	int err;
-
-	if(!bio->bi_vcnt) {
-		bio_put(bio);
-		return 0;
-	}
-
-	init_completion(&event);
-	bio->bi_private = &event;
-	bio->bi_end_io = bi_write_complete;
-	submit_bio(WRITE_SYNC, bio);
-	wait_for_completion(&event);
-	DEBUG(3, "submit_bio completed, bi_vcnt = %d\n", bio->bi_vcnt);
-	err = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : -EIO;
-	bio_put(bio);
-	return err;
-}
-
-
-/**
- * blkmtd_add_page - add a page to the current BIO
- * @bio: bio to add to (NULL to alloc initial bio)
- * @blkdev: block device
- * @page: page to add
- * @pagecnt: pages left to add
- *
- * Adds a page to the current bio, allocating it if necessary. If it cannot be
- * added, the current bio is written out and a new one is allocated. Returns
- * the new bio to add or NULL on error
- */
-static struct bio *blkmtd_add_page(struct bio *bio, struct block_device *blkdev,
-				   struct page *page, int pagecnt)
-{
-
- retry:
-	if(!bio) {
-		bio = bio_alloc(GFP_KERNEL, pagecnt);
-		if(!bio)
-			return NULL;
-		bio->bi_sector = page->index << (PAGE_SHIFT-9);
-		bio->bi_bdev = blkdev;
-	}
-
-	if(bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) {
-		blkmtd_write_out(bio);
-		bio = NULL;
-		goto retry;
-	}
-	return bio;
-}
-
-
-/**
- * write_pages - write block of data to device via the page cache
- * @dev: device to write to
- * @buf: data source or NULL if erase (output is set to 0xff)
- * @to: offset into output device
- * @len: amount to data to write
- * @retlen: amount of data written
- *
- * Grab pages from the page cache and fill them with the source data.
- * Non page aligned start and end result in a readin of the page and
- * part of the page being modified. Pages are added to the bio and then written
- * out.
- */
-static int write_pages(struct blkmtd_dev *dev, const u_char *buf, loff_t to,
-		    size_t len, size_t *retlen)
-{
-	int pagenr, offset;
-	size_t start_len = 0, end_len;
-	int pagecnt = 0;
-	int err = 0;
-	struct bio *bio = NULL;
-	size_t thislen = 0;
-
-	pagenr = to >> PAGE_SHIFT;
-	offset = to & ~PAGE_MASK;
-
-	DEBUG(2, "blkmtd: write_pages: buf = %p to = %ld len = %zd pagenr = %d offset = %d\n",
-	      buf, (long)to, len, pagenr, offset);
-
-	/* see if we have to do a partial write at the start */
-	if(offset) {
-		start_len = ((offset + len) > PAGE_SIZE) ? PAGE_SIZE - offset : len;
-		len -= start_len;
-	}
-
-	/* calculate the length of the other two regions */
-	end_len = len & ~PAGE_MASK;
-	len -= end_len;
-
-	if(start_len)
-		pagecnt++;
-
-	if(len)
-		pagecnt += len >> PAGE_SHIFT;
-
-	if(end_len)
-		pagecnt++;
-
-	mutex_lock(&dev->wrbuf_mutex);
-
-	DEBUG(3, "blkmtd: write: start_len = %zd len = %zd end_len = %zd pagecnt = %d\n",
-	      start_len, len, end_len, pagecnt);
-
-	if(start_len) {
-		/* do partial start region */
-		struct page *page;
-
-		DEBUG(3, "blkmtd: write: doing partial start, page = %d len = %zd offset = %d\n",
-		      pagenr, start_len, offset);
-
-		BUG_ON(!buf);
-		page = read_cache_page(dev->blkdev->bd_inode->i_mapping, pagenr, (filler_t *)blkmtd_readpage, dev);
-		lock_page(page);
-		if(PageDirty(page)) {
-			err("to = %lld start_len = %zd len = %zd end_len = %zd pagenr = %d\n",
-			    to, start_len, len, end_len, pagenr);
-			BUG();
-		}
-		memcpy(page_address(page)+offset, buf, start_len);
-		set_page_dirty(page);
-		SetPageUptodate(page);
-		buf += start_len;
-		thislen = start_len;
-		bio = blkmtd_add_page(bio, dev->blkdev, page, pagecnt);
-		if(!bio) {
-			err = -ENOMEM;
-			err("bio_add_page failed\n");
-			goto write_err;
-		}
-		pagecnt--;
-		pagenr++;
-	}
-
-	/* Now do the main loop to a page aligned, n page sized output */
-	if(len) {
-		int pagesc = len >> PAGE_SHIFT;
-		DEBUG(3, "blkmtd: write: whole pages start = %d, count = %d\n",
-		      pagenr, pagesc);
-		while(pagesc) {
-			struct page *page;
-
-			/* see if page is in the page cache */
-			DEBUG(3, "blkmtd: write: grabbing page %d from page cache\n", pagenr);
-			page = grab_cache_page(dev->blkdev->bd_inode->i_mapping, pagenr);
-			if(PageDirty(page)) {
-				BUG();
-			}
-			if(!page) {
-				warn("write: cannot grab cache page %d", pagenr);
-				err = -ENOMEM;
-				goto write_err;
-			}
-			if(!buf) {
-				memset(page_address(page), 0xff, PAGE_SIZE);
-			} else {
-				memcpy(page_address(page), buf, PAGE_SIZE);
-				buf += PAGE_SIZE;
-			}
-			bio = blkmtd_add_page(bio, dev->blkdev, page, pagecnt);
-			if(!bio) {
-				err = -ENOMEM;
-				err("bio_add_page failed\n");
-				goto write_err;
-			}
-			pagenr++;
-			pagecnt--;
-			set_page_dirty(page);
-			SetPageUptodate(page);
-			pagesc--;
-			thislen += PAGE_SIZE;
-		}
-	}
-
-	if(end_len) {
-		/* do the third region */
-		struct page *page;
-		DEBUG(3, "blkmtd: write: doing partial end, page = %d len = %zd\n",
-		      pagenr, end_len);
-		BUG_ON(!buf);
-		page = read_cache_page(dev->blkdev->bd_inode->i_mapping, pagenr, (filler_t *)blkmtd_readpage, dev);
-		lock_page(page);
-		if(PageDirty(page)) {
-			err("to = %lld start_len = %zd len = %zd end_len = %zd pagenr = %d\n",
-			    to, start_len, len, end_len, pagenr);
-			BUG();
-		}
-		memcpy(page_address(page), buf, end_len);
-		set_page_dirty(page);
-		SetPageUptodate(page);
-		DEBUG(3, "blkmtd: write: writing out partial end\n");
-		thislen += end_len;
-		bio = blkmtd_add_page(bio, dev->blkdev, page, pagecnt);
-		if(!bio) {
-			err = -ENOMEM;
-			err("bio_add_page failed\n");
-			goto write_err;
-		}
-		pagenr++;
-	}
-
-	DEBUG(3, "blkmtd: write: got %d vectors to write\n", bio->bi_vcnt);
- write_err:
-	if(bio)
-		blkmtd_write_out(bio);
-
-	DEBUG(2, "blkmtd: write: end, retlen = %zd, err = %d\n", *retlen, err);
-	mutex_unlock(&dev->wrbuf_mutex);
-
-	if(retlen)
-		*retlen = thislen;
-	return err;
-}
-
-
-/* erase a specified part of the device */
-static int blkmtd_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
-	struct blkmtd_dev *dev = mtd->priv;
-	struct mtd_erase_region_info *einfo = mtd->eraseregions;
-	int numregions = mtd->numeraseregions;
-	size_t from;
-	u_long len;
-	int err = -EIO;
-	size_t retlen;
-
-	instr->state = MTD_ERASING;
-	from = instr->addr;
-	len = instr->len;
-
-	/* check erase region has valid start and length */
-	DEBUG(2, "blkmtd: erase: dev = `%s' from = 0x%zx len = 0x%lx\n",
-	      mtd->name+9, from, len);
-	while(numregions) {
-		DEBUG(3, "blkmtd: checking erase region = 0x%08X size = 0x%X num = 0x%x\n",
-		      einfo->offset, einfo->erasesize, einfo->numblocks);
-		if(from >= einfo->offset
-		   && from < einfo->offset + (einfo->erasesize * einfo->numblocks)) {
-			if(len == einfo->erasesize
-			   && ( (from - einfo->offset) % einfo->erasesize == 0))
-				break;
-		}
-		numregions--;
-		einfo++;
-	}
-
-	if(!numregions) {
-		/* Not a valid erase block */
-		err("erase: invalid erase request 0x%lX @ 0x%08zX", len, from);
-		instr->state = MTD_ERASE_FAILED;
-		err = -EIO;
-	}
-
-	if(instr->state != MTD_ERASE_FAILED) {
-		/* do the erase */
-		DEBUG(3, "Doing erase from = %zd len = %ld\n", from, len);
-		err = write_pages(dev, NULL, from, len, &retlen);
-		if(err || retlen != len) {
-			err("erase failed err = %d", err);
-			instr->state = MTD_ERASE_FAILED;
-		} else {
-			instr->state = MTD_ERASE_DONE;
-		}
-	}
-
-	DEBUG(3, "blkmtd: erase: checking callback\n");
-	mtd_erase_callback(instr);
-	DEBUG(2, "blkmtd: erase: finished (err = %d)\n", err);
-	return err;
-}
-
-
-/* read a range of the data via the page cache */
-static int blkmtd_read(struct mtd_info *mtd, loff_t from, size_t len,
-		       size_t *retlen, u_char *buf)
-{
-	struct blkmtd_dev *dev = mtd->priv;
-	int err = 0;
-	int offset;
-	int pagenr, pages;
-	size_t thislen = 0;
-
-	DEBUG(2, "blkmtd: read: dev = `%s' from = %lld len = %zd buf = %p\n",
-	      mtd->name+9, from, len, buf);
-
-	if(from > mtd->size)
-		return -EINVAL;
-	if(from + len > mtd->size)
-		len = mtd->size - from;
-
-	pagenr = from >> PAGE_SHIFT;
-	offset = from - (pagenr << PAGE_SHIFT);
-
-	pages = (offset+len+PAGE_SIZE-1) >> PAGE_SHIFT;
-	DEBUG(3, "blkmtd: read: pagenr = %d offset = %d, pages = %d\n",
-	      pagenr, offset, pages);
-
-	while(pages) {
-		struct page *page;
-		int cpylen;
-
-		DEBUG(3, "blkmtd: read: looking for page: %d\n", pagenr);
-		page = read_cache_page(dev->blkdev->bd_inode->i_mapping, pagenr, (filler_t *)blkmtd_readpage, dev);
-		if(IS_ERR(page)) {
-			err = -EIO;
-			goto readerr;
-		}
-
-		cpylen = (PAGE_SIZE > len) ? len : PAGE_SIZE;
-		if(offset+cpylen > PAGE_SIZE)
-			cpylen = PAGE_SIZE-offset;
-
-		memcpy(buf + thislen, page_address(page) + offset, cpylen);
-		offset = 0;
-		len -= cpylen;
-		thislen += cpylen;
-		pagenr++;
-		pages--;
-		if(!PageDirty(page))
-			page_cache_release(page);
-	}
-
- readerr:
-	if(retlen)
-		*retlen = thislen;
-	DEBUG(2, "blkmtd: end read: retlen = %zd, err = %d\n", thislen, err);
-	return err;
-}
-
-
-/* write data to the underlying device */
-static int blkmtd_write(struct mtd_info *mtd, loff_t to, size_t len,
-			size_t *retlen, const u_char *buf)
-{
-	struct blkmtd_dev *dev = mtd->priv;
-	int err;
-
-	if(!len)
-		return 0;
-
-	DEBUG(2, "blkmtd: write: dev = `%s' to = %lld len = %zd buf = %p\n",
-	      mtd->name+9, to, len, buf);
-
-	if(to >= mtd->size) {
-		return -ENOSPC;
-	}
-
-	if(to + len > mtd->size) {
-		len = mtd->size - to;
-	}
-
-	err = write_pages(dev, buf, to, len, retlen);
-	if(err > 0)
-		err = 0;
-	DEBUG(2, "blkmtd: write: end, err = %d\n", err);
-	return err;
-}
-
-
-/* sync the device - wait until the write queue is empty */
-static void blkmtd_sync(struct mtd_info *mtd)
-{
-	/* Currently all writes are synchronous */
-}
-
-
-static void free_device(struct blkmtd_dev *dev)
-{
-	DEBUG(2, "blkmtd: free_device() dev = %p\n", dev);
-	if(dev) {
-		kfree(dev->mtd_info.eraseregions);
-		kfree(dev->mtd_info.name);
-		if(dev->blkdev) {
-			invalidate_inode_pages(dev->blkdev->bd_inode->i_mapping);
-			close_bdev_excl(dev->blkdev);
-		}
-		kfree(dev);
-	}
-}
-
-
-/* For a given size and initial erase size, calculate the number
- * and size of each erase region. Goes round the loop twice,
- * once to find out how many regions, then allocates space,
- * then round the loop again to fill it in.
- */
-static struct mtd_erase_region_info *calc_erase_regions(
-	size_t erase_size, size_t total_size, int *regions)
-{
-	struct mtd_erase_region_info *info = NULL;
-
-	DEBUG(2, "calc_erase_regions, es = %zd size = %zd regions = %d\n",
-	      erase_size, total_size, *regions);
-	/* Make any user specified erasesize be a power of 2
-	   and at least PAGE_SIZE */
-	if(erase_size) {
-		int es = erase_size;
-		erase_size = 1;
-		while(es != 1) {
-			es >>= 1;
-			erase_size <<= 1;
-		}
-		if(erase_size < PAGE_SIZE)
-			erase_size = PAGE_SIZE;
-	} else {
-		erase_size = CONFIG_MTD_BLKDEV_ERASESIZE;
-	}
-
-	*regions = 0;
-
-	do {
-		int tot_size = total_size;
-		int er_size = erase_size;
-		int count = 0, offset = 0, regcnt = 0;
-
-		while(tot_size) {
-			count = tot_size / er_size;
-			if(count) {
-				tot_size = tot_size % er_size;
-				if(info) {
-					DEBUG(2, "adding to erase info off=%d er=%d cnt=%d\n",
-					      offset, er_size, count);
-					(info+regcnt)->offset = offset;
-					(info+regcnt)->erasesize = er_size;
-					(info+regcnt)->numblocks = count;
-					(*regions)++;
-				}
-				regcnt++;
-				offset += (count * er_size);
-			}
-			while(er_size > tot_size)
-				er_size >>= 1;
-		}
-		if(info == NULL) {
-			info = kmalloc(regcnt * sizeof(struct mtd_erase_region_info), GFP_KERNEL);
-			if(!info)
-				break;
-		}
-	} while(!(*regions));
-	DEBUG(2, "calc_erase_regions done, es = %zd size = %zd regions = %d\n",
-	      erase_size, total_size, *regions);
-	return info;
-}
-
-
-static struct blkmtd_dev *add_device(char *devname, int readonly, int erase_size)
-{
-	struct block_device *bdev;
-	int mode;
-	struct blkmtd_dev *dev;
-
-	if(!devname)
-		return NULL;
-
-	/* Get a handle on the device */
-
-
-#ifdef MODULE
-	mode = (readonly) ? O_RDONLY : O_RDWR;
-	bdev = open_bdev_excl(devname, mode, NULL);
-#else
-	mode = (readonly) ? FMODE_READ : FMODE_WRITE;
-	bdev = open_by_devnum(name_to_dev_t(devname), mode);
-#endif
-	if(IS_ERR(bdev)) {
-		err("error: cannot open device %s", devname);
-		DEBUG(2, "blkmtd: opening bdev returned %ld\n", PTR_ERR(bdev));
-		return NULL;
-	}
-
-	DEBUG(1, "blkmtd: found a block device major = %d, minor = %d\n",
-	      MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
-
-	if(MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
-		err("attempting to use an MTD device as a block device");
-		blkdev_put(bdev);
-		return NULL;
-	}
-
-	dev = kmalloc(sizeof(struct blkmtd_dev), GFP_KERNEL);
-	if(dev == NULL) {
-		blkdev_put(bdev);
-		return NULL;
-	}
-
-	memset(dev, 0, sizeof(struct blkmtd_dev));
-	dev->blkdev = bdev;
-	if(!readonly) {
-		mutex_init(&dev->wrbuf_mutex);
-	}
-
-	dev->mtd_info.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
-
-	/* Setup the MTD structure */
-	/* make the name contain the block device in */
-	dev->mtd_info.name = kmalloc(sizeof("blkmtd: ") + strlen(devname), GFP_KERNEL);
-	if(dev->mtd_info.name == NULL)
-		goto devinit_err;
-
-	sprintf(dev->mtd_info.name, "blkmtd: %s", devname);
-	dev->mtd_info.eraseregions = calc_erase_regions(erase_size, dev->mtd_info.size,
-							&dev->mtd_info.numeraseregions);
-	if(dev->mtd_info.eraseregions == NULL)
-		goto devinit_err;
-
-	dev->mtd_info.erasesize = dev->mtd_info.eraseregions->erasesize;
-	DEBUG(1, "blkmtd: init: found %d erase regions\n",
-	      dev->mtd_info.numeraseregions);
-
-	if(readonly) {
-		dev->mtd_info.type = MTD_ROM;
-		dev->mtd_info.flags = MTD_CAP_ROM;
-	} else {
-		dev->mtd_info.type = MTD_RAM;
-		dev->mtd_info.flags = MTD_CAP_RAM;
-		dev->mtd_info.erase = blkmtd_erase;
-		dev->mtd_info.write = blkmtd_write;
-		dev->mtd_info.writev = default_mtd_writev;
-		dev->mtd_info.sync = blkmtd_sync;
-	}
-	dev->mtd_info.read = blkmtd_read;
-	dev->mtd_info.readv = default_mtd_readv;
-	dev->mtd_info.priv = dev;
-	dev->mtd_info.owner = THIS_MODULE;
-
-	list_add(&dev->list, &blkmtd_device_list);
-	if (add_mtd_device(&dev->mtd_info)) {
-		/* Device didnt get added, so free the entry */
-		list_del(&dev->list);
-		goto devinit_err;
-	} else {
-		info("mtd%d: [%s] erase_size = %dKiB %s",
-		     dev->mtd_info.index, dev->mtd_info.name + strlen("blkmtd: "),
-		     dev->mtd_info.erasesize >> 10,
-		     readonly ? "(read-only)" : "");
-	}
-
-	return dev;
-
- devinit_err:
-	free_device(dev);
-	return NULL;
-}
-
-
-/* Cleanup and exit - sync the device and kill of the kernel thread */
-static void __devexit cleanup_blkmtd(void)
-{
-	struct list_head *temp1, *temp2;
-
-	/* Remove the MTD devices */
-	list_for_each_safe(temp1, temp2, &blkmtd_device_list) {
-		struct blkmtd_dev *dev = list_entry(temp1, struct blkmtd_dev,
-						    list);
-		blkmtd_sync(&dev->mtd_info);
-		del_mtd_device(&dev->mtd_info);
-		info("mtd%d: [%s] removed", dev->mtd_info.index,
-		     dev->mtd_info.name + strlen("blkmtd: "));
-		list_del(&dev->list);
-		free_device(dev);
-	}
-}
-
-#ifndef MODULE
-
-/* Handle kernel boot params */
-
-
-static int __init param_blkmtd_device(char *str)
-{
-	int i;
-
-	for(i = 0; i < MAX_DEVICES; i++) {
-		device[i] = str;
-		DEBUG(2, "blkmtd: device setup: %d = %s\n", i, device[i]);
-		strsep(&str, ",");
-	}
-	return 1;
-}
-
-
-static int __init param_blkmtd_erasesz(char *str)
-{
-	int i;
-	for(i = 0; i < MAX_DEVICES; i++) {
-		char *val = strsep(&str, ",");
-		if(val)
-			erasesz[i] = simple_strtoul(val, NULL, 0);
-		DEBUG(2, "blkmtd: erasesz setup: %d = %d\n", i, erasesz[i]);
-	}
-
-	return 1;
-}
-
-
-static int __init param_blkmtd_ro(char *str)
-{
-	int i;
-	for(i = 0; i < MAX_DEVICES; i++) {
-		char *val = strsep(&str, ",");
-		if(val)
-			ro[i] = simple_strtoul(val, NULL, 0);
-		DEBUG(2, "blkmtd: ro setup: %d = %d\n", i, ro[i]);
-	}
-
-	return 1;
-}
-
-
-static int __init param_blkmtd_sync(char *str)
-{
-	if(str[0] == '1')
-		sync = 1;
-	return 1;
-}
-
-__setup("blkmtd_device=", param_blkmtd_device);
-__setup("blkmtd_erasesz=", param_blkmtd_erasesz);
-__setup("blkmtd_ro=", param_blkmtd_ro);
-__setup("blkmtd_sync=", param_blkmtd_sync);
-
-#endif
-
-
-/* Startup */
-static int __init init_blkmtd(void)
-{
-	int i;
-
-	info("version " VERSION);
-	/* Check args - device[0] is the bare minimum*/
-	if(!device[0]) {
-		err("error: missing `device' name\n");
-		return -EINVAL;
-	}
-
-	for(i = 0; i < MAX_DEVICES; i++)
-		add_device(device[i], ro[i], erasesz[i] << 10);
-
-	if(list_empty(&blkmtd_device_list))
-		return -EINVAL;
-
-	return 0;
-}
-
-module_init(init_blkmtd);
-module_exit(cleanup_blkmtd);