Diffstat (limited to 'fs/jffs2/fs.c')
-rw-r--r--  fs/jffs2/fs.c  104
1 file changed, 48 insertions(+), 56 deletions(-)
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 5687c3f42002..543420665c5b 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -7,7 +7,7 @@
  *
  * For licensing information, see the file 'LICENCE' in this directory.
  *
- * $Id: fs.c,v 1.56 2005/07/06 12:13:09 dwmw2 Exp $
+ * $Id: fs.c,v 1.66 2005/09/27 13:17:29 dedekind Exp $
  *
  */
 
@@ -40,7 +40,7 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
 	int ret;
 	D1(printk(KERN_DEBUG "jffs2_setattr(): ino #%lu\n", inode->i_ino));
 	ret = inode_change_ok(inode, iattr);
 	if (ret)
 		return ret;
 
 	/* Special cases - we don't want more than one data node
@@ -73,8 +73,9 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
 		kfree(mdata);
 		return -ENOMEM;
 	}
 
-	ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &phys_ofs, &alloclen, ALLOC_NORMAL);
+	ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &phys_ofs, &alloclen,
+				  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
 	if (ret) {
 		jffs2_free_raw_inode(ri);
 		if (S_ISLNK(inode->i_mode & S_IFMT))
@@ -83,7 +84,7 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
 	}
 	down(&f->sem);
 	ivalid = iattr->ia_valid;
 
 	ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
 	ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
 	ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen);
@@ -99,7 +100,7 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
 		if (iattr->ia_mode & S_ISGID &&
 		    !in_group_p(je16_to_cpu(ri->gid)) && !capable(CAP_FSETID))
 			ri->mode = cpu_to_jemode(iattr->ia_mode & ~S_ISGID);
 		else
 			ri->mode = cpu_to_jemode(iattr->ia_mode);
 	else
 		ri->mode = cpu_to_jemode(inode->i_mode);
@@ -128,7 +129,7 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
 	new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, phys_ofs, ALLOC_NORMAL);
 	if (S_ISLNK(inode->i_mode))
 		kfree(mdata);
 
 	if (IS_ERR(new_metadata)) {
 		jffs2_complete_reservation(c);
 		jffs2_free_raw_inode(ri);
@@ -147,7 +148,7 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
 	old_metadata = f->metadata;
 
 	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size)
-		jffs2_truncate_fraglist (c, &f->fragtree, iattr->ia_size);
+		jffs2_truncate_fragtree (c, &f->fragtree, iattr->ia_size);
 
 	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
 		jffs2_add_full_dnode_to_inode(c, f, new_metadata);
@@ -166,7 +167,7 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
 	jffs2_complete_reservation(c);
 
 	/* We have to do the vmtruncate() without f->sem held, since
 	   some pages may be locked and waiting for it in readpage().
 	   We are protected from a simultaneous write() extending i_size
 	   back past iattr->ia_size, because do_truncate() holds the
 	   generic inode semaphore. */
@@ -194,31 +195,27 @@ int jffs2_statfs(struct super_block *sb, struct kstatfs *buf)
 	buf->f_namelen = JFFS2_MAX_NAME_LEN;
 
 	spin_lock(&c->erase_completion_lock);
-
 	avail = c->dirty_size + c->free_size;
 	if (avail > c->sector_size * c->resv_blocks_write)
 		avail -= c->sector_size * c->resv_blocks_write;
 	else
 		avail = 0;
+	spin_unlock(&c->erase_completion_lock);
 
 	buf->f_bavail = buf->f_bfree = avail >> PAGE_SHIFT;
 
-	D2(jffs2_dump_block_lists(c));
-
-	spin_unlock(&c->erase_completion_lock);
-
 	return 0;
 }
 
 
 void jffs2_clear_inode (struct inode *inode)
 {
 	/* We can forget about this inode for now - drop all
 	 * the nodelists associated with it, etc.
 	 */
 	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
 	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
 
 	D1(printk(KERN_DEBUG "jffs2_clear_inode(): ino #%lu mode %o\n", inode->i_ino, inode->i_mode));
 
 	jffs2_do_clear_inode(c, f);
@@ -237,7 +234,7 @@ void jffs2_read_inode (struct inode *inode)
 	c = JFFS2_SB_INFO(inode->i_sb);
 
 	jffs2_init_inode_info(f);
 
 	ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node);
 
 	if (ret) {
@@ -257,14 +254,14 @@ void jffs2_read_inode (struct inode *inode)
 
 	inode->i_blksize = PAGE_SIZE;
 	inode->i_blocks = (inode->i_size + 511) >> 9;
 
 	switch (inode->i_mode & S_IFMT) {
 		jint16_t rdev;
 
 	case S_IFLNK:
 		inode->i_op = &jffs2_symlink_inode_operations;
 		break;
 
 	case S_IFDIR:
 	{
 		struct jffs2_full_dirent *fd;
@@ -301,7 +298,7 @@ void jffs2_read_inode (struct inode *inode)
 			jffs2_do_clear_inode(c, f);
 			make_bad_inode(inode);
 			return;
 		}
 
 	case S_IFSOCK:
 	case S_IFIFO:
@@ -357,11 +354,11 @@ int jffs2_remount_fs (struct super_block *sb, int *flags, char *data)
 		down(&c->alloc_sem);
 		jffs2_flush_wbuf_pad(c);
 		up(&c->alloc_sem);
 	}
 
 	if (!(*flags & MS_RDONLY))
 		jffs2_start_garbage_collect_thread(c);
 
 	*flags |= MS_NOATIME;
 
 	return 0;
@@ -395,9 +392,9 @@ struct inode *jffs2_new_inode (struct inode *dir_i, int mode, struct jffs2_raw_i
 	D1(printk(KERN_DEBUG "jffs2_new_inode(): dir_i %ld, mode 0x%x\n", dir_i->i_ino, mode));
 
 	c = JFFS2_SB_INFO(sb);
 
 	inode = new_inode(sb);
 
 	if (!inode)
 		return ERR_PTR(-ENOMEM);
 
@@ -461,40 +458,24 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
 #endif
 
 	c->flash_size = c->mtd->size;
-
-	/*
-	 * Check, if we have to concatenate physical blocks to larger virtual blocks
-	 * to reduce the memorysize for c->blocks. (kmalloc allows max. 128K allocation)
-	 */
-	c->sector_size = c->mtd->erasesize;
+	c->sector_size = c->mtd->erasesize;
 	blocks = c->flash_size / c->sector_size;
-	if (!(c->mtd->flags & MTD_NO_VIRTBLOCKS)) {
-		while ((blocks * sizeof (struct jffs2_eraseblock)) > (128 * 1024)) {
-			blocks >>= 1;
-			c->sector_size <<= 1;
-		}
-	}
 
 	/*
 	 * Size alignment check
 	 */
 	if ((c->sector_size * blocks) != c->flash_size) {
 		c->flash_size = c->sector_size * blocks;
 		printk(KERN_INFO "jffs2: Flash size not aligned to erasesize, reducing to %dKiB\n",
 		       c->flash_size / 1024);
 	}
 
-	if (c->sector_size != c->mtd->erasesize)
-		printk(KERN_INFO "jffs2: Erase block size too small (%dKiB). Using virtual blocks size (%dKiB) instead\n",
-		       c->mtd->erasesize / 1024, c->sector_size / 1024);
-
 	if (c->flash_size < 5*c->sector_size) {
 		printk(KERN_ERR "jffs2: Too few erase blocks (%d)\n", c->flash_size / c->sector_size);
 		return -EINVAL;
 	}
 
 	c->cleanmarker_size = sizeof(struct jffs2_unknown_node);
-	/* Joern -- stick alignment for weird 8-byte-page flash here */
 
 	/* NAND (or other bizarre) flash... do setup accordingly */
 	ret = jffs2_flash_setup(c);
@@ -517,7 +498,7 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
 	root_i = iget(sb, 1);
 	if (is_bad_inode(root_i)) {
 		D1(printk(KERN_WARNING "get root inode failed\n"));
-		goto out_nodes;
+		goto out_root_i;
 	}
 
 	D1(printk(KERN_DEBUG "jffs2_do_fill_super(): d_alloc_root()\n"));
@@ -535,10 +516,9 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
 
  out_root_i:
 	iput(root_i);
- out_nodes:
 	jffs2_free_ino_caches(c);
 	jffs2_free_raw_node_refs(c);
-	if (c->mtd->flags & MTD_NO_VIRTBLOCKS)
+	if (jffs2_blocks_use_vmalloc(c))
 		vfree(c->blocks);
 	else
 		kfree(c->blocks);
@@ -563,16 +543,16 @@ struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
 	struct jffs2_inode_cache *ic;
 	if (!nlink) {
 		/* The inode has zero nlink but its nodes weren't yet marked
 		   obsolete. This has to be because we're still waiting for
 		   the final (close() and) iput() to happen.
 
 		   There's a possibility that the final iput() could have
 		   happened while we were contemplating. In order to ensure
 		   that we don't cause a new read_inode() (which would fail)
 		   for the inode in question, we use ilookup() in this case
 		   instead of iget().
 
 		   The nlink can't _become_ zero at this point because we're
 		   holding the alloc_sem, and jffs2_do_unlink() would also
 		   need that while decrementing nlink on any inode.
 		*/
@@ -619,19 +599,19 @@ struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
 	return JFFS2_INODE_INFO(inode);
 }
 
 unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
 				   struct jffs2_inode_info *f,
 				   unsigned long offset,
 				   unsigned long *priv)
 {
 	struct inode *inode = OFNI_EDONI_2SFFJ(f);
 	struct page *pg;
 
 	pg = read_cache_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
 			     (void *)jffs2_do_readpage_unlock, inode);
 	if (IS_ERR(pg))
 		return (void *)pg;
 
 	*priv = (unsigned long)pg;
 	return kmap(pg);
 }
@@ -648,7 +628,7 @@ void jffs2_gc_release_page(struct jffs2_sb_info *c,
 
 static int jffs2_flash_setup(struct jffs2_sb_info *c) {
 	int ret = 0;
 
 	if (jffs2_cleanmarker_oob(c)) {
 		/* NAND flash... do setup accordingly */
 		ret = jffs2_nand_flash_setup(c);
@@ -662,14 +642,21 @@ static int jffs2_flash_setup(struct jffs2_sb_info *c) {
 		if (ret)
 			return ret;
 	}
 
 	/* and Dataflash */
 	if (jffs2_dataflash(c)) {
 		ret = jffs2_dataflash_setup(c);
 		if (ret)
 			return ret;
 	}
 
+	/* and Intel "Sibley" flash */
+	if (jffs2_nor_wbuf_flash(c)) {
+		ret = jffs2_nor_wbuf_flash_setup(c);
+		if (ret)
+			return ret;
+	}
+
 	return ret;
 }
 
@@ -683,9 +670,14 @@ void jffs2_flash_cleanup(struct jffs2_sb_info *c) {
 	if (jffs2_nor_ecc(c)) {
 		jffs2_nor_ecc_flash_cleanup(c);
 	}
 
 	/* and DataFlash */
 	if (jffs2_dataflash(c)) {
 		jffs2_dataflash_cleanup(c);
 	}
+
+	/* and Intel "Sibley" flash */
+	if (jffs2_nor_wbuf_flash(c)) {
+		jffs2_nor_wbuf_flash_cleanup(c);
+	}
 }