diff options
Diffstat (limited to 'fs')
112 files changed, 6070 insertions, 2852 deletions
diff --git a/fs/9p/error.c b/fs/9p/error.c index fee5d19179c5..834cb179e388 100644 --- a/fs/9p/error.c +++ b/fs/9p/error.c | |||
@@ -33,6 +33,7 @@ | |||
33 | 33 | ||
34 | #include <linux/list.h> | 34 | #include <linux/list.h> |
35 | #include <linux/jhash.h> | 35 | #include <linux/jhash.h> |
36 | #include <linux/string.h> | ||
36 | 37 | ||
37 | #include "debug.h" | 38 | #include "debug.h" |
38 | #include "error.h" | 39 | #include "error.h" |
diff --git a/fs/9p/trans_sock.c b/fs/9p/trans_sock.c index 01e26f0013ac..a93c2bf94c33 100644 --- a/fs/9p/trans_sock.c +++ b/fs/9p/trans_sock.c | |||
@@ -269,8 +269,7 @@ static void v9fs_sock_close(struct v9fs_transport *trans) | |||
269 | dprintk(DEBUG_TRANS, "socket closed\n"); | 269 | dprintk(DEBUG_TRANS, "socket closed\n"); |
270 | } | 270 | } |
271 | 271 | ||
272 | if (ts) | 272 | kfree(ts); |
273 | kfree(ts); | ||
274 | 273 | ||
275 | trans->priv = NULL; | 274 | trans->priv = NULL; |
276 | } | 275 | } |
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c index 82303f3bf76f..418c3743fdee 100644 --- a/fs/9p/v9fs.c +++ b/fs/9p/v9fs.c | |||
@@ -266,7 +266,7 @@ v9fs_session_init(struct v9fs_session_info *v9ses, | |||
266 | 266 | ||
267 | v9ses->remotename = __getname(); | 267 | v9ses->remotename = __getname(); |
268 | if (!v9ses->remotename) { | 268 | if (!v9ses->remotename) { |
269 | putname(v9ses->name); | 269 | __putname(v9ses->name); |
270 | return -ENOMEM; | 270 | return -ENOMEM; |
271 | } | 271 | } |
272 | 272 | ||
@@ -411,8 +411,8 @@ void v9fs_session_close(struct v9fs_session_info *v9ses) | |||
411 | if (v9ses->transport) | 411 | if (v9ses->transport) |
412 | v9ses->transport->close(v9ses->transport); | 412 | v9ses->transport->close(v9ses->transport); |
413 | 413 | ||
414 | putname(v9ses->name); | 414 | __putname(v9ses->name); |
415 | putname(v9ses->remotename); | 415 | __putname(v9ses->remotename); |
416 | } | 416 | } |
417 | 417 | ||
418 | /** | 418 | /** |
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 2b696ae6655a..be7288184fa9 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c | |||
@@ -1105,7 +1105,7 @@ static int v9fs_vfs_readlink(struct dentry *dentry, char __user * buffer, | |||
1105 | } | 1105 | } |
1106 | } | 1106 | } |
1107 | 1107 | ||
1108 | putname(link); | 1108 | __putname(link); |
1109 | return retval; | 1109 | return retval; |
1110 | } | 1110 | } |
1111 | 1111 | ||
@@ -1129,7 +1129,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd) | |||
1129 | len = v9fs_readlink(dentry, link, strlen(link)); | 1129 | len = v9fs_readlink(dentry, link, strlen(link)); |
1130 | 1130 | ||
1131 | if (len < 0) { | 1131 | if (len < 0) { |
1132 | putname(link); | 1132 | __putname(link); |
1133 | link = ERR_PTR(len); | 1133 | link = ERR_PTR(len); |
1134 | } else | 1134 | } else |
1135 | link[len] = 0; | 1135 | link[len] = 0; |
@@ -1152,7 +1152,7 @@ static void v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void | |||
1152 | 1152 | ||
1153 | dprintk(DEBUG_VFS, " %s %s\n", dentry->d_name.name, s); | 1153 | dprintk(DEBUG_VFS, " %s %s\n", dentry->d_name.name, s); |
1154 | if (!IS_ERR(s)) | 1154 | if (!IS_ERR(s)) |
1155 | putname(s); | 1155 | __putname(s); |
1156 | } | 1156 | } |
1157 | 1157 | ||
1158 | /** | 1158 | /** |
@@ -1228,7 +1228,7 @@ v9fs_vfs_link(struct dentry *old_dentry, struct inode *dir, | |||
1228 | FreeMem: | 1228 | FreeMem: |
1229 | kfree(mistat); | 1229 | kfree(mistat); |
1230 | kfree(fcall); | 1230 | kfree(fcall); |
1231 | putname(symname); | 1231 | __putname(symname); |
1232 | return retval; | 1232 | return retval; |
1233 | } | 1233 | } |
1234 | 1234 | ||
@@ -1319,7 +1319,7 @@ v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev) | |||
1319 | FreeMem: | 1319 | FreeMem: |
1320 | kfree(mistat); | 1320 | kfree(mistat); |
1321 | kfree(fcall); | 1321 | kfree(fcall); |
1322 | putname(symname); | 1322 | __putname(symname); |
1323 | 1323 | ||
1324 | return retval; | 1324 | return retval; |
1325 | } | 1325 | } |
diff --git a/fs/Kconfig b/fs/Kconfig index 01a295232f75..7d6ae369ce44 100644 --- a/fs/Kconfig +++ b/fs/Kconfig | |||
@@ -898,6 +898,7 @@ config AFFS_FS | |||
898 | config HFS_FS | 898 | config HFS_FS |
899 | tristate "Apple Macintosh file system support (EXPERIMENTAL)" | 899 | tristate "Apple Macintosh file system support (EXPERIMENTAL)" |
900 | depends on EXPERIMENTAL | 900 | depends on EXPERIMENTAL |
901 | select NLS | ||
901 | help | 902 | help |
902 | If you say Y here, you will be able to mount Macintosh-formatted | 903 | If you say Y here, you will be able to mount Macintosh-formatted |
903 | floppy disks and hard drive partitions with full read-write access. | 904 | floppy disks and hard drive partitions with full read-write access. |
@@ -1050,6 +1051,19 @@ config JFFS2_FS_WRITEBUFFER | |||
1050 | - NOR flash with transparent ECC | 1051 | - NOR flash with transparent ECC |
1051 | - DataFlash | 1052 | - DataFlash |
1052 | 1053 | ||
1054 | config JFFS2_SUMMARY | ||
1055 | bool "JFFS2 summary support (EXPERIMENTAL)" | ||
1056 | depends on JFFS2_FS && EXPERIMENTAL | ||
1057 | default n | ||
1058 | help | ||
1059 | This feature makes it possible to use summary information | ||
1060 | for faster filesystem mount. | ||
1061 | |||
1062 | The summary information can be inserted into a filesystem image | ||
1063 | by the utility 'sumtool'. | ||
1064 | |||
1065 | If unsure, say 'N'. | ||
1066 | |||
1053 | config JFFS2_COMPRESSION_OPTIONS | 1067 | config JFFS2_COMPRESSION_OPTIONS |
1054 | bool "Advanced compression options for JFFS2" | 1068 | bool "Advanced compression options for JFFS2" |
1055 | depends on JFFS2_FS | 1069 | depends on JFFS2_FS |
@@ -1071,10 +1085,10 @@ config JFFS2_ZLIB | |||
1071 | default y | 1085 | default y |
1072 | help | 1086 | help |
1073 | Zlib is designed to be a free, general-purpose, legally unencumbered, | 1087 | Zlib is designed to be a free, general-purpose, legally unencumbered, |
1074 | lossless data-compression library for use on virtually any computer | 1088 | lossless data-compression library for use on virtually any computer |
1075 | hardware and operating system. See <http://www.gzip.org/zlib/> for | 1089 | hardware and operating system. See <http://www.gzip.org/zlib/> for |
1076 | further information. | 1090 | further information. |
1077 | 1091 | ||
1078 | Say 'Y' if unsure. | 1092 | Say 'Y' if unsure. |
1079 | 1093 | ||
1080 | config JFFS2_RTIME | 1094 | config JFFS2_RTIME |
@@ -1096,7 +1110,7 @@ choice | |||
1096 | default JFFS2_CMODE_PRIORITY | 1110 | default JFFS2_CMODE_PRIORITY |
1097 | depends on JFFS2_FS | 1111 | depends on JFFS2_FS |
1098 | help | 1112 | help |
1099 | You can set here the default compression mode of JFFS2 from | 1113 | You can set here the default compression mode of JFFS2 from |
1100 | the available compression modes. Don't touch if unsure. | 1114 | the available compression modes. Don't touch if unsure. |
1101 | 1115 | ||
1102 | config JFFS2_CMODE_NONE | 1116 | config JFFS2_CMODE_NONE |
@@ -1107,13 +1121,13 @@ config JFFS2_CMODE_NONE | |||
1107 | config JFFS2_CMODE_PRIORITY | 1121 | config JFFS2_CMODE_PRIORITY |
1108 | bool "priority" | 1122 | bool "priority" |
1109 | help | 1123 | help |
1110 | Tries the compressors in a predefinied order and chooses the first | 1124 | Tries the compressors in a predefinied order and chooses the first |
1111 | successful one. | 1125 | successful one. |
1112 | 1126 | ||
1113 | config JFFS2_CMODE_SIZE | 1127 | config JFFS2_CMODE_SIZE |
1114 | bool "size (EXPERIMENTAL)" | 1128 | bool "size (EXPERIMENTAL)" |
1115 | help | 1129 | help |
1116 | Tries all compressors and chooses the one which has the smallest | 1130 | Tries all compressors and chooses the one which has the smallest |
1117 | result. | 1131 | result. |
1118 | 1132 | ||
1119 | endchoice | 1133 | endchoice |
diff --git a/fs/Makefile b/fs/Makefile index 1972da186272..4c2655759078 100644 --- a/fs/Makefile +++ b/fs/Makefile | |||
@@ -10,7 +10,7 @@ obj-y := open.o read_write.o file_table.o buffer.o bio.o super.o \ | |||
10 | ioctl.o readdir.o select.o fifo.o locks.o dcache.o inode.o \ | 10 | ioctl.o readdir.o select.o fifo.o locks.o dcache.o inode.o \ |
11 | attr.o bad_inode.o file.o filesystems.o namespace.o aio.o \ | 11 | attr.o bad_inode.o file.o filesystems.o namespace.o aio.o \ |
12 | seq_file.o xattr.o libfs.o fs-writeback.o mpage.o direct-io.o \ | 12 | seq_file.o xattr.o libfs.o fs-writeback.o mpage.o direct-io.o \ |
13 | ioprio.o | 13 | ioprio.o pnode.o |
14 | 14 | ||
15 | obj-$(CONFIG_INOTIFY) += inotify.o | 15 | obj-$(CONFIG_INOTIFY) += inotify.o |
16 | obj-$(CONFIG_EPOLL) += eventpoll.o | 16 | obj-$(CONFIG_EPOLL) += eventpoll.o |
diff --git a/fs/affs/file.c b/fs/affs/file.c index 6744924b6905..f72fb776ecdf 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c | |||
@@ -22,14 +22,13 @@ static int affs_grow_extcache(struct inode *inode, u32 lc_idx); | |||
22 | static struct buffer_head *affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext); | 22 | static struct buffer_head *affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext); |
23 | static inline struct buffer_head *affs_get_extblock(struct inode *inode, u32 ext); | 23 | static inline struct buffer_head *affs_get_extblock(struct inode *inode, u32 ext); |
24 | static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext); | 24 | static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext); |
25 | static ssize_t affs_file_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos); | ||
26 | static int affs_file_open(struct inode *inode, struct file *filp); | 25 | static int affs_file_open(struct inode *inode, struct file *filp); |
27 | static int affs_file_release(struct inode *inode, struct file *filp); | 26 | static int affs_file_release(struct inode *inode, struct file *filp); |
28 | 27 | ||
29 | struct file_operations affs_file_operations = { | 28 | struct file_operations affs_file_operations = { |
30 | .llseek = generic_file_llseek, | 29 | .llseek = generic_file_llseek, |
31 | .read = generic_file_read, | 30 | .read = generic_file_read, |
32 | .write = affs_file_write, | 31 | .write = generic_file_write, |
33 | .mmap = generic_file_mmap, | 32 | .mmap = generic_file_mmap, |
34 | .open = affs_file_open, | 33 | .open = affs_file_open, |
35 | .release = affs_file_release, | 34 | .release = affs_file_release, |
@@ -473,21 +472,6 @@ affs_getemptyblk_ino(struct inode *inode, int block) | |||
473 | return ERR_PTR(err); | 472 | return ERR_PTR(err); |
474 | } | 473 | } |
475 | 474 | ||
476 | static ssize_t | ||
477 | affs_file_write(struct file *file, const char __user *buf, | ||
478 | size_t count, loff_t *ppos) | ||
479 | { | ||
480 | ssize_t retval; | ||
481 | |||
482 | retval = generic_file_write (file, buf, count, ppos); | ||
483 | if (retval >0) { | ||
484 | struct inode *inode = file->f_dentry->d_inode; | ||
485 | inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC; | ||
486 | mark_inode_dirty(inode); | ||
487 | } | ||
488 | return retval; | ||
489 | } | ||
490 | |||
491 | static int | 475 | static int |
492 | affs_do_readpage_ofs(struct file *file, struct page *page, unsigned from, unsigned to) | 476 | affs_do_readpage_ofs(struct file *file, struct page *page, unsigned from, unsigned to) |
493 | { | 477 | { |
diff --git a/fs/affs/super.c b/fs/affs/super.c index 9c3080716c92..aaec015a16e4 100644 --- a/fs/affs/super.c +++ b/fs/affs/super.c | |||
@@ -35,8 +35,7 @@ affs_put_super(struct super_block *sb) | |||
35 | mark_buffer_dirty(sbi->s_root_bh); | 35 | mark_buffer_dirty(sbi->s_root_bh); |
36 | } | 36 | } |
37 | 37 | ||
38 | if (sbi->s_prefix) | 38 | kfree(sbi->s_prefix); |
39 | kfree(sbi->s_prefix); | ||
40 | affs_free_bitmap(sb); | 39 | affs_free_bitmap(sb); |
41 | affs_brelse(sbi->s_root_bh); | 40 | affs_brelse(sbi->s_root_bh); |
42 | kfree(sbi); | 41 | kfree(sbi); |
@@ -198,10 +197,9 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s | |||
198 | *mount_opts |= SF_MUFS; | 197 | *mount_opts |= SF_MUFS; |
199 | break; | 198 | break; |
200 | case Opt_prefix: | 199 | case Opt_prefix: |
201 | if (*prefix) { /* Free any previous prefix */ | 200 | /* Free any previous prefix */ |
202 | kfree(*prefix); | 201 | kfree(*prefix); |
203 | *prefix = NULL; | 202 | *prefix = NULL; |
204 | } | ||
205 | *prefix = match_strdup(&args[0]); | 203 | *prefix = match_strdup(&args[0]); |
206 | if (!*prefix) | 204 | if (!*prefix) |
207 | return 0; | 205 | return 0; |
@@ -462,11 +460,9 @@ got_root: | |||
462 | out_error: | 460 | out_error: |
463 | if (root_inode) | 461 | if (root_inode) |
464 | iput(root_inode); | 462 | iput(root_inode); |
465 | if (sbi->s_bitmap) | 463 | kfree(sbi->s_bitmap); |
466 | kfree(sbi->s_bitmap); | ||
467 | affs_brelse(root_bh); | 464 | affs_brelse(root_bh); |
468 | if (sbi->s_prefix) | 465 | kfree(sbi->s_prefix); |
469 | kfree(sbi->s_prefix); | ||
470 | kfree(sbi); | 466 | kfree(sbi); |
471 | sb->s_fs_info = NULL; | 467 | sb->s_fs_info = NULL; |
472 | return -EINVAL; | 468 | return -EINVAL; |
diff --git a/fs/afs/file.c b/fs/afs/file.c index 4975c9c193dd..150b19227922 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c | |||
@@ -31,24 +31,10 @@ static int afs_file_readpage(struct file *file, struct page *page); | |||
31 | static int afs_file_invalidatepage(struct page *page, unsigned long offset); | 31 | static int afs_file_invalidatepage(struct page *page, unsigned long offset); |
32 | static int afs_file_releasepage(struct page *page, gfp_t gfp_flags); | 32 | static int afs_file_releasepage(struct page *page, gfp_t gfp_flags); |
33 | 33 | ||
34 | static ssize_t afs_file_write(struct file *file, const char __user *buf, | ||
35 | size_t size, loff_t *off); | ||
36 | |||
37 | struct inode_operations afs_file_inode_operations = { | 34 | struct inode_operations afs_file_inode_operations = { |
38 | .getattr = afs_inode_getattr, | 35 | .getattr = afs_inode_getattr, |
39 | }; | 36 | }; |
40 | 37 | ||
41 | struct file_operations afs_file_file_operations = { | ||
42 | .read = generic_file_read, | ||
43 | .write = afs_file_write, | ||
44 | .mmap = generic_file_mmap, | ||
45 | #if 0 | ||
46 | .open = afs_file_open, | ||
47 | .release = afs_file_release, | ||
48 | .fsync = afs_file_fsync, | ||
49 | #endif | ||
50 | }; | ||
51 | |||
52 | struct address_space_operations afs_fs_aops = { | 38 | struct address_space_operations afs_fs_aops = { |
53 | .readpage = afs_file_readpage, | 39 | .readpage = afs_file_readpage, |
54 | .sync_page = block_sync_page, | 40 | .sync_page = block_sync_page, |
@@ -59,22 +45,6 @@ struct address_space_operations afs_fs_aops = { | |||
59 | 45 | ||
60 | /*****************************************************************************/ | 46 | /*****************************************************************************/ |
61 | /* | 47 | /* |
62 | * AFS file write | ||
63 | */ | ||
64 | static ssize_t afs_file_write(struct file *file, const char __user *buf, | ||
65 | size_t size, loff_t *off) | ||
66 | { | ||
67 | struct afs_vnode *vnode; | ||
68 | |||
69 | vnode = AFS_FS_I(file->f_dentry->d_inode); | ||
70 | if (vnode->flags & AFS_VNODE_DELETED) | ||
71 | return -ESTALE; | ||
72 | |||
73 | return -EIO; | ||
74 | } /* end afs_file_write() */ | ||
75 | |||
76 | /*****************************************************************************/ | ||
77 | /* | ||
78 | * deal with notification that a page was read from the cache | 48 | * deal with notification that a page was read from the cache |
79 | */ | 49 | */ |
80 | #ifdef AFS_CACHING_SUPPORT | 50 | #ifdef AFS_CACHING_SUPPORT |
@@ -295,8 +265,7 @@ static int afs_file_releasepage(struct page *page, gfp_t gfp_flags) | |||
295 | set_page_private(page, 0); | 265 | set_page_private(page, 0); |
296 | ClearPagePrivate(page); | 266 | ClearPagePrivate(page); |
297 | 267 | ||
298 | if (pageio) | 268 | kfree(pageio); |
299 | kfree(pageio); | ||
300 | } | 269 | } |
301 | 270 | ||
302 | _leave(" = 0"); | 271 | _leave(" = 0"); |
diff --git a/fs/afs/inode.c b/fs/afs/inode.c index c476fde33fbc..4ebb30a50ed5 100644 --- a/fs/afs/inode.c +++ b/fs/afs/inode.c | |||
@@ -49,7 +49,7 @@ static int afs_inode_map_status(struct afs_vnode *vnode) | |||
49 | case AFS_FTYPE_FILE: | 49 | case AFS_FTYPE_FILE: |
50 | inode->i_mode = S_IFREG | vnode->status.mode; | 50 | inode->i_mode = S_IFREG | vnode->status.mode; |
51 | inode->i_op = &afs_file_inode_operations; | 51 | inode->i_op = &afs_file_inode_operations; |
52 | inode->i_fop = &afs_file_file_operations; | 52 | inode->i_fop = &generic_ro_fops; |
53 | break; | 53 | break; |
54 | case AFS_FTYPE_DIR: | 54 | case AFS_FTYPE_DIR: |
55 | inode->i_mode = S_IFDIR | vnode->status.mode; | 55 | inode->i_mode = S_IFDIR | vnode->status.mode; |
diff --git a/fs/afs/internal.h b/fs/afs/internal.h index f09860b45c1a..ab8f87c66319 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h | |||
@@ -71,7 +71,6 @@ extern struct file_operations afs_dir_file_operations; | |||
71 | */ | 71 | */ |
72 | extern struct address_space_operations afs_fs_aops; | 72 | extern struct address_space_operations afs_fs_aops; |
73 | extern struct inode_operations afs_file_inode_operations; | 73 | extern struct inode_operations afs_file_inode_operations; |
74 | extern struct file_operations afs_file_file_operations; | ||
75 | 74 | ||
76 | #ifdef AFS_CACHING_SUPPORT | 75 | #ifdef AFS_CACHING_SUPPORT |
77 | extern int afs_cache_get_page_cookie(struct page *page, | 76 | extern int afs_cache_get_page_cookie(struct page *page, |
@@ -42,8 +42,9 @@ | |||
42 | #endif | 42 | #endif |
43 | 43 | ||
44 | /*------ sysctl variables----*/ | 44 | /*------ sysctl variables----*/ |
45 | atomic_t aio_nr = ATOMIC_INIT(0); /* current system wide number of aio requests */ | 45 | static DEFINE_SPINLOCK(aio_nr_lock); |
46 | unsigned aio_max_nr = 0x10000; /* system wide maximum number of aio requests */ | 46 | unsigned long aio_nr; /* current system wide number of aio requests */ |
47 | unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */ | ||
47 | /*----end sysctl variables---*/ | 48 | /*----end sysctl variables---*/ |
48 | 49 | ||
49 | static kmem_cache_t *kiocb_cachep; | 50 | static kmem_cache_t *kiocb_cachep; |
@@ -208,7 +209,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events) | |||
208 | return ERR_PTR(-EINVAL); | 209 | return ERR_PTR(-EINVAL); |
209 | } | 210 | } |
210 | 211 | ||
211 | if (nr_events > aio_max_nr) | 212 | if ((unsigned long)nr_events > aio_max_nr) |
212 | return ERR_PTR(-EAGAIN); | 213 | return ERR_PTR(-EAGAIN); |
213 | 214 | ||
214 | ctx = kmem_cache_alloc(kioctx_cachep, GFP_KERNEL); | 215 | ctx = kmem_cache_alloc(kioctx_cachep, GFP_KERNEL); |
@@ -233,8 +234,14 @@ static struct kioctx *ioctx_alloc(unsigned nr_events) | |||
233 | goto out_freectx; | 234 | goto out_freectx; |
234 | 235 | ||
235 | /* limit the number of system wide aios */ | 236 | /* limit the number of system wide aios */ |
236 | atomic_add(ctx->max_reqs, &aio_nr); /* undone by __put_ioctx */ | 237 | spin_lock(&aio_nr_lock); |
237 | if (unlikely(atomic_read(&aio_nr) > aio_max_nr)) | 238 | if (aio_nr + ctx->max_reqs > aio_max_nr || |
239 | aio_nr + ctx->max_reqs < aio_nr) | ||
240 | ctx->max_reqs = 0; | ||
241 | else | ||
242 | aio_nr += ctx->max_reqs; | ||
243 | spin_unlock(&aio_nr_lock); | ||
244 | if (ctx->max_reqs == 0) | ||
238 | goto out_cleanup; | 245 | goto out_cleanup; |
239 | 246 | ||
240 | /* now link into global list. kludge. FIXME */ | 247 | /* now link into global list. kludge. FIXME */ |
@@ -248,8 +255,6 @@ static struct kioctx *ioctx_alloc(unsigned nr_events) | |||
248 | return ctx; | 255 | return ctx; |
249 | 256 | ||
250 | out_cleanup: | 257 | out_cleanup: |
251 | atomic_sub(ctx->max_reqs, &aio_nr); | ||
252 | ctx->max_reqs = 0; /* prevent __put_ioctx from sub'ing aio_nr */ | ||
253 | __put_ioctx(ctx); | 258 | __put_ioctx(ctx); |
254 | return ERR_PTR(-EAGAIN); | 259 | return ERR_PTR(-EAGAIN); |
255 | 260 | ||
@@ -374,7 +379,12 @@ void fastcall __put_ioctx(struct kioctx *ctx) | |||
374 | pr_debug("__put_ioctx: freeing %p\n", ctx); | 379 | pr_debug("__put_ioctx: freeing %p\n", ctx); |
375 | kmem_cache_free(kioctx_cachep, ctx); | 380 | kmem_cache_free(kioctx_cachep, ctx); |
376 | 381 | ||
377 | atomic_sub(nr_events, &aio_nr); | 382 | if (nr_events) { |
383 | spin_lock(&aio_nr_lock); | ||
384 | BUG_ON(aio_nr - nr_events > aio_nr); | ||
385 | aio_nr -= nr_events; | ||
386 | spin_unlock(&aio_nr_lock); | ||
387 | } | ||
378 | } | 388 | } |
379 | 389 | ||
380 | /* aio_get_req | 390 | /* aio_get_req |
@@ -1258,8 +1268,9 @@ asmlinkage long sys_io_setup(unsigned nr_events, aio_context_t __user *ctxp) | |||
1258 | goto out; | 1268 | goto out; |
1259 | 1269 | ||
1260 | ret = -EINVAL; | 1270 | ret = -EINVAL; |
1261 | if (unlikely(ctx || (int)nr_events <= 0)) { | 1271 | if (unlikely(ctx || nr_events == 0)) { |
1262 | pr_debug("EINVAL: io_setup: ctx or nr_events > max\n"); | 1272 | pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n", |
1273 | ctx, nr_events); | ||
1263 | goto out; | 1274 | goto out; |
1264 | } | 1275 | } |
1265 | 1276 | ||
diff --git a/fs/autofs/waitq.c b/fs/autofs/waitq.c index 1fcaa1568541..633f628005b4 100644 --- a/fs/autofs/waitq.c +++ b/fs/autofs/waitq.c | |||
@@ -150,10 +150,8 @@ int autofs_wait(struct autofs_sb_info *sbi, struct qstr *name) | |||
150 | if ( sbi->catatonic ) { | 150 | if ( sbi->catatonic ) { |
151 | /* We might have slept, so check again for catatonic mode */ | 151 | /* We might have slept, so check again for catatonic mode */ |
152 | wq->status = -ENOENT; | 152 | wq->status = -ENOENT; |
153 | if ( wq->name ) { | 153 | kfree(wq->name); |
154 | kfree(wq->name); | 154 | wq->name = NULL; |
155 | wq->name = NULL; | ||
156 | } | ||
157 | } | 155 | } |
158 | 156 | ||
159 | if ( wq->name ) { | 157 | if ( wq->name ) { |
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c index 0a3c05d10167..818b37be5153 100644 --- a/fs/autofs4/inode.c +++ b/fs/autofs4/inode.c | |||
@@ -22,10 +22,8 @@ | |||
22 | 22 | ||
23 | static void ino_lnkfree(struct autofs_info *ino) | 23 | static void ino_lnkfree(struct autofs_info *ino) |
24 | { | 24 | { |
25 | if (ino->u.symlink) { | 25 | kfree(ino->u.symlink); |
26 | kfree(ino->u.symlink); | 26 | ino->u.symlink = NULL; |
27 | ino->u.symlink = NULL; | ||
28 | } | ||
29 | } | 27 | } |
30 | 28 | ||
31 | struct autofs_info *autofs4_init_ino(struct autofs_info *ino, | 29 | struct autofs_info *autofs4_init_ino(struct autofs_info *ino, |
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c index 3df86285a1c7..394ff36ef8f1 100644 --- a/fs/autofs4/waitq.c +++ b/fs/autofs4/waitq.c | |||
@@ -243,10 +243,8 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, | |||
243 | if ( sbi->catatonic ) { | 243 | if ( sbi->catatonic ) { |
244 | /* We might have slept, so check again for catatonic mode */ | 244 | /* We might have slept, so check again for catatonic mode */ |
245 | wq->status = -ENOENT; | 245 | wq->status = -ENOENT; |
246 | if ( wq->name ) { | 246 | kfree(wq->name); |
247 | kfree(wq->name); | 247 | wq->name = NULL; |
248 | wq->name = NULL; | ||
249 | } | ||
250 | } | 248 | } |
251 | 249 | ||
252 | if ( wq->name ) { | 250 | if ( wq->name ) { |
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index e0a6025f1d06..2d365cb8eec6 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c | |||
@@ -73,12 +73,6 @@ static struct inode_operations befs_dir_inode_operations = { | |||
73 | .lookup = befs_lookup, | 73 | .lookup = befs_lookup, |
74 | }; | 74 | }; |
75 | 75 | ||
76 | static struct file_operations befs_file_operations = { | ||
77 | .llseek = default_llseek, | ||
78 | .read = generic_file_read, | ||
79 | .mmap = generic_file_readonly_mmap, | ||
80 | }; | ||
81 | |||
82 | static struct address_space_operations befs_aops = { | 76 | static struct address_space_operations befs_aops = { |
83 | .readpage = befs_readpage, | 77 | .readpage = befs_readpage, |
84 | .sync_page = block_sync_page, | 78 | .sync_page = block_sync_page, |
@@ -398,7 +392,7 @@ befs_read_inode(struct inode *inode) | |||
398 | inode->i_mapping->a_ops = &befs_aops; | 392 | inode->i_mapping->a_ops = &befs_aops; |
399 | 393 | ||
400 | if (S_ISREG(inode->i_mode)) { | 394 | if (S_ISREG(inode->i_mode)) { |
401 | inode->i_fop = &befs_file_operations; | 395 | inode->i_fop = &generic_ro_fops; |
402 | } else if (S_ISDIR(inode->i_mode)) { | 396 | } else if (S_ISDIR(inode->i_mode)) { |
403 | inode->i_op = &befs_dir_inode_operations; | 397 | inode->i_op = &befs_dir_inode_operations; |
404 | inode->i_fop = &befs_dir_operations; | 398 | inode->i_fop = &befs_dir_operations; |
@@ -731,20 +725,16 @@ parse_options(char *options, befs_mount_options * opts) | |||
731 | static void | 725 | static void |
732 | befs_put_super(struct super_block *sb) | 726 | befs_put_super(struct super_block *sb) |
733 | { | 727 | { |
734 | if (BEFS_SB(sb)->mount_opts.iocharset) { | 728 | kfree(BEFS_SB(sb)->mount_opts.iocharset); |
735 | kfree(BEFS_SB(sb)->mount_opts.iocharset); | 729 | BEFS_SB(sb)->mount_opts.iocharset = NULL; |
736 | BEFS_SB(sb)->mount_opts.iocharset = NULL; | ||
737 | } | ||
738 | 730 | ||
739 | if (BEFS_SB(sb)->nls) { | 731 | if (BEFS_SB(sb)->nls) { |
740 | unload_nls(BEFS_SB(sb)->nls); | 732 | unload_nls(BEFS_SB(sb)->nls); |
741 | BEFS_SB(sb)->nls = NULL; | 733 | BEFS_SB(sb)->nls = NULL; |
742 | } | 734 | } |
743 | 735 | ||
744 | if (sb->s_fs_info) { | 736 | kfree(sb->s_fs_info); |
745 | kfree(sb->s_fs_info); | 737 | sb->s_fs_info = NULL; |
746 | sb->s_fs_info = NULL; | ||
747 | } | ||
748 | return; | 738 | return; |
749 | } | 739 | } |
750 | 740 | ||
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 6fa6adc40972..f36f2210204f 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c | |||
@@ -1006,8 +1006,7 @@ out_free_dentry: | |||
1006 | if (interpreter) | 1006 | if (interpreter) |
1007 | fput(interpreter); | 1007 | fput(interpreter); |
1008 | out_free_interp: | 1008 | out_free_interp: |
1009 | if (elf_interpreter) | 1009 | kfree(elf_interpreter); |
1010 | kfree(elf_interpreter); | ||
1011 | out_free_file: | 1010 | out_free_file: |
1012 | sys_close(elf_exec_fileno); | 1011 | sys_close(elf_exec_fileno); |
1013 | out_free_fh: | 1012 | out_free_fh: |
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index dda87c4c82a3..e0344f69c79d 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c | |||
@@ -411,16 +411,11 @@ error: | |||
411 | allow_write_access(interpreter); | 411 | allow_write_access(interpreter); |
412 | fput(interpreter); | 412 | fput(interpreter); |
413 | } | 413 | } |
414 | if (interpreter_name) | 414 | kfree(interpreter_name); |
415 | kfree(interpreter_name); | 415 | kfree(exec_params.phdrs); |
416 | if (exec_params.phdrs) | 416 | kfree(exec_params.loadmap); |
417 | kfree(exec_params.phdrs); | 417 | kfree(interp_params.phdrs); |
418 | if (exec_params.loadmap) | 418 | kfree(interp_params.loadmap); |
419 | kfree(exec_params.loadmap); | ||
420 | if (interp_params.phdrs) | ||
421 | kfree(interp_params.phdrs); | ||
422 | if (interp_params.loadmap) | ||
423 | kfree(interp_params.loadmap); | ||
424 | return retval; | 419 | return retval; |
425 | 420 | ||
426 | /* unrecoverable error - kill the process */ | 421 | /* unrecoverable error - kill the process */ |
diff --git a/fs/buffer.c b/fs/buffer.c index 35fa34977e81..5287be18633b 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
@@ -396,7 +396,7 @@ asmlinkage long sys_fdatasync(unsigned int fd) | |||
396 | * private_lock is contended then so is mapping->tree_lock). | 396 | * private_lock is contended then so is mapping->tree_lock). |
397 | */ | 397 | */ |
398 | static struct buffer_head * | 398 | static struct buffer_head * |
399 | __find_get_block_slow(struct block_device *bdev, sector_t block, int unused) | 399 | __find_get_block_slow(struct block_device *bdev, sector_t block) |
400 | { | 400 | { |
401 | struct inode *bd_inode = bdev->bd_inode; | 401 | struct inode *bd_inode = bdev->bd_inode; |
402 | struct address_space *bd_mapping = bd_inode->i_mapping; | 402 | struct address_space *bd_mapping = bd_inode->i_mapping; |
@@ -1438,7 +1438,7 @@ __find_get_block(struct block_device *bdev, sector_t block, int size) | |||
1438 | struct buffer_head *bh = lookup_bh_lru(bdev, block, size); | 1438 | struct buffer_head *bh = lookup_bh_lru(bdev, block, size); |
1439 | 1439 | ||
1440 | if (bh == NULL) { | 1440 | if (bh == NULL) { |
1441 | bh = __find_get_block_slow(bdev, block, size); | 1441 | bh = __find_get_block_slow(bdev, block); |
1442 | if (bh) | 1442 | if (bh) |
1443 | bh_lru_install(bh); | 1443 | bh_lru_install(bh); |
1444 | } | 1444 | } |
@@ -1705,7 +1705,7 @@ void unmap_underlying_metadata(struct block_device *bdev, sector_t block) | |||
1705 | 1705 | ||
1706 | might_sleep(); | 1706 | might_sleep(); |
1707 | 1707 | ||
1708 | old_bh = __find_get_block_slow(bdev, block, 0); | 1708 | old_bh = __find_get_block_slow(bdev, block); |
1709 | if (old_bh) { | 1709 | if (old_bh) { |
1710 | clear_buffer_dirty(old_bh); | 1710 | clear_buffer_dirty(old_bh); |
1711 | wait_on_buffer(old_bh); | 1711 | wait_on_buffer(old_bh); |
diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c index 98539e2afe81..086ae8f4a207 100644 --- a/fs/cifs/asn1.c +++ b/fs/cifs/asn1.c | |||
@@ -553,8 +553,7 @@ decode_negTokenInit(unsigned char *security_blob, int length, | |||
553 | *(oid + 3))); | 553 | *(oid + 3))); |
554 | rc = compare_oid(oid, oidlen, NTLMSSP_OID, | 554 | rc = compare_oid(oid, oidlen, NTLMSSP_OID, |
555 | NTLMSSP_OID_LEN); | 555 | NTLMSSP_OID_LEN); |
556 | if(oid) | 556 | kfree(oid); |
557 | kfree(oid); | ||
558 | if (rc) | 557 | if (rc) |
559 | use_ntlmssp = TRUE; | 558 | use_ntlmssp = TRUE; |
560 | } | 559 | } |
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index d74367a08d51..450ab75d6546 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -1265,8 +1265,7 @@ connect_to_dfs_path(int xid, struct cifsSesInfo *pSesInfo, | |||
1265 | the helper that resolves tcp names, mount to it, try to | 1265 | the helper that resolves tcp names, mount to it, try to |
1266 | tcon to it unmount it if fail */ | 1266 | tcon to it unmount it if fail */ |
1267 | 1267 | ||
1268 | if(referrals) | 1268 | kfree(referrals); |
1269 | kfree(referrals); | ||
1270 | 1269 | ||
1271 | return rc; | 1270 | return rc; |
1272 | } | 1271 | } |
@@ -1535,10 +1534,8 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, | |||
1535 | 1534 | ||
1536 | memset(&volume_info,0,sizeof(struct smb_vol)); | 1535 | memset(&volume_info,0,sizeof(struct smb_vol)); |
1537 | if (cifs_parse_mount_options(mount_data, devname, &volume_info)) { | 1536 | if (cifs_parse_mount_options(mount_data, devname, &volume_info)) { |
1538 | if(volume_info.UNC) | 1537 | kfree(volume_info.UNC); |
1539 | kfree(volume_info.UNC); | 1538 | kfree(volume_info.password); |
1540 | if(volume_info.password) | ||
1541 | kfree(volume_info.password); | ||
1542 | FreeXid(xid); | 1539 | FreeXid(xid); |
1543 | return -EINVAL; | 1540 | return -EINVAL; |
1544 | } | 1541 | } |
@@ -1551,10 +1548,8 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, | |||
1551 | cifserror("No username specified "); | 1548 | cifserror("No username specified "); |
1552 | /* In userspace mount helper we can get user name from alternate | 1549 | /* In userspace mount helper we can get user name from alternate |
1553 | locations such as env variables and files on disk */ | 1550 | locations such as env variables and files on disk */ |
1554 | if(volume_info.UNC) | 1551 | kfree(volume_info.UNC); |
1555 | kfree(volume_info.UNC); | 1552 | kfree(volume_info.password); |
1556 | if(volume_info.password) | ||
1557 | kfree(volume_info.password); | ||
1558 | FreeXid(xid); | 1553 | FreeXid(xid); |
1559 | return -EINVAL; | 1554 | return -EINVAL; |
1560 | } | 1555 | } |
@@ -1573,10 +1568,8 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, | |||
1573 | 1568 | ||
1574 | if(rc <= 0) { | 1569 | if(rc <= 0) { |
1575 | /* we failed translating address */ | 1570 | /* we failed translating address */ |
1576 | if(volume_info.UNC) | 1571 | kfree(volume_info.UNC); |
1577 | kfree(volume_info.UNC); | 1572 | kfree(volume_info.password); |
1578 | if(volume_info.password) | ||
1579 | kfree(volume_info.password); | ||
1580 | FreeXid(xid); | 1573 | FreeXid(xid); |
1581 | return -EINVAL; | 1574 | return -EINVAL; |
1582 | } | 1575 | } |
@@ -1587,19 +1580,15 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, | |||
1587 | } else if (volume_info.UNCip){ | 1580 | } else if (volume_info.UNCip){ |
1588 | /* BB using ip addr as server name connect to the DFS root below */ | 1581 | /* BB using ip addr as server name connect to the DFS root below */ |
1589 | cERROR(1,("Connecting to DFS root not implemented yet")); | 1582 | cERROR(1,("Connecting to DFS root not implemented yet")); |
1590 | if(volume_info.UNC) | 1583 | kfree(volume_info.UNC); |
1591 | kfree(volume_info.UNC); | 1584 | kfree(volume_info.password); |
1592 | if(volume_info.password) | ||
1593 | kfree(volume_info.password); | ||
1594 | FreeXid(xid); | 1585 | FreeXid(xid); |
1595 | return -EINVAL; | 1586 | return -EINVAL; |
1596 | } else /* which servers DFS root would we conect to */ { | 1587 | } else /* which servers DFS root would we conect to */ { |
1597 | cERROR(1, | 1588 | cERROR(1, |
1598 | ("CIFS mount error: No UNC path (e.g. -o unc=//192.168.1.100/public) specified ")); | 1589 | ("CIFS mount error: No UNC path (e.g. -o unc=//192.168.1.100/public) specified ")); |
1599 | if(volume_info.UNC) | 1590 | kfree(volume_info.UNC); |
1600 | kfree(volume_info.UNC); | 1591 | kfree(volume_info.password); |
1601 | if(volume_info.password) | ||
1602 | kfree(volume_info.password); | ||
1603 | FreeXid(xid); | 1592 | FreeXid(xid); |
1604 | return -EINVAL; | 1593 | return -EINVAL; |
1605 | } | 1594 | } |
@@ -1612,10 +1601,8 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, | |||
1612 | cifs_sb->local_nls = load_nls(volume_info.iocharset); | 1601 | cifs_sb->local_nls = load_nls(volume_info.iocharset); |
1613 | if(cifs_sb->local_nls == NULL) { | 1602 | if(cifs_sb->local_nls == NULL) { |
1614 | cERROR(1,("CIFS mount error: iocharset %s not found",volume_info.iocharset)); | 1603 | cERROR(1,("CIFS mount error: iocharset %s not found",volume_info.iocharset)); |
1615 | if(volume_info.UNC) | 1604 | kfree(volume_info.UNC); |
1616 | kfree(volume_info.UNC); | 1605 | kfree(volume_info.password); |
1617 | if(volume_info.password) | ||
1618 | kfree(volume_info.password); | ||
1619 | FreeXid(xid); | 1606 | FreeXid(xid); |
1620 | return -ELIBACC; | 1607 | return -ELIBACC; |
1621 | } | 1608 | } |
@@ -1630,10 +1617,8 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, | |||
1630 | &sin_server6.sin6_addr, | 1617 | &sin_server6.sin6_addr, |
1631 | volume_info.username, &srvTcp); | 1618 | volume_info.username, &srvTcp); |
1632 | else { | 1619 | else { |
1633 | if(volume_info.UNC) | 1620 | kfree(volume_info.UNC); |
1634 | kfree(volume_info.UNC); | 1621 | kfree(volume_info.password); |
1635 | if(volume_info.password) | ||
1636 | kfree(volume_info.password); | ||
1637 | FreeXid(xid); | 1622 | FreeXid(xid); |
1638 | return -EINVAL; | 1623 | return -EINVAL; |
1639 | } | 1624 | } |
@@ -1654,10 +1639,8 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, | |||
1654 | ("Error connecting to IPv4 socket. Aborting operation")); | 1639 | ("Error connecting to IPv4 socket. Aborting operation")); |
1655 | if(csocket != NULL) | 1640 | if(csocket != NULL) |
1656 | sock_release(csocket); | 1641 | sock_release(csocket); |
1657 | if(volume_info.UNC) | 1642 | kfree(volume_info.UNC); |
1658 | kfree(volume_info.UNC); | 1643 | kfree(volume_info.password); |
1659 | if(volume_info.password) | ||
1660 | kfree(volume_info.password); | ||
1661 | FreeXid(xid); | 1644 | FreeXid(xid); |
1662 | return rc; | 1645 | return rc; |
1663 | } | 1646 | } |
@@ -1666,10 +1649,8 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, | |||
1666 | if (srvTcp == NULL) { | 1649 | if (srvTcp == NULL) { |
1667 | rc = -ENOMEM; | 1650 | rc = -ENOMEM; |
1668 | sock_release(csocket); | 1651 | sock_release(csocket); |
1669 | if(volume_info.UNC) | 1652 | kfree(volume_info.UNC); |
1670 | kfree(volume_info.UNC); | 1653 | kfree(volume_info.password); |
1671 | if(volume_info.password) | ||
1672 | kfree(volume_info.password); | ||
1673 | FreeXid(xid); | 1654 | FreeXid(xid); |
1674 | return rc; | 1655 | return rc; |
1675 | } else { | 1656 | } else { |
@@ -1692,10 +1673,8 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, | |||
1692 | if(rc < 0) { | 1673 | if(rc < 0) { |
1693 | rc = -ENOMEM; | 1674 | rc = -ENOMEM; |
1694 | sock_release(csocket); | 1675 | sock_release(csocket); |
1695 | if(volume_info.UNC) | 1676 | kfree(volume_info.UNC); |
1696 | kfree(volume_info.UNC); | 1677 | kfree(volume_info.password); |
1697 | if(volume_info.password) | ||
1698 | kfree(volume_info.password); | ||
1699 | FreeXid(xid); | 1678 | FreeXid(xid); |
1700 | return rc; | 1679 | return rc; |
1701 | } | 1680 | } |
@@ -1710,8 +1689,7 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, | |||
1710 | if (existingCifsSes) { | 1689 | if (existingCifsSes) { |
1711 | pSesInfo = existingCifsSes; | 1690 | pSesInfo = existingCifsSes; |
1712 | cFYI(1, ("Existing smb sess found ")); | 1691 | cFYI(1, ("Existing smb sess found ")); |
1713 | if(volume_info.password) | 1692 | kfree(volume_info.password); |
1714 | kfree(volume_info.password); | ||
1715 | /* volume_info.UNC freed at end of function */ | 1693 | /* volume_info.UNC freed at end of function */ |
1716 | } else if (!rc) { | 1694 | } else if (!rc) { |
1717 | cFYI(1, ("Existing smb sess not found ")); | 1695 | cFYI(1, ("Existing smb sess not found ")); |
@@ -1741,8 +1719,7 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, | |||
1741 | if(!rc) | 1719 | if(!rc) |
1742 | atomic_inc(&srvTcp->socketUseCount); | 1720 | atomic_inc(&srvTcp->socketUseCount); |
1743 | } else | 1721 | } else |
1744 | if(volume_info.password) | 1722 | kfree(volume_info.password); |
1745 | kfree(volume_info.password); | ||
1746 | } | 1723 | } |
1747 | 1724 | ||
1748 | /* search for existing tcon to this server share */ | 1725 | /* search for existing tcon to this server share */ |
@@ -1821,8 +1798,7 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, | |||
1821 | "", cifs_sb->local_nls, | 1798 | "", cifs_sb->local_nls, |
1822 | cifs_sb->mnt_cifs_flags & | 1799 | cifs_sb->mnt_cifs_flags & |
1823 | CIFS_MOUNT_MAP_SPECIAL_CHR); | 1800 | CIFS_MOUNT_MAP_SPECIAL_CHR); |
1824 | if(volume_info.UNC) | 1801 | kfree(volume_info.UNC); |
1825 | kfree(volume_info.UNC); | ||
1826 | FreeXid(xid); | 1802 | FreeXid(xid); |
1827 | return -ENODEV; | 1803 | return -ENODEV; |
1828 | } else { | 1804 | } else { |
@@ -1925,8 +1901,7 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, | |||
1925 | (in which case it is not needed anymore) but when new sesion is created | 1901 | (in which case it is not needed anymore) but when new sesion is created |
1926 | the password ptr is put in the new session structure (in which case the | 1902 | the password ptr is put in the new session structure (in which case the |
1927 | password will be freed at unmount time) */ | 1903 | password will be freed at unmount time) */ |
1928 | if(volume_info.UNC) | 1904 | kfree(volume_info.UNC); |
1929 | kfree(volume_info.UNC); | ||
1930 | FreeXid(xid); | 1905 | FreeXid(xid); |
1931 | return rc; | 1906 | return rc; |
1932 | } | 1907 | } |
@@ -3283,8 +3258,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses, | |||
3283 | if ((bcc_ptr + (2 * length)) - | 3258 | if ((bcc_ptr + (2 * length)) - |
3284 | pByteArea(smb_buffer_response) <= | 3259 | pByteArea(smb_buffer_response) <= |
3285 | BCC(smb_buffer_response)) { | 3260 | BCC(smb_buffer_response)) { |
3286 | if(tcon->nativeFileSystem) | 3261 | kfree(tcon->nativeFileSystem); |
3287 | kfree(tcon->nativeFileSystem); | ||
3288 | tcon->nativeFileSystem = | 3262 | tcon->nativeFileSystem = |
3289 | kzalloc(length + 2, GFP_KERNEL); | 3263 | kzalloc(length + 2, GFP_KERNEL); |
3290 | cifs_strfromUCS_le(tcon->nativeFileSystem, | 3264 | cifs_strfromUCS_le(tcon->nativeFileSystem, |
@@ -3301,8 +3275,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses, | |||
3301 | if ((bcc_ptr + length) - | 3275 | if ((bcc_ptr + length) - |
3302 | pByteArea(smb_buffer_response) <= | 3276 | pByteArea(smb_buffer_response) <= |
3303 | BCC(smb_buffer_response)) { | 3277 | BCC(smb_buffer_response)) { |
3304 | if(tcon->nativeFileSystem) | 3278 | kfree(tcon->nativeFileSystem); |
3305 | kfree(tcon->nativeFileSystem); | ||
3306 | tcon->nativeFileSystem = | 3279 | tcon->nativeFileSystem = |
3307 | kzalloc(length + 1, GFP_KERNEL); | 3280 | kzalloc(length + 1, GFP_KERNEL); |
3308 | strncpy(tcon->nativeFileSystem, bcc_ptr, | 3281 | strncpy(tcon->nativeFileSystem, bcc_ptr, |
diff --git a/fs/cifs/link.c b/fs/cifs/link.c index b43e071fe110..0f99aae33162 100644 --- a/fs/cifs/link.c +++ b/fs/cifs/link.c | |||
@@ -84,10 +84,8 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode, | |||
84 | cifsInode->time = 0; /* will force revalidate to go get info when needed */ | 84 | cifsInode->time = 0; /* will force revalidate to go get info when needed */ |
85 | 85 | ||
86 | cifs_hl_exit: | 86 | cifs_hl_exit: |
87 | if (fromName) | 87 | kfree(fromName); |
88 | kfree(fromName); | 88 | kfree(toName); |
89 | if (toName) | ||
90 | kfree(toName); | ||
91 | FreeXid(xid); | 89 | FreeXid(xid); |
92 | return rc; | 90 | return rc; |
93 | } | 91 | } |
@@ -206,8 +204,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname) | |||
206 | } | 204 | } |
207 | } | 205 | } |
208 | 206 | ||
209 | if (full_path) | 207 | kfree(full_path); |
210 | kfree(full_path); | ||
211 | FreeXid(xid); | 208 | FreeXid(xid); |
212 | return rc; | 209 | return rc; |
213 | } | 210 | } |
@@ -253,8 +250,7 @@ cifs_readlink(struct dentry *direntry, char __user *pBuffer, int buflen) | |||
253 | len = buflen; | 250 | len = buflen; |
254 | tmpbuffer = kmalloc(len,GFP_KERNEL); | 251 | tmpbuffer = kmalloc(len,GFP_KERNEL); |
255 | if(tmpbuffer == NULL) { | 252 | if(tmpbuffer == NULL) { |
256 | if (full_path) | 253 | kfree(full_path); |
257 | kfree(full_path); | ||
258 | FreeXid(xid); | 254 | FreeXid(xid); |
259 | return -ENOMEM; | 255 | return -ENOMEM; |
260 | } | 256 | } |
@@ -303,8 +299,7 @@ cifs_readlink(struct dentry *direntry, char __user *pBuffer, int buflen) | |||
303 | strncpy(tmpbuffer, referrals, len-1); | 299 | strncpy(tmpbuffer, referrals, len-1); |
304 | } | 300 | } |
305 | } | 301 | } |
306 | if(referrals) | 302 | kfree(referrals); |
307 | kfree(referrals); | ||
308 | kfree(tmp_path); | 303 | kfree(tmp_path); |
309 | } | 304 | } |
310 | /* BB add code like else decode referrals then memcpy to | 305 | /* BB add code like else decode referrals then memcpy to |
@@ -323,12 +318,8 @@ cifs_readlink(struct dentry *direntry, char __user *pBuffer, int buflen) | |||
323 | rc)); | 318 | rc)); |
324 | } | 319 | } |
325 | 320 | ||
326 | if (tmpbuffer) { | 321 | kfree(tmpbuffer); |
327 | kfree(tmpbuffer); | 322 | kfree(full_path); |
328 | } | ||
329 | if (full_path) { | ||
330 | kfree(full_path); | ||
331 | } | ||
332 | FreeXid(xid); | 323 | FreeXid(xid); |
333 | return rc; | 324 | return rc; |
334 | } | 325 | } |
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index eba1de917f2a..34a06692e4fa 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c | |||
@@ -98,14 +98,10 @@ sesInfoFree(struct cifsSesInfo *buf_to_free) | |||
98 | atomic_dec(&sesInfoAllocCount); | 98 | atomic_dec(&sesInfoAllocCount); |
99 | list_del(&buf_to_free->cifsSessionList); | 99 | list_del(&buf_to_free->cifsSessionList); |
100 | write_unlock(&GlobalSMBSeslock); | 100 | write_unlock(&GlobalSMBSeslock); |
101 | if (buf_to_free->serverOS) | 101 | kfree(buf_to_free->serverOS); |
102 | kfree(buf_to_free->serverOS); | 102 | kfree(buf_to_free->serverDomain); |
103 | if (buf_to_free->serverDomain) | 103 | kfree(buf_to_free->serverNOS); |
104 | kfree(buf_to_free->serverDomain); | 104 | kfree(buf_to_free->password); |
105 | if (buf_to_free->serverNOS) | ||
106 | kfree(buf_to_free->serverNOS); | ||
107 | if (buf_to_free->password) | ||
108 | kfree(buf_to_free->password); | ||
109 | kfree(buf_to_free); | 105 | kfree(buf_to_free); |
110 | } | 106 | } |
111 | 107 | ||
@@ -144,8 +140,7 @@ tconInfoFree(struct cifsTconInfo *buf_to_free) | |||
144 | atomic_dec(&tconInfoAllocCount); | 140 | atomic_dec(&tconInfoAllocCount); |
145 | list_del(&buf_to_free->cifsConnectionList); | 141 | list_del(&buf_to_free->cifsConnectionList); |
146 | write_unlock(&GlobalSMBSeslock); | 142 | write_unlock(&GlobalSMBSeslock); |
147 | if (buf_to_free->nativeFileSystem) | 143 | kfree(buf_to_free->nativeFileSystem); |
148 | kfree(buf_to_free->nativeFileSystem); | ||
149 | kfree(buf_to_free); | 144 | kfree(buf_to_free); |
150 | } | 145 | } |
151 | 146 | ||
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c index c1e02eff1d25..f375f87c7dbd 100644 --- a/fs/cifs/xattr.c +++ b/fs/cifs/xattr.c | |||
@@ -87,8 +87,7 @@ int cifs_removexattr(struct dentry * direntry, const char * ea_name) | |||
87 | cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); | 87 | cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); |
88 | } | 88 | } |
89 | remove_ea_exit: | 89 | remove_ea_exit: |
90 | if (full_path) | 90 | kfree(full_path); |
91 | kfree(full_path); | ||
92 | FreeXid(xid); | 91 | FreeXid(xid); |
93 | #endif | 92 | #endif |
94 | return rc; | 93 | return rc; |
@@ -132,8 +131,7 @@ int cifs_setxattr(struct dentry * direntry, const char * ea_name, | |||
132 | returns as xattrs */ | 131 | returns as xattrs */ |
133 | if(value_size > MAX_EA_VALUE_SIZE) { | 132 | if(value_size > MAX_EA_VALUE_SIZE) { |
134 | cFYI(1,("size of EA value too large")); | 133 | cFYI(1,("size of EA value too large")); |
135 | if(full_path) | 134 | kfree(full_path); |
136 | kfree(full_path); | ||
137 | FreeXid(xid); | 135 | FreeXid(xid); |
138 | return -EOPNOTSUPP; | 136 | return -EOPNOTSUPP; |
139 | } | 137 | } |
@@ -195,8 +193,7 @@ int cifs_setxattr(struct dentry * direntry, const char * ea_name, | |||
195 | } | 193 | } |
196 | 194 | ||
197 | set_ea_exit: | 195 | set_ea_exit: |
198 | if (full_path) | 196 | kfree(full_path); |
199 | kfree(full_path); | ||
200 | FreeXid(xid); | 197 | FreeXid(xid); |
201 | #endif | 198 | #endif |
202 | return rc; | 199 | return rc; |
@@ -298,8 +295,7 @@ ssize_t cifs_getxattr(struct dentry * direntry, const char * ea_name, | |||
298 | rc = -EOPNOTSUPP; | 295 | rc = -EOPNOTSUPP; |
299 | 296 | ||
300 | get_ea_exit: | 297 | get_ea_exit: |
301 | if (full_path) | 298 | kfree(full_path); |
302 | kfree(full_path); | ||
303 | FreeXid(xid); | 299 | FreeXid(xid); |
304 | #endif | 300 | #endif |
305 | return rc; | 301 | return rc; |
@@ -345,8 +341,7 @@ ssize_t cifs_listxattr(struct dentry * direntry, char * data, size_t buf_size) | |||
345 | cifs_sb->mnt_cifs_flags & | 341 | cifs_sb->mnt_cifs_flags & |
346 | CIFS_MOUNT_MAP_SPECIAL_CHR); | 342 | CIFS_MOUNT_MAP_SPECIAL_CHR); |
347 | 343 | ||
348 | if (full_path) | 344 | kfree(full_path); |
349 | kfree(full_path); | ||
350 | FreeXid(xid); | 345 | FreeXid(xid); |
351 | #endif | 346 | #endif |
352 | return rc; | 347 | return rc; |
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index 43dbcb0b21eb..4909754ea84a 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c | |||
@@ -2235,7 +2235,8 @@ static int fd_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg) | |||
2235 | if (err) | 2235 | if (err) |
2236 | err = -EFAULT; | 2236 | err = -EFAULT; |
2237 | 2237 | ||
2238 | out: if (karg) kfree(karg); | 2238 | out: |
2239 | kfree(karg); | ||
2239 | return err; | 2240 | return err; |
2240 | } | 2241 | } |
2241 | 2242 | ||
diff --git a/fs/dcache.c b/fs/dcache.c index e90512ed35a4..17e439138681 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
@@ -644,7 +644,7 @@ void shrink_dcache_parent(struct dentry * parent) | |||
644 | * | 644 | * |
645 | * Prune the dentries that are anonymous | 645 | * Prune the dentries that are anonymous |
646 | * | 646 | * |
647 | * parsing d_hash list does not hlist_for_each_rcu() as it | 647 | * parsing d_hash list does not hlist_for_each_entry_rcu() as it |
648 | * done under dcache_lock. | 648 | * done under dcache_lock. |
649 | * | 649 | * |
650 | */ | 650 | */ |
@@ -1043,15 +1043,13 @@ struct dentry * __d_lookup(struct dentry * parent, struct qstr * name) | |||
1043 | struct hlist_head *head = d_hash(parent,hash); | 1043 | struct hlist_head *head = d_hash(parent,hash); |
1044 | struct dentry *found = NULL; | 1044 | struct dentry *found = NULL; |
1045 | struct hlist_node *node; | 1045 | struct hlist_node *node; |
1046 | struct dentry *dentry; | ||
1046 | 1047 | ||
1047 | rcu_read_lock(); | 1048 | rcu_read_lock(); |
1048 | 1049 | ||
1049 | hlist_for_each_rcu(node, head) { | 1050 | hlist_for_each_entry_rcu(dentry, node, head, d_hash) { |
1050 | struct dentry *dentry; | ||
1051 | struct qstr *qstr; | 1051 | struct qstr *qstr; |
1052 | 1052 | ||
1053 | dentry = hlist_entry(node, struct dentry, d_hash); | ||
1054 | |||
1055 | if (dentry->d_name.hash != hash) | 1053 | if (dentry->d_name.hash != hash) |
1056 | continue; | 1054 | continue; |
1057 | if (dentry->d_parent != parent) | 1055 | if (dentry->d_parent != parent) |
@@ -1123,7 +1121,7 @@ int d_validate(struct dentry *dentry, struct dentry *dparent) | |||
1123 | spin_lock(&dcache_lock); | 1121 | spin_lock(&dcache_lock); |
1124 | base = d_hash(dparent, dentry->d_name.hash); | 1122 | base = d_hash(dparent, dentry->d_name.hash); |
1125 | hlist_for_each(lhp,base) { | 1123 | hlist_for_each(lhp,base) { |
1126 | /* hlist_for_each_rcu() not required for d_hash list | 1124 | /* hlist_for_each_entry_rcu() not required for d_hash list |
1127 | * as it is parsed under dcache_lock | 1125 | * as it is parsed under dcache_lock |
1128 | */ | 1126 | */ |
1129 | if (dentry == hlist_entry(lhp, struct dentry, d_hash)) { | 1127 | if (dentry == hlist_entry(lhp, struct dentry, d_hash)) { |
diff --git a/fs/devfs/base.c b/fs/devfs/base.c index 8b679b67e5e0..1274422a5384 100644 --- a/fs/devfs/base.c +++ b/fs/devfs/base.c | |||
@@ -2738,10 +2738,8 @@ static int devfsd_close(struct inode *inode, struct file *file) | |||
2738 | entry = fs_info->devfsd_first_event; | 2738 | entry = fs_info->devfsd_first_event; |
2739 | fs_info->devfsd_first_event = NULL; | 2739 | fs_info->devfsd_first_event = NULL; |
2740 | fs_info->devfsd_last_event = NULL; | 2740 | fs_info->devfsd_last_event = NULL; |
2741 | if (fs_info->devfsd_info) { | 2741 | kfree(fs_info->devfsd_info); |
2742 | kfree(fs_info->devfsd_info); | 2742 | fs_info->devfsd_info = NULL; |
2743 | fs_info->devfsd_info = NULL; | ||
2744 | } | ||
2745 | spin_unlock(&fs_info->devfsd_buffer_lock); | 2743 | spin_unlock(&fs_info->devfsd_buffer_lock); |
2746 | fs_info->devfsd_pgrp = 0; | 2744 | fs_info->devfsd_pgrp = 0; |
2747 | fs_info->devfsd_task = NULL; | 2745 | fs_info->devfsd_task = NULL; |
diff --git a/fs/dquot.c b/fs/dquot.c index ea7644227a65..05b60283c9c2 100644 --- a/fs/dquot.c +++ b/fs/dquot.c | |||
@@ -77,6 +77,7 @@ | |||
77 | #include <linux/kmod.h> | 77 | #include <linux/kmod.h> |
78 | #include <linux/namei.h> | 78 | #include <linux/namei.h> |
79 | #include <linux/buffer_head.h> | 79 | #include <linux/buffer_head.h> |
80 | #include <linux/quotaops.h> | ||
80 | 81 | ||
81 | #include <asm/uaccess.h> | 82 | #include <asm/uaccess.h> |
82 | 83 | ||
@@ -1320,13 +1321,11 @@ int vfs_quota_off(struct super_block *sb, int type) | |||
1320 | int cnt; | 1321 | int cnt; |
1321 | struct quota_info *dqopt = sb_dqopt(sb); | 1322 | struct quota_info *dqopt = sb_dqopt(sb); |
1322 | struct inode *toputinode[MAXQUOTAS]; | 1323 | struct inode *toputinode[MAXQUOTAS]; |
1323 | struct vfsmount *toputmnt[MAXQUOTAS]; | ||
1324 | 1324 | ||
1325 | /* We need to serialize quota_off() for device */ | 1325 | /* We need to serialize quota_off() for device */ |
1326 | down(&dqopt->dqonoff_sem); | 1326 | down(&dqopt->dqonoff_sem); |
1327 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 1327 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { |
1328 | toputinode[cnt] = NULL; | 1328 | toputinode[cnt] = NULL; |
1329 | toputmnt[cnt] = NULL; | ||
1330 | if (type != -1 && cnt != type) | 1329 | if (type != -1 && cnt != type) |
1331 | continue; | 1330 | continue; |
1332 | if (!sb_has_quota_enabled(sb, cnt)) | 1331 | if (!sb_has_quota_enabled(sb, cnt)) |
@@ -1347,9 +1346,7 @@ int vfs_quota_off(struct super_block *sb, int type) | |||
1347 | put_quota_format(dqopt->info[cnt].dqi_format); | 1346 | put_quota_format(dqopt->info[cnt].dqi_format); |
1348 | 1347 | ||
1349 | toputinode[cnt] = dqopt->files[cnt]; | 1348 | toputinode[cnt] = dqopt->files[cnt]; |
1350 | toputmnt[cnt] = dqopt->mnt[cnt]; | ||
1351 | dqopt->files[cnt] = NULL; | 1349 | dqopt->files[cnt] = NULL; |
1352 | dqopt->mnt[cnt] = NULL; | ||
1353 | dqopt->info[cnt].dqi_flags = 0; | 1350 | dqopt->info[cnt].dqi_flags = 0; |
1354 | dqopt->info[cnt].dqi_igrace = 0; | 1351 | dqopt->info[cnt].dqi_igrace = 0; |
1355 | dqopt->info[cnt].dqi_bgrace = 0; | 1352 | dqopt->info[cnt].dqi_bgrace = 0; |
@@ -1357,10 +1354,7 @@ int vfs_quota_off(struct super_block *sb, int type) | |||
1357 | } | 1354 | } |
1358 | up(&dqopt->dqonoff_sem); | 1355 | up(&dqopt->dqonoff_sem); |
1359 | /* Sync the superblock so that buffers with quota data are written to | 1356 | /* Sync the superblock so that buffers with quota data are written to |
1360 | * disk (and so userspace sees correct data afterwards). | 1357 | * disk (and so userspace sees correct data afterwards). */ |
1361 | * The reference to vfsmnt we are still holding protects us from | ||
1362 | * umount (we don't have it only when quotas are turned on/off for | ||
1363 | * journal replay but in that case we are guarded by the fs anyway). */ | ||
1364 | if (sb->s_op->sync_fs) | 1358 | if (sb->s_op->sync_fs) |
1365 | sb->s_op->sync_fs(sb, 1); | 1359 | sb->s_op->sync_fs(sb, 1); |
1366 | sync_blockdev(sb->s_bdev); | 1360 | sync_blockdev(sb->s_bdev); |
@@ -1384,10 +1378,6 @@ int vfs_quota_off(struct super_block *sb, int type) | |||
1384 | iput(toputinode[cnt]); | 1378 | iput(toputinode[cnt]); |
1385 | } | 1379 | } |
1386 | up(&dqopt->dqonoff_sem); | 1380 | up(&dqopt->dqonoff_sem); |
1387 | /* We don't hold the reference when we turned on quotas | ||
1388 | * just for the journal replay... */ | ||
1389 | if (toputmnt[cnt]) | ||
1390 | mntput(toputmnt[cnt]); | ||
1391 | } | 1381 | } |
1392 | if (sb->s_bdev) | 1382 | if (sb->s_bdev) |
1393 | invalidate_bdev(sb->s_bdev, 0); | 1383 | invalidate_bdev(sb->s_bdev, 0); |
@@ -1502,11 +1492,8 @@ int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path) | |||
1502 | /* Quota file not on the same filesystem? */ | 1492 | /* Quota file not on the same filesystem? */ |
1503 | if (nd.mnt->mnt_sb != sb) | 1493 | if (nd.mnt->mnt_sb != sb) |
1504 | error = -EXDEV; | 1494 | error = -EXDEV; |
1505 | else { | 1495 | else |
1506 | error = vfs_quota_on_inode(nd.dentry->d_inode, type, format_id); | 1496 | error = vfs_quota_on_inode(nd.dentry->d_inode, type, format_id); |
1507 | if (!error) | ||
1508 | sb_dqopt(sb)->mnt[type] = mntget(nd.mnt); | ||
1509 | } | ||
1510 | out_path: | 1497 | out_path: |
1511 | path_release(&nd); | 1498 | path_release(&nd); |
1512 | return error; | 1499 | return error; |
@@ -48,6 +48,7 @@ | |||
48 | #include <linux/syscalls.h> | 48 | #include <linux/syscalls.h> |
49 | #include <linux/rmap.h> | 49 | #include <linux/rmap.h> |
50 | #include <linux/acct.h> | 50 | #include <linux/acct.h> |
51 | #include <linux/cn_proc.h> | ||
51 | 52 | ||
52 | #include <asm/uaccess.h> | 53 | #include <asm/uaccess.h> |
53 | #include <asm/mmu_context.h> | 54 | #include <asm/mmu_context.h> |
@@ -1096,6 +1097,7 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs) | |||
1096 | fput(bprm->file); | 1097 | fput(bprm->file); |
1097 | bprm->file = NULL; | 1098 | bprm->file = NULL; |
1098 | current->did_exec = 1; | 1099 | current->did_exec = 1; |
1100 | proc_exec_connector(current); | ||
1099 | return retval; | 1101 | return retval; |
1100 | } | 1102 | } |
1101 | read_lock(&binfmt_lock); | 1103 | read_lock(&binfmt_lock); |
@@ -1509,7 +1511,7 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs) | |||
1509 | goto close_fail; | 1511 | goto close_fail; |
1510 | if (!file->f_op->write) | 1512 | if (!file->f_op->write) |
1511 | goto close_fail; | 1513 | goto close_fail; |
1512 | if (do_truncate(file->f_dentry, 0) != 0) | 1514 | if (do_truncate(file->f_dentry, 0, file) != 0) |
1513 | goto close_fail; | 1515 | goto close_fail; |
1514 | 1516 | ||
1515 | retval = binfmt->core_dump(signr, regs, file); | 1517 | retval = binfmt->core_dump(signr, regs, file); |
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c index 213148c36ebe..6af2f4130290 100644 --- a/fs/ext2/acl.c +++ b/fs/ext2/acl.c | |||
@@ -194,8 +194,7 @@ ext2_get_acl(struct inode *inode, int type) | |||
194 | acl = NULL; | 194 | acl = NULL; |
195 | else | 195 | else |
196 | acl = ERR_PTR(retval); | 196 | acl = ERR_PTR(retval); |
197 | if (value) | 197 | kfree(value); |
198 | kfree(value); | ||
199 | 198 | ||
200 | if (!IS_ERR(acl)) { | 199 | if (!IS_ERR(acl)) { |
201 | switch(type) { | 200 | switch(type) { |
@@ -262,8 +261,7 @@ ext2_set_acl(struct inode *inode, int type, struct posix_acl *acl) | |||
262 | 261 | ||
263 | error = ext2_xattr_set(inode, name_index, "", value, size, 0); | 262 | error = ext2_xattr_set(inode, name_index, "", value, size, 0); |
264 | 263 | ||
265 | if (value) | 264 | kfree(value); |
266 | kfree(value); | ||
267 | if (!error) { | 265 | if (!error) { |
268 | switch(type) { | 266 | switch(type) { |
269 | case ACL_TYPE_ACCESS: | 267 | case ACL_TYPE_ACCESS: |
diff --git a/fs/file_table.c b/fs/file_table.c index 4dc205546547..c3a5e2fd663b 100644 --- a/fs/file_table.c +++ b/fs/file_table.c | |||
@@ -35,7 +35,7 @@ static DEFINE_SPINLOCK(filp_count_lock); | |||
35 | * context and must be fully threaded - use a local spinlock | 35 | * context and must be fully threaded - use a local spinlock |
36 | * to protect files_stat.nr_files | 36 | * to protect files_stat.nr_files |
37 | */ | 37 | */ |
38 | void filp_ctor(void * objp, struct kmem_cache_s *cachep, unsigned long cflags) | 38 | void filp_ctor(void *objp, struct kmem_cache *cachep, unsigned long cflags) |
39 | { | 39 | { |
40 | if ((cflags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == | 40 | if ((cflags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == |
41 | SLAB_CTOR_CONSTRUCTOR) { | 41 | SLAB_CTOR_CONSTRUCTOR) { |
@@ -46,7 +46,7 @@ void filp_ctor(void * objp, struct kmem_cache_s *cachep, unsigned long cflags) | |||
46 | } | 46 | } |
47 | } | 47 | } |
48 | 48 | ||
49 | void filp_dtor(void * objp, struct kmem_cache_s *cachep, unsigned long dflags) | 49 | void filp_dtor(void *objp, struct kmem_cache *cachep, unsigned long dflags) |
50 | { | 50 | { |
51 | unsigned long flags; | 51 | unsigned long flags; |
52 | spin_lock_irqsave(&filp_count_lock, flags); | 52 | spin_lock_irqsave(&filp_count_lock, flags); |
diff --git a/fs/freevxfs/vxfs_extern.h b/fs/freevxfs/vxfs_extern.h index d8be917f9797..927acf70c591 100644 --- a/fs/freevxfs/vxfs_extern.h +++ b/fs/freevxfs/vxfs_extern.h | |||
@@ -38,7 +38,7 @@ | |||
38 | */ | 38 | */ |
39 | 39 | ||
40 | 40 | ||
41 | struct kmem_cache_s; | 41 | struct kmem_cache; |
42 | struct super_block; | 42 | struct super_block; |
43 | struct vxfs_inode_info; | 43 | struct vxfs_inode_info; |
44 | struct inode; | 44 | struct inode; |
@@ -51,7 +51,7 @@ extern daddr_t vxfs_bmap1(struct inode *, long); | |||
51 | extern int vxfs_read_fshead(struct super_block *); | 51 | extern int vxfs_read_fshead(struct super_block *); |
52 | 52 | ||
53 | /* vxfs_inode.c */ | 53 | /* vxfs_inode.c */ |
54 | extern struct kmem_cache_s *vxfs_inode_cachep; | 54 | extern struct kmem_cache *vxfs_inode_cachep; |
55 | extern void vxfs_dumpi(struct vxfs_inode_info *, ino_t); | 55 | extern void vxfs_dumpi(struct vxfs_inode_info *, ino_t); |
56 | extern struct inode * vxfs_get_fake_inode(struct super_block *, | 56 | extern struct inode * vxfs_get_fake_inode(struct super_block *, |
57 | struct vxfs_inode_info *); | 57 | struct vxfs_inode_info *); |
diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c index 9672d2facffe..f544aae9169f 100644 --- a/fs/freevxfs/vxfs_inode.c +++ b/fs/freevxfs/vxfs_inode.c | |||
@@ -46,15 +46,6 @@ extern struct address_space_operations vxfs_immed_aops; | |||
46 | 46 | ||
47 | extern struct inode_operations vxfs_immed_symlink_iops; | 47 | extern struct inode_operations vxfs_immed_symlink_iops; |
48 | 48 | ||
49 | static struct file_operations vxfs_file_operations = { | ||
50 | .open = generic_file_open, | ||
51 | .llseek = generic_file_llseek, | ||
52 | .read = generic_file_read, | ||
53 | .mmap = generic_file_mmap, | ||
54 | .sendfile = generic_file_sendfile, | ||
55 | }; | ||
56 | |||
57 | |||
58 | kmem_cache_t *vxfs_inode_cachep; | 49 | kmem_cache_t *vxfs_inode_cachep; |
59 | 50 | ||
60 | 51 | ||
@@ -318,7 +309,7 @@ vxfs_read_inode(struct inode *ip) | |||
318 | aops = &vxfs_aops; | 309 | aops = &vxfs_aops; |
319 | 310 | ||
320 | if (S_ISREG(ip->i_mode)) { | 311 | if (S_ISREG(ip->i_mode)) { |
321 | ip->i_fop = &vxfs_file_operations; | 312 | ip->i_fop = &generic_ro_fops; |
322 | ip->i_mapping->a_ops = aops; | 313 | ip->i_mapping->a_ops = aops; |
323 | } else if (S_ISDIR(ip->i_mode)) { | 314 | } else if (S_ISDIR(ip->i_mode)) { |
324 | ip->i_op = &vxfs_dir_inode_ops; | 315 | ip->i_op = &vxfs_dir_inode_ops; |
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index c27f8d4098be..785c7213a54f 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c | |||
@@ -562,7 +562,7 @@ int write_inode_now(struct inode *inode, int sync) | |||
562 | }; | 562 | }; |
563 | 563 | ||
564 | if (!mapping_cap_writeback_dirty(inode->i_mapping)) | 564 | if (!mapping_cap_writeback_dirty(inode->i_mapping)) |
565 | return 0; | 565 | wbc.nr_to_write = 0; |
566 | 566 | ||
567 | might_sleep(); | 567 | might_sleep(); |
568 | spin_lock(&inode_lock); | 568 | spin_lock(&inode_lock); |
@@ -606,7 +606,7 @@ EXPORT_SYMBOL(sync_inode); | |||
606 | * O_SYNC flag set, to flush dirty writes to disk. | 606 | * O_SYNC flag set, to flush dirty writes to disk. |
607 | * | 607 | * |
608 | * @what is a bitmask, specifying which part of the inode's data should be | 608 | * @what is a bitmask, specifying which part of the inode's data should be |
609 | * written and waited upon: | 609 | * written and waited upon. |
610 | * | 610 | * |
611 | * OSYNC_DATA: i_mapping's dirty data | 611 | * OSYNC_DATA: i_mapping's dirty data |
612 | * OSYNC_METADATA: the buffers at i_mapping->private_list | 612 | * OSYNC_METADATA: the buffers at i_mapping->private_list |
@@ -672,8 +672,9 @@ int writeback_acquire(struct backing_dev_info *bdi) | |||
672 | 672 | ||
673 | /** | 673 | /** |
674 | * writeback_in_progress: determine whether there is writeback in progress | 674 | * writeback_in_progress: determine whether there is writeback in progress |
675 | * against a backing device. | ||
676 | * @bdi: the device's backing_dev_info structure. | 675 | * @bdi: the device's backing_dev_info structure. |
676 | * | ||
677 | * Determine whether there is writeback in progress against a backing device. | ||
677 | */ | 678 | */ |
678 | int writeback_in_progress(struct backing_dev_info *bdi) | 679 | int writeback_in_progress(struct backing_dev_info *bdi) |
679 | { | 680 | { |
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index a6f90a6c754a..8f873e621f41 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c | |||
@@ -184,6 +184,13 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req) | |||
184 | fuse_putback_request() */ | 184 | fuse_putback_request() */ |
185 | for (i = 1; i < FUSE_MAX_OUTSTANDING; i++) | 185 | for (i = 1; i < FUSE_MAX_OUTSTANDING; i++) |
186 | up(&fc->outstanding_sem); | 186 | up(&fc->outstanding_sem); |
187 | } else if (req->in.h.opcode == FUSE_RELEASE && req->inode == NULL) { | ||
188 | /* Special case for failed iget in CREATE */ | ||
189 | u64 nodeid = req->in.h.nodeid; | ||
190 | __fuse_get_request(req); | ||
191 | fuse_reset_request(req); | ||
192 | fuse_send_forget(fc, req, nodeid, 1); | ||
193 | putback = 0; | ||
187 | } | 194 | } |
188 | if (putback) | 195 | if (putback) |
189 | fuse_putback_request(fc, req); | 196 | fuse_putback_request(fc, req); |
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 70dba721acab..c045cc70c749 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/gfp.h> | 13 | #include <linux/gfp.h> |
14 | #include <linux/sched.h> | 14 | #include <linux/sched.h> |
15 | #include <linux/namei.h> | 15 | #include <linux/namei.h> |
16 | #include <linux/mount.h> | ||
16 | 17 | ||
17 | static inline unsigned long time_to_jiffies(unsigned long sec, | 18 | static inline unsigned long time_to_jiffies(unsigned long sec, |
18 | unsigned long nsec) | 19 | unsigned long nsec) |
@@ -134,6 +135,101 @@ static void fuse_invalidate_entry(struct dentry *entry) | |||
134 | entry->d_time = jiffies - 1; | 135 | entry->d_time = jiffies - 1; |
135 | } | 136 | } |
136 | 137 | ||
138 | static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode, | ||
139 | struct nameidata *nd) | ||
140 | { | ||
141 | int err; | ||
142 | struct inode *inode; | ||
143 | struct fuse_conn *fc = get_fuse_conn(dir); | ||
144 | struct fuse_req *req; | ||
145 | struct fuse_open_in inarg; | ||
146 | struct fuse_open_out outopen; | ||
147 | struct fuse_entry_out outentry; | ||
148 | struct fuse_inode *fi; | ||
149 | struct fuse_file *ff; | ||
150 | struct file *file; | ||
151 | int flags = nd->intent.open.flags - 1; | ||
152 | |||
153 | err = -ENOSYS; | ||
154 | if (fc->no_create) | ||
155 | goto out; | ||
156 | |||
157 | err = -ENAMETOOLONG; | ||
158 | if (entry->d_name.len > FUSE_NAME_MAX) | ||
159 | goto out; | ||
160 | |||
161 | err = -EINTR; | ||
162 | req = fuse_get_request(fc); | ||
163 | if (!req) | ||
164 | goto out; | ||
165 | |||
166 | ff = fuse_file_alloc(); | ||
167 | if (!ff) | ||
168 | goto out_put_request; | ||
169 | |||
170 | flags &= ~O_NOCTTY; | ||
171 | memset(&inarg, 0, sizeof(inarg)); | ||
172 | inarg.flags = flags; | ||
173 | inarg.mode = mode; | ||
174 | req->in.h.opcode = FUSE_CREATE; | ||
175 | req->in.h.nodeid = get_node_id(dir); | ||
176 | req->inode = dir; | ||
177 | req->in.numargs = 2; | ||
178 | req->in.args[0].size = sizeof(inarg); | ||
179 | req->in.args[0].value = &inarg; | ||
180 | req->in.args[1].size = entry->d_name.len + 1; | ||
181 | req->in.args[1].value = entry->d_name.name; | ||
182 | req->out.numargs = 2; | ||
183 | req->out.args[0].size = sizeof(outentry); | ||
184 | req->out.args[0].value = &outentry; | ||
185 | req->out.args[1].size = sizeof(outopen); | ||
186 | req->out.args[1].value = &outopen; | ||
187 | request_send(fc, req); | ||
188 | err = req->out.h.error; | ||
189 | if (err) { | ||
190 | if (err == -ENOSYS) | ||
191 | fc->no_create = 1; | ||
192 | goto out_free_ff; | ||
193 | } | ||
194 | |||
195 | err = -EIO; | ||
196 | if (!S_ISREG(outentry.attr.mode)) | ||
197 | goto out_free_ff; | ||
198 | |||
199 | inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation, | ||
200 | &outentry.attr); | ||
201 | err = -ENOMEM; | ||
202 | if (!inode) { | ||
203 | flags &= ~(O_CREAT | O_EXCL | O_TRUNC); | ||
204 | ff->fh = outopen.fh; | ||
205 | fuse_send_release(fc, ff, outentry.nodeid, NULL, flags, 0); | ||
206 | goto out_put_request; | ||
207 | } | ||
208 | fuse_put_request(fc, req); | ||
209 | entry->d_time = time_to_jiffies(outentry.entry_valid, | ||
210 | outentry.entry_valid_nsec); | ||
211 | fi = get_fuse_inode(inode); | ||
212 | fi->i_time = time_to_jiffies(outentry.attr_valid, | ||
213 | outentry.attr_valid_nsec); | ||
214 | |||
215 | d_instantiate(entry, inode); | ||
216 | file = lookup_instantiate_filp(nd, entry, generic_file_open); | ||
217 | if (IS_ERR(file)) { | ||
218 | ff->fh = outopen.fh; | ||
219 | fuse_send_release(fc, ff, outentry.nodeid, inode, flags, 0); | ||
220 | return PTR_ERR(file); | ||
221 | } | ||
222 | fuse_finish_open(inode, file, ff, &outopen); | ||
223 | return 0; | ||
224 | |||
225 | out_free_ff: | ||
226 | fuse_file_free(ff); | ||
227 | out_put_request: | ||
228 | fuse_put_request(fc, req); | ||
229 | out: | ||
230 | return err; | ||
231 | } | ||
232 | |||
137 | static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req, | 233 | static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req, |
138 | struct inode *dir, struct dentry *entry, | 234 | struct inode *dir, struct dentry *entry, |
139 | int mode) | 235 | int mode) |
@@ -208,6 +304,12 @@ static int fuse_mknod(struct inode *dir, struct dentry *entry, int mode, | |||
208 | static int fuse_create(struct inode *dir, struct dentry *entry, int mode, | 304 | static int fuse_create(struct inode *dir, struct dentry *entry, int mode, |
209 | struct nameidata *nd) | 305 | struct nameidata *nd) |
210 | { | 306 | { |
307 | if (nd && (nd->flags & LOOKUP_CREATE)) { | ||
308 | int err = fuse_create_open(dir, entry, mode, nd); | ||
309 | if (err != -ENOSYS) | ||
310 | return err; | ||
311 | /* Fall back on mknod */ | ||
312 | } | ||
211 | return fuse_mknod(dir, entry, mode, 0); | 313 | return fuse_mknod(dir, entry, mode, 0); |
212 | } | 314 | } |
213 | 315 | ||
@@ -461,6 +563,38 @@ static int fuse_revalidate(struct dentry *entry) | |||
461 | return fuse_do_getattr(inode); | 563 | return fuse_do_getattr(inode); |
462 | } | 564 | } |
463 | 565 | ||
566 | static int fuse_access(struct inode *inode, int mask) | ||
567 | { | ||
568 | struct fuse_conn *fc = get_fuse_conn(inode); | ||
569 | struct fuse_req *req; | ||
570 | struct fuse_access_in inarg; | ||
571 | int err; | ||
572 | |||
573 | if (fc->no_access) | ||
574 | return 0; | ||
575 | |||
576 | req = fuse_get_request(fc); | ||
577 | if (!req) | ||
578 | return -EINTR; | ||
579 | |||
580 | memset(&inarg, 0, sizeof(inarg)); | ||
581 | inarg.mask = mask; | ||
582 | req->in.h.opcode = FUSE_ACCESS; | ||
583 | req->in.h.nodeid = get_node_id(inode); | ||
584 | req->inode = inode; | ||
585 | req->in.numargs = 1; | ||
586 | req->in.args[0].size = sizeof(inarg); | ||
587 | req->in.args[0].value = &inarg; | ||
588 | request_send(fc, req); | ||
589 | err = req->out.h.error; | ||
590 | fuse_put_request(fc, req); | ||
591 | if (err == -ENOSYS) { | ||
592 | fc->no_access = 1; | ||
593 | err = 0; | ||
594 | } | ||
595 | return err; | ||
596 | } | ||
597 | |||
464 | static int fuse_permission(struct inode *inode, int mask, struct nameidata *nd) | 598 | static int fuse_permission(struct inode *inode, int mask, struct nameidata *nd) |
465 | { | 599 | { |
466 | struct fuse_conn *fc = get_fuse_conn(inode); | 600 | struct fuse_conn *fc = get_fuse_conn(inode); |
@@ -491,11 +625,11 @@ static int fuse_permission(struct inode *inode, int mask, struct nameidata *nd) | |||
491 | return err; | 625 | return err; |
492 | } else { | 626 | } else { |
493 | int mode = inode->i_mode; | 627 | int mode = inode->i_mode; |
494 | if ((mask & MAY_WRITE) && IS_RDONLY(inode) && | ||
495 | (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) | ||
496 | return -EROFS; | ||
497 | if ((mask & MAY_EXEC) && !S_ISDIR(mode) && !(mode & S_IXUGO)) | 628 | if ((mask & MAY_EXEC) && !S_ISDIR(mode) && !(mode & S_IXUGO)) |
498 | return -EACCES; | 629 | return -EACCES; |
630 | |||
631 | if (nd && (nd->flags & LOOKUP_ACCESS)) | ||
632 | return fuse_access(inode, mask); | ||
499 | return 0; | 633 | return 0; |
500 | } | 634 | } |
501 | } | 635 | } |
@@ -629,29 +763,29 @@ static int fuse_dir_fsync(struct file *file, struct dentry *de, int datasync) | |||
629 | return file ? fuse_fsync_common(file, de, datasync, 1) : 0; | 763 | return file ? fuse_fsync_common(file, de, datasync, 1) : 0; |
630 | } | 764 | } |
631 | 765 | ||
632 | static unsigned iattr_to_fattr(struct iattr *iattr, struct fuse_attr *fattr) | 766 | static void iattr_to_fattr(struct iattr *iattr, struct fuse_setattr_in *arg) |
633 | { | 767 | { |
634 | unsigned ivalid = iattr->ia_valid; | 768 | unsigned ivalid = iattr->ia_valid; |
635 | unsigned fvalid = 0; | ||
636 | |||
637 | memset(fattr, 0, sizeof(*fattr)); | ||
638 | 769 | ||
639 | if (ivalid & ATTR_MODE) | 770 | if (ivalid & ATTR_MODE) |
640 | fvalid |= FATTR_MODE, fattr->mode = iattr->ia_mode; | 771 | arg->valid |= FATTR_MODE, arg->mode = iattr->ia_mode; |
641 | if (ivalid & ATTR_UID) | 772 | if (ivalid & ATTR_UID) |
642 | fvalid |= FATTR_UID, fattr->uid = iattr->ia_uid; | 773 | arg->valid |= FATTR_UID, arg->uid = iattr->ia_uid; |
643 | if (ivalid & ATTR_GID) | 774 | if (ivalid & ATTR_GID) |
644 | fvalid |= FATTR_GID, fattr->gid = iattr->ia_gid; | 775 | arg->valid |= FATTR_GID, arg->gid = iattr->ia_gid; |
645 | if (ivalid & ATTR_SIZE) | 776 | if (ivalid & ATTR_SIZE) |
646 | fvalid |= FATTR_SIZE, fattr->size = iattr->ia_size; | 777 | arg->valid |= FATTR_SIZE, arg->size = iattr->ia_size; |
647 | /* You can only _set_ these together (they may change by themselves) */ | 778 | /* You can only _set_ these together (they may change by themselves) */ |
648 | if ((ivalid & (ATTR_ATIME | ATTR_MTIME)) == (ATTR_ATIME | ATTR_MTIME)) { | 779 | if ((ivalid & (ATTR_ATIME | ATTR_MTIME)) == (ATTR_ATIME | ATTR_MTIME)) { |
649 | fvalid |= FATTR_ATIME | FATTR_MTIME; | 780 | arg->valid |= FATTR_ATIME | FATTR_MTIME; |
650 | fattr->atime = iattr->ia_atime.tv_sec; | 781 | arg->atime = iattr->ia_atime.tv_sec; |
651 | fattr->mtime = iattr->ia_mtime.tv_sec; | 782 | arg->mtime = iattr->ia_mtime.tv_sec; |
783 | } | ||
784 | if (ivalid & ATTR_FILE) { | ||
785 | struct fuse_file *ff = iattr->ia_file->private_data; | ||
786 | arg->valid |= FATTR_FH; | ||
787 | arg->fh = ff->fh; | ||
652 | } | 788 | } |
653 | |||
654 | return fvalid; | ||
655 | } | 789 | } |
656 | 790 | ||
657 | static int fuse_setattr(struct dentry *entry, struct iattr *attr) | 791 | static int fuse_setattr(struct dentry *entry, struct iattr *attr) |
@@ -686,7 +820,7 @@ static int fuse_setattr(struct dentry *entry, struct iattr *attr) | |||
686 | return -EINTR; | 820 | return -EINTR; |
687 | 821 | ||
688 | memset(&inarg, 0, sizeof(inarg)); | 822 | memset(&inarg, 0, sizeof(inarg)); |
689 | inarg.valid = iattr_to_fattr(attr, &inarg.attr); | 823 | iattr_to_fattr(attr, &inarg); |
690 | req->in.h.opcode = FUSE_SETATTR; | 824 | req->in.h.opcode = FUSE_SETATTR; |
691 | req->in.h.nodeid = get_node_id(inode); | 825 | req->in.h.nodeid = get_node_id(inode); |
692 | req->inode = inode; | 826 | req->inode = inode; |
@@ -735,7 +869,9 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry, | |||
735 | struct nameidata *nd) | 869 | struct nameidata *nd) |
736 | { | 870 | { |
737 | struct inode *inode; | 871 | struct inode *inode; |
738 | int err = fuse_lookup_iget(dir, entry, &inode); | 872 | int err; |
873 | |||
874 | err = fuse_lookup_iget(dir, entry, &inode); | ||
739 | if (err) | 875 | if (err) |
740 | return ERR_PTR(err); | 876 | return ERR_PTR(err); |
741 | if (inode && S_ISDIR(inode->i_mode)) { | 877 | if (inode && S_ISDIR(inode->i_mode)) { |
diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 657ab11c173b..2ca86141d13a 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c | |||
@@ -14,11 +14,69 @@ | |||
14 | 14 | ||
15 | static struct file_operations fuse_direct_io_file_operations; | 15 | static struct file_operations fuse_direct_io_file_operations; |
16 | 16 | ||
17 | int fuse_open_common(struct inode *inode, struct file *file, int isdir) | 17 | static int fuse_send_open(struct inode *inode, struct file *file, int isdir, |
18 | struct fuse_open_out *outargp) | ||
18 | { | 19 | { |
19 | struct fuse_conn *fc = get_fuse_conn(inode); | 20 | struct fuse_conn *fc = get_fuse_conn(inode); |
20 | struct fuse_req *req; | ||
21 | struct fuse_open_in inarg; | 21 | struct fuse_open_in inarg; |
22 | struct fuse_req *req; | ||
23 | int err; | ||
24 | |||
25 | req = fuse_get_request(fc); | ||
26 | if (!req) | ||
27 | return -EINTR; | ||
28 | |||
29 | memset(&inarg, 0, sizeof(inarg)); | ||
30 | inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC); | ||
31 | req->in.h.opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN; | ||
32 | req->in.h.nodeid = get_node_id(inode); | ||
33 | req->inode = inode; | ||
34 | req->in.numargs = 1; | ||
35 | req->in.args[0].size = sizeof(inarg); | ||
36 | req->in.args[0].value = &inarg; | ||
37 | req->out.numargs = 1; | ||
38 | req->out.args[0].size = sizeof(*outargp); | ||
39 | req->out.args[0].value = outargp; | ||
40 | request_send(fc, req); | ||
41 | err = req->out.h.error; | ||
42 | fuse_put_request(fc, req); | ||
43 | |||
44 | return err; | ||
45 | } | ||
46 | |||
47 | struct fuse_file *fuse_file_alloc(void) | ||
48 | { | ||
49 | struct fuse_file *ff; | ||
50 | ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL); | ||
51 | if (ff) { | ||
52 | ff->release_req = fuse_request_alloc(); | ||
53 | if (!ff->release_req) { | ||
54 | kfree(ff); | ||
55 | ff = NULL; | ||
56 | } | ||
57 | } | ||
58 | return ff; | ||
59 | } | ||
60 | |||
61 | void fuse_file_free(struct fuse_file *ff) | ||
62 | { | ||
63 | fuse_request_free(ff->release_req); | ||
64 | kfree(ff); | ||
65 | } | ||
66 | |||
67 | void fuse_finish_open(struct inode *inode, struct file *file, | ||
68 | struct fuse_file *ff, struct fuse_open_out *outarg) | ||
69 | { | ||
70 | if (outarg->open_flags & FOPEN_DIRECT_IO) | ||
71 | file->f_op = &fuse_direct_io_file_operations; | ||
72 | if (!(outarg->open_flags & FOPEN_KEEP_CACHE)) | ||
73 | invalidate_inode_pages(inode->i_mapping); | ||
74 | ff->fh = outarg->fh; | ||
75 | file->private_data = ff; | ||
76 | } | ||
77 | |||
78 | int fuse_open_common(struct inode *inode, struct file *file, int isdir) | ||
79 | { | ||
22 | struct fuse_open_out outarg; | 80 | struct fuse_open_out outarg; |
23 | struct fuse_file *ff; | 81 | struct fuse_file *ff; |
24 | int err; | 82 | int err; |
@@ -34,73 +92,53 @@ int fuse_open_common(struct inode *inode, struct file *file, int isdir) | |||
34 | /* If opening the root node, no lookup has been performed on | 92 | /* If opening the root node, no lookup has been performed on |
35 | it, so the attributes must be refreshed */ | 93 | it, so the attributes must be refreshed */ |
36 | if (get_node_id(inode) == FUSE_ROOT_ID) { | 94 | if (get_node_id(inode) == FUSE_ROOT_ID) { |
37 | int err = fuse_do_getattr(inode); | 95 | err = fuse_do_getattr(inode); |
38 | if (err) | 96 | if (err) |
39 | return err; | 97 | return err; |
40 | } | 98 | } |
41 | 99 | ||
42 | req = fuse_get_request(fc); | 100 | ff = fuse_file_alloc(); |
43 | if (!req) | ||
44 | return -EINTR; | ||
45 | |||
46 | err = -ENOMEM; | ||
47 | ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL); | ||
48 | if (!ff) | 101 | if (!ff) |
49 | goto out_put_request; | 102 | return -ENOMEM; |
50 | 103 | ||
51 | ff->release_req = fuse_request_alloc(); | 104 | err = fuse_send_open(inode, file, isdir, &outarg); |
52 | if (!ff->release_req) { | 105 | if (err) |
53 | kfree(ff); | 106 | fuse_file_free(ff); |
54 | goto out_put_request; | 107 | else { |
55 | } | 108 | if (isdir) |
56 | 109 | outarg.open_flags &= ~FOPEN_DIRECT_IO; | |
57 | memset(&inarg, 0, sizeof(inarg)); | 110 | fuse_finish_open(inode, file, ff, &outarg); |
58 | inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC); | ||
59 | req->in.h.opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN; | ||
60 | req->in.h.nodeid = get_node_id(inode); | ||
61 | req->inode = inode; | ||
62 | req->in.numargs = 1; | ||
63 | req->in.args[0].size = sizeof(inarg); | ||
64 | req->in.args[0].value = &inarg; | ||
65 | req->out.numargs = 1; | ||
66 | req->out.args[0].size = sizeof(outarg); | ||
67 | req->out.args[0].value = &outarg; | ||
68 | request_send(fc, req); | ||
69 | err = req->out.h.error; | ||
70 | if (err) { | ||
71 | fuse_request_free(ff->release_req); | ||
72 | kfree(ff); | ||
73 | } else { | ||
74 | if (!isdir && (outarg.open_flags & FOPEN_DIRECT_IO)) | ||
75 | file->f_op = &fuse_direct_io_file_operations; | ||
76 | if (!(outarg.open_flags & FOPEN_KEEP_CACHE)) | ||
77 | invalidate_inode_pages(inode->i_mapping); | ||
78 | ff->fh = outarg.fh; | ||
79 | file->private_data = ff; | ||
80 | } | 111 | } |
81 | 112 | ||
82 | out_put_request: | ||
83 | fuse_put_request(fc, req); | ||
84 | return err; | 113 | return err; |
85 | } | 114 | } |
86 | 115 | ||
87 | int fuse_release_common(struct inode *inode, struct file *file, int isdir) | 116 | void fuse_send_release(struct fuse_conn *fc, struct fuse_file *ff, |
117 | u64 nodeid, struct inode *inode, int flags, int isdir) | ||
88 | { | 118 | { |
89 | struct fuse_conn *fc = get_fuse_conn(inode); | 119 | struct fuse_req * req = ff->release_req; |
90 | struct fuse_file *ff = file->private_data; | ||
91 | struct fuse_req *req = ff->release_req; | ||
92 | struct fuse_release_in *inarg = &req->misc.release_in; | 120 | struct fuse_release_in *inarg = &req->misc.release_in; |
93 | 121 | ||
94 | inarg->fh = ff->fh; | 122 | inarg->fh = ff->fh; |
95 | inarg->flags = file->f_flags & ~O_EXCL; | 123 | inarg->flags = flags; |
96 | req->in.h.opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE; | 124 | req->in.h.opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE; |
97 | req->in.h.nodeid = get_node_id(inode); | 125 | req->in.h.nodeid = nodeid; |
98 | req->inode = inode; | 126 | req->inode = inode; |
99 | req->in.numargs = 1; | 127 | req->in.numargs = 1; |
100 | req->in.args[0].size = sizeof(struct fuse_release_in); | 128 | req->in.args[0].size = sizeof(struct fuse_release_in); |
101 | req->in.args[0].value = inarg; | 129 | req->in.args[0].value = inarg; |
102 | request_send_background(fc, req); | 130 | request_send_background(fc, req); |
103 | kfree(ff); | 131 | kfree(ff); |
132 | } | ||
133 | |||
134 | int fuse_release_common(struct inode *inode, struct file *file, int isdir) | ||
135 | { | ||
136 | struct fuse_file *ff = file->private_data; | ||
137 | if (ff) { | ||
138 | struct fuse_conn *fc = get_fuse_conn(inode); | ||
139 | u64 nodeid = get_node_id(inode); | ||
140 | fuse_send_release(fc, ff, nodeid, inode, file->f_flags, isdir); | ||
141 | } | ||
104 | 142 | ||
105 | /* Return value is ignored by VFS */ | 143 | /* Return value is ignored by VFS */ |
106 | return 0; | 144 | return 0; |
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 5cb456f572c1..0ea5301f86be 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h | |||
@@ -266,6 +266,12 @@ struct fuse_conn { | |||
266 | /** Is removexattr not implemented by fs? */ | 266 | /** Is removexattr not implemented by fs? */ |
267 | unsigned no_removexattr : 1; | 267 | unsigned no_removexattr : 1; |
268 | 268 | ||
269 | /** Is access not implemented by fs? */ | ||
270 | unsigned no_access : 1; | ||
271 | |||
272 | /** Is create not implemented by fs? */ | ||
273 | unsigned no_create : 1; | ||
274 | |||
269 | /** Backing dev info */ | 275 | /** Backing dev info */ |
270 | struct backing_dev_info bdi; | 276 | struct backing_dev_info bdi; |
271 | }; | 277 | }; |
@@ -337,6 +343,17 @@ size_t fuse_send_read_common(struct fuse_req *req, struct file *file, | |||
337 | */ | 343 | */ |
338 | int fuse_open_common(struct inode *inode, struct file *file, int isdir); | 344 | int fuse_open_common(struct inode *inode, struct file *file, int isdir); |
339 | 345 | ||
346 | struct fuse_file *fuse_file_alloc(void); | ||
347 | void fuse_file_free(struct fuse_file *ff); | ||
348 | void fuse_finish_open(struct inode *inode, struct file *file, | ||
349 | struct fuse_file *ff, struct fuse_open_out *outarg); | ||
350 | |||
351 | /** | ||
352 | * Send a RELEASE request | ||
353 | */ | ||
354 | void fuse_send_release(struct fuse_conn *fc, struct fuse_file *ff, | ||
355 | u64 nodeid, struct inode *inode, int flags, int isdir); | ||
356 | |||
340 | /** | 357 | /** |
341 | * Send RELEASE or RELEASEDIR request | 358 | * Send RELEASE or RELEASEDIR request |
342 | */ | 359 | */ |
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index dd7113106269..a33fb1d91373 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c | |||
@@ -294,8 +294,7 @@ static void hostfs_delete_inode(struct inode *inode) | |||
294 | 294 | ||
295 | static void hostfs_destroy_inode(struct inode *inode) | 295 | static void hostfs_destroy_inode(struct inode *inode) |
296 | { | 296 | { |
297 | if(HOSTFS_I(inode)->host_filename) | 297 | kfree(HOSTFS_I(inode)->host_filename); |
298 | kfree(HOSTFS_I(inode)->host_filename); | ||
299 | 298 | ||
300 | /*XXX: This should not happen, probably. The check is here for | 299 | /*XXX: This should not happen, probably. The check is here for |
301 | * additional safety.*/ | 300 | * additional safety.*/ |
diff --git a/fs/hpfs/dnode.c b/fs/hpfs/dnode.c index 1d21307730a8..229ff2fb1809 100644 --- a/fs/hpfs/dnode.c +++ b/fs/hpfs/dnode.c | |||
@@ -244,12 +244,12 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno, | |||
244 | go_up: | 244 | go_up: |
245 | if (namelen >= 256) { | 245 | if (namelen >= 256) { |
246 | hpfs_error(i->i_sb, "hpfs_add_to_dnode: namelen == %d", namelen); | 246 | hpfs_error(i->i_sb, "hpfs_add_to_dnode: namelen == %d", namelen); |
247 | if (nd) kfree(nd); | 247 | kfree(nd); |
248 | kfree(nname); | 248 | kfree(nname); |
249 | return 1; | 249 | return 1; |
250 | } | 250 | } |
251 | if (!(d = hpfs_map_dnode(i->i_sb, dno, &qbh))) { | 251 | if (!(d = hpfs_map_dnode(i->i_sb, dno, &qbh))) { |
252 | if (nd) kfree(nd); | 252 | kfree(nd); |
253 | kfree(nname); | 253 | kfree(nname); |
254 | return 1; | 254 | return 1; |
255 | } | 255 | } |
@@ -257,7 +257,7 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno, | |||
257 | if (hpfs_sb(i->i_sb)->sb_chk) | 257 | if (hpfs_sb(i->i_sb)->sb_chk) |
258 | if (hpfs_stop_cycles(i->i_sb, dno, &c1, &c2, "hpfs_add_to_dnode")) { | 258 | if (hpfs_stop_cycles(i->i_sb, dno, &c1, &c2, "hpfs_add_to_dnode")) { |
259 | hpfs_brelse4(&qbh); | 259 | hpfs_brelse4(&qbh); |
260 | if (nd) kfree(nd); | 260 | kfree(nd); |
261 | kfree(nname); | 261 | kfree(nname); |
262 | return 1; | 262 | return 1; |
263 | } | 263 | } |
@@ -270,7 +270,7 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno, | |||
270 | for_all_poss(i, hpfs_pos_subst, 5, t + 1); | 270 | for_all_poss(i, hpfs_pos_subst, 5, t + 1); |
271 | hpfs_mark_4buffers_dirty(&qbh); | 271 | hpfs_mark_4buffers_dirty(&qbh); |
272 | hpfs_brelse4(&qbh); | 272 | hpfs_brelse4(&qbh); |
273 | if (nd) kfree(nd); | 273 | kfree(nd); |
274 | kfree(nname); | 274 | kfree(nname); |
275 | return 0; | 275 | return 0; |
276 | } | 276 | } |
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c index 8eefa6366db7..63e88d7e2c3b 100644 --- a/fs/hpfs/super.c +++ b/fs/hpfs/super.c | |||
@@ -75,7 +75,7 @@ void hpfs_error(struct super_block *s, char *m,...) | |||
75 | } else if (s->s_flags & MS_RDONLY) printk("; going on - but anything won't be destroyed because it's read-only\n"); | 75 | } else if (s->s_flags & MS_RDONLY) printk("; going on - but anything won't be destroyed because it's read-only\n"); |
76 | else printk("; corrupted filesystem mounted read/write - your computer will explode within 20 seconds ... but you wanted it so!\n"); | 76 | else printk("; corrupted filesystem mounted read/write - your computer will explode within 20 seconds ... but you wanted it so!\n"); |
77 | } else printk("\n"); | 77 | } else printk("\n"); |
78 | if (buf) kfree(buf); | 78 | kfree(buf); |
79 | hpfs_sb(s)->sb_was_error = 1; | 79 | hpfs_sb(s)->sb_was_error = 1; |
80 | } | 80 | } |
81 | 81 | ||
@@ -102,8 +102,8 @@ int hpfs_stop_cycles(struct super_block *s, int key, int *c1, int *c2, | |||
102 | static void hpfs_put_super(struct super_block *s) | 102 | static void hpfs_put_super(struct super_block *s) |
103 | { | 103 | { |
104 | struct hpfs_sb_info *sbi = hpfs_sb(s); | 104 | struct hpfs_sb_info *sbi = hpfs_sb(s); |
105 | if (sbi->sb_cp_table) kfree(sbi->sb_cp_table); | 105 | kfree(sbi->sb_cp_table); |
106 | if (sbi->sb_bmp_dir) kfree(sbi->sb_bmp_dir); | 106 | kfree(sbi->sb_bmp_dir); |
107 | unmark_dirty(s); | 107 | unmark_dirty(s); |
108 | s->s_fs_info = NULL; | 108 | s->s_fs_info = NULL; |
109 | kfree(sbi); | 109 | kfree(sbi); |
@@ -654,8 +654,8 @@ bail3: brelse(bh1); | |||
654 | bail2: brelse(bh0); | 654 | bail2: brelse(bh0); |
655 | bail1: | 655 | bail1: |
656 | bail0: | 656 | bail0: |
657 | if (sbi->sb_bmp_dir) kfree(sbi->sb_bmp_dir); | 657 | kfree(sbi->sb_bmp_dir); |
658 | if (sbi->sb_cp_table) kfree(sbi->sb_cp_table); | 658 | kfree(sbi->sb_cp_table); |
659 | s->s_fs_info = NULL; | 659 | s->s_fs_info = NULL; |
660 | kfree(sbi); | 660 | kfree(sbi); |
661 | return -EINVAL; | 661 | return -EINVAL; |
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index 1652de1b6cb9..298f08be22d4 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c | |||
@@ -855,8 +855,7 @@ root_found: | |||
855 | if (opt.check == 'r') table++; | 855 | if (opt.check == 'r') table++; |
856 | s->s_root->d_op = &isofs_dentry_ops[table]; | 856 | s->s_root->d_op = &isofs_dentry_ops[table]; |
857 | 857 | ||
858 | if (opt.iocharset) | 858 | kfree(opt.iocharset); |
859 | kfree(opt.iocharset); | ||
860 | 859 | ||
861 | return 0; | 860 | return 0; |
862 | 861 | ||
@@ -895,8 +894,7 @@ out_unknown_format: | |||
895 | out_freebh: | 894 | out_freebh: |
896 | brelse(bh); | 895 | brelse(bh); |
897 | out_freesbi: | 896 | out_freesbi: |
898 | if (opt.iocharset) | 897 | kfree(opt.iocharset); |
899 | kfree(opt.iocharset); | ||
900 | kfree(sbi); | 898 | kfree(sbi); |
901 | s->s_fs_info = NULL; | 899 | s->s_fs_info = NULL; |
902 | return -EINVAL; | 900 | return -EINVAL; |
@@ -1164,8 +1162,7 @@ out_nomem: | |||
1164 | 1162 | ||
1165 | out_noread: | 1163 | out_noread: |
1166 | printk(KERN_INFO "ISOFS: unable to read i-node block %lu\n", block); | 1164 | printk(KERN_INFO "ISOFS: unable to read i-node block %lu\n", block); |
1167 | if (tmpde) | 1165 | kfree(tmpde); |
1168 | kfree(tmpde); | ||
1169 | return -EIO; | 1166 | return -EIO; |
1170 | 1167 | ||
1171 | out_toomany: | 1168 | out_toomany: |
@@ -1334,8 +1331,7 @@ static void isofs_read_inode(struct inode *inode) | |||
1334 | init_special_inode(inode, inode->i_mode, inode->i_rdev); | 1331 | init_special_inode(inode, inode->i_mode, inode->i_rdev); |
1335 | 1332 | ||
1336 | out: | 1333 | out: |
1337 | if (tmpde) | 1334 | kfree(tmpde); |
1338 | kfree(tmpde); | ||
1339 | if (bh) | 1335 | if (bh) |
1340 | brelse(bh); | 1336 | brelse(bh); |
1341 | return; | 1337 | return; |
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c index 2a3e310f79ef..002ad2bbc769 100644 --- a/fs/jbd/commit.c +++ b/fs/jbd/commit.c | |||
@@ -261,10 +261,8 @@ void journal_commit_transaction(journal_t *journal) | |||
261 | struct buffer_head *bh = jh2bh(jh); | 261 | struct buffer_head *bh = jh2bh(jh); |
262 | 262 | ||
263 | jbd_lock_bh_state(bh); | 263 | jbd_lock_bh_state(bh); |
264 | if (jh->b_committed_data) { | 264 | kfree(jh->b_committed_data); |
265 | kfree(jh->b_committed_data); | 265 | jh->b_committed_data = NULL; |
266 | jh->b_committed_data = NULL; | ||
267 | } | ||
268 | jbd_unlock_bh_state(bh); | 266 | jbd_unlock_bh_state(bh); |
269 | } | 267 | } |
270 | journal_refile_buffer(journal, jh); | 268 | journal_refile_buffer(journal, jh); |
diff --git a/fs/jbd/recovery.c b/fs/jbd/recovery.c index 103c34e4fb28..80d7f53fd0a7 100644 --- a/fs/jbd/recovery.c +++ b/fs/jbd/recovery.c | |||
@@ -210,7 +210,7 @@ do { \ | |||
210 | } while (0) | 210 | } while (0) |
211 | 211 | ||
212 | /** | 212 | /** |
213 | * int journal_recover(journal_t *journal) - recovers a on-disk journal | 213 | * journal_recover - recovers a on-disk journal |
214 | * @journal: the journal to recover | 214 | * @journal: the journal to recover |
215 | * | 215 | * |
216 | * The primary function for recovering the log contents when mounting a | 216 | * The primary function for recovering the log contents when mounting a |
@@ -266,7 +266,7 @@ int journal_recover(journal_t *journal) | |||
266 | } | 266 | } |
267 | 267 | ||
268 | /** | 268 | /** |
269 | * int journal_skip_recovery() - Start journal and wipe exiting records | 269 | * journal_skip_recovery - Start journal and wipe exiting records |
270 | * @journal: journal to startup | 270 | * @journal: journal to startup |
271 | * | 271 | * |
272 | * Locate any valid recovery information from the journal and set up the | 272 | * Locate any valid recovery information from the journal and set up the |
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index 13cb05bf6048..429f4b263cf1 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c | |||
@@ -227,8 +227,7 @@ repeat_locked: | |||
227 | spin_unlock(&transaction->t_handle_lock); | 227 | spin_unlock(&transaction->t_handle_lock); |
228 | spin_unlock(&journal->j_state_lock); | 228 | spin_unlock(&journal->j_state_lock); |
229 | out: | 229 | out: |
230 | if (new_transaction) | 230 | kfree(new_transaction); |
231 | kfree(new_transaction); | ||
232 | return ret; | 231 | return ret; |
233 | } | 232 | } |
234 | 233 | ||
@@ -725,8 +724,7 @@ done: | |||
725 | journal_cancel_revoke(handle, jh); | 724 | journal_cancel_revoke(handle, jh); |
726 | 725 | ||
727 | out: | 726 | out: |
728 | if (frozen_buffer) | 727 | kfree(frozen_buffer); |
729 | kfree(frozen_buffer); | ||
730 | 728 | ||
731 | JBUFFER_TRACE(jh, "exit"); | 729 | JBUFFER_TRACE(jh, "exit"); |
732 | return error; | 730 | return error; |
@@ -905,8 +903,7 @@ repeat: | |||
905 | jbd_unlock_bh_state(bh); | 903 | jbd_unlock_bh_state(bh); |
906 | out: | 904 | out: |
907 | journal_put_journal_head(jh); | 905 | journal_put_journal_head(jh); |
908 | if (committed_data) | 906 | kfree(committed_data); |
909 | kfree(committed_data); | ||
910 | return err; | 907 | return err; |
911 | } | 908 | } |
912 | 909 | ||
diff --git a/fs/jffs/intrep.c b/fs/jffs/intrep.c index 27f199e94cfc..b2e95421d932 100644 --- a/fs/jffs/intrep.c +++ b/fs/jffs/intrep.c | |||
@@ -462,7 +462,7 @@ jffs_checksum_flash(struct mtd_info *mtd, loff_t start, int size, __u32 *result) | |||
462 | } | 462 | } |
463 | 463 | ||
464 | /* Free read buffer */ | 464 | /* Free read buffer */ |
465 | kfree (read_buf); | 465 | kfree(read_buf); |
466 | 466 | ||
467 | /* Return result */ | 467 | /* Return result */ |
468 | D3(printk("checksum result: 0x%08x\n", sum)); | 468 | D3(printk("checksum result: 0x%08x\n", sum)); |
@@ -1011,12 +1011,12 @@ jffs_scan_flash(struct jffs_control *c) | |||
1011 | offset , fmc->sector_size); | 1011 | offset , fmc->sector_size); |
1012 | 1012 | ||
1013 | flash_safe_release(fmc->mtd); | 1013 | flash_safe_release(fmc->mtd); |
1014 | kfree (read_buf); | 1014 | kfree(read_buf); |
1015 | return -1; /* bad, bad, bad! */ | 1015 | return -1; /* bad, bad, bad! */ |
1016 | 1016 | ||
1017 | } | 1017 | } |
1018 | flash_safe_release(fmc->mtd); | 1018 | flash_safe_release(fmc->mtd); |
1019 | kfree (read_buf); | 1019 | kfree(read_buf); |
1020 | 1020 | ||
1021 | return -EAGAIN; /* erased offending sector. Try mount one more time please. */ | 1021 | return -EAGAIN; /* erased offending sector. Try mount one more time please. */ |
1022 | } | 1022 | } |
@@ -1112,7 +1112,7 @@ jffs_scan_flash(struct jffs_control *c) | |||
1112 | if (!node) { | 1112 | if (!node) { |
1113 | if (!(node = jffs_alloc_node())) { | 1113 | if (!(node = jffs_alloc_node())) { |
1114 | /* Free read buffer */ | 1114 | /* Free read buffer */ |
1115 | kfree (read_buf); | 1115 | kfree(read_buf); |
1116 | 1116 | ||
1117 | /* Release the flash device */ | 1117 | /* Release the flash device */ |
1118 | flash_safe_release(fmc->mtd); | 1118 | flash_safe_release(fmc->mtd); |
@@ -1269,7 +1269,7 @@ jffs_scan_flash(struct jffs_control *c) | |||
1269 | DJM(no_jffs_node--); | 1269 | DJM(no_jffs_node--); |
1270 | 1270 | ||
1271 | /* Free read buffer */ | 1271 | /* Free read buffer */ |
1272 | kfree (read_buf); | 1272 | kfree(read_buf); |
1273 | 1273 | ||
1274 | /* Release the flash device */ | 1274 | /* Release the flash device */ |
1275 | flash_safe_release(fmc->mtd); | 1275 | flash_safe_release(fmc->mtd); |
@@ -1296,7 +1296,7 @@ jffs_scan_flash(struct jffs_control *c) | |||
1296 | flash_safe_release(fmc->flash_part); | 1296 | flash_safe_release(fmc->flash_part); |
1297 | 1297 | ||
1298 | /* Free read buffer */ | 1298 | /* Free read buffer */ |
1299 | kfree (read_buf); | 1299 | kfree(read_buf); |
1300 | 1300 | ||
1301 | return -ENOMEM; | 1301 | return -ENOMEM; |
1302 | } | 1302 | } |
@@ -1324,7 +1324,7 @@ jffs_scan_flash(struct jffs_control *c) | |||
1324 | jffs_build_end(fmc); | 1324 | jffs_build_end(fmc); |
1325 | 1325 | ||
1326 | /* Free read buffer */ | 1326 | /* Free read buffer */ |
1327 | kfree (read_buf); | 1327 | kfree(read_buf); |
1328 | 1328 | ||
1329 | if(!num_free_space){ | 1329 | if(!num_free_space){ |
1330 | printk(KERN_WARNING "jffs_scan_flash(): Did not find even a single " | 1330 | printk(KERN_WARNING "jffs_scan_flash(): Did not find even a single " |
@@ -1747,9 +1747,7 @@ jffs_find_child(struct jffs_file *dir, const char *name, int len) | |||
1747 | } | 1747 | } |
1748 | printk("jffs_find_child(): Didn't find the file \"%s\".\n", | 1748 | printk("jffs_find_child(): Didn't find the file \"%s\".\n", |
1749 | (copy ? copy : "")); | 1749 | (copy ? copy : "")); |
1750 | if (copy) { | 1750 | kfree(copy); |
1751 | kfree(copy); | ||
1752 | } | ||
1753 | }); | 1751 | }); |
1754 | 1752 | ||
1755 | return f; | 1753 | return f; |
diff --git a/fs/jffs2/Makefile b/fs/jffs2/Makefile index f1afe681ecd6..77dc5561a04e 100644 --- a/fs/jffs2/Makefile +++ b/fs/jffs2/Makefile | |||
@@ -1,7 +1,7 @@ | |||
1 | # | 1 | # |
2 | # Makefile for the Linux Journalling Flash File System v2 (JFFS2) | 2 | # Makefile for the Linux Journalling Flash File System v2 (JFFS2) |
3 | # | 3 | # |
4 | # $Id: Makefile.common,v 1.9 2005/02/09 09:23:53 pavlov Exp $ | 4 | # $Id: Makefile.common,v 1.11 2005/09/07 08:34:53 havasi Exp $ |
5 | # | 5 | # |
6 | 6 | ||
7 | obj-$(CONFIG_JFFS2_FS) += jffs2.o | 7 | obj-$(CONFIG_JFFS2_FS) += jffs2.o |
@@ -9,9 +9,10 @@ obj-$(CONFIG_JFFS2_FS) += jffs2.o | |||
9 | jffs2-y := compr.o dir.o file.o ioctl.o nodelist.o malloc.o | 9 | jffs2-y := compr.o dir.o file.o ioctl.o nodelist.o malloc.o |
10 | jffs2-y += read.o nodemgmt.o readinode.o write.o scan.o gc.o | 10 | jffs2-y += read.o nodemgmt.o readinode.o write.o scan.o gc.o |
11 | jffs2-y += symlink.o build.o erase.o background.o fs.o writev.o | 11 | jffs2-y += symlink.o build.o erase.o background.o fs.o writev.o |
12 | jffs2-y += super.o | 12 | jffs2-y += super.o debug.o |
13 | 13 | ||
14 | jffs2-$(CONFIG_JFFS2_FS_WRITEBUFFER) += wbuf.o | 14 | jffs2-$(CONFIG_JFFS2_FS_WRITEBUFFER) += wbuf.o |
15 | jffs2-$(CONFIG_JFFS2_RUBIN) += compr_rubin.o | 15 | jffs2-$(CONFIG_JFFS2_RUBIN) += compr_rubin.o |
16 | jffs2-$(CONFIG_JFFS2_RTIME) += compr_rtime.o | 16 | jffs2-$(CONFIG_JFFS2_RTIME) += compr_rtime.o |
17 | jffs2-$(CONFIG_JFFS2_ZLIB) += compr_zlib.o | 17 | jffs2-$(CONFIG_JFFS2_ZLIB) += compr_zlib.o |
18 | jffs2-$(CONFIG_JFFS2_SUMMARY) += summary.o | ||
diff --git a/fs/jffs2/TODO b/fs/jffs2/TODO index 2bff82fd221f..d0e23b26fa50 100644 --- a/fs/jffs2/TODO +++ b/fs/jffs2/TODO | |||
@@ -1,5 +1,11 @@ | |||
1 | $Id: TODO,v 1.10 2002/09/09 16:31:21 dwmw2 Exp $ | 1 | $Id: TODO,v 1.18 2005/09/22 11:24:56 dedekind Exp $ |
2 | 2 | ||
3 | - support asynchronous operation -- add a per-fs 'reserved_space' count, | ||
4 | let each outstanding write reserve the _maximum_ amount of physical | ||
5 | space it could take. Let GC flush the outstanding writes because the | ||
6 | reservations will necessarily be pessimistic. With this we could even | ||
7 | do shared writable mmap, if we can have a fs hook for do_wp_page() to | ||
8 | make the reservation. | ||
3 | - disable compression in commit_write()? | 9 | - disable compression in commit_write()? |
4 | - fine-tune the allocation / GC thresholds | 10 | - fine-tune the allocation / GC thresholds |
5 | - chattr support - turning on/off and tuning compression per-inode | 11 | - chattr support - turning on/off and tuning compression per-inode |
@@ -11,26 +17,15 @@ $Id: TODO,v 1.10 2002/09/09 16:31:21 dwmw2 Exp $ | |||
11 | - test, test, test | 17 | - test, test, test |
12 | 18 | ||
13 | - NAND flash support: | 19 | - NAND flash support: |
14 | - flush_wbuf using GC to fill it, don't just pad. | 20 | - almost done :) |
15 | - Deal with write errors. Data don't get lost - we just have to write | 21 | - use bad block check instead of the hardwired byte check |
16 | the affected node(s) out again somewhere else. | ||
17 | - make fsync flush only if actually required | ||
18 | - make sys_sync() work. | ||
19 | - reboot notifier | ||
20 | - timed flush of old wbuf | ||
21 | - fix magical second arg of jffs2_flush_wbuf(). Split into two or more functions instead. | ||
22 | |||
23 | 22 | ||
24 | - Optimisations: | 23 | - Optimisations: |
25 | - Stop GC from decompressing and immediately recompressing nodes which could | 24 | - Split writes so they go to two separate blocks rather than just c->nextblock. |
26 | just be copied intact. (We now keep track of REF_PRISTINE flag. Easy now.) | 25 | By writing _new_ nodes to one block, and garbage-collected REF_PRISTINE |
27 | - Furthermore, in the case where it could be copied intact we don't even need | 26 | nodes to a different one, we can separate clean nodes from those which |
28 | to call iget() for it -- if we use (raw_node_raw->flash_offset & 2) as a flag | 27 | are likely to become dirty, and end up with blocks which are each far |
29 | to show a node can be copied intact and it's _not_ in icache, we could just do | 28 | closer to 100% or 0% clean, hence speeding up later GC progress dramatically. |
30 | it, fix up the next_in_ino list and move on. We would need a way to find out | ||
31 | _whether_ it's in icache though -- if it's in icache we also need to do the | ||
32 | fragment lists, etc. P'raps a flag or pointer in the jffs2_inode_cache could | ||
33 | help. (We have half of this now.) | ||
34 | - Stop keeping name in-core with struct jffs2_full_dirent. If we keep the hash in | 29 | - Stop keeping name in-core with struct jffs2_full_dirent. If we keep the hash in |
35 | the full dirent, we only need to go to the flash in lookup() when we think we've | 30 | the full dirent, we only need to go to the flash in lookup() when we think we've |
36 | got a match, and in readdir(). | 31 | got a match, and in readdir(). |
@@ -38,3 +33,8 @@ $Id: TODO,v 1.10 2002/09/09 16:31:21 dwmw2 Exp $ | |||
38 | - Remove totlen from jffs2_raw_node_ref? Need to have totlen passed into | 33 | - Remove totlen from jffs2_raw_node_ref? Need to have totlen passed into |
39 | jffs2_mark_node_obsolete(). Can all callers work it out? | 34 | jffs2_mark_node_obsolete(). Can all callers work it out? |
40 | - Remove size from jffs2_raw_node_frag. | 35 | - Remove size from jffs2_raw_node_frag. |
36 | |||
37 | dedekind: | ||
38 | 1. __jffs2_flush_wbuf() has a strange 'pad' parameter. Eliminate. | ||
39 | 2. get_sb()->build_fs()->scan() path... Why get_sb() removes scan()'s crap in | ||
40 | case of failure? scan() does not clean everything. Fix. | ||
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c index 8210ac16a368..7b77a9541125 100644 --- a/fs/jffs2/background.c +++ b/fs/jffs2/background.c | |||
@@ -51,7 +51,7 @@ int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c) | |||
51 | D1(printk(KERN_DEBUG "JFFS2: Garbage collect thread is pid %d\n", pid)); | 51 | D1(printk(KERN_DEBUG "JFFS2: Garbage collect thread is pid %d\n", pid)); |
52 | wait_for_completion(&c->gc_thread_start); | 52 | wait_for_completion(&c->gc_thread_start); |
53 | } | 53 | } |
54 | 54 | ||
55 | return ret; | 55 | return ret; |
56 | } | 56 | } |
57 | 57 | ||
@@ -101,7 +101,7 @@ static int jffs2_garbage_collect_thread(void *_c) | |||
101 | 101 | ||
102 | cond_resched(); | 102 | cond_resched(); |
103 | 103 | ||
104 | /* Put_super will send a SIGKILL and then wait on the sem. | 104 | /* Put_super will send a SIGKILL and then wait on the sem. |
105 | */ | 105 | */ |
106 | while (signal_pending(current)) { | 106 | while (signal_pending(current)) { |
107 | siginfo_t info; | 107 | siginfo_t info; |
diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c index 97dc39796e2c..fff108bb118b 100644 --- a/fs/jffs2/build.c +++ b/fs/jffs2/build.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: build.c,v 1.71 2005/07/12 16:37:08 dedekind Exp $ | 10 | * $Id: build.c,v 1.85 2005/11/07 11:14:38 gleixner Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -18,7 +18,8 @@ | |||
18 | #include <linux/mtd/mtd.h> | 18 | #include <linux/mtd/mtd.h> |
19 | #include "nodelist.h" | 19 | #include "nodelist.h" |
20 | 20 | ||
21 | static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *, struct jffs2_inode_cache *, struct jffs2_full_dirent **); | 21 | static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *, |
22 | struct jffs2_inode_cache *, struct jffs2_full_dirent **); | ||
22 | 23 | ||
23 | static inline struct jffs2_inode_cache * | 24 | static inline struct jffs2_inode_cache * |
24 | first_inode_chain(int *i, struct jffs2_sb_info *c) | 25 | first_inode_chain(int *i, struct jffs2_sb_info *c) |
@@ -46,11 +47,12 @@ next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c) | |||
46 | ic = next_inode(&i, ic, (c))) | 47 | ic = next_inode(&i, ic, (c))) |
47 | 48 | ||
48 | 49 | ||
49 | static inline void jffs2_build_inode_pass1(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) | 50 | static inline void jffs2_build_inode_pass1(struct jffs2_sb_info *c, |
51 | struct jffs2_inode_cache *ic) | ||
50 | { | 52 | { |
51 | struct jffs2_full_dirent *fd; | 53 | struct jffs2_full_dirent *fd; |
52 | 54 | ||
53 | D1(printk(KERN_DEBUG "jffs2_build_inode building directory inode #%u\n", ic->ino)); | 55 | dbg_fsbuild("building directory inode #%u\n", ic->ino); |
54 | 56 | ||
55 | /* For each child, increase nlink */ | 57 | /* For each child, increase nlink */ |
56 | for(fd = ic->scan_dents; fd; fd = fd->next) { | 58 | for(fd = ic->scan_dents; fd; fd = fd->next) { |
@@ -58,26 +60,23 @@ static inline void jffs2_build_inode_pass1(struct jffs2_sb_info *c, struct jffs2 | |||
58 | if (!fd->ino) | 60 | if (!fd->ino) |
59 | continue; | 61 | continue; |
60 | 62 | ||
61 | /* XXX: Can get high latency here with huge directories */ | 63 | /* we can get high latency here with huge directories */ |
62 | 64 | ||
63 | child_ic = jffs2_get_ino_cache(c, fd->ino); | 65 | child_ic = jffs2_get_ino_cache(c, fd->ino); |
64 | if (!child_ic) { | 66 | if (!child_ic) { |
65 | printk(KERN_NOTICE "Eep. Child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n", | 67 | dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n", |
66 | fd->name, fd->ino, ic->ino); | 68 | fd->name, fd->ino, ic->ino); |
67 | jffs2_mark_node_obsolete(c, fd->raw); | 69 | jffs2_mark_node_obsolete(c, fd->raw); |
68 | continue; | 70 | continue; |
69 | } | 71 | } |
70 | 72 | ||
71 | if (child_ic->nlink++ && fd->type == DT_DIR) { | 73 | if (child_ic->nlink++ && fd->type == DT_DIR) { |
72 | printk(KERN_NOTICE "Child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n", fd->name, fd->ino, ic->ino); | 74 | JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n", |
73 | if (fd->ino == 1 && ic->ino == 1) { | 75 | fd->name, fd->ino, ic->ino); |
74 | printk(KERN_NOTICE "This is mostly harmless, and probably caused by creating a JFFS2 image\n"); | 76 | /* TODO: What do we do about it? */ |
75 | printk(KERN_NOTICE "using a buggy version of mkfs.jffs2. Use at least v1.17.\n"); | ||
76 | } | ||
77 | /* What do we do about it? */ | ||
78 | } | 77 | } |
79 | D1(printk(KERN_DEBUG "Increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino)); | 78 | dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino); |
80 | /* Can't free them. We might need them in pass 2 */ | 79 | /* Can't free scan_dents so far. We might need them in pass 2 */ |
81 | } | 80 | } |
82 | } | 81 | } |
83 | 82 | ||
@@ -94,6 +93,8 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c) | |||
94 | struct jffs2_full_dirent *fd; | 93 | struct jffs2_full_dirent *fd; |
95 | struct jffs2_full_dirent *dead_fds = NULL; | 94 | struct jffs2_full_dirent *dead_fds = NULL; |
96 | 95 | ||
96 | dbg_fsbuild("build FS data structures\n"); | ||
97 | |||
97 | /* First, scan the medium and build all the inode caches with | 98 | /* First, scan the medium and build all the inode caches with |
98 | lists of physical nodes */ | 99 | lists of physical nodes */ |
99 | 100 | ||
@@ -103,60 +104,54 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c) | |||
103 | if (ret) | 104 | if (ret) |
104 | goto exit; | 105 | goto exit; |
105 | 106 | ||
106 | D1(printk(KERN_DEBUG "Scanned flash completely\n")); | 107 | dbg_fsbuild("scanned flash completely\n"); |
107 | D2(jffs2_dump_block_lists(c)); | 108 | jffs2_dbg_dump_block_lists_nolock(c); |
108 | 109 | ||
110 | dbg_fsbuild("pass 1 starting\n"); | ||
109 | c->flags |= JFFS2_SB_FLAG_BUILDING; | 111 | c->flags |= JFFS2_SB_FLAG_BUILDING; |
110 | /* Now scan the directory tree, increasing nlink according to every dirent found. */ | 112 | /* Now scan the directory tree, increasing nlink according to every dirent found. */ |
111 | for_each_inode(i, c, ic) { | 113 | for_each_inode(i, c, ic) { |
112 | D1(printk(KERN_DEBUG "Pass 1: ino #%u\n", ic->ino)); | ||
113 | |||
114 | D1(BUG_ON(ic->ino > c->highest_ino)); | ||
115 | |||
116 | if (ic->scan_dents) { | 114 | if (ic->scan_dents) { |
117 | jffs2_build_inode_pass1(c, ic); | 115 | jffs2_build_inode_pass1(c, ic); |
118 | cond_resched(); | 116 | cond_resched(); |
119 | } | 117 | } |
120 | } | 118 | } |
121 | 119 | ||
122 | D1(printk(KERN_DEBUG "Pass 1 complete\n")); | 120 | dbg_fsbuild("pass 1 complete\n"); |
123 | 121 | ||
124 | /* Next, scan for inodes with nlink == 0 and remove them. If | 122 | /* Next, scan for inodes with nlink == 0 and remove them. If |
125 | they were directories, then decrement the nlink of their | 123 | they were directories, then decrement the nlink of their |
126 | children too, and repeat the scan. As that's going to be | 124 | children too, and repeat the scan. As that's going to be |
127 | a fairly uncommon occurrence, it's not so evil to do it this | 125 | a fairly uncommon occurrence, it's not so evil to do it this |
128 | way. Recursion bad. */ | 126 | way. Recursion bad. */ |
129 | D1(printk(KERN_DEBUG "Pass 2 starting\n")); | 127 | dbg_fsbuild("pass 2 starting\n"); |
130 | 128 | ||
131 | for_each_inode(i, c, ic) { | 129 | for_each_inode(i, c, ic) { |
132 | D1(printk(KERN_DEBUG "Pass 2: ino #%u, nlink %d, ic %p, nodes %p\n", ic->ino, ic->nlink, ic, ic->nodes)); | ||
133 | if (ic->nlink) | 130 | if (ic->nlink) |
134 | continue; | 131 | continue; |
135 | 132 | ||
136 | jffs2_build_remove_unlinked_inode(c, ic, &dead_fds); | 133 | jffs2_build_remove_unlinked_inode(c, ic, &dead_fds); |
137 | cond_resched(); | 134 | cond_resched(); |
138 | } | 135 | } |
139 | 136 | ||
140 | D1(printk(KERN_DEBUG "Pass 2a starting\n")); | 137 | dbg_fsbuild("pass 2a starting\n"); |
141 | 138 | ||
142 | while (dead_fds) { | 139 | while (dead_fds) { |
143 | fd = dead_fds; | 140 | fd = dead_fds; |
144 | dead_fds = fd->next; | 141 | dead_fds = fd->next; |
145 | 142 | ||
146 | ic = jffs2_get_ino_cache(c, fd->ino); | 143 | ic = jffs2_get_ino_cache(c, fd->ino); |
147 | D1(printk(KERN_DEBUG "Removing dead_fd ino #%u (\"%s\"), ic at %p\n", fd->ino, fd->name, ic)); | ||
148 | 144 | ||
149 | if (ic) | 145 | if (ic) |
150 | jffs2_build_remove_unlinked_inode(c, ic, &dead_fds); | 146 | jffs2_build_remove_unlinked_inode(c, ic, &dead_fds); |
151 | jffs2_free_full_dirent(fd); | 147 | jffs2_free_full_dirent(fd); |
152 | } | 148 | } |
153 | 149 | ||
154 | D1(printk(KERN_DEBUG "Pass 2 complete\n")); | 150 | dbg_fsbuild("pass 2a complete\n"); |
155 | 151 | dbg_fsbuild("freeing temporary data structures\n"); | |
152 | |||
156 | /* Finally, we can scan again and free the dirent structs */ | 153 | /* Finally, we can scan again and free the dirent structs */ |
157 | for_each_inode(i, c, ic) { | 154 | for_each_inode(i, c, ic) { |
158 | D1(printk(KERN_DEBUG "Pass 3: ino #%u, ic %p, nodes %p\n", ic->ino, ic, ic->nodes)); | ||
159 | |||
160 | while(ic->scan_dents) { | 155 | while(ic->scan_dents) { |
161 | fd = ic->scan_dents; | 156 | fd = ic->scan_dents; |
162 | ic->scan_dents = fd->next; | 157 | ic->scan_dents = fd->next; |
@@ -166,9 +161,8 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c) | |||
166 | cond_resched(); | 161 | cond_resched(); |
167 | } | 162 | } |
168 | c->flags &= ~JFFS2_SB_FLAG_BUILDING; | 163 | c->flags &= ~JFFS2_SB_FLAG_BUILDING; |
169 | 164 | ||
170 | D1(printk(KERN_DEBUG "Pass 3 complete\n")); | 165 | dbg_fsbuild("FS build complete\n"); |
171 | D2(jffs2_dump_block_lists(c)); | ||
172 | 166 | ||
173 | /* Rotate the lists by some number to ensure wear levelling */ | 167 | /* Rotate the lists by some number to ensure wear levelling */ |
174 | jffs2_rotate_lists(c); | 168 | jffs2_rotate_lists(c); |
@@ -189,24 +183,26 @@ exit: | |||
189 | return ret; | 183 | return ret; |
190 | } | 184 | } |
191 | 185 | ||
192 | static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, struct jffs2_full_dirent **dead_fds) | 186 | static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c, |
187 | struct jffs2_inode_cache *ic, | ||
188 | struct jffs2_full_dirent **dead_fds) | ||
193 | { | 189 | { |
194 | struct jffs2_raw_node_ref *raw; | 190 | struct jffs2_raw_node_ref *raw; |
195 | struct jffs2_full_dirent *fd; | 191 | struct jffs2_full_dirent *fd; |
196 | 192 | ||
197 | D1(printk(KERN_DEBUG "JFFS2: Removing ino #%u with nlink == zero.\n", ic->ino)); | 193 | dbg_fsbuild("removing ino #%u with nlink == zero.\n", ic->ino); |
198 | 194 | ||
199 | raw = ic->nodes; | 195 | raw = ic->nodes; |
200 | while (raw != (void *)ic) { | 196 | while (raw != (void *)ic) { |
201 | struct jffs2_raw_node_ref *next = raw->next_in_ino; | 197 | struct jffs2_raw_node_ref *next = raw->next_in_ino; |
202 | D1(printk(KERN_DEBUG "obsoleting node at 0x%08x\n", ref_offset(raw))); | 198 | dbg_fsbuild("obsoleting node at 0x%08x\n", ref_offset(raw)); |
203 | jffs2_mark_node_obsolete(c, raw); | 199 | jffs2_mark_node_obsolete(c, raw); |
204 | raw = next; | 200 | raw = next; |
205 | } | 201 | } |
206 | 202 | ||
207 | if (ic->scan_dents) { | 203 | if (ic->scan_dents) { |
208 | int whinged = 0; | 204 | int whinged = 0; |
209 | D1(printk(KERN_DEBUG "Inode #%u was a directory which may have children...\n", ic->ino)); | 205 | dbg_fsbuild("inode #%u was a directory which may have children...\n", ic->ino); |
210 | 206 | ||
211 | while(ic->scan_dents) { | 207 | while(ic->scan_dents) { |
212 | struct jffs2_inode_cache *child_ic; | 208 | struct jffs2_inode_cache *child_ic; |
@@ -216,45 +212,43 @@ static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c, struct jf | |||
216 | 212 | ||
217 | if (!fd->ino) { | 213 | if (!fd->ino) { |
218 | /* It's a deletion dirent. Ignore it */ | 214 | /* It's a deletion dirent. Ignore it */ |
219 | D1(printk(KERN_DEBUG "Child \"%s\" is a deletion dirent, skipping...\n", fd->name)); | 215 | dbg_fsbuild("child \"%s\" is a deletion dirent, skipping...\n", fd->name); |
220 | jffs2_free_full_dirent(fd); | 216 | jffs2_free_full_dirent(fd); |
221 | continue; | 217 | continue; |
222 | } | 218 | } |
223 | if (!whinged) { | 219 | if (!whinged) |
224 | whinged = 1; | 220 | whinged = 1; |
225 | printk(KERN_NOTICE "Inode #%u was a directory with children - removing those too...\n", ic->ino); | ||
226 | } | ||
227 | 221 | ||
228 | D1(printk(KERN_DEBUG "Removing child \"%s\", ino #%u\n", | 222 | dbg_fsbuild("removing child \"%s\", ino #%u\n", fd->name, fd->ino); |
229 | fd->name, fd->ino)); | 223 | |
230 | |||
231 | child_ic = jffs2_get_ino_cache(c, fd->ino); | 224 | child_ic = jffs2_get_ino_cache(c, fd->ino); |
232 | if (!child_ic) { | 225 | if (!child_ic) { |
233 | printk(KERN_NOTICE "Cannot remove child \"%s\", ino #%u, because it doesn't exist\n", fd->name, fd->ino); | 226 | dbg_fsbuild("cannot remove child \"%s\", ino #%u, because it doesn't exist\n", |
227 | fd->name, fd->ino); | ||
234 | jffs2_free_full_dirent(fd); | 228 | jffs2_free_full_dirent(fd); |
235 | continue; | 229 | continue; |
236 | } | 230 | } |
237 | 231 | ||
238 | /* Reduce nlink of the child. If it's now zero, stick it on the | 232 | /* Reduce nlink of the child. If it's now zero, stick it on the |
239 | dead_fds list to be cleaned up later. Else just free the fd */ | 233 | dead_fds list to be cleaned up later. Else just free the fd */ |
240 | 234 | ||
241 | child_ic->nlink--; | 235 | child_ic->nlink--; |
242 | 236 | ||
243 | if (!child_ic->nlink) { | 237 | if (!child_ic->nlink) { |
244 | D1(printk(KERN_DEBUG "Inode #%u (\"%s\") has now got zero nlink. Adding to dead_fds list.\n", | 238 | dbg_fsbuild("inode #%u (\"%s\") has now got zero nlink, adding to dead_fds list.\n", |
245 | fd->ino, fd->name)); | 239 | fd->ino, fd->name); |
246 | fd->next = *dead_fds; | 240 | fd->next = *dead_fds; |
247 | *dead_fds = fd; | 241 | *dead_fds = fd; |
248 | } else { | 242 | } else { |
249 | D1(printk(KERN_DEBUG "Inode #%u (\"%s\") has now got nlink %d. Ignoring.\n", | 243 | dbg_fsbuild("inode #%u (\"%s\") has now got nlink %d. Ignoring.\n", |
250 | fd->ino, fd->name, child_ic->nlink)); | 244 | fd->ino, fd->name, child_ic->nlink); |
251 | jffs2_free_full_dirent(fd); | 245 | jffs2_free_full_dirent(fd); |
252 | } | 246 | } |
253 | } | 247 | } |
254 | } | 248 | } |
255 | 249 | ||
256 | /* | 250 | /* |
257 | We don't delete the inocache from the hash list and free it yet. | 251 | We don't delete the inocache from the hash list and free it yet. |
258 | The erase code will do that, when all the nodes are completely gone. | 252 | The erase code will do that, when all the nodes are completely gone. |
259 | */ | 253 | */ |
260 | } | 254 | } |
@@ -268,7 +262,7 @@ static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c) | |||
268 | because there's not enough free space... */ | 262 | because there's not enough free space... */ |
269 | c->resv_blocks_deletion = 2; | 263 | c->resv_blocks_deletion = 2; |
270 | 264 | ||
271 | /* Be conservative about how much space we need before we allow writes. | 265 | /* Be conservative about how much space we need before we allow writes. |
272 | On top of that which is required for deletia, require an extra 2% | 266 | On top of that which is required for deletia, require an extra 2% |
273 | of the medium to be available, for overhead caused by nodes being | 267 | of the medium to be available, for overhead caused by nodes being |
274 | split across blocks, etc. */ | 268 | split across blocks, etc. */ |
@@ -283,7 +277,7 @@ static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c) | |||
283 | 277 | ||
284 | c->resv_blocks_gctrigger = c->resv_blocks_write + 1; | 278 | c->resv_blocks_gctrigger = c->resv_blocks_write + 1; |
285 | 279 | ||
286 | /* When do we allow garbage collection to merge nodes to make | 280 | /* When do we allow garbage collection to merge nodes to make |
287 | long-term progress at the expense of short-term space exhaustion? */ | 281 | long-term progress at the expense of short-term space exhaustion? */ |
288 | c->resv_blocks_gcmerge = c->resv_blocks_deletion + 1; | 282 | c->resv_blocks_gcmerge = c->resv_blocks_deletion + 1; |
289 | 283 | ||
@@ -295,45 +289,45 @@ static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c) | |||
295 | trying to GC to make more space. It'll be a fruitless task */ | 289 | trying to GC to make more space. It'll be a fruitless task */ |
296 | c->nospc_dirty_size = c->sector_size + (c->flash_size / 100); | 290 | c->nospc_dirty_size = c->sector_size + (c->flash_size / 100); |
297 | 291 | ||
298 | D1(printk(KERN_DEBUG "JFFS2 trigger levels (size %d KiB, block size %d KiB, %d blocks)\n", | 292 | dbg_fsbuild("JFFS2 trigger levels (size %d KiB, block size %d KiB, %d blocks)\n", |
299 | c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks)); | 293 | c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks); |
300 | D1(printk(KERN_DEBUG "Blocks required to allow deletion: %d (%d KiB)\n", | 294 | dbg_fsbuild("Blocks required to allow deletion: %d (%d KiB)\n", |
301 | c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024)); | 295 | c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024); |
302 | D1(printk(KERN_DEBUG "Blocks required to allow writes: %d (%d KiB)\n", | 296 | dbg_fsbuild("Blocks required to allow writes: %d (%d KiB)\n", |
303 | c->resv_blocks_write, c->resv_blocks_write*c->sector_size/1024)); | 297 | c->resv_blocks_write, c->resv_blocks_write*c->sector_size/1024); |
304 | D1(printk(KERN_DEBUG "Blocks required to quiesce GC thread: %d (%d KiB)\n", | 298 | dbg_fsbuild("Blocks required to quiesce GC thread: %d (%d KiB)\n", |
305 | c->resv_blocks_gctrigger, c->resv_blocks_gctrigger*c->sector_size/1024)); | 299 | c->resv_blocks_gctrigger, c->resv_blocks_gctrigger*c->sector_size/1024); |
306 | D1(printk(KERN_DEBUG "Blocks required to allow GC merges: %d (%d KiB)\n", | 300 | dbg_fsbuild("Blocks required to allow GC merges: %d (%d KiB)\n", |
307 | c->resv_blocks_gcmerge, c->resv_blocks_gcmerge*c->sector_size/1024)); | 301 | c->resv_blocks_gcmerge, c->resv_blocks_gcmerge*c->sector_size/1024); |
308 | D1(printk(KERN_DEBUG "Blocks required to GC bad blocks: %d (%d KiB)\n", | 302 | dbg_fsbuild("Blocks required to GC bad blocks: %d (%d KiB)\n", |
309 | c->resv_blocks_gcbad, c->resv_blocks_gcbad*c->sector_size/1024)); | 303 | c->resv_blocks_gcbad, c->resv_blocks_gcbad*c->sector_size/1024); |
310 | D1(printk(KERN_DEBUG "Amount of dirty space required to GC: %d bytes\n", | 304 | dbg_fsbuild("Amount of dirty space required to GC: %d bytes\n", |
311 | c->nospc_dirty_size)); | 305 | c->nospc_dirty_size); |
312 | } | 306 | } |
313 | 307 | ||
314 | int jffs2_do_mount_fs(struct jffs2_sb_info *c) | 308 | int jffs2_do_mount_fs(struct jffs2_sb_info *c) |
315 | { | 309 | { |
310 | int ret; | ||
316 | int i; | 311 | int i; |
312 | int size; | ||
317 | 313 | ||
318 | c->free_size = c->flash_size; | 314 | c->free_size = c->flash_size; |
319 | c->nr_blocks = c->flash_size / c->sector_size; | 315 | c->nr_blocks = c->flash_size / c->sector_size; |
320 | if (c->mtd->flags & MTD_NO_VIRTBLOCKS) | 316 | size = sizeof(struct jffs2_eraseblock) * c->nr_blocks; |
321 | c->blocks = vmalloc(sizeof(struct jffs2_eraseblock) * c->nr_blocks); | 317 | #ifndef __ECOS |
318 | if (jffs2_blocks_use_vmalloc(c)) | ||
319 | c->blocks = vmalloc(size); | ||
322 | else | 320 | else |
323 | c->blocks = kmalloc(sizeof(struct jffs2_eraseblock) * c->nr_blocks, GFP_KERNEL); | 321 | #endif |
322 | c->blocks = kmalloc(size, GFP_KERNEL); | ||
324 | if (!c->blocks) | 323 | if (!c->blocks) |
325 | return -ENOMEM; | 324 | return -ENOMEM; |
325 | |||
326 | memset(c->blocks, 0, size); | ||
326 | for (i=0; i<c->nr_blocks; i++) { | 327 | for (i=0; i<c->nr_blocks; i++) { |
327 | INIT_LIST_HEAD(&c->blocks[i].list); | 328 | INIT_LIST_HEAD(&c->blocks[i].list); |
328 | c->blocks[i].offset = i * c->sector_size; | 329 | c->blocks[i].offset = i * c->sector_size; |
329 | c->blocks[i].free_size = c->sector_size; | 330 | c->blocks[i].free_size = c->sector_size; |
330 | c->blocks[i].dirty_size = 0; | ||
331 | c->blocks[i].wasted_size = 0; | ||
332 | c->blocks[i].unchecked_size = 0; | ||
333 | c->blocks[i].used_size = 0; | ||
334 | c->blocks[i].first_node = NULL; | ||
335 | c->blocks[i].last_node = NULL; | ||
336 | c->blocks[i].bad_count = 0; | ||
337 | } | 331 | } |
338 | 332 | ||
339 | INIT_LIST_HEAD(&c->clean_list); | 333 | INIT_LIST_HEAD(&c->clean_list); |
@@ -348,16 +342,23 @@ int jffs2_do_mount_fs(struct jffs2_sb_info *c) | |||
348 | INIT_LIST_HEAD(&c->bad_list); | 342 | INIT_LIST_HEAD(&c->bad_list); |
349 | INIT_LIST_HEAD(&c->bad_used_list); | 343 | INIT_LIST_HEAD(&c->bad_used_list); |
350 | c->highest_ino = 1; | 344 | c->highest_ino = 1; |
345 | c->summary = NULL; | ||
346 | |||
347 | ret = jffs2_sum_init(c); | ||
348 | if (ret) | ||
349 | return ret; | ||
351 | 350 | ||
352 | if (jffs2_build_filesystem(c)) { | 351 | if (jffs2_build_filesystem(c)) { |
353 | D1(printk(KERN_DEBUG "build_fs failed\n")); | 352 | dbg_fsbuild("build_fs failed\n"); |
354 | jffs2_free_ino_caches(c); | 353 | jffs2_free_ino_caches(c); |
355 | jffs2_free_raw_node_refs(c); | 354 | jffs2_free_raw_node_refs(c); |
356 | if (c->mtd->flags & MTD_NO_VIRTBLOCKS) { | 355 | #ifndef __ECOS |
356 | if (jffs2_blocks_use_vmalloc(c)) | ||
357 | vfree(c->blocks); | 357 | vfree(c->blocks); |
358 | } else { | 358 | else |
359 | #endif | ||
359 | kfree(c->blocks); | 360 | kfree(c->blocks); |
360 | } | 361 | |
361 | return -EIO; | 362 | return -EIO; |
362 | } | 363 | } |
363 | 364 | ||
diff --git a/fs/jffs2/compr.c b/fs/jffs2/compr.c index af922a9618ac..e7944e665b9f 100644 --- a/fs/jffs2/compr.c +++ b/fs/jffs2/compr.c | |||
@@ -9,7 +9,7 @@ | |||
9 | * | 9 | * |
10 | * For licensing information, see the file 'LICENCE' in this directory. | 10 | * For licensing information, see the file 'LICENCE' in this directory. |
11 | * | 11 | * |
12 | * $Id: compr.c,v 1.42 2004/08/07 21:56:08 dwmw2 Exp $ | 12 | * $Id: compr.c,v 1.46 2005/11/07 11:14:38 gleixner Exp $ |
13 | * | 13 | * |
14 | */ | 14 | */ |
15 | 15 | ||
@@ -36,16 +36,16 @@ static uint32_t none_stat_compr_blocks=0,none_stat_decompr_blocks=0,none_stat_co | |||
36 | * data. | 36 | * data. |
37 | * | 37 | * |
38 | * Returns: Lower byte to be stored with data indicating compression type used. | 38 | * Returns: Lower byte to be stored with data indicating compression type used. |
39 | * Zero is used to show that the data could not be compressed - the | 39 | * Zero is used to show that the data could not be compressed - the |
40 | * compressed version was actually larger than the original. | 40 | * compressed version was actually larger than the original. |
41 | * Upper byte will be used later. (soon) | 41 | * Upper byte will be used later. (soon) |
42 | * | 42 | * |
43 | * If the cdata buffer isn't large enough to hold all the uncompressed data, | 43 | * If the cdata buffer isn't large enough to hold all the uncompressed data, |
44 | * jffs2_compress should compress as much as will fit, and should set | 44 | * jffs2_compress should compress as much as will fit, and should set |
45 | * *datalen accordingly to show the amount of data which were compressed. | 45 | * *datalen accordingly to show the amount of data which were compressed. |
46 | */ | 46 | */ |
47 | uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | 47 | uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f, |
48 | unsigned char *data_in, unsigned char **cpage_out, | 48 | unsigned char *data_in, unsigned char **cpage_out, |
49 | uint32_t *datalen, uint32_t *cdatalen) | 49 | uint32_t *datalen, uint32_t *cdatalen) |
50 | { | 50 | { |
51 | int ret = JFFS2_COMPR_NONE; | 51 | int ret = JFFS2_COMPR_NONE; |
@@ -164,7 +164,7 @@ uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
164 | } | 164 | } |
165 | 165 | ||
166 | int jffs2_decompress(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | 166 | int jffs2_decompress(struct jffs2_sb_info *c, struct jffs2_inode_info *f, |
167 | uint16_t comprtype, unsigned char *cdata_in, | 167 | uint16_t comprtype, unsigned char *cdata_in, |
168 | unsigned char *data_out, uint32_t cdatalen, uint32_t datalen) | 168 | unsigned char *data_out, uint32_t cdatalen, uint32_t datalen) |
169 | { | 169 | { |
170 | struct jffs2_compressor *this; | 170 | struct jffs2_compressor *this; |
@@ -298,7 +298,7 @@ char *jffs2_stats(void) | |||
298 | 298 | ||
299 | act_buf += sprintf(act_buf,"JFFS2 compressor statistics:\n"); | 299 | act_buf += sprintf(act_buf,"JFFS2 compressor statistics:\n"); |
300 | act_buf += sprintf(act_buf,"%10s ","none"); | 300 | act_buf += sprintf(act_buf,"%10s ","none"); |
301 | act_buf += sprintf(act_buf,"compr: %d blocks (%d) decompr: %d blocks\n", none_stat_compr_blocks, | 301 | act_buf += sprintf(act_buf,"compr: %d blocks (%d) decompr: %d blocks\n", none_stat_compr_blocks, |
302 | none_stat_compr_size, none_stat_decompr_blocks); | 302 | none_stat_compr_size, none_stat_decompr_blocks); |
303 | spin_lock(&jffs2_compressor_list_lock); | 303 | spin_lock(&jffs2_compressor_list_lock); |
304 | list_for_each_entry(this, &jffs2_compressor_list, list) { | 304 | list_for_each_entry(this, &jffs2_compressor_list, list) { |
@@ -307,8 +307,8 @@ char *jffs2_stats(void) | |||
307 | act_buf += sprintf(act_buf,"- "); | 307 | act_buf += sprintf(act_buf,"- "); |
308 | else | 308 | else |
309 | act_buf += sprintf(act_buf,"+ "); | 309 | act_buf += sprintf(act_buf,"+ "); |
310 | act_buf += sprintf(act_buf,"compr: %d blocks (%d/%d) decompr: %d blocks ", this->stat_compr_blocks, | 310 | act_buf += sprintf(act_buf,"compr: %d blocks (%d/%d) decompr: %d blocks ", this->stat_compr_blocks, |
311 | this->stat_compr_new_size, this->stat_compr_orig_size, | 311 | this->stat_compr_new_size, this->stat_compr_orig_size, |
312 | this->stat_decompr_blocks); | 312 | this->stat_decompr_blocks); |
313 | act_buf += sprintf(act_buf,"\n"); | 313 | act_buf += sprintf(act_buf,"\n"); |
314 | } | 314 | } |
@@ -317,7 +317,7 @@ char *jffs2_stats(void) | |||
317 | return buf; | 317 | return buf; |
318 | } | 318 | } |
319 | 319 | ||
320 | char *jffs2_get_compression_mode_name(void) | 320 | char *jffs2_get_compression_mode_name(void) |
321 | { | 321 | { |
322 | switch (jffs2_compression_mode) { | 322 | switch (jffs2_compression_mode) { |
323 | case JFFS2_COMPR_MODE_NONE: | 323 | case JFFS2_COMPR_MODE_NONE: |
@@ -330,7 +330,7 @@ char *jffs2_get_compression_mode_name(void) | |||
330 | return "unkown"; | 330 | return "unkown"; |
331 | } | 331 | } |
332 | 332 | ||
333 | int jffs2_set_compression_mode_name(const char *name) | 333 | int jffs2_set_compression_mode_name(const char *name) |
334 | { | 334 | { |
335 | if (!strcmp("none",name)) { | 335 | if (!strcmp("none",name)) { |
336 | jffs2_compression_mode = JFFS2_COMPR_MODE_NONE; | 336 | jffs2_compression_mode = JFFS2_COMPR_MODE_NONE; |
@@ -355,7 +355,7 @@ static int jffs2_compressor_Xable(const char *name, int disabled) | |||
355 | if (!strcmp(this->name, name)) { | 355 | if (!strcmp(this->name, name)) { |
356 | this->disabled = disabled; | 356 | this->disabled = disabled; |
357 | spin_unlock(&jffs2_compressor_list_lock); | 357 | spin_unlock(&jffs2_compressor_list_lock); |
358 | return 0; | 358 | return 0; |
359 | } | 359 | } |
360 | } | 360 | } |
361 | spin_unlock(&jffs2_compressor_list_lock); | 361 | spin_unlock(&jffs2_compressor_list_lock); |
@@ -385,7 +385,7 @@ int jffs2_set_compressor_priority(const char *name, int priority) | |||
385 | } | 385 | } |
386 | } | 386 | } |
387 | spin_unlock(&jffs2_compressor_list_lock); | 387 | spin_unlock(&jffs2_compressor_list_lock); |
388 | printk(KERN_WARNING "JFFS2: compressor %s not found.\n",name); | 388 | printk(KERN_WARNING "JFFS2: compressor %s not found.\n",name); |
389 | return 1; | 389 | return 1; |
390 | reinsert: | 390 | reinsert: |
391 | /* list is sorted in the order of priority, so if | 391 | /* list is sorted in the order of priority, so if |
@@ -412,7 +412,7 @@ void jffs2_free_comprbuf(unsigned char *comprbuf, unsigned char *orig) | |||
412 | kfree(comprbuf); | 412 | kfree(comprbuf); |
413 | } | 413 | } |
414 | 414 | ||
415 | int jffs2_compressors_init(void) | 415 | int jffs2_compressors_init(void) |
416 | { | 416 | { |
417 | /* Registering compressors */ | 417 | /* Registering compressors */ |
418 | #ifdef CONFIG_JFFS2_ZLIB | 418 | #ifdef CONFIG_JFFS2_ZLIB |
@@ -425,12 +425,6 @@ int jffs2_compressors_init(void) | |||
425 | jffs2_rubinmips_init(); | 425 | jffs2_rubinmips_init(); |
426 | jffs2_dynrubin_init(); | 426 | jffs2_dynrubin_init(); |
427 | #endif | 427 | #endif |
428 | #ifdef CONFIG_JFFS2_LZARI | ||
429 | jffs2_lzari_init(); | ||
430 | #endif | ||
431 | #ifdef CONFIG_JFFS2_LZO | ||
432 | jffs2_lzo_init(); | ||
433 | #endif | ||
434 | /* Setting default compression mode */ | 428 | /* Setting default compression mode */ |
435 | #ifdef CONFIG_JFFS2_CMODE_NONE | 429 | #ifdef CONFIG_JFFS2_CMODE_NONE |
436 | jffs2_compression_mode = JFFS2_COMPR_MODE_NONE; | 430 | jffs2_compression_mode = JFFS2_COMPR_MODE_NONE; |
@@ -446,15 +440,9 @@ int jffs2_compressors_init(void) | |||
446 | return 0; | 440 | return 0; |
447 | } | 441 | } |
448 | 442 | ||
449 | int jffs2_compressors_exit(void) | 443 | int jffs2_compressors_exit(void) |
450 | { | 444 | { |
451 | /* Unregistering compressors */ | 445 | /* Unregistering compressors */ |
452 | #ifdef CONFIG_JFFS2_LZO | ||
453 | jffs2_lzo_exit(); | ||
454 | #endif | ||
455 | #ifdef CONFIG_JFFS2_LZARI | ||
456 | jffs2_lzari_exit(); | ||
457 | #endif | ||
458 | #ifdef CONFIG_JFFS2_RUBIN | 446 | #ifdef CONFIG_JFFS2_RUBIN |
459 | jffs2_dynrubin_exit(); | 447 | jffs2_dynrubin_exit(); |
460 | jffs2_rubinmips_exit(); | 448 | jffs2_rubinmips_exit(); |
diff --git a/fs/jffs2/compr.h b/fs/jffs2/compr.h index 89ceeed201eb..a77e830d85c5 100644 --- a/fs/jffs2/compr.h +++ b/fs/jffs2/compr.h | |||
@@ -4,10 +4,10 @@ | |||
4 | * Copyright (C) 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>, | 4 | * Copyright (C) 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>, |
5 | * University of Szeged, Hungary | 5 | * University of Szeged, Hungary |
6 | * | 6 | * |
7 | * For licensing information, see the file 'LICENCE' in the | 7 | * For licensing information, see the file 'LICENCE' in the |
8 | * jffs2 directory. | 8 | * jffs2 directory. |
9 | * | 9 | * |
10 | * $Id: compr.h,v 1.6 2004/07/16 15:17:57 dwmw2 Exp $ | 10 | * $Id: compr.h,v 1.9 2005/11/07 11:14:38 gleixner Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -103,13 +103,5 @@ void jffs2_rtime_exit(void); | |||
103 | int jffs2_zlib_init(void); | 103 | int jffs2_zlib_init(void); |
104 | void jffs2_zlib_exit(void); | 104 | void jffs2_zlib_exit(void); |
105 | #endif | 105 | #endif |
106 | #ifdef CONFIG_JFFS2_LZARI | ||
107 | int jffs2_lzari_init(void); | ||
108 | void jffs2_lzari_exit(void); | ||
109 | #endif | ||
110 | #ifdef CONFIG_JFFS2_LZO | ||
111 | int jffs2_lzo_init(void); | ||
112 | void jffs2_lzo_exit(void); | ||
113 | #endif | ||
114 | 106 | ||
115 | #endif /* __JFFS2_COMPR_H__ */ | 107 | #endif /* __JFFS2_COMPR_H__ */ |
diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c index 393129418666..2eb1b7428d16 100644 --- a/fs/jffs2/compr_rtime.c +++ b/fs/jffs2/compr_rtime.c | |||
@@ -24,8 +24,8 @@ | |||
24 | #include <linux/kernel.h> | 24 | #include <linux/kernel.h> |
25 | #include <linux/types.h> | 25 | #include <linux/types.h> |
26 | #include <linux/errno.h> | 26 | #include <linux/errno.h> |
27 | #include <linux/string.h> | 27 | #include <linux/string.h> |
28 | #include <linux/jffs2.h> | 28 | #include <linux/jffs2.h> |
29 | #include "compr.h" | 29 | #include "compr.h" |
30 | 30 | ||
31 | /* _compress returns the compressed size, -1 if bigger */ | 31 | /* _compress returns the compressed size, -1 if bigger */ |
@@ -38,19 +38,19 @@ static int jffs2_rtime_compress(unsigned char *data_in, | |||
38 | int outpos = 0; | 38 | int outpos = 0; |
39 | int pos=0; | 39 | int pos=0; |
40 | 40 | ||
41 | memset(positions,0,sizeof(positions)); | 41 | memset(positions,0,sizeof(positions)); |
42 | 42 | ||
43 | while (pos < (*sourcelen) && outpos <= (*dstlen)-2) { | 43 | while (pos < (*sourcelen) && outpos <= (*dstlen)-2) { |
44 | int backpos, runlen=0; | 44 | int backpos, runlen=0; |
45 | unsigned char value; | 45 | unsigned char value; |
46 | 46 | ||
47 | value = data_in[pos]; | 47 | value = data_in[pos]; |
48 | 48 | ||
49 | cpage_out[outpos++] = data_in[pos++]; | 49 | cpage_out[outpos++] = data_in[pos++]; |
50 | 50 | ||
51 | backpos = positions[value]; | 51 | backpos = positions[value]; |
52 | positions[value]=pos; | 52 | positions[value]=pos; |
53 | 53 | ||
54 | while ((backpos < pos) && (pos < (*sourcelen)) && | 54 | while ((backpos < pos) && (pos < (*sourcelen)) && |
55 | (data_in[pos]==data_in[backpos++]) && (runlen<255)) { | 55 | (data_in[pos]==data_in[backpos++]) && (runlen<255)) { |
56 | pos++; | 56 | pos++; |
@@ -63,12 +63,12 @@ static int jffs2_rtime_compress(unsigned char *data_in, | |||
63 | /* We failed */ | 63 | /* We failed */ |
64 | return -1; | 64 | return -1; |
65 | } | 65 | } |
66 | 66 | ||
67 | /* Tell the caller how much we managed to compress, and how much space it took */ | 67 | /* Tell the caller how much we managed to compress, and how much space it took */ |
68 | *sourcelen = pos; | 68 | *sourcelen = pos; |
69 | *dstlen = outpos; | 69 | *dstlen = outpos; |
70 | return 0; | 70 | return 0; |
71 | } | 71 | } |
72 | 72 | ||
73 | 73 | ||
74 | static int jffs2_rtime_decompress(unsigned char *data_in, | 74 | static int jffs2_rtime_decompress(unsigned char *data_in, |
@@ -79,19 +79,19 @@ static int jffs2_rtime_decompress(unsigned char *data_in, | |||
79 | short positions[256]; | 79 | short positions[256]; |
80 | int outpos = 0; | 80 | int outpos = 0; |
81 | int pos=0; | 81 | int pos=0; |
82 | 82 | ||
83 | memset(positions,0,sizeof(positions)); | 83 | memset(positions,0,sizeof(positions)); |
84 | 84 | ||
85 | while (outpos<destlen) { | 85 | while (outpos<destlen) { |
86 | unsigned char value; | 86 | unsigned char value; |
87 | int backoffs; | 87 | int backoffs; |
88 | int repeat; | 88 | int repeat; |
89 | 89 | ||
90 | value = data_in[pos++]; | 90 | value = data_in[pos++]; |
91 | cpage_out[outpos++] = value; /* first the verbatim copied byte */ | 91 | cpage_out[outpos++] = value; /* first the verbatim copied byte */ |
92 | repeat = data_in[pos++]; | 92 | repeat = data_in[pos++]; |
93 | backoffs = positions[value]; | 93 | backoffs = positions[value]; |
94 | 94 | ||
95 | positions[value]=outpos; | 95 | positions[value]=outpos; |
96 | if (repeat) { | 96 | if (repeat) { |
97 | if (backoffs + repeat >= outpos) { | 97 | if (backoffs + repeat >= outpos) { |
@@ -101,12 +101,12 @@ static int jffs2_rtime_decompress(unsigned char *data_in, | |||
101 | } | 101 | } |
102 | } else { | 102 | } else { |
103 | memcpy(&cpage_out[outpos],&cpage_out[backoffs],repeat); | 103 | memcpy(&cpage_out[outpos],&cpage_out[backoffs],repeat); |
104 | outpos+=repeat; | 104 | outpos+=repeat; |
105 | } | 105 | } |
106 | } | 106 | } |
107 | } | 107 | } |
108 | return 0; | 108 | return 0; |
109 | } | 109 | } |
110 | 110 | ||
111 | static struct jffs2_compressor jffs2_rtime_comp = { | 111 | static struct jffs2_compressor jffs2_rtime_comp = { |
112 | .priority = JFFS2_RTIME_PRIORITY, | 112 | .priority = JFFS2_RTIME_PRIORITY, |
diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c index 09422388fb96..e792e675d624 100644 --- a/fs/jffs2/compr_rubin.c +++ b/fs/jffs2/compr_rubin.c | |||
@@ -11,7 +11,6 @@ | |||
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
14 | |||
15 | #include <linux/string.h> | 14 | #include <linux/string.h> |
16 | #include <linux/types.h> | 15 | #include <linux/types.h> |
17 | #include <linux/jffs2.h> | 16 | #include <linux/jffs2.h> |
@@ -20,7 +19,7 @@ | |||
20 | #include "compr.h" | 19 | #include "compr.h" |
21 | 20 | ||
22 | static void init_rubin(struct rubin_state *rs, int div, int *bits) | 21 | static void init_rubin(struct rubin_state *rs, int div, int *bits) |
23 | { | 22 | { |
24 | int c; | 23 | int c; |
25 | 24 | ||
26 | rs->q = 0; | 25 | rs->q = 0; |
@@ -40,7 +39,7 @@ static int encode(struct rubin_state *rs, long A, long B, int symbol) | |||
40 | 39 | ||
41 | while ((rs->q >= UPPER_BIT_RUBIN) || ((rs->p + rs->q) <= UPPER_BIT_RUBIN)) { | 40 | while ((rs->q >= UPPER_BIT_RUBIN) || ((rs->p + rs->q) <= UPPER_BIT_RUBIN)) { |
42 | rs->bit_number++; | 41 | rs->bit_number++; |
43 | 42 | ||
44 | ret = pushbit(&rs->pp, (rs->q & UPPER_BIT_RUBIN) ? 1 : 0, 0); | 43 | ret = pushbit(&rs->pp, (rs->q & UPPER_BIT_RUBIN) ? 1 : 0, 0); |
45 | if (ret) | 44 | if (ret) |
46 | return ret; | 45 | return ret; |
@@ -68,7 +67,7 @@ static int encode(struct rubin_state *rs, long A, long B, int symbol) | |||
68 | 67 | ||
69 | 68 | ||
70 | static void end_rubin(struct rubin_state *rs) | 69 | static void end_rubin(struct rubin_state *rs) |
71 | { | 70 | { |
72 | 71 | ||
73 | int i; | 72 | int i; |
74 | 73 | ||
@@ -82,7 +81,7 @@ static void end_rubin(struct rubin_state *rs) | |||
82 | 81 | ||
83 | static void init_decode(struct rubin_state *rs, int div, int *bits) | 82 | static void init_decode(struct rubin_state *rs, int div, int *bits) |
84 | { | 83 | { |
85 | init_rubin(rs, div, bits); | 84 | init_rubin(rs, div, bits); |
86 | 85 | ||
87 | /* behalve lower */ | 86 | /* behalve lower */ |
88 | rs->rec_q = 0; | 87 | rs->rec_q = 0; |
@@ -188,7 +187,7 @@ static int in_byte(struct rubin_state *rs) | |||
188 | 187 | ||
189 | 188 | ||
190 | 189 | ||
191 | static int rubin_do_compress(int bit_divider, int *bits, unsigned char *data_in, | 190 | static int rubin_do_compress(int bit_divider, int *bits, unsigned char *data_in, |
192 | unsigned char *cpage_out, uint32_t *sourcelen, uint32_t *dstlen) | 191 | unsigned char *cpage_out, uint32_t *sourcelen, uint32_t *dstlen) |
193 | { | 192 | { |
194 | int outpos = 0; | 193 | int outpos = 0; |
@@ -198,31 +197,31 @@ static int rubin_do_compress(int bit_divider, int *bits, unsigned char *data_in, | |||
198 | init_pushpull(&rs.pp, cpage_out, *dstlen * 8, 0, 32); | 197 | init_pushpull(&rs.pp, cpage_out, *dstlen * 8, 0, 32); |
199 | 198 | ||
200 | init_rubin(&rs, bit_divider, bits); | 199 | init_rubin(&rs, bit_divider, bits); |
201 | 200 | ||
202 | while (pos < (*sourcelen) && !out_byte(&rs, data_in[pos])) | 201 | while (pos < (*sourcelen) && !out_byte(&rs, data_in[pos])) |
203 | pos++; | 202 | pos++; |
204 | 203 | ||
205 | end_rubin(&rs); | 204 | end_rubin(&rs); |
206 | 205 | ||
207 | if (outpos > pos) { | 206 | if (outpos > pos) { |
208 | /* We failed */ | 207 | /* We failed */ |
209 | return -1; | 208 | return -1; |
210 | } | 209 | } |
211 | 210 | ||
212 | /* Tell the caller how much we managed to compress, | 211 | /* Tell the caller how much we managed to compress, |
213 | * and how much space it took */ | 212 | * and how much space it took */ |
214 | 213 | ||
215 | outpos = (pushedbits(&rs.pp)+7)/8; | 214 | outpos = (pushedbits(&rs.pp)+7)/8; |
216 | 215 | ||
217 | if (outpos >= pos) | 216 | if (outpos >= pos) |
218 | return -1; /* We didn't actually compress */ | 217 | return -1; /* We didn't actually compress */ |
219 | *sourcelen = pos; | 218 | *sourcelen = pos; |
220 | *dstlen = outpos; | 219 | *dstlen = outpos; |
221 | return 0; | 220 | return 0; |
222 | } | 221 | } |
223 | #if 0 | 222 | #if 0 |
224 | /* _compress returns the compressed size, -1 if bigger */ | 223 | /* _compress returns the compressed size, -1 if bigger */ |
225 | int jffs2_rubinmips_compress(unsigned char *data_in, unsigned char *cpage_out, | 224 | int jffs2_rubinmips_compress(unsigned char *data_in, unsigned char *cpage_out, |
226 | uint32_t *sourcelen, uint32_t *dstlen, void *model) | 225 | uint32_t *sourcelen, uint32_t *dstlen, void *model) |
227 | { | 226 | { |
228 | return rubin_do_compress(BIT_DIVIDER_MIPS, bits_mips, data_in, cpage_out, sourcelen, dstlen); | 227 | return rubin_do_compress(BIT_DIVIDER_MIPS, bits_mips, data_in, cpage_out, sourcelen, dstlen); |
@@ -277,7 +276,7 @@ static int jffs2_dynrubin_compress(unsigned char *data_in, | |||
277 | } | 276 | } |
278 | 277 | ||
279 | ret = rubin_do_compress(256, bits, data_in, cpage_out+8, &mysrclen, &mydstlen); | 278 | ret = rubin_do_compress(256, bits, data_in, cpage_out+8, &mysrclen, &mydstlen); |
280 | if (ret) | 279 | if (ret) |
281 | return ret; | 280 | return ret; |
282 | 281 | ||
283 | /* Add back the 8 bytes we took for the probabilities */ | 282 | /* Add back the 8 bytes we took for the probabilities */ |
@@ -293,19 +292,19 @@ static int jffs2_dynrubin_compress(unsigned char *data_in, | |||
293 | return 0; | 292 | return 0; |
294 | } | 293 | } |
295 | 294 | ||
296 | static void rubin_do_decompress(int bit_divider, int *bits, unsigned char *cdata_in, | 295 | static void rubin_do_decompress(int bit_divider, int *bits, unsigned char *cdata_in, |
297 | unsigned char *page_out, uint32_t srclen, uint32_t destlen) | 296 | unsigned char *page_out, uint32_t srclen, uint32_t destlen) |
298 | { | 297 | { |
299 | int outpos = 0; | 298 | int outpos = 0; |
300 | struct rubin_state rs; | 299 | struct rubin_state rs; |
301 | 300 | ||
302 | init_pushpull(&rs.pp, cdata_in, srclen, 0, 0); | 301 | init_pushpull(&rs.pp, cdata_in, srclen, 0, 0); |
303 | init_decode(&rs, bit_divider, bits); | 302 | init_decode(&rs, bit_divider, bits); |
304 | 303 | ||
305 | while (outpos < destlen) { | 304 | while (outpos < destlen) { |
306 | page_out[outpos++] = in_byte(&rs); | 305 | page_out[outpos++] = in_byte(&rs); |
307 | } | 306 | } |
308 | } | 307 | } |
309 | 308 | ||
310 | 309 | ||
311 | static int jffs2_rubinmips_decompress(unsigned char *data_in, | 310 | static int jffs2_rubinmips_decompress(unsigned char *data_in, |
diff --git a/fs/jffs2/compr_rubin.h b/fs/jffs2/compr_rubin.h index cf51e34f6574..bf1a93451621 100644 --- a/fs/jffs2/compr_rubin.h +++ b/fs/jffs2/compr_rubin.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* Rubin encoder/decoder header */ | 1 | /* Rubin encoder/decoder header */ |
2 | /* work started at : aug 3, 1994 */ | 2 | /* work started at : aug 3, 1994 */ |
3 | /* last modification : aug 15, 1994 */ | 3 | /* last modification : aug 15, 1994 */ |
4 | /* $Id: compr_rubin.h,v 1.6 2002/01/25 01:49:26 dwmw2 Exp $ */ | 4 | /* $Id: compr_rubin.h,v 1.7 2005/11/07 11:14:38 gleixner Exp $ */ |
5 | 5 | ||
6 | #include "pushpull.h" | 6 | #include "pushpull.h" |
7 | 7 | ||
@@ -11,8 +11,8 @@ | |||
11 | 11 | ||
12 | 12 | ||
13 | struct rubin_state { | 13 | struct rubin_state { |
14 | unsigned long p; | 14 | unsigned long p; |
15 | unsigned long q; | 15 | unsigned long q; |
16 | unsigned long rec_q; | 16 | unsigned long rec_q; |
17 | long bit_number; | 17 | long bit_number; |
18 | struct pushpull pp; | 18 | struct pushpull pp; |
diff --git a/fs/jffs2/compr_zlib.c b/fs/jffs2/compr_zlib.c index 83f7e0788fd0..4db8be8e90cc 100644 --- a/fs/jffs2/compr_zlib.c +++ b/fs/jffs2/compr_zlib.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: compr_zlib.c,v 1.31 2005/05/20 19:30:06 gleixner Exp $ | 10 | * $Id: compr_zlib.c,v 1.32 2005/11/07 11:14:38 gleixner Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -24,11 +24,11 @@ | |||
24 | #include "nodelist.h" | 24 | #include "nodelist.h" |
25 | #include "compr.h" | 25 | #include "compr.h" |
26 | 26 | ||
27 | /* Plan: call deflate() with avail_in == *sourcelen, | 27 | /* Plan: call deflate() with avail_in == *sourcelen, |
28 | avail_out = *dstlen - 12 and flush == Z_FINISH. | 28 | avail_out = *dstlen - 12 and flush == Z_FINISH. |
29 | If it doesn't manage to finish, call it again with | 29 | If it doesn't manage to finish, call it again with |
30 | avail_in == 0 and avail_out set to the remaining 12 | 30 | avail_in == 0 and avail_out set to the remaining 12 |
31 | bytes for it to clean up. | 31 | bytes for it to clean up. |
32 | Q: Is 12 bytes sufficient? | 32 | Q: Is 12 bytes sufficient? |
33 | */ | 33 | */ |
34 | #define STREAM_END_SPACE 12 | 34 | #define STREAM_END_SPACE 12 |
@@ -89,7 +89,7 @@ static int jffs2_zlib_compress(unsigned char *data_in, | |||
89 | 89 | ||
90 | def_strm.next_in = data_in; | 90 | def_strm.next_in = data_in; |
91 | def_strm.total_in = 0; | 91 | def_strm.total_in = 0; |
92 | 92 | ||
93 | def_strm.next_out = cpage_out; | 93 | def_strm.next_out = cpage_out; |
94 | def_strm.total_out = 0; | 94 | def_strm.total_out = 0; |
95 | 95 | ||
@@ -99,7 +99,7 @@ static int jffs2_zlib_compress(unsigned char *data_in, | |||
99 | D1(printk(KERN_DEBUG "calling deflate with avail_in %d, avail_out %d\n", | 99 | D1(printk(KERN_DEBUG "calling deflate with avail_in %d, avail_out %d\n", |
100 | def_strm.avail_in, def_strm.avail_out)); | 100 | def_strm.avail_in, def_strm.avail_out)); |
101 | ret = zlib_deflate(&def_strm, Z_PARTIAL_FLUSH); | 101 | ret = zlib_deflate(&def_strm, Z_PARTIAL_FLUSH); |
102 | D1(printk(KERN_DEBUG "deflate returned with avail_in %d, avail_out %d, total_in %ld, total_out %ld\n", | 102 | D1(printk(KERN_DEBUG "deflate returned with avail_in %d, avail_out %d, total_in %ld, total_out %ld\n", |
103 | def_strm.avail_in, def_strm.avail_out, def_strm.total_in, def_strm.total_out)); | 103 | def_strm.avail_in, def_strm.avail_out, def_strm.total_in, def_strm.total_out)); |
104 | if (ret != Z_OK) { | 104 | if (ret != Z_OK) { |
105 | D1(printk(KERN_DEBUG "deflate in loop returned %d\n", ret)); | 105 | D1(printk(KERN_DEBUG "deflate in loop returned %d\n", ret)); |
@@ -150,7 +150,7 @@ static int jffs2_zlib_decompress(unsigned char *data_in, | |||
150 | inf_strm.next_in = data_in; | 150 | inf_strm.next_in = data_in; |
151 | inf_strm.avail_in = srclen; | 151 | inf_strm.avail_in = srclen; |
152 | inf_strm.total_in = 0; | 152 | inf_strm.total_in = 0; |
153 | 153 | ||
154 | inf_strm.next_out = cpage_out; | 154 | inf_strm.next_out = cpage_out; |
155 | inf_strm.avail_out = destlen; | 155 | inf_strm.avail_out = destlen; |
156 | inf_strm.total_out = 0; | 156 | inf_strm.total_out = 0; |
diff --git a/fs/jffs2/comprtest.c b/fs/jffs2/comprtest.c index cf51f091d0e7..f0fb8be7740c 100644 --- a/fs/jffs2/comprtest.c +++ b/fs/jffs2/comprtest.c | |||
@@ -1,4 +1,4 @@ | |||
1 | /* $Id: comprtest.c,v 1.5 2002/01/03 15:20:44 dwmw2 Exp $ */ | 1 | /* $Id: comprtest.c,v 1.6 2005/11/07 11:14:38 gleixner Exp $ */ |
2 | 2 | ||
3 | #include <linux/kernel.h> | 3 | #include <linux/kernel.h> |
4 | #include <linux/string.h> | 4 | #include <linux/string.h> |
@@ -265,9 +265,9 @@ static unsigned char testdata[TESTDATA_LEN] = { | |||
265 | static unsigned char comprbuf[TESTDATA_LEN]; | 265 | static unsigned char comprbuf[TESTDATA_LEN]; |
266 | static unsigned char decomprbuf[TESTDATA_LEN]; | 266 | static unsigned char decomprbuf[TESTDATA_LEN]; |
267 | 267 | ||
268 | int jffs2_decompress(unsigned char comprtype, unsigned char *cdata_in, | 268 | int jffs2_decompress(unsigned char comprtype, unsigned char *cdata_in, |
269 | unsigned char *data_out, uint32_t cdatalen, uint32_t datalen); | 269 | unsigned char *data_out, uint32_t cdatalen, uint32_t datalen); |
270 | unsigned char jffs2_compress(unsigned char *data_in, unsigned char *cpage_out, | 270 | unsigned char jffs2_compress(unsigned char *data_in, unsigned char *cpage_out, |
271 | uint32_t *datalen, uint32_t *cdatalen); | 271 | uint32_t *datalen, uint32_t *cdatalen); |
272 | 272 | ||
273 | int init_module(void ) { | 273 | int init_module(void ) { |
@@ -276,10 +276,10 @@ int init_module(void ) { | |||
276 | int ret; | 276 | int ret; |
277 | 277 | ||
278 | printk("Original data: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", | 278 | printk("Original data: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", |
279 | testdata[0],testdata[1],testdata[2],testdata[3], | 279 | testdata[0],testdata[1],testdata[2],testdata[3], |
280 | testdata[4],testdata[5],testdata[6],testdata[7], | 280 | testdata[4],testdata[5],testdata[6],testdata[7], |
281 | testdata[8],testdata[9],testdata[10],testdata[11], | 281 | testdata[8],testdata[9],testdata[10],testdata[11], |
282 | testdata[12],testdata[13],testdata[14],testdata[15]); | 282 | testdata[12],testdata[13],testdata[14],testdata[15]); |
283 | d = TESTDATA_LEN; | 283 | d = TESTDATA_LEN; |
284 | c = TESTDATA_LEN; | 284 | c = TESTDATA_LEN; |
285 | comprtype = jffs2_compress(testdata, comprbuf, &d, &c); | 285 | comprtype = jffs2_compress(testdata, comprbuf, &d, &c); |
@@ -287,18 +287,18 @@ int init_module(void ) { | |||
287 | printk("jffs2_compress used compression type %d. Compressed size %d, uncompressed size %d\n", | 287 | printk("jffs2_compress used compression type %d. Compressed size %d, uncompressed size %d\n", |
288 | comprtype, c, d); | 288 | comprtype, c, d); |
289 | printk("Compressed data: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", | 289 | printk("Compressed data: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", |
290 | comprbuf[0],comprbuf[1],comprbuf[2],comprbuf[3], | 290 | comprbuf[0],comprbuf[1],comprbuf[2],comprbuf[3], |
291 | comprbuf[4],comprbuf[5],comprbuf[6],comprbuf[7], | 291 | comprbuf[4],comprbuf[5],comprbuf[6],comprbuf[7], |
292 | comprbuf[8],comprbuf[9],comprbuf[10],comprbuf[11], | 292 | comprbuf[8],comprbuf[9],comprbuf[10],comprbuf[11], |
293 | comprbuf[12],comprbuf[13],comprbuf[14],comprbuf[15]); | 293 | comprbuf[12],comprbuf[13],comprbuf[14],comprbuf[15]); |
294 | 294 | ||
295 | ret = jffs2_decompress(comprtype, comprbuf, decomprbuf, c, d); | 295 | ret = jffs2_decompress(comprtype, comprbuf, decomprbuf, c, d); |
296 | printk("jffs2_decompress returned %d\n", ret); | 296 | printk("jffs2_decompress returned %d\n", ret); |
297 | printk("Decompressed data: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", | 297 | printk("Decompressed data: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", |
298 | decomprbuf[0],decomprbuf[1],decomprbuf[2],decomprbuf[3], | 298 | decomprbuf[0],decomprbuf[1],decomprbuf[2],decomprbuf[3], |
299 | decomprbuf[4],decomprbuf[5],decomprbuf[6],decomprbuf[7], | 299 | decomprbuf[4],decomprbuf[5],decomprbuf[6],decomprbuf[7], |
300 | decomprbuf[8],decomprbuf[9],decomprbuf[10],decomprbuf[11], | 300 | decomprbuf[8],decomprbuf[9],decomprbuf[10],decomprbuf[11], |
301 | decomprbuf[12],decomprbuf[13],decomprbuf[14],decomprbuf[15]); | 301 | decomprbuf[12],decomprbuf[13],decomprbuf[14],decomprbuf[15]); |
302 | if (memcmp(decomprbuf, testdata, d)) | 302 | if (memcmp(decomprbuf, testdata, d)) |
303 | printk("Compression and decompression corrupted data\n"); | 303 | printk("Compression and decompression corrupted data\n"); |
304 | else | 304 | else |
diff --git a/fs/jffs2/debug.c b/fs/jffs2/debug.c new file mode 100644 index 000000000000..1fe17de713e8 --- /dev/null +++ b/fs/jffs2/debug.c | |||
@@ -0,0 +1,705 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2001-2003 Red Hat, Inc. | ||
5 | * | ||
6 | * Created by David Woodhouse <dwmw2@infradead.org> | ||
7 | * | ||
8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
9 | * | ||
10 | * $Id: debug.c,v 1.12 2005/11/07 11:14:39 gleixner Exp $ | ||
11 | * | ||
12 | */ | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/types.h> | ||
15 | #include <linux/pagemap.h> | ||
16 | #include <linux/crc32.h> | ||
17 | #include <linux/jffs2.h> | ||
18 | #include <linux/mtd/mtd.h> | ||
19 | #include "nodelist.h" | ||
20 | #include "debug.h" | ||
21 | |||
22 | #ifdef JFFS2_DBG_SANITY_CHECKS | ||
23 | |||
24 | void | ||
25 | __jffs2_dbg_acct_sanity_check_nolock(struct jffs2_sb_info *c, | ||
26 | struct jffs2_eraseblock *jeb) | ||
27 | { | ||
28 | if (unlikely(jeb && jeb->used_size + jeb->dirty_size + | ||
29 | jeb->free_size + jeb->wasted_size + | ||
30 | jeb->unchecked_size != c->sector_size)) { | ||
31 | JFFS2_ERROR("eeep, space accounting for block at 0x%08x is screwed.\n", jeb->offset); | ||
32 | JFFS2_ERROR("free %#08x + dirty %#08x + used %#08x + wasted %#08x + unchecked %#08x != total %#08x.\n", | ||
33 | jeb->free_size, jeb->dirty_size, jeb->used_size, | ||
34 | jeb->wasted_size, jeb->unchecked_size, c->sector_size); | ||
35 | BUG(); | ||
36 | } | ||
37 | |||
38 | if (unlikely(c->used_size + c->dirty_size + c->free_size + c->erasing_size + c->bad_size | ||
39 | + c->wasted_size + c->unchecked_size != c->flash_size)) { | ||
40 | JFFS2_ERROR("eeep, space accounting superblock info is screwed.\n"); | ||
41 | JFFS2_ERROR("free %#08x + dirty %#08x + used %#08x + erasing %#08x + bad %#08x + wasted %#08x + unchecked %#08x != total %#08x.\n", | ||
42 | c->free_size, c->dirty_size, c->used_size, c->erasing_size, c->bad_size, | ||
43 | c->wasted_size, c->unchecked_size, c->flash_size); | ||
44 | BUG(); | ||
45 | } | ||
46 | } | ||
47 | |||
48 | void | ||
49 | __jffs2_dbg_acct_sanity_check(struct jffs2_sb_info *c, | ||
50 | struct jffs2_eraseblock *jeb) | ||
51 | { | ||
52 | spin_lock(&c->erase_completion_lock); | ||
53 | jffs2_dbg_acct_sanity_check_nolock(c, jeb); | ||
54 | spin_unlock(&c->erase_completion_lock); | ||
55 | } | ||
56 | |||
57 | #endif /* JFFS2_DBG_SANITY_CHECKS */ | ||
58 | |||
59 | #ifdef JFFS2_DBG_PARANOIA_CHECKS | ||
60 | /* | ||
61 | * Check the fragtree. | ||
62 | */ | ||
63 | void | ||
64 | __jffs2_dbg_fragtree_paranoia_check(struct jffs2_inode_info *f) | ||
65 | { | ||
66 | down(&f->sem); | ||
67 | __jffs2_dbg_fragtree_paranoia_check_nolock(f); | ||
68 | up(&f->sem); | ||
69 | } | ||
70 | |||
71 | void | ||
72 | __jffs2_dbg_fragtree_paranoia_check_nolock(struct jffs2_inode_info *f) | ||
73 | { | ||
74 | struct jffs2_node_frag *frag; | ||
75 | int bitched = 0; | ||
76 | |||
77 | for (frag = frag_first(&f->fragtree); frag; frag = frag_next(frag)) { | ||
78 | struct jffs2_full_dnode *fn = frag->node; | ||
79 | |||
80 | if (!fn || !fn->raw) | ||
81 | continue; | ||
82 | |||
83 | if (ref_flags(fn->raw) == REF_PRISTINE) { | ||
84 | if (fn->frags > 1) { | ||
85 | JFFS2_ERROR("REF_PRISTINE node at 0x%08x had %d frags. Tell dwmw2.\n", | ||
86 | ref_offset(fn->raw), fn->frags); | ||
87 | bitched = 1; | ||
88 | } | ||
89 | |||
90 | /* A hole node which isn't multi-page should be garbage-collected | ||
91 | and merged anyway, so we just check for the frag size here, | ||
92 | rather than mucking around with actually reading the node | ||
93 | and checking the compression type, which is the real way | ||
94 | to tell a hole node. */ | ||
95 | if (frag->ofs & (PAGE_CACHE_SIZE-1) && frag_prev(frag) | ||
96 | && frag_prev(frag)->size < PAGE_CACHE_SIZE && frag_prev(frag)->node) { | ||
97 | JFFS2_ERROR("REF_PRISTINE node at 0x%08x had a previous non-hole frag in the same page. Tell dwmw2.\n", | ||
98 | ref_offset(fn->raw)); | ||
99 | bitched = 1; | ||
100 | } | ||
101 | |||
102 | if ((frag->ofs+frag->size) & (PAGE_CACHE_SIZE-1) && frag_next(frag) | ||
103 | && frag_next(frag)->size < PAGE_CACHE_SIZE && frag_next(frag)->node) { | ||
104 | JFFS2_ERROR("REF_PRISTINE node at 0x%08x (%08x-%08x) had a following non-hole frag in the same page. Tell dwmw2.\n", | ||
105 | ref_offset(fn->raw), frag->ofs, frag->ofs+frag->size); | ||
106 | bitched = 1; | ||
107 | } | ||
108 | } | ||
109 | } | ||
110 | |||
111 | if (bitched) { | ||
112 | JFFS2_ERROR("fragtree is corrupted.\n"); | ||
113 | __jffs2_dbg_dump_fragtree_nolock(f); | ||
114 | BUG(); | ||
115 | } | ||
116 | } | ||
117 | |||
118 | /* | ||
119 | * Check if the flash contains all 0xFF before we start writing. | ||
120 | */ | ||
121 | void | ||
122 | __jffs2_dbg_prewrite_paranoia_check(struct jffs2_sb_info *c, | ||
123 | uint32_t ofs, int len) | ||
124 | { | ||
125 | size_t retlen; | ||
126 | int ret, i; | ||
127 | unsigned char *buf; | ||
128 | |||
129 | buf = kmalloc(len, GFP_KERNEL); | ||
130 | if (!buf) | ||
131 | return; | ||
132 | |||
133 | ret = jffs2_flash_read(c, ofs, len, &retlen, buf); | ||
134 | if (ret || (retlen != len)) { | ||
135 | JFFS2_WARNING("read %d bytes failed or short. ret %d, retlen %zd.\n", | ||
136 | len, ret, retlen); | ||
137 | kfree(buf); | ||
138 | return; | ||
139 | } | ||
140 | |||
141 | ret = 0; | ||
142 | for (i = 0; i < len; i++) | ||
143 | if (buf[i] != 0xff) | ||
144 | ret = 1; | ||
145 | |||
146 | if (ret) { | ||
147 | JFFS2_ERROR("argh, about to write node to %#08x on flash, but there are data already there. The first corrupted byte is at %#08x offset.\n", | ||
148 | ofs, ofs + i); | ||
149 | __jffs2_dbg_dump_buffer(buf, len, ofs); | ||
150 | kfree(buf); | ||
151 | BUG(); | ||
152 | } | ||
153 | |||
154 | kfree(buf); | ||
155 | } | ||
156 | |||
/*
 * Check the space accounting and node_ref list correctness for the JFFS2 erasable block 'jeb'.
 */
/* Locking wrapper: takes c->erase_completion_lock around the real check
 * (__jffs2_dbg_acct_paranoia_check_nolock). */
void
__jffs2_dbg_acct_paranoia_check(struct jffs2_sb_info *c,
				struct jffs2_eraseblock *jeb)
{
	spin_lock(&c->erase_completion_lock);
	__jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
	spin_unlock(&c->erase_completion_lock);
}
168 | |||
/*
 * Re-derive the used/unchecked/dirty totals of 'jeb' by walking its
 * node_ref list, and compare them with the sizes stored in the
 * eraseblock. On any mismatch the node refs, the eraseblock accounting
 * and the global block lists are dumped and we BUG().
 * Caller must hold c->erase_completion_lock.
 */
void
__jffs2_dbg_acct_paranoia_check_nolock(struct jffs2_sb_info *c,
				       struct jffs2_eraseblock *jeb)
{
	uint32_t my_used_size = 0;
	uint32_t my_unchecked_size = 0;
	uint32_t my_dirty_size = 0;
	struct jffs2_raw_node_ref *ref2 = jeb->first_node;

	while (ref2) {
		uint32_t totlen = ref_totlen(c, jeb, ref2);

		/* Every ref on this list must lie inside this eraseblock.
		 * NOTE(review): the upper bound uses '>', so a ref starting
		 * at exactly jeb->offset + c->sector_size would be accepted
		 * — confirm that is intended. */
		if (ref2->flash_offset < jeb->offset ||
		    ref2->flash_offset > jeb->offset + c->sector_size) {
			JFFS2_ERROR("node_ref %#08x shouldn't be in block at %#08x.\n",
				    ref_offset(ref2), jeb->offset);
			goto error;

		}
		/* Classify: unchecked, live (used) or obsolete (dirty). */
		if (ref_flags(ref2) == REF_UNCHECKED)
			my_unchecked_size += totlen;
		else if (!ref_obsolete(ref2))
			my_used_size += totlen;
		else
			my_dirty_size += totlen;

		/* Exactly the last ref on the list (and only it) may have a
		 * NULL next_phys; anything else means the list and
		 * jeb->last_node disagree. */
		if ((!ref2->next_phys) != (ref2 == jeb->last_node)) {
			JFFS2_ERROR("node_ref for node at %#08x (mem %p) has next_phys at %#08x (mem %p), last_node is at %#08x (mem %p).\n",
				    ref_offset(ref2), ref2, ref_offset(ref2->next_phys), ref2->next_phys,
				    ref_offset(jeb->last_node), jeb->last_node);
			goto error;
		}
		ref2 = ref2->next_phys;
	}

	if (my_used_size != jeb->used_size) {
		JFFS2_ERROR("Calculated used size %#08x != stored used size %#08x.\n",
			    my_used_size, jeb->used_size);
		goto error;
	}

	if (my_unchecked_size != jeb->unchecked_size) {
		JFFS2_ERROR("Calculated unchecked size %#08x != stored unchecked size %#08x.\n",
			    my_unchecked_size, jeb->unchecked_size);
		goto error;
	}

#if 0
	/* This should work when we implement ref->__totlen elemination */
	if (my_dirty_size != jeb->dirty_size + jeb->wasted_size) {
		JFFS2_ERROR("Calculated dirty+wasted size %#08x != stored dirty + wasted size %#08x\n",
			    my_dirty_size, jeb->dirty_size + jeb->wasted_size);
		goto error;
	}

	if (jeb->free_size == 0
	    && my_used_size + my_unchecked_size + my_dirty_size != c->sector_size) {
		JFFS2_ERROR("The sum of all nodes in block (%#x) != size of block (%#x)\n",
			    my_used_size + my_unchecked_size + my_dirty_size,
			    c->sector_size);
		goto error;
	}
#endif

	return;

error:
	__jffs2_dbg_dump_node_refs_nolock(c, jeb);
	__jffs2_dbg_dump_jeb_nolock(jeb);
	__jffs2_dbg_dump_block_lists_nolock(c);
	BUG();

}
242 | #endif /* JFFS2_DBG_PARANOIA_CHECKS */ | ||
243 | |||
244 | #if defined(JFFS2_DBG_DUMPS) || defined(JFFS2_DBG_PARANOIA_CHECKS) | ||
/*
 * Dump the node_refs of the 'jeb' JFFS2 eraseblock.
 */
/* Locking wrapper: takes c->erase_completion_lock around the real dump
 * (__jffs2_dbg_dump_node_refs_nolock). */
void
__jffs2_dbg_dump_node_refs(struct jffs2_sb_info *c,
			   struct jffs2_eraseblock *jeb)
{
	spin_lock(&c->erase_completion_lock);
	__jffs2_dbg_dump_node_refs_nolock(c, jeb);
	spin_unlock(&c->erase_completion_lock);
}
256 | |||
257 | void | ||
258 | __jffs2_dbg_dump_node_refs_nolock(struct jffs2_sb_info *c, | ||
259 | struct jffs2_eraseblock *jeb) | ||
260 | { | ||
261 | struct jffs2_raw_node_ref *ref; | ||
262 | int i = 0; | ||
263 | |||
264 | printk(JFFS2_DBG_MSG_PREFIX " Dump node_refs of the eraseblock %#08x\n", jeb->offset); | ||
265 | if (!jeb->first_node) { | ||
266 | printk(JFFS2_DBG_MSG_PREFIX " no nodes in the eraseblock %#08x\n", jeb->offset); | ||
267 | return; | ||
268 | } | ||
269 | |||
270 | printk(JFFS2_DBG); | ||
271 | for (ref = jeb->first_node; ; ref = ref->next_phys) { | ||
272 | printk("%#08x(%#x)", ref_offset(ref), ref->__totlen); | ||
273 | if (ref->next_phys) | ||
274 | printk("->"); | ||
275 | else | ||
276 | break; | ||
277 | if (++i == 4) { | ||
278 | i = 0; | ||
279 | printk("\n" JFFS2_DBG); | ||
280 | } | ||
281 | } | ||
282 | printk("\n"); | ||
283 | } | ||
284 | |||
/*
 * Dump an eraseblock's space accounting.
 */
/* Locking wrapper: takes c->erase_completion_lock around the real dump
 * (__jffs2_dbg_dump_jeb_nolock). */
void
__jffs2_dbg_dump_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	spin_lock(&c->erase_completion_lock);
	__jffs2_dbg_dump_jeb_nolock(jeb);
	spin_unlock(&c->erase_completion_lock);
}
295 | |||
/* Print the five space-accounting counters of a single eraseblock.
 * Tolerates a NULL 'jeb' (silently returns). */
void
__jffs2_dbg_dump_jeb_nolock(struct jffs2_eraseblock *jeb)
{
	if (!jeb)
		return;

	printk(JFFS2_DBG_MSG_PREFIX " dump space accounting for the eraseblock at %#08x:\n",
	       jeb->offset);

	printk(JFFS2_DBG "used_size: %#08x\n", jeb->used_size);
	printk(JFFS2_DBG "dirty_size: %#08x\n", jeb->dirty_size);
	printk(JFFS2_DBG "wasted_size: %#08x\n", jeb->wasted_size);
	printk(JFFS2_DBG "unchecked_size: %#08x\n", jeb->unchecked_size);
	printk(JFFS2_DBG "free_size: %#08x\n", jeb->free_size);
}
311 | |||
/* Locking wrapper: takes c->erase_completion_lock around the real dump
 * (__jffs2_dbg_dump_block_lists_nolock). */
void
__jffs2_dbg_dump_block_lists(struct jffs2_sb_info *c)
{
	spin_lock(&c->erase_completion_lock);
	__jffs2_dbg_dump_block_lists_nolock(c);
	spin_unlock(&c->erase_completion_lock);
}
319 | |||
320 | void | ||
321 | __jffs2_dbg_dump_block_lists_nolock(struct jffs2_sb_info *c) | ||
322 | { | ||
323 | printk(JFFS2_DBG_MSG_PREFIX " dump JFFS2 blocks lists:\n"); | ||
324 | |||
325 | printk(JFFS2_DBG "flash_size: %#08x\n", c->flash_size); | ||
326 | printk(JFFS2_DBG "used_size: %#08x\n", c->used_size); | ||
327 | printk(JFFS2_DBG "dirty_size: %#08x\n", c->dirty_size); | ||
328 | printk(JFFS2_DBG "wasted_size: %#08x\n", c->wasted_size); | ||
329 | printk(JFFS2_DBG "unchecked_size: %#08x\n", c->unchecked_size); | ||
330 | printk(JFFS2_DBG "free_size: %#08x\n", c->free_size); | ||
331 | printk(JFFS2_DBG "erasing_size: %#08x\n", c->erasing_size); | ||
332 | printk(JFFS2_DBG "bad_size: %#08x\n", c->bad_size); | ||
333 | printk(JFFS2_DBG "sector_size: %#08x\n", c->sector_size); | ||
334 | printk(JFFS2_DBG "jffs2_reserved_blocks size: %#08x\n", | ||
335 | c->sector_size * c->resv_blocks_write); | ||
336 | |||
337 | if (c->nextblock) | ||
338 | printk(JFFS2_DBG "nextblock: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | ||
339 | c->nextblock->offset, c->nextblock->used_size, | ||
340 | c->nextblock->dirty_size, c->nextblock->wasted_size, | ||
341 | c->nextblock->unchecked_size, c->nextblock->free_size); | ||
342 | else | ||
343 | printk(JFFS2_DBG "nextblock: NULL\n"); | ||
344 | |||
345 | if (c->gcblock) | ||
346 | printk(JFFS2_DBG "gcblock: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | ||
347 | c->gcblock->offset, c->gcblock->used_size, c->gcblock->dirty_size, | ||
348 | c->gcblock->wasted_size, c->gcblock->unchecked_size, c->gcblock->free_size); | ||
349 | else | ||
350 | printk(JFFS2_DBG "gcblock: NULL\n"); | ||
351 | |||
352 | if (list_empty(&c->clean_list)) { | ||
353 | printk(JFFS2_DBG "clean_list: empty\n"); | ||
354 | } else { | ||
355 | struct list_head *this; | ||
356 | int numblocks = 0; | ||
357 | uint32_t dirty = 0; | ||
358 | |||
359 | list_for_each(this, &c->clean_list) { | ||
360 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
361 | numblocks ++; | ||
362 | dirty += jeb->wasted_size; | ||
363 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { | ||
364 | printk(JFFS2_DBG "clean_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | ||
365 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, | ||
366 | jeb->unchecked_size, jeb->free_size); | ||
367 | } | ||
368 | } | ||
369 | |||
370 | printk (JFFS2_DBG "Contains %d blocks with total wasted size %u, average wasted size: %u\n", | ||
371 | numblocks, dirty, dirty / numblocks); | ||
372 | } | ||
373 | |||
374 | if (list_empty(&c->very_dirty_list)) { | ||
375 | printk(JFFS2_DBG "very_dirty_list: empty\n"); | ||
376 | } else { | ||
377 | struct list_head *this; | ||
378 | int numblocks = 0; | ||
379 | uint32_t dirty = 0; | ||
380 | |||
381 | list_for_each(this, &c->very_dirty_list) { | ||
382 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
383 | |||
384 | numblocks ++; | ||
385 | dirty += jeb->dirty_size; | ||
386 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { | ||
387 | printk(JFFS2_DBG "very_dirty_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | ||
388 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, | ||
389 | jeb->unchecked_size, jeb->free_size); | ||
390 | } | ||
391 | } | ||
392 | |||
393 | printk (JFFS2_DBG "Contains %d blocks with total dirty size %u, average dirty size: %u\n", | ||
394 | numblocks, dirty, dirty / numblocks); | ||
395 | } | ||
396 | |||
397 | if (list_empty(&c->dirty_list)) { | ||
398 | printk(JFFS2_DBG "dirty_list: empty\n"); | ||
399 | } else { | ||
400 | struct list_head *this; | ||
401 | int numblocks = 0; | ||
402 | uint32_t dirty = 0; | ||
403 | |||
404 | list_for_each(this, &c->dirty_list) { | ||
405 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
406 | |||
407 | numblocks ++; | ||
408 | dirty += jeb->dirty_size; | ||
409 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { | ||
410 | printk(JFFS2_DBG "dirty_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | ||
411 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, | ||
412 | jeb->unchecked_size, jeb->free_size); | ||
413 | } | ||
414 | } | ||
415 | |||
416 | printk (JFFS2_DBG "contains %d blocks with total dirty size %u, average dirty size: %u\n", | ||
417 | numblocks, dirty, dirty / numblocks); | ||
418 | } | ||
419 | |||
420 | if (list_empty(&c->erasable_list)) { | ||
421 | printk(JFFS2_DBG "erasable_list: empty\n"); | ||
422 | } else { | ||
423 | struct list_head *this; | ||
424 | |||
425 | list_for_each(this, &c->erasable_list) { | ||
426 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
427 | |||
428 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { | ||
429 | printk(JFFS2_DBG "erasable_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | ||
430 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, | ||
431 | jeb->unchecked_size, jeb->free_size); | ||
432 | } | ||
433 | } | ||
434 | } | ||
435 | |||
436 | if (list_empty(&c->erasing_list)) { | ||
437 | printk(JFFS2_DBG "erasing_list: empty\n"); | ||
438 | } else { | ||
439 | struct list_head *this; | ||
440 | |||
441 | list_for_each(this, &c->erasing_list) { | ||
442 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
443 | |||
444 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { | ||
445 | printk(JFFS2_DBG "erasing_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | ||
446 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, | ||
447 | jeb->unchecked_size, jeb->free_size); | ||
448 | } | ||
449 | } | ||
450 | } | ||
451 | |||
452 | if (list_empty(&c->erase_pending_list)) { | ||
453 | printk(JFFS2_DBG "erase_pending_list: empty\n"); | ||
454 | } else { | ||
455 | struct list_head *this; | ||
456 | |||
457 | list_for_each(this, &c->erase_pending_list) { | ||
458 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
459 | |||
460 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { | ||
461 | printk(JFFS2_DBG "erase_pending_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | ||
462 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, | ||
463 | jeb->unchecked_size, jeb->free_size); | ||
464 | } | ||
465 | } | ||
466 | } | ||
467 | |||
468 | if (list_empty(&c->erasable_pending_wbuf_list)) { | ||
469 | printk(JFFS2_DBG "erasable_pending_wbuf_list: empty\n"); | ||
470 | } else { | ||
471 | struct list_head *this; | ||
472 | |||
473 | list_for_each(this, &c->erasable_pending_wbuf_list) { | ||
474 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
475 | |||
476 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { | ||
477 | printk(JFFS2_DBG "erasable_pending_wbuf_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | ||
478 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, | ||
479 | jeb->unchecked_size, jeb->free_size); | ||
480 | } | ||
481 | } | ||
482 | } | ||
483 | |||
484 | if (list_empty(&c->free_list)) { | ||
485 | printk(JFFS2_DBG "free_list: empty\n"); | ||
486 | } else { | ||
487 | struct list_head *this; | ||
488 | |||
489 | list_for_each(this, &c->free_list) { | ||
490 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
491 | |||
492 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { | ||
493 | printk(JFFS2_DBG "free_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | ||
494 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, | ||
495 | jeb->unchecked_size, jeb->free_size); | ||
496 | } | ||
497 | } | ||
498 | } | ||
499 | |||
500 | if (list_empty(&c->bad_list)) { | ||
501 | printk(JFFS2_DBG "bad_list: empty\n"); | ||
502 | } else { | ||
503 | struct list_head *this; | ||
504 | |||
505 | list_for_each(this, &c->bad_list) { | ||
506 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
507 | |||
508 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { | ||
509 | printk(JFFS2_DBG "bad_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | ||
510 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, | ||
511 | jeb->unchecked_size, jeb->free_size); | ||
512 | } | ||
513 | } | ||
514 | } | ||
515 | |||
516 | if (list_empty(&c->bad_used_list)) { | ||
517 | printk(JFFS2_DBG "bad_used_list: empty\n"); | ||
518 | } else { | ||
519 | struct list_head *this; | ||
520 | |||
521 | list_for_each(this, &c->bad_used_list) { | ||
522 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
523 | |||
524 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { | ||
525 | printk(JFFS2_DBG "bad_used_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | ||
526 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, | ||
527 | jeb->unchecked_size, jeb->free_size); | ||
528 | } | ||
529 | } | ||
530 | } | ||
531 | } | ||
532 | |||
533 | void | ||
534 | __jffs2_dbg_dump_fragtree(struct jffs2_inode_info *f) | ||
535 | { | ||
536 | down(&f->sem); | ||
537 | jffs2_dbg_dump_fragtree_nolock(f); | ||
538 | up(&f->sem); | ||
539 | } | ||
540 | |||
/*
 * Walk the fragtree of inode 'f' in ascending offset order, printing
 * each frag (either backed by a node on flash, or a hole). While
 * walking, verify that every frag starts exactly where the previous one
 * ended; a gap means the tree is corrupt and we BUG().
 * Caller must hold f->sem.
 */
void
__jffs2_dbg_dump_fragtree_nolock(struct jffs2_inode_info *f)
{
	struct jffs2_node_frag *this = frag_first(&f->fragtree);
	uint32_t lastofs = 0;	/* end offset of the previous frag */
	int buggy = 0;

	printk(JFFS2_DBG_MSG_PREFIX " dump fragtree of ino #%u\n", f->inocache->ino);
	while(this) {
		if (this->node)
			printk(JFFS2_DBG "frag %#04x-%#04x: %#08x(%d) on flash (*%p), left (%p), right (%p), parent (%p)\n",
			       this->ofs, this->ofs+this->size, ref_offset(this->node->raw),
			       ref_flags(this->node->raw), this, frag_left(this), frag_right(this),
			       frag_parent(this));
		else
			printk(JFFS2_DBG "frag %#04x-%#04x: hole (*%p). left (%p), right (%p), parent (%p)\n",
			       this->ofs, this->ofs+this->size, this, frag_left(this),
			       frag_right(this), frag_parent(this));
		/* Frags must be contiguous: a frag that doesn't start where
		 * the previous one ended marks the tree as buggy. */
		if (this->ofs != lastofs)
			buggy = 1;
		lastofs = this->ofs + this->size;
		this = frag_next(this);
	}

	if (f->metadata)
		printk(JFFS2_DBG "metadata at 0x%08x\n", ref_offset(f->metadata->raw));

	if (buggy) {
		JFFS2_ERROR("frag tree got a hole in it.\n");
		BUG();
	}
}
573 | |||
574 | #define JFFS2_BUFDUMP_BYTES_PER_LINE 32 | ||
575 | void | ||
576 | __jffs2_dbg_dump_buffer(unsigned char *buf, int len, uint32_t offs) | ||
577 | { | ||
578 | int skip; | ||
579 | int i; | ||
580 | |||
581 | printk(JFFS2_DBG_MSG_PREFIX " dump from offset %#08x to offset %#08x (%x bytes).\n", | ||
582 | offs, offs + len, len); | ||
583 | i = skip = offs % JFFS2_BUFDUMP_BYTES_PER_LINE; | ||
584 | offs = offs & ~(JFFS2_BUFDUMP_BYTES_PER_LINE - 1); | ||
585 | |||
586 | if (skip != 0) | ||
587 | printk(JFFS2_DBG "%#08x: ", offs); | ||
588 | |||
589 | while (skip--) | ||
590 | printk(" "); | ||
591 | |||
592 | while (i < len) { | ||
593 | if ((i % JFFS2_BUFDUMP_BYTES_PER_LINE) == 0 && i != len -1) { | ||
594 | if (i != 0) | ||
595 | printk("\n"); | ||
596 | offs += JFFS2_BUFDUMP_BYTES_PER_LINE; | ||
597 | printk(JFFS2_DBG "%0#8x: ", offs); | ||
598 | } | ||
599 | |||
600 | printk("%02x ", buf[i]); | ||
601 | |||
602 | i += 1; | ||
603 | } | ||
604 | |||
605 | printk("\n"); | ||
606 | } | ||
607 | |||
/*
 * Dump a JFFS2 node.
 */
/*
 * Read the node at flash offset 'ofs' and pretty-print its common
 * header and, for inode and dirent nodes, the type-specific fields.
 * CRCs are verified as we go; on a CRC or magic mismatch the error is
 * reported and the dump stops.
 */
void
__jffs2_dbg_dump_node(struct jffs2_sb_info *c, uint32_t ofs)
{
	union jffs2_node_union node;
	int len = sizeof(union jffs2_node_union);
	size_t retlen;
	uint32_t crc;
	int ret;

	printk(JFFS2_DBG_MSG_PREFIX " dump node at offset %#08x.\n", ofs);

	ret = jffs2_flash_read(c, ofs, len, &retlen, (unsigned char *)&node);
	if (ret || (retlen != len)) {
		JFFS2_ERROR("read %d bytes failed or short. ret %d, retlen %zd.\n",
			    len, ret, retlen);
		return;
	}

	printk(JFFS2_DBG "magic:\t%#04x\n", je16_to_cpu(node.u.magic));
	printk(JFFS2_DBG "nodetype:\t%#04x\n", je16_to_cpu(node.u.nodetype));
	printk(JFFS2_DBG "totlen:\t%#08x\n", je32_to_cpu(node.u.totlen));
	printk(JFFS2_DBG "hdr_crc:\t%#08x\n", je32_to_cpu(node.u.hdr_crc));

	/* The common header CRC covers everything except the trailing
	 * 4-byte hdr_crc field itself, hence '- 4'. */
	crc = crc32(0, &node.u, sizeof(node.u) - 4);
	if (crc != je32_to_cpu(node.u.hdr_crc)) {
		JFFS2_ERROR("wrong common header CRC.\n");
		return;
	}

	if (je16_to_cpu(node.u.magic) != JFFS2_MAGIC_BITMASK &&
	    je16_to_cpu(node.u.magic) != JFFS2_OLD_MAGIC_BITMASK)
	{
		JFFS2_ERROR("wrong node magic: %#04x instead of %#04x.\n",
			    je16_to_cpu(node.u.magic), JFFS2_MAGIC_BITMASK);
		return;
	}

	switch(je16_to_cpu(node.u.nodetype)) {

	case JFFS2_NODETYPE_INODE:

		printk(JFFS2_DBG "the node is inode node\n");
		printk(JFFS2_DBG "ino:\t%#08x\n", je32_to_cpu(node.i.ino));
		printk(JFFS2_DBG "version:\t%#08x\n", je32_to_cpu(node.i.version));
		printk(JFFS2_DBG "mode:\t%#08x\n", node.i.mode.m);
		printk(JFFS2_DBG "uid:\t%#04x\n", je16_to_cpu(node.i.uid));
		printk(JFFS2_DBG "gid:\t%#04x\n", je16_to_cpu(node.i.gid));
		printk(JFFS2_DBG "isize:\t%#08x\n", je32_to_cpu(node.i.isize));
		printk(JFFS2_DBG "atime:\t%#08x\n", je32_to_cpu(node.i.atime));
		printk(JFFS2_DBG "mtime:\t%#08x\n", je32_to_cpu(node.i.mtime));
		printk(JFFS2_DBG "ctime:\t%#08x\n", je32_to_cpu(node.i.ctime));
		printk(JFFS2_DBG "offset:\t%#08x\n", je32_to_cpu(node.i.offset));
		printk(JFFS2_DBG "csize:\t%#08x\n", je32_to_cpu(node.i.csize));
		printk(JFFS2_DBG "dsize:\t%#08x\n", je32_to_cpu(node.i.dsize));
		printk(JFFS2_DBG "compr:\t%#02x\n", node.i.compr);
		printk(JFFS2_DBG "usercompr:\t%#02x\n", node.i.usercompr);
		printk(JFFS2_DBG "flags:\t%#04x\n", je16_to_cpu(node.i.flags));
		printk(JFFS2_DBG "data_crc:\t%#08x\n", je32_to_cpu(node.i.data_crc));
		printk(JFFS2_DBG "node_crc:\t%#08x\n", je32_to_cpu(node.i.node_crc));

		/* node_crc covers the header up to but not including the
		 * node_crc and data_crc fields, hence '- 8'. */
		crc = crc32(0, &node.i, sizeof(node.i) - 8);
		if (crc != je32_to_cpu(node.i.node_crc)) {
			JFFS2_ERROR("wrong node header CRC.\n");
			return;
		}
		break;

	case JFFS2_NODETYPE_DIRENT:

		printk(JFFS2_DBG "the node is dirent node\n");
		printk(JFFS2_DBG "pino:\t%#08x\n", je32_to_cpu(node.d.pino));
		printk(JFFS2_DBG "version:\t%#08x\n", je32_to_cpu(node.d.version));
		printk(JFFS2_DBG "ino:\t%#08x\n", je32_to_cpu(node.d.ino));
		printk(JFFS2_DBG "mctime:\t%#08x\n", je32_to_cpu(node.d.mctime));
		printk(JFFS2_DBG "nsize:\t%#02x\n", node.d.nsize);
		printk(JFFS2_DBG "type:\t%#02x\n", node.d.type);
		printk(JFFS2_DBG "node_crc:\t%#08x\n", je32_to_cpu(node.d.node_crc));
		printk(JFFS2_DBG "name_crc:\t%#08x\n", je32_to_cpu(node.d.name_crc));

		/* NOTE(review): nsize comes straight from flash and is not
		 * validated against the size of the on-stack 'node' union
		 * before this write; a corrupt dirent could make it land
		 * past the buffer — confirm whether a bound check is
		 * needed here. */
		node.d.name[node.d.nsize] = '\0';
		printk(JFFS2_DBG "name:\t\"%s\"\n", node.d.name);

		/* Same '- 8' exclusion as above: node_crc and name_crc are
		 * not covered by node_crc itself. */
		crc = crc32(0, &node.d, sizeof(node.d) - 8);
		if (crc != je32_to_cpu(node.d.node_crc)) {
			JFFS2_ERROR("wrong node header CRC.\n");
			return;
		}
		break;

	default:
		printk(JFFS2_DBG "node type is unknown\n");
		break;
	}
}
705 | #endif /* JFFS2_DBG_DUMPS || JFFS2_DBG_PARANOIA_CHECKS */ | ||
diff --git a/fs/jffs2/debug.h b/fs/jffs2/debug.h new file mode 100644 index 000000000000..f193d43a8a59 --- /dev/null +++ b/fs/jffs2/debug.h | |||
@@ -0,0 +1,279 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2001-2003 Red Hat, Inc. | ||
5 | * | ||
6 | * Created by David Woodhouse <dwmw2@infradead.org> | ||
7 | * | ||
8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
9 | * | ||
10 | * $Id: debug.h,v 1.21 2005/11/07 11:14:39 gleixner Exp $ | ||
11 | * | ||
12 | */ | ||
13 | #ifndef _JFFS2_DEBUG_H_ | ||
14 | #define _JFFS2_DEBUG_H_ | ||
15 | |||
16 | #include <linux/config.h> | ||
17 | |||
18 | #ifndef CONFIG_JFFS2_FS_DEBUG | ||
19 | #define CONFIG_JFFS2_FS_DEBUG 0 | ||
20 | #endif | ||
21 | |||
22 | #if CONFIG_JFFS2_FS_DEBUG > 0 | ||
23 | /* Enable "paranoia" checks and dumps */ | ||
24 | #define JFFS2_DBG_PARANOIA_CHECKS | ||
25 | #define JFFS2_DBG_DUMPS | ||
26 | |||
27 | /* | ||
28 | * By defining/undefining the below macros one may select debugging messages | ||
29 | * fro specific JFFS2 subsystems. | ||
30 | */ | ||
31 | #define JFFS2_DBG_READINODE_MESSAGES | ||
32 | #define JFFS2_DBG_FRAGTREE_MESSAGES | ||
33 | #define JFFS2_DBG_DENTLIST_MESSAGES | ||
34 | #define JFFS2_DBG_NODEREF_MESSAGES | ||
35 | #define JFFS2_DBG_INOCACHE_MESSAGES | ||
36 | #define JFFS2_DBG_SUMMARY_MESSAGES | ||
37 | #define JFFS2_DBG_FSBUILD_MESSAGES | ||
38 | #endif | ||
39 | |||
40 | #if CONFIG_JFFS2_FS_DEBUG > 1 | ||
41 | #define JFFS2_DBG_FRAGTREE2_MESSAGES | ||
42 | #define JFFS2_DBG_MEMALLOC_MESSAGES | ||
43 | #endif | ||
44 | |||
45 | /* Sanity checks are supposed to be light-weight and enabled by default */ | ||
46 | #define JFFS2_DBG_SANITY_CHECKS | ||
47 | |||
48 | /* | ||
49 | * Dx() are mainly used for debugging messages, they must go away and be | ||
50 | * superseded by nicer dbg_xxx() macros... | ||
51 | */ | ||
52 | #if CONFIG_JFFS2_FS_DEBUG > 0 | ||
53 | #define D1(x) x | ||
54 | #else | ||
55 | #define D1(x) | ||
56 | #endif | ||
57 | |||
58 | #if CONFIG_JFFS2_FS_DEBUG > 1 | ||
59 | #define D2(x) x | ||
60 | #else | ||
61 | #define D2(x) | ||
62 | #endif | ||
63 | |||
64 | /* The prefixes of JFFS2 messages */ | ||
65 | #define JFFS2_DBG_PREFIX "[JFFS2 DBG]" | ||
66 | #define JFFS2_ERR_PREFIX "JFFS2 error:" | ||
67 | #define JFFS2_WARN_PREFIX "JFFS2 warning:" | ||
68 | #define JFFS2_NOTICE_PREFIX "JFFS2 notice:" | ||
69 | |||
70 | #define JFFS2_ERR KERN_ERR | ||
71 | #define JFFS2_WARN KERN_WARNING | ||
72 | #define JFFS2_NOT KERN_NOTICE | ||
73 | #define JFFS2_DBG KERN_DEBUG | ||
74 | |||
75 | #define JFFS2_DBG_MSG_PREFIX JFFS2_DBG JFFS2_DBG_PREFIX | ||
76 | #define JFFS2_ERR_MSG_PREFIX JFFS2_ERR JFFS2_ERR_PREFIX | ||
77 | #define JFFS2_WARN_MSG_PREFIX JFFS2_WARN JFFS2_WARN_PREFIX | ||
78 | #define JFFS2_NOTICE_MSG_PREFIX JFFS2_NOT JFFS2_NOTICE_PREFIX | ||
79 | |||
80 | /* JFFS2 message macros */ | ||
81 | #define JFFS2_ERROR(fmt, ...) \ | ||
82 | do { \ | ||
83 | printk(JFFS2_ERR_MSG_PREFIX \ | ||
84 | " (%d) %s: " fmt, current->pid, \ | ||
85 | __FUNCTION__, ##__VA_ARGS__); \ | ||
86 | } while(0) | ||
87 | |||
88 | #define JFFS2_WARNING(fmt, ...) \ | ||
89 | do { \ | ||
90 | printk(JFFS2_WARN_MSG_PREFIX \ | ||
91 | " (%d) %s: " fmt, current->pid, \ | ||
92 | __FUNCTION__, ##__VA_ARGS__); \ | ||
93 | } while(0) | ||
94 | |||
95 | #define JFFS2_NOTICE(fmt, ...) \ | ||
96 | do { \ | ||
97 | printk(JFFS2_NOTICE_MSG_PREFIX \ | ||
98 | " (%d) %s: " fmt, current->pid, \ | ||
99 | __FUNCTION__, ##__VA_ARGS__); \ | ||
100 | } while(0) | ||
101 | |||
102 | #define JFFS2_DEBUG(fmt, ...) \ | ||
103 | do { \ | ||
104 | printk(JFFS2_DBG_MSG_PREFIX \ | ||
105 | " (%d) %s: " fmt, current->pid, \ | ||
106 | __FUNCTION__, ##__VA_ARGS__); \ | ||
107 | } while(0) | ||
108 | |||
109 | /* | ||
110 | * We split our debugging messages on several parts, depending on the JFFS2 | ||
111 | * subsystem the message belongs to. | ||
112 | */ | ||
113 | /* Read inode debugging messages */ | ||
114 | #ifdef JFFS2_DBG_READINODE_MESSAGES | ||
115 | #define dbg_readinode(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) | ||
116 | #else | ||
117 | #define dbg_readinode(fmt, ...) | ||
118 | #endif | ||
119 | |||
120 | /* Fragtree build debugging messages */ | ||
121 | #ifdef JFFS2_DBG_FRAGTREE_MESSAGES | ||
122 | #define dbg_fragtree(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) | ||
123 | #else | ||
124 | #define dbg_fragtree(fmt, ...) | ||
125 | #endif | ||
126 | #ifdef JFFS2_DBG_FRAGTREE2_MESSAGES | ||
127 | #define dbg_fragtree2(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) | ||
128 | #else | ||
129 | #define dbg_fragtree2(fmt, ...) | ||
130 | #endif | ||
131 | |||
132 | /* Directory entry list manilulation debugging messages */ | ||
133 | #ifdef JFFS2_DBG_DENTLIST_MESSAGES | ||
134 | #define dbg_dentlist(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) | ||
135 | #else | ||
136 | #define dbg_dentlist(fmt, ...) | ||
137 | #endif | ||
138 | |||
139 | /* Print the messages about manipulating node_refs */ | ||
140 | #ifdef JFFS2_DBG_NODEREF_MESSAGES | ||
141 | #define dbg_noderef(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) | ||
142 | #else | ||
143 | #define dbg_noderef(fmt, ...) | ||
144 | #endif | ||
145 | |||
146 | /* Manipulations with the list of inodes (JFFS2 inocache) */ | ||
147 | #ifdef JFFS2_DBG_INOCACHE_MESSAGES | ||
148 | #define dbg_inocache(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) | ||
149 | #else | ||
150 | #define dbg_inocache(fmt, ...) | ||
151 | #endif | ||
152 | |||
153 | /* Summary debugging messages */ | ||
154 | #ifdef JFFS2_DBG_SUMMARY_MESSAGES | ||
155 | #define dbg_summary(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) | ||
156 | #else | ||
157 | #define dbg_summary(fmt, ...) | ||
158 | #endif | ||
159 | |||
160 | /* File system build messages */ | ||
161 | #ifdef JFFS2_DBG_FSBUILD_MESSAGES | ||
162 | #define dbg_fsbuild(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) | ||
163 | #else | ||
164 | #define dbg_fsbuild(fmt, ...) | ||
165 | #endif | ||
166 | |||
167 | /* Watch the object allocations */ | ||
168 | #ifdef JFFS2_DBG_MEMALLOC_MESSAGES | ||
169 | #define dbg_memalloc(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) | ||
170 | #else | ||
171 | #define dbg_memalloc(fmt, ...) | ||
172 | #endif | ||
173 | |||
174 | |||
175 | /* "Sanity" checks */ | ||
176 | void | ||
177 | __jffs2_dbg_acct_sanity_check_nolock(struct jffs2_sb_info *c, | ||
178 | struct jffs2_eraseblock *jeb); | ||
179 | void | ||
180 | __jffs2_dbg_acct_sanity_check(struct jffs2_sb_info *c, | ||
181 | struct jffs2_eraseblock *jeb); | ||
182 | |||
183 | /* "Paranoia" checks */ | ||
184 | void | ||
185 | __jffs2_dbg_fragtree_paranoia_check(struct jffs2_inode_info *f); | ||
186 | void | ||
187 | __jffs2_dbg_fragtree_paranoia_check_nolock(struct jffs2_inode_info *f); | ||
188 | void | ||
189 | __jffs2_dbg_acct_paranoia_check(struct jffs2_sb_info *c, | ||
190 | struct jffs2_eraseblock *jeb); | ||
191 | void | ||
192 | __jffs2_dbg_acct_paranoia_check_nolock(struct jffs2_sb_info *c, | ||
193 | struct jffs2_eraseblock *jeb); | ||
194 | void | ||
195 | __jffs2_dbg_prewrite_paranoia_check(struct jffs2_sb_info *c, | ||
196 | uint32_t ofs, int len); | ||
197 | |||
198 | /* "Dump" functions */ | ||
199 | void | ||
200 | __jffs2_dbg_dump_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); | ||
201 | void | ||
202 | __jffs2_dbg_dump_jeb_nolock(struct jffs2_eraseblock *jeb); | ||
203 | void | ||
204 | __jffs2_dbg_dump_block_lists(struct jffs2_sb_info *c); | ||
205 | void | ||
206 | __jffs2_dbg_dump_block_lists_nolock(struct jffs2_sb_info *c); | ||
207 | void | ||
208 | __jffs2_dbg_dump_node_refs(struct jffs2_sb_info *c, | ||
209 | struct jffs2_eraseblock *jeb); | ||
210 | void | ||
211 | __jffs2_dbg_dump_node_refs_nolock(struct jffs2_sb_info *c, | ||
212 | struct jffs2_eraseblock *jeb); | ||
213 | void | ||
214 | __jffs2_dbg_dump_fragtree(struct jffs2_inode_info *f); | ||
215 | void | ||
216 | __jffs2_dbg_dump_fragtree_nolock(struct jffs2_inode_info *f); | ||
217 | void | ||
218 | __jffs2_dbg_dump_buffer(unsigned char *buf, int len, uint32_t offs); | ||
219 | void | ||
220 | __jffs2_dbg_dump_node(struct jffs2_sb_info *c, uint32_t ofs); | ||
221 | |||
222 | #ifdef JFFS2_DBG_PARANOIA_CHECKS | ||
223 | #define jffs2_dbg_fragtree_paranoia_check(f) \ | ||
224 | __jffs2_dbg_fragtree_paranoia_check(f) | ||
225 | #define jffs2_dbg_fragtree_paranoia_check_nolock(f) \ | ||
226 | __jffs2_dbg_fragtree_paranoia_check_nolock(f) | ||
227 | #define jffs2_dbg_acct_paranoia_check(c, jeb) \ | ||
228 | __jffs2_dbg_acct_paranoia_check(c,jeb) | ||
229 | #define jffs2_dbg_acct_paranoia_check_nolock(c, jeb) \ | ||
230 | __jffs2_dbg_acct_paranoia_check_nolock(c,jeb) | ||
231 | #define jffs2_dbg_prewrite_paranoia_check(c, ofs, len) \ | ||
232 | __jffs2_dbg_prewrite_paranoia_check(c, ofs, len) | ||
233 | #else | ||
234 | #define jffs2_dbg_fragtree_paranoia_check(f) | ||
235 | #define jffs2_dbg_fragtree_paranoia_check_nolock(f) | ||
236 | #define jffs2_dbg_acct_paranoia_check(c, jeb) | ||
237 | #define jffs2_dbg_acct_paranoia_check_nolock(c, jeb) | ||
238 | #define jffs2_dbg_prewrite_paranoia_check(c, ofs, len) | ||
239 | #endif /* !JFFS2_PARANOIA_CHECKS */ | ||
240 | |||
241 | #ifdef JFFS2_DBG_DUMPS | ||
242 | #define jffs2_dbg_dump_jeb(c, jeb) \ | ||
243 | __jffs2_dbg_dump_jeb(c, jeb); | ||
244 | #define jffs2_dbg_dump_jeb_nolock(jeb) \ | ||
245 | __jffs2_dbg_dump_jeb_nolock(jeb); | ||
246 | #define jffs2_dbg_dump_block_lists(c) \ | ||
247 | __jffs2_dbg_dump_block_lists(c) | ||
248 | #define jffs2_dbg_dump_block_lists_nolock(c) \ | ||
249 | __jffs2_dbg_dump_block_lists_nolock(c) | ||
250 | #define jffs2_dbg_dump_fragtree(f) \ | ||
251 | __jffs2_dbg_dump_fragtree(f); | ||
252 | #define jffs2_dbg_dump_fragtree_nolock(f) \ | ||
253 | __jffs2_dbg_dump_fragtree_nolock(f); | ||
254 | #define jffs2_dbg_dump_buffer(buf, len, offs) \ | ||
255 | __jffs2_dbg_dump_buffer(*buf, len, offs); | ||
256 | #define jffs2_dbg_dump_node(c, ofs) \ | ||
257 | __jffs2_dbg_dump_node(c, ofs); | ||
258 | #else | ||
259 | #define jffs2_dbg_dump_jeb(c, jeb) | ||
260 | #define jffs2_dbg_dump_jeb_nolock(jeb) | ||
261 | #define jffs2_dbg_dump_block_lists(c) | ||
262 | #define jffs2_dbg_dump_block_lists_nolock(c) | ||
263 | #define jffs2_dbg_dump_fragtree(f) | ||
264 | #define jffs2_dbg_dump_fragtree_nolock(f) | ||
265 | #define jffs2_dbg_dump_buffer(buf, len, offs) | ||
266 | #define jffs2_dbg_dump_node(c, ofs) | ||
267 | #endif /* !JFFS2_DBG_DUMPS */ | ||
268 | |||
269 | #ifdef JFFS2_DBG_SANITY_CHECKS | ||
270 | #define jffs2_dbg_acct_sanity_check(c, jeb) \ | ||
271 | __jffs2_dbg_acct_sanity_check(c, jeb) | ||
272 | #define jffs2_dbg_acct_sanity_check_nolock(c, jeb) \ | ||
273 | __jffs2_dbg_acct_sanity_check_nolock(c, jeb) | ||
274 | #else | ||
275 | #define jffs2_dbg_acct_sanity_check(c, jeb) | ||
276 | #define jffs2_dbg_acct_sanity_check_nolock(c, jeb) | ||
277 | #endif /* !JFFS2_DBG_SANITY_CHECKS */ | ||
278 | |||
279 | #endif /* _JFFS2_DEBUG_H_ */ | ||
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c index 3ca0d25eef1d..a7bf9cb2567f 100644 --- a/fs/jffs2/dir.c +++ b/fs/jffs2/dir.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: dir.c,v 1.86 2005/07/06 12:13:09 dwmw2 Exp $ | 10 | * $Id: dir.c,v 1.90 2005/11/07 11:14:39 gleixner Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -64,7 +64,7 @@ struct inode_operations jffs2_dir_inode_operations = | |||
64 | 64 | ||
65 | 65 | ||
66 | /* We keep the dirent list sorted in increasing order of name hash, | 66 | /* We keep the dirent list sorted in increasing order of name hash, |
67 | and we use the same hash function as the dentries. Makes this | 67 | and we use the same hash function as the dentries. Makes this |
68 | nice and simple | 68 | nice and simple |
69 | */ | 69 | */ |
70 | static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target, | 70 | static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target, |
@@ -85,7 +85,7 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target, | |||
85 | 85 | ||
86 | /* NB: The 2.2 backport will need to explicitly check for '.' and '..' here */ | 86 | /* NB: The 2.2 backport will need to explicitly check for '.' and '..' here */ |
87 | for (fd_list = dir_f->dents; fd_list && fd_list->nhash <= target->d_name.hash; fd_list = fd_list->next) { | 87 | for (fd_list = dir_f->dents; fd_list && fd_list->nhash <= target->d_name.hash; fd_list = fd_list->next) { |
88 | if (fd_list->nhash == target->d_name.hash && | 88 | if (fd_list->nhash == target->d_name.hash && |
89 | (!fd || fd_list->version > fd->version) && | 89 | (!fd || fd_list->version > fd->version) && |
90 | strlen(fd_list->name) == target->d_name.len && | 90 | strlen(fd_list->name) == target->d_name.len && |
91 | !strncmp(fd_list->name, target->d_name.name, target->d_name.len)) { | 91 | !strncmp(fd_list->name, target->d_name.name, target->d_name.len)) { |
@@ -147,7 +147,7 @@ static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir) | |||
147 | curofs++; | 147 | curofs++; |
148 | /* First loop: curofs = 2; offset = 2 */ | 148 | /* First loop: curofs = 2; offset = 2 */ |
149 | if (curofs < offset) { | 149 | if (curofs < offset) { |
150 | D2(printk(KERN_DEBUG "Skipping dirent: \"%s\", ino #%u, type %d, because curofs %ld < offset %ld\n", | 150 | D2(printk(KERN_DEBUG "Skipping dirent: \"%s\", ino #%u, type %d, because curofs %ld < offset %ld\n", |
151 | fd->name, fd->ino, fd->type, curofs, offset)); | 151 | fd->name, fd->ino, fd->type, curofs, offset)); |
152 | continue; | 152 | continue; |
153 | } | 153 | } |
@@ -182,7 +182,7 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode, | |||
182 | ri = jffs2_alloc_raw_inode(); | 182 | ri = jffs2_alloc_raw_inode(); |
183 | if (!ri) | 183 | if (!ri) |
184 | return -ENOMEM; | 184 | return -ENOMEM; |
185 | 185 | ||
186 | c = JFFS2_SB_INFO(dir_i->i_sb); | 186 | c = JFFS2_SB_INFO(dir_i->i_sb); |
187 | 187 | ||
188 | D1(printk(KERN_DEBUG "jffs2_create()\n")); | 188 | D1(printk(KERN_DEBUG "jffs2_create()\n")); |
@@ -203,7 +203,7 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode, | |||
203 | f = JFFS2_INODE_INFO(inode); | 203 | f = JFFS2_INODE_INFO(inode); |
204 | dir_f = JFFS2_INODE_INFO(dir_i); | 204 | dir_f = JFFS2_INODE_INFO(dir_i); |
205 | 205 | ||
206 | ret = jffs2_do_create(c, dir_f, f, ri, | 206 | ret = jffs2_do_create(c, dir_f, f, ri, |
207 | dentry->d_name.name, dentry->d_name.len); | 207 | dentry->d_name.name, dentry->d_name.len); |
208 | 208 | ||
209 | if (ret) { | 209 | if (ret) { |
@@ -232,11 +232,14 @@ static int jffs2_unlink(struct inode *dir_i, struct dentry *dentry) | |||
232 | struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i); | 232 | struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i); |
233 | struct jffs2_inode_info *dead_f = JFFS2_INODE_INFO(dentry->d_inode); | 233 | struct jffs2_inode_info *dead_f = JFFS2_INODE_INFO(dentry->d_inode); |
234 | int ret; | 234 | int ret; |
235 | uint32_t now = get_seconds(); | ||
235 | 236 | ||
236 | ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name, | 237 | ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name, |
237 | dentry->d_name.len, dead_f); | 238 | dentry->d_name.len, dead_f, now); |
238 | if (dead_f->inocache) | 239 | if (dead_f->inocache) |
239 | dentry->d_inode->i_nlink = dead_f->inocache->nlink; | 240 | dentry->d_inode->i_nlink = dead_f->inocache->nlink; |
241 | if (!ret) | ||
242 | dir_i->i_mtime = dir_i->i_ctime = ITIME(now); | ||
240 | return ret; | 243 | return ret; |
241 | } | 244 | } |
242 | /***********************************************************************/ | 245 | /***********************************************************************/ |
@@ -249,6 +252,7 @@ static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct de | |||
249 | struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i); | 252 | struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i); |
250 | int ret; | 253 | int ret; |
251 | uint8_t type; | 254 | uint8_t type; |
255 | uint32_t now; | ||
252 | 256 | ||
253 | /* Don't let people make hard links to bad inodes. */ | 257 | /* Don't let people make hard links to bad inodes. */ |
254 | if (!f->inocache) | 258 | if (!f->inocache) |
@@ -261,13 +265,15 @@ static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct de | |||
261 | type = (old_dentry->d_inode->i_mode & S_IFMT) >> 12; | 265 | type = (old_dentry->d_inode->i_mode & S_IFMT) >> 12; |
262 | if (!type) type = DT_REG; | 266 | if (!type) type = DT_REG; |
263 | 267 | ||
264 | ret = jffs2_do_link(c, dir_f, f->inocache->ino, type, dentry->d_name.name, dentry->d_name.len); | 268 | now = get_seconds(); |
269 | ret = jffs2_do_link(c, dir_f, f->inocache->ino, type, dentry->d_name.name, dentry->d_name.len, now); | ||
265 | 270 | ||
266 | if (!ret) { | 271 | if (!ret) { |
267 | down(&f->sem); | 272 | down(&f->sem); |
268 | old_dentry->d_inode->i_nlink = ++f->inocache->nlink; | 273 | old_dentry->d_inode->i_nlink = ++f->inocache->nlink; |
269 | up(&f->sem); | 274 | up(&f->sem); |
270 | d_instantiate(dentry, old_dentry->d_inode); | 275 | d_instantiate(dentry, old_dentry->d_inode); |
276 | dir_i->i_mtime = dir_i->i_ctime = ITIME(now); | ||
271 | atomic_inc(&old_dentry->d_inode->i_count); | 277 | atomic_inc(&old_dentry->d_inode->i_count); |
272 | } | 278 | } |
273 | return ret; | 279 | return ret; |
@@ -297,14 +303,15 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char | |||
297 | 303 | ||
298 | if (!ri) | 304 | if (!ri) |
299 | return -ENOMEM; | 305 | return -ENOMEM; |
300 | 306 | ||
301 | c = JFFS2_SB_INFO(dir_i->i_sb); | 307 | c = JFFS2_SB_INFO(dir_i->i_sb); |
302 | 308 | ||
303 | /* Try to reserve enough space for both node and dirent. | 309 | /* Try to reserve enough space for both node and dirent. |
304 | * Just the node will do for now, though | 310 | * Just the node will do for now, though |
305 | */ | 311 | */ |
306 | namelen = dentry->d_name.len; | 312 | namelen = dentry->d_name.len; |
307 | ret = jffs2_reserve_space(c, sizeof(*ri) + targetlen, &phys_ofs, &alloclen, ALLOC_NORMAL); | 313 | ret = jffs2_reserve_space(c, sizeof(*ri) + targetlen, &phys_ofs, &alloclen, |
314 | ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); | ||
308 | 315 | ||
309 | if (ret) { | 316 | if (ret) { |
310 | jffs2_free_raw_inode(ri); | 317 | jffs2_free_raw_inode(ri); |
@@ -331,7 +338,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char | |||
331 | ri->compr = JFFS2_COMPR_NONE; | 338 | ri->compr = JFFS2_COMPR_NONE; |
332 | ri->data_crc = cpu_to_je32(crc32(0, target, targetlen)); | 339 | ri->data_crc = cpu_to_je32(crc32(0, target, targetlen)); |
333 | ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); | 340 | ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); |
334 | 341 | ||
335 | fn = jffs2_write_dnode(c, f, ri, target, targetlen, phys_ofs, ALLOC_NORMAL); | 342 | fn = jffs2_write_dnode(c, f, ri, target, targetlen, phys_ofs, ALLOC_NORMAL); |
336 | 343 | ||
337 | jffs2_free_raw_inode(ri); | 344 | jffs2_free_raw_inode(ri); |
@@ -344,9 +351,9 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char | |||
344 | return PTR_ERR(fn); | 351 | return PTR_ERR(fn); |
345 | } | 352 | } |
346 | 353 | ||
347 | /* We use f->dents field to store the target path. */ | 354 | /* We use f->target field to store the target path. */ |
348 | f->dents = kmalloc(targetlen + 1, GFP_KERNEL); | 355 | f->target = kmalloc(targetlen + 1, GFP_KERNEL); |
349 | if (!f->dents) { | 356 | if (!f->target) { |
350 | printk(KERN_WARNING "Can't allocate %d bytes of memory\n", targetlen + 1); | 357 | printk(KERN_WARNING "Can't allocate %d bytes of memory\n", targetlen + 1); |
351 | up(&f->sem); | 358 | up(&f->sem); |
352 | jffs2_complete_reservation(c); | 359 | jffs2_complete_reservation(c); |
@@ -354,17 +361,18 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char | |||
354 | return -ENOMEM; | 361 | return -ENOMEM; |
355 | } | 362 | } |
356 | 363 | ||
357 | memcpy(f->dents, target, targetlen + 1); | 364 | memcpy(f->target, target, targetlen + 1); |
358 | D1(printk(KERN_DEBUG "jffs2_symlink: symlink's target '%s' cached\n", (char *)f->dents)); | 365 | D1(printk(KERN_DEBUG "jffs2_symlink: symlink's target '%s' cached\n", (char *)f->target)); |
359 | 366 | ||
360 | /* No data here. Only a metadata node, which will be | 367 | /* No data here. Only a metadata node, which will be |
361 | obsoleted by the first data write | 368 | obsoleted by the first data write |
362 | */ | 369 | */ |
363 | f->metadata = fn; | 370 | f->metadata = fn; |
364 | up(&f->sem); | 371 | up(&f->sem); |
365 | 372 | ||
366 | jffs2_complete_reservation(c); | 373 | jffs2_complete_reservation(c); |
367 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL); | 374 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, |
375 | ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); | ||
368 | if (ret) { | 376 | if (ret) { |
369 | /* Eep. */ | 377 | /* Eep. */ |
370 | jffs2_clear_inode(inode); | 378 | jffs2_clear_inode(inode); |
@@ -399,7 +407,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char | |||
399 | fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL); | 407 | fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL); |
400 | 408 | ||
401 | if (IS_ERR(fd)) { | 409 | if (IS_ERR(fd)) { |
402 | /* dirent failed to write. Delete the inode normally | 410 | /* dirent failed to write. Delete the inode normally |
403 | as if it were the final unlink() */ | 411 | as if it were the final unlink() */ |
404 | jffs2_complete_reservation(c); | 412 | jffs2_complete_reservation(c); |
405 | jffs2_free_raw_dirent(rd); | 413 | jffs2_free_raw_dirent(rd); |
@@ -442,14 +450,15 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode) | |||
442 | ri = jffs2_alloc_raw_inode(); | 450 | ri = jffs2_alloc_raw_inode(); |
443 | if (!ri) | 451 | if (!ri) |
444 | return -ENOMEM; | 452 | return -ENOMEM; |
445 | 453 | ||
446 | c = JFFS2_SB_INFO(dir_i->i_sb); | 454 | c = JFFS2_SB_INFO(dir_i->i_sb); |
447 | 455 | ||
448 | /* Try to reserve enough space for both node and dirent. | 456 | /* Try to reserve enough space for both node and dirent. |
449 | * Just the node will do for now, though | 457 | * Just the node will do for now, though |
450 | */ | 458 | */ |
451 | namelen = dentry->d_name.len; | 459 | namelen = dentry->d_name.len; |
452 | ret = jffs2_reserve_space(c, sizeof(*ri), &phys_ofs, &alloclen, ALLOC_NORMAL); | 460 | ret = jffs2_reserve_space(c, sizeof(*ri), &phys_ofs, &alloclen, ALLOC_NORMAL, |
461 | JFFS2_SUMMARY_INODE_SIZE); | ||
453 | 462 | ||
454 | if (ret) { | 463 | if (ret) { |
455 | jffs2_free_raw_inode(ri); | 464 | jffs2_free_raw_inode(ri); |
@@ -473,7 +482,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode) | |||
473 | 482 | ||
474 | ri->data_crc = cpu_to_je32(0); | 483 | ri->data_crc = cpu_to_je32(0); |
475 | ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); | 484 | ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); |
476 | 485 | ||
477 | fn = jffs2_write_dnode(c, f, ri, NULL, 0, phys_ofs, ALLOC_NORMAL); | 486 | fn = jffs2_write_dnode(c, f, ri, NULL, 0, phys_ofs, ALLOC_NORMAL); |
478 | 487 | ||
479 | jffs2_free_raw_inode(ri); | 488 | jffs2_free_raw_inode(ri); |
@@ -485,20 +494,21 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode) | |||
485 | jffs2_clear_inode(inode); | 494 | jffs2_clear_inode(inode); |
486 | return PTR_ERR(fn); | 495 | return PTR_ERR(fn); |
487 | } | 496 | } |
488 | /* No data here. Only a metadata node, which will be | 497 | /* No data here. Only a metadata node, which will be |
489 | obsoleted by the first data write | 498 | obsoleted by the first data write |
490 | */ | 499 | */ |
491 | f->metadata = fn; | 500 | f->metadata = fn; |
492 | up(&f->sem); | 501 | up(&f->sem); |
493 | 502 | ||
494 | jffs2_complete_reservation(c); | 503 | jffs2_complete_reservation(c); |
495 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL); | 504 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, |
505 | ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); | ||
496 | if (ret) { | 506 | if (ret) { |
497 | /* Eep. */ | 507 | /* Eep. */ |
498 | jffs2_clear_inode(inode); | 508 | jffs2_clear_inode(inode); |
499 | return ret; | 509 | return ret; |
500 | } | 510 | } |
501 | 511 | ||
502 | rd = jffs2_alloc_raw_dirent(); | 512 | rd = jffs2_alloc_raw_dirent(); |
503 | if (!rd) { | 513 | if (!rd) { |
504 | /* Argh. Now we treat it like a normal delete */ | 514 | /* Argh. Now we treat it like a normal delete */ |
@@ -525,9 +535,9 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode) | |||
525 | rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen)); | 535 | rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen)); |
526 | 536 | ||
527 | fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL); | 537 | fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL); |
528 | 538 | ||
529 | if (IS_ERR(fd)) { | 539 | if (IS_ERR(fd)) { |
530 | /* dirent failed to write. Delete the inode normally | 540 | /* dirent failed to write. Delete the inode normally |
531 | as if it were the final unlink() */ | 541 | as if it were the final unlink() */ |
532 | jffs2_complete_reservation(c); | 542 | jffs2_complete_reservation(c); |
533 | jffs2_free_raw_dirent(rd); | 543 | jffs2_free_raw_dirent(rd); |
@@ -589,19 +599,20 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de | |||
589 | ri = jffs2_alloc_raw_inode(); | 599 | ri = jffs2_alloc_raw_inode(); |
590 | if (!ri) | 600 | if (!ri) |
591 | return -ENOMEM; | 601 | return -ENOMEM; |
592 | 602 | ||
593 | c = JFFS2_SB_INFO(dir_i->i_sb); | 603 | c = JFFS2_SB_INFO(dir_i->i_sb); |
594 | 604 | ||
595 | if (S_ISBLK(mode) || S_ISCHR(mode)) { | 605 | if (S_ISBLK(mode) || S_ISCHR(mode)) { |
596 | dev = cpu_to_je16(old_encode_dev(rdev)); | 606 | dev = cpu_to_je16(old_encode_dev(rdev)); |
597 | devlen = sizeof(dev); | 607 | devlen = sizeof(dev); |
598 | } | 608 | } |
599 | 609 | ||
600 | /* Try to reserve enough space for both node and dirent. | 610 | /* Try to reserve enough space for both node and dirent. |
601 | * Just the node will do for now, though | 611 | * Just the node will do for now, though |
602 | */ | 612 | */ |
603 | namelen = dentry->d_name.len; | 613 | namelen = dentry->d_name.len; |
604 | ret = jffs2_reserve_space(c, sizeof(*ri) + devlen, &phys_ofs, &alloclen, ALLOC_NORMAL); | 614 | ret = jffs2_reserve_space(c, sizeof(*ri) + devlen, &phys_ofs, &alloclen, |
615 | ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); | ||
605 | 616 | ||
606 | if (ret) { | 617 | if (ret) { |
607 | jffs2_free_raw_inode(ri); | 618 | jffs2_free_raw_inode(ri); |
@@ -627,7 +638,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de | |||
627 | ri->compr = JFFS2_COMPR_NONE; | 638 | ri->compr = JFFS2_COMPR_NONE; |
628 | ri->data_crc = cpu_to_je32(crc32(0, &dev, devlen)); | 639 | ri->data_crc = cpu_to_je32(crc32(0, &dev, devlen)); |
629 | ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); | 640 | ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); |
630 | 641 | ||
631 | fn = jffs2_write_dnode(c, f, ri, (char *)&dev, devlen, phys_ofs, ALLOC_NORMAL); | 642 | fn = jffs2_write_dnode(c, f, ri, (char *)&dev, devlen, phys_ofs, ALLOC_NORMAL); |
632 | 643 | ||
633 | jffs2_free_raw_inode(ri); | 644 | jffs2_free_raw_inode(ri); |
@@ -639,14 +650,15 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de | |||
639 | jffs2_clear_inode(inode); | 650 | jffs2_clear_inode(inode); |
640 | return PTR_ERR(fn); | 651 | return PTR_ERR(fn); |
641 | } | 652 | } |
642 | /* No data here. Only a metadata node, which will be | 653 | /* No data here. Only a metadata node, which will be |
643 | obsoleted by the first data write | 654 | obsoleted by the first data write |
644 | */ | 655 | */ |
645 | f->metadata = fn; | 656 | f->metadata = fn; |
646 | up(&f->sem); | 657 | up(&f->sem); |
647 | 658 | ||
648 | jffs2_complete_reservation(c); | 659 | jffs2_complete_reservation(c); |
649 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL); | 660 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, |
661 | ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); | ||
650 | if (ret) { | 662 | if (ret) { |
651 | /* Eep. */ | 663 | /* Eep. */ |
652 | jffs2_clear_inode(inode); | 664 | jffs2_clear_inode(inode); |
@@ -682,9 +694,9 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de | |||
682 | rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen)); | 694 | rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen)); |
683 | 695 | ||
684 | fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL); | 696 | fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL); |
685 | 697 | ||
686 | if (IS_ERR(fd)) { | 698 | if (IS_ERR(fd)) { |
687 | /* dirent failed to write. Delete the inode normally | 699 | /* dirent failed to write. Delete the inode normally |
688 | as if it were the final unlink() */ | 700 | as if it were the final unlink() */ |
689 | jffs2_complete_reservation(c); | 701 | jffs2_complete_reservation(c); |
690 | jffs2_free_raw_dirent(rd); | 702 | jffs2_free_raw_dirent(rd); |
@@ -716,8 +728,9 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, | |||
716 | struct jffs2_sb_info *c = JFFS2_SB_INFO(old_dir_i->i_sb); | 728 | struct jffs2_sb_info *c = JFFS2_SB_INFO(old_dir_i->i_sb); |
717 | struct jffs2_inode_info *victim_f = NULL; | 729 | struct jffs2_inode_info *victim_f = NULL; |
718 | uint8_t type; | 730 | uint8_t type; |
731 | uint32_t now; | ||
719 | 732 | ||
720 | /* The VFS will check for us and prevent trying to rename a | 733 | /* The VFS will check for us and prevent trying to rename a |
721 | * file over a directory and vice versa, but if it's a directory, | 734 | * file over a directory and vice versa, but if it's a directory, |
722 | * the VFS can't check whether the victim is empty. The filesystem | 735 | * the VFS can't check whether the victim is empty. The filesystem |
723 | * needs to do that for itself. | 736 | * needs to do that for itself. |
@@ -739,19 +752,20 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, | |||
739 | } | 752 | } |
740 | 753 | ||
741 | /* XXX: We probably ought to alloc enough space for | 754 | /* XXX: We probably ought to alloc enough space for |
742 | both nodes at the same time. Writing the new link, | 755 | both nodes at the same time. Writing the new link, |
743 | then getting -ENOSPC, is quite bad :) | 756 | then getting -ENOSPC, is quite bad :) |
744 | */ | 757 | */ |
745 | 758 | ||
746 | /* Make a hard link */ | 759 | /* Make a hard link */ |
747 | 760 | ||
748 | /* XXX: This is ugly */ | 761 | /* XXX: This is ugly */ |
749 | type = (old_dentry->d_inode->i_mode & S_IFMT) >> 12; | 762 | type = (old_dentry->d_inode->i_mode & S_IFMT) >> 12; |
750 | if (!type) type = DT_REG; | 763 | if (!type) type = DT_REG; |
751 | 764 | ||
752 | ret = jffs2_do_link(c, JFFS2_INODE_INFO(new_dir_i), | 765 | now = get_seconds(); |
766 | ret = jffs2_do_link(c, JFFS2_INODE_INFO(new_dir_i), | ||
753 | old_dentry->d_inode->i_ino, type, | 767 | old_dentry->d_inode->i_ino, type, |
754 | new_dentry->d_name.name, new_dentry->d_name.len); | 768 | new_dentry->d_name.name, new_dentry->d_name.len, now); |
755 | 769 | ||
756 | if (ret) | 770 | if (ret) |
757 | return ret; | 771 | return ret; |
@@ -768,14 +782,14 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, | |||
768 | } | 782 | } |
769 | } | 783 | } |
770 | 784 | ||
771 | /* If it was a directory we moved, and there was no victim, | 785 | /* If it was a directory we moved, and there was no victim, |
772 | increase i_nlink on its new parent */ | 786 | increase i_nlink on its new parent */ |
773 | if (S_ISDIR(old_dentry->d_inode->i_mode) && !victim_f) | 787 | if (S_ISDIR(old_dentry->d_inode->i_mode) && !victim_f) |
774 | new_dir_i->i_nlink++; | 788 | new_dir_i->i_nlink++; |
775 | 789 | ||
776 | /* Unlink the original */ | 790 | /* Unlink the original */ |
777 | ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i), | 791 | ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i), |
778 | old_dentry->d_name.name, old_dentry->d_name.len, NULL); | 792 | old_dentry->d_name.name, old_dentry->d_name.len, NULL, now); |
779 | 793 | ||
780 | /* We don't touch inode->i_nlink */ | 794 | /* We don't touch inode->i_nlink */ |
781 | 795 | ||
@@ -792,12 +806,15 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, | |||
792 | /* Might as well let the VFS know */ | 806 | /* Might as well let the VFS know */ |
793 | d_instantiate(new_dentry, old_dentry->d_inode); | 807 | d_instantiate(new_dentry, old_dentry->d_inode); |
794 | atomic_inc(&old_dentry->d_inode->i_count); | 808 | atomic_inc(&old_dentry->d_inode->i_count); |
809 | new_dir_i->i_mtime = new_dir_i->i_ctime = ITIME(now); | ||
795 | return ret; | 810 | return ret; |
796 | } | 811 | } |
797 | 812 | ||
798 | if (S_ISDIR(old_dentry->d_inode->i_mode)) | 813 | if (S_ISDIR(old_dentry->d_inode->i_mode)) |
799 | old_dir_i->i_nlink--; | 814 | old_dir_i->i_nlink--; |
800 | 815 | ||
816 | new_dir_i->i_mtime = new_dir_i->i_ctime = old_dir_i->i_mtime = old_dir_i->i_ctime = ITIME(now); | ||
817 | |||
801 | return 0; | 818 | return 0; |
802 | } | 819 | } |
803 | 820 | ||
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c index 787d84ac2bcd..dad68fdffe9e 100644 --- a/fs/jffs2/erase.c +++ b/fs/jffs2/erase.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: erase.c,v 1.80 2005/07/14 19:46:24 joern Exp $ | 10 | * $Id: erase.c,v 1.85 2005/09/20 14:53:15 dedekind Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -24,7 +24,7 @@ struct erase_priv_struct { | |||
24 | struct jffs2_eraseblock *jeb; | 24 | struct jffs2_eraseblock *jeb; |
25 | struct jffs2_sb_info *c; | 25 | struct jffs2_sb_info *c; |
26 | }; | 26 | }; |
27 | 27 | ||
28 | #ifndef __ECOS | 28 | #ifndef __ECOS |
29 | static void jffs2_erase_callback(struct erase_info *); | 29 | static void jffs2_erase_callback(struct erase_info *); |
30 | #endif | 30 | #endif |
@@ -48,7 +48,8 @@ static void jffs2_erase_block(struct jffs2_sb_info *c, | |||
48 | #else /* Linux */ | 48 | #else /* Linux */ |
49 | struct erase_info *instr; | 49 | struct erase_info *instr; |
50 | 50 | ||
51 | D1(printk(KERN_DEBUG "jffs2_erase_block(): erase block %#x (range %#x-%#x)\n", jeb->offset, jeb->offset, jeb->offset + c->sector_size)); | 51 | D1(printk(KERN_DEBUG "jffs2_erase_block(): erase block %#08x (range %#08x-%#08x)\n", |
52 | jeb->offset, jeb->offset, jeb->offset + c->sector_size)); | ||
52 | instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL); | 53 | instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL); |
53 | if (!instr) { | 54 | if (!instr) { |
54 | printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n"); | 55 | printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n"); |
@@ -70,7 +71,7 @@ static void jffs2_erase_block(struct jffs2_sb_info *c, | |||
70 | instr->callback = jffs2_erase_callback; | 71 | instr->callback = jffs2_erase_callback; |
71 | instr->priv = (unsigned long)(&instr[1]); | 72 | instr->priv = (unsigned long)(&instr[1]); |
72 | instr->fail_addr = 0xffffffff; | 73 | instr->fail_addr = 0xffffffff; |
73 | 74 | ||
74 | ((struct erase_priv_struct *)instr->priv)->jeb = jeb; | 75 | ((struct erase_priv_struct *)instr->priv)->jeb = jeb; |
75 | ((struct erase_priv_struct *)instr->priv)->c = c; | 76 | ((struct erase_priv_struct *)instr->priv)->c = c; |
76 | 77 | ||
@@ -95,7 +96,7 @@ static void jffs2_erase_block(struct jffs2_sb_info *c, | |||
95 | return; | 96 | return; |
96 | } | 97 | } |
97 | 98 | ||
98 | if (ret == -EROFS) | 99 | if (ret == -EROFS) |
99 | printk(KERN_WARNING "Erase at 0x%08x failed immediately: -EROFS. Is the sector locked?\n", jeb->offset); | 100 | printk(KERN_WARNING "Erase at 0x%08x failed immediately: -EROFS. Is the sector locked?\n", jeb->offset); |
100 | else | 101 | else |
101 | printk(KERN_WARNING "Erase at 0x%08x failed immediately: errno %d\n", jeb->offset, ret); | 102 | printk(KERN_WARNING "Erase at 0x%08x failed immediately: errno %d\n", jeb->offset, ret); |
@@ -196,7 +197,7 @@ static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock | |||
196 | c->nr_erasing_blocks--; | 197 | c->nr_erasing_blocks--; |
197 | spin_unlock(&c->erase_completion_lock); | 198 | spin_unlock(&c->erase_completion_lock); |
198 | wake_up(&c->erase_wait); | 199 | wake_up(&c->erase_wait); |
199 | } | 200 | } |
200 | 201 | ||
201 | #ifndef __ECOS | 202 | #ifndef __ECOS |
202 | static void jffs2_erase_callback(struct erase_info *instr) | 203 | static void jffs2_erase_callback(struct erase_info *instr) |
@@ -208,7 +209,7 @@ static void jffs2_erase_callback(struct erase_info *instr) | |||
208 | jffs2_erase_failed(priv->c, priv->jeb, instr->fail_addr); | 209 | jffs2_erase_failed(priv->c, priv->jeb, instr->fail_addr); |
209 | } else { | 210 | } else { |
210 | jffs2_erase_succeeded(priv->c, priv->jeb); | 211 | jffs2_erase_succeeded(priv->c, priv->jeb); |
211 | } | 212 | } |
212 | kfree(instr); | 213 | kfree(instr); |
213 | } | 214 | } |
214 | #endif /* !__ECOS */ | 215 | #endif /* !__ECOS */ |
@@ -226,13 +227,13 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c, | |||
226 | /* Walk the inode's list once, removing any nodes from this eraseblock */ | 227 | /* Walk the inode's list once, removing any nodes from this eraseblock */ |
227 | while (1) { | 228 | while (1) { |
228 | if (!(*prev)->next_in_ino) { | 229 | if (!(*prev)->next_in_ino) { |
229 | /* We're looking at the jffs2_inode_cache, which is | 230 | /* We're looking at the jffs2_inode_cache, which is |
230 | at the end of the linked list. Stash it and continue | 231 | at the end of the linked list. Stash it and continue |
231 | from the beginning of the list */ | 232 | from the beginning of the list */ |
232 | ic = (struct jffs2_inode_cache *)(*prev); | 233 | ic = (struct jffs2_inode_cache *)(*prev); |
233 | prev = &ic->nodes; | 234 | prev = &ic->nodes; |
234 | continue; | 235 | continue; |
235 | } | 236 | } |
236 | 237 | ||
237 | if (SECTOR_ADDR((*prev)->flash_offset) == jeb->offset) { | 238 | if (SECTOR_ADDR((*prev)->flash_offset) == jeb->offset) { |
238 | /* It's in the block we're erasing */ | 239 | /* It's in the block we're erasing */ |
@@ -266,7 +267,7 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c, | |||
266 | printk(KERN_DEBUG "After remove_node_refs_from_ino_list: \n" KERN_DEBUG); | 267 | printk(KERN_DEBUG "After remove_node_refs_from_ino_list: \n" KERN_DEBUG); |
267 | 268 | ||
268 | this = ic->nodes; | 269 | this = ic->nodes; |
269 | 270 | ||
270 | while(this) { | 271 | while(this) { |
271 | printk( "0x%08x(%d)->", ref_offset(this), ref_flags(this)); | 272 | printk( "0x%08x(%d)->", ref_offset(this), ref_flags(this)); |
272 | if (++i == 5) { | 273 | if (++i == 5) { |
@@ -289,7 +290,7 @@ static void jffs2_free_all_node_refs(struct jffs2_sb_info *c, struct jffs2_erase | |||
289 | while(jeb->first_node) { | 290 | while(jeb->first_node) { |
290 | ref = jeb->first_node; | 291 | ref = jeb->first_node; |
291 | jeb->first_node = ref->next_phys; | 292 | jeb->first_node = ref->next_phys; |
292 | 293 | ||
293 | /* Remove from the inode-list */ | 294 | /* Remove from the inode-list */ |
294 | if (ref->next_in_ino) | 295 | if (ref->next_in_ino) |
295 | jffs2_remove_node_refs_from_ino_list(c, ref, jeb); | 296 | jffs2_remove_node_refs_from_ino_list(c, ref, jeb); |
@@ -306,7 +307,7 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl | |||
306 | uint32_t ofs; | 307 | uint32_t ofs; |
307 | size_t retlen; | 308 | size_t retlen; |
308 | int ret = -EIO; | 309 | int ret = -EIO; |
309 | 310 | ||
310 | ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL); | 311 | ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL); |
311 | if (!ebuf) { | 312 | if (!ebuf) { |
312 | printk(KERN_WARNING "Failed to allocate page buffer for verifying erase at 0x%08x. Refiling\n", jeb->offset); | 313 | printk(KERN_WARNING "Failed to allocate page buffer for verifying erase at 0x%08x. Refiling\n", jeb->offset); |
@@ -360,7 +361,7 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb | |||
360 | case -EIO: goto filebad; | 361 | case -EIO: goto filebad; |
361 | } | 362 | } |
362 | 363 | ||
363 | /* Write the erase complete marker */ | 364 | /* Write the erase complete marker */ |
364 | D1(printk(KERN_DEBUG "Writing erased marker to block at 0x%08x\n", jeb->offset)); | 365 | D1(printk(KERN_DEBUG "Writing erased marker to block at 0x%08x\n", jeb->offset)); |
365 | bad_offset = jeb->offset; | 366 | bad_offset = jeb->offset; |
366 | 367 | ||
@@ -398,7 +399,7 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb | |||
398 | vecs[0].iov_base = (unsigned char *) ▮ | 399 | vecs[0].iov_base = (unsigned char *) ▮ |
399 | vecs[0].iov_len = sizeof(marker); | 400 | vecs[0].iov_len = sizeof(marker); |
400 | ret = jffs2_flash_direct_writev(c, vecs, 1, jeb->offset, &retlen); | 401 | ret = jffs2_flash_direct_writev(c, vecs, 1, jeb->offset, &retlen); |
401 | 402 | ||
402 | if (ret || retlen != sizeof(marker)) { | 403 | if (ret || retlen != sizeof(marker)) { |
403 | if (ret) | 404 | if (ret) |
404 | printk(KERN_WARNING "Write clean marker to block at 0x%08x failed: %d\n", | 405 | printk(KERN_WARNING "Write clean marker to block at 0x%08x failed: %d\n", |
@@ -415,9 +416,9 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb | |||
415 | marker_ref->next_phys = NULL; | 416 | marker_ref->next_phys = NULL; |
416 | marker_ref->flash_offset = jeb->offset | REF_NORMAL; | 417 | marker_ref->flash_offset = jeb->offset | REF_NORMAL; |
417 | marker_ref->__totlen = c->cleanmarker_size; | 418 | marker_ref->__totlen = c->cleanmarker_size; |
418 | 419 | ||
419 | jeb->first_node = jeb->last_node = marker_ref; | 420 | jeb->first_node = jeb->last_node = marker_ref; |
420 | 421 | ||
421 | jeb->free_size = c->sector_size - c->cleanmarker_size; | 422 | jeb->free_size = c->sector_size - c->cleanmarker_size; |
422 | jeb->used_size = c->cleanmarker_size; | 423 | jeb->used_size = c->cleanmarker_size; |
423 | jeb->dirty_size = 0; | 424 | jeb->dirty_size = 0; |
@@ -429,8 +430,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb | |||
429 | c->free_size += jeb->free_size; | 430 | c->free_size += jeb->free_size; |
430 | c->used_size += jeb->used_size; | 431 | c->used_size += jeb->used_size; |
431 | 432 | ||
432 | ACCT_SANITY_CHECK(c,jeb); | 433 | jffs2_dbg_acct_sanity_check_nolock(c,jeb); |
433 | D1(ACCT_PARANOIA_CHECK(jeb)); | 434 | jffs2_dbg_acct_paranoia_check_nolock(c, jeb); |
434 | 435 | ||
435 | list_add_tail(&jeb->list, &c->free_list); | 436 | list_add_tail(&jeb->list, &c->free_list); |
436 | c->nr_erasing_blocks--; | 437 | c->nr_erasing_blocks--; |
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c index 8279bf0133ff..935f273dc57b 100644 --- a/fs/jffs2/file.c +++ b/fs/jffs2/file.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: file.c,v 1.102 2005/07/06 12:13:09 dwmw2 Exp $ | 10 | * $Id: file.c,v 1.104 2005/10/18 23:29:35 tpoynor Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -34,8 +34,8 @@ int jffs2_fsync(struct file *filp, struct dentry *dentry, int datasync) | |||
34 | 34 | ||
35 | /* Trigger GC to flush any pending writes for this inode */ | 35 | /* Trigger GC to flush any pending writes for this inode */ |
36 | jffs2_flush_wbuf_gc(c, inode->i_ino); | 36 | jffs2_flush_wbuf_gc(c, inode->i_ino); |
37 | 37 | ||
38 | return 0; | 38 | return 0; |
39 | } | 39 | } |
40 | 40 | ||
41 | struct file_operations jffs2_file_operations = | 41 | struct file_operations jffs2_file_operations = |
@@ -107,7 +107,7 @@ static int jffs2_readpage (struct file *filp, struct page *pg) | |||
107 | { | 107 | { |
108 | struct jffs2_inode_info *f = JFFS2_INODE_INFO(pg->mapping->host); | 108 | struct jffs2_inode_info *f = JFFS2_INODE_INFO(pg->mapping->host); |
109 | int ret; | 109 | int ret; |
110 | 110 | ||
111 | down(&f->sem); | 111 | down(&f->sem); |
112 | ret = jffs2_do_readpage_unlock(pg->mapping->host, pg); | 112 | ret = jffs2_do_readpage_unlock(pg->mapping->host, pg); |
113 | up(&f->sem); | 113 | up(&f->sem); |
@@ -130,11 +130,12 @@ static int jffs2_prepare_write (struct file *filp, struct page *pg, | |||
130 | struct jffs2_raw_inode ri; | 130 | struct jffs2_raw_inode ri; |
131 | struct jffs2_full_dnode *fn; | 131 | struct jffs2_full_dnode *fn; |
132 | uint32_t phys_ofs, alloc_len; | 132 | uint32_t phys_ofs, alloc_len; |
133 | 133 | ||
134 | D1(printk(KERN_DEBUG "Writing new hole frag 0x%x-0x%x between current EOF and new page\n", | 134 | D1(printk(KERN_DEBUG "Writing new hole frag 0x%x-0x%x between current EOF and new page\n", |
135 | (unsigned int)inode->i_size, pageofs)); | 135 | (unsigned int)inode->i_size, pageofs)); |
136 | 136 | ||
137 | ret = jffs2_reserve_space(c, sizeof(ri), &phys_ofs, &alloc_len, ALLOC_NORMAL); | 137 | ret = jffs2_reserve_space(c, sizeof(ri), &phys_ofs, &alloc_len, |
138 | ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); | ||
138 | if (ret) | 139 | if (ret) |
139 | return ret; | 140 | return ret; |
140 | 141 | ||
@@ -159,7 +160,7 @@ static int jffs2_prepare_write (struct file *filp, struct page *pg, | |||
159 | ri.compr = JFFS2_COMPR_ZERO; | 160 | ri.compr = JFFS2_COMPR_ZERO; |
160 | ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); | 161 | ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); |
161 | ri.data_crc = cpu_to_je32(0); | 162 | ri.data_crc = cpu_to_je32(0); |
162 | 163 | ||
163 | fn = jffs2_write_dnode(c, f, &ri, NULL, 0, phys_ofs, ALLOC_NORMAL); | 164 | fn = jffs2_write_dnode(c, f, &ri, NULL, 0, phys_ofs, ALLOC_NORMAL); |
164 | 165 | ||
165 | if (IS_ERR(fn)) { | 166 | if (IS_ERR(fn)) { |
@@ -186,7 +187,7 @@ static int jffs2_prepare_write (struct file *filp, struct page *pg, | |||
186 | inode->i_size = pageofs; | 187 | inode->i_size = pageofs; |
187 | up(&f->sem); | 188 | up(&f->sem); |
188 | } | 189 | } |
189 | 190 | ||
190 | /* Read in the page if it wasn't already present, unless it's a whole page */ | 191 | /* Read in the page if it wasn't already present, unless it's a whole page */ |
191 | if (!PageUptodate(pg) && (start || end < PAGE_CACHE_SIZE)) { | 192 | if (!PageUptodate(pg) && (start || end < PAGE_CACHE_SIZE)) { |
192 | down(&f->sem); | 193 | down(&f->sem); |
@@ -217,7 +218,7 @@ static int jffs2_commit_write (struct file *filp, struct page *pg, | |||
217 | if (!start && end == PAGE_CACHE_SIZE) { | 218 | if (!start && end == PAGE_CACHE_SIZE) { |
218 | /* We need to avoid deadlock with page_cache_read() in | 219 | /* We need to avoid deadlock with page_cache_read() in |
219 | jffs2_garbage_collect_pass(). So we have to mark the | 220 | jffs2_garbage_collect_pass(). So we have to mark the |
220 | page up to date, to prevent page_cache_read() from | 221 | page up to date, to prevent page_cache_read() from |
221 | trying to re-lock it. */ | 222 | trying to re-lock it. */ |
222 | SetPageUptodate(pg); | 223 | SetPageUptodate(pg); |
223 | } | 224 | } |
@@ -251,7 +252,7 @@ static int jffs2_commit_write (struct file *filp, struct page *pg, | |||
251 | /* There was an error writing. */ | 252 | /* There was an error writing. */ |
252 | SetPageError(pg); | 253 | SetPageError(pg); |
253 | } | 254 | } |
254 | 255 | ||
255 | /* Adjust writtenlen for the padding we did, so we don't confuse our caller */ | 256 | /* Adjust writtenlen for the padding we did, so we don't confuse our caller */ |
256 | if (writtenlen < (start&3)) | 257 | if (writtenlen < (start&3)) |
257 | writtenlen = 0; | 258 | writtenlen = 0; |
@@ -262,7 +263,7 @@ static int jffs2_commit_write (struct file *filp, struct page *pg, | |||
262 | if (inode->i_size < (pg->index << PAGE_CACHE_SHIFT) + start + writtenlen) { | 263 | if (inode->i_size < (pg->index << PAGE_CACHE_SHIFT) + start + writtenlen) { |
263 | inode->i_size = (pg->index << PAGE_CACHE_SHIFT) + start + writtenlen; | 264 | inode->i_size = (pg->index << PAGE_CACHE_SHIFT) + start + writtenlen; |
264 | inode->i_blocks = (inode->i_size + 511) >> 9; | 265 | inode->i_blocks = (inode->i_size + 511) >> 9; |
265 | 266 | ||
266 | inode->i_ctime = inode->i_mtime = ITIME(je32_to_cpu(ri->ctime)); | 267 | inode->i_ctime = inode->i_mtime = ITIME(je32_to_cpu(ri->ctime)); |
267 | } | 268 | } |
268 | } | 269 | } |
@@ -271,13 +272,13 @@ static int jffs2_commit_write (struct file *filp, struct page *pg, | |||
271 | 272 | ||
272 | if (start+writtenlen < end) { | 273 | if (start+writtenlen < end) { |
273 | /* generic_file_write has written more to the page cache than we've | 274 | /* generic_file_write has written more to the page cache than we've |
274 | actually written to the medium. Mark the page !Uptodate so that | 275 | actually written to the medium. Mark the page !Uptodate so that |
275 | it gets reread */ | 276 | it gets reread */ |
276 | D1(printk(KERN_DEBUG "jffs2_commit_write(): Not all bytes written. Marking page !uptodate\n")); | 277 | D1(printk(KERN_DEBUG "jffs2_commit_write(): Not all bytes written. Marking page !uptodate\n")); |
277 | SetPageError(pg); | 278 | SetPageError(pg); |
278 | ClearPageUptodate(pg); | 279 | ClearPageUptodate(pg); |
279 | } | 280 | } |
280 | 281 | ||
281 | D1(printk(KERN_DEBUG "jffs2_commit_write() returning %d\n",writtenlen?writtenlen:ret)); | 282 | D1(printk(KERN_DEBUG "jffs2_commit_write() returning %d\n",start+writtenlen==end?0:ret)); |
282 | return writtenlen?writtenlen:ret; | 283 | return start+writtenlen==end?0:ret; |
283 | } | 284 | } |
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c index 5687c3f42002..543420665c5b 100644 --- a/fs/jffs2/fs.c +++ b/fs/jffs2/fs.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: fs.c,v 1.56 2005/07/06 12:13:09 dwmw2 Exp $ | 10 | * $Id: fs.c,v 1.66 2005/09/27 13:17:29 dedekind Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -40,7 +40,7 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) | |||
40 | int ret; | 40 | int ret; |
41 | D1(printk(KERN_DEBUG "jffs2_setattr(): ino #%lu\n", inode->i_ino)); | 41 | D1(printk(KERN_DEBUG "jffs2_setattr(): ino #%lu\n", inode->i_ino)); |
42 | ret = inode_change_ok(inode, iattr); | 42 | ret = inode_change_ok(inode, iattr); |
43 | if (ret) | 43 | if (ret) |
44 | return ret; | 44 | return ret; |
45 | 45 | ||
46 | /* Special cases - we don't want more than one data node | 46 | /* Special cases - we don't want more than one data node |
@@ -73,8 +73,9 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) | |||
73 | kfree(mdata); | 73 | kfree(mdata); |
74 | return -ENOMEM; | 74 | return -ENOMEM; |
75 | } | 75 | } |
76 | 76 | ||
77 | ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &phys_ofs, &alloclen, ALLOC_NORMAL); | 77 | ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &phys_ofs, &alloclen, |
78 | ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); | ||
78 | if (ret) { | 79 | if (ret) { |
79 | jffs2_free_raw_inode(ri); | 80 | jffs2_free_raw_inode(ri); |
80 | if (S_ISLNK(inode->i_mode & S_IFMT)) | 81 | if (S_ISLNK(inode->i_mode & S_IFMT)) |
@@ -83,7 +84,7 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) | |||
83 | } | 84 | } |
84 | down(&f->sem); | 85 | down(&f->sem); |
85 | ivalid = iattr->ia_valid; | 86 | ivalid = iattr->ia_valid; |
86 | 87 | ||
87 | ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); | 88 | ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); |
88 | ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE); | 89 | ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE); |
89 | ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen); | 90 | ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen); |
@@ -99,7 +100,7 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) | |||
99 | if (iattr->ia_mode & S_ISGID && | 100 | if (iattr->ia_mode & S_ISGID && |
100 | !in_group_p(je16_to_cpu(ri->gid)) && !capable(CAP_FSETID)) | 101 | !in_group_p(je16_to_cpu(ri->gid)) && !capable(CAP_FSETID)) |
101 | ri->mode = cpu_to_jemode(iattr->ia_mode & ~S_ISGID); | 102 | ri->mode = cpu_to_jemode(iattr->ia_mode & ~S_ISGID); |
102 | else | 103 | else |
103 | ri->mode = cpu_to_jemode(iattr->ia_mode); | 104 | ri->mode = cpu_to_jemode(iattr->ia_mode); |
104 | else | 105 | else |
105 | ri->mode = cpu_to_jemode(inode->i_mode); | 106 | ri->mode = cpu_to_jemode(inode->i_mode); |
@@ -128,7 +129,7 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) | |||
128 | new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, phys_ofs, ALLOC_NORMAL); | 129 | new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, phys_ofs, ALLOC_NORMAL); |
129 | if (S_ISLNK(inode->i_mode)) | 130 | if (S_ISLNK(inode->i_mode)) |
130 | kfree(mdata); | 131 | kfree(mdata); |
131 | 132 | ||
132 | if (IS_ERR(new_metadata)) { | 133 | if (IS_ERR(new_metadata)) { |
133 | jffs2_complete_reservation(c); | 134 | jffs2_complete_reservation(c); |
134 | jffs2_free_raw_inode(ri); | 135 | jffs2_free_raw_inode(ri); |
@@ -147,7 +148,7 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) | |||
147 | old_metadata = f->metadata; | 148 | old_metadata = f->metadata; |
148 | 149 | ||
149 | if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) | 150 | if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) |
150 | jffs2_truncate_fraglist (c, &f->fragtree, iattr->ia_size); | 151 | jffs2_truncate_fragtree (c, &f->fragtree, iattr->ia_size); |
151 | 152 | ||
152 | if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) { | 153 | if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) { |
153 | jffs2_add_full_dnode_to_inode(c, f, new_metadata); | 154 | jffs2_add_full_dnode_to_inode(c, f, new_metadata); |
@@ -166,7 +167,7 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) | |||
166 | jffs2_complete_reservation(c); | 167 | jffs2_complete_reservation(c); |
167 | 168 | ||
168 | /* We have to do the vmtruncate() without f->sem held, since | 169 | /* We have to do the vmtruncate() without f->sem held, since |
169 | some pages may be locked and waiting for it in readpage(). | 170 | some pages may be locked and waiting for it in readpage(). |
170 | We are protected from a simultaneous write() extending i_size | 171 | We are protected from a simultaneous write() extending i_size |
171 | back past iattr->ia_size, because do_truncate() holds the | 172 | back past iattr->ia_size, because do_truncate() holds the |
172 | generic inode semaphore. */ | 173 | generic inode semaphore. */ |
@@ -194,31 +195,27 @@ int jffs2_statfs(struct super_block *sb, struct kstatfs *buf) | |||
194 | buf->f_namelen = JFFS2_MAX_NAME_LEN; | 195 | buf->f_namelen = JFFS2_MAX_NAME_LEN; |
195 | 196 | ||
196 | spin_lock(&c->erase_completion_lock); | 197 | spin_lock(&c->erase_completion_lock); |
197 | |||
198 | avail = c->dirty_size + c->free_size; | 198 | avail = c->dirty_size + c->free_size; |
199 | if (avail > c->sector_size * c->resv_blocks_write) | 199 | if (avail > c->sector_size * c->resv_blocks_write) |
200 | avail -= c->sector_size * c->resv_blocks_write; | 200 | avail -= c->sector_size * c->resv_blocks_write; |
201 | else | 201 | else |
202 | avail = 0; | 202 | avail = 0; |
203 | spin_unlock(&c->erase_completion_lock); | ||
203 | 204 | ||
204 | buf->f_bavail = buf->f_bfree = avail >> PAGE_SHIFT; | 205 | buf->f_bavail = buf->f_bfree = avail >> PAGE_SHIFT; |
205 | 206 | ||
206 | D2(jffs2_dump_block_lists(c)); | ||
207 | |||
208 | spin_unlock(&c->erase_completion_lock); | ||
209 | |||
210 | return 0; | 207 | return 0; |
211 | } | 208 | } |
212 | 209 | ||
213 | 210 | ||
214 | void jffs2_clear_inode (struct inode *inode) | 211 | void jffs2_clear_inode (struct inode *inode) |
215 | { | 212 | { |
216 | /* We can forget about this inode for now - drop all | 213 | /* We can forget about this inode for now - drop all |
217 | * the nodelists associated with it, etc. | 214 | * the nodelists associated with it, etc. |
218 | */ | 215 | */ |
219 | struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); | 216 | struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); |
220 | struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); | 217 | struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); |
221 | 218 | ||
222 | D1(printk(KERN_DEBUG "jffs2_clear_inode(): ino #%lu mode %o\n", inode->i_ino, inode->i_mode)); | 219 | D1(printk(KERN_DEBUG "jffs2_clear_inode(): ino #%lu mode %o\n", inode->i_ino, inode->i_mode)); |
223 | 220 | ||
224 | jffs2_do_clear_inode(c, f); | 221 | jffs2_do_clear_inode(c, f); |
@@ -237,7 +234,7 @@ void jffs2_read_inode (struct inode *inode) | |||
237 | c = JFFS2_SB_INFO(inode->i_sb); | 234 | c = JFFS2_SB_INFO(inode->i_sb); |
238 | 235 | ||
239 | jffs2_init_inode_info(f); | 236 | jffs2_init_inode_info(f); |
240 | 237 | ||
241 | ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node); | 238 | ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node); |
242 | 239 | ||
243 | if (ret) { | 240 | if (ret) { |
@@ -257,14 +254,14 @@ void jffs2_read_inode (struct inode *inode) | |||
257 | 254 | ||
258 | inode->i_blksize = PAGE_SIZE; | 255 | inode->i_blksize = PAGE_SIZE; |
259 | inode->i_blocks = (inode->i_size + 511) >> 9; | 256 | inode->i_blocks = (inode->i_size + 511) >> 9; |
260 | 257 | ||
261 | switch (inode->i_mode & S_IFMT) { | 258 | switch (inode->i_mode & S_IFMT) { |
262 | jint16_t rdev; | 259 | jint16_t rdev; |
263 | 260 | ||
264 | case S_IFLNK: | 261 | case S_IFLNK: |
265 | inode->i_op = &jffs2_symlink_inode_operations; | 262 | inode->i_op = &jffs2_symlink_inode_operations; |
266 | break; | 263 | break; |
267 | 264 | ||
268 | case S_IFDIR: | 265 | case S_IFDIR: |
269 | { | 266 | { |
270 | struct jffs2_full_dirent *fd; | 267 | struct jffs2_full_dirent *fd; |
@@ -301,7 +298,7 @@ void jffs2_read_inode (struct inode *inode) | |||
301 | jffs2_do_clear_inode(c, f); | 298 | jffs2_do_clear_inode(c, f); |
302 | make_bad_inode(inode); | 299 | make_bad_inode(inode); |
303 | return; | 300 | return; |
304 | } | 301 | } |
305 | 302 | ||
306 | case S_IFSOCK: | 303 | case S_IFSOCK: |
307 | case S_IFIFO: | 304 | case S_IFIFO: |
@@ -357,11 +354,11 @@ int jffs2_remount_fs (struct super_block *sb, int *flags, char *data) | |||
357 | down(&c->alloc_sem); | 354 | down(&c->alloc_sem); |
358 | jffs2_flush_wbuf_pad(c); | 355 | jffs2_flush_wbuf_pad(c); |
359 | up(&c->alloc_sem); | 356 | up(&c->alloc_sem); |
360 | } | 357 | } |
361 | 358 | ||
362 | if (!(*flags & MS_RDONLY)) | 359 | if (!(*flags & MS_RDONLY)) |
363 | jffs2_start_garbage_collect_thread(c); | 360 | jffs2_start_garbage_collect_thread(c); |
364 | 361 | ||
365 | *flags |= MS_NOATIME; | 362 | *flags |= MS_NOATIME; |
366 | 363 | ||
367 | return 0; | 364 | return 0; |
@@ -395,9 +392,9 @@ struct inode *jffs2_new_inode (struct inode *dir_i, int mode, struct jffs2_raw_i | |||
395 | D1(printk(KERN_DEBUG "jffs2_new_inode(): dir_i %ld, mode 0x%x\n", dir_i->i_ino, mode)); | 392 | D1(printk(KERN_DEBUG "jffs2_new_inode(): dir_i %ld, mode 0x%x\n", dir_i->i_ino, mode)); |
396 | 393 | ||
397 | c = JFFS2_SB_INFO(sb); | 394 | c = JFFS2_SB_INFO(sb); |
398 | 395 | ||
399 | inode = new_inode(sb); | 396 | inode = new_inode(sb); |
400 | 397 | ||
401 | if (!inode) | 398 | if (!inode) |
402 | return ERR_PTR(-ENOMEM); | 399 | return ERR_PTR(-ENOMEM); |
403 | 400 | ||
@@ -461,40 +458,24 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent) | |||
461 | #endif | 458 | #endif |
462 | 459 | ||
463 | c->flash_size = c->mtd->size; | 460 | c->flash_size = c->mtd->size; |
464 | 461 | c->sector_size = c->mtd->erasesize; | |
465 | /* | ||
466 | * Check, if we have to concatenate physical blocks to larger virtual blocks | ||
467 | * to reduce the memorysize for c->blocks. (kmalloc allows max. 128K allocation) | ||
468 | */ | ||
469 | c->sector_size = c->mtd->erasesize; | ||
470 | blocks = c->flash_size / c->sector_size; | 462 | blocks = c->flash_size / c->sector_size; |
471 | if (!(c->mtd->flags & MTD_NO_VIRTBLOCKS)) { | ||
472 | while ((blocks * sizeof (struct jffs2_eraseblock)) > (128 * 1024)) { | ||
473 | blocks >>= 1; | ||
474 | c->sector_size <<= 1; | ||
475 | } | ||
476 | } | ||
477 | 463 | ||
478 | /* | 464 | /* |
479 | * Size alignment check | 465 | * Size alignment check |
480 | */ | 466 | */ |
481 | if ((c->sector_size * blocks) != c->flash_size) { | 467 | if ((c->sector_size * blocks) != c->flash_size) { |
482 | c->flash_size = c->sector_size * blocks; | 468 | c->flash_size = c->sector_size * blocks; |
483 | printk(KERN_INFO "jffs2: Flash size not aligned to erasesize, reducing to %dKiB\n", | 469 | printk(KERN_INFO "jffs2: Flash size not aligned to erasesize, reducing to %dKiB\n", |
484 | c->flash_size / 1024); | 470 | c->flash_size / 1024); |
485 | } | 471 | } |
486 | 472 | ||
487 | if (c->sector_size != c->mtd->erasesize) | ||
488 | printk(KERN_INFO "jffs2: Erase block size too small (%dKiB). Using virtual blocks size (%dKiB) instead\n", | ||
489 | c->mtd->erasesize / 1024, c->sector_size / 1024); | ||
490 | |||
491 | if (c->flash_size < 5*c->sector_size) { | 473 | if (c->flash_size < 5*c->sector_size) { |
492 | printk(KERN_ERR "jffs2: Too few erase blocks (%d)\n", c->flash_size / c->sector_size); | 474 | printk(KERN_ERR "jffs2: Too few erase blocks (%d)\n", c->flash_size / c->sector_size); |
493 | return -EINVAL; | 475 | return -EINVAL; |
494 | } | 476 | } |
495 | 477 | ||
496 | c->cleanmarker_size = sizeof(struct jffs2_unknown_node); | 478 | c->cleanmarker_size = sizeof(struct jffs2_unknown_node); |
497 | /* Joern -- stick alignment for weird 8-byte-page flash here */ | ||
498 | 479 | ||
499 | /* NAND (or other bizarre) flash... do setup accordingly */ | 480 | /* NAND (or other bizarre) flash... do setup accordingly */ |
500 | ret = jffs2_flash_setup(c); | 481 | ret = jffs2_flash_setup(c); |
@@ -517,7 +498,7 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent) | |||
517 | root_i = iget(sb, 1); | 498 | root_i = iget(sb, 1); |
518 | if (is_bad_inode(root_i)) { | 499 | if (is_bad_inode(root_i)) { |
519 | D1(printk(KERN_WARNING "get root inode failed\n")); | 500 | D1(printk(KERN_WARNING "get root inode failed\n")); |
520 | goto out_nodes; | 501 | goto out_root_i; |
521 | } | 502 | } |
522 | 503 | ||
523 | D1(printk(KERN_DEBUG "jffs2_do_fill_super(): d_alloc_root()\n")); | 504 | D1(printk(KERN_DEBUG "jffs2_do_fill_super(): d_alloc_root()\n")); |
@@ -535,10 +516,9 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent) | |||
535 | 516 | ||
536 | out_root_i: | 517 | out_root_i: |
537 | iput(root_i); | 518 | iput(root_i); |
538 | out_nodes: | ||
539 | jffs2_free_ino_caches(c); | 519 | jffs2_free_ino_caches(c); |
540 | jffs2_free_raw_node_refs(c); | 520 | jffs2_free_raw_node_refs(c); |
541 | if (c->mtd->flags & MTD_NO_VIRTBLOCKS) | 521 | if (jffs2_blocks_use_vmalloc(c)) |
542 | vfree(c->blocks); | 522 | vfree(c->blocks); |
543 | else | 523 | else |
544 | kfree(c->blocks); | 524 | kfree(c->blocks); |
@@ -563,16 +543,16 @@ struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c, | |||
563 | struct jffs2_inode_cache *ic; | 543 | struct jffs2_inode_cache *ic; |
564 | if (!nlink) { | 544 | if (!nlink) { |
565 | /* The inode has zero nlink but its nodes weren't yet marked | 545 | /* The inode has zero nlink but its nodes weren't yet marked |
566 | obsolete. This has to be because we're still waiting for | 546 | obsolete. This has to be because we're still waiting for |
567 | the final (close() and) iput() to happen. | 547 | the final (close() and) iput() to happen. |
568 | 548 | ||
569 | There's a possibility that the final iput() could have | 549 | There's a possibility that the final iput() could have |
570 | happened while we were contemplating. In order to ensure | 550 | happened while we were contemplating. In order to ensure |
571 | that we don't cause a new read_inode() (which would fail) | 551 | that we don't cause a new read_inode() (which would fail) |
572 | for the inode in question, we use ilookup() in this case | 552 | for the inode in question, we use ilookup() in this case |
573 | instead of iget(). | 553 | instead of iget(). |
574 | 554 | ||
575 | The nlink can't _become_ zero at this point because we're | 555 | The nlink can't _become_ zero at this point because we're |
576 | holding the alloc_sem, and jffs2_do_unlink() would also | 556 | holding the alloc_sem, and jffs2_do_unlink() would also |
577 | need that while decrementing nlink on any inode. | 557 | need that while decrementing nlink on any inode. |
578 | */ | 558 | */ |
@@ -619,19 +599,19 @@ struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c, | |||
619 | return JFFS2_INODE_INFO(inode); | 599 | return JFFS2_INODE_INFO(inode); |
620 | } | 600 | } |
621 | 601 | ||
622 | unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c, | 602 | unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c, |
623 | struct jffs2_inode_info *f, | 603 | struct jffs2_inode_info *f, |
624 | unsigned long offset, | 604 | unsigned long offset, |
625 | unsigned long *priv) | 605 | unsigned long *priv) |
626 | { | 606 | { |
627 | struct inode *inode = OFNI_EDONI_2SFFJ(f); | 607 | struct inode *inode = OFNI_EDONI_2SFFJ(f); |
628 | struct page *pg; | 608 | struct page *pg; |
629 | 609 | ||
630 | pg = read_cache_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT, | 610 | pg = read_cache_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT, |
631 | (void *)jffs2_do_readpage_unlock, inode); | 611 | (void *)jffs2_do_readpage_unlock, inode); |
632 | if (IS_ERR(pg)) | 612 | if (IS_ERR(pg)) |
633 | return (void *)pg; | 613 | return (void *)pg; |
634 | 614 | ||
635 | *priv = (unsigned long)pg; | 615 | *priv = (unsigned long)pg; |
636 | return kmap(pg); | 616 | return kmap(pg); |
637 | } | 617 | } |
@@ -648,7 +628,7 @@ void jffs2_gc_release_page(struct jffs2_sb_info *c, | |||
648 | 628 | ||
649 | static int jffs2_flash_setup(struct jffs2_sb_info *c) { | 629 | static int jffs2_flash_setup(struct jffs2_sb_info *c) { |
650 | int ret = 0; | 630 | int ret = 0; |
651 | 631 | ||
652 | if (jffs2_cleanmarker_oob(c)) { | 632 | if (jffs2_cleanmarker_oob(c)) { |
653 | /* NAND flash... do setup accordingly */ | 633 | /* NAND flash... do setup accordingly */ |
654 | ret = jffs2_nand_flash_setup(c); | 634 | ret = jffs2_nand_flash_setup(c); |
@@ -662,14 +642,21 @@ static int jffs2_flash_setup(struct jffs2_sb_info *c) { | |||
662 | if (ret) | 642 | if (ret) |
663 | return ret; | 643 | return ret; |
664 | } | 644 | } |
665 | 645 | ||
666 | /* and Dataflash */ | 646 | /* and Dataflash */ |
667 | if (jffs2_dataflash(c)) { | 647 | if (jffs2_dataflash(c)) { |
668 | ret = jffs2_dataflash_setup(c); | 648 | ret = jffs2_dataflash_setup(c); |
669 | if (ret) | 649 | if (ret) |
670 | return ret; | 650 | return ret; |
671 | } | 651 | } |
672 | 652 | ||
653 | /* and Intel "Sibley" flash */ | ||
654 | if (jffs2_nor_wbuf_flash(c)) { | ||
655 | ret = jffs2_nor_wbuf_flash_setup(c); | ||
656 | if (ret) | ||
657 | return ret; | ||
658 | } | ||
659 | |||
673 | return ret; | 660 | return ret; |
674 | } | 661 | } |
675 | 662 | ||
@@ -683,9 +670,14 @@ void jffs2_flash_cleanup(struct jffs2_sb_info *c) { | |||
683 | if (jffs2_nor_ecc(c)) { | 670 | if (jffs2_nor_ecc(c)) { |
684 | jffs2_nor_ecc_flash_cleanup(c); | 671 | jffs2_nor_ecc_flash_cleanup(c); |
685 | } | 672 | } |
686 | 673 | ||
687 | /* and DataFlash */ | 674 | /* and DataFlash */ |
688 | if (jffs2_dataflash(c)) { | 675 | if (jffs2_dataflash(c)) { |
689 | jffs2_dataflash_cleanup(c); | 676 | jffs2_dataflash_cleanup(c); |
690 | } | 677 | } |
678 | |||
679 | /* and Intel "Sibley" flash */ | ||
680 | if (jffs2_nor_wbuf_flash(c)) { | ||
681 | jffs2_nor_wbuf_flash_cleanup(c); | ||
682 | } | ||
691 | } | 683 | } |
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c index 7086cd634503..f9ffece453a3 100644 --- a/fs/jffs2/gc.c +++ b/fs/jffs2/gc.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: gc.c,v 1.148 2005/04/09 10:47:00 dedekind Exp $ | 10 | * $Id: gc.c,v 1.155 2005/11/07 11:14:39 gleixner Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -21,14 +21,14 @@ | |||
21 | #include "nodelist.h" | 21 | #include "nodelist.h" |
22 | #include "compr.h" | 22 | #include "compr.h" |
23 | 23 | ||
24 | static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, | 24 | static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, |
25 | struct jffs2_inode_cache *ic, | 25 | struct jffs2_inode_cache *ic, |
26 | struct jffs2_raw_node_ref *raw); | 26 | struct jffs2_raw_node_ref *raw); |
27 | static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | 27 | static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, |
28 | struct jffs2_inode_info *f, struct jffs2_full_dnode *fd); | 28 | struct jffs2_inode_info *f, struct jffs2_full_dnode *fd); |
29 | static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | 29 | static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, |
30 | struct jffs2_inode_info *f, struct jffs2_full_dirent *fd); | 30 | struct jffs2_inode_info *f, struct jffs2_full_dirent *fd); |
31 | static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | 31 | static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, |
32 | struct jffs2_inode_info *f, struct jffs2_full_dirent *fd); | 32 | struct jffs2_inode_info *f, struct jffs2_full_dirent *fd); |
33 | static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | 33 | static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, |
34 | struct jffs2_inode_info *f, struct jffs2_full_dnode *fn, | 34 | struct jffs2_inode_info *f, struct jffs2_full_dnode *fn, |
@@ -55,7 +55,7 @@ again: | |||
55 | D1(printk(KERN_DEBUG "Picking block from bad_used_list to GC next\n")); | 55 | D1(printk(KERN_DEBUG "Picking block from bad_used_list to GC next\n")); |
56 | nextlist = &c->bad_used_list; | 56 | nextlist = &c->bad_used_list; |
57 | } else if (n < 50 && !list_empty(&c->erasable_list)) { | 57 | } else if (n < 50 && !list_empty(&c->erasable_list)) { |
58 | /* Note that most of them will have gone directly to be erased. | 58 | /* Note that most of them will have gone directly to be erased. |
59 | So don't favour the erasable_list _too_ much. */ | 59 | So don't favour the erasable_list _too_ much. */ |
60 | D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next\n")); | 60 | D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next\n")); |
61 | nextlist = &c->erasable_list; | 61 | nextlist = &c->erasable_list; |
@@ -101,7 +101,7 @@ again: | |||
101 | printk(KERN_WARNING "Eep. ret->gc_node for block at 0x%08x is NULL\n", ret->offset); | 101 | printk(KERN_WARNING "Eep. ret->gc_node for block at 0x%08x is NULL\n", ret->offset); |
102 | BUG(); | 102 | BUG(); |
103 | } | 103 | } |
104 | 104 | ||
105 | /* Have we accidentally picked a clean block with wasted space ? */ | 105 | /* Have we accidentally picked a clean block with wasted space ? */ |
106 | if (ret->wasted_size) { | 106 | if (ret->wasted_size) { |
107 | D1(printk(KERN_DEBUG "Converting wasted_size %08x to dirty_size\n", ret->wasted_size)); | 107 | D1(printk(KERN_DEBUG "Converting wasted_size %08x to dirty_size\n", ret->wasted_size)); |
@@ -111,7 +111,6 @@ again: | |||
111 | ret->wasted_size = 0; | 111 | ret->wasted_size = 0; |
112 | } | 112 | } |
113 | 113 | ||
114 | D2(jffs2_dump_block_lists(c)); | ||
115 | return ret; | 114 | return ret; |
116 | } | 115 | } |
117 | 116 | ||
@@ -137,12 +136,12 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
137 | 136 | ||
138 | /* We can't start doing GC yet. We haven't finished checking | 137 | /* We can't start doing GC yet. We haven't finished checking |
139 | the node CRCs etc. Do it now. */ | 138 | the node CRCs etc. Do it now. */ |
140 | 139 | ||
141 | /* checked_ino is protected by the alloc_sem */ | 140 | /* checked_ino is protected by the alloc_sem */ |
142 | if (c->checked_ino > c->highest_ino) { | 141 | if (c->checked_ino > c->highest_ino) { |
143 | printk(KERN_CRIT "Checked all inodes but still 0x%x bytes of unchecked space?\n", | 142 | printk(KERN_CRIT "Checked all inodes but still 0x%x bytes of unchecked space?\n", |
144 | c->unchecked_size); | 143 | c->unchecked_size); |
145 | D2(jffs2_dump_block_lists(c)); | 144 | jffs2_dbg_dump_block_lists_nolock(c); |
146 | spin_unlock(&c->erase_completion_lock); | 145 | spin_unlock(&c->erase_completion_lock); |
147 | BUG(); | 146 | BUG(); |
148 | } | 147 | } |
@@ -179,7 +178,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
179 | 178 | ||
180 | case INO_STATE_READING: | 179 | case INO_STATE_READING: |
181 | /* We need to wait for it to finish, lest we move on | 180 | /* We need to wait for it to finish, lest we move on |
182 | and trigger the BUG() above while we haven't yet | 181 | and trigger the BUG() above while we haven't yet |
183 | finished checking all its nodes */ | 182 | finished checking all its nodes */ |
184 | D1(printk(KERN_DEBUG "Waiting for ino #%u to finish reading\n", ic->ino)); | 183 | D1(printk(KERN_DEBUG "Waiting for ino #%u to finish reading\n", ic->ino)); |
185 | up(&c->alloc_sem); | 184 | up(&c->alloc_sem); |
@@ -229,13 +228,13 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
229 | } | 228 | } |
230 | 229 | ||
231 | raw = jeb->gc_node; | 230 | raw = jeb->gc_node; |
232 | 231 | ||
233 | while(ref_obsolete(raw)) { | 232 | while(ref_obsolete(raw)) { |
234 | D1(printk(KERN_DEBUG "Node at 0x%08x is obsolete... skipping\n", ref_offset(raw))); | 233 | D1(printk(KERN_DEBUG "Node at 0x%08x is obsolete... skipping\n", ref_offset(raw))); |
235 | raw = raw->next_phys; | 234 | raw = raw->next_phys; |
236 | if (unlikely(!raw)) { | 235 | if (unlikely(!raw)) { |
237 | printk(KERN_WARNING "eep. End of raw list while still supposedly nodes to GC\n"); | 236 | printk(KERN_WARNING "eep. End of raw list while still supposedly nodes to GC\n"); |
238 | printk(KERN_WARNING "erase block at 0x%08x. free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x\n", | 237 | printk(KERN_WARNING "erase block at 0x%08x. free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x\n", |
239 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size); | 238 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size); |
240 | jeb->gc_node = raw; | 239 | jeb->gc_node = raw; |
241 | spin_unlock(&c->erase_completion_lock); | 240 | spin_unlock(&c->erase_completion_lock); |
@@ -260,7 +259,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
260 | ic = jffs2_raw_ref_to_ic(raw); | 259 | ic = jffs2_raw_ref_to_ic(raw); |
261 | 260 | ||
262 | /* We need to hold the inocache. Either the erase_completion_lock or | 261 | /* We need to hold the inocache. Either the erase_completion_lock or |
263 | the inocache_lock are sufficient; we trade down since the inocache_lock | 262 | the inocache_lock are sufficient; we trade down since the inocache_lock |
264 | causes less contention. */ | 263 | causes less contention. */ |
265 | spin_lock(&c->inocache_lock); | 264 | spin_lock(&c->inocache_lock); |
266 | 265 | ||
@@ -279,14 +278,14 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
279 | 278 | ||
280 | switch(ic->state) { | 279 | switch(ic->state) { |
281 | case INO_STATE_CHECKEDABSENT: | 280 | case INO_STATE_CHECKEDABSENT: |
282 | /* It's been checked, but it's not currently in-core. | 281 | /* It's been checked, but it's not currently in-core. |
283 | We can just copy any pristine nodes, but have | 282 | We can just copy any pristine nodes, but have |
284 | to prevent anyone else from doing read_inode() while | 283 | to prevent anyone else from doing read_inode() while |
285 | we're at it, so we set the state accordingly */ | 284 | we're at it, so we set the state accordingly */ |
286 | if (ref_flags(raw) == REF_PRISTINE) | 285 | if (ref_flags(raw) == REF_PRISTINE) |
287 | ic->state = INO_STATE_GC; | 286 | ic->state = INO_STATE_GC; |
288 | else { | 287 | else { |
289 | D1(printk(KERN_DEBUG "Ino #%u is absent but node not REF_PRISTINE. Reading.\n", | 288 | D1(printk(KERN_DEBUG "Ino #%u is absent but node not REF_PRISTINE. Reading.\n", |
290 | ic->ino)); | 289 | ic->ino)); |
291 | } | 290 | } |
292 | break; | 291 | break; |
@@ -299,8 +298,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
299 | case INO_STATE_CHECKING: | 298 | case INO_STATE_CHECKING: |
300 | case INO_STATE_GC: | 299 | case INO_STATE_GC: |
301 | /* Should never happen. We should have finished checking | 300 | /* Should never happen. We should have finished checking |
302 | by the time we actually start doing any GC, and since | 301 | by the time we actually start doing any GC, and since |
303 | we're holding the alloc_sem, no other garbage collection | 302 | we're holding the alloc_sem, no other garbage collection |
304 | can happen. | 303 | can happen. |
305 | */ | 304 | */ |
306 | printk(KERN_CRIT "Inode #%u already in state %d in jffs2_garbage_collect_pass()!\n", | 305 | printk(KERN_CRIT "Inode #%u already in state %d in jffs2_garbage_collect_pass()!\n", |
@@ -320,21 +319,21 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
320 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() waiting for ino #%u in state %d\n", | 319 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() waiting for ino #%u in state %d\n", |
321 | ic->ino, ic->state)); | 320 | ic->ino, ic->state)); |
322 | sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); | 321 | sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); |
323 | /* And because we dropped the alloc_sem we must start again from the | 322 | /* And because we dropped the alloc_sem we must start again from the |
324 | beginning. Ponder chance of livelock here -- we're returning success | 323 | beginning. Ponder chance of livelock here -- we're returning success |
325 | without actually making any progress. | 324 | without actually making any progress. |
326 | 325 | ||
327 | Q: What are the chances that the inode is back in INO_STATE_READING | 326 | Q: What are the chances that the inode is back in INO_STATE_READING |
328 | again by the time we next enter this function? And that this happens | 327 | again by the time we next enter this function? And that this happens |
329 | enough times to cause a real delay? | 328 | enough times to cause a real delay? |
330 | 329 | ||
331 | A: Small enough that I don't care :) | 330 | A: Small enough that I don't care :) |
332 | */ | 331 | */ |
333 | return 0; | 332 | return 0; |
334 | } | 333 | } |
335 | 334 | ||
336 | /* OK. Now if the inode is in state INO_STATE_GC, we are going to copy the | 335 | /* OK. Now if the inode is in state INO_STATE_GC, we are going to copy the |
337 | node intact, and we don't have to muck about with the fragtree etc. | 336 | node intact, and we don't have to muck about with the fragtree etc. |
338 | because we know it's not in-core. If it _was_ in-core, we go through | 337 | because we know it's not in-core. If it _was_ in-core, we go through |
339 | all the iget() crap anyway */ | 338 | all the iget() crap anyway */ |
340 | 339 | ||
@@ -454,7 +453,7 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_era | |||
454 | if (!ret) { | 453 | if (!ret) { |
455 | /* Urgh. Return it sensibly. */ | 454 | /* Urgh. Return it sensibly. */ |
456 | frag->node->raw = f->inocache->nodes; | 455 | frag->node->raw = f->inocache->nodes; |
457 | } | 456 | } |
458 | if (ret != -EBADFD) | 457 | if (ret != -EBADFD) |
459 | goto upnout; | 458 | goto upnout; |
460 | } | 459 | } |
@@ -468,7 +467,7 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_era | |||
468 | } | 467 | } |
469 | goto upnout; | 468 | goto upnout; |
470 | } | 469 | } |
471 | 470 | ||
472 | /* Wasn't a dnode. Try dirent */ | 471 | /* Wasn't a dnode. Try dirent */ |
473 | for (fd = f->dents; fd; fd=fd->next) { | 472 | for (fd = f->dents; fd; fd=fd->next) { |
474 | if (fd->raw == raw) | 473 | if (fd->raw == raw) |
@@ -485,7 +484,8 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_era | |||
485 | if (ref_obsolete(raw)) { | 484 | if (ref_obsolete(raw)) { |
486 | printk(KERN_WARNING "But it's obsolete so we don't mind too much\n"); | 485 | printk(KERN_WARNING "But it's obsolete so we don't mind too much\n"); |
487 | } else { | 486 | } else { |
488 | ret = -EIO; | 487 | jffs2_dbg_dump_node(c, ref_offset(raw)); |
488 | BUG(); | ||
489 | } | 489 | } |
490 | } | 490 | } |
491 | upnout: | 491 | upnout: |
@@ -494,7 +494,7 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_era | |||
494 | return ret; | 494 | return ret; |
495 | } | 495 | } |
496 | 496 | ||
497 | static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, | 497 | static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, |
498 | struct jffs2_inode_cache *ic, | 498 | struct jffs2_inode_cache *ic, |
499 | struct jffs2_raw_node_ref *raw) | 499 | struct jffs2_raw_node_ref *raw) |
500 | { | 500 | { |
@@ -513,8 +513,11 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, | |||
513 | /* Ask for a small amount of space (or the totlen if smaller) because we | 513 | /* Ask for a small amount of space (or the totlen if smaller) because we |
514 | don't want to force wastage of the end of a block if splitting would | 514 | don't want to force wastage of the end of a block if splitting would |
515 | work. */ | 515 | work. */ |
516 | ret = jffs2_reserve_space_gc(c, min_t(uint32_t, sizeof(struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN, | 516 | ret = jffs2_reserve_space_gc(c, min_t(uint32_t, sizeof(struct jffs2_raw_inode) + |
517 | rawlen), &phys_ofs, &alloclen); | 517 | JFFS2_MIN_DATA_LEN, rawlen), &phys_ofs, &alloclen, rawlen); |
518 | /* this is not the exact summary size of it, | ||
519 | it is only an upper estimation */ | ||
520 | |||
518 | if (ret) | 521 | if (ret) |
519 | return ret; | 522 | return ret; |
520 | 523 | ||
@@ -577,7 +580,7 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, | |||
577 | } | 580 | } |
578 | break; | 581 | break; |
579 | default: | 582 | default: |
580 | printk(KERN_WARNING "Unknown node type for REF_PRISTINE node at 0x%08x: 0x%04x\n", | 583 | printk(KERN_WARNING "Unknown node type for REF_PRISTINE node at 0x%08x: 0x%04x\n", |
581 | ref_offset(raw), je16_to_cpu(node->u.nodetype)); | 584 | ref_offset(raw), je16_to_cpu(node->u.nodetype)); |
582 | goto bail; | 585 | goto bail; |
583 | } | 586 | } |
@@ -618,17 +621,19 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, | |||
618 | retried = 1; | 621 | retried = 1; |
619 | 622 | ||
620 | D1(printk(KERN_DEBUG "Retrying failed write of REF_PRISTINE node.\n")); | 623 | D1(printk(KERN_DEBUG "Retrying failed write of REF_PRISTINE node.\n")); |
621 | |||
622 | ACCT_SANITY_CHECK(c,jeb); | ||
623 | D1(ACCT_PARANOIA_CHECK(jeb)); | ||
624 | 624 | ||
625 | ret = jffs2_reserve_space_gc(c, rawlen, &phys_ofs, &dummy); | 625 | jffs2_dbg_acct_sanity_check(c,jeb); |
626 | jffs2_dbg_acct_paranoia_check(c, jeb); | ||
627 | |||
628 | ret = jffs2_reserve_space_gc(c, rawlen, &phys_ofs, &dummy, rawlen); | ||
629 | /* this is not the exact summary size of it, | ||
630 | it is only an upper estimation */ | ||
626 | 631 | ||
627 | if (!ret) { | 632 | if (!ret) { |
628 | D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", phys_ofs)); | 633 | D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", phys_ofs)); |
629 | 634 | ||
630 | ACCT_SANITY_CHECK(c,jeb); | 635 | jffs2_dbg_acct_sanity_check(c,jeb); |
631 | D1(ACCT_PARANOIA_CHECK(jeb)); | 636 | jffs2_dbg_acct_paranoia_check(c, jeb); |
632 | 637 | ||
633 | goto retry; | 638 | goto retry; |
634 | } | 639 | } |
@@ -664,7 +669,7 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, | |||
664 | goto out_node; | 669 | goto out_node; |
665 | } | 670 | } |
666 | 671 | ||
667 | static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | 672 | static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, |
668 | struct jffs2_inode_info *f, struct jffs2_full_dnode *fn) | 673 | struct jffs2_inode_info *f, struct jffs2_full_dnode *fn) |
669 | { | 674 | { |
670 | struct jffs2_full_dnode *new_fn; | 675 | struct jffs2_full_dnode *new_fn; |
@@ -679,7 +684,7 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_ | |||
679 | S_ISCHR(JFFS2_F_I_MODE(f)) ) { | 684 | S_ISCHR(JFFS2_F_I_MODE(f)) ) { |
680 | /* For these, we don't actually need to read the old node */ | 685 | /* For these, we don't actually need to read the old node */ |
681 | /* FIXME: for minor or major > 255. */ | 686 | /* FIXME: for minor or major > 255. */ |
682 | dev = cpu_to_je16(((JFFS2_F_I_RDEV_MAJ(f) << 8) | | 687 | dev = cpu_to_je16(((JFFS2_F_I_RDEV_MAJ(f) << 8) | |
683 | JFFS2_F_I_RDEV_MIN(f))); | 688 | JFFS2_F_I_RDEV_MIN(f))); |
684 | mdata = (char *)&dev; | 689 | mdata = (char *)&dev; |
685 | mdatalen = sizeof(dev); | 690 | mdatalen = sizeof(dev); |
@@ -700,14 +705,15 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_ | |||
700 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bites of symlink target\n", mdatalen)); | 705 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bites of symlink target\n", mdatalen)); |
701 | 706 | ||
702 | } | 707 | } |
703 | 708 | ||
704 | ret = jffs2_reserve_space_gc(c, sizeof(ri) + mdatalen, &phys_ofs, &alloclen); | 709 | ret = jffs2_reserve_space_gc(c, sizeof(ri) + mdatalen, &phys_ofs, &alloclen, |
710 | JFFS2_SUMMARY_INODE_SIZE); | ||
705 | if (ret) { | 711 | if (ret) { |
706 | printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_metadata failed: %d\n", | 712 | printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_metadata failed: %d\n", |
707 | sizeof(ri)+ mdatalen, ret); | 713 | sizeof(ri)+ mdatalen, ret); |
708 | goto out; | 714 | goto out; |
709 | } | 715 | } |
710 | 716 | ||
711 | last_frag = frag_last(&f->fragtree); | 717 | last_frag = frag_last(&f->fragtree); |
712 | if (last_frag) | 718 | if (last_frag) |
713 | /* Fetch the inode length from the fragtree rather then | 719 | /* Fetch the inode length from the fragtree rather then |
@@ -715,7 +721,7 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_ | |||
715 | ilen = last_frag->ofs + last_frag->size; | 721 | ilen = last_frag->ofs + last_frag->size; |
716 | else | 722 | else |
717 | ilen = JFFS2_F_I_SIZE(f); | 723 | ilen = JFFS2_F_I_SIZE(f); |
718 | 724 | ||
719 | memset(&ri, 0, sizeof(ri)); | 725 | memset(&ri, 0, sizeof(ri)); |
720 | ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); | 726 | ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); |
721 | ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE); | 727 | ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE); |
@@ -754,7 +760,7 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_ | |||
754 | return ret; | 760 | return ret; |
755 | } | 761 | } |
756 | 762 | ||
757 | static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | 763 | static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, |
758 | struct jffs2_inode_info *f, struct jffs2_full_dirent *fd) | 764 | struct jffs2_inode_info *f, struct jffs2_full_dirent *fd) |
759 | { | 765 | { |
760 | struct jffs2_full_dirent *new_fd; | 766 | struct jffs2_full_dirent *new_fd; |
@@ -771,12 +777,18 @@ static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_er | |||
771 | rd.pino = cpu_to_je32(f->inocache->ino); | 777 | rd.pino = cpu_to_je32(f->inocache->ino); |
772 | rd.version = cpu_to_je32(++f->highest_version); | 778 | rd.version = cpu_to_je32(++f->highest_version); |
773 | rd.ino = cpu_to_je32(fd->ino); | 779 | rd.ino = cpu_to_je32(fd->ino); |
774 | rd.mctime = cpu_to_je32(max(JFFS2_F_I_MTIME(f), JFFS2_F_I_CTIME(f))); | 780 | /* If the times on this inode were set by explicit utime() they can be different, |
781 | so refrain from splatting them. */ | ||
782 | if (JFFS2_F_I_MTIME(f) == JFFS2_F_I_CTIME(f)) | ||
783 | rd.mctime = cpu_to_je32(JFFS2_F_I_MTIME(f)); | ||
784 | else | ||
785 | rd.mctime = cpu_to_je32(0); | ||
775 | rd.type = fd->type; | 786 | rd.type = fd->type; |
776 | rd.node_crc = cpu_to_je32(crc32(0, &rd, sizeof(rd)-8)); | 787 | rd.node_crc = cpu_to_je32(crc32(0, &rd, sizeof(rd)-8)); |
777 | rd.name_crc = cpu_to_je32(crc32(0, fd->name, rd.nsize)); | 788 | rd.name_crc = cpu_to_je32(crc32(0, fd->name, rd.nsize)); |
778 | 789 | ||
779 | ret = jffs2_reserve_space_gc(c, sizeof(rd)+rd.nsize, &phys_ofs, &alloclen); | 790 | ret = jffs2_reserve_space_gc(c, sizeof(rd)+rd.nsize, &phys_ofs, &alloclen, |
791 | JFFS2_SUMMARY_DIRENT_SIZE(rd.nsize)); | ||
780 | if (ret) { | 792 | if (ret) { |
781 | printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dirent failed: %d\n", | 793 | printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dirent failed: %d\n", |
782 | sizeof(rd)+rd.nsize, ret); | 794 | sizeof(rd)+rd.nsize, ret); |
@@ -792,7 +804,7 @@ static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_er | |||
792 | return 0; | 804 | return 0; |
793 | } | 805 | } |
794 | 806 | ||
795 | static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | 807 | static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, |
796 | struct jffs2_inode_info *f, struct jffs2_full_dirent *fd) | 808 | struct jffs2_inode_info *f, struct jffs2_full_dirent *fd) |
797 | { | 809 | { |
798 | struct jffs2_full_dirent **fdp = &f->dents; | 810 | struct jffs2_full_dirent **fdp = &f->dents; |
@@ -831,7 +843,7 @@ static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct | |||
831 | if (ref_totlen(c, NULL, raw) != rawlen) | 843 | if (ref_totlen(c, NULL, raw) != rawlen) |
832 | continue; | 844 | continue; |
833 | 845 | ||
834 | /* Doesn't matter if there's one in the same erase block. We're going to | 846 | /* Doesn't matter if there's one in the same erase block. We're going to |
835 | delete it too at the same time. */ | 847 | delete it too at the same time. */ |
836 | if (SECTOR_ADDR(raw->flash_offset) == SECTOR_ADDR(fd->raw->flash_offset)) | 848 | if (SECTOR_ADDR(raw->flash_offset) == SECTOR_ADDR(fd->raw->flash_offset)) |
837 | continue; | 849 | continue; |
@@ -883,6 +895,9 @@ static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct | |||
883 | kfree(rd); | 895 | kfree(rd); |
884 | } | 896 | } |
885 | 897 | ||
898 | /* FIXME: If we're deleting a dirent which contains the current mtime and ctime, | ||
899 | we should update the metadata node with those times accordingly */ | ||
900 | |||
886 | /* No need for it any more. Just mark it obsolete and remove it from the list */ | 901 | /* No need for it any more. Just mark it obsolete and remove it from the list */ |
887 | while (*fdp) { | 902 | while (*fdp) { |
888 | if ((*fdp) == fd) { | 903 | if ((*fdp) == fd) { |
@@ -912,13 +927,13 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras | |||
912 | 927 | ||
913 | D1(printk(KERN_DEBUG "Writing replacement hole node for ino #%u from offset 0x%x to 0x%x\n", | 928 | D1(printk(KERN_DEBUG "Writing replacement hole node for ino #%u from offset 0x%x to 0x%x\n", |
914 | f->inocache->ino, start, end)); | 929 | f->inocache->ino, start, end)); |
915 | 930 | ||
916 | memset(&ri, 0, sizeof(ri)); | 931 | memset(&ri, 0, sizeof(ri)); |
917 | 932 | ||
918 | if(fn->frags > 1) { | 933 | if(fn->frags > 1) { |
919 | size_t readlen; | 934 | size_t readlen; |
920 | uint32_t crc; | 935 | uint32_t crc; |
921 | /* It's partially obsoleted by a later write. So we have to | 936 | /* It's partially obsoleted by a later write. So we have to |
922 | write it out again with the _same_ version as before */ | 937 | write it out again with the _same_ version as before */ |
923 | ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(ri), &readlen, (char *)&ri); | 938 | ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(ri), &readlen, (char *)&ri); |
924 | if (readlen != sizeof(ri) || ret) { | 939 | if (readlen != sizeof(ri) || ret) { |
@@ -940,16 +955,16 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras | |||
940 | crc = crc32(0, &ri, sizeof(ri)-8); | 955 | crc = crc32(0, &ri, sizeof(ri)-8); |
941 | if (crc != je32_to_cpu(ri.node_crc)) { | 956 | if (crc != je32_to_cpu(ri.node_crc)) { |
942 | printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had CRC 0x%08x which doesn't match calculated CRC 0x%08x\n", | 957 | printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had CRC 0x%08x which doesn't match calculated CRC 0x%08x\n", |
943 | ref_offset(fn->raw), | 958 | ref_offset(fn->raw), |
944 | je32_to_cpu(ri.node_crc), crc); | 959 | je32_to_cpu(ri.node_crc), crc); |
945 | /* FIXME: We could possibly deal with this by writing new holes for each frag */ | 960 | /* FIXME: We could possibly deal with this by writing new holes for each frag */ |
946 | printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n", | 961 | printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n", |
947 | start, end, f->inocache->ino); | 962 | start, end, f->inocache->ino); |
948 | goto fill; | 963 | goto fill; |
949 | } | 964 | } |
950 | if (ri.compr != JFFS2_COMPR_ZERO) { | 965 | if (ri.compr != JFFS2_COMPR_ZERO) { |
951 | printk(KERN_WARNING "jffs2_garbage_collect_hole: Node 0x%08x wasn't a hole node!\n", ref_offset(fn->raw)); | 966 | printk(KERN_WARNING "jffs2_garbage_collect_hole: Node 0x%08x wasn't a hole node!\n", ref_offset(fn->raw)); |
952 | printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n", | 967 | printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n", |
953 | start, end, f->inocache->ino); | 968 | start, end, f->inocache->ino); |
954 | goto fill; | 969 | goto fill; |
955 | } | 970 | } |
@@ -967,7 +982,7 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras | |||
967 | ri.csize = cpu_to_je32(0); | 982 | ri.csize = cpu_to_je32(0); |
968 | ri.compr = JFFS2_COMPR_ZERO; | 983 | ri.compr = JFFS2_COMPR_ZERO; |
969 | } | 984 | } |
970 | 985 | ||
971 | frag = frag_last(&f->fragtree); | 986 | frag = frag_last(&f->fragtree); |
972 | if (frag) | 987 | if (frag) |
973 | /* Fetch the inode length from the fragtree rather then | 988 | /* Fetch the inode length from the fragtree rather then |
@@ -986,7 +1001,8 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras | |||
986 | ri.data_crc = cpu_to_je32(0); | 1001 | ri.data_crc = cpu_to_je32(0); |
987 | ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); | 1002 | ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); |
988 | 1003 | ||
989 | ret = jffs2_reserve_space_gc(c, sizeof(ri), &phys_ofs, &alloclen); | 1004 | ret = jffs2_reserve_space_gc(c, sizeof(ri), &phys_ofs, &alloclen, |
1005 | JFFS2_SUMMARY_INODE_SIZE); | ||
990 | if (ret) { | 1006 | if (ret) { |
991 | printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_hole failed: %d\n", | 1007 | printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_hole failed: %d\n", |
992 | sizeof(ri), ret); | 1008 | sizeof(ri), ret); |
@@ -1008,10 +1024,10 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras | |||
1008 | return 0; | 1024 | return 0; |
1009 | } | 1025 | } |
1010 | 1026 | ||
1011 | /* | 1027 | /* |
1012 | * We should only get here in the case where the node we are | 1028 | * We should only get here in the case where the node we are |
1013 | * replacing had more than one frag, so we kept the same version | 1029 | * replacing had more than one frag, so we kept the same version |
1014 | * number as before. (Except in case of error -- see 'goto fill;' | 1030 | * number as before. (Except in case of error -- see 'goto fill;' |
1015 | * above.) | 1031 | * above.) |
1016 | */ | 1032 | */ |
1017 | D1(if(unlikely(fn->frags <= 1)) { | 1033 | D1(if(unlikely(fn->frags <= 1)) { |
@@ -1023,7 +1039,7 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras | |||
1023 | /* This is a partially-overlapped hole node. Mark it REF_NORMAL not REF_PRISTINE */ | 1039 | /* This is a partially-overlapped hole node. Mark it REF_NORMAL not REF_PRISTINE */ |
1024 | mark_ref_normal(new_fn->raw); | 1040 | mark_ref_normal(new_fn->raw); |
1025 | 1041 | ||
1026 | for (frag = jffs2_lookup_node_frag(&f->fragtree, fn->ofs); | 1042 | for (frag = jffs2_lookup_node_frag(&f->fragtree, fn->ofs); |
1027 | frag; frag = frag_next(frag)) { | 1043 | frag; frag = frag_next(frag)) { |
1028 | if (frag->ofs > fn->size + fn->ofs) | 1044 | if (frag->ofs > fn->size + fn->ofs) |
1029 | break; | 1045 | break; |
@@ -1041,10 +1057,10 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras | |||
1041 | printk(KERN_WARNING "jffs2_garbage_collect_hole: New node has no frags!\n"); | 1057 | printk(KERN_WARNING "jffs2_garbage_collect_hole: New node has no frags!\n"); |
1042 | BUG(); | 1058 | BUG(); |
1043 | } | 1059 | } |
1044 | 1060 | ||
1045 | jffs2_mark_node_obsolete(c, fn->raw); | 1061 | jffs2_mark_node_obsolete(c, fn->raw); |
1046 | jffs2_free_full_dnode(fn); | 1062 | jffs2_free_full_dnode(fn); |
1047 | 1063 | ||
1048 | return 0; | 1064 | return 0; |
1049 | } | 1065 | } |
1050 | 1066 | ||
@@ -1054,12 +1070,12 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era | |||
1054 | { | 1070 | { |
1055 | struct jffs2_full_dnode *new_fn; | 1071 | struct jffs2_full_dnode *new_fn; |
1056 | struct jffs2_raw_inode ri; | 1072 | struct jffs2_raw_inode ri; |
1057 | uint32_t alloclen, phys_ofs, offset, orig_end, orig_start; | 1073 | uint32_t alloclen, phys_ofs, offset, orig_end, orig_start; |
1058 | int ret = 0; | 1074 | int ret = 0; |
1059 | unsigned char *comprbuf = NULL, *writebuf; | 1075 | unsigned char *comprbuf = NULL, *writebuf; |
1060 | unsigned long pg; | 1076 | unsigned long pg; |
1061 | unsigned char *pg_ptr; | 1077 | unsigned char *pg_ptr; |
1062 | 1078 | ||
1063 | memset(&ri, 0, sizeof(ri)); | 1079 | memset(&ri, 0, sizeof(ri)); |
1064 | 1080 | ||
1065 | D1(printk(KERN_DEBUG "Writing replacement dnode for ino #%u from offset 0x%x to 0x%x\n", | 1081 | D1(printk(KERN_DEBUG "Writing replacement dnode for ino #%u from offset 0x%x to 0x%x\n", |
@@ -1071,8 +1087,8 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era | |||
1071 | if (c->nr_free_blocks + c->nr_erasing_blocks > c->resv_blocks_gcmerge) { | 1087 | if (c->nr_free_blocks + c->nr_erasing_blocks > c->resv_blocks_gcmerge) { |
1072 | /* Attempt to do some merging. But only expand to cover logically | 1088 | /* Attempt to do some merging. But only expand to cover logically |
1073 | adjacent frags if the block containing them is already considered | 1089 | adjacent frags if the block containing them is already considered |
1074 | to be dirty. Otherwise we end up with GC just going round in | 1090 | to be dirty. Otherwise we end up with GC just going round in |
1075 | circles dirtying the nodes it already wrote out, especially | 1091 | circles dirtying the nodes it already wrote out, especially |
1076 | on NAND where we have small eraseblocks and hence a much higher | 1092 | on NAND where we have small eraseblocks and hence a much higher |
1077 | chance of nodes having to be split to cross boundaries. */ | 1093 | chance of nodes having to be split to cross boundaries. */ |
1078 | 1094 | ||
@@ -1106,7 +1122,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era | |||
1106 | break; | 1122 | break; |
1107 | } else { | 1123 | } else { |
1108 | 1124 | ||
1109 | /* OK, it's a frag which extends to the beginning of the page. Does it live | 1125 | /* OK, it's a frag which extends to the beginning of the page. Does it live |
1110 | in a block which is still considered clean? If so, don't obsolete it. | 1126 | in a block which is still considered clean? If so, don't obsolete it. |
1111 | If not, cover it anyway. */ | 1127 | If not, cover it anyway. */ |
1112 | 1128 | ||
@@ -1156,7 +1172,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era | |||
1156 | break; | 1172 | break; |
1157 | } else { | 1173 | } else { |
1158 | 1174 | ||
1159 | /* OK, it's a frag which extends to the beginning of the page. Does it live | 1175 | /* OK, it's a frag which extends to the beginning of the page. Does it live |
1160 | in a block which is still considered clean? If so, don't obsolete it. | 1176 | in a block which is still considered clean? If so, don't obsolete it. |
1161 | If not, cover it anyway. */ | 1177 | If not, cover it anyway. */ |
1162 | 1178 | ||
@@ -1183,14 +1199,14 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era | |||
1183 | break; | 1199 | break; |
1184 | } | 1200 | } |
1185 | } | 1201 | } |
1186 | D1(printk(KERN_DEBUG "Expanded dnode to write from (0x%x-0x%x) to (0x%x-0x%x)\n", | 1202 | D1(printk(KERN_DEBUG "Expanded dnode to write from (0x%x-0x%x) to (0x%x-0x%x)\n", |
1187 | orig_start, orig_end, start, end)); | 1203 | orig_start, orig_end, start, end)); |
1188 | 1204 | ||
1189 | D1(BUG_ON(end > frag_last(&f->fragtree)->ofs + frag_last(&f->fragtree)->size)); | 1205 | D1(BUG_ON(end > frag_last(&f->fragtree)->ofs + frag_last(&f->fragtree)->size)); |
1190 | BUG_ON(end < orig_end); | 1206 | BUG_ON(end < orig_end); |
1191 | BUG_ON(start > orig_start); | 1207 | BUG_ON(start > orig_start); |
1192 | } | 1208 | } |
1193 | 1209 | ||
1194 | /* First, use readpage() to read the appropriate page into the page cache */ | 1210 | /* First, use readpage() to read the appropriate page into the page cache */ |
1195 | /* Q: What happens if we actually try to GC the _same_ page for which commit_write() | 1211 | /* Q: What happens if we actually try to GC the _same_ page for which commit_write() |
1196 | * triggered garbage collection in the first place? | 1212 | * triggered garbage collection in the first place? |
@@ -1211,7 +1227,8 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era | |||
1211 | uint32_t cdatalen; | 1227 | uint32_t cdatalen; |
1212 | uint16_t comprtype = JFFS2_COMPR_NONE; | 1228 | uint16_t comprtype = JFFS2_COMPR_NONE; |
1213 | 1229 | ||
1214 | ret = jffs2_reserve_space_gc(c, sizeof(ri) + JFFS2_MIN_DATA_LEN, &phys_ofs, &alloclen); | 1230 | ret = jffs2_reserve_space_gc(c, sizeof(ri) + JFFS2_MIN_DATA_LEN, &phys_ofs, |
1231 | &alloclen, JFFS2_SUMMARY_INODE_SIZE); | ||
1215 | 1232 | ||
1216 | if (ret) { | 1233 | if (ret) { |
1217 | printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dnode failed: %d\n", | 1234 | printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dnode failed: %d\n", |
@@ -1246,7 +1263,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era | |||
1246 | ri.usercompr = (comprtype >> 8) & 0xff; | 1263 | ri.usercompr = (comprtype >> 8) & 0xff; |
1247 | ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); | 1264 | ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); |
1248 | ri.data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen)); | 1265 | ri.data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen)); |
1249 | 1266 | ||
1250 | new_fn = jffs2_write_dnode(c, f, &ri, comprbuf, cdatalen, phys_ofs, ALLOC_GC); | 1267 | new_fn = jffs2_write_dnode(c, f, &ri, comprbuf, cdatalen, phys_ofs, ALLOC_GC); |
1251 | 1268 | ||
1252 | jffs2_free_comprbuf(comprbuf, writebuf); | 1269 | jffs2_free_comprbuf(comprbuf, writebuf); |
@@ -1268,4 +1285,3 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era | |||
1268 | jffs2_gc_release_page(c, pg_ptr, &pg); | 1285 | jffs2_gc_release_page(c, pg_ptr, &pg); |
1269 | return ret; | 1286 | return ret; |
1270 | } | 1287 | } |
1271 | |||
diff --git a/fs/jffs2/histo.h b/fs/jffs2/histo.h index 84f184f0836f..22a93a08210c 100644 --- a/fs/jffs2/histo.h +++ b/fs/jffs2/histo.h | |||
@@ -1,3 +1,3 @@ | |||
1 | /* This file provides the bit-probabilities for the input file */ | 1 | /* This file provides the bit-probabilities for the input file */ |
2 | #define BIT_DIVIDER 629 | 2 | #define BIT_DIVIDER 629 |
3 | static int bits[9] = { 179,167,183,165,159,198,178,119,}; /* ia32 .so files */ | 3 | static int bits[9] = { 179,167,183,165,159,198,178,119,}; /* ia32 .so files */ |
diff --git a/fs/jffs2/histo_mips.h b/fs/jffs2/histo_mips.h index 9a443268d885..fa3dac19a109 100644 --- a/fs/jffs2/histo_mips.h +++ b/fs/jffs2/histo_mips.h | |||
@@ -1,2 +1,2 @@ | |||
1 | #define BIT_DIVIDER_MIPS 1043 | 1 | #define BIT_DIVIDER_MIPS 1043 |
2 | static int bits_mips[8] = { 277,249,290,267,229,341,212,241}; /* mips32 */ | 2 | static int bits_mips[8] = { 277,249,290,267,229,341,212,241}; /* mips32 */ |
diff --git a/fs/jffs2/ioctl.c b/fs/jffs2/ioctl.c index 238c7992064c..69099835de1c 100644 --- a/fs/jffs2/ioctl.c +++ b/fs/jffs2/ioctl.c | |||
@@ -7,17 +7,17 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: ioctl.c,v 1.9 2004/11/16 20:36:11 dwmw2 Exp $ | 10 | * $Id: ioctl.c,v 1.10 2005/11/07 11:14:40 gleixner Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/fs.h> | 14 | #include <linux/fs.h> |
15 | 15 | ||
16 | int jffs2_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, | 16 | int jffs2_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, |
17 | unsigned long arg) | 17 | unsigned long arg) |
18 | { | 18 | { |
19 | /* Later, this will provide for lsattr.jffs2 and chattr.jffs2, which | 19 | /* Later, this will provide for lsattr.jffs2 and chattr.jffs2, which |
20 | will include compression support etc. */ | 20 | will include compression support etc. */ |
21 | return -ENOTTY; | 21 | return -ENOTTY; |
22 | } | 22 | } |
23 | 23 | ||
diff --git a/fs/jffs2/malloc.c b/fs/jffs2/malloc.c index 5abb431c2a00..036cbd11c004 100644 --- a/fs/jffs2/malloc.c +++ b/fs/jffs2/malloc.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: malloc.c,v 1.28 2004/11/16 20:36:11 dwmw2 Exp $ | 10 | * $Id: malloc.c,v 1.31 2005/11/07 11:14:40 gleixner Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -17,15 +17,6 @@ | |||
17 | #include <linux/jffs2.h> | 17 | #include <linux/jffs2.h> |
18 | #include "nodelist.h" | 18 | #include "nodelist.h" |
19 | 19 | ||
20 | #if 0 | ||
21 | #define JFFS2_SLAB_POISON SLAB_POISON | ||
22 | #else | ||
23 | #define JFFS2_SLAB_POISON 0 | ||
24 | #endif | ||
25 | |||
26 | // replace this by #define D3 (x) x for cache debugging | ||
27 | #define D3(x) | ||
28 | |||
29 | /* These are initialised to NULL in the kernel startup code. | 20 | /* These are initialised to NULL in the kernel startup code. |
30 | If you're porting to other operating systems, beware */ | 21 | If you're porting to other operating systems, beware */ |
31 | static kmem_cache_t *full_dnode_slab; | 22 | static kmem_cache_t *full_dnode_slab; |
@@ -38,45 +29,45 @@ static kmem_cache_t *inode_cache_slab; | |||
38 | 29 | ||
39 | int __init jffs2_create_slab_caches(void) | 30 | int __init jffs2_create_slab_caches(void) |
40 | { | 31 | { |
41 | full_dnode_slab = kmem_cache_create("jffs2_full_dnode", | 32 | full_dnode_slab = kmem_cache_create("jffs2_full_dnode", |
42 | sizeof(struct jffs2_full_dnode), | 33 | sizeof(struct jffs2_full_dnode), |
43 | 0, JFFS2_SLAB_POISON, NULL, NULL); | 34 | 0, 0, NULL, NULL); |
44 | if (!full_dnode_slab) | 35 | if (!full_dnode_slab) |
45 | goto err; | 36 | goto err; |
46 | 37 | ||
47 | raw_dirent_slab = kmem_cache_create("jffs2_raw_dirent", | 38 | raw_dirent_slab = kmem_cache_create("jffs2_raw_dirent", |
48 | sizeof(struct jffs2_raw_dirent), | 39 | sizeof(struct jffs2_raw_dirent), |
49 | 0, JFFS2_SLAB_POISON, NULL, NULL); | 40 | 0, 0, NULL, NULL); |
50 | if (!raw_dirent_slab) | 41 | if (!raw_dirent_slab) |
51 | goto err; | 42 | goto err; |
52 | 43 | ||
53 | raw_inode_slab = kmem_cache_create("jffs2_raw_inode", | 44 | raw_inode_slab = kmem_cache_create("jffs2_raw_inode", |
54 | sizeof(struct jffs2_raw_inode), | 45 | sizeof(struct jffs2_raw_inode), |
55 | 0, JFFS2_SLAB_POISON, NULL, NULL); | 46 | 0, 0, NULL, NULL); |
56 | if (!raw_inode_slab) | 47 | if (!raw_inode_slab) |
57 | goto err; | 48 | goto err; |
58 | 49 | ||
59 | tmp_dnode_info_slab = kmem_cache_create("jffs2_tmp_dnode", | 50 | tmp_dnode_info_slab = kmem_cache_create("jffs2_tmp_dnode", |
60 | sizeof(struct jffs2_tmp_dnode_info), | 51 | sizeof(struct jffs2_tmp_dnode_info), |
61 | 0, JFFS2_SLAB_POISON, NULL, NULL); | 52 | 0, 0, NULL, NULL); |
62 | if (!tmp_dnode_info_slab) | 53 | if (!tmp_dnode_info_slab) |
63 | goto err; | 54 | goto err; |
64 | 55 | ||
65 | raw_node_ref_slab = kmem_cache_create("jffs2_raw_node_ref", | 56 | raw_node_ref_slab = kmem_cache_create("jffs2_raw_node_ref", |
66 | sizeof(struct jffs2_raw_node_ref), | 57 | sizeof(struct jffs2_raw_node_ref), |
67 | 0, JFFS2_SLAB_POISON, NULL, NULL); | 58 | 0, 0, NULL, NULL); |
68 | if (!raw_node_ref_slab) | 59 | if (!raw_node_ref_slab) |
69 | goto err; | 60 | goto err; |
70 | 61 | ||
71 | node_frag_slab = kmem_cache_create("jffs2_node_frag", | 62 | node_frag_slab = kmem_cache_create("jffs2_node_frag", |
72 | sizeof(struct jffs2_node_frag), | 63 | sizeof(struct jffs2_node_frag), |
73 | 0, JFFS2_SLAB_POISON, NULL, NULL); | 64 | 0, 0, NULL, NULL); |
74 | if (!node_frag_slab) | 65 | if (!node_frag_slab) |
75 | goto err; | 66 | goto err; |
76 | 67 | ||
77 | inode_cache_slab = kmem_cache_create("jffs2_inode_cache", | 68 | inode_cache_slab = kmem_cache_create("jffs2_inode_cache", |
78 | sizeof(struct jffs2_inode_cache), | 69 | sizeof(struct jffs2_inode_cache), |
79 | 0, JFFS2_SLAB_POISON, NULL, NULL); | 70 | 0, 0, NULL, NULL); |
80 | if (inode_cache_slab) | 71 | if (inode_cache_slab) |
81 | return 0; | 72 | return 0; |
82 | err: | 73 | err: |
@@ -104,102 +95,113 @@ void jffs2_destroy_slab_caches(void) | |||
104 | 95 | ||
105 | struct jffs2_full_dirent *jffs2_alloc_full_dirent(int namesize) | 96 | struct jffs2_full_dirent *jffs2_alloc_full_dirent(int namesize) |
106 | { | 97 | { |
107 | return kmalloc(sizeof(struct jffs2_full_dirent) + namesize, GFP_KERNEL); | 98 | struct jffs2_full_dirent *ret; |
99 | ret = kmalloc(sizeof(struct jffs2_full_dirent) + namesize, GFP_KERNEL); | ||
100 | dbg_memalloc("%p\n", ret); | ||
101 | return ret; | ||
108 | } | 102 | } |
109 | 103 | ||
110 | void jffs2_free_full_dirent(struct jffs2_full_dirent *x) | 104 | void jffs2_free_full_dirent(struct jffs2_full_dirent *x) |
111 | { | 105 | { |
106 | dbg_memalloc("%p\n", x); | ||
112 | kfree(x); | 107 | kfree(x); |
113 | } | 108 | } |
114 | 109 | ||
115 | struct jffs2_full_dnode *jffs2_alloc_full_dnode(void) | 110 | struct jffs2_full_dnode *jffs2_alloc_full_dnode(void) |
116 | { | 111 | { |
117 | struct jffs2_full_dnode *ret = kmem_cache_alloc(full_dnode_slab, GFP_KERNEL); | 112 | struct jffs2_full_dnode *ret; |
118 | D3 (printk (KERN_DEBUG "alloc_full_dnode at %p\n", ret)); | 113 | ret = kmem_cache_alloc(full_dnode_slab, GFP_KERNEL); |
114 | dbg_memalloc("%p\n", ret); | ||
119 | return ret; | 115 | return ret; |
120 | } | 116 | } |
121 | 117 | ||
122 | void jffs2_free_full_dnode(struct jffs2_full_dnode *x) | 118 | void jffs2_free_full_dnode(struct jffs2_full_dnode *x) |
123 | { | 119 | { |
124 | D3 (printk (KERN_DEBUG "free full_dnode at %p\n", x)); | 120 | dbg_memalloc("%p\n", x); |
125 | kmem_cache_free(full_dnode_slab, x); | 121 | kmem_cache_free(full_dnode_slab, x); |
126 | } | 122 | } |
127 | 123 | ||
128 | struct jffs2_raw_dirent *jffs2_alloc_raw_dirent(void) | 124 | struct jffs2_raw_dirent *jffs2_alloc_raw_dirent(void) |
129 | { | 125 | { |
130 | struct jffs2_raw_dirent *ret = kmem_cache_alloc(raw_dirent_slab, GFP_KERNEL); | 126 | struct jffs2_raw_dirent *ret; |
131 | D3 (printk (KERN_DEBUG "alloc_raw_dirent\n", ret)); | 127 | ret = kmem_cache_alloc(raw_dirent_slab, GFP_KERNEL); |
128 | dbg_memalloc("%p\n", ret); | ||
132 | return ret; | 129 | return ret; |
133 | } | 130 | } |
134 | 131 | ||
135 | void jffs2_free_raw_dirent(struct jffs2_raw_dirent *x) | 132 | void jffs2_free_raw_dirent(struct jffs2_raw_dirent *x) |
136 | { | 133 | { |
137 | D3 (printk (KERN_DEBUG "free_raw_dirent at %p\n", x)); | 134 | dbg_memalloc("%p\n", x); |
138 | kmem_cache_free(raw_dirent_slab, x); | 135 | kmem_cache_free(raw_dirent_slab, x); |
139 | } | 136 | } |
140 | 137 | ||
141 | struct jffs2_raw_inode *jffs2_alloc_raw_inode(void) | 138 | struct jffs2_raw_inode *jffs2_alloc_raw_inode(void) |
142 | { | 139 | { |
143 | struct jffs2_raw_inode *ret = kmem_cache_alloc(raw_inode_slab, GFP_KERNEL); | 140 | struct jffs2_raw_inode *ret; |
144 | D3 (printk (KERN_DEBUG "alloc_raw_inode at %p\n", ret)); | 141 | ret = kmem_cache_alloc(raw_inode_slab, GFP_KERNEL); |
142 | dbg_memalloc("%p\n", ret); | ||
145 | return ret; | 143 | return ret; |
146 | } | 144 | } |
147 | 145 | ||
148 | void jffs2_free_raw_inode(struct jffs2_raw_inode *x) | 146 | void jffs2_free_raw_inode(struct jffs2_raw_inode *x) |
149 | { | 147 | { |
150 | D3 (printk (KERN_DEBUG "free_raw_inode at %p\n", x)); | 148 | dbg_memalloc("%p\n", x); |
151 | kmem_cache_free(raw_inode_slab, x); | 149 | kmem_cache_free(raw_inode_slab, x); |
152 | } | 150 | } |
153 | 151 | ||
154 | struct jffs2_tmp_dnode_info *jffs2_alloc_tmp_dnode_info(void) | 152 | struct jffs2_tmp_dnode_info *jffs2_alloc_tmp_dnode_info(void) |
155 | { | 153 | { |
156 | struct jffs2_tmp_dnode_info *ret = kmem_cache_alloc(tmp_dnode_info_slab, GFP_KERNEL); | 154 | struct jffs2_tmp_dnode_info *ret; |
157 | D3 (printk (KERN_DEBUG "alloc_tmp_dnode_info at %p\n", ret)); | 155 | ret = kmem_cache_alloc(tmp_dnode_info_slab, GFP_KERNEL); |
156 | dbg_memalloc("%p\n", | ||
157 | ret); | ||
158 | return ret; | 158 | return ret; |
159 | } | 159 | } |
160 | 160 | ||
161 | void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *x) | 161 | void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *x) |
162 | { | 162 | { |
163 | D3 (printk (KERN_DEBUG "free_tmp_dnode_info at %p\n", x)); | 163 | dbg_memalloc("%p\n", x); |
164 | kmem_cache_free(tmp_dnode_info_slab, x); | 164 | kmem_cache_free(tmp_dnode_info_slab, x); |
165 | } | 165 | } |
166 | 166 | ||
167 | struct jffs2_raw_node_ref *jffs2_alloc_raw_node_ref(void) | 167 | struct jffs2_raw_node_ref *jffs2_alloc_raw_node_ref(void) |
168 | { | 168 | { |
169 | struct jffs2_raw_node_ref *ret = kmem_cache_alloc(raw_node_ref_slab, GFP_KERNEL); | 169 | struct jffs2_raw_node_ref *ret; |
170 | D3 (printk (KERN_DEBUG "alloc_raw_node_ref at %p\n", ret)); | 170 | ret = kmem_cache_alloc(raw_node_ref_slab, GFP_KERNEL); |
171 | dbg_memalloc("%p\n", ret); | ||
171 | return ret; | 172 | return ret; |
172 | } | 173 | } |
173 | 174 | ||
174 | void jffs2_free_raw_node_ref(struct jffs2_raw_node_ref *x) | 175 | void jffs2_free_raw_node_ref(struct jffs2_raw_node_ref *x) |
175 | { | 176 | { |
176 | D3 (printk (KERN_DEBUG "free_raw_node_ref at %p\n", x)); | 177 | dbg_memalloc("%p\n", x); |
177 | kmem_cache_free(raw_node_ref_slab, x); | 178 | kmem_cache_free(raw_node_ref_slab, x); |
178 | } | 179 | } |
179 | 180 | ||
180 | struct jffs2_node_frag *jffs2_alloc_node_frag(void) | 181 | struct jffs2_node_frag *jffs2_alloc_node_frag(void) |
181 | { | 182 | { |
182 | struct jffs2_node_frag *ret = kmem_cache_alloc(node_frag_slab, GFP_KERNEL); | 183 | struct jffs2_node_frag *ret; |
183 | D3 (printk (KERN_DEBUG "alloc_node_frag at %p\n", ret)); | 184 | ret = kmem_cache_alloc(node_frag_slab, GFP_KERNEL); |
185 | dbg_memalloc("%p\n", ret); | ||
184 | return ret; | 186 | return ret; |
185 | } | 187 | } |
186 | 188 | ||
187 | void jffs2_free_node_frag(struct jffs2_node_frag *x) | 189 | void jffs2_free_node_frag(struct jffs2_node_frag *x) |
188 | { | 190 | { |
189 | D3 (printk (KERN_DEBUG "free_node_frag at %p\n", x)); | 191 | dbg_memalloc("%p\n", x); |
190 | kmem_cache_free(node_frag_slab, x); | 192 | kmem_cache_free(node_frag_slab, x); |
191 | } | 193 | } |
192 | 194 | ||
193 | struct jffs2_inode_cache *jffs2_alloc_inode_cache(void) | 195 | struct jffs2_inode_cache *jffs2_alloc_inode_cache(void) |
194 | { | 196 | { |
195 | struct jffs2_inode_cache *ret = kmem_cache_alloc(inode_cache_slab, GFP_KERNEL); | 197 | struct jffs2_inode_cache *ret; |
196 | D3 (printk(KERN_DEBUG "Allocated inocache at %p\n", ret)); | 198 | ret = kmem_cache_alloc(inode_cache_slab, GFP_KERNEL); |
199 | dbg_memalloc("%p\n", ret); | ||
197 | return ret; | 200 | return ret; |
198 | } | 201 | } |
199 | 202 | ||
200 | void jffs2_free_inode_cache(struct jffs2_inode_cache *x) | 203 | void jffs2_free_inode_cache(struct jffs2_inode_cache *x) |
201 | { | 204 | { |
202 | D3 (printk(KERN_DEBUG "Freeing inocache at %p\n", x)); | 205 | dbg_memalloc("%p\n", x); |
203 | kmem_cache_free(inode_cache_slab, x); | 206 | kmem_cache_free(inode_cache_slab, x); |
204 | } | 207 | } |
205 | |||
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c index 4991c348f6ec..c79eebb8ab32 100644 --- a/fs/jffs2/nodelist.c +++ b/fs/jffs2/nodelist.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: nodelist.c,v 1.98 2005/07/10 15:15:32 dedekind Exp $ | 10 | * $Id: nodelist.c,v 1.115 2005/11/07 11:14:40 gleixner Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -24,469 +24,832 @@ | |||
24 | void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list) | 24 | void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list) |
25 | { | 25 | { |
26 | struct jffs2_full_dirent **prev = list; | 26 | struct jffs2_full_dirent **prev = list; |
27 | D1(printk(KERN_DEBUG "jffs2_add_fd_to_list( %p, %p (->%p))\n", new, list, *list)); | 27 | |
28 | dbg_dentlist("add dirent \"%s\", ino #%u\n", new->name, new->ino); | ||
28 | 29 | ||
29 | while ((*prev) && (*prev)->nhash <= new->nhash) { | 30 | while ((*prev) && (*prev)->nhash <= new->nhash) { |
30 | if ((*prev)->nhash == new->nhash && !strcmp((*prev)->name, new->name)) { | 31 | if ((*prev)->nhash == new->nhash && !strcmp((*prev)->name, new->name)) { |
31 | /* Duplicate. Free one */ | 32 | /* Duplicate. Free one */ |
32 | if (new->version < (*prev)->version) { | 33 | if (new->version < (*prev)->version) { |
33 | D1(printk(KERN_DEBUG "Eep! Marking new dirent node obsolete\n")); | 34 | dbg_dentlist("Eep! Marking new dirent node is obsolete, old is \"%s\", ino #%u\n", |
34 | D1(printk(KERN_DEBUG "New dirent is \"%s\"->ino #%u. Old is \"%s\"->ino #%u\n", new->name, new->ino, (*prev)->name, (*prev)->ino)); | 35 | (*prev)->name, (*prev)->ino); |
35 | jffs2_mark_node_obsolete(c, new->raw); | 36 | jffs2_mark_node_obsolete(c, new->raw); |
36 | jffs2_free_full_dirent(new); | 37 | jffs2_free_full_dirent(new); |
37 | } else { | 38 | } else { |
38 | D1(printk(KERN_DEBUG "Marking old dirent node (ino #%u) obsolete\n", (*prev)->ino)); | 39 | dbg_dentlist("marking old dirent \"%s\", ino #%u bsolete\n", |
40 | (*prev)->name, (*prev)->ino); | ||
39 | new->next = (*prev)->next; | 41 | new->next = (*prev)->next; |
40 | jffs2_mark_node_obsolete(c, ((*prev)->raw)); | 42 | jffs2_mark_node_obsolete(c, ((*prev)->raw)); |
41 | jffs2_free_full_dirent(*prev); | 43 | jffs2_free_full_dirent(*prev); |
42 | *prev = new; | 44 | *prev = new; |
43 | } | 45 | } |
44 | goto out; | 46 | return; |
45 | } | 47 | } |
46 | prev = &((*prev)->next); | 48 | prev = &((*prev)->next); |
47 | } | 49 | } |
48 | new->next = *prev; | 50 | new->next = *prev; |
49 | *prev = new; | 51 | *prev = new; |
52 | } | ||
53 | |||
54 | void jffs2_truncate_fragtree(struct jffs2_sb_info *c, struct rb_root *list, uint32_t size) | ||
55 | { | ||
56 | struct jffs2_node_frag *frag = jffs2_lookup_node_frag(list, size); | ||
57 | |||
58 | dbg_fragtree("truncating fragtree to 0x%08x bytes\n", size); | ||
59 | |||
60 | /* We know frag->ofs <= size. That's what lookup does for us */ | ||
61 | if (frag && frag->ofs != size) { | ||
62 | if (frag->ofs+frag->size > size) { | ||
63 | frag->size = size - frag->ofs; | ||
64 | } | ||
65 | frag = frag_next(frag); | ||
66 | } | ||
67 | while (frag && frag->ofs >= size) { | ||
68 | struct jffs2_node_frag *next = frag_next(frag); | ||
69 | |||
70 | frag_erase(frag, list); | ||
71 | jffs2_obsolete_node_frag(c, frag); | ||
72 | frag = next; | ||
73 | } | ||
50 | 74 | ||
51 | out: | 75 | if (size == 0) |
52 | D2(while(*list) { | 76 | return; |
53 | printk(KERN_DEBUG "Dirent \"%s\" (hash 0x%08x, ino #%u\n", (*list)->name, (*list)->nhash, (*list)->ino); | 77 | |
54 | list = &(*list)->next; | 78 | /* |
55 | }); | 79 | * If the last fragment starts at the RAM page boundary, it is |
80 | * REF_PRISTINE irrespective of its size. | ||
81 | */ | ||
82 | frag = frag_last(list); | ||
83 | if (frag->node && (frag->ofs & (PAGE_CACHE_SIZE - 1)) == 0) { | ||
84 | dbg_fragtree2("marking the last fragment 0x%08x-0x%08x REF_PRISTINE.\n", | ||
85 | frag->ofs, frag->ofs + frag->size); | ||
86 | frag->node->raw->flash_offset = ref_offset(frag->node->raw) | REF_PRISTINE; | ||
87 | } | ||
56 | } | 88 | } |
57 | 89 | ||
58 | /* | 90 | void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *this) |
59 | * Put a new tmp_dnode_info into the temporaty RB-tree, keeping the list in | ||
60 | * order of increasing version. | ||
61 | */ | ||
62 | static void jffs2_add_tn_to_tree(struct jffs2_tmp_dnode_info *tn, struct rb_root *list) | ||
63 | { | 91 | { |
64 | struct rb_node **p = &list->rb_node; | 92 | if (this->node) { |
65 | struct rb_node * parent = NULL; | 93 | this->node->frags--; |
66 | struct jffs2_tmp_dnode_info *this; | 94 | if (!this->node->frags) { |
67 | 95 | /* The node has no valid frags left. It's totally obsoleted */ | |
68 | while (*p) { | 96 | dbg_fragtree2("marking old node @0x%08x (0x%04x-0x%04x) obsolete\n", |
69 | parent = *p; | 97 | ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size); |
70 | this = rb_entry(parent, struct jffs2_tmp_dnode_info, rb); | 98 | jffs2_mark_node_obsolete(c, this->node->raw); |
71 | 99 | jffs2_free_full_dnode(this->node); | |
72 | /* There may actually be a collision here, but it doesn't | 100 | } else { |
73 | actually matter. As long as the two nodes with the same | 101 | dbg_fragtree2("marking old node @0x%08x (0x%04x-0x%04x) REF_NORMAL. frags is %d\n", |
74 | version are together, it's all fine. */ | 102 | ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size, this->node->frags); |
75 | if (tn->version < this->version) | 103 | mark_ref_normal(this->node->raw); |
76 | p = &(*p)->rb_left; | 104 | } |
77 | else | ||
78 | p = &(*p)->rb_right; | ||
79 | } | ||
80 | 105 | ||
81 | rb_link_node(&tn->rb, parent, p); | 106 | } |
82 | rb_insert_color(&tn->rb, list); | 107 | jffs2_free_node_frag(this); |
83 | } | 108 | } |
84 | 109 | ||
85 | static void jffs2_free_tmp_dnode_info_list(struct rb_root *list) | 110 | static void jffs2_fragtree_insert(struct jffs2_node_frag *newfrag, struct jffs2_node_frag *base) |
86 | { | 111 | { |
87 | struct rb_node *this; | 112 | struct rb_node *parent = &base->rb; |
88 | struct jffs2_tmp_dnode_info *tn; | 113 | struct rb_node **link = &parent; |
89 | 114 | ||
90 | this = list->rb_node; | 115 | dbg_fragtree2("insert frag (0x%04x-0x%04x)\n", newfrag->ofs, newfrag->ofs + newfrag->size); |
91 | 116 | ||
92 | /* Now at bottom of tree */ | 117 | while (*link) { |
93 | while (this) { | 118 | parent = *link; |
94 | if (this->rb_left) | 119 | base = rb_entry(parent, struct jffs2_node_frag, rb); |
95 | this = this->rb_left; | 120 | |
96 | else if (this->rb_right) | 121 | if (newfrag->ofs > base->ofs) |
97 | this = this->rb_right; | 122 | link = &base->rb.rb_right; |
123 | else if (newfrag->ofs < base->ofs) | ||
124 | link = &base->rb.rb_left; | ||
98 | else { | 125 | else { |
99 | tn = rb_entry(this, struct jffs2_tmp_dnode_info, rb); | 126 | JFFS2_ERROR("duplicate frag at %08x (%p,%p)\n", newfrag->ofs, newfrag, base); |
100 | jffs2_free_full_dnode(tn->fn); | 127 | BUG(); |
101 | jffs2_free_tmp_dnode_info(tn); | ||
102 | |||
103 | this = this->rb_parent; | ||
104 | if (!this) | ||
105 | break; | ||
106 | |||
107 | if (this->rb_left == &tn->rb) | ||
108 | this->rb_left = NULL; | ||
109 | else if (this->rb_right == &tn->rb) | ||
110 | this->rb_right = NULL; | ||
111 | else BUG(); | ||
112 | } | 128 | } |
113 | } | 129 | } |
114 | list->rb_node = NULL; | 130 | |
131 | rb_link_node(&newfrag->rb, &base->rb, link); | ||
115 | } | 132 | } |
116 | 133 | ||
117 | static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd) | 134 | /* |
135 | * Allocate and initializes a new fragment. | ||
136 | */ | ||
137 | static inline struct jffs2_node_frag * new_fragment(struct jffs2_full_dnode *fn, uint32_t ofs, uint32_t size) | ||
118 | { | 138 | { |
119 | struct jffs2_full_dirent *next; | 139 | struct jffs2_node_frag *newfrag; |
120 | 140 | ||
121 | while (fd) { | 141 | newfrag = jffs2_alloc_node_frag(); |
122 | next = fd->next; | 142 | if (likely(newfrag)) { |
123 | jffs2_free_full_dirent(fd); | 143 | newfrag->ofs = ofs; |
124 | fd = next; | 144 | newfrag->size = size; |
145 | newfrag->node = fn; | ||
146 | } else { | ||
147 | JFFS2_ERROR("cannot allocate a jffs2_node_frag object\n"); | ||
125 | } | 148 | } |
149 | |||
150 | return newfrag; | ||
126 | } | 151 | } |
127 | 152 | ||
128 | /* Returns first valid node after 'ref'. May return 'ref' */ | 153 | /* |
129 | static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref) | 154 | * Called when there is no overlapping fragment exist. Inserts a hole before the new |
155 | * fragment and inserts the new fragment to the fragtree. | ||
156 | */ | ||
157 | static int no_overlapping_node(struct jffs2_sb_info *c, struct rb_root *root, | ||
158 | struct jffs2_node_frag *newfrag, | ||
159 | struct jffs2_node_frag *this, uint32_t lastend) | ||
130 | { | 160 | { |
131 | while (ref && ref->next_in_ino) { | 161 | if (lastend < newfrag->node->ofs) { |
132 | if (!ref_obsolete(ref)) | 162 | /* put a hole in before the new fragment */ |
133 | return ref; | 163 | struct jffs2_node_frag *holefrag; |
134 | D1(printk(KERN_DEBUG "node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref))); | 164 | |
135 | ref = ref->next_in_ino; | 165 | holefrag= new_fragment(NULL, lastend, newfrag->node->ofs - lastend); |
166 | if (unlikely(!holefrag)) { | ||
167 | jffs2_free_node_frag(newfrag); | ||
168 | return -ENOMEM; | ||
169 | } | ||
170 | |||
171 | if (this) { | ||
172 | /* By definition, the 'this' node has no right-hand child, | ||
173 | because there are no frags with offset greater than it. | ||
174 | So that's where we want to put the hole */ | ||
175 | dbg_fragtree2("add hole frag %#04x-%#04x on the right of the new frag.\n", | ||
176 | holefrag->ofs, holefrag->ofs + holefrag->size); | ||
177 | rb_link_node(&holefrag->rb, &this->rb, &this->rb.rb_right); | ||
178 | } else { | ||
179 | dbg_fragtree2("Add hole frag %#04x-%#04x to the root of the tree.\n", | ||
180 | holefrag->ofs, holefrag->ofs + holefrag->size); | ||
181 | rb_link_node(&holefrag->rb, NULL, &root->rb_node); | ||
182 | } | ||
183 | rb_insert_color(&holefrag->rb, root); | ||
184 | this = holefrag; | ||
185 | } | ||
186 | |||
187 | if (this) { | ||
188 | /* By definition, the 'this' node has no right-hand child, | ||
189 | because there are no frags with offset greater than it. | ||
190 | So that's where we want to put new fragment */ | ||
191 | dbg_fragtree2("add the new node at the right\n"); | ||
192 | rb_link_node(&newfrag->rb, &this->rb, &this->rb.rb_right); | ||
193 | } else { | ||
194 | dbg_fragtree2("insert the new node at the root of the tree\n"); | ||
195 | rb_link_node(&newfrag->rb, NULL, &root->rb_node); | ||
136 | } | 196 | } |
137 | return NULL; | 197 | rb_insert_color(&newfrag->rb, root); |
198 | |||
199 | return 0; | ||
138 | } | 200 | } |
139 | 201 | ||
140 | /* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated | 202 | /* Doesn't set inode->i_size */ |
141 | with this ino, returning the former in order of version */ | 203 | static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *root, struct jffs2_node_frag *newfrag) |
204 | { | ||
205 | struct jffs2_node_frag *this; | ||
206 | uint32_t lastend; | ||
207 | |||
208 | /* Skip all the nodes which are completed before this one starts */ | ||
209 | this = jffs2_lookup_node_frag(root, newfrag->node->ofs); | ||
210 | |||
211 | if (this) { | ||
212 | dbg_fragtree2("lookup gave frag 0x%04x-0x%04x; phys 0x%08x (*%p)\n", | ||
213 | this->ofs, this->ofs+this->size, this->node?(ref_offset(this->node->raw)):0xffffffff, this); | ||
214 | lastend = this->ofs + this->size; | ||
215 | } else { | ||
216 | dbg_fragtree2("lookup gave no frag\n"); | ||
217 | lastend = 0; | ||
218 | } | ||
219 | |||
220 | /* See if we ran off the end of the fragtree */ | ||
221 | if (lastend <= newfrag->ofs) { | ||
222 | /* We did */ | ||
223 | |||
224 | /* Check if 'this' node was on the same page as the new node. | ||
225 | If so, both 'this' and the new node get marked REF_NORMAL so | ||
226 | the GC can take a look. | ||
227 | */ | ||
228 | if (lastend && (lastend-1) >> PAGE_CACHE_SHIFT == newfrag->ofs >> PAGE_CACHE_SHIFT) { | ||
229 | if (this->node) | ||
230 | mark_ref_normal(this->node->raw); | ||
231 | mark_ref_normal(newfrag->node->raw); | ||
232 | } | ||
233 | |||
234 | return no_overlapping_node(c, root, newfrag, this, lastend); | ||
235 | } | ||
142 | 236 | ||
143 | int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | 237 | if (this->node) |
144 | struct rb_root *tnp, struct jffs2_full_dirent **fdp, | 238 | dbg_fragtree2("dealing with frag %u-%u, phys %#08x(%d).\n", |
145 | uint32_t *highest_version, uint32_t *latest_mctime, | 239 | this->ofs, this->ofs + this->size, |
146 | uint32_t *mctime_ver) | 240 | ref_offset(this->node->raw), ref_flags(this->node->raw)); |
241 | else | ||
242 | dbg_fragtree2("dealing with hole frag %u-%u.\n", | ||
243 | this->ofs, this->ofs + this->size); | ||
244 | |||
245 | /* OK. 'this' is pointing at the first frag that newfrag->ofs at least partially obsoletes, | ||
246 | * - i.e. newfrag->ofs < this->ofs+this->size && newfrag->ofs >= this->ofs | ||
247 | */ | ||
248 | if (newfrag->ofs > this->ofs) { | ||
249 | /* This node isn't completely obsoleted. The start of it remains valid */ | ||
250 | |||
251 | /* Mark the new node and the partially covered node REF_NORMAL -- let | ||
252 | the GC take a look at them */ | ||
253 | mark_ref_normal(newfrag->node->raw); | ||
254 | if (this->node) | ||
255 | mark_ref_normal(this->node->raw); | ||
256 | |||
257 | if (this->ofs + this->size > newfrag->ofs + newfrag->size) { | ||
258 | /* The new node splits 'this' frag into two */ | ||
259 | struct jffs2_node_frag *newfrag2; | ||
260 | |||
261 | if (this->node) | ||
262 | dbg_fragtree2("split old frag 0x%04x-0x%04x, phys 0x%08x\n", | ||
263 | this->ofs, this->ofs+this->size, ref_offset(this->node->raw)); | ||
264 | else | ||
265 | dbg_fragtree2("split old hole frag 0x%04x-0x%04x\n", | ||
266 | this->ofs, this->ofs+this->size); | ||
267 | |||
268 | /* New second frag pointing to this's node */ | ||
269 | newfrag2 = new_fragment(this->node, newfrag->ofs + newfrag->size, | ||
270 | this->ofs + this->size - newfrag->ofs - newfrag->size); | ||
271 | if (unlikely(!newfrag2)) | ||
272 | return -ENOMEM; | ||
273 | if (this->node) | ||
274 | this->node->frags++; | ||
275 | |||
276 | /* Adjust size of original 'this' */ | ||
277 | this->size = newfrag->ofs - this->ofs; | ||
278 | |||
279 | /* Now, we know there's no node with offset | ||
280 | greater than this->ofs but smaller than | ||
281 | newfrag2->ofs or newfrag->ofs, for obvious | ||
282 | reasons. So we can do a tree insert from | ||
283 | 'this' to insert newfrag, and a tree insert | ||
284 | from newfrag to insert newfrag2. */ | ||
285 | jffs2_fragtree_insert(newfrag, this); | ||
286 | rb_insert_color(&newfrag->rb, root); | ||
287 | |||
288 | jffs2_fragtree_insert(newfrag2, newfrag); | ||
289 | rb_insert_color(&newfrag2->rb, root); | ||
290 | |||
291 | return 0; | ||
292 | } | ||
293 | /* New node just reduces 'this' frag in size, doesn't split it */ | ||
294 | this->size = newfrag->ofs - this->ofs; | ||
295 | |||
296 | /* Again, we know it lives down here in the tree */ | ||
297 | jffs2_fragtree_insert(newfrag, this); | ||
298 | rb_insert_color(&newfrag->rb, root); | ||
299 | } else { | ||
300 | /* New frag starts at the same point as 'this' used to. Replace | ||
301 | it in the tree without doing a delete and insertion */ | ||
302 | dbg_fragtree2("inserting newfrag (*%p),%d-%d in before 'this' (*%p),%d-%d\n", | ||
303 | newfrag, newfrag->ofs, newfrag->ofs+newfrag->size, this, this->ofs, this->ofs+this->size); | ||
304 | |||
305 | rb_replace_node(&this->rb, &newfrag->rb, root); | ||
306 | |||
307 | if (newfrag->ofs + newfrag->size >= this->ofs+this->size) { | ||
308 | dbg_fragtree2("obsoleting node frag %p (%x-%x)\n", this, this->ofs, this->ofs+this->size); | ||
309 | jffs2_obsolete_node_frag(c, this); | ||
310 | } else { | ||
311 | this->ofs += newfrag->size; | ||
312 | this->size -= newfrag->size; | ||
313 | |||
314 | jffs2_fragtree_insert(this, newfrag); | ||
315 | rb_insert_color(&this->rb, root); | ||
316 | return 0; | ||
317 | } | ||
318 | } | ||
319 | /* OK, now we have newfrag added in the correct place in the tree, but | ||
320 | frag_next(newfrag) may be a fragment which is overlapped by it | ||
321 | */ | ||
322 | while ((this = frag_next(newfrag)) && newfrag->ofs + newfrag->size >= this->ofs + this->size) { | ||
323 | /* 'this' frag is obsoleted completely. */ | ||
324 | dbg_fragtree2("obsoleting node frag %p (%x-%x) and removing from tree\n", | ||
325 | this, this->ofs, this->ofs+this->size); | ||
326 | rb_erase(&this->rb, root); | ||
327 | jffs2_obsolete_node_frag(c, this); | ||
328 | } | ||
329 | /* Now we're pointing at the first frag which isn't totally obsoleted by | ||
330 | the new frag */ | ||
331 | |||
332 | if (!this || newfrag->ofs + newfrag->size == this->ofs) | ||
333 | return 0; | ||
334 | |||
335 | /* Still some overlap but we don't need to move it in the tree */ | ||
336 | this->size = (this->ofs + this->size) - (newfrag->ofs + newfrag->size); | ||
337 | this->ofs = newfrag->ofs + newfrag->size; | ||
338 | |||
339 | /* And mark them REF_NORMAL so the GC takes a look at them */ | ||
340 | if (this->node) | ||
341 | mark_ref_normal(this->node->raw); | ||
342 | mark_ref_normal(newfrag->node->raw); | ||
343 | |||
344 | return 0; | ||
345 | } | ||
346 | |||
347 | /* | ||
348 | * Given an inode, probably with existing tree of fragments, add the new node | ||
349 | * to the fragment tree. | ||
350 | */ | ||
351 | int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn) | ||
147 | { | 352 | { |
148 | struct jffs2_raw_node_ref *ref, *valid_ref; | 353 | int ret; |
149 | struct jffs2_tmp_dnode_info *tn; | 354 | struct jffs2_node_frag *newfrag; |
150 | struct rb_root ret_tn = RB_ROOT; | ||
151 | struct jffs2_full_dirent *fd, *ret_fd = NULL; | ||
152 | union jffs2_node_union node; | ||
153 | size_t retlen; | ||
154 | int err; | ||
155 | |||
156 | *mctime_ver = 0; | ||
157 | |||
158 | D1(printk(KERN_DEBUG "jffs2_get_inode_nodes(): ino #%u\n", f->inocache->ino)); | ||
159 | 355 | ||
160 | spin_lock(&c->erase_completion_lock); | 356 | if (unlikely(!fn->size)) |
357 | return 0; | ||
161 | 358 | ||
162 | valid_ref = jffs2_first_valid_node(f->inocache->nodes); | 359 | newfrag = new_fragment(fn, fn->ofs, fn->size); |
360 | if (unlikely(!newfrag)) | ||
361 | return -ENOMEM; | ||
362 | newfrag->node->frags = 1; | ||
163 | 363 | ||
164 | if (!valid_ref && (f->inocache->ino != 1)) | 364 | dbg_fragtree("adding node %#04x-%#04x @0x%08x on flash, newfrag *%p\n", |
165 | printk(KERN_WARNING "Eep. No valid nodes for ino #%u\n", f->inocache->ino); | 365 | fn->ofs, fn->ofs+fn->size, ref_offset(fn->raw), newfrag); |
166 | 366 | ||
167 | while (valid_ref) { | 367 | ret = jffs2_add_frag_to_fragtree(c, &f->fragtree, newfrag); |
168 | /* We can hold a pointer to a non-obsolete node without the spinlock, | 368 | if (unlikely(ret)) |
169 | but _obsolete_ nodes may disappear at any time, if the block | 369 | return ret; |
170 | they're in gets erased. So if we mark 'ref' obsolete while we're | ||
171 | not holding the lock, it can go away immediately. For that reason, | ||
172 | we find the next valid node first, before processing 'ref'. | ||
173 | */ | ||
174 | ref = valid_ref; | ||
175 | valid_ref = jffs2_first_valid_node(ref->next_in_ino); | ||
176 | spin_unlock(&c->erase_completion_lock); | ||
177 | 370 | ||
178 | cond_resched(); | 371 | /* If we now share a page with other nodes, mark either previous |
372 | or next node REF_NORMAL, as appropriate. */ | ||
373 | if (newfrag->ofs & (PAGE_CACHE_SIZE-1)) { | ||
374 | struct jffs2_node_frag *prev = frag_prev(newfrag); | ||
375 | |||
376 | mark_ref_normal(fn->raw); | ||
377 | /* If we don't start at zero there's _always_ a previous */ | ||
378 | if (prev->node) | ||
379 | mark_ref_normal(prev->node->raw); | ||
380 | } | ||
381 | |||
382 | if ((newfrag->ofs+newfrag->size) & (PAGE_CACHE_SIZE-1)) { | ||
383 | struct jffs2_node_frag *next = frag_next(newfrag); | ||
384 | |||
385 | if (next) { | ||
386 | mark_ref_normal(fn->raw); | ||
387 | if (next->node) | ||
388 | mark_ref_normal(next->node->raw); | ||
389 | } | ||
390 | } | ||
391 | jffs2_dbg_fragtree_paranoia_check_nolock(f); | ||
392 | |||
393 | return 0; | ||
394 | } | ||
395 | |||
396 | /* | ||
397 | * Check the data CRC of the node. | ||
398 | * | ||
399 | * Returns: 0 if the data CRC is correct; | ||
400 | * 1 - if incorrect; | ||
401 | * error code if an error occured. | ||
402 | */ | ||
403 | static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn) | ||
404 | { | ||
405 | struct jffs2_raw_node_ref *ref = tn->fn->raw; | ||
406 | int err = 0, pointed = 0; | ||
407 | struct jffs2_eraseblock *jeb; | ||
408 | unsigned char *buffer; | ||
409 | uint32_t crc, ofs, retlen, len; | ||
410 | |||
411 | BUG_ON(tn->csize == 0); | ||
412 | |||
413 | if (!jffs2_is_writebuffered(c)) | ||
414 | goto adj_acc; | ||
415 | |||
416 | /* Calculate how many bytes were already checked */ | ||
417 | ofs = ref_offset(ref) + sizeof(struct jffs2_raw_inode); | ||
418 | len = ofs % c->wbuf_pagesize; | ||
419 | if (likely(len)) | ||
420 | len = c->wbuf_pagesize - len; | ||
421 | |||
422 | if (len >= tn->csize) { | ||
423 | dbg_readinode("no need to check node at %#08x, data length %u, data starts at %#08x - it has already been checked.\n", | ||
424 | ref_offset(ref), tn->csize, ofs); | ||
425 | goto adj_acc; | ||
426 | } | ||
427 | |||
428 | ofs += len; | ||
429 | len = tn->csize - len; | ||
430 | |||
431 | dbg_readinode("check node at %#08x, data length %u, partial CRC %#08x, correct CRC %#08x, data starts at %#08x, start checking from %#08x - %u bytes.\n", | ||
432 | ref_offset(ref), tn->csize, tn->partial_crc, tn->data_crc, ofs - len, ofs, len); | ||
433 | |||
434 | #ifndef __ECOS | ||
435 | /* TODO: instead, incapsulate point() stuff to jffs2_flash_read(), | ||
436 | * adding and jffs2_flash_read_end() interface. */ | ||
437 | if (c->mtd->point) { | ||
438 | err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer); | ||
439 | if (!err && retlen < tn->csize) { | ||
440 | JFFS2_WARNING("MTD point returned len too short: %u instead of %u.\n", retlen, tn->csize); | ||
441 | c->mtd->unpoint(c->mtd, buffer, ofs, len); | ||
442 | } else if (err) | ||
443 | JFFS2_WARNING("MTD point failed: error code %d.\n", err); | ||
444 | else | ||
445 | pointed = 1; /* succefully pointed to device */ | ||
446 | } | ||
447 | #endif | ||
448 | |||
449 | if (!pointed) { | ||
450 | buffer = kmalloc(len, GFP_KERNEL); | ||
451 | if (unlikely(!buffer)) | ||
452 | return -ENOMEM; | ||
179 | 453 | ||
180 | /* FIXME: point() */ | 454 | /* TODO: this is very frequent pattern, make it a separate |
181 | err = jffs2_flash_read(c, (ref_offset(ref)), | 455 | * routine */ |
182 | min_t(uint32_t, ref_totlen(c, NULL, ref), sizeof(node)), | 456 | err = jffs2_flash_read(c, ofs, len, &retlen, buffer); |
183 | &retlen, (void *)&node); | ||
184 | if (err) { | 457 | if (err) { |
185 | printk(KERN_WARNING "error %d reading node at 0x%08x in get_inode_nodes()\n", err, ref_offset(ref)); | 458 | JFFS2_ERROR("can not read %d bytes from 0x%08x, error code: %d.\n", len, ofs, err); |
186 | goto free_out; | 459 | goto free_out; |
187 | } | 460 | } |
188 | |||
189 | 461 | ||
190 | /* Check we've managed to read at least the common node header */ | 462 | if (retlen != len) { |
191 | if (retlen < min_t(uint32_t, ref_totlen(c, NULL, ref), sizeof(node.u))) { | 463 | JFFS2_ERROR("short read at %#08x: %d instead of %d.\n", ofs, retlen, len); |
192 | printk(KERN_WARNING "short read in get_inode_nodes()\n"); | ||
193 | err = -EIO; | 464 | err = -EIO; |
194 | goto free_out; | 465 | goto free_out; |
195 | } | 466 | } |
196 | 467 | } | |
197 | switch (je16_to_cpu(node.u.nodetype)) { | ||
198 | case JFFS2_NODETYPE_DIRENT: | ||
199 | D1(printk(KERN_DEBUG "Node at %08x (%d) is a dirent node\n", ref_offset(ref), ref_flags(ref))); | ||
200 | if (ref_flags(ref) == REF_UNCHECKED) { | ||
201 | printk(KERN_WARNING "BUG: Dirent node at 0x%08x never got checked? How?\n", ref_offset(ref)); | ||
202 | BUG(); | ||
203 | } | ||
204 | if (retlen < sizeof(node.d)) { | ||
205 | printk(KERN_WARNING "short read in get_inode_nodes()\n"); | ||
206 | err = -EIO; | ||
207 | goto free_out; | ||
208 | } | ||
209 | /* sanity check */ | ||
210 | if (PAD((node.d.nsize + sizeof (node.d))) != PAD(je32_to_cpu (node.d.totlen))) { | ||
211 | printk(KERN_NOTICE "jffs2_get_inode_nodes(): Illegal nsize in node at 0x%08x: nsize 0x%02x, totlen %04x\n", | ||
212 | ref_offset(ref), node.d.nsize, je32_to_cpu(node.d.totlen)); | ||
213 | jffs2_mark_node_obsolete(c, ref); | ||
214 | spin_lock(&c->erase_completion_lock); | ||
215 | continue; | ||
216 | } | ||
217 | if (je32_to_cpu(node.d.version) > *highest_version) | ||
218 | *highest_version = je32_to_cpu(node.d.version); | ||
219 | if (ref_obsolete(ref)) { | ||
220 | /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */ | ||
221 | printk(KERN_ERR "Dirent node at 0x%08x became obsolete while we weren't looking\n", | ||
222 | ref_offset(ref)); | ||
223 | BUG(); | ||
224 | } | ||
225 | |||
226 | fd = jffs2_alloc_full_dirent(node.d.nsize+1); | ||
227 | if (!fd) { | ||
228 | err = -ENOMEM; | ||
229 | goto free_out; | ||
230 | } | ||
231 | fd->raw = ref; | ||
232 | fd->version = je32_to_cpu(node.d.version); | ||
233 | fd->ino = je32_to_cpu(node.d.ino); | ||
234 | fd->type = node.d.type; | ||
235 | |||
236 | /* Pick out the mctime of the latest dirent */ | ||
237 | if(fd->version > *mctime_ver) { | ||
238 | *mctime_ver = fd->version; | ||
239 | *latest_mctime = je32_to_cpu(node.d.mctime); | ||
240 | } | ||
241 | 468 | ||
242 | /* memcpy as much of the name as possible from the raw | 469 | /* Continue calculating CRC */ |
243 | dirent we've already read from the flash | 470 | crc = crc32(tn->partial_crc, buffer, len); |
244 | */ | 471 | if(!pointed) |
245 | if (retlen > sizeof(struct jffs2_raw_dirent)) | 472 | kfree(buffer); |
246 | memcpy(&fd->name[0], &node.d.name[0], min_t(uint32_t, node.d.nsize, (retlen-sizeof(struct jffs2_raw_dirent)))); | 473 | #ifndef __ECOS |
247 | 474 | else | |
248 | /* Do we need to copy any more of the name directly | 475 | c->mtd->unpoint(c->mtd, buffer, ofs, len); |
249 | from the flash? | 476 | #endif |
250 | */ | ||
251 | if (node.d.nsize + sizeof(struct jffs2_raw_dirent) > retlen) { | ||
252 | /* FIXME: point() */ | ||
253 | int already = retlen - sizeof(struct jffs2_raw_dirent); | ||
254 | |||
255 | err = jffs2_flash_read(c, (ref_offset(ref)) + retlen, | ||
256 | node.d.nsize - already, &retlen, &fd->name[already]); | ||
257 | if (!err && retlen != node.d.nsize - already) | ||
258 | err = -EIO; | ||
259 | |||
260 | if (err) { | ||
261 | printk(KERN_WARNING "Read remainder of name in jffs2_get_inode_nodes(): error %d\n", err); | ||
262 | jffs2_free_full_dirent(fd); | ||
263 | goto free_out; | ||
264 | } | ||
265 | } | ||
266 | fd->nhash = full_name_hash(fd->name, node.d.nsize); | ||
267 | fd->next = NULL; | ||
268 | fd->name[node.d.nsize] = '\0'; | ||
269 | /* Wheee. We now have a complete jffs2_full_dirent structure, with | ||
270 | the name in it and everything. Link it into the list | ||
271 | */ | ||
272 | D1(printk(KERN_DEBUG "Adding fd \"%s\", ino #%u\n", fd->name, fd->ino)); | ||
273 | jffs2_add_fd_to_list(c, fd, &ret_fd); | ||
274 | break; | ||
275 | |||
276 | case JFFS2_NODETYPE_INODE: | ||
277 | D1(printk(KERN_DEBUG "Node at %08x (%d) is a data node\n", ref_offset(ref), ref_flags(ref))); | ||
278 | if (retlen < sizeof(node.i)) { | ||
279 | printk(KERN_WARNING "read too short for dnode\n"); | ||
280 | err = -EIO; | ||
281 | goto free_out; | ||
282 | } | ||
283 | if (je32_to_cpu(node.i.version) > *highest_version) | ||
284 | *highest_version = je32_to_cpu(node.i.version); | ||
285 | D1(printk(KERN_DEBUG "version %d, highest_version now %d\n", je32_to_cpu(node.i.version), *highest_version)); | ||
286 | |||
287 | if (ref_obsolete(ref)) { | ||
288 | /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */ | ||
289 | printk(KERN_ERR "Inode node at 0x%08x became obsolete while we weren't looking\n", | ||
290 | ref_offset(ref)); | ||
291 | BUG(); | ||
292 | } | ||
293 | 477 | ||
294 | /* If we've never checked the CRCs on this node, check them now. */ | 478 | if (crc != tn->data_crc) { |
295 | if (ref_flags(ref) == REF_UNCHECKED) { | 479 | JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n", |
296 | uint32_t crc, len; | 480 | ofs, tn->data_crc, crc); |
297 | struct jffs2_eraseblock *jeb; | 481 | return 1; |
298 | 482 | } | |
299 | crc = crc32(0, &node, sizeof(node.i)-8); | ||
300 | if (crc != je32_to_cpu(node.i.node_crc)) { | ||
301 | printk(KERN_NOTICE "jffs2_get_inode_nodes(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", | ||
302 | ref_offset(ref), je32_to_cpu(node.i.node_crc), crc); | ||
303 | jffs2_mark_node_obsolete(c, ref); | ||
304 | spin_lock(&c->erase_completion_lock); | ||
305 | continue; | ||
306 | } | ||
307 | |||
308 | /* sanity checks */ | ||
309 | if ( je32_to_cpu(node.i.offset) > je32_to_cpu(node.i.isize) || | ||
310 | PAD(je32_to_cpu(node.i.csize) + sizeof (node.i)) != PAD(je32_to_cpu(node.i.totlen))) { | ||
311 | printk(KERN_NOTICE "jffs2_get_inode_nodes(): Inode corrupted at 0x%08x, totlen %d, #ino %d, version %d, isize %d, csize %d, dsize %d \n", | ||
312 | ref_offset(ref), je32_to_cpu(node.i.totlen), je32_to_cpu(node.i.ino), | ||
313 | je32_to_cpu(node.i.version), je32_to_cpu(node.i.isize), | ||
314 | je32_to_cpu(node.i.csize), je32_to_cpu(node.i.dsize)); | ||
315 | jffs2_mark_node_obsolete(c, ref); | ||
316 | spin_lock(&c->erase_completion_lock); | ||
317 | continue; | ||
318 | } | ||
319 | 483 | ||
320 | if (node.i.compr != JFFS2_COMPR_ZERO && je32_to_cpu(node.i.csize)) { | 484 | adj_acc: |
321 | unsigned char *buf=NULL; | 485 | jeb = &c->blocks[ref->flash_offset / c->sector_size]; |
322 | uint32_t pointed = 0; | 486 | len = ref_totlen(c, jeb, ref); |
323 | #ifndef __ECOS | 487 | |
324 | if (c->mtd->point) { | 488 | /* |
325 | err = c->mtd->point (c->mtd, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize), | 489 | * Mark the node as having been checked and fix the |
326 | &retlen, &buf); | 490 | * accounting accordingly. |
327 | if (!err && retlen < je32_to_cpu(node.i.csize)) { | 491 | */ |
328 | D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", retlen)); | 492 | spin_lock(&c->erase_completion_lock); |
329 | c->mtd->unpoint(c->mtd, buf, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize)); | 493 | jeb->used_size += len; |
330 | } else if (err){ | 494 | jeb->unchecked_size -= len; |
331 | D1(printk(KERN_DEBUG "MTD point failed %d\n", err)); | 495 | c->used_size += len; |
332 | } else | 496 | c->unchecked_size -= len; |
333 | pointed = 1; /* succefully pointed to device */ | 497 | spin_unlock(&c->erase_completion_lock); |
334 | } | 498 | |
335 | #endif | 499 | return 0; |
336 | if(!pointed){ | 500 | |
337 | buf = kmalloc(je32_to_cpu(node.i.csize), GFP_KERNEL); | 501 | free_out: |
338 | if (!buf) | 502 | if(!pointed) |
339 | return -ENOMEM; | 503 | kfree(buffer); |
340 | |||
341 | err = jffs2_flash_read(c, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize), | ||
342 | &retlen, buf); | ||
343 | if (!err && retlen != je32_to_cpu(node.i.csize)) | ||
344 | err = -EIO; | ||
345 | if (err) { | ||
346 | kfree(buf); | ||
347 | return err; | ||
348 | } | ||
349 | } | ||
350 | crc = crc32(0, buf, je32_to_cpu(node.i.csize)); | ||
351 | if(!pointed) | ||
352 | kfree(buf); | ||
353 | #ifndef __ECOS | 504 | #ifndef __ECOS |
354 | else | 505 | else |
355 | c->mtd->unpoint(c->mtd, buf, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize)); | 506 | c->mtd->unpoint(c->mtd, buffer, ofs, len); |
356 | #endif | 507 | #endif |
508 | return err; | ||
509 | } | ||
357 | 510 | ||
358 | if (crc != je32_to_cpu(node.i.data_crc)) { | 511 | /* |
359 | printk(KERN_NOTICE "jffs2_get_inode_nodes(): Data CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", | 512 | * Helper function for jffs2_add_older_frag_to_fragtree(). |
360 | ref_offset(ref), je32_to_cpu(node.i.data_crc), crc); | 513 | * |
361 | jffs2_mark_node_obsolete(c, ref); | 514 | * Checks the node if we are in the checking stage. |
362 | spin_lock(&c->erase_completion_lock); | 515 | */ |
363 | continue; | 516 | static inline int check_node(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_tmp_dnode_info *tn) |
364 | } | 517 | { |
365 | 518 | int ret; | |
366 | } | ||
367 | 519 | ||
368 | /* Mark the node as having been checked and fix the accounting accordingly */ | 520 | BUG_ON(ref_obsolete(tn->fn->raw)); |
369 | spin_lock(&c->erase_completion_lock); | 521 | |
370 | jeb = &c->blocks[ref->flash_offset / c->sector_size]; | 522 | /* We only check the data CRC of unchecked nodes */ |
371 | len = ref_totlen(c, jeb, ref); | 523 | if (ref_flags(tn->fn->raw) != REF_UNCHECKED) |
372 | 524 | return 0; | |
373 | jeb->used_size += len; | 525 | |
374 | jeb->unchecked_size -= len; | 526 | dbg_fragtree2("check node %#04x-%#04x, phys offs %#08x.\n", |
375 | c->used_size += len; | 527 | tn->fn->ofs, tn->fn->ofs + tn->fn->size, ref_offset(tn->fn->raw)); |
376 | c->unchecked_size -= len; | 528 | |
377 | 529 | ret = check_node_data(c, tn); | |
378 | /* If node covers at least a whole page, or if it starts at the | 530 | if (unlikely(ret < 0)) { |
379 | beginning of a page and runs to the end of the file, or if | 531 | JFFS2_ERROR("check_node_data() returned error: %d.\n", |
380 | it's a hole node, mark it REF_PRISTINE, else REF_NORMAL. | 532 | ret); |
381 | 533 | } else if (unlikely(ret > 0)) { | |
382 | If it's actually overlapped, it'll get made NORMAL (or OBSOLETE) | 534 | dbg_fragtree2("CRC error, mark it obsolete.\n"); |
383 | when the overlapping node(s) get added to the tree anyway. | 535 | jffs2_mark_node_obsolete(c, tn->fn->raw); |
384 | */ | 536 | } |
385 | if ((je32_to_cpu(node.i.dsize) >= PAGE_CACHE_SIZE) || | 537 | |
386 | ( ((je32_to_cpu(node.i.offset)&(PAGE_CACHE_SIZE-1))==0) && | 538 | return ret; |
387 | (je32_to_cpu(node.i.dsize)+je32_to_cpu(node.i.offset) == je32_to_cpu(node.i.isize)))) { | 539 | } |
388 | D1(printk(KERN_DEBUG "Marking node at 0x%08x REF_PRISTINE\n", ref_offset(ref))); | 540 | |
389 | ref->flash_offset = ref_offset(ref) | REF_PRISTINE; | 541 | /* |
390 | } else { | 542 | * Helper function for jffs2_add_older_frag_to_fragtree(). |
391 | D1(printk(KERN_DEBUG "Marking node at 0x%08x REF_NORMAL\n", ref_offset(ref))); | 543 | * |
392 | ref->flash_offset = ref_offset(ref) | REF_NORMAL; | 544 | * Called when the new fragment that is being inserted |
393 | } | 545 | * splits a hole fragment. |
394 | spin_unlock(&c->erase_completion_lock); | 546 | */ |
547 | static int split_hole(struct jffs2_sb_info *c, struct rb_root *root, | ||
548 | struct jffs2_node_frag *newfrag, struct jffs2_node_frag *hole) | ||
549 | { | ||
550 | dbg_fragtree2("fragment %#04x-%#04x splits the hole %#04x-%#04x\n", | ||
551 | newfrag->ofs, newfrag->ofs + newfrag->size, hole->ofs, hole->ofs + hole->size); | ||
552 | |||
553 | if (hole->ofs == newfrag->ofs) { | ||
554 | /* | ||
555 | * Well, the new fragment actually starts at the same offset as | ||
556 | * the hole. | ||
557 | */ | ||
558 | if (hole->ofs + hole->size > newfrag->ofs + newfrag->size) { | ||
559 | /* | ||
560 | * We replace the overlapped left part of the hole by | ||
561 | * the new node. | ||
562 | */ | ||
563 | |||
564 | dbg_fragtree2("insert fragment %#04x-%#04x and cut the left part of the hole\n", | ||
565 | newfrag->ofs, newfrag->ofs + newfrag->size); | ||
566 | rb_replace_node(&hole->rb, &newfrag->rb, root); | ||
567 | |||
568 | hole->ofs += newfrag->size; | ||
569 | hole->size -= newfrag->size; | ||
570 | |||
571 | /* | ||
572 | * We know that 'hole' should be the right hand | ||
573 | * fragment. | ||
574 | */ | ||
575 | jffs2_fragtree_insert(hole, newfrag); | ||
576 | rb_insert_color(&hole->rb, root); | ||
577 | } else { | ||
578 | /* | ||
579 | * Ah, the new fragment is of the same size as the hole. | ||
580 | * Relace the hole by it. | ||
581 | */ | ||
582 | dbg_fragtree2("insert fragment %#04x-%#04x and overwrite hole\n", | ||
583 | newfrag->ofs, newfrag->ofs + newfrag->size); | ||
584 | rb_replace_node(&hole->rb, &newfrag->rb, root); | ||
585 | jffs2_free_node_frag(hole); | ||
586 | } | ||
587 | } else { | ||
588 | /* The new fragment lefts some hole space at the left */ | ||
589 | |||
590 | struct jffs2_node_frag * newfrag2 = NULL; | ||
591 | |||
592 | if (hole->ofs + hole->size > newfrag->ofs + newfrag->size) { | ||
593 | /* The new frag also lefts some space at the right */ | ||
594 | newfrag2 = new_fragment(NULL, newfrag->ofs + | ||
595 | newfrag->size, hole->ofs + hole->size | ||
596 | - newfrag->ofs - newfrag->size); | ||
597 | if (unlikely(!newfrag2)) { | ||
598 | jffs2_free_node_frag(newfrag); | ||
599 | return -ENOMEM; | ||
395 | } | 600 | } |
601 | } | ||
602 | |||
603 | hole->size = newfrag->ofs - hole->ofs; | ||
604 | dbg_fragtree2("left the hole %#04x-%#04x at the left and inserd fragment %#04x-%#04x\n", | ||
605 | hole->ofs, hole->ofs + hole->size, newfrag->ofs, newfrag->ofs + newfrag->size); | ||
606 | |||
607 | jffs2_fragtree_insert(newfrag, hole); | ||
608 | rb_insert_color(&newfrag->rb, root); | ||
609 | |||
610 | if (newfrag2) { | ||
611 | dbg_fragtree2("left the hole %#04x-%#04x at the right\n", | ||
612 | newfrag2->ofs, newfrag2->ofs + newfrag2->size); | ||
613 | jffs2_fragtree_insert(newfrag2, newfrag); | ||
614 | rb_insert_color(&newfrag2->rb, root); | ||
615 | } | ||
616 | } | ||
617 | |||
618 | return 0; | ||
619 | } | ||
620 | |||
621 | /* | ||
622 | * This function is used when we build inode. It expects the nodes are passed | ||
623 | * in the decreasing version order. The whole point of this is to improve the | ||
624 | * inodes checking on NAND: we check the nodes' data CRC only when they are not | ||
625 | * obsoleted. Previously, add_frag_to_fragtree() function was used and | ||
626 | * nodes were passed to it in the increasing version ordes and CRCs of all | ||
627 | * nodes were checked. | ||
628 | * | ||
629 | * Note: tn->fn->size shouldn't be zero. | ||
630 | * | ||
631 | * Returns 0 if the node was inserted | ||
632 | * 1 if it wasn't inserted (since it is obsolete) | ||
633 | * < 0 an if error occured | ||
634 | */ | ||
635 | int jffs2_add_older_frag_to_fragtree(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | ||
636 | struct jffs2_tmp_dnode_info *tn) | ||
637 | { | ||
638 | struct jffs2_node_frag *this, *newfrag; | ||
639 | uint32_t lastend; | ||
640 | struct jffs2_full_dnode *fn = tn->fn; | ||
641 | struct rb_root *root = &f->fragtree; | ||
642 | uint32_t fn_size = fn->size, fn_ofs = fn->ofs; | ||
643 | int err, checked = 0; | ||
644 | int ref_flag; | ||
645 | |||
646 | dbg_fragtree("insert fragment %#04x-%#04x, ver %u\n", fn_ofs, fn_ofs + fn_size, tn->version); | ||
647 | |||
648 | /* Skip all the nodes which are completed before this one starts */ | ||
649 | this = jffs2_lookup_node_frag(root, fn_ofs); | ||
650 | if (this) | ||
651 | dbg_fragtree2("'this' found %#04x-%#04x (%s)\n", this->ofs, this->ofs + this->size, this->node ? "data" : "hole"); | ||
652 | |||
653 | if (this) | ||
654 | lastend = this->ofs + this->size; | ||
655 | else | ||
656 | lastend = 0; | ||
657 | |||
658 | /* Detect the preliminary type of node */ | ||
659 | if (fn->size >= PAGE_CACHE_SIZE) | ||
660 | ref_flag = REF_PRISTINE; | ||
661 | else | ||
662 | ref_flag = REF_NORMAL; | ||
663 | |||
664 | /* See if we ran off the end of the root */ | ||
665 | if (lastend <= fn_ofs) { | ||
666 | /* We did */ | ||
667 | |||
668 | /* | ||
669 | * We are going to insert the new node into the | ||
670 | * fragment tree, so check it. | ||
671 | */ | ||
672 | err = check_node(c, f, tn); | ||
673 | if (err != 0) | ||
674 | return err; | ||
675 | |||
676 | fn->frags = 1; | ||
677 | |||
678 | newfrag = new_fragment(fn, fn_ofs, fn_size); | ||
679 | if (unlikely(!newfrag)) | ||
680 | return -ENOMEM; | ||
681 | |||
682 | err = no_overlapping_node(c, root, newfrag, this, lastend); | ||
683 | if (unlikely(err != 0)) { | ||
684 | jffs2_free_node_frag(newfrag); | ||
685 | return err; | ||
686 | } | ||
687 | |||
688 | goto out_ok; | ||
689 | } | ||
396 | 690 | ||
397 | tn = jffs2_alloc_tmp_dnode_info(); | 691 | fn->frags = 0; |
398 | if (!tn) { | 692 | |
399 | D1(printk(KERN_DEBUG "alloc tn failed\n")); | 693 | while (1) { |
400 | err = -ENOMEM; | 694 | /* |
401 | goto free_out; | 695 | * Here we have: |
696 | * fn_ofs < this->ofs + this->size && fn_ofs >= this->ofs. | ||
697 | * | ||
698 | * Remember, 'this' has higher version, any non-hole node | ||
699 | * which is already in the fragtree is newer then the newly | ||
700 | * inserted. | ||
701 | */ | ||
702 | if (!this->node) { | ||
703 | /* | ||
704 | * 'this' is the hole fragment, so at least the | ||
705 | * beginning of the new fragment is valid. | ||
706 | */ | ||
707 | |||
708 | /* | ||
709 | * We are going to insert the new node into the | ||
710 | * fragment tree, so check it. | ||
711 | */ | ||
712 | if (!checked) { | ||
713 | err = check_node(c, f, tn); | ||
714 | if (unlikely(err != 0)) | ||
715 | return err; | ||
716 | checked = 1; | ||
402 | } | 717 | } |
403 | 718 | ||
404 | tn->fn = jffs2_alloc_full_dnode(); | 719 | if (this->ofs + this->size >= fn_ofs + fn_size) { |
405 | if (!tn->fn) { | 720 | /* We split the hole on two parts */ |
406 | D1(printk(KERN_DEBUG "alloc fn failed\n")); | 721 | |
407 | err = -ENOMEM; | 722 | fn->frags += 1; |
408 | jffs2_free_tmp_dnode_info(tn); | 723 | newfrag = new_fragment(fn, fn_ofs, fn_size); |
409 | goto free_out; | 724 | if (unlikely(!newfrag)) |
725 | return -ENOMEM; | ||
726 | |||
727 | err = split_hole(c, root, newfrag, this); | ||
728 | if (unlikely(err)) | ||
729 | return err; | ||
730 | goto out_ok; | ||
410 | } | 731 | } |
411 | tn->version = je32_to_cpu(node.i.version); | 732 | |
412 | tn->fn->ofs = je32_to_cpu(node.i.offset); | 733 | /* |
413 | /* There was a bug where we wrote hole nodes out with | 734 | * The beginning of the new fragment is valid since it |
414 | csize/dsize swapped. Deal with it */ | 735 | * overlaps the hole node. |
415 | if (node.i.compr == JFFS2_COMPR_ZERO && !je32_to_cpu(node.i.dsize) && je32_to_cpu(node.i.csize)) | 736 | */ |
416 | tn->fn->size = je32_to_cpu(node.i.csize); | 737 | |
417 | else // normal case... | 738 | ref_flag = REF_NORMAL; |
418 | tn->fn->size = je32_to_cpu(node.i.dsize); | 739 | |
419 | tn->fn->raw = ref; | 740 | fn->frags += 1; |
420 | D1(printk(KERN_DEBUG "dnode @%08x: ver %u, offset %04x, dsize %04x\n", | 741 | newfrag = new_fragment(fn, fn_ofs, |
421 | ref_offset(ref), je32_to_cpu(node.i.version), | 742 | this->ofs + this->size - fn_ofs); |
422 | je32_to_cpu(node.i.offset), je32_to_cpu(node.i.dsize))); | 743 | if (unlikely(!newfrag)) |
423 | jffs2_add_tn_to_tree(tn, &ret_tn); | 744 | return -ENOMEM; |
424 | break; | 745 | |
425 | 746 | if (fn_ofs == this->ofs) { | |
426 | default: | 747 | /* |
427 | if (ref_flags(ref) == REF_UNCHECKED) { | 748 | * The new node starts at the same offset as |
428 | struct jffs2_eraseblock *jeb; | 749 | * the hole and supersieds the hole. |
429 | uint32_t len; | 750 | */ |
430 | 751 | dbg_fragtree2("add the new fragment instead of hole %#04x-%#04x, refcnt %d\n", | |
431 | printk(KERN_ERR "Eep. Unknown node type %04x at %08x was marked REF_UNCHECKED\n", | 752 | fn_ofs, fn_ofs + this->ofs + this->size - fn_ofs, fn->frags); |
432 | je16_to_cpu(node.u.nodetype), ref_offset(ref)); | 753 | |
433 | 754 | rb_replace_node(&this->rb, &newfrag->rb, root); | |
434 | /* Mark the node as having been checked and fix the accounting accordingly */ | 755 | jffs2_free_node_frag(this); |
435 | spin_lock(&c->erase_completion_lock); | 756 | } else { |
436 | jeb = &c->blocks[ref->flash_offset / c->sector_size]; | 757 | /* |
437 | len = ref_totlen(c, jeb, ref); | 758 | * The hole becomes shorter as its right part |
438 | 759 | * is supersieded by the new fragment. | |
439 | jeb->used_size += len; | 760 | */ |
440 | jeb->unchecked_size -= len; | 761 | dbg_fragtree2("reduce size of hole %#04x-%#04x to %#04x-%#04x\n", |
441 | c->used_size += len; | 762 | this->ofs, this->ofs + this->size, this->ofs, this->ofs + this->size - newfrag->size); |
442 | c->unchecked_size -= len; | 763 | |
443 | 764 | dbg_fragtree2("add new fragment %#04x-%#04x, refcnt %d\n", fn_ofs, | |
444 | mark_ref_normal(ref); | 765 | fn_ofs + this->ofs + this->size - fn_ofs, fn->frags); |
445 | spin_unlock(&c->erase_completion_lock); | 766 | |
767 | this->size -= newfrag->size; | ||
768 | jffs2_fragtree_insert(newfrag, this); | ||
769 | rb_insert_color(&newfrag->rb, root); | ||
446 | } | 770 | } |
447 | node.u.nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(node.u.nodetype)); | 771 | |
448 | if (crc32(0, &node, sizeof(struct jffs2_unknown_node)-4) != je32_to_cpu(node.u.hdr_crc)) { | 772 | fn_ofs += newfrag->size; |
449 | /* Hmmm. This should have been caught at scan time. */ | 773 | fn_size -= newfrag->size; |
450 | printk(KERN_ERR "Node header CRC failed at %08x. But it must have been OK earlier.\n", | 774 | this = rb_entry(rb_next(&newfrag->rb), |
451 | ref_offset(ref)); | 775 | struct jffs2_node_frag, rb); |
452 | printk(KERN_ERR "Node was: { %04x, %04x, %08x, %08x }\n", | 776 | |
453 | je16_to_cpu(node.u.magic), je16_to_cpu(node.u.nodetype), je32_to_cpu(node.u.totlen), | 777 | dbg_fragtree2("switch to the next 'this' fragment: %#04x-%#04x %s\n", |
454 | je32_to_cpu(node.u.hdr_crc)); | 778 | this->ofs, this->ofs + this->size, this->node ? "(data)" : "(hole)"); |
455 | jffs2_mark_node_obsolete(c, ref); | 779 | } |
456 | } else switch(je16_to_cpu(node.u.nodetype) & JFFS2_COMPAT_MASK) { | 780 | |
457 | case JFFS2_FEATURE_INCOMPAT: | 781 | /* |
458 | printk(KERN_NOTICE "Unknown INCOMPAT nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref)); | 782 | * 'This' node is not the hole so it obsoletes the new fragment |
459 | /* EEP */ | 783 | * either fully or partially. |
460 | BUG(); | 784 | */ |
461 | break; | 785 | if (this->ofs + this->size >= fn_ofs + fn_size) { |
462 | case JFFS2_FEATURE_ROCOMPAT: | 786 | /* The new node is obsolete, drop it */ |
463 | printk(KERN_NOTICE "Unknown ROCOMPAT nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref)); | 787 | if (fn->frags == 0) { |
464 | if (!(c->flags & JFFS2_SB_FLAG_RO)) | 788 | dbg_fragtree2("%#04x-%#04x is obsolete, mark it obsolete\n", fn_ofs, fn_ofs + fn_size); |
465 | BUG(); | 789 | ref_flag = REF_OBSOLETE; |
466 | break; | ||
467 | case JFFS2_FEATURE_RWCOMPAT_COPY: | ||
468 | printk(KERN_NOTICE "Unknown RWCOMPAT_COPY nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref)); | ||
469 | break; | ||
470 | case JFFS2_FEATURE_RWCOMPAT_DELETE: | ||
471 | printk(KERN_NOTICE "Unknown RWCOMPAT_DELETE nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref)); | ||
472 | jffs2_mark_node_obsolete(c, ref); | ||
473 | break; | ||
474 | } | 790 | } |
791 | goto out_ok; | ||
792 | } else { | ||
793 | struct jffs2_node_frag *new_this; | ||
794 | |||
795 | /* 'This' node obsoletes the beginning of the new node */ | ||
796 | dbg_fragtree2("the beginning %#04x-%#04x is obsolete\n", fn_ofs, this->ofs + this->size); | ||
797 | |||
798 | ref_flag = REF_NORMAL; | ||
799 | |||
800 | fn_size -= this->ofs + this->size - fn_ofs; | ||
801 | fn_ofs = this->ofs + this->size; | ||
802 | dbg_fragtree2("now considering %#04x-%#04x\n", fn_ofs, fn_ofs + fn_size); | ||
803 | |||
804 | new_this = rb_entry(rb_next(&this->rb), struct jffs2_node_frag, rb); | ||
805 | if (!new_this) { | ||
806 | /* | ||
807 | * There is no next fragment. Add the rest of | ||
808 | * the new node as the right-hand child. | ||
809 | */ | ||
810 | if (!checked) { | ||
811 | err = check_node(c, f, tn); | ||
812 | if (unlikely(err != 0)) | ||
813 | return err; | ||
814 | checked = 1; | ||
815 | } | ||
475 | 816 | ||
817 | fn->frags += 1; | ||
818 | newfrag = new_fragment(fn, fn_ofs, fn_size); | ||
819 | if (unlikely(!newfrag)) | ||
820 | return -ENOMEM; | ||
821 | |||
822 | dbg_fragtree2("there are no more fragments, insert %#04x-%#04x\n", | ||
823 | newfrag->ofs, newfrag->ofs + newfrag->size); | ||
824 | rb_link_node(&newfrag->rb, &this->rb, &this->rb.rb_right); | ||
825 | rb_insert_color(&newfrag->rb, root); | ||
826 | goto out_ok; | ||
827 | } else { | ||
828 | this = new_this; | ||
829 | dbg_fragtree2("switch to the next 'this' fragment: %#04x-%#04x %s\n", | ||
830 | this->ofs, this->ofs + this->size, this->node ? "(data)" : "(hole)"); | ||
831 | } | ||
476 | } | 832 | } |
477 | spin_lock(&c->erase_completion_lock); | 833 | } |
834 | |||
835 | out_ok: | ||
836 | BUG_ON(fn->size < PAGE_CACHE_SIZE && ref_flag == REF_PRISTINE); | ||
478 | 837 | ||
838 | if (ref_flag == REF_OBSOLETE) { | ||
839 | dbg_fragtree2("the node is obsolete now\n"); | ||
840 | /* jffs2_mark_node_obsolete() will adjust space accounting */ | ||
841 | jffs2_mark_node_obsolete(c, fn->raw); | ||
842 | return 1; | ||
479 | } | 843 | } |
844 | |||
845 | dbg_fragtree2("the node is \"%s\" now\n", ref_flag == REF_NORMAL ? "REF_NORMAL" : "REF_PRISTINE"); | ||
846 | |||
847 | /* Space accounting was adjusted at check_node_data() */ | ||
848 | spin_lock(&c->erase_completion_lock); | ||
849 | fn->raw->flash_offset = ref_offset(fn->raw) | ref_flag; | ||
480 | spin_unlock(&c->erase_completion_lock); | 850 | spin_unlock(&c->erase_completion_lock); |
481 | *tnp = ret_tn; | ||
482 | *fdp = ret_fd; | ||
483 | 851 | ||
484 | return 0; | 852 | return 0; |
485 | |||
486 | free_out: | ||
487 | jffs2_free_tmp_dnode_info_list(&ret_tn); | ||
488 | jffs2_free_full_dirent_list(ret_fd); | ||
489 | return err; | ||
490 | } | 853 | } |
491 | 854 | ||
492 | void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state) | 855 | void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state) |
@@ -499,24 +862,21 @@ void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache | |||
499 | 862 | ||
500 | /* During mount, this needs no locking. During normal operation, its | 863 | /* During mount, this needs no locking. During normal operation, its |
501 | callers want to do other stuff while still holding the inocache_lock. | 864 | callers want to do other stuff while still holding the inocache_lock. |
502 | Rather than introducing special case get_ino_cache functions or | 865 | Rather than introducing special case get_ino_cache functions or |
503 | callbacks, we just let the caller do the locking itself. */ | 866 | callbacks, we just let the caller do the locking itself. */ |
504 | 867 | ||
505 | struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, uint32_t ino) | 868 | struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, uint32_t ino) |
506 | { | 869 | { |
507 | struct jffs2_inode_cache *ret; | 870 | struct jffs2_inode_cache *ret; |
508 | 871 | ||
509 | D2(printk(KERN_DEBUG "jffs2_get_ino_cache(): ino %u\n", ino)); | ||
510 | |||
511 | ret = c->inocache_list[ino % INOCACHE_HASHSIZE]; | 872 | ret = c->inocache_list[ino % INOCACHE_HASHSIZE]; |
512 | while (ret && ret->ino < ino) { | 873 | while (ret && ret->ino < ino) { |
513 | ret = ret->next; | 874 | ret = ret->next; |
514 | } | 875 | } |
515 | 876 | ||
516 | if (ret && ret->ino != ino) | 877 | if (ret && ret->ino != ino) |
517 | ret = NULL; | 878 | ret = NULL; |
518 | 879 | ||
519 | D2(printk(KERN_DEBUG "jffs2_get_ino_cache found %p for ino %u\n", ret, ino)); | ||
520 | return ret; | 880 | return ret; |
521 | } | 881 | } |
522 | 882 | ||
@@ -528,7 +888,7 @@ void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new | |||
528 | if (!new->ino) | 888 | if (!new->ino) |
529 | new->ino = ++c->highest_ino; | 889 | new->ino = ++c->highest_ino; |
530 | 890 | ||
531 | D2(printk(KERN_DEBUG "jffs2_add_ino_cache: Add %p (ino #%u)\n", new, new->ino)); | 891 | dbg_inocache("add %p (ino #%u)\n", new, new->ino); |
532 | 892 | ||
533 | prev = &c->inocache_list[new->ino % INOCACHE_HASHSIZE]; | 893 | prev = &c->inocache_list[new->ino % INOCACHE_HASHSIZE]; |
534 | 894 | ||
@@ -544,11 +904,12 @@ void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new | |||
544 | void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old) | 904 | void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old) |
545 | { | 905 | { |
546 | struct jffs2_inode_cache **prev; | 906 | struct jffs2_inode_cache **prev; |
547 | D1(printk(KERN_DEBUG "jffs2_del_ino_cache: Del %p (ino #%u)\n", old, old->ino)); | 907 | |
908 | dbg_inocache("del %p (ino #%u)\n", old, old->ino); | ||
548 | spin_lock(&c->inocache_lock); | 909 | spin_lock(&c->inocache_lock); |
549 | 910 | ||
550 | prev = &c->inocache_list[old->ino % INOCACHE_HASHSIZE]; | 911 | prev = &c->inocache_list[old->ino % INOCACHE_HASHSIZE]; |
551 | 912 | ||
552 | while ((*prev) && (*prev)->ino < old->ino) { | 913 | while ((*prev) && (*prev)->ino < old->ino) { |
553 | prev = &(*prev)->next; | 914 | prev = &(*prev)->next; |
554 | } | 915 | } |
@@ -558,7 +919,7 @@ void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old) | |||
558 | 919 | ||
559 | /* Free it now unless it's in READING or CLEARING state, which | 920 | /* Free it now unless it's in READING or CLEARING state, which |
560 | are the transitions upon read_inode() and clear_inode(). The | 921 | are the transitions upon read_inode() and clear_inode(). The |
561 | rest of the time we know nobody else is looking at it, and | 922 | rest of the time we know nobody else is looking at it, and |
562 | if it's held by read_inode() or clear_inode() they'll free it | 923 | if it's held by read_inode() or clear_inode() they'll free it |
563 | for themselves. */ | 924 | for themselves. */ |
564 | if (old->state != INO_STATE_READING && old->state != INO_STATE_CLEARING) | 925 | if (old->state != INO_STATE_READING && old->state != INO_STATE_CLEARING) |
@@ -571,7 +932,7 @@ void jffs2_free_ino_caches(struct jffs2_sb_info *c) | |||
571 | { | 932 | { |
572 | int i; | 933 | int i; |
573 | struct jffs2_inode_cache *this, *next; | 934 | struct jffs2_inode_cache *this, *next; |
574 | 935 | ||
575 | for (i=0; i<INOCACHE_HASHSIZE; i++) { | 936 | for (i=0; i<INOCACHE_HASHSIZE; i++) { |
576 | this = c->inocache_list[i]; | 937 | this = c->inocache_list[i]; |
577 | while (this) { | 938 | while (this) { |
@@ -598,38 +959,30 @@ void jffs2_free_raw_node_refs(struct jffs2_sb_info *c) | |||
598 | c->blocks[i].first_node = c->blocks[i].last_node = NULL; | 959 | c->blocks[i].first_node = c->blocks[i].last_node = NULL; |
599 | } | 960 | } |
600 | } | 961 | } |
601 | 962 | ||
602 | struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset) | 963 | struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset) |
603 | { | 964 | { |
604 | /* The common case in lookup is that there will be a node | 965 | /* The common case in lookup is that there will be a node |
605 | which precisely matches. So we go looking for that first */ | 966 | which precisely matches. So we go looking for that first */ |
606 | struct rb_node *next; | 967 | struct rb_node *next; |
607 | struct jffs2_node_frag *prev = NULL; | 968 | struct jffs2_node_frag *prev = NULL; |
608 | struct jffs2_node_frag *frag = NULL; | 969 | struct jffs2_node_frag *frag = NULL; |
609 | 970 | ||
610 | D2(printk(KERN_DEBUG "jffs2_lookup_node_frag(%p, %d)\n", fragtree, offset)); | 971 | dbg_fragtree2("root %p, offset %d\n", fragtree, offset); |
611 | 972 | ||
612 | next = fragtree->rb_node; | 973 | next = fragtree->rb_node; |
613 | 974 | ||
614 | while(next) { | 975 | while(next) { |
615 | frag = rb_entry(next, struct jffs2_node_frag, rb); | 976 | frag = rb_entry(next, struct jffs2_node_frag, rb); |
616 | 977 | ||
617 | D2(printk(KERN_DEBUG "Considering frag %d-%d (%p). left %p, right %p\n", | ||
618 | frag->ofs, frag->ofs+frag->size, frag, frag->rb.rb_left, frag->rb.rb_right)); | ||
619 | if (frag->ofs + frag->size <= offset) { | 978 | if (frag->ofs + frag->size <= offset) { |
620 | D2(printk(KERN_DEBUG "Going right from frag %d-%d, before the region we care about\n", | ||
621 | frag->ofs, frag->ofs+frag->size)); | ||
622 | /* Remember the closest smaller match on the way down */ | 979 | /* Remember the closest smaller match on the way down */ |
623 | if (!prev || frag->ofs > prev->ofs) | 980 | if (!prev || frag->ofs > prev->ofs) |
624 | prev = frag; | 981 | prev = frag; |
625 | next = frag->rb.rb_right; | 982 | next = frag->rb.rb_right; |
626 | } else if (frag->ofs > offset) { | 983 | } else if (frag->ofs > offset) { |
627 | D2(printk(KERN_DEBUG "Going left from frag %d-%d, after the region we care about\n", | ||
628 | frag->ofs, frag->ofs+frag->size)); | ||
629 | next = frag->rb.rb_left; | 984 | next = frag->rb.rb_left; |
630 | } else { | 985 | } else { |
631 | D2(printk(KERN_DEBUG "Returning frag %d,%d, matched\n", | ||
632 | frag->ofs, frag->ofs+frag->size)); | ||
633 | return frag; | 986 | return frag; |
634 | } | 987 | } |
635 | } | 988 | } |
@@ -638,11 +991,11 @@ struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_ | |||
638 | and return the closest smaller one */ | 991 | and return the closest smaller one */ |
639 | 992 | ||
640 | if (prev) | 993 | if (prev) |
641 | D2(printk(KERN_DEBUG "No match. Returning frag %d,%d, closest previous\n", | 994 | dbg_fragtree2("no match. Returning frag %#04x-%#04x, closest previous\n", |
642 | prev->ofs, prev->ofs+prev->size)); | 995 | prev->ofs, prev->ofs+prev->size); |
643 | else | 996 | else |
644 | D2(printk(KERN_DEBUG "Returning NULL, empty fragtree\n")); | 997 | dbg_fragtree2("returning NULL, empty fragtree\n"); |
645 | 998 | ||
646 | return prev; | 999 | return prev; |
647 | } | 1000 | } |
648 | 1001 | ||
@@ -656,39 +1009,32 @@ void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c) | |||
656 | if (!root->rb_node) | 1009 | if (!root->rb_node) |
657 | return; | 1010 | return; |
658 | 1011 | ||
659 | frag = (rb_entry(root->rb_node, struct jffs2_node_frag, rb)); | 1012 | dbg_fragtree("killing\n"); |
660 | 1013 | ||
1014 | frag = (rb_entry(root->rb_node, struct jffs2_node_frag, rb)); | ||
661 | while(frag) { | 1015 | while(frag) { |
662 | if (frag->rb.rb_left) { | 1016 | if (frag->rb.rb_left) { |
663 | D2(printk(KERN_DEBUG "Going left from frag (%p) %d-%d\n", | ||
664 | frag, frag->ofs, frag->ofs+frag->size)); | ||
665 | frag = frag_left(frag); | 1017 | frag = frag_left(frag); |
666 | continue; | 1018 | continue; |
667 | } | 1019 | } |
668 | if (frag->rb.rb_right) { | 1020 | if (frag->rb.rb_right) { |
669 | D2(printk(KERN_DEBUG "Going right from frag (%p) %d-%d\n", | ||
670 | frag, frag->ofs, frag->ofs+frag->size)); | ||
671 | frag = frag_right(frag); | 1021 | frag = frag_right(frag); |
672 | continue; | 1022 | continue; |
673 | } | 1023 | } |
674 | 1024 | ||
675 | D2(printk(KERN_DEBUG "jffs2_kill_fragtree: frag at 0x%x-0x%x: node %p, frags %d--\n", | ||
676 | frag->ofs, frag->ofs+frag->size, frag->node, | ||
677 | frag->node?frag->node->frags:0)); | ||
678 | |||
679 | if (frag->node && !(--frag->node->frags)) { | 1025 | if (frag->node && !(--frag->node->frags)) { |
680 | /* Not a hole, and it's the final remaining frag | 1026 | /* Not a hole, and it's the final remaining frag |
681 | of this node. Free the node */ | 1027 | of this node. Free the node */ |
682 | if (c) | 1028 | if (c) |
683 | jffs2_mark_node_obsolete(c, frag->node->raw); | 1029 | jffs2_mark_node_obsolete(c, frag->node->raw); |
684 | 1030 | ||
685 | jffs2_free_full_dnode(frag->node); | 1031 | jffs2_free_full_dnode(frag->node); |
686 | } | 1032 | } |
687 | parent = frag_parent(frag); | 1033 | parent = frag_parent(frag); |
688 | if (parent) { | 1034 | if (parent) { |
689 | if (frag_left(parent) == frag) | 1035 | if (frag_left(parent) == frag) |
690 | parent->rb.rb_left = NULL; | 1036 | parent->rb.rb_left = NULL; |
691 | else | 1037 | else |
692 | parent->rb.rb_right = NULL; | 1038 | parent->rb.rb_right = NULL; |
693 | } | 1039 | } |
694 | 1040 | ||
@@ -698,29 +1044,3 @@ void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c) | |||
698 | cond_resched(); | 1044 | cond_resched(); |
699 | } | 1045 | } |
700 | } | 1046 | } |
701 | |||
702 | void jffs2_fragtree_insert(struct jffs2_node_frag *newfrag, struct jffs2_node_frag *base) | ||
703 | { | ||
704 | struct rb_node *parent = &base->rb; | ||
705 | struct rb_node **link = &parent; | ||
706 | |||
707 | D2(printk(KERN_DEBUG "jffs2_fragtree_insert(%p; %d-%d, %p)\n", newfrag, | ||
708 | newfrag->ofs, newfrag->ofs+newfrag->size, base)); | ||
709 | |||
710 | while (*link) { | ||
711 | parent = *link; | ||
712 | base = rb_entry(parent, struct jffs2_node_frag, rb); | ||
713 | |||
714 | D2(printk(KERN_DEBUG "fragtree_insert considering frag at 0x%x\n", base->ofs)); | ||
715 | if (newfrag->ofs > base->ofs) | ||
716 | link = &base->rb.rb_right; | ||
717 | else if (newfrag->ofs < base->ofs) | ||
718 | link = &base->rb.rb_left; | ||
719 | else { | ||
720 | printk(KERN_CRIT "Duplicate frag at %08x (%p,%p)\n", newfrag->ofs, newfrag, base); | ||
721 | BUG(); | ||
722 | } | ||
723 | } | ||
724 | |||
725 | rb_link_node(&newfrag->rb, &base->rb, link); | ||
726 | } | ||
diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h index b34c397909ef..23a67bb3052f 100644 --- a/fs/jffs2/nodelist.h +++ b/fs/jffs2/nodelist.h | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: nodelist.h,v 1.131 2005/07/05 21:03:07 dwmw2 Exp $ | 10 | * $Id: nodelist.h,v 1.140 2005/09/07 08:34:54 havasi Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -20,30 +20,15 @@ | |||
20 | #include <linux/jffs2.h> | 20 | #include <linux/jffs2.h> |
21 | #include <linux/jffs2_fs_sb.h> | 21 | #include <linux/jffs2_fs_sb.h> |
22 | #include <linux/jffs2_fs_i.h> | 22 | #include <linux/jffs2_fs_i.h> |
23 | #include "summary.h" | ||
23 | 24 | ||
24 | #ifdef __ECOS | 25 | #ifdef __ECOS |
25 | #include "os-ecos.h" | 26 | #include "os-ecos.h" |
26 | #else | 27 | #else |
27 | #include <linux/mtd/compatmac.h> /* For min/max in older kernels */ | 28 | #include <linux/mtd/compatmac.h> /* For compatibility with older kernels */ |
28 | #include "os-linux.h" | 29 | #include "os-linux.h" |
29 | #endif | 30 | #endif |
30 | 31 | ||
31 | #ifndef CONFIG_JFFS2_FS_DEBUG | ||
32 | #define CONFIG_JFFS2_FS_DEBUG 1 | ||
33 | #endif | ||
34 | |||
35 | #if CONFIG_JFFS2_FS_DEBUG > 0 | ||
36 | #define D1(x) x | ||
37 | #else | ||
38 | #define D1(x) | ||
39 | #endif | ||
40 | |||
41 | #if CONFIG_JFFS2_FS_DEBUG > 1 | ||
42 | #define D2(x) x | ||
43 | #else | ||
44 | #define D2(x) | ||
45 | #endif | ||
46 | |||
47 | #define JFFS2_NATIVE_ENDIAN | 32 | #define JFFS2_NATIVE_ENDIAN |
48 | 33 | ||
49 | /* Note we handle mode bits conversion from JFFS2 (i.e. Linux) to/from | 34 | /* Note we handle mode bits conversion from JFFS2 (i.e. Linux) to/from |
@@ -73,14 +58,17 @@ | |||
73 | #define je16_to_cpu(x) (le16_to_cpu(x.v16)) | 58 | #define je16_to_cpu(x) (le16_to_cpu(x.v16)) |
74 | #define je32_to_cpu(x) (le32_to_cpu(x.v32)) | 59 | #define je32_to_cpu(x) (le32_to_cpu(x.v32)) |
75 | #define jemode_to_cpu(x) (le32_to_cpu(jffs2_to_os_mode((x).m))) | 60 | #define jemode_to_cpu(x) (le32_to_cpu(jffs2_to_os_mode((x).m))) |
76 | #else | 61 | #else |
77 | #error wibble | 62 | #error wibble |
78 | #endif | 63 | #endif |
79 | 64 | ||
65 | /* The minimal node header size */ | ||
66 | #define JFFS2_MIN_NODE_HEADER sizeof(struct jffs2_raw_dirent) | ||
67 | |||
80 | /* | 68 | /* |
81 | This is all we need to keep in-core for each raw node during normal | 69 | This is all we need to keep in-core for each raw node during normal |
82 | operation. As and when we do read_inode on a particular inode, we can | 70 | operation. As and when we do read_inode on a particular inode, we can |
83 | scan the nodes which are listed for it and build up a proper map of | 71 | scan the nodes which are listed for it and build up a proper map of |
84 | which nodes are currently valid. JFFSv1 always used to keep that whole | 72 | which nodes are currently valid. JFFSv1 always used to keep that whole |
85 | map in core for each inode. | 73 | map in core for each inode. |
86 | */ | 74 | */ |
@@ -97,7 +85,7 @@ struct jffs2_raw_node_ref | |||
97 | 85 | ||
98 | /* flash_offset & 3 always has to be zero, because nodes are | 86 | /* flash_offset & 3 always has to be zero, because nodes are |
99 | always aligned at 4 bytes. So we have a couple of extra bits | 87 | always aligned at 4 bytes. So we have a couple of extra bits |
100 | to play with, which indicate the node's status; see below: */ | 88 | to play with, which indicate the node's status; see below: */ |
101 | #define REF_UNCHECKED 0 /* We haven't yet checked the CRC or built its inode */ | 89 | #define REF_UNCHECKED 0 /* We haven't yet checked the CRC or built its inode */ |
102 | #define REF_OBSOLETE 1 /* Obsolete, can be completely ignored */ | 90 | #define REF_OBSOLETE 1 /* Obsolete, can be completely ignored */ |
103 | #define REF_PRISTINE 2 /* Completely clean. GC without looking */ | 91 | #define REF_PRISTINE 2 /* Completely clean. GC without looking */ |
@@ -110,7 +98,7 @@ struct jffs2_raw_node_ref | |||
110 | /* For each inode in the filesystem, we need to keep a record of | 98 | /* For each inode in the filesystem, we need to keep a record of |
111 | nlink, because it would be a PITA to scan the whole directory tree | 99 | nlink, because it would be a PITA to scan the whole directory tree |
112 | at read_inode() time to calculate it, and to keep sufficient information | 100 | at read_inode() time to calculate it, and to keep sufficient information |
113 | in the raw_node_ref (basically both parent and child inode number for | 101 | in the raw_node_ref (basically both parent and child inode number for |
114 | dirent nodes) would take more space than this does. We also keep | 102 | dirent nodes) would take more space than this does. We also keep |
115 | a pointer to the first physical node which is part of this inode, too. | 103 | a pointer to the first physical node which is part of this inode, too. |
116 | */ | 104 | */ |
@@ -140,7 +128,7 @@ struct jffs2_inode_cache { | |||
140 | #define INOCACHE_HASHSIZE 128 | 128 | #define INOCACHE_HASHSIZE 128 |
141 | 129 | ||
142 | /* | 130 | /* |
143 | Larger representation of a raw node, kept in-core only when the | 131 | Larger representation of a raw node, kept in-core only when the |
144 | struct inode for this particular ino is instantiated. | 132 | struct inode for this particular ino is instantiated. |
145 | */ | 133 | */ |
146 | 134 | ||
@@ -150,11 +138,11 @@ struct jffs2_full_dnode | |||
150 | uint32_t ofs; /* The offset to which the data of this node belongs */ | 138 | uint32_t ofs; /* The offset to which the data of this node belongs */ |
151 | uint32_t size; | 139 | uint32_t size; |
152 | uint32_t frags; /* Number of fragments which currently refer | 140 | uint32_t frags; /* Number of fragments which currently refer |
153 | to this node. When this reaches zero, | 141 | to this node. When this reaches zero, |
154 | the node is obsolete. */ | 142 | the node is obsolete. */ |
155 | }; | 143 | }; |
156 | 144 | ||
157 | /* | 145 | /* |
158 | Even larger representation of a raw node, kept in-core only while | 146 | Even larger representation of a raw node, kept in-core only while |
159 | we're actually building up the original map of which nodes go where, | 147 | we're actually building up the original map of which nodes go where, |
160 | in read_inode() | 148 | in read_inode() |
@@ -164,7 +152,10 @@ struct jffs2_tmp_dnode_info | |||
164 | struct rb_node rb; | 152 | struct rb_node rb; |
165 | struct jffs2_full_dnode *fn; | 153 | struct jffs2_full_dnode *fn; |
166 | uint32_t version; | 154 | uint32_t version; |
167 | }; | 155 | uint32_t data_crc; |
156 | uint32_t partial_crc; | ||
157 | uint32_t csize; | ||
158 | }; | ||
168 | 159 | ||
169 | struct jffs2_full_dirent | 160 | struct jffs2_full_dirent |
170 | { | 161 | { |
@@ -178,7 +169,7 @@ struct jffs2_full_dirent | |||
178 | }; | 169 | }; |
179 | 170 | ||
180 | /* | 171 | /* |
181 | Fragments - used to build a map of which raw node to obtain | 172 | Fragments - used to build a map of which raw node to obtain |
182 | data from for each part of the ino | 173 | data from for each part of the ino |
183 | */ | 174 | */ |
184 | struct jffs2_node_frag | 175 | struct jffs2_node_frag |
@@ -207,86 +198,18 @@ struct jffs2_eraseblock | |||
207 | struct jffs2_raw_node_ref *gc_node; /* Next node to be garbage collected */ | 198 | struct jffs2_raw_node_ref *gc_node; /* Next node to be garbage collected */ |
208 | }; | 199 | }; |
209 | 200 | ||
210 | #define ACCT_SANITY_CHECK(c, jeb) do { \ | 201 | static inline int jffs2_blocks_use_vmalloc(struct jffs2_sb_info *c) |
211 | struct jffs2_eraseblock *___j = jeb; \ | ||
212 | if ((___j) && ___j->used_size + ___j->dirty_size + ___j->free_size + ___j->wasted_size + ___j->unchecked_size != c->sector_size) { \ | ||
213 | printk(KERN_NOTICE "Eeep. Space accounting for block at 0x%08x is screwed\n", ___j->offset); \ | ||
214 | printk(KERN_NOTICE "free 0x%08x + dirty 0x%08x + used %08x + wasted %08x + unchecked %08x != total %08x\n", \ | ||
215 | ___j->free_size, ___j->dirty_size, ___j->used_size, ___j->wasted_size, ___j->unchecked_size, c->sector_size); \ | ||
216 | BUG(); \ | ||
217 | } \ | ||
218 | if (c->used_size + c->dirty_size + c->free_size + c->erasing_size + c->bad_size + c->wasted_size + c->unchecked_size != c->flash_size) { \ | ||
219 | printk(KERN_NOTICE "Eeep. Space accounting superblock info is screwed\n"); \ | ||
220 | printk(KERN_NOTICE "free 0x%08x + dirty 0x%08x + used %08x + erasing %08x + bad %08x + wasted %08x + unchecked %08x != total %08x\n", \ | ||
221 | c->free_size, c->dirty_size, c->used_size, c->erasing_size, c->bad_size, c->wasted_size, c->unchecked_size, c->flash_size); \ | ||
222 | BUG(); \ | ||
223 | } \ | ||
224 | } while(0) | ||
225 | |||
226 | static inline void paranoia_failed_dump(struct jffs2_eraseblock *jeb) | ||
227 | { | 202 | { |
228 | struct jffs2_raw_node_ref *ref; | 203 | return ((c->flash_size / c->sector_size) * sizeof (struct jffs2_eraseblock)) > (128 * 1024); |
229 | int i=0; | ||
230 | |||
231 | printk(KERN_NOTICE); | ||
232 | for (ref = jeb->first_node; ref; ref = ref->next_phys) { | ||
233 | printk("%08x->", ref_offset(ref)); | ||
234 | if (++i == 8) { | ||
235 | i = 0; | ||
236 | printk("\n" KERN_NOTICE); | ||
237 | } | ||
238 | } | ||
239 | printk("\n"); | ||
240 | } | 204 | } |
241 | 205 | ||
242 | |||
243 | #define ACCT_PARANOIA_CHECK(jeb) do { \ | ||
244 | uint32_t my_used_size = 0; \ | ||
245 | uint32_t my_unchecked_size = 0; \ | ||
246 | struct jffs2_raw_node_ref *ref2 = jeb->first_node; \ | ||
247 | while (ref2) { \ | ||
248 | if (unlikely(ref2->flash_offset < jeb->offset || \ | ||
249 | ref2->flash_offset > jeb->offset + c->sector_size)) { \ | ||
250 | printk(KERN_NOTICE "Node %08x shouldn't be in block at %08x!\n", \ | ||
251 | ref_offset(ref2), jeb->offset); \ | ||
252 | paranoia_failed_dump(jeb); \ | ||
253 | BUG(); \ | ||
254 | } \ | ||
255 | if (ref_flags(ref2) == REF_UNCHECKED) \ | ||
256 | my_unchecked_size += ref_totlen(c, jeb, ref2); \ | ||
257 | else if (!ref_obsolete(ref2)) \ | ||
258 | my_used_size += ref_totlen(c, jeb, ref2); \ | ||
259 | if (unlikely((!ref2->next_phys) != (ref2 == jeb->last_node))) { \ | ||
260 | if (!ref2->next_phys) \ | ||
261 | printk("ref for node at %p (phys %08x) has next_phys->%p (----), last_node->%p (phys %08x)\n", \ | ||
262 | ref2, ref_offset(ref2), ref2->next_phys, \ | ||
263 | jeb->last_node, ref_offset(jeb->last_node)); \ | ||
264 | else \ | ||
265 | printk("ref for node at %p (phys %08x) has next_phys->%p (%08x), last_node->%p (phys %08x)\n", \ | ||
266 | ref2, ref_offset(ref2), ref2->next_phys, ref_offset(ref2->next_phys), \ | ||
267 | jeb->last_node, ref_offset(jeb->last_node)); \ | ||
268 | paranoia_failed_dump(jeb); \ | ||
269 | BUG(); \ | ||
270 | } \ | ||
271 | ref2 = ref2->next_phys; \ | ||
272 | } \ | ||
273 | if (my_used_size != jeb->used_size) { \ | ||
274 | printk(KERN_NOTICE "Calculated used size %08x != stored used size %08x\n", my_used_size, jeb->used_size); \ | ||
275 | BUG(); \ | ||
276 | } \ | ||
277 | if (my_unchecked_size != jeb->unchecked_size) { \ | ||
278 | printk(KERN_NOTICE "Calculated unchecked size %08x != stored unchecked size %08x\n", my_unchecked_size, jeb->unchecked_size); \ | ||
279 | BUG(); \ | ||
280 | } \ | ||
281 | } while(0) | ||
282 | |||
283 | /* Calculate totlen from surrounding nodes or eraseblock */ | 206 | /* Calculate totlen from surrounding nodes or eraseblock */ |
284 | static inline uint32_t __ref_totlen(struct jffs2_sb_info *c, | 207 | static inline uint32_t __ref_totlen(struct jffs2_sb_info *c, |
285 | struct jffs2_eraseblock *jeb, | 208 | struct jffs2_eraseblock *jeb, |
286 | struct jffs2_raw_node_ref *ref) | 209 | struct jffs2_raw_node_ref *ref) |
287 | { | 210 | { |
288 | uint32_t ref_end; | 211 | uint32_t ref_end; |
289 | 212 | ||
290 | if (ref->next_phys) | 213 | if (ref->next_phys) |
291 | ref_end = ref_offset(ref->next_phys); | 214 | ref_end = ref_offset(ref->next_phys); |
292 | else { | 215 | else { |
@@ -306,11 +229,13 @@ static inline uint32_t ref_totlen(struct jffs2_sb_info *c, | |||
306 | { | 229 | { |
307 | uint32_t ret; | 230 | uint32_t ret; |
308 | 231 | ||
309 | D1(if (jeb && jeb != &c->blocks[ref->flash_offset / c->sector_size]) { | 232 | #if CONFIG_JFFS2_FS_DEBUG > 0 |
233 | if (jeb && jeb != &c->blocks[ref->flash_offset / c->sector_size]) { | ||
310 | printk(KERN_CRIT "ref_totlen called with wrong block -- at 0x%08x instead of 0x%08x; ref 0x%08x\n", | 234 | printk(KERN_CRIT "ref_totlen called with wrong block -- at 0x%08x instead of 0x%08x; ref 0x%08x\n", |
311 | jeb->offset, c->blocks[ref->flash_offset / c->sector_size].offset, ref_offset(ref)); | 235 | jeb->offset, c->blocks[ref->flash_offset / c->sector_size].offset, ref_offset(ref)); |
312 | BUG(); | 236 | BUG(); |
313 | }) | 237 | } |
238 | #endif | ||
314 | 239 | ||
315 | #if 1 | 240 | #if 1 |
316 | ret = ref->__totlen; | 241 | ret = ref->__totlen; |
@@ -323,14 +248,13 @@ static inline uint32_t ref_totlen(struct jffs2_sb_info *c, | |||
323 | ret, ref->__totlen); | 248 | ret, ref->__totlen); |
324 | if (!jeb) | 249 | if (!jeb) |
325 | jeb = &c->blocks[ref->flash_offset / c->sector_size]; | 250 | jeb = &c->blocks[ref->flash_offset / c->sector_size]; |
326 | paranoia_failed_dump(jeb); | 251 | jffs2_dbg_dump_node_refs_nolock(c, jeb); |
327 | BUG(); | 252 | BUG(); |
328 | } | 253 | } |
329 | #endif | 254 | #endif |
330 | return ret; | 255 | return ret; |
331 | } | 256 | } |
332 | 257 | ||
333 | |||
334 | #define ALLOC_NORMAL 0 /* Normal allocation */ | 258 | #define ALLOC_NORMAL 0 /* Normal allocation */ |
335 | #define ALLOC_DELETION 1 /* Deletion node. Best to allow it */ | 259 | #define ALLOC_DELETION 1 /* Deletion node. Best to allow it */ |
336 | #define ALLOC_GC 2 /* Space requested for GC. Give it or die */ | 260 | #define ALLOC_GC 2 /* Space requested for GC. Give it or die */ |
@@ -340,7 +264,7 @@ static inline uint32_t ref_totlen(struct jffs2_sb_info *c, | |||
340 | #define VERYDIRTY(c, size) ((size) >= ((c)->sector_size / 2)) | 264 | #define VERYDIRTY(c, size) ((size) >= ((c)->sector_size / 2)) |
341 | 265 | ||
342 | /* check if dirty space is more than 255 Byte */ | 266 | /* check if dirty space is more than 255 Byte */ |
343 | #define ISDIRTY(size) ((size) > sizeof (struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN) | 267 | #define ISDIRTY(size) ((size) > sizeof (struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN) |
344 | 268 | ||
345 | #define PAD(x) (((x)+3)&~3) | 269 | #define PAD(x) (((x)+3)&~3) |
346 | 270 | ||
@@ -384,12 +308,7 @@ static inline struct jffs2_node_frag *frag_last(struct rb_root *root) | |||
384 | #define frag_erase(frag, list) rb_erase(&frag->rb, list); | 308 | #define frag_erase(frag, list) rb_erase(&frag->rb, list); |
385 | 309 | ||
386 | /* nodelist.c */ | 310 | /* nodelist.c */ |
387 | D2(void jffs2_print_frag_list(struct jffs2_inode_info *f)); | ||
388 | void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list); | 311 | void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list); |
389 | int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | ||
390 | struct rb_root *tnp, struct jffs2_full_dirent **fdp, | ||
391 | uint32_t *highest_version, uint32_t *latest_mctime, | ||
392 | uint32_t *mctime_ver); | ||
393 | void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state); | 312 | void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state); |
394 | struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, uint32_t ino); | 313 | struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, uint32_t ino); |
395 | void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new); | 314 | void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new); |
@@ -398,19 +317,23 @@ void jffs2_free_ino_caches(struct jffs2_sb_info *c); | |||
398 | void jffs2_free_raw_node_refs(struct jffs2_sb_info *c); | 317 | void jffs2_free_raw_node_refs(struct jffs2_sb_info *c); |
399 | struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset); | 318 | struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset); |
400 | void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c_delete); | 319 | void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c_delete); |
401 | void jffs2_fragtree_insert(struct jffs2_node_frag *newfrag, struct jffs2_node_frag *base); | ||
402 | struct rb_node *rb_next(struct rb_node *); | 320 | struct rb_node *rb_next(struct rb_node *); |
403 | struct rb_node *rb_prev(struct rb_node *); | 321 | struct rb_node *rb_prev(struct rb_node *); |
404 | void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_root *root); | 322 | void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_root *root); |
323 | void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *this); | ||
324 | int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn); | ||
325 | void jffs2_truncate_fragtree (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size); | ||
326 | int jffs2_add_older_frag_to_fragtree(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_tmp_dnode_info *tn); | ||
405 | 327 | ||
406 | /* nodemgmt.c */ | 328 | /* nodemgmt.c */ |
407 | int jffs2_thread_should_wake(struct jffs2_sb_info *c); | 329 | int jffs2_thread_should_wake(struct jffs2_sb_info *c); |
408 | int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, int prio); | 330 | int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, |
409 | int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len); | 331 | uint32_t *len, int prio, uint32_t sumsize); |
332 | int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, | ||
333 | uint32_t *len, uint32_t sumsize); | ||
410 | int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new); | 334 | int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new); |
411 | void jffs2_complete_reservation(struct jffs2_sb_info *c); | 335 | void jffs2_complete_reservation(struct jffs2_sb_info *c); |
412 | void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *raw); | 336 | void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *raw); |
413 | void jffs2_dump_block_lists(struct jffs2_sb_info *c); | ||
414 | 337 | ||
415 | /* write.c */ | 338 | /* write.c */ |
416 | int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint32_t mode, struct jffs2_raw_inode *ri); | 339 | int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint32_t mode, struct jffs2_raw_inode *ri); |
@@ -418,17 +341,15 @@ int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint | |||
418 | struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const unsigned char *data, uint32_t datalen, uint32_t flash_ofs, int alloc_mode); | 341 | struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const unsigned char *data, uint32_t datalen, uint32_t flash_ofs, int alloc_mode); |
419 | struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_dirent *rd, const unsigned char *name, uint32_t namelen, uint32_t flash_ofs, int alloc_mode); | 342 | struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_dirent *rd, const unsigned char *name, uint32_t namelen, uint32_t flash_ofs, int alloc_mode); |
420 | int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | 343 | int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, |
421 | struct jffs2_raw_inode *ri, unsigned char *buf, | 344 | struct jffs2_raw_inode *ri, unsigned char *buf, |
422 | uint32_t offset, uint32_t writelen, uint32_t *retlen); | 345 | uint32_t offset, uint32_t writelen, uint32_t *retlen); |
423 | int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const char *name, int namelen); | 346 | int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const char *name, int namelen); |
424 | int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, const char *name, int namelen, struct jffs2_inode_info *dead_f); | 347 | int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, const char *name, int namelen, struct jffs2_inode_info *dead_f, uint32_t time); |
425 | int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino, uint8_t type, const char *name, int namelen); | 348 | int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino, uint8_t type, const char *name, int namelen, uint32_t time); |
426 | 349 | ||
427 | 350 | ||
428 | /* readinode.c */ | 351 | /* readinode.c */ |
429 | void jffs2_truncate_fraglist (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size); | 352 | int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, |
430 | int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn); | ||
431 | int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | ||
432 | uint32_t ino, struct jffs2_raw_inode *latest_node); | 353 | uint32_t ino, struct jffs2_raw_inode *latest_node); |
433 | int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic); | 354 | int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic); |
434 | void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f); | 355 | void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f); |
@@ -468,6 +389,10 @@ char *jffs2_getlink(struct jffs2_sb_info *c, struct jffs2_inode_info *f); | |||
468 | /* scan.c */ | 389 | /* scan.c */ |
469 | int jffs2_scan_medium(struct jffs2_sb_info *c); | 390 | int jffs2_scan_medium(struct jffs2_sb_info *c); |
470 | void jffs2_rotate_lists(struct jffs2_sb_info *c); | 391 | void jffs2_rotate_lists(struct jffs2_sb_info *c); |
392 | int jffs2_fill_scan_buf(struct jffs2_sb_info *c, void *buf, | ||
393 | uint32_t ofs, uint32_t len); | ||
394 | struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino); | ||
395 | int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); | ||
471 | 396 | ||
472 | /* build.c */ | 397 | /* build.c */ |
473 | int jffs2_do_mount_fs(struct jffs2_sb_info *c); | 398 | int jffs2_do_mount_fs(struct jffs2_sb_info *c); |
@@ -483,4 +408,6 @@ int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_erasebloc | |||
483 | int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); | 408 | int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); |
484 | #endif | 409 | #endif |
485 | 410 | ||
411 | #include "debug.h" | ||
412 | |||
486 | #endif /* __JFFS2_NODELIST_H__ */ | 413 | #endif /* __JFFS2_NODELIST_H__ */ |
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c index c1d8b5ed9ab9..49127a1f0458 100644 --- a/fs/jffs2/nodemgmt.c +++ b/fs/jffs2/nodemgmt.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: nodemgmt.c,v 1.122 2005/05/06 09:30:27 dedekind Exp $ | 10 | * $Id: nodemgmt.c,v 1.127 2005/09/20 15:49:12 dedekind Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/compiler.h> | 17 | #include <linux/compiler.h> |
18 | #include <linux/sched.h> /* For cond_resched() */ | 18 | #include <linux/sched.h> /* For cond_resched() */ |
19 | #include "nodelist.h" | 19 | #include "nodelist.h" |
20 | #include "debug.h" | ||
20 | 21 | ||
21 | /** | 22 | /** |
22 | * jffs2_reserve_space - request physical space to write nodes to flash | 23 | * jffs2_reserve_space - request physical space to write nodes to flash |
@@ -38,9 +39,11 @@ | |||
38 | * for the requested allocation. | 39 | * for the requested allocation. |
39 | */ | 40 | */ |
40 | 41 | ||
41 | static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len); | 42 | static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, |
43 | uint32_t *ofs, uint32_t *len, uint32_t sumsize); | ||
42 | 44 | ||
43 | int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, int prio) | 45 | int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, |
46 | uint32_t *len, int prio, uint32_t sumsize) | ||
44 | { | 47 | { |
45 | int ret = -EAGAIN; | 48 | int ret = -EAGAIN; |
46 | int blocksneeded = c->resv_blocks_write; | 49 | int blocksneeded = c->resv_blocks_write; |
@@ -85,12 +88,12 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs | |||
85 | up(&c->alloc_sem); | 88 | up(&c->alloc_sem); |
86 | return -ENOSPC; | 89 | return -ENOSPC; |
87 | } | 90 | } |
88 | 91 | ||
89 | /* Calc possibly available space. Possibly available means that we | 92 | /* Calc possibly available space. Possibly available means that we |
90 | * don't know, if unchecked size contains obsoleted nodes, which could give us some | 93 | * don't know, if unchecked size contains obsoleted nodes, which could give us some |
91 | * more usable space. This will affect the sum only once, as gc first finishes checking | 94 | * more usable space. This will affect the sum only once, as gc first finishes checking |
92 | * of nodes. | 95 | * of nodes. |
93 | + Return -ENOSPC, if the maximum possibly available space is less or equal than | 96 | + Return -ENOSPC, if the maximum possibly available space is less or equal than |
94 | * blocksneeded * sector_size. | 97 | * blocksneeded * sector_size. |
95 | * This blocks endless gc looping on a filesystem, which is nearly full, even if | 98 | * This blocks endless gc looping on a filesystem, which is nearly full, even if |
96 | * the check above passes. | 99 | * the check above passes. |
@@ -115,7 +118,7 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs | |||
115 | c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size, | 118 | c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size, |
116 | c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size)); | 119 | c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size)); |
117 | spin_unlock(&c->erase_completion_lock); | 120 | spin_unlock(&c->erase_completion_lock); |
118 | 121 | ||
119 | ret = jffs2_garbage_collect_pass(c); | 122 | ret = jffs2_garbage_collect_pass(c); |
120 | if (ret) | 123 | if (ret) |
121 | return ret; | 124 | return ret; |
@@ -129,7 +132,7 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs | |||
129 | spin_lock(&c->erase_completion_lock); | 132 | spin_lock(&c->erase_completion_lock); |
130 | } | 133 | } |
131 | 134 | ||
132 | ret = jffs2_do_reserve_space(c, minsize, ofs, len); | 135 | ret = jffs2_do_reserve_space(c, minsize, ofs, len, sumsize); |
133 | if (ret) { | 136 | if (ret) { |
134 | D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret)); | 137 | D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret)); |
135 | } | 138 | } |
@@ -140,7 +143,8 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs | |||
140 | return ret; | 143 | return ret; |
141 | } | 144 | } |
142 | 145 | ||
143 | int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len) | 146 | int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, |
147 | uint32_t *len, uint32_t sumsize) | ||
144 | { | 148 | { |
145 | int ret = -EAGAIN; | 149 | int ret = -EAGAIN; |
146 | minsize = PAD(minsize); | 150 | minsize = PAD(minsize); |
@@ -149,7 +153,7 @@ int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t * | |||
149 | 153 | ||
150 | spin_lock(&c->erase_completion_lock); | 154 | spin_lock(&c->erase_completion_lock); |
151 | while(ret == -EAGAIN) { | 155 | while(ret == -EAGAIN) { |
152 | ret = jffs2_do_reserve_space(c, minsize, ofs, len); | 156 | ret = jffs2_do_reserve_space(c, minsize, ofs, len, sumsize); |
153 | if (ret) { | 157 | if (ret) { |
154 | D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret)); | 158 | D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret)); |
155 | } | 159 | } |
@@ -158,105 +162,185 @@ int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t * | |||
158 | return ret; | 162 | return ret; |
159 | } | 163 | } |
160 | 164 | ||
161 | /* Called with alloc sem _and_ erase_completion_lock */ | 165 | |
162 | static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len) | 166 | /* Classify nextblock (clean, dirty of verydirty) and force to select an other one */ |
167 | |||
168 | static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) | ||
163 | { | 169 | { |
164 | struct jffs2_eraseblock *jeb = c->nextblock; | 170 | |
165 | 171 | /* Check, if we have a dirty block now, or if it was dirty already */ | |
166 | restart: | 172 | if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) { |
167 | if (jeb && minsize > jeb->free_size) { | 173 | c->dirty_size += jeb->wasted_size; |
168 | /* Skip the end of this block and file it as having some dirty space */ | 174 | c->wasted_size -= jeb->wasted_size; |
169 | /* If there's a pending write to it, flush now */ | 175 | jeb->dirty_size += jeb->wasted_size; |
170 | if (jffs2_wbuf_dirty(c)) { | 176 | jeb->wasted_size = 0; |
177 | if (VERYDIRTY(c, jeb->dirty_size)) { | ||
178 | D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", | ||
179 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); | ||
180 | list_add_tail(&jeb->list, &c->very_dirty_list); | ||
181 | } else { | ||
182 | D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", | ||
183 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); | ||
184 | list_add_tail(&jeb->list, &c->dirty_list); | ||
185 | } | ||
186 | } else { | ||
187 | D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", | ||
188 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); | ||
189 | list_add_tail(&jeb->list, &c->clean_list); | ||
190 | } | ||
191 | c->nextblock = NULL; | ||
192 | |||
193 | } | ||
194 | |||
195 | /* Select a new jeb for nextblock */ | ||
196 | |||
197 | static int jffs2_find_nextblock(struct jffs2_sb_info *c) | ||
198 | { | ||
199 | struct list_head *next; | ||
200 | |||
201 | /* Take the next block off the 'free' list */ | ||
202 | |||
203 | if (list_empty(&c->free_list)) { | ||
204 | |||
205 | if (!c->nr_erasing_blocks && | ||
206 | !list_empty(&c->erasable_list)) { | ||
207 | struct jffs2_eraseblock *ejeb; | ||
208 | |||
209 | ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list); | ||
210 | list_del(&ejeb->list); | ||
211 | list_add_tail(&ejeb->list, &c->erase_pending_list); | ||
212 | c->nr_erasing_blocks++; | ||
213 | jffs2_erase_pending_trigger(c); | ||
214 | D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n", | ||
215 | ejeb->offset)); | ||
216 | } | ||
217 | |||
218 | if (!c->nr_erasing_blocks && | ||
219 | !list_empty(&c->erasable_pending_wbuf_list)) { | ||
220 | D1(printk(KERN_DEBUG "jffs2_find_nextblock: Flushing write buffer\n")); | ||
221 | /* c->nextblock is NULL, no update to c->nextblock allowed */ | ||
171 | spin_unlock(&c->erase_completion_lock); | 222 | spin_unlock(&c->erase_completion_lock); |
172 | D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n")); | ||
173 | jffs2_flush_wbuf_pad(c); | 223 | jffs2_flush_wbuf_pad(c); |
174 | spin_lock(&c->erase_completion_lock); | 224 | spin_lock(&c->erase_completion_lock); |
175 | jeb = c->nextblock; | 225 | /* Have another go. It'll be on the erasable_list now */ |
176 | goto restart; | 226 | return -EAGAIN; |
177 | } | 227 | } |
178 | c->wasted_size += jeb->free_size; | 228 | |
179 | c->free_size -= jeb->free_size; | 229 | if (!c->nr_erasing_blocks) { |
180 | jeb->wasted_size += jeb->free_size; | 230 | /* Ouch. We're in GC, or we wouldn't have got here. |
181 | jeb->free_size = 0; | 231 | And there's no space left. At all. */ |
182 | 232 | printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n", | |
183 | /* Check, if we have a dirty block now, or if it was dirty already */ | 233 | c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no", |
184 | if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) { | 234 | list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no"); |
185 | c->dirty_size += jeb->wasted_size; | 235 | return -ENOSPC; |
186 | c->wasted_size -= jeb->wasted_size; | ||
187 | jeb->dirty_size += jeb->wasted_size; | ||
188 | jeb->wasted_size = 0; | ||
189 | if (VERYDIRTY(c, jeb->dirty_size)) { | ||
190 | D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", | ||
191 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); | ||
192 | list_add_tail(&jeb->list, &c->very_dirty_list); | ||
193 | } else { | ||
194 | D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", | ||
195 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); | ||
196 | list_add_tail(&jeb->list, &c->dirty_list); | ||
197 | } | ||
198 | } else { | ||
199 | D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", | ||
200 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); | ||
201 | list_add_tail(&jeb->list, &c->clean_list); | ||
202 | } | 236 | } |
203 | c->nextblock = jeb = NULL; | 237 | |
238 | spin_unlock(&c->erase_completion_lock); | ||
239 | /* Don't wait for it; just erase one right now */ | ||
240 | jffs2_erase_pending_blocks(c, 1); | ||
241 | spin_lock(&c->erase_completion_lock); | ||
242 | |||
243 | /* An erase may have failed, decreasing the | ||
244 | amount of free space available. So we must | ||
245 | restart from the beginning */ | ||
246 | return -EAGAIN; | ||
204 | } | 247 | } |
205 | |||
206 | if (!jeb) { | ||
207 | struct list_head *next; | ||
208 | /* Take the next block off the 'free' list */ | ||
209 | 248 | ||
210 | if (list_empty(&c->free_list)) { | 249 | next = c->free_list.next; |
250 | list_del(next); | ||
251 | c->nextblock = list_entry(next, struct jffs2_eraseblock, list); | ||
252 | c->nr_free_blocks--; | ||
211 | 253 | ||
212 | if (!c->nr_erasing_blocks && | 254 | jffs2_sum_reset_collected(c->summary); /* reset collected summary */ |
213 | !list_empty(&c->erasable_list)) { | ||
214 | struct jffs2_eraseblock *ejeb; | ||
215 | 255 | ||
216 | ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list); | 256 | D1(printk(KERN_DEBUG "jffs2_find_nextblock(): new nextblock = 0x%08x\n", c->nextblock->offset)); |
217 | list_del(&ejeb->list); | 257 | |
218 | list_add_tail(&ejeb->list, &c->erase_pending_list); | 258 | return 0; |
219 | c->nr_erasing_blocks++; | 259 | } |
220 | jffs2_erase_pending_trigger(c); | 260 | |
221 | D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Triggering erase of erasable block at 0x%08x\n", | 261 | /* Called with alloc sem _and_ erase_completion_lock */ |
222 | ejeb->offset)); | 262 | static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, uint32_t sumsize) |
263 | { | ||
264 | struct jffs2_eraseblock *jeb = c->nextblock; | ||
265 | uint32_t reserved_size; /* for summary information at the end of the jeb */ | ||
266 | int ret; | ||
267 | |||
268 | restart: | ||
269 | reserved_size = 0; | ||
270 | |||
271 | if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) { | ||
272 | /* NOSUM_SIZE means not to generate summary */ | ||
273 | |||
274 | if (jeb) { | ||
275 | reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE); | ||
276 | dbg_summary("minsize=%d , jeb->free=%d ," | ||
277 | "summary->size=%d , sumsize=%d\n", | ||
278 | minsize, jeb->free_size, | ||
279 | c->summary->sum_size, sumsize); | ||
280 | } | ||
281 | |||
282 | /* Is there enough space for writing out the current node, or we have to | ||
283 | write out summary information now, close this jeb and select new nextblock? */ | ||
284 | if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize + | ||
285 | JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) { | ||
286 | |||
287 | /* Has summary been disabled for this jeb? */ | ||
288 | if (jffs2_sum_is_disabled(c->summary)) { | ||
289 | sumsize = JFFS2_SUMMARY_NOSUM_SIZE; | ||
290 | goto restart; | ||
223 | } | 291 | } |
224 | 292 | ||
225 | if (!c->nr_erasing_blocks && | 293 | /* Writing out the collected summary information */ |
226 | !list_empty(&c->erasable_pending_wbuf_list)) { | 294 | dbg_summary("generating summary for 0x%08x.\n", jeb->offset); |
227 | D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n")); | 295 | ret = jffs2_sum_write_sumnode(c); |
228 | /* c->nextblock is NULL, no update to c->nextblock allowed */ | 296 | |
297 | if (ret) | ||
298 | return ret; | ||
299 | |||
300 | if (jffs2_sum_is_disabled(c->summary)) { | ||
301 | /* jffs2_write_sumnode() couldn't write out the summary information | ||
302 | diabling summary for this jeb and free the collected information | ||
303 | */ | ||
304 | sumsize = JFFS2_SUMMARY_NOSUM_SIZE; | ||
305 | goto restart; | ||
306 | } | ||
307 | |||
308 | jffs2_close_nextblock(c, jeb); | ||
309 | jeb = NULL; | ||
310 | /* keep always valid value in reserved_size */ | ||
311 | reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE); | ||
312 | } | ||
313 | } else { | ||
314 | if (jeb && minsize > jeb->free_size) { | ||
315 | /* Skip the end of this block and file it as having some dirty space */ | ||
316 | /* If there's a pending write to it, flush now */ | ||
317 | |||
318 | if (jffs2_wbuf_dirty(c)) { | ||
229 | spin_unlock(&c->erase_completion_lock); | 319 | spin_unlock(&c->erase_completion_lock); |
320 | D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n")); | ||
230 | jffs2_flush_wbuf_pad(c); | 321 | jffs2_flush_wbuf_pad(c); |
231 | spin_lock(&c->erase_completion_lock); | 322 | spin_lock(&c->erase_completion_lock); |
232 | /* Have another go. It'll be on the erasable_list now */ | 323 | jeb = c->nextblock; |
233 | return -EAGAIN; | 324 | goto restart; |
234 | } | 325 | } |
235 | 326 | ||
236 | if (!c->nr_erasing_blocks) { | 327 | c->wasted_size += jeb->free_size; |
237 | /* Ouch. We're in GC, or we wouldn't have got here. | 328 | c->free_size -= jeb->free_size; |
238 | And there's no space left. At all. */ | 329 | jeb->wasted_size += jeb->free_size; |
239 | printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n", | 330 | jeb->free_size = 0; |
240 | c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no", | ||
241 | list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no"); | ||
242 | return -ENOSPC; | ||
243 | } | ||
244 | |||
245 | spin_unlock(&c->erase_completion_lock); | ||
246 | /* Don't wait for it; just erase one right now */ | ||
247 | jffs2_erase_pending_blocks(c, 1); | ||
248 | spin_lock(&c->erase_completion_lock); | ||
249 | 331 | ||
250 | /* An erase may have failed, decreasing the | 332 | jffs2_close_nextblock(c, jeb); |
251 | amount of free space available. So we must | 333 | jeb = NULL; |
252 | restart from the beginning */ | ||
253 | return -EAGAIN; | ||
254 | } | 334 | } |
335 | } | ||
336 | |||
337 | if (!jeb) { | ||
255 | 338 | ||
256 | next = c->free_list.next; | 339 | ret = jffs2_find_nextblock(c); |
257 | list_del(next); | 340 | if (ret) |
258 | c->nextblock = jeb = list_entry(next, struct jffs2_eraseblock, list); | 341 | return ret; |
259 | c->nr_free_blocks--; | 342 | |
343 | jeb = c->nextblock; | ||
260 | 344 | ||
261 | if (jeb->free_size != c->sector_size - c->cleanmarker_size) { | 345 | if (jeb->free_size != c->sector_size - c->cleanmarker_size) { |
262 | printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size); | 346 | printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size); |
@@ -266,13 +350,13 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, ui | |||
266 | /* OK, jeb (==c->nextblock) is now pointing at a block which definitely has | 350 | /* OK, jeb (==c->nextblock) is now pointing at a block which definitely has |
267 | enough space */ | 351 | enough space */ |
268 | *ofs = jeb->offset + (c->sector_size - jeb->free_size); | 352 | *ofs = jeb->offset + (c->sector_size - jeb->free_size); |
269 | *len = jeb->free_size; | 353 | *len = jeb->free_size - reserved_size; |
270 | 354 | ||
271 | if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size && | 355 | if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size && |
272 | !jeb->first_node->next_in_ino) { | 356 | !jeb->first_node->next_in_ino) { |
273 | /* Only node in it beforehand was a CLEANMARKER node (we think). | 357 | /* Only node in it beforehand was a CLEANMARKER node (we think). |
274 | So mark it obsolete now that there's going to be another node | 358 | So mark it obsolete now that there's going to be another node |
275 | in the block. This will reduce used_size to zero but We've | 359 | in the block. This will reduce used_size to zero but We've |
276 | already set c->nextblock so that jffs2_mark_node_obsolete() | 360 | already set c->nextblock so that jffs2_mark_node_obsolete() |
277 | won't try to refile it to the dirty_list. | 361 | won't try to refile it to the dirty_list. |
278 | */ | 362 | */ |
@@ -292,12 +376,12 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, ui | |||
292 | * @len: length of this physical node | 376 | * @len: length of this physical node |
293 | * @dirty: dirty flag for new node | 377 | * @dirty: dirty flag for new node |
294 | * | 378 | * |
295 | * Should only be used to report nodes for which space has been allocated | 379 | * Should only be used to report nodes for which space has been allocated |
296 | * by jffs2_reserve_space. | 380 | * by jffs2_reserve_space. |
297 | * | 381 | * |
298 | * Must be called with the alloc_sem held. | 382 | * Must be called with the alloc_sem held. |
299 | */ | 383 | */ |
300 | 384 | ||
301 | int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new) | 385 | int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new) |
302 | { | 386 | { |
303 | struct jffs2_eraseblock *jeb; | 387 | struct jffs2_eraseblock *jeb; |
@@ -349,8 +433,8 @@ int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_r | |||
349 | list_add_tail(&jeb->list, &c->clean_list); | 433 | list_add_tail(&jeb->list, &c->clean_list); |
350 | c->nextblock = NULL; | 434 | c->nextblock = NULL; |
351 | } | 435 | } |
352 | ACCT_SANITY_CHECK(c,jeb); | 436 | jffs2_dbg_acct_sanity_check_nolock(c,jeb); |
353 | D1(ACCT_PARANOIA_CHECK(jeb)); | 437 | jffs2_dbg_acct_paranoia_check_nolock(c, jeb); |
354 | 438 | ||
355 | spin_unlock(&c->erase_completion_lock); | 439 | spin_unlock(&c->erase_completion_lock); |
356 | 440 | ||
@@ -404,8 +488,8 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
404 | 488 | ||
405 | if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) && | 489 | if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) && |
406 | !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) { | 490 | !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) { |
407 | /* Hm. This may confuse static lock analysis. If any of the above | 491 | /* Hm. This may confuse static lock analysis. If any of the above |
408 | three conditions is false, we're going to return from this | 492 | three conditions is false, we're going to return from this |
409 | function without actually obliterating any nodes or freeing | 493 | function without actually obliterating any nodes or freeing |
410 | any jffs2_raw_node_refs. So we don't need to stop erases from | 494 | any jffs2_raw_node_refs. So we don't need to stop erases from |
411 | happening, or protect against people holding an obsolete | 495 | happening, or protect against people holding an obsolete |
@@ -430,7 +514,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
430 | ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size); | 514 | ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size); |
431 | BUG(); | 515 | BUG(); |
432 | }) | 516 | }) |
433 | D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref))); | 517 | D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), ref_totlen(c, jeb, ref))); |
434 | jeb->used_size -= ref_totlen(c, jeb, ref); | 518 | jeb->used_size -= ref_totlen(c, jeb, ref); |
435 | c->used_size -= ref_totlen(c, jeb, ref); | 519 | c->used_size -= ref_totlen(c, jeb, ref); |
436 | } | 520 | } |
@@ -462,18 +546,17 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
462 | D1(printk(KERN_DEBUG "Wasting\n")); | 546 | D1(printk(KERN_DEBUG "Wasting\n")); |
463 | addedsize = 0; | 547 | addedsize = 0; |
464 | jeb->wasted_size += ref_totlen(c, jeb, ref); | 548 | jeb->wasted_size += ref_totlen(c, jeb, ref); |
465 | c->wasted_size += ref_totlen(c, jeb, ref); | 549 | c->wasted_size += ref_totlen(c, jeb, ref); |
466 | } | 550 | } |
467 | ref->flash_offset = ref_offset(ref) | REF_OBSOLETE; | 551 | ref->flash_offset = ref_offset(ref) | REF_OBSOLETE; |
468 | |||
469 | ACCT_SANITY_CHECK(c, jeb); | ||
470 | 552 | ||
471 | D1(ACCT_PARANOIA_CHECK(jeb)); | 553 | jffs2_dbg_acct_sanity_check_nolock(c, jeb); |
554 | jffs2_dbg_acct_paranoia_check_nolock(c, jeb); | ||
472 | 555 | ||
473 | if (c->flags & JFFS2_SB_FLAG_SCANNING) { | 556 | if (c->flags & JFFS2_SB_FLAG_SCANNING) { |
474 | /* Flash scanning is in progress. Don't muck about with the block | 557 | /* Flash scanning is in progress. Don't muck about with the block |
475 | lists because they're not ready yet, and don't actually | 558 | lists because they're not ready yet, and don't actually |
476 | obliterate nodes that look obsolete. If they weren't | 559 | obliterate nodes that look obsolete. If they weren't |
477 | marked obsolete on the flash at the time they _became_ | 560 | marked obsolete on the flash at the time they _became_ |
478 | obsolete, there was probably a reason for that. */ | 561 | obsolete, there was probably a reason for that. */ |
479 | spin_unlock(&c->erase_completion_lock); | 562 | spin_unlock(&c->erase_completion_lock); |
@@ -507,7 +590,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
507 | immediately reused, and we spread the load a bit. */ | 590 | immediately reused, and we spread the load a bit. */ |
508 | D1(printk(KERN_DEBUG "...and adding to erasable_list\n")); | 591 | D1(printk(KERN_DEBUG "...and adding to erasable_list\n")); |
509 | list_add_tail(&jeb->list, &c->erasable_list); | 592 | list_add_tail(&jeb->list, &c->erasable_list); |
510 | } | 593 | } |
511 | } | 594 | } |
512 | D1(printk(KERN_DEBUG "Done OK\n")); | 595 | D1(printk(KERN_DEBUG "Done OK\n")); |
513 | } else if (jeb == c->gcblock) { | 596 | } else if (jeb == c->gcblock) { |
@@ -525,8 +608,8 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
525 | list_add_tail(&jeb->list, &c->very_dirty_list); | 608 | list_add_tail(&jeb->list, &c->very_dirty_list); |
526 | } else { | 609 | } else { |
527 | D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n", | 610 | D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n", |
528 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); | 611 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); |
529 | } | 612 | } |
530 | 613 | ||
531 | spin_unlock(&c->erase_completion_lock); | 614 | spin_unlock(&c->erase_completion_lock); |
532 | 615 | ||
@@ -573,11 +656,11 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
573 | 656 | ||
574 | /* Nodes which have been marked obsolete no longer need to be | 657 | /* Nodes which have been marked obsolete no longer need to be |
575 | associated with any inode. Remove them from the per-inode list. | 658 | associated with any inode. Remove them from the per-inode list. |
576 | 659 | ||
577 | Note we can't do this for NAND at the moment because we need | 660 | Note we can't do this for NAND at the moment because we need |
578 | obsolete dirent nodes to stay on the lists, because of the | 661 | obsolete dirent nodes to stay on the lists, because of the |
579 | horridness in jffs2_garbage_collect_deletion_dirent(). Also | 662 | horridness in jffs2_garbage_collect_deletion_dirent(). Also |
580 | because we delete the inocache, and on NAND we need that to | 663 | because we delete the inocache, and on NAND we need that to |
581 | stay around until all the nodes are actually erased, in order | 664 | stay around until all the nodes are actually erased, in order |
582 | to stop us from giving the same inode number to another newly | 665 | to stop us from giving the same inode number to another newly |
583 | created inode. */ | 666 | created inode. */ |
@@ -606,7 +689,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
606 | if (ref->next_phys && ref_obsolete(ref->next_phys) && | 689 | if (ref->next_phys && ref_obsolete(ref->next_phys) && |
607 | !ref->next_phys->next_in_ino) { | 690 | !ref->next_phys->next_in_ino) { |
608 | struct jffs2_raw_node_ref *n = ref->next_phys; | 691 | struct jffs2_raw_node_ref *n = ref->next_phys; |
609 | 692 | ||
610 | spin_lock(&c->erase_completion_lock); | 693 | spin_lock(&c->erase_completion_lock); |
611 | 694 | ||
612 | ref->__totlen += n->__totlen; | 695 | ref->__totlen += n->__totlen; |
@@ -620,7 +703,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
620 | 703 | ||
621 | jffs2_free_raw_node_ref(n); | 704 | jffs2_free_raw_node_ref(n); |
622 | } | 705 | } |
623 | 706 | ||
624 | /* Also merge with the previous node in the list, if there is one | 707 | /* Also merge with the previous node in the list, if there is one |
625 | and that one is obsolete */ | 708 | and that one is obsolete */ |
626 | if (ref != jeb->first_node ) { | 709 | if (ref != jeb->first_node ) { |
@@ -630,7 +713,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
630 | 713 | ||
631 | while (p->next_phys != ref) | 714 | while (p->next_phys != ref) |
632 | p = p->next_phys; | 715 | p = p->next_phys; |
633 | 716 | ||
634 | if (ref_obsolete(p) && !ref->next_in_ino) { | 717 | if (ref_obsolete(p) && !ref->next_in_ino) { |
635 | p->__totlen += ref->__totlen; | 718 | p->__totlen += ref->__totlen; |
636 | if (jeb->last_node == ref) { | 719 | if (jeb->last_node == ref) { |
@@ -649,164 +732,6 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
649 | up(&c->erase_free_sem); | 732 | up(&c->erase_free_sem); |
650 | } | 733 | } |
651 | 734 | ||
652 | #if CONFIG_JFFS2_FS_DEBUG >= 2 | ||
653 | void jffs2_dump_block_lists(struct jffs2_sb_info *c) | ||
654 | { | ||
655 | |||
656 | |||
657 | printk(KERN_DEBUG "jffs2_dump_block_lists:\n"); | ||
658 | printk(KERN_DEBUG "flash_size: %08x\n", c->flash_size); | ||
659 | printk(KERN_DEBUG "used_size: %08x\n", c->used_size); | ||
660 | printk(KERN_DEBUG "dirty_size: %08x\n", c->dirty_size); | ||
661 | printk(KERN_DEBUG "wasted_size: %08x\n", c->wasted_size); | ||
662 | printk(KERN_DEBUG "unchecked_size: %08x\n", c->unchecked_size); | ||
663 | printk(KERN_DEBUG "free_size: %08x\n", c->free_size); | ||
664 | printk(KERN_DEBUG "erasing_size: %08x\n", c->erasing_size); | ||
665 | printk(KERN_DEBUG "bad_size: %08x\n", c->bad_size); | ||
666 | printk(KERN_DEBUG "sector_size: %08x\n", c->sector_size); | ||
667 | printk(KERN_DEBUG "jffs2_reserved_blocks size: %08x\n",c->sector_size * c->resv_blocks_write); | ||
668 | |||
669 | if (c->nextblock) { | ||
670 | printk(KERN_DEBUG "nextblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", | ||
671 | c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->unchecked_size, c->nextblock->free_size); | ||
672 | } else { | ||
673 | printk(KERN_DEBUG "nextblock: NULL\n"); | ||
674 | } | ||
675 | if (c->gcblock) { | ||
676 | printk(KERN_DEBUG "gcblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", | ||
677 | c->gcblock->offset, c->gcblock->used_size, c->gcblock->dirty_size, c->gcblock->wasted_size, c->gcblock->unchecked_size, c->gcblock->free_size); | ||
678 | } else { | ||
679 | printk(KERN_DEBUG "gcblock: NULL\n"); | ||
680 | } | ||
681 | if (list_empty(&c->clean_list)) { | ||
682 | printk(KERN_DEBUG "clean_list: empty\n"); | ||
683 | } else { | ||
684 | struct list_head *this; | ||
685 | int numblocks = 0; | ||
686 | uint32_t dirty = 0; | ||
687 | |||
688 | list_for_each(this, &c->clean_list) { | ||
689 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
690 | numblocks ++; | ||
691 | dirty += jeb->wasted_size; | ||
692 | printk(KERN_DEBUG "clean_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); | ||
693 | } | ||
694 | printk (KERN_DEBUG "Contains %d blocks with total wasted size %u, average wasted size: %u\n", numblocks, dirty, dirty / numblocks); | ||
695 | } | ||
696 | if (list_empty(&c->very_dirty_list)) { | ||
697 | printk(KERN_DEBUG "very_dirty_list: empty\n"); | ||
698 | } else { | ||
699 | struct list_head *this; | ||
700 | int numblocks = 0; | ||
701 | uint32_t dirty = 0; | ||
702 | |||
703 | list_for_each(this, &c->very_dirty_list) { | ||
704 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
705 | numblocks ++; | ||
706 | dirty += jeb->dirty_size; | ||
707 | printk(KERN_DEBUG "very_dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", | ||
708 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); | ||
709 | } | ||
710 | printk (KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n", | ||
711 | numblocks, dirty, dirty / numblocks); | ||
712 | } | ||
713 | if (list_empty(&c->dirty_list)) { | ||
714 | printk(KERN_DEBUG "dirty_list: empty\n"); | ||
715 | } else { | ||
716 | struct list_head *this; | ||
717 | int numblocks = 0; | ||
718 | uint32_t dirty = 0; | ||
719 | |||
720 | list_for_each(this, &c->dirty_list) { | ||
721 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
722 | numblocks ++; | ||
723 | dirty += jeb->dirty_size; | ||
724 | printk(KERN_DEBUG "dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", | ||
725 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); | ||
726 | } | ||
727 | printk (KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n", | ||
728 | numblocks, dirty, dirty / numblocks); | ||
729 | } | ||
730 | if (list_empty(&c->erasable_list)) { | ||
731 | printk(KERN_DEBUG "erasable_list: empty\n"); | ||
732 | } else { | ||
733 | struct list_head *this; | ||
734 | |||
735 | list_for_each(this, &c->erasable_list) { | ||
736 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
737 | printk(KERN_DEBUG "erasable_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", | ||
738 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); | ||
739 | } | ||
740 | } | ||
741 | if (list_empty(&c->erasing_list)) { | ||
742 | printk(KERN_DEBUG "erasing_list: empty\n"); | ||
743 | } else { | ||
744 | struct list_head *this; | ||
745 | |||
746 | list_for_each(this, &c->erasing_list) { | ||
747 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
748 | printk(KERN_DEBUG "erasing_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", | ||
749 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); | ||
750 | } | ||
751 | } | ||
752 | if (list_empty(&c->erase_pending_list)) { | ||
753 | printk(KERN_DEBUG "erase_pending_list: empty\n"); | ||
754 | } else { | ||
755 | struct list_head *this; | ||
756 | |||
757 | list_for_each(this, &c->erase_pending_list) { | ||
758 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
759 | printk(KERN_DEBUG "erase_pending_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", | ||
760 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); | ||
761 | } | ||
762 | } | ||
763 | if (list_empty(&c->erasable_pending_wbuf_list)) { | ||
764 | printk(KERN_DEBUG "erasable_pending_wbuf_list: empty\n"); | ||
765 | } else { | ||
766 | struct list_head *this; | ||
767 | |||
768 | list_for_each(this, &c->erasable_pending_wbuf_list) { | ||
769 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
770 | printk(KERN_DEBUG "erasable_pending_wbuf_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", | ||
771 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); | ||
772 | } | ||
773 | } | ||
774 | if (list_empty(&c->free_list)) { | ||
775 | printk(KERN_DEBUG "free_list: empty\n"); | ||
776 | } else { | ||
777 | struct list_head *this; | ||
778 | |||
779 | list_for_each(this, &c->free_list) { | ||
780 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
781 | printk(KERN_DEBUG "free_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", | ||
782 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); | ||
783 | } | ||
784 | } | ||
785 | if (list_empty(&c->bad_list)) { | ||
786 | printk(KERN_DEBUG "bad_list: empty\n"); | ||
787 | } else { | ||
788 | struct list_head *this; | ||
789 | |||
790 | list_for_each(this, &c->bad_list) { | ||
791 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
792 | printk(KERN_DEBUG "bad_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", | ||
793 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); | ||
794 | } | ||
795 | } | ||
796 | if (list_empty(&c->bad_used_list)) { | ||
797 | printk(KERN_DEBUG "bad_used_list: empty\n"); | ||
798 | } else { | ||
799 | struct list_head *this; | ||
800 | |||
801 | list_for_each(this, &c->bad_used_list) { | ||
802 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
803 | printk(KERN_DEBUG "bad_used_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", | ||
804 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); | ||
805 | } | ||
806 | } | ||
807 | } | ||
808 | #endif /* CONFIG_JFFS2_FS_DEBUG */ | ||
809 | |||
810 | int jffs2_thread_should_wake(struct jffs2_sb_info *c) | 735 | int jffs2_thread_should_wake(struct jffs2_sb_info *c) |
811 | { | 736 | { |
812 | int ret = 0; | 737 | int ret = 0; |
@@ -828,11 +753,11 @@ int jffs2_thread_should_wake(struct jffs2_sb_info *c) | |||
828 | */ | 753 | */ |
829 | dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size; | 754 | dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size; |
830 | 755 | ||
831 | if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger && | 756 | if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger && |
832 | (dirty > c->nospc_dirty_size)) | 757 | (dirty > c->nospc_dirty_size)) |
833 | ret = 1; | 758 | ret = 1; |
834 | 759 | ||
835 | D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n", | 760 | D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n", |
836 | c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no")); | 761 | c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no")); |
837 | 762 | ||
838 | return ret; | 763 | return ret; |
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h index d900c8929b09..59e7a393200c 100644 --- a/fs/jffs2/os-linux.h +++ b/fs/jffs2/os-linux.h | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: os-linux.h,v 1.58 2005/07/12 02:34:35 tpoynor Exp $ | 10 | * $Id: os-linux.h,v 1.64 2005/09/30 13:59:13 dedekind Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -57,6 +57,7 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f) | |||
57 | f->fragtree = RB_ROOT; | 57 | f->fragtree = RB_ROOT; |
58 | f->metadata = NULL; | 58 | f->metadata = NULL; |
59 | f->dents = NULL; | 59 | f->dents = NULL; |
60 | f->target = NULL; | ||
60 | f->flags = 0; | 61 | f->flags = 0; |
61 | f->usercompr = 0; | 62 | f->usercompr = 0; |
62 | } | 63 | } |
@@ -64,17 +65,24 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f) | |||
64 | 65 | ||
65 | #define jffs2_is_readonly(c) (OFNI_BS_2SFFJ(c)->s_flags & MS_RDONLY) | 66 | #define jffs2_is_readonly(c) (OFNI_BS_2SFFJ(c)->s_flags & MS_RDONLY) |
66 | 67 | ||
68 | #define SECTOR_ADDR(x) ( (((unsigned long)(x) / c->sector_size) * c->sector_size) ) | ||
67 | #ifndef CONFIG_JFFS2_FS_WRITEBUFFER | 69 | #ifndef CONFIG_JFFS2_FS_WRITEBUFFER |
68 | #define SECTOR_ADDR(x) ( ((unsigned long)(x) & ~(c->sector_size-1)) ) | 70 | |
71 | |||
72 | #ifdef CONFIG_JFFS2_SUMMARY | ||
73 | #define jffs2_can_mark_obsolete(c) (0) | ||
74 | #else | ||
69 | #define jffs2_can_mark_obsolete(c) (1) | 75 | #define jffs2_can_mark_obsolete(c) (1) |
76 | #endif | ||
77 | |||
70 | #define jffs2_is_writebuffered(c) (0) | 78 | #define jffs2_is_writebuffered(c) (0) |
71 | #define jffs2_cleanmarker_oob(c) (0) | 79 | #define jffs2_cleanmarker_oob(c) (0) |
72 | #define jffs2_write_nand_cleanmarker(c,jeb) (-EIO) | 80 | #define jffs2_write_nand_cleanmarker(c,jeb) (-EIO) |
73 | 81 | ||
74 | #define jffs2_flash_write(c, ofs, len, retlen, buf) ((c)->mtd->write((c)->mtd, ofs, len, retlen, buf)) | 82 | #define jffs2_flash_write(c, ofs, len, retlen, buf) jffs2_flash_direct_write(c, ofs, len, retlen, buf) |
75 | #define jffs2_flash_read(c, ofs, len, retlen, buf) ((c)->mtd->read((c)->mtd, ofs, len, retlen, buf)) | 83 | #define jffs2_flash_read(c, ofs, len, retlen, buf) ((c)->mtd->read((c)->mtd, ofs, len, retlen, buf)) |
76 | #define jffs2_flush_wbuf_pad(c) ({ (void)(c), 0; }) | 84 | #define jffs2_flush_wbuf_pad(c) ({ do{} while(0); (void)(c), 0; }) |
77 | #define jffs2_flush_wbuf_gc(c, i) ({ (void)(c), (void) i, 0; }) | 85 | #define jffs2_flush_wbuf_gc(c, i) ({ do{} while(0); (void)(c), (void) i, 0; }) |
78 | #define jffs2_write_nand_badblock(c,jeb,bad_offset) (1) | 86 | #define jffs2_write_nand_badblock(c,jeb,bad_offset) (1) |
79 | #define jffs2_nand_flash_setup(c) (0) | 87 | #define jffs2_nand_flash_setup(c) (0) |
80 | #define jffs2_nand_flash_cleanup(c) do {} while(0) | 88 | #define jffs2_nand_flash_cleanup(c) do {} while(0) |
@@ -84,16 +92,26 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f) | |||
84 | #define jffs2_wbuf_process NULL | 92 | #define jffs2_wbuf_process NULL |
85 | #define jffs2_nor_ecc(c) (0) | 93 | #define jffs2_nor_ecc(c) (0) |
86 | #define jffs2_dataflash(c) (0) | 94 | #define jffs2_dataflash(c) (0) |
95 | #define jffs2_nor_wbuf_flash(c) (0) | ||
87 | #define jffs2_nor_ecc_flash_setup(c) (0) | 96 | #define jffs2_nor_ecc_flash_setup(c) (0) |
88 | #define jffs2_nor_ecc_flash_cleanup(c) do {} while (0) | 97 | #define jffs2_nor_ecc_flash_cleanup(c) do {} while (0) |
89 | #define jffs2_dataflash_setup(c) (0) | 98 | #define jffs2_dataflash_setup(c) (0) |
90 | #define jffs2_dataflash_cleanup(c) do {} while (0) | 99 | #define jffs2_dataflash_cleanup(c) do {} while (0) |
100 | #define jffs2_nor_wbuf_flash_setup(c) (0) | ||
101 | #define jffs2_nor_wbuf_flash_cleanup(c) do {} while (0) | ||
91 | 102 | ||
92 | #else /* NAND and/or ECC'd NOR support present */ | 103 | #else /* NAND and/or ECC'd NOR support present */ |
93 | 104 | ||
94 | #define jffs2_is_writebuffered(c) (c->wbuf != NULL) | 105 | #define jffs2_is_writebuffered(c) (c->wbuf != NULL) |
95 | #define SECTOR_ADDR(x) ( ((unsigned long)(x) / (unsigned long)(c->sector_size)) * c->sector_size ) | 106 | |
96 | #define jffs2_can_mark_obsolete(c) ((c->mtd->type == MTD_NORFLASH && !(c->mtd->flags & MTD_ECC)) || c->mtd->type == MTD_RAM) | 107 | #ifdef CONFIG_JFFS2_SUMMARY |
108 | #define jffs2_can_mark_obsolete(c) (0) | ||
109 | #else | ||
110 | #define jffs2_can_mark_obsolete(c) \ | ||
111 | ((c->mtd->type == MTD_NORFLASH && !(c->mtd->flags & (MTD_ECC|MTD_PROGRAM_REGIONS))) || \ | ||
112 | c->mtd->type == MTD_RAM) | ||
113 | #endif | ||
114 | |||
97 | #define jffs2_cleanmarker_oob(c) (c->mtd->type == MTD_NANDFLASH) | 115 | #define jffs2_cleanmarker_oob(c) (c->mtd->type == MTD_NANDFLASH) |
98 | 116 | ||
99 | #define jffs2_flash_write_oob(c, ofs, len, retlen, buf) ((c)->mtd->write_oob((c)->mtd, ofs, len, retlen, buf)) | 117 | #define jffs2_flash_write_oob(c, ofs, len, retlen, buf) ((c)->mtd->write_oob((c)->mtd, ofs, len, retlen, buf)) |
@@ -123,6 +141,10 @@ void jffs2_nor_ecc_flash_cleanup(struct jffs2_sb_info *c); | |||
123 | int jffs2_dataflash_setup(struct jffs2_sb_info *c); | 141 | int jffs2_dataflash_setup(struct jffs2_sb_info *c); |
124 | void jffs2_dataflash_cleanup(struct jffs2_sb_info *c); | 142 | void jffs2_dataflash_cleanup(struct jffs2_sb_info *c); |
125 | 143 | ||
144 | #define jffs2_nor_wbuf_flash(c) (c->mtd->type == MTD_NORFLASH && (c->mtd->flags & MTD_PROGRAM_REGIONS)) | ||
145 | int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c); | ||
146 | void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c); | ||
147 | |||
126 | #endif /* WRITEBUFFER */ | 148 | #endif /* WRITEBUFFER */ |
127 | 149 | ||
128 | /* erase.c */ | 150 | /* erase.c */ |
@@ -169,20 +191,21 @@ void jffs2_gc_release_inode(struct jffs2_sb_info *c, | |||
169 | struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c, | 191 | struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c, |
170 | int inum, int nlink); | 192 | int inum, int nlink); |
171 | 193 | ||
172 | unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c, | 194 | unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c, |
173 | struct jffs2_inode_info *f, | 195 | struct jffs2_inode_info *f, |
174 | unsigned long offset, | 196 | unsigned long offset, |
175 | unsigned long *priv); | 197 | unsigned long *priv); |
176 | void jffs2_gc_release_page(struct jffs2_sb_info *c, | 198 | void jffs2_gc_release_page(struct jffs2_sb_info *c, |
177 | unsigned char *pg, | 199 | unsigned char *pg, |
178 | unsigned long *priv); | 200 | unsigned long *priv); |
179 | void jffs2_flash_cleanup(struct jffs2_sb_info *c); | 201 | void jffs2_flash_cleanup(struct jffs2_sb_info *c); |
180 | 202 | ||
181 | 203 | ||
182 | /* writev.c */ | 204 | /* writev.c */ |
183 | int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs, | 205 | int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs, |
184 | unsigned long count, loff_t to, size_t *retlen); | 206 | unsigned long count, loff_t to, size_t *retlen); |
185 | 207 | int jffs2_flash_direct_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, | |
208 | size_t *retlen, const u_char *buf); | ||
186 | 209 | ||
187 | #endif /* __JFFS2_OS_LINUX_H__ */ | 210 | #endif /* __JFFS2_OS_LINUX_H__ */ |
188 | 211 | ||
diff --git a/fs/jffs2/read.c b/fs/jffs2/read.c index c7f9068907cf..f3b86da833ba 100644 --- a/fs/jffs2/read.c +++ b/fs/jffs2/read.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: read.c,v 1.39 2005/03/01 10:34:03 dedekind Exp $ | 10 | * $Id: read.c,v 1.42 2005/11/07 11:14:41 gleixner Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -43,7 +43,7 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
43 | } | 43 | } |
44 | if (readlen != sizeof(*ri)) { | 44 | if (readlen != sizeof(*ri)) { |
45 | jffs2_free_raw_inode(ri); | 45 | jffs2_free_raw_inode(ri); |
46 | printk(KERN_WARNING "Short read from 0x%08x: wanted 0x%zx bytes, got 0x%zx\n", | 46 | printk(KERN_WARNING "Short read from 0x%08x: wanted 0x%zx bytes, got 0x%zx\n", |
47 | ref_offset(fd->raw), sizeof(*ri), readlen); | 47 | ref_offset(fd->raw), sizeof(*ri), readlen); |
48 | return -EIO; | 48 | return -EIO; |
49 | } | 49 | } |
@@ -61,7 +61,7 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
61 | } | 61 | } |
62 | /* There was a bug where we wrote hole nodes out with csize/dsize | 62 | /* There was a bug where we wrote hole nodes out with csize/dsize |
63 | swapped. Deal with it */ | 63 | swapped. Deal with it */ |
64 | if (ri->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(ri->dsize) && | 64 | if (ri->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(ri->dsize) && |
65 | je32_to_cpu(ri->csize)) { | 65 | je32_to_cpu(ri->csize)) { |
66 | ri->dsize = ri->csize; | 66 | ri->dsize = ri->csize; |
67 | ri->csize = cpu_to_je32(0); | 67 | ri->csize = cpu_to_je32(0); |
@@ -74,7 +74,7 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
74 | goto out_ri; | 74 | goto out_ri; |
75 | }); | 75 | }); |
76 | 76 | ||
77 | 77 | ||
78 | if (ri->compr == JFFS2_COMPR_ZERO) { | 78 | if (ri->compr == JFFS2_COMPR_ZERO) { |
79 | memset(buf, 0, len); | 79 | memset(buf, 0, len); |
80 | goto out_ri; | 80 | goto out_ri; |
@@ -82,8 +82,8 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
82 | 82 | ||
83 | /* Cases: | 83 | /* Cases: |
84 | Reading whole node and it's uncompressed - read directly to buffer provided, check CRC. | 84 | Reading whole node and it's uncompressed - read directly to buffer provided, check CRC. |
85 | Reading whole node and it's compressed - read into comprbuf, check CRC and decompress to buffer provided | 85 | Reading whole node and it's compressed - read into comprbuf, check CRC and decompress to buffer provided |
86 | Reading partial node and it's uncompressed - read into readbuf, check CRC, and copy | 86 | Reading partial node and it's uncompressed - read into readbuf, check CRC, and copy |
87 | Reading partial node and it's compressed - read into readbuf, check checksum, decompress to decomprbuf and copy | 87 | Reading partial node and it's compressed - read into readbuf, check checksum, decompress to decomprbuf and copy |
88 | */ | 88 | */ |
89 | if (ri->compr == JFFS2_COMPR_NONE && len == je32_to_cpu(ri->dsize)) { | 89 | if (ri->compr == JFFS2_COMPR_NONE && len == je32_to_cpu(ri->dsize)) { |
@@ -129,7 +129,7 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
129 | D2(printk(KERN_DEBUG "Data CRC matches calculated CRC %08x\n", crc)); | 129 | D2(printk(KERN_DEBUG "Data CRC matches calculated CRC %08x\n", crc)); |
130 | if (ri->compr != JFFS2_COMPR_NONE) { | 130 | if (ri->compr != JFFS2_COMPR_NONE) { |
131 | D2(printk(KERN_DEBUG "Decompress %d bytes from %p to %d bytes at %p\n", | 131 | D2(printk(KERN_DEBUG "Decompress %d bytes from %p to %d bytes at %p\n", |
132 | je32_to_cpu(ri->csize), readbuf, je32_to_cpu(ri->dsize), decomprbuf)); | 132 | je32_to_cpu(ri->csize), readbuf, je32_to_cpu(ri->dsize), decomprbuf)); |
133 | ret = jffs2_decompress(c, f, ri->compr | (ri->usercompr << 8), readbuf, decomprbuf, je32_to_cpu(ri->csize), je32_to_cpu(ri->dsize)); | 133 | ret = jffs2_decompress(c, f, ri->compr | (ri->usercompr << 8), readbuf, decomprbuf, je32_to_cpu(ri->csize), je32_to_cpu(ri->dsize)); |
134 | if (ret) { | 134 | if (ret) { |
135 | printk(KERN_WARNING "Error: jffs2_decompress returned %d\n", ret); | 135 | printk(KERN_WARNING "Error: jffs2_decompress returned %d\n", ret); |
@@ -174,7 +174,6 @@ int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
174 | if (frag) { | 174 | if (frag) { |
175 | D1(printk(KERN_NOTICE "Eep. Hole in ino #%u fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", f->inocache->ino, frag->ofs, offset)); | 175 | D1(printk(KERN_NOTICE "Eep. Hole in ino #%u fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", f->inocache->ino, frag->ofs, offset)); |
176 | holesize = min(holesize, frag->ofs - offset); | 176 | holesize = min(holesize, frag->ofs - offset); |
177 | D2(jffs2_print_frag_list(f)); | ||
178 | } | 177 | } |
179 | D1(printk(KERN_DEBUG "Filling non-frag hole from %d-%d\n", offset, offset+holesize)); | 178 | D1(printk(KERN_DEBUG "Filling non-frag hole from %d-%d\n", offset, offset+holesize)); |
180 | memset(buf, 0, holesize); | 179 | memset(buf, 0, holesize); |
@@ -192,7 +191,7 @@ int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
192 | } else { | 191 | } else { |
193 | uint32_t readlen; | 192 | uint32_t readlen; |
194 | uint32_t fragofs; /* offset within the frag to start reading */ | 193 | uint32_t fragofs; /* offset within the frag to start reading */ |
195 | 194 | ||
196 | fragofs = offset - frag->ofs; | 195 | fragofs = offset - frag->ofs; |
197 | readlen = min(frag->size - fragofs, end - offset); | 196 | readlen = min(frag->size - fragofs, end - offset); |
198 | D1(printk(KERN_DEBUG "Reading %d-%d from node at 0x%08x (%d)\n", | 197 | D1(printk(KERN_DEBUG "Reading %d-%d from node at 0x%08x (%d)\n", |
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c index 5b2a83599d73..5f0652df5d47 100644 --- a/fs/jffs2/readinode.c +++ b/fs/jffs2/readinode.c | |||
@@ -7,11 +7,12 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: readinode.c,v 1.125 2005/07/10 13:13:55 dedekind Exp $ | 10 | * $Id: readinode.c,v 1.143 2005/11/07 11:14:41 gleixner Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
15 | #include <linux/sched.h> | ||
15 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
16 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
17 | #include <linux/crc32.h> | 18 | #include <linux/crc32.h> |
@@ -20,502 +21,631 @@ | |||
20 | #include <linux/compiler.h> | 21 | #include <linux/compiler.h> |
21 | #include "nodelist.h" | 22 | #include "nodelist.h" |
22 | 23 | ||
23 | static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *list, struct jffs2_node_frag *newfrag); | 24 | /* |
24 | 25 | * Put a new tmp_dnode_info into the temporaty RB-tree, keeping the list in | |
25 | #if CONFIG_JFFS2_FS_DEBUG >= 2 | 26 | * order of increasing version. |
26 | static void jffs2_print_fragtree(struct rb_root *list, int permitbug) | 27 | */ |
28 | static void jffs2_add_tn_to_tree(struct jffs2_tmp_dnode_info *tn, struct rb_root *list) | ||
27 | { | 29 | { |
28 | struct jffs2_node_frag *this = frag_first(list); | 30 | struct rb_node **p = &list->rb_node; |
29 | uint32_t lastofs = 0; | 31 | struct rb_node * parent = NULL; |
30 | int buggy = 0; | 32 | struct jffs2_tmp_dnode_info *this; |
31 | 33 | ||
32 | while(this) { | 34 | while (*p) { |
33 | if (this->node) | 35 | parent = *p; |
34 | printk(KERN_DEBUG "frag %04x-%04x: 0x%08x(%d) on flash (*%p). left (%p), right (%p), parent (%p)\n", | 36 | this = rb_entry(parent, struct jffs2_tmp_dnode_info, rb); |
35 | this->ofs, this->ofs+this->size, ref_offset(this->node->raw), ref_flags(this->node->raw), | 37 | |
36 | this, frag_left(this), frag_right(this), frag_parent(this)); | 38 | /* There may actually be a collision here, but it doesn't |
37 | else | 39 | actually matter. As long as the two nodes with the same |
38 | printk(KERN_DEBUG "frag %04x-%04x: hole (*%p). left (%p} right (%p), parent (%p)\n", this->ofs, | 40 | version are together, it's all fine. */ |
39 | this->ofs+this->size, this, frag_left(this), frag_right(this), frag_parent(this)); | 41 | if (tn->version > this->version) |
40 | if (this->ofs != lastofs) | 42 | p = &(*p)->rb_left; |
41 | buggy = 1; | 43 | else |
42 | lastofs = this->ofs+this->size; | 44 | p = &(*p)->rb_right; |
43 | this = frag_next(this); | ||
44 | } | 45 | } |
45 | if (buggy && !permitbug) { | 46 | |
46 | printk(KERN_CRIT "Frag tree got a hole in it\n"); | 47 | rb_link_node(&tn->rb, parent, p); |
47 | BUG(); | 48 | rb_insert_color(&tn->rb, list); |
49 | } | ||
50 | |||
51 | static void jffs2_free_tmp_dnode_info_list(struct rb_root *list) | ||
52 | { | ||
53 | struct rb_node *this; | ||
54 | struct jffs2_tmp_dnode_info *tn; | ||
55 | |||
56 | this = list->rb_node; | ||
57 | |||
58 | /* Now at bottom of tree */ | ||
59 | while (this) { | ||
60 | if (this->rb_left) | ||
61 | this = this->rb_left; | ||
62 | else if (this->rb_right) | ||
63 | this = this->rb_right; | ||
64 | else { | ||
65 | tn = rb_entry(this, struct jffs2_tmp_dnode_info, rb); | ||
66 | jffs2_free_full_dnode(tn->fn); | ||
67 | jffs2_free_tmp_dnode_info(tn); | ||
68 | |||
69 | this = this->rb_parent; | ||
70 | if (!this) | ||
71 | break; | ||
72 | |||
73 | if (this->rb_left == &tn->rb) | ||
74 | this->rb_left = NULL; | ||
75 | else if (this->rb_right == &tn->rb) | ||
76 | this->rb_right = NULL; | ||
77 | else BUG(); | ||
78 | } | ||
48 | } | 79 | } |
80 | list->rb_node = NULL; | ||
49 | } | 81 | } |
50 | 82 | ||
51 | void jffs2_print_frag_list(struct jffs2_inode_info *f) | 83 | static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd) |
52 | { | 84 | { |
53 | jffs2_print_fragtree(&f->fragtree, 0); | 85 | struct jffs2_full_dirent *next; |
54 | 86 | ||
55 | if (f->metadata) { | 87 | while (fd) { |
56 | printk(KERN_DEBUG "metadata at 0x%08x\n", ref_offset(f->metadata->raw)); | 88 | next = fd->next; |
89 | jffs2_free_full_dirent(fd); | ||
90 | fd = next; | ||
57 | } | 91 | } |
58 | } | 92 | } |
59 | #endif | ||
60 | 93 | ||
61 | #if CONFIG_JFFS2_FS_DEBUG >= 1 | 94 | /* Returns first valid node after 'ref'. May return 'ref' */ |
62 | static int jffs2_sanitycheck_fragtree(struct jffs2_inode_info *f) | 95 | static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref) |
63 | { | 96 | { |
64 | struct jffs2_node_frag *frag; | 97 | while (ref && ref->next_in_ino) { |
65 | int bitched = 0; | 98 | if (!ref_obsolete(ref)) |
66 | 99 | return ref; | |
67 | for (frag = frag_first(&f->fragtree); frag; frag = frag_next(frag)) { | 100 | dbg_noderef("node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref)); |
101 | ref = ref->next_in_ino; | ||
102 | } | ||
103 | return NULL; | ||
104 | } | ||
68 | 105 | ||
69 | struct jffs2_full_dnode *fn = frag->node; | 106 | /* |
70 | if (!fn || !fn->raw) | 107 | * Helper function for jffs2_get_inode_nodes(). |
71 | continue; | 108 | * It is called every time an directory entry node is found. |
109 | * | ||
110 | * Returns: 0 on succes; | ||
111 | * 1 if the node should be marked obsolete; | ||
112 | * negative error code on failure. | ||
113 | */ | ||
114 | static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, | ||
115 | struct jffs2_raw_dirent *rd, uint32_t read, struct jffs2_full_dirent **fdp, | ||
116 | uint32_t *latest_mctime, uint32_t *mctime_ver) | ||
117 | { | ||
118 | struct jffs2_full_dirent *fd; | ||
119 | |||
120 | /* The direntry nodes are checked during the flash scanning */ | ||
121 | BUG_ON(ref_flags(ref) == REF_UNCHECKED); | ||
122 | /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */ | ||
123 | BUG_ON(ref_obsolete(ref)); | ||
124 | |||
125 | /* Sanity check */ | ||
126 | if (unlikely(PAD((rd->nsize + sizeof(*rd))) != PAD(je32_to_cpu(rd->totlen)))) { | ||
127 | JFFS2_ERROR("illegal nsize in node at %#08x: nsize %#02x, totlen %#04x\n", | ||
128 | ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen)); | ||
129 | return 1; | ||
130 | } | ||
72 | 131 | ||
73 | if (ref_flags(fn->raw) == REF_PRISTINE) { | 132 | fd = jffs2_alloc_full_dirent(rd->nsize + 1); |
133 | if (unlikely(!fd)) | ||
134 | return -ENOMEM; | ||
74 | 135 | ||
75 | if (fn->frags > 1) { | 136 | fd->raw = ref; |
76 | printk(KERN_WARNING "REF_PRISTINE node at 0x%08x had %d frags. Tell dwmw2\n", ref_offset(fn->raw), fn->frags); | 137 | fd->version = je32_to_cpu(rd->version); |
77 | bitched = 1; | 138 | fd->ino = je32_to_cpu(rd->ino); |
78 | } | 139 | fd->type = rd->type; |
79 | /* A hole node which isn't multi-page should be garbage-collected | ||
80 | and merged anyway, so we just check for the frag size here, | ||
81 | rather than mucking around with actually reading the node | ||
82 | and checking the compression type, which is the real way | ||
83 | to tell a hole node. */ | ||
84 | if (frag->ofs & (PAGE_CACHE_SIZE-1) && frag_prev(frag) && frag_prev(frag)->size < PAGE_CACHE_SIZE && frag_prev(frag)->node) { | ||
85 | printk(KERN_WARNING "REF_PRISTINE node at 0x%08x had a previous non-hole frag in the same page. Tell dwmw2\n", | ||
86 | ref_offset(fn->raw)); | ||
87 | bitched = 1; | ||
88 | } | ||
89 | 140 | ||
90 | if ((frag->ofs+frag->size) & (PAGE_CACHE_SIZE-1) && frag_next(frag) && frag_next(frag)->size < PAGE_CACHE_SIZE && frag_next(frag)->node) { | 141 | /* Pick out the mctime of the latest dirent */ |
91 | printk(KERN_WARNING "REF_PRISTINE node at 0x%08x (%08x-%08x) had a following non-hole frag in the same page. Tell dwmw2\n", | 142 | if(fd->version > *mctime_ver && je32_to_cpu(rd->mctime)) { |
92 | ref_offset(fn->raw), frag->ofs, frag->ofs+frag->size); | 143 | *mctime_ver = fd->version; |
93 | bitched = 1; | 144 | *latest_mctime = je32_to_cpu(rd->mctime); |
94 | } | ||
95 | } | ||
96 | } | 145 | } |
97 | |||
98 | if (bitched) { | ||
99 | struct jffs2_node_frag *thisfrag; | ||
100 | |||
101 | printk(KERN_WARNING "Inode is #%u\n", f->inocache->ino); | ||
102 | thisfrag = frag_first(&f->fragtree); | ||
103 | while (thisfrag) { | ||
104 | if (!thisfrag->node) { | ||
105 | printk("Frag @0x%x-0x%x; node-less hole\n", | ||
106 | thisfrag->ofs, thisfrag->size + thisfrag->ofs); | ||
107 | } else if (!thisfrag->node->raw) { | ||
108 | printk("Frag @0x%x-0x%x; raw-less hole\n", | ||
109 | thisfrag->ofs, thisfrag->size + thisfrag->ofs); | ||
110 | } else { | ||
111 | printk("Frag @0x%x-0x%x; raw at 0x%08x(%d) (0x%x-0x%x)\n", | ||
112 | thisfrag->ofs, thisfrag->size + thisfrag->ofs, | ||
113 | ref_offset(thisfrag->node->raw), ref_flags(thisfrag->node->raw), | ||
114 | thisfrag->node->ofs, thisfrag->node->ofs+thisfrag->node->size); | ||
115 | } | ||
116 | thisfrag = frag_next(thisfrag); | ||
117 | } | ||
118 | } | ||
119 | return bitched; | ||
120 | } | ||
121 | #endif /* D1 */ | ||
122 | 146 | ||
123 | static void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *this) | 147 | /* |
124 | { | 148 | * Copy as much of the name as possible from the raw |
125 | if (this->node) { | 149 | * dirent we've already read from the flash. |
126 | this->node->frags--; | 150 | */ |
127 | if (!this->node->frags) { | 151 | if (read > sizeof(*rd)) |
128 | /* The node has no valid frags left. It's totally obsoleted */ | 152 | memcpy(&fd->name[0], &rd->name[0], |
129 | D2(printk(KERN_DEBUG "Marking old node @0x%08x (0x%04x-0x%04x) obsolete\n", | 153 | min_t(uint32_t, rd->nsize, (read - sizeof(*rd)) )); |
130 | ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size)); | 154 | |
131 | jffs2_mark_node_obsolete(c, this->node->raw); | 155 | /* Do we need to copy any more of the name directly from the flash? */ |
132 | jffs2_free_full_dnode(this->node); | 156 | if (rd->nsize + sizeof(*rd) > read) { |
133 | } else { | 157 | /* FIXME: point() */ |
134 | D2(printk(KERN_DEBUG "Marking old node @0x%08x (0x%04x-0x%04x) REF_NORMAL. frags is %d\n", | 158 | int err; |
135 | ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size, | 159 | int already = read - sizeof(*rd); |
136 | this->node->frags)); | 160 | |
137 | mark_ref_normal(this->node->raw); | 161 | err = jffs2_flash_read(c, (ref_offset(ref)) + read, |
162 | rd->nsize - already, &read, &fd->name[already]); | ||
163 | if (unlikely(read != rd->nsize - already) && likely(!err)) | ||
164 | return -EIO; | ||
165 | |||
166 | if (unlikely(err)) { | ||
167 | JFFS2_ERROR("read remainder of name: error %d\n", err); | ||
168 | jffs2_free_full_dirent(fd); | ||
169 | return -EIO; | ||
138 | } | 170 | } |
139 | |||
140 | } | 171 | } |
141 | jffs2_free_node_frag(this); | 172 | |
173 | fd->nhash = full_name_hash(fd->name, rd->nsize); | ||
174 | fd->next = NULL; | ||
175 | fd->name[rd->nsize] = '\0'; | ||
176 | |||
177 | /* | ||
178 | * Wheee. We now have a complete jffs2_full_dirent structure, with | ||
179 | * the name in it and everything. Link it into the list | ||
180 | */ | ||
181 | jffs2_add_fd_to_list(c, fd, fdp); | ||
182 | |||
183 | return 0; | ||
142 | } | 184 | } |
143 | 185 | ||
144 | /* Given an inode, probably with existing list of fragments, add the new node | 186 | /* |
145 | * to the fragment list. | 187 | * Helper function for jffs2_get_inode_nodes(). |
188 | * It is called every time an inode node is found. | ||
189 | * | ||
190 | * Returns: 0 on succes; | ||
191 | * 1 if the node should be marked obsolete; | ||
192 | * negative error code on failure. | ||
146 | */ | 193 | */ |
147 | int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn) | 194 | static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, |
195 | struct jffs2_raw_inode *rd, struct rb_root *tnp, int rdlen, | ||
196 | uint32_t *latest_mctime, uint32_t *mctime_ver) | ||
148 | { | 197 | { |
149 | int ret; | 198 | struct jffs2_tmp_dnode_info *tn; |
150 | struct jffs2_node_frag *newfrag; | 199 | uint32_t len, csize; |
151 | 200 | int ret = 1; | |
152 | D1(printk(KERN_DEBUG "jffs2_add_full_dnode_to_inode(ino #%u, f %p, fn %p)\n", f->inocache->ino, f, fn)); | ||
153 | 201 | ||
154 | if (unlikely(!fn->size)) | 202 | /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */ |
155 | return 0; | 203 | BUG_ON(ref_obsolete(ref)); |
156 | 204 | ||
157 | newfrag = jffs2_alloc_node_frag(); | 205 | tn = jffs2_alloc_tmp_dnode_info(); |
158 | if (unlikely(!newfrag)) | 206 | if (!tn) { |
207 | JFFS2_ERROR("failed to allocate tn (%d bytes).\n", sizeof(*tn)); | ||
159 | return -ENOMEM; | 208 | return -ENOMEM; |
209 | } | ||
160 | 210 | ||
161 | D2(printk(KERN_DEBUG "adding node %04x-%04x @0x%08x on flash, newfrag *%p\n", | 211 | tn->partial_crc = 0; |
162 | fn->ofs, fn->ofs+fn->size, ref_offset(fn->raw), newfrag)); | 212 | csize = je32_to_cpu(rd->csize); |
163 | |||
164 | newfrag->ofs = fn->ofs; | ||
165 | newfrag->size = fn->size; | ||
166 | newfrag->node = fn; | ||
167 | newfrag->node->frags = 1; | ||
168 | 213 | ||
169 | ret = jffs2_add_frag_to_fragtree(c, &f->fragtree, newfrag); | 214 | /* If we've never checked the CRCs on this node, check them now */ |
170 | if (ret) | 215 | if (ref_flags(ref) == REF_UNCHECKED) { |
171 | return ret; | 216 | uint32_t crc; |
172 | 217 | ||
173 | /* If we now share a page with other nodes, mark either previous | 218 | crc = crc32(0, rd, sizeof(*rd) - 8); |
174 | or next node REF_NORMAL, as appropriate. */ | 219 | if (unlikely(crc != je32_to_cpu(rd->node_crc))) { |
175 | if (newfrag->ofs & (PAGE_CACHE_SIZE-1)) { | 220 | JFFS2_NOTICE("header CRC failed on node at %#08x: read %#08x, calculated %#08x\n", |
176 | struct jffs2_node_frag *prev = frag_prev(newfrag); | 221 | ref_offset(ref), je32_to_cpu(rd->node_crc), crc); |
222 | goto free_out; | ||
223 | } | ||
177 | 224 | ||
178 | mark_ref_normal(fn->raw); | 225 | /* Sanity checks */ |
179 | /* If we don't start at zero there's _always_ a previous */ | 226 | if (unlikely(je32_to_cpu(rd->offset) > je32_to_cpu(rd->isize)) || |
180 | if (prev->node) | 227 | unlikely(PAD(je32_to_cpu(rd->csize) + sizeof(*rd)) != PAD(je32_to_cpu(rd->totlen)))) { |
181 | mark_ref_normal(prev->node->raw); | 228 | JFFS2_WARNING("inode node header CRC is corrupted at %#08x\n", ref_offset(ref)); |
182 | } | 229 | jffs2_dbg_dump_node(c, ref_offset(ref)); |
230 | goto free_out; | ||
231 | } | ||
183 | 232 | ||
184 | if ((newfrag->ofs+newfrag->size) & (PAGE_CACHE_SIZE-1)) { | 233 | if (jffs2_is_writebuffered(c) && csize != 0) { |
185 | struct jffs2_node_frag *next = frag_next(newfrag); | 234 | /* At this point we are supposed to check the data CRC |
186 | 235 | * of our unchecked node. But thus far, we do not | |
187 | if (next) { | 236 | * know whether the node is valid or obsolete. To |
188 | mark_ref_normal(fn->raw); | 237 | * figure this out, we need to walk all the nodes of |
189 | if (next->node) | 238 | * the inode and build the inode fragtree. We don't |
190 | mark_ref_normal(next->node->raw); | 239 | * want to spend time checking data of nodes which may |
240 | * later be found to be obsolete. So we put off the full | ||
241 | * data CRC checking until we have read all the inode | ||
242 | * nodes and have started building the fragtree. | ||
243 | * | ||
244 | * The fragtree is being built starting with nodes | ||
245 | * having the highest version number, so we'll be able | ||
246 | * to detect whether a node is valid (i.e., it is not | ||
247 | * overlapped by a node with higher version) or not. | ||
248 | * And we'll be able to check only those nodes, which | ||
249 | * are not obsolete. | ||
250 | * | ||
251 | * Of course, this optimization only makes sense in case | ||
252 | * of NAND flashes (or other flashes whith | ||
253 | * !jffs2_can_mark_obsolete()), since on NOR flashes | ||
254 | * nodes are marked obsolete physically. | ||
255 | * | ||
256 | * Since NAND flashes (or other flashes with | ||
257 | * jffs2_is_writebuffered(c)) are anyway read by | ||
258 | * fractions of c->wbuf_pagesize, and we have just read | ||
259 | * the node header, it is likely that the starting part | ||
260 | * of the node data is also read when we read the | ||
261 | * header. So we don't mind to check the CRC of the | ||
262 | * starting part of the data of the node now, and check | ||
263 | * the second part later (in jffs2_check_node_data()). | ||
264 | * Of course, we will not need to re-read and re-check | ||
265 | * the NAND page which we have just read. This is why we | ||
266 | * read the whole NAND page at jffs2_get_inode_nodes(), | ||
267 | * while we needed only the node header. | ||
268 | */ | ||
269 | unsigned char *buf; | ||
270 | |||
271 | /* 'buf' will point to the start of data */ | ||
272 | buf = (unsigned char *)rd + sizeof(*rd); | ||
273 | /* len will be the read data length */ | ||
274 | len = min_t(uint32_t, rdlen - sizeof(*rd), csize); | ||
275 | tn->partial_crc = crc32(0, buf, len); | ||
276 | |||
277 | dbg_readinode("Calculates CRC (%#08x) for %d bytes, csize %d\n", tn->partial_crc, len, csize); | ||
278 | |||
279 | /* If we actually calculated the whole data CRC | ||
280 | * and it is wrong, drop the node. */ | ||
281 | if (len >= csize && unlikely(tn->partial_crc != je32_to_cpu(rd->data_crc))) { | ||
282 | JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n", | ||
283 | ref_offset(ref), tn->partial_crc, je32_to_cpu(rd->data_crc)); | ||
284 | goto free_out; | ||
285 | } | ||
286 | |||
287 | } else if (csize == 0) { | ||
288 | /* | ||
289 | * We checked the header CRC. If the node has no data, adjust | ||
290 | * the space accounting now. For other nodes this will be done | ||
291 | * later either when the node is marked obsolete or when its | ||
292 | * data is checked. | ||
293 | */ | ||
294 | struct jffs2_eraseblock *jeb; | ||
295 | |||
296 | dbg_readinode("the node has no data.\n"); | ||
297 | jeb = &c->blocks[ref->flash_offset / c->sector_size]; | ||
298 | len = ref_totlen(c, jeb, ref); | ||
299 | |||
300 | spin_lock(&c->erase_completion_lock); | ||
301 | jeb->used_size += len; | ||
302 | jeb->unchecked_size -= len; | ||
303 | c->used_size += len; | ||
304 | c->unchecked_size -= len; | ||
305 | ref->flash_offset = ref_offset(ref) | REF_NORMAL; | ||
306 | spin_unlock(&c->erase_completion_lock); | ||
191 | } | 307 | } |
192 | } | 308 | } |
193 | D2(if (jffs2_sanitycheck_fragtree(f)) { | 309 | |
194 | printk(KERN_WARNING "Just added node %04x-%04x @0x%08x on flash, newfrag *%p\n", | 310 | tn->fn = jffs2_alloc_full_dnode(); |
195 | fn->ofs, fn->ofs+fn->size, ref_offset(fn->raw), newfrag); | 311 | if (!tn->fn) { |
196 | return 0; | 312 | JFFS2_ERROR("alloc fn failed\n"); |
197 | }) | 313 | ret = -ENOMEM; |
198 | D2(jffs2_print_frag_list(f)); | 314 | goto free_out; |
315 | } | ||
316 | |||
317 | tn->version = je32_to_cpu(rd->version); | ||
318 | tn->fn->ofs = je32_to_cpu(rd->offset); | ||
319 | tn->data_crc = je32_to_cpu(rd->data_crc); | ||
320 | tn->csize = csize; | ||
321 | tn->fn->raw = ref; | ||
322 | |||
323 | /* There was a bug where we wrote hole nodes out with | ||
324 | csize/dsize swapped. Deal with it */ | ||
325 | if (rd->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(rd->dsize) && csize) | ||
326 | tn->fn->size = csize; | ||
327 | else // normal case... | ||
328 | tn->fn->size = je32_to_cpu(rd->dsize); | ||
329 | |||
330 | dbg_readinode("dnode @%08x: ver %u, offset %#04x, dsize %#04x, csize %#04x\n", | ||
331 | ref_offset(ref), je32_to_cpu(rd->version), je32_to_cpu(rd->offset), je32_to_cpu(rd->dsize), csize); | ||
332 | |||
333 | jffs2_add_tn_to_tree(tn, tnp); | ||
334 | |||
199 | return 0; | 335 | return 0; |
336 | |||
337 | free_out: | ||
338 | jffs2_free_tmp_dnode_info(tn); | ||
339 | return ret; | ||
200 | } | 340 | } |
201 | 341 | ||
202 | /* Doesn't set inode->i_size */ | 342 | /* |
203 | static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *list, struct jffs2_node_frag *newfrag) | 343 | * Helper function for jffs2_get_inode_nodes(). |
344 | * It is called every time an unknown node is found. | ||
345 | * | ||
346 | * Returns: 0 on succes; | ||
347 | * 1 if the node should be marked obsolete; | ||
348 | * negative error code on failure. | ||
349 | */ | ||
350 | static inline int read_unknown(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, struct jffs2_unknown_node *un) | ||
204 | { | 351 | { |
205 | struct jffs2_node_frag *this; | 352 | /* We don't mark unknown nodes as REF_UNCHECKED */ |
206 | uint32_t lastend; | 353 | BUG_ON(ref_flags(ref) == REF_UNCHECKED); |
207 | 354 | ||
208 | /* Skip all the nodes which are completed before this one starts */ | 355 | un->nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(un->nodetype)); |
209 | this = jffs2_lookup_node_frag(list, newfrag->node->ofs); | ||
210 | 356 | ||
211 | if (this) { | 357 | if (crc32(0, un, sizeof(struct jffs2_unknown_node) - 4) != je32_to_cpu(un->hdr_crc)) { |
212 | D2(printk(KERN_DEBUG "j_a_f_d_t_f: Lookup gave frag 0x%04x-0x%04x; phys 0x%08x (*%p)\n", | 358 | /* Hmmm. This should have been caught at scan time. */ |
213 | this->ofs, this->ofs+this->size, this->node?(ref_offset(this->node->raw)):0xffffffff, this)); | 359 | JFFS2_NOTICE("node header CRC failed at %#08x. But it must have been OK earlier.\n", ref_offset(ref)); |
214 | lastend = this->ofs + this->size; | 360 | jffs2_dbg_dump_node(c, ref_offset(ref)); |
361 | return 1; | ||
215 | } else { | 362 | } else { |
216 | D2(printk(KERN_DEBUG "j_a_f_d_t_f: Lookup gave no frag\n")); | 363 | switch(je16_to_cpu(un->nodetype) & JFFS2_COMPAT_MASK) { |
217 | lastend = 0; | ||
218 | } | ||
219 | |||
220 | /* See if we ran off the end of the list */ | ||
221 | if (lastend <= newfrag->ofs) { | ||
222 | /* We did */ | ||
223 | |||
224 | /* Check if 'this' node was on the same page as the new node. | ||
225 | If so, both 'this' and the new node get marked REF_NORMAL so | ||
226 | the GC can take a look. | ||
227 | */ | ||
228 | if (lastend && (lastend-1) >> PAGE_CACHE_SHIFT == newfrag->ofs >> PAGE_CACHE_SHIFT) { | ||
229 | if (this->node) | ||
230 | mark_ref_normal(this->node->raw); | ||
231 | mark_ref_normal(newfrag->node->raw); | ||
232 | } | ||
233 | 364 | ||
234 | if (lastend < newfrag->node->ofs) { | 365 | case JFFS2_FEATURE_INCOMPAT: |
235 | /* ... and we need to put a hole in before the new node */ | 366 | JFFS2_ERROR("unknown INCOMPAT nodetype %#04X at %#08x\n", |
236 | struct jffs2_node_frag *holefrag = jffs2_alloc_node_frag(); | 367 | je16_to_cpu(un->nodetype), ref_offset(ref)); |
237 | if (!holefrag) { | 368 | /* EEP */ |
238 | jffs2_free_node_frag(newfrag); | 369 | BUG(); |
239 | return -ENOMEM; | 370 | break; |
240 | } | 371 | |
241 | holefrag->ofs = lastend; | 372 | case JFFS2_FEATURE_ROCOMPAT: |
242 | holefrag->size = newfrag->node->ofs - lastend; | 373 | JFFS2_ERROR("unknown ROCOMPAT nodetype %#04X at %#08x\n", |
243 | holefrag->node = NULL; | 374 | je16_to_cpu(un->nodetype), ref_offset(ref)); |
244 | if (this) { | 375 | BUG_ON(!(c->flags & JFFS2_SB_FLAG_RO)); |
245 | /* By definition, the 'this' node has no right-hand child, | 376 | break; |
246 | because there are no frags with offset greater than it. | 377 | |
247 | So that's where we want to put the hole */ | 378 | case JFFS2_FEATURE_RWCOMPAT_COPY: |
248 | D2(printk(KERN_DEBUG "Adding hole frag (%p) on right of node at (%p)\n", holefrag, this)); | 379 | JFFS2_NOTICE("unknown RWCOMPAT_COPY nodetype %#04X at %#08x\n", |
249 | rb_link_node(&holefrag->rb, &this->rb, &this->rb.rb_right); | 380 | je16_to_cpu(un->nodetype), ref_offset(ref)); |
250 | } else { | 381 | break; |
251 | D2(printk(KERN_DEBUG "Adding hole frag (%p) at root of tree\n", holefrag)); | 382 | |
252 | rb_link_node(&holefrag->rb, NULL, &list->rb_node); | 383 | case JFFS2_FEATURE_RWCOMPAT_DELETE: |
253 | } | 384 | JFFS2_NOTICE("unknown RWCOMPAT_DELETE nodetype %#04X at %#08x\n", |
254 | rb_insert_color(&holefrag->rb, list); | 385 | je16_to_cpu(un->nodetype), ref_offset(ref)); |
255 | this = holefrag; | 386 | return 1; |
256 | } | ||
257 | if (this) { | ||
258 | /* By definition, the 'this' node has no right-hand child, | ||
259 | because there are no frags with offset greater than it. | ||
260 | So that's where we want to put the hole */ | ||
261 | D2(printk(KERN_DEBUG "Adding new frag (%p) on right of node at (%p)\n", newfrag, this)); | ||
262 | rb_link_node(&newfrag->rb, &this->rb, &this->rb.rb_right); | ||
263 | } else { | ||
264 | D2(printk(KERN_DEBUG "Adding new frag (%p) at root of tree\n", newfrag)); | ||
265 | rb_link_node(&newfrag->rb, NULL, &list->rb_node); | ||
266 | } | 387 | } |
267 | rb_insert_color(&newfrag->rb, list); | ||
268 | return 0; | ||
269 | } | 388 | } |
270 | 389 | ||
271 | D2(printk(KERN_DEBUG "j_a_f_d_t_f: dealing with frag 0x%04x-0x%04x; phys 0x%08x (*%p)\n", | 390 | return 0; |
272 | this->ofs, this->ofs+this->size, this->node?(ref_offset(this->node->raw)):0xffffffff, this)); | 391 | } |
273 | 392 | ||
274 | /* OK. 'this' is pointing at the first frag that newfrag->ofs at least partially obsoletes, | 393 | /* |
275 | * - i.e. newfrag->ofs < this->ofs+this->size && newfrag->ofs >= this->ofs | 394 | * Helper function for jffs2_get_inode_nodes(). |
276 | */ | 395 | * The function detects whether more data should be read and reads it if yes. |
277 | if (newfrag->ofs > this->ofs) { | 396 | * |
278 | /* This node isn't completely obsoleted. The start of it remains valid */ | 397 | * Returns: 0 on succes; |
279 | 398 | * negative error code on failure. | |
280 | /* Mark the new node and the partially covered node REF_NORMAL -- let | 399 | */ |
281 | the GC take a look at them */ | 400 | static int read_more(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, |
282 | mark_ref_normal(newfrag->node->raw); | 401 | int right_size, int *rdlen, unsigned char *buf, unsigned char *bufstart) |
283 | if (this->node) | 402 | { |
284 | mark_ref_normal(this->node->raw); | 403 | int right_len, err, len; |
285 | 404 | size_t retlen; | |
286 | if (this->ofs + this->size > newfrag->ofs + newfrag->size) { | 405 | uint32_t offs; |
287 | /* The new node splits 'this' frag into two */ | ||
288 | struct jffs2_node_frag *newfrag2 = jffs2_alloc_node_frag(); | ||
289 | if (!newfrag2) { | ||
290 | jffs2_free_node_frag(newfrag); | ||
291 | return -ENOMEM; | ||
292 | } | ||
293 | D2(printk(KERN_DEBUG "split old frag 0x%04x-0x%04x -->", this->ofs, this->ofs+this->size); | ||
294 | if (this->node) | ||
295 | printk("phys 0x%08x\n", ref_offset(this->node->raw)); | ||
296 | else | ||
297 | printk("hole\n"); | ||
298 | ) | ||
299 | |||
300 | /* New second frag pointing to this's node */ | ||
301 | newfrag2->ofs = newfrag->ofs + newfrag->size; | ||
302 | newfrag2->size = (this->ofs+this->size) - newfrag2->ofs; | ||
303 | newfrag2->node = this->node; | ||
304 | if (this->node) | ||
305 | this->node->frags++; | ||
306 | |||
307 | /* Adjust size of original 'this' */ | ||
308 | this->size = newfrag->ofs - this->ofs; | ||
309 | |||
310 | /* Now, we know there's no node with offset | ||
311 | greater than this->ofs but smaller than | ||
312 | newfrag2->ofs or newfrag->ofs, for obvious | ||
313 | reasons. So we can do a tree insert from | ||
314 | 'this' to insert newfrag, and a tree insert | ||
315 | from newfrag to insert newfrag2. */ | ||
316 | jffs2_fragtree_insert(newfrag, this); | ||
317 | rb_insert_color(&newfrag->rb, list); | ||
318 | |||
319 | jffs2_fragtree_insert(newfrag2, newfrag); | ||
320 | rb_insert_color(&newfrag2->rb, list); | ||
321 | |||
322 | return 0; | ||
323 | } | ||
324 | /* New node just reduces 'this' frag in size, doesn't split it */ | ||
325 | this->size = newfrag->ofs - this->ofs; | ||
326 | 406 | ||
327 | /* Again, we know it lives down here in the tree */ | 407 | if (jffs2_is_writebuffered(c)) { |
328 | jffs2_fragtree_insert(newfrag, this); | 408 | right_len = c->wbuf_pagesize - (bufstart - buf); |
329 | rb_insert_color(&newfrag->rb, list); | 409 | if (right_size + (int)(bufstart - buf) > c->wbuf_pagesize) |
330 | } else { | 410 | right_len += c->wbuf_pagesize; |
331 | /* New frag starts at the same point as 'this' used to. Replace | 411 | } else |
332 | it in the tree without doing a delete and insertion */ | 412 | right_len = right_size; |
333 | D2(printk(KERN_DEBUG "Inserting newfrag (*%p),%d-%d in before 'this' (*%p),%d-%d\n", | ||
334 | newfrag, newfrag->ofs, newfrag->ofs+newfrag->size, | ||
335 | this, this->ofs, this->ofs+this->size)); | ||
336 | |||
337 | rb_replace_node(&this->rb, &newfrag->rb, list); | ||
338 | |||
339 | if (newfrag->ofs + newfrag->size >= this->ofs+this->size) { | ||
340 | D2(printk(KERN_DEBUG "Obsoleting node frag %p (%x-%x)\n", this, this->ofs, this->ofs+this->size)); | ||
341 | jffs2_obsolete_node_frag(c, this); | ||
342 | } else { | ||
343 | this->ofs += newfrag->size; | ||
344 | this->size -= newfrag->size; | ||
345 | 413 | ||
346 | jffs2_fragtree_insert(this, newfrag); | 414 | if (*rdlen == right_len) |
347 | rb_insert_color(&this->rb, list); | 415 | return 0; |
348 | return 0; | 416 | |
349 | } | 417 | /* We need to read more data */ |
418 | offs = ref_offset(ref) + *rdlen; | ||
419 | if (jffs2_is_writebuffered(c)) { | ||
420 | bufstart = buf + c->wbuf_pagesize; | ||
421 | len = c->wbuf_pagesize; | ||
422 | } else { | ||
423 | bufstart = buf + *rdlen; | ||
424 | len = right_size - *rdlen; | ||
350 | } | 425 | } |
351 | /* OK, now we have newfrag added in the correct place in the tree, but | 426 | |
352 | frag_next(newfrag) may be a fragment which is overlapped by it | 427 | dbg_readinode("read more %d bytes\n", len); |
353 | */ | 428 | |
354 | while ((this = frag_next(newfrag)) && newfrag->ofs + newfrag->size >= this->ofs + this->size) { | 429 | err = jffs2_flash_read(c, offs, len, &retlen, bufstart); |
355 | /* 'this' frag is obsoleted completely. */ | 430 | if (err) { |
356 | D2(printk(KERN_DEBUG "Obsoleting node frag %p (%x-%x) and removing from tree\n", this, this->ofs, this->ofs+this->size)); | 431 | JFFS2_ERROR("can not read %d bytes from 0x%08x, " |
357 | rb_erase(&this->rb, list); | 432 | "error code: %d.\n", len, offs, err); |
358 | jffs2_obsolete_node_frag(c, this); | 433 | return err; |
359 | } | 434 | } |
360 | /* Now we're pointing at the first frag which isn't totally obsoleted by | ||
361 | the new frag */ | ||
362 | 435 | ||
363 | if (!this || newfrag->ofs + newfrag->size == this->ofs) { | 436 | if (retlen < len) { |
364 | return 0; | 437 | JFFS2_ERROR("short read at %#08x: %d instead of %d.\n", |
438 | offs, retlen, len); | ||
439 | return -EIO; | ||
365 | } | 440 | } |
366 | /* Still some overlap but we don't need to move it in the tree */ | ||
367 | this->size = (this->ofs + this->size) - (newfrag->ofs + newfrag->size); | ||
368 | this->ofs = newfrag->ofs + newfrag->size; | ||
369 | 441 | ||
370 | /* And mark them REF_NORMAL so the GC takes a look at them */ | 442 | *rdlen = right_len; |
371 | if (this->node) | ||
372 | mark_ref_normal(this->node->raw); | ||
373 | mark_ref_normal(newfrag->node->raw); | ||
374 | 443 | ||
375 | return 0; | 444 | return 0; |
376 | } | 445 | } |
377 | 446 | ||
378 | void jffs2_truncate_fraglist (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size) | 447 | /* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated |
448 | with this ino, returning the former in order of version */ | ||
449 | static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | ||
450 | struct rb_root *tnp, struct jffs2_full_dirent **fdp, | ||
451 | uint32_t *highest_version, uint32_t *latest_mctime, | ||
452 | uint32_t *mctime_ver) | ||
379 | { | 453 | { |
380 | struct jffs2_node_frag *frag = jffs2_lookup_node_frag(list, size); | 454 | struct jffs2_raw_node_ref *ref, *valid_ref; |
455 | struct rb_root ret_tn = RB_ROOT; | ||
456 | struct jffs2_full_dirent *ret_fd = NULL; | ||
457 | unsigned char *buf = NULL; | ||
458 | union jffs2_node_union *node; | ||
459 | size_t retlen; | ||
460 | int len, err; | ||
461 | |||
462 | *mctime_ver = 0; | ||
463 | |||
464 | dbg_readinode("ino #%u\n", f->inocache->ino); | ||
465 | |||
466 | if (jffs2_is_writebuffered(c)) { | ||
467 | /* | ||
468 | * If we have the write buffer, we assume the minimal I/O unit | ||
469 | * is c->wbuf_pagesize. We implement some optimizations which in | ||
470 | * this case and we need a temporary buffer of size = | ||
471 | * 2*c->wbuf_pagesize bytes (see comments in read_dnode()). | ||
472 | * Basically, we want to read not only the node header, but the | ||
473 | * whole wbuf (NAND page in case of NAND) or 2, if the node | ||
474 | * header overlaps the border between the 2 wbufs. | ||
475 | */ | ||
476 | len = 2*c->wbuf_pagesize; | ||
477 | } else { | ||
478 | /* | ||
479 | * When there is no write buffer, the size of the temporary | ||
480 | * buffer is the size of the larges node header. | ||
481 | */ | ||
482 | len = sizeof(union jffs2_node_union); | ||
483 | } | ||
381 | 484 | ||
382 | D1(printk(KERN_DEBUG "Truncating fraglist to 0x%08x bytes\n", size)); | 485 | /* FIXME: in case of NOR and available ->point() this |
486 | * needs to be fixed. */ | ||
487 | buf = kmalloc(len, GFP_KERNEL); | ||
488 | if (!buf) | ||
489 | return -ENOMEM; | ||
383 | 490 | ||
384 | /* We know frag->ofs <= size. That's what lookup does for us */ | 491 | spin_lock(&c->erase_completion_lock); |
385 | if (frag && frag->ofs != size) { | 492 | valid_ref = jffs2_first_valid_node(f->inocache->nodes); |
386 | if (frag->ofs+frag->size >= size) { | 493 | if (!valid_ref && f->inocache->ino != 1) |
387 | D1(printk(KERN_DEBUG "Truncating frag 0x%08x-0x%08x\n", frag->ofs, frag->ofs+frag->size)); | 494 | JFFS2_WARNING("Eep. No valid nodes for ino #%u.\n", f->inocache->ino); |
388 | frag->size = size - frag->ofs; | 495 | while (valid_ref) { |
496 | unsigned char *bufstart; | ||
497 | |||
498 | /* We can hold a pointer to a non-obsolete node without the spinlock, | ||
499 | but _obsolete_ nodes may disappear at any time, if the block | ||
500 | they're in gets erased. So if we mark 'ref' obsolete while we're | ||
501 | not holding the lock, it can go away immediately. For that reason, | ||
502 | we find the next valid node first, before processing 'ref'. | ||
503 | */ | ||
504 | ref = valid_ref; | ||
505 | valid_ref = jffs2_first_valid_node(ref->next_in_ino); | ||
506 | spin_unlock(&c->erase_completion_lock); | ||
507 | |||
508 | cond_resched(); | ||
509 | |||
510 | /* | ||
511 | * At this point we don't know the type of the node we're going | ||
512 | * to read, so we do not know the size of its header. In order | ||
513 | * to minimize the amount of flash IO we assume the node has | ||
514 | * size = JFFS2_MIN_NODE_HEADER. | ||
515 | */ | ||
516 | if (jffs2_is_writebuffered(c)) { | ||
517 | /* | ||
518 | * We treat 'buf' as 2 adjacent wbufs. We want to | ||
519 | * adjust bufstart such as it points to the | ||
520 | * beginning of the node within this wbuf. | ||
521 | */ | ||
522 | bufstart = buf + (ref_offset(ref) % c->wbuf_pagesize); | ||
523 | /* We will read either one wbuf or 2 wbufs. */ | ||
524 | len = c->wbuf_pagesize - (bufstart - buf); | ||
525 | if (JFFS2_MIN_NODE_HEADER + (int)(bufstart - buf) > c->wbuf_pagesize) { | ||
526 | /* The header spans the border of the first wbuf */ | ||
527 | len += c->wbuf_pagesize; | ||
528 | } | ||
529 | } else { | ||
530 | bufstart = buf; | ||
531 | len = JFFS2_MIN_NODE_HEADER; | ||
389 | } | 532 | } |
390 | frag = frag_next(frag); | ||
391 | } | ||
392 | while (frag && frag->ofs >= size) { | ||
393 | struct jffs2_node_frag *next = frag_next(frag); | ||
394 | 533 | ||
395 | D1(printk(KERN_DEBUG "Removing frag 0x%08x-0x%08x\n", frag->ofs, frag->ofs+frag->size)); | 534 | dbg_readinode("read %d bytes at %#08x(%d).\n", len, ref_offset(ref), ref_flags(ref)); |
396 | frag_erase(frag, list); | ||
397 | jffs2_obsolete_node_frag(c, frag); | ||
398 | frag = next; | ||
399 | } | ||
400 | } | ||
401 | 535 | ||
402 | /* Scan the list of all nodes present for this ino, build map of versions, etc. */ | 536 | /* FIXME: point() */ |
537 | err = jffs2_flash_read(c, ref_offset(ref), len, | ||
538 | &retlen, bufstart); | ||
539 | if (err) { | ||
540 | JFFS2_ERROR("can not read %d bytes from 0x%08x, " "error code: %d.\n", len, ref_offset(ref), err); | ||
541 | goto free_out; | ||
542 | } | ||
403 | 543 | ||
404 | static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, | 544 | if (retlen < len) { |
405 | struct jffs2_inode_info *f, | 545 | JFFS2_ERROR("short read at %#08x: %d instead of %d.\n", ref_offset(ref), retlen, len); |
406 | struct jffs2_raw_inode *latest_node); | 546 | err = -EIO; |
547 | goto free_out; | ||
548 | } | ||
407 | 549 | ||
408 | int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | 550 | node = (union jffs2_node_union *)bufstart; |
409 | uint32_t ino, struct jffs2_raw_inode *latest_node) | ||
410 | { | ||
411 | D2(printk(KERN_DEBUG "jffs2_do_read_inode(): getting inocache\n")); | ||
412 | 551 | ||
413 | retry_inocache: | 552 | switch (je16_to_cpu(node->u.nodetype)) { |
414 | spin_lock(&c->inocache_lock); | ||
415 | f->inocache = jffs2_get_ino_cache(c, ino); | ||
416 | 553 | ||
417 | D2(printk(KERN_DEBUG "jffs2_do_read_inode(): Got inocache at %p\n", f->inocache)); | 554 | case JFFS2_NODETYPE_DIRENT: |
555 | |||
556 | if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_dirent)) { | ||
557 | err = read_more(c, ref, sizeof(struct jffs2_raw_dirent), &len, buf, bufstart); | ||
558 | if (unlikely(err)) | ||
559 | goto free_out; | ||
560 | } | ||
561 | |||
562 | err = read_direntry(c, ref, &node->d, retlen, &ret_fd, latest_mctime, mctime_ver); | ||
563 | if (err == 1) { | ||
564 | jffs2_mark_node_obsolete(c, ref); | ||
565 | break; | ||
566 | } else if (unlikely(err)) | ||
567 | goto free_out; | ||
568 | |||
569 | if (je32_to_cpu(node->d.version) > *highest_version) | ||
570 | *highest_version = je32_to_cpu(node->d.version); | ||
418 | 571 | ||
419 | if (f->inocache) { | ||
420 | /* Check its state. We may need to wait before we can use it */ | ||
421 | switch(f->inocache->state) { | ||
422 | case INO_STATE_UNCHECKED: | ||
423 | case INO_STATE_CHECKEDABSENT: | ||
424 | f->inocache->state = INO_STATE_READING; | ||
425 | break; | 572 | break; |
426 | |||
427 | case INO_STATE_CHECKING: | ||
428 | case INO_STATE_GC: | ||
429 | /* If it's in either of these states, we need | ||
430 | to wait for whoever's got it to finish and | ||
431 | put it back. */ | ||
432 | D1(printk(KERN_DEBUG "jffs2_get_ino_cache_read waiting for ino #%u in state %d\n", | ||
433 | ino, f->inocache->state)); | ||
434 | sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); | ||
435 | goto retry_inocache; | ||
436 | 573 | ||
437 | case INO_STATE_READING: | 574 | case JFFS2_NODETYPE_INODE: |
438 | case INO_STATE_PRESENT: | 575 | |
439 | /* Eep. This should never happen. It can | 576 | if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_inode)) { |
440 | happen if Linux calls read_inode() again | 577 | err = read_more(c, ref, sizeof(struct jffs2_raw_inode), &len, buf, bufstart); |
441 | before clear_inode() has finished though. */ | 578 | if (unlikely(err)) |
442 | printk(KERN_WARNING "Eep. Trying to read_inode #%u when it's already in state %d!\n", ino, f->inocache->state); | 579 | goto free_out; |
443 | /* Fail. That's probably better than allowing it to succeed */ | 580 | } |
444 | f->inocache = NULL; | 581 | |
582 | err = read_dnode(c, ref, &node->i, &ret_tn, len, latest_mctime, mctime_ver); | ||
583 | if (err == 1) { | ||
584 | jffs2_mark_node_obsolete(c, ref); | ||
585 | break; | ||
586 | } else if (unlikely(err)) | ||
587 | goto free_out; | ||
588 | |||
589 | if (je32_to_cpu(node->i.version) > *highest_version) | ||
590 | *highest_version = je32_to_cpu(node->i.version); | ||
591 | |||
445 | break; | 592 | break; |
446 | 593 | ||
447 | default: | 594 | default: |
448 | BUG(); | 595 | if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_unknown_node)) { |
449 | } | 596 | err = read_more(c, ref, sizeof(struct jffs2_unknown_node), &len, buf, bufstart); |
450 | } | 597 | if (unlikely(err)) |
451 | spin_unlock(&c->inocache_lock); | 598 | goto free_out; |
599 | } | ||
600 | |||
601 | err = read_unknown(c, ref, &node->u); | ||
602 | if (err == 1) { | ||
603 | jffs2_mark_node_obsolete(c, ref); | ||
604 | break; | ||
605 | } else if (unlikely(err)) | ||
606 | goto free_out; | ||
452 | 607 | ||
453 | if (!f->inocache && ino == 1) { | ||
454 | /* Special case - no root inode on medium */ | ||
455 | f->inocache = jffs2_alloc_inode_cache(); | ||
456 | if (!f->inocache) { | ||
457 | printk(KERN_CRIT "jffs2_do_read_inode(): Cannot allocate inocache for root inode\n"); | ||
458 | return -ENOMEM; | ||
459 | } | 608 | } |
460 | D1(printk(KERN_DEBUG "jffs2_do_read_inode(): Creating inocache for root inode\n")); | 609 | spin_lock(&c->erase_completion_lock); |
461 | memset(f->inocache, 0, sizeof(struct jffs2_inode_cache)); | ||
462 | f->inocache->ino = f->inocache->nlink = 1; | ||
463 | f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache; | ||
464 | f->inocache->state = INO_STATE_READING; | ||
465 | jffs2_add_ino_cache(c, f->inocache); | ||
466 | } | ||
467 | if (!f->inocache) { | ||
468 | printk(KERN_WARNING "jffs2_do_read_inode() on nonexistent ino %u\n", ino); | ||
469 | return -ENOENT; | ||
470 | } | 610 | } |
471 | 611 | ||
472 | return jffs2_do_read_inode_internal(c, f, latest_node); | 612 | spin_unlock(&c->erase_completion_lock); |
473 | } | 613 | *tnp = ret_tn; |
474 | 614 | *fdp = ret_fd; | |
475 | int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) | 615 | kfree(buf); |
476 | { | ||
477 | struct jffs2_raw_inode n; | ||
478 | struct jffs2_inode_info *f = kmalloc(sizeof(*f), GFP_KERNEL); | ||
479 | int ret; | ||
480 | |||
481 | if (!f) | ||
482 | return -ENOMEM; | ||
483 | 616 | ||
484 | memset(f, 0, sizeof(*f)); | 617 | dbg_readinode("nodes of inode #%u were read, the highest version is %u, latest_mctime %u, mctime_ver %u.\n", |
485 | init_MUTEX_LOCKED(&f->sem); | 618 | f->inocache->ino, *highest_version, *latest_mctime, *mctime_ver); |
486 | f->inocache = ic; | 619 | return 0; |
487 | 620 | ||
488 | ret = jffs2_do_read_inode_internal(c, f, &n); | 621 | free_out: |
489 | if (!ret) { | 622 | jffs2_free_tmp_dnode_info_list(&ret_tn); |
490 | up(&f->sem); | 623 | jffs2_free_full_dirent_list(ret_fd); |
491 | jffs2_do_clear_inode(c, f); | 624 | kfree(buf); |
492 | } | 625 | return err; |
493 | kfree (f); | ||
494 | return ret; | ||
495 | } | 626 | } |
496 | 627 | ||
497 | static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, | 628 | static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, |
498 | struct jffs2_inode_info *f, | 629 | struct jffs2_inode_info *f, |
499 | struct jffs2_raw_inode *latest_node) | 630 | struct jffs2_raw_inode *latest_node) |
500 | { | 631 | { |
501 | struct jffs2_tmp_dnode_info *tn = NULL; | 632 | struct jffs2_tmp_dnode_info *tn; |
502 | struct rb_root tn_list; | 633 | struct rb_root tn_list; |
503 | struct rb_node *rb, *repl_rb; | 634 | struct rb_node *rb, *repl_rb; |
504 | struct jffs2_full_dirent *fd_list; | 635 | struct jffs2_full_dirent *fd_list; |
505 | struct jffs2_full_dnode *fn = NULL; | 636 | struct jffs2_full_dnode *fn, *first_fn = NULL; |
506 | uint32_t crc; | 637 | uint32_t crc; |
507 | uint32_t latest_mctime, mctime_ver; | 638 | uint32_t latest_mctime, mctime_ver; |
508 | uint32_t mdata_ver = 0; | ||
509 | size_t retlen; | 639 | size_t retlen; |
510 | int ret; | 640 | int ret; |
511 | 641 | ||
512 | D1(printk(KERN_DEBUG "jffs2_do_read_inode_internal(): ino #%u nlink is %d\n", f->inocache->ino, f->inocache->nlink)); | 642 | dbg_readinode("ino #%u nlink is %d\n", f->inocache->ino, f->inocache->nlink); |
513 | 643 | ||
514 | /* Grab all nodes relevant to this ino */ | 644 | /* Grab all nodes relevant to this ino */ |
515 | ret = jffs2_get_inode_nodes(c, f, &tn_list, &fd_list, &f->highest_version, &latest_mctime, &mctime_ver); | 645 | ret = jffs2_get_inode_nodes(c, f, &tn_list, &fd_list, &f->highest_version, &latest_mctime, &mctime_ver); |
516 | 646 | ||
517 | if (ret) { | 647 | if (ret) { |
518 | printk(KERN_CRIT "jffs2_get_inode_nodes() for ino %u returned %d\n", f->inocache->ino, ret); | 648 | JFFS2_ERROR("cannot read nodes for ino %u, returned error is %d\n", f->inocache->ino, ret); |
519 | if (f->inocache->state == INO_STATE_READING) | 649 | if (f->inocache->state == INO_STATE_READING) |
520 | jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT); | 650 | jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT); |
521 | return ret; | 651 | return ret; |
@@ -525,42 +655,33 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, | |||
525 | rb = rb_first(&tn_list); | 655 | rb = rb_first(&tn_list); |
526 | 656 | ||
527 | while (rb) { | 657 | while (rb) { |
658 | cond_resched(); | ||
528 | tn = rb_entry(rb, struct jffs2_tmp_dnode_info, rb); | 659 | tn = rb_entry(rb, struct jffs2_tmp_dnode_info, rb); |
529 | fn = tn->fn; | 660 | fn = tn->fn; |
530 | 661 | ret = 1; | |
531 | if (f->metadata) { | 662 | dbg_readinode("consider node ver %u, phys offset " |
532 | if (likely(tn->version >= mdata_ver)) { | 663 | "%#08x(%d), range %u-%u.\n", tn->version, |
533 | D1(printk(KERN_DEBUG "Obsoleting old metadata at 0x%08x\n", ref_offset(f->metadata->raw))); | 664 | ref_offset(fn->raw), ref_flags(fn->raw), |
534 | jffs2_mark_node_obsolete(c, f->metadata->raw); | 665 | fn->ofs, fn->ofs + fn->size); |
535 | jffs2_free_full_dnode(f->metadata); | ||
536 | f->metadata = NULL; | ||
537 | |||
538 | mdata_ver = 0; | ||
539 | } else { | ||
540 | /* This should never happen. */ | ||
541 | printk(KERN_WARNING "Er. New metadata at 0x%08x with ver %d is actually older than previous ver %d at 0x%08x\n", | ||
542 | ref_offset(fn->raw), tn->version, mdata_ver, ref_offset(f->metadata->raw)); | ||
543 | jffs2_mark_node_obsolete(c, fn->raw); | ||
544 | jffs2_free_full_dnode(fn); | ||
545 | /* Fill in latest_node from the metadata, not this one we're about to free... */ | ||
546 | fn = f->metadata; | ||
547 | goto next_tn; | ||
548 | } | ||
549 | } | ||
550 | 666 | ||
551 | if (fn->size) { | 667 | if (fn->size) { |
552 | jffs2_add_full_dnode_to_inode(c, f, fn); | 668 | ret = jffs2_add_older_frag_to_fragtree(c, f, tn); |
553 | } else { | 669 | /* TODO: the error code isn't checked, check it */ |
554 | /* Zero-sized node at end of version list. Just a metadata update */ | 670 | jffs2_dbg_fragtree_paranoia_check_nolock(f); |
555 | D1(printk(KERN_DEBUG "metadata @%08x: ver %d\n", ref_offset(fn->raw), tn->version)); | 671 | BUG_ON(ret < 0); |
672 | if (!first_fn && ret == 0) | ||
673 | first_fn = fn; | ||
674 | } else if (!first_fn) { | ||
675 | first_fn = fn; | ||
556 | f->metadata = fn; | 676 | f->metadata = fn; |
557 | mdata_ver = tn->version; | 677 | ret = 0; /* Prevent freeing the metadata update node */ |
558 | } | 678 | } else |
559 | next_tn: | 679 | jffs2_mark_node_obsolete(c, fn->raw); |
680 | |||
560 | BUG_ON(rb->rb_left); | 681 | BUG_ON(rb->rb_left); |
561 | if (rb->rb_parent && rb->rb_parent->rb_left == rb) { | 682 | if (rb->rb_parent && rb->rb_parent->rb_left == rb) { |
562 | /* We were then left-hand child of our parent. We need | 683 | /* We were then left-hand child of our parent. We need |
563 | to move our own right-hand child into our place. */ | 684 | * to move our own right-hand child into our place. */ |
564 | repl_rb = rb->rb_right; | 685 | repl_rb = rb->rb_right; |
565 | if (repl_rb) | 686 | if (repl_rb) |
566 | repl_rb->rb_parent = rb->rb_parent; | 687 | repl_rb->rb_parent = rb->rb_parent; |
@@ -570,7 +691,7 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, | |||
570 | rb = rb_next(rb); | 691 | rb = rb_next(rb); |
571 | 692 | ||
572 | /* Remove the spent tn from the tree; don't bother rebalancing | 693 | /* Remove the spent tn from the tree; don't bother rebalancing |
573 | but put our right-hand child in our own place. */ | 694 | * but put our right-hand child in our own place. */ |
574 | if (tn->rb.rb_parent) { | 695 | if (tn->rb.rb_parent) { |
575 | if (tn->rb.rb_parent->rb_left == &tn->rb) | 696 | if (tn->rb.rb_parent->rb_left == &tn->rb) |
576 | tn->rb.rb_parent->rb_left = repl_rb; | 697 | tn->rb.rb_parent->rb_left = repl_rb; |
@@ -581,19 +702,27 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, | |||
581 | tn->rb.rb_right->rb_parent = NULL; | 702 | tn->rb.rb_right->rb_parent = NULL; |
582 | 703 | ||
583 | jffs2_free_tmp_dnode_info(tn); | 704 | jffs2_free_tmp_dnode_info(tn); |
705 | if (ret) { | ||
706 | dbg_readinode("delete dnode %u-%u.\n", | ||
707 | fn->ofs, fn->ofs + fn->size); | ||
708 | jffs2_free_full_dnode(fn); | ||
709 | } | ||
584 | } | 710 | } |
585 | D1(jffs2_sanitycheck_fragtree(f)); | 711 | jffs2_dbg_fragtree_paranoia_check_nolock(f); |
586 | 712 | ||
587 | if (!fn) { | 713 | BUG_ON(first_fn && ref_obsolete(first_fn->raw)); |
714 | |||
715 | fn = first_fn; | ||
716 | if (unlikely(!first_fn)) { | ||
588 | /* No data nodes for this inode. */ | 717 | /* No data nodes for this inode. */ |
589 | if (f->inocache->ino != 1) { | 718 | if (f->inocache->ino != 1) { |
590 | printk(KERN_WARNING "jffs2_do_read_inode(): No data nodes found for ino #%u\n", f->inocache->ino); | 719 | JFFS2_WARNING("no data nodes found for ino #%u\n", f->inocache->ino); |
591 | if (!fd_list) { | 720 | if (!fd_list) { |
592 | if (f->inocache->state == INO_STATE_READING) | 721 | if (f->inocache->state == INO_STATE_READING) |
593 | jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT); | 722 | jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT); |
594 | return -EIO; | 723 | return -EIO; |
595 | } | 724 | } |
596 | printk(KERN_WARNING "jffs2_do_read_inode(): But it has children so we fake some modes for it\n"); | 725 | JFFS2_NOTICE("but it has children so we fake some modes for it\n"); |
597 | } | 726 | } |
598 | latest_node->mode = cpu_to_jemode(S_IFDIR|S_IRUGO|S_IWUSR|S_IXUGO); | 727 | latest_node->mode = cpu_to_jemode(S_IFDIR|S_IRUGO|S_IWUSR|S_IXUGO); |
599 | latest_node->version = cpu_to_je32(0); | 728 | latest_node->version = cpu_to_je32(0); |
@@ -608,8 +737,8 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, | |||
608 | 737 | ||
609 | ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(*latest_node), &retlen, (void *)latest_node); | 738 | ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(*latest_node), &retlen, (void *)latest_node); |
610 | if (ret || retlen != sizeof(*latest_node)) { | 739 | if (ret || retlen != sizeof(*latest_node)) { |
611 | printk(KERN_NOTICE "MTD read in jffs2_do_read_inode() failed: Returned %d, %zd of %zd bytes read\n", | 740 | JFFS2_ERROR("failed to read from flash: error %d, %zd of %zd bytes read\n", |
612 | ret, retlen, sizeof(*latest_node)); | 741 | ret, retlen, sizeof(*latest_node)); |
613 | /* FIXME: If this fails, there seems to be a memory leak. Find it. */ | 742 | /* FIXME: If this fails, there seems to be a memory leak. Find it. */ |
614 | up(&f->sem); | 743 | up(&f->sem); |
615 | jffs2_do_clear_inode(c, f); | 744 | jffs2_do_clear_inode(c, f); |
@@ -618,7 +747,8 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, | |||
618 | 747 | ||
619 | crc = crc32(0, latest_node, sizeof(*latest_node)-8); | 748 | crc = crc32(0, latest_node, sizeof(*latest_node)-8); |
620 | if (crc != je32_to_cpu(latest_node->node_crc)) { | 749 | if (crc != je32_to_cpu(latest_node->node_crc)) { |
621 | printk(KERN_NOTICE "CRC failed for read_inode of inode %u at physical location 0x%x\n", f->inocache->ino, ref_offset(fn->raw)); | 750 | JFFS2_ERROR("CRC failed for read_inode of inode %u at physical location 0x%x\n", |
751 | f->inocache->ino, ref_offset(fn->raw)); | ||
622 | up(&f->sem); | 752 | up(&f->sem); |
623 | jffs2_do_clear_inode(c, f); | 753 | jffs2_do_clear_inode(c, f); |
624 | return -EIO; | 754 | return -EIO; |
@@ -633,10 +763,10 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, | |||
633 | } | 763 | } |
634 | break; | 764 | break; |
635 | 765 | ||
636 | 766 | ||
637 | case S_IFREG: | 767 | case S_IFREG: |
638 | /* If it was a regular file, truncate it to the latest node's isize */ | 768 | /* If it was a regular file, truncate it to the latest node's isize */ |
639 | jffs2_truncate_fraglist(c, &f->fragtree, je32_to_cpu(latest_node->isize)); | 769 | jffs2_truncate_fragtree(c, &f->fragtree, je32_to_cpu(latest_node->isize)); |
640 | break; | 770 | break; |
641 | 771 | ||
642 | case S_IFLNK: | 772 | case S_IFLNK: |
@@ -649,37 +779,33 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, | |||
649 | 779 | ||
650 | if (f->inocache->state != INO_STATE_CHECKING) { | 780 | if (f->inocache->state != INO_STATE_CHECKING) { |
651 | /* Symlink's inode data is the target path. Read it and | 781 | /* Symlink's inode data is the target path. Read it and |
652 | * keep in RAM to facilitate quick follow symlink operation. | 782 | * keep in RAM to facilitate quick follow symlink |
653 | * We use f->dents field to store the target path, which | 783 | * operation. */ |
654 | * is somewhat ugly. */ | 784 | f->target = kmalloc(je32_to_cpu(latest_node->csize) + 1, GFP_KERNEL); |
655 | f->dents = kmalloc(je32_to_cpu(latest_node->csize) + 1, GFP_KERNEL); | 785 | if (!f->target) { |
656 | if (!f->dents) { | 786 | JFFS2_ERROR("can't allocate %d bytes of memory for the symlink target path cache\n", je32_to_cpu(latest_node->csize)); |
657 | printk(KERN_WARNING "Can't allocate %d bytes of memory " | ||
658 | "for the symlink target path cache\n", | ||
659 | je32_to_cpu(latest_node->csize)); | ||
660 | up(&f->sem); | 787 | up(&f->sem); |
661 | jffs2_do_clear_inode(c, f); | 788 | jffs2_do_clear_inode(c, f); |
662 | return -ENOMEM; | 789 | return -ENOMEM; |
663 | } | 790 | } |
664 | 791 | ||
665 | ret = jffs2_flash_read(c, ref_offset(fn->raw) + sizeof(*latest_node), | 792 | ret = jffs2_flash_read(c, ref_offset(fn->raw) + sizeof(*latest_node), |
666 | je32_to_cpu(latest_node->csize), &retlen, (char *)f->dents); | 793 | je32_to_cpu(latest_node->csize), &retlen, (char *)f->target); |
667 | 794 | ||
668 | if (ret || retlen != je32_to_cpu(latest_node->csize)) { | 795 | if (ret || retlen != je32_to_cpu(latest_node->csize)) { |
669 | if (retlen != je32_to_cpu(latest_node->csize)) | 796 | if (retlen != je32_to_cpu(latest_node->csize)) |
670 | ret = -EIO; | 797 | ret = -EIO; |
671 | kfree(f->dents); | 798 | kfree(f->target); |
672 | f->dents = NULL; | 799 | f->target = NULL; |
673 | up(&f->sem); | 800 | up(&f->sem); |
674 | jffs2_do_clear_inode(c, f); | 801 | jffs2_do_clear_inode(c, f); |
675 | return -ret; | 802 | return -ret; |
676 | } | 803 | } |
677 | 804 | ||
678 | ((char *)f->dents)[je32_to_cpu(latest_node->csize)] = '\0'; | 805 | f->target[je32_to_cpu(latest_node->csize)] = '\0'; |
679 | D1(printk(KERN_DEBUG "jffs2_do_read_inode(): symlink's target '%s' cached\n", | 806 | dbg_readinode("symlink's target '%s' cached\n", f->target); |
680 | (char *)f->dents)); | ||
681 | } | 807 | } |
682 | 808 | ||
683 | /* fall through... */ | 809 | /* fall through... */ |
684 | 810 | ||
685 | case S_IFBLK: | 811 | case S_IFBLK: |
@@ -687,14 +813,14 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, | |||
687 | /* Certain inode types should have only one data node, and it's | 813 | /* Certain inode types should have only one data node, and it's |
688 | kept as the metadata node */ | 814 | kept as the metadata node */ |
689 | if (f->metadata) { | 815 | if (f->metadata) { |
690 | printk(KERN_WARNING "Argh. Special inode #%u with mode 0%o had metadata node\n", | 816 | JFFS2_ERROR("Argh. Special inode #%u with mode 0%o had metadata node\n", |
691 | f->inocache->ino, jemode_to_cpu(latest_node->mode)); | 817 | f->inocache->ino, jemode_to_cpu(latest_node->mode)); |
692 | up(&f->sem); | 818 | up(&f->sem); |
693 | jffs2_do_clear_inode(c, f); | 819 | jffs2_do_clear_inode(c, f); |
694 | return -EIO; | 820 | return -EIO; |
695 | } | 821 | } |
696 | if (!frag_first(&f->fragtree)) { | 822 | if (!frag_first(&f->fragtree)) { |
697 | printk(KERN_WARNING "Argh. Special inode #%u with mode 0%o has no fragments\n", | 823 | JFFS2_ERROR("Argh. Special inode #%u with mode 0%o has no fragments\n", |
698 | f->inocache->ino, jemode_to_cpu(latest_node->mode)); | 824 | f->inocache->ino, jemode_to_cpu(latest_node->mode)); |
699 | up(&f->sem); | 825 | up(&f->sem); |
700 | jffs2_do_clear_inode(c, f); | 826 | jffs2_do_clear_inode(c, f); |
@@ -702,7 +828,7 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, | |||
702 | } | 828 | } |
703 | /* ASSERT: f->fraglist != NULL */ | 829 | /* ASSERT: f->fraglist != NULL */ |
704 | if (frag_next(frag_first(&f->fragtree))) { | 830 | if (frag_next(frag_first(&f->fragtree))) { |
705 | printk(KERN_WARNING "Argh. Special inode #%u with mode 0x%x had more than one node\n", | 831 | JFFS2_ERROR("Argh. Special inode #%u with mode 0x%x had more than one node\n", |
706 | f->inocache->ino, jemode_to_cpu(latest_node->mode)); | 832 | f->inocache->ino, jemode_to_cpu(latest_node->mode)); |
707 | /* FIXME: Deal with it - check crc32, check for duplicate node, check times and discard the older one */ | 833 | /* FIXME: Deal with it - check crc32, check for duplicate node, check times and discard the older one */ |
708 | up(&f->sem); | 834 | up(&f->sem); |
@@ -721,6 +847,93 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, | |||
721 | return 0; | 847 | return 0; |
722 | } | 848 | } |
723 | 849 | ||
850 | /* Scan the list of all nodes present for this ino, build map of versions, etc. */ | ||
851 | int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | ||
852 | uint32_t ino, struct jffs2_raw_inode *latest_node) | ||
853 | { | ||
854 | dbg_readinode("read inode #%u\n", ino); | ||
855 | |||
856 | retry_inocache: | ||
857 | spin_lock(&c->inocache_lock); | ||
858 | f->inocache = jffs2_get_ino_cache(c, ino); | ||
859 | |||
860 | if (f->inocache) { | ||
861 | /* Check its state. We may need to wait before we can use it */ | ||
862 | switch(f->inocache->state) { | ||
863 | case INO_STATE_UNCHECKED: | ||
864 | case INO_STATE_CHECKEDABSENT: | ||
865 | f->inocache->state = INO_STATE_READING; | ||
866 | break; | ||
867 | |||
868 | case INO_STATE_CHECKING: | ||
869 | case INO_STATE_GC: | ||
870 | /* If it's in either of these states, we need | ||
871 | to wait for whoever's got it to finish and | ||
872 | put it back. */ | ||
873 | dbg_readinode("waiting for ino #%u in state %d\n", ino, f->inocache->state); | ||
874 | sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); | ||
875 | goto retry_inocache; | ||
876 | |||
877 | case INO_STATE_READING: | ||
878 | case INO_STATE_PRESENT: | ||
879 | /* Eep. This should never happen. It can | ||
880 | happen if Linux calls read_inode() again | ||
881 | before clear_inode() has finished though. */ | ||
882 | JFFS2_ERROR("Eep. Trying to read_inode #%u when it's already in state %d!\n", ino, f->inocache->state); | ||
883 | /* Fail. That's probably better than allowing it to succeed */ | ||
884 | f->inocache = NULL; | ||
885 | break; | ||
886 | |||
887 | default: | ||
888 | BUG(); | ||
889 | } | ||
890 | } | ||
891 | spin_unlock(&c->inocache_lock); | ||
892 | |||
893 | if (!f->inocache && ino == 1) { | ||
894 | /* Special case - no root inode on medium */ | ||
895 | f->inocache = jffs2_alloc_inode_cache(); | ||
896 | if (!f->inocache) { | ||
897 | JFFS2_ERROR("cannot allocate inocache for root inode\n"); | ||
898 | return -ENOMEM; | ||
899 | } | ||
900 | dbg_readinode("creating inocache for root inode\n"); | ||
901 | memset(f->inocache, 0, sizeof(struct jffs2_inode_cache)); | ||
902 | f->inocache->ino = f->inocache->nlink = 1; | ||
903 | f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache; | ||
904 | f->inocache->state = INO_STATE_READING; | ||
905 | jffs2_add_ino_cache(c, f->inocache); | ||
906 | } | ||
907 | if (!f->inocache) { | ||
908 | JFFS2_ERROR("requestied to read an nonexistent ino %u\n", ino); | ||
909 | return -ENOENT; | ||
910 | } | ||
911 | |||
912 | return jffs2_do_read_inode_internal(c, f, latest_node); | ||
913 | } | ||
914 | |||
915 | int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) | ||
916 | { | ||
917 | struct jffs2_raw_inode n; | ||
918 | struct jffs2_inode_info *f = kmalloc(sizeof(*f), GFP_KERNEL); | ||
919 | int ret; | ||
920 | |||
921 | if (!f) | ||
922 | return -ENOMEM; | ||
923 | |||
924 | memset(f, 0, sizeof(*f)); | ||
925 | init_MUTEX_LOCKED(&f->sem); | ||
926 | f->inocache = ic; | ||
927 | |||
928 | ret = jffs2_do_read_inode_internal(c, f, &n); | ||
929 | if (!ret) { | ||
930 | up(&f->sem); | ||
931 | jffs2_do_clear_inode(c, f); | ||
932 | } | ||
933 | kfree (f); | ||
934 | return ret; | ||
935 | } | ||
936 | |||
724 | void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f) | 937 | void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f) |
725 | { | 938 | { |
726 | struct jffs2_full_dirent *fd, *fds; | 939 | struct jffs2_full_dirent *fd, *fds; |
@@ -740,20 +953,16 @@ void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f) | |||
740 | 953 | ||
741 | jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL); | 954 | jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL); |
742 | 955 | ||
743 | /* For symlink inodes we us f->dents to store the target path name */ | 956 | if (f->target) { |
744 | if (S_ISLNK(OFNI_EDONI_2SFFJ(f)->i_mode)) { | 957 | kfree(f->target); |
745 | if (f->dents) { | 958 | f->target = NULL; |
746 | kfree(f->dents); | 959 | } |
747 | f->dents = NULL; | ||
748 | } | ||
749 | } else { | ||
750 | fds = f->dents; | ||
751 | 960 | ||
752 | while(fds) { | 961 | fds = f->dents; |
753 | fd = fds; | 962 | while(fds) { |
754 | fds = fd->next; | 963 | fd = fds; |
755 | jffs2_free_full_dirent(fd); | 964 | fds = fd->next; |
756 | } | 965 | jffs2_free_full_dirent(fd); |
757 | } | 966 | } |
758 | 967 | ||
759 | if (f->inocache && f->inocache->state != INO_STATE_CHECKING) { | 968 | if (f->inocache && f->inocache->state != INO_STATE_CHECKING) { |
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c index b63160f83bab..0e7456ec99fd 100644 --- a/fs/jffs2/scan.c +++ b/fs/jffs2/scan.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: scan.c,v 1.119 2005/02/17 17:51:13 dedekind Exp $ | 10 | * $Id: scan.c,v 1.125 2005/09/30 13:59:13 dedekind Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
@@ -18,22 +18,11 @@ | |||
18 | #include <linux/crc32.h> | 18 | #include <linux/crc32.h> |
19 | #include <linux/compiler.h> | 19 | #include <linux/compiler.h> |
20 | #include "nodelist.h" | 20 | #include "nodelist.h" |
21 | #include "summary.h" | ||
22 | #include "debug.h" | ||
21 | 23 | ||
22 | #define DEFAULT_EMPTY_SCAN_SIZE 1024 | 24 | #define DEFAULT_EMPTY_SCAN_SIZE 1024 |
23 | 25 | ||
24 | #define DIRTY_SPACE(x) do { typeof(x) _x = (x); \ | ||
25 | c->free_size -= _x; c->dirty_size += _x; \ | ||
26 | jeb->free_size -= _x ; jeb->dirty_size += _x; \ | ||
27 | }while(0) | ||
28 | #define USED_SPACE(x) do { typeof(x) _x = (x); \ | ||
29 | c->free_size -= _x; c->used_size += _x; \ | ||
30 | jeb->free_size -= _x ; jeb->used_size += _x; \ | ||
31 | }while(0) | ||
32 | #define UNCHECKED_SPACE(x) do { typeof(x) _x = (x); \ | ||
33 | c->free_size -= _x; c->unchecked_size += _x; \ | ||
34 | jeb->free_size -= _x ; jeb->unchecked_size += _x; \ | ||
35 | }while(0) | ||
36 | |||
37 | #define noisy_printk(noise, args...) do { \ | 26 | #define noisy_printk(noise, args...) do { \ |
38 | if (*(noise)) { \ | 27 | if (*(noise)) { \ |
39 | printk(KERN_NOTICE args); \ | 28 | printk(KERN_NOTICE args); \ |
@@ -47,23 +36,16 @@ | |||
47 | static uint32_t pseudo_random; | 36 | static uint32_t pseudo_random; |
48 | 37 | ||
49 | static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | 38 | static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, |
50 | unsigned char *buf, uint32_t buf_size); | 39 | unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s); |
51 | 40 | ||
52 | /* These helper functions _must_ increase ofs and also do the dirty/used space accounting. | 41 | /* These helper functions _must_ increase ofs and also do the dirty/used space accounting. |
53 | * Returning an error will abort the mount - bad checksums etc. should just mark the space | 42 | * Returning an error will abort the mount - bad checksums etc. should just mark the space |
54 | * as dirty. | 43 | * as dirty. |
55 | */ | 44 | */ |
56 | static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | 45 | static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, |
57 | struct jffs2_raw_inode *ri, uint32_t ofs); | 46 | struct jffs2_raw_inode *ri, uint32_t ofs, struct jffs2_summary *s); |
58 | static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | 47 | static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, |
59 | struct jffs2_raw_dirent *rd, uint32_t ofs); | 48 | struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s); |
60 | |||
61 | #define BLK_STATE_ALLFF 0 | ||
62 | #define BLK_STATE_CLEAN 1 | ||
63 | #define BLK_STATE_PARTDIRTY 2 | ||
64 | #define BLK_STATE_CLEANMARKER 3 | ||
65 | #define BLK_STATE_ALLDIRTY 4 | ||
66 | #define BLK_STATE_BADBLOCK 5 | ||
67 | 49 | ||
68 | static inline int min_free(struct jffs2_sb_info *c) | 50 | static inline int min_free(struct jffs2_sb_info *c) |
69 | { | 51 | { |
@@ -89,6 +71,7 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) | |||
89 | uint32_t empty_blocks = 0, bad_blocks = 0; | 71 | uint32_t empty_blocks = 0, bad_blocks = 0; |
90 | unsigned char *flashbuf = NULL; | 72 | unsigned char *flashbuf = NULL; |
91 | uint32_t buf_size = 0; | 73 | uint32_t buf_size = 0; |
74 | struct jffs2_summary *s = NULL; /* summary info collected by the scan process */ | ||
92 | #ifndef __ECOS | 75 | #ifndef __ECOS |
93 | size_t pointlen; | 76 | size_t pointlen; |
94 | 77 | ||
@@ -122,21 +105,34 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) | |||
122 | return -ENOMEM; | 105 | return -ENOMEM; |
123 | } | 106 | } |
124 | 107 | ||
108 | if (jffs2_sum_active()) { | ||
109 | s = kmalloc(sizeof(struct jffs2_summary), GFP_KERNEL); | ||
110 | if (!s) { | ||
111 | JFFS2_WARNING("Can't allocate memory for summary\n"); | ||
112 | return -ENOMEM; | ||
113 | } | ||
114 | memset(s, 0, sizeof(struct jffs2_summary)); | ||
115 | } | ||
116 | |||
125 | for (i=0; i<c->nr_blocks; i++) { | 117 | for (i=0; i<c->nr_blocks; i++) { |
126 | struct jffs2_eraseblock *jeb = &c->blocks[i]; | 118 | struct jffs2_eraseblock *jeb = &c->blocks[i]; |
127 | 119 | ||
128 | ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset), buf_size); | 120 | /* reset summary info for next eraseblock scan */ |
121 | jffs2_sum_reset_collected(s); | ||
122 | |||
123 | ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset), | ||
124 | buf_size, s); | ||
129 | 125 | ||
130 | if (ret < 0) | 126 | if (ret < 0) |
131 | goto out; | 127 | goto out; |
132 | 128 | ||
133 | ACCT_PARANOIA_CHECK(jeb); | 129 | jffs2_dbg_acct_paranoia_check_nolock(c, jeb); |
134 | 130 | ||
135 | /* Now decide which list to put it on */ | 131 | /* Now decide which list to put it on */ |
136 | switch(ret) { | 132 | switch(ret) { |
137 | case BLK_STATE_ALLFF: | 133 | case BLK_STATE_ALLFF: |
138 | /* | 134 | /* |
139 | * Empty block. Since we can't be sure it | 135 | * Empty block. Since we can't be sure it |
140 | * was entirely erased, we just queue it for erase | 136 | * was entirely erased, we just queue it for erase |
141 | * again. It will be marked as such when the erase | 137 | * again. It will be marked as such when the erase |
142 | * is complete. Meanwhile we still count it as empty | 138 | * is complete. Meanwhile we still count it as empty |
@@ -162,18 +158,18 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) | |||
162 | break; | 158 | break; |
163 | 159 | ||
164 | case BLK_STATE_CLEAN: | 160 | case BLK_STATE_CLEAN: |
165 | /* Full (or almost full) of clean data. Clean list */ | 161 | /* Full (or almost full) of clean data. Clean list */ |
166 | list_add(&jeb->list, &c->clean_list); | 162 | list_add(&jeb->list, &c->clean_list); |
167 | break; | 163 | break; |
168 | 164 | ||
169 | case BLK_STATE_PARTDIRTY: | 165 | case BLK_STATE_PARTDIRTY: |
170 | /* Some data, but not full. Dirty list. */ | 166 | /* Some data, but not full. Dirty list. */ |
171 | /* We want to remember the block with most free space | 167 | /* We want to remember the block with most free space |
172 | and stick it in the 'nextblock' position to start writing to it. */ | 168 | and stick it in the 'nextblock' position to start writing to it. */ |
173 | if (jeb->free_size > min_free(c) && | 169 | if (jeb->free_size > min_free(c) && |
174 | (!c->nextblock || c->nextblock->free_size < jeb->free_size)) { | 170 | (!c->nextblock || c->nextblock->free_size < jeb->free_size)) { |
175 | /* Better candidate for the next writes to go to */ | 171 | /* Better candidate for the next writes to go to */ |
176 | if (c->nextblock) { | 172 | if (c->nextblock) { |
177 | c->nextblock->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size; | 173 | c->nextblock->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size; |
178 | c->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size; | 174 | c->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size; |
179 | c->free_size -= c->nextblock->free_size; | 175 | c->free_size -= c->nextblock->free_size; |
@@ -184,9 +180,14 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) | |||
184 | } else { | 180 | } else { |
185 | list_add(&c->nextblock->list, &c->dirty_list); | 181 | list_add(&c->nextblock->list, &c->dirty_list); |
186 | } | 182 | } |
183 | /* deleting summary information of the old nextblock */ | ||
184 | jffs2_sum_reset_collected(c->summary); | ||
187 | } | 185 | } |
188 | c->nextblock = jeb; | 186 | /* update collected summary infromation for the current nextblock */ |
189 | } else { | 187 | jffs2_sum_move_collected(c, s); |
188 | D1(printk(KERN_DEBUG "jffs2_scan_medium(): new nextblock = 0x%08x\n", jeb->offset)); | ||
189 | c->nextblock = jeb; | ||
190 | } else { | ||
190 | jeb->dirty_size += jeb->free_size + jeb->wasted_size; | 191 | jeb->dirty_size += jeb->free_size + jeb->wasted_size; |
191 | c->dirty_size += jeb->free_size + jeb->wasted_size; | 192 | c->dirty_size += jeb->free_size + jeb->wasted_size; |
192 | c->free_size -= jeb->free_size; | 193 | c->free_size -= jeb->free_size; |
@@ -197,30 +198,33 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) | |||
197 | } else { | 198 | } else { |
198 | list_add(&jeb->list, &c->dirty_list); | 199 | list_add(&jeb->list, &c->dirty_list); |
199 | } | 200 | } |
200 | } | 201 | } |
201 | break; | 202 | break; |
202 | 203 | ||
203 | case BLK_STATE_ALLDIRTY: | 204 | case BLK_STATE_ALLDIRTY: |
204 | /* Nothing valid - not even a clean marker. Needs erasing. */ | 205 | /* Nothing valid - not even a clean marker. Needs erasing. */ |
205 | /* For now we just put it on the erasing list. We'll start the erases later */ | 206 | /* For now we just put it on the erasing list. We'll start the erases later */ |
206 | D1(printk(KERN_NOTICE "JFFS2: Erase block at 0x%08x is not formatted. It will be erased\n", jeb->offset)); | 207 | D1(printk(KERN_NOTICE "JFFS2: Erase block at 0x%08x is not formatted. It will be erased\n", jeb->offset)); |
207 | list_add(&jeb->list, &c->erase_pending_list); | 208 | list_add(&jeb->list, &c->erase_pending_list); |
208 | c->nr_erasing_blocks++; | 209 | c->nr_erasing_blocks++; |
209 | break; | 210 | break; |
210 | 211 | ||
211 | case BLK_STATE_BADBLOCK: | 212 | case BLK_STATE_BADBLOCK: |
212 | D1(printk(KERN_NOTICE "JFFS2: Block at 0x%08x is bad\n", jeb->offset)); | 213 | D1(printk(KERN_NOTICE "JFFS2: Block at 0x%08x is bad\n", jeb->offset)); |
213 | list_add(&jeb->list, &c->bad_list); | 214 | list_add(&jeb->list, &c->bad_list); |
214 | c->bad_size += c->sector_size; | 215 | c->bad_size += c->sector_size; |
215 | c->free_size -= c->sector_size; | 216 | c->free_size -= c->sector_size; |
216 | bad_blocks++; | 217 | bad_blocks++; |
217 | break; | 218 | break; |
218 | default: | 219 | default: |
219 | printk(KERN_WARNING "jffs2_scan_medium(): unknown block state\n"); | 220 | printk(KERN_WARNING "jffs2_scan_medium(): unknown block state\n"); |
220 | BUG(); | 221 | BUG(); |
221 | } | 222 | } |
222 | } | 223 | } |
223 | 224 | ||
225 | if (jffs2_sum_active() && s) | ||
226 | kfree(s); | ||
227 | |||
224 | /* Nextblock dirty is always seen as wasted, because we cannot recycle it now */ | 228 | /* Nextblock dirty is always seen as wasted, because we cannot recycle it now */ |
225 | if (c->nextblock && (c->nextblock->dirty_size)) { | 229 | if (c->nextblock && (c->nextblock->dirty_size)) { |
226 | c->nextblock->wasted_size += c->nextblock->dirty_size; | 230 | c->nextblock->wasted_size += c->nextblock->dirty_size; |
@@ -229,12 +233,12 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) | |||
229 | c->nextblock->dirty_size = 0; | 233 | c->nextblock->dirty_size = 0; |
230 | } | 234 | } |
231 | #ifdef CONFIG_JFFS2_FS_WRITEBUFFER | 235 | #ifdef CONFIG_JFFS2_FS_WRITEBUFFER |
232 | if (!jffs2_can_mark_obsolete(c) && c->nextblock && (c->nextblock->free_size & (c->wbuf_pagesize-1))) { | 236 | if (!jffs2_can_mark_obsolete(c) && c->nextblock && (c->nextblock->free_size % c->wbuf_pagesize)) { |
233 | /* If we're going to start writing into a block which already | 237 | /* If we're going to start writing into a block which already |
234 | contains data, and the end of the data isn't page-aligned, | 238 | contains data, and the end of the data isn't page-aligned, |
235 | skip a little and align it. */ | 239 | skip a little and align it. */ |
236 | 240 | ||
237 | uint32_t skip = c->nextblock->free_size & (c->wbuf_pagesize-1); | 241 | uint32_t skip = c->nextblock->free_size % c->wbuf_pagesize; |
238 | 242 | ||
239 | D1(printk(KERN_DEBUG "jffs2_scan_medium(): Skipping %d bytes in nextblock to ensure page alignment\n", | 243 | D1(printk(KERN_DEBUG "jffs2_scan_medium(): Skipping %d bytes in nextblock to ensure page alignment\n", |
240 | skip)); | 244 | skip)); |
@@ -246,7 +250,7 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) | |||
246 | } | 250 | } |
247 | #endif | 251 | #endif |
248 | if (c->nr_erasing_blocks) { | 252 | if (c->nr_erasing_blocks) { |
249 | if ( !c->used_size && ((c->nr_free_blocks+empty_blocks+bad_blocks)!= c->nr_blocks || bad_blocks == c->nr_blocks) ) { | 253 | if ( !c->used_size && ((c->nr_free_blocks+empty_blocks+bad_blocks)!= c->nr_blocks || bad_blocks == c->nr_blocks) ) { |
250 | printk(KERN_NOTICE "Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n"); | 254 | printk(KERN_NOTICE "Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n"); |
251 | printk(KERN_NOTICE "empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n",empty_blocks,bad_blocks,c->nr_blocks); | 255 | printk(KERN_NOTICE "empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n",empty_blocks,bad_blocks,c->nr_blocks); |
252 | ret = -EIO; | 256 | ret = -EIO; |
@@ -259,13 +263,13 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) | |||
259 | if (buf_size) | 263 | if (buf_size) |
260 | kfree(flashbuf); | 264 | kfree(flashbuf); |
261 | #ifndef __ECOS | 265 | #ifndef __ECOS |
262 | else | 266 | else |
263 | c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size); | 267 | c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size); |
264 | #endif | 268 | #endif |
265 | return ret; | 269 | return ret; |
266 | } | 270 | } |
267 | 271 | ||
268 | static int jffs2_fill_scan_buf (struct jffs2_sb_info *c, unsigned char *buf, | 272 | int jffs2_fill_scan_buf (struct jffs2_sb_info *c, void *buf, |
269 | uint32_t ofs, uint32_t len) | 273 | uint32_t ofs, uint32_t len) |
270 | { | 274 | { |
271 | int ret; | 275 | int ret; |
@@ -286,14 +290,36 @@ static int jffs2_fill_scan_buf (struct jffs2_sb_info *c, unsigned char *buf, | |||
286 | return 0; | 290 | return 0; |
287 | } | 291 | } |
288 | 292 | ||
293 | int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) | ||
294 | { | ||
295 | if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size | ||
296 | && (!jeb->first_node || !jeb->first_node->next_phys) ) | ||
297 | return BLK_STATE_CLEANMARKER; | ||
298 | |||
299 | /* move blocks with max 4 byte dirty space to cleanlist */ | ||
300 | else if (!ISDIRTY(c->sector_size - (jeb->used_size + jeb->unchecked_size))) { | ||
301 | c->dirty_size -= jeb->dirty_size; | ||
302 | c->wasted_size += jeb->dirty_size; | ||
303 | jeb->wasted_size += jeb->dirty_size; | ||
304 | jeb->dirty_size = 0; | ||
305 | return BLK_STATE_CLEAN; | ||
306 | } else if (jeb->used_size || jeb->unchecked_size) | ||
307 | return BLK_STATE_PARTDIRTY; | ||
308 | else | ||
309 | return BLK_STATE_ALLDIRTY; | ||
310 | } | ||
311 | |||
289 | static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | 312 | static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, |
290 | unsigned char *buf, uint32_t buf_size) { | 313 | unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s) { |
291 | struct jffs2_unknown_node *node; | 314 | struct jffs2_unknown_node *node; |
292 | struct jffs2_unknown_node crcnode; | 315 | struct jffs2_unknown_node crcnode; |
316 | struct jffs2_sum_marker *sm; | ||
293 | uint32_t ofs, prevofs; | 317 | uint32_t ofs, prevofs; |
294 | uint32_t hdr_crc, buf_ofs, buf_len; | 318 | uint32_t hdr_crc, buf_ofs, buf_len; |
295 | int err; | 319 | int err; |
296 | int noise = 0; | 320 | int noise = 0; |
321 | |||
322 | |||
297 | #ifdef CONFIG_JFFS2_FS_WRITEBUFFER | 323 | #ifdef CONFIG_JFFS2_FS_WRITEBUFFER |
298 | int cleanmarkerfound = 0; | 324 | int cleanmarkerfound = 0; |
299 | #endif | 325 | #endif |
@@ -319,17 +345,53 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo | |||
319 | } | 345 | } |
320 | } | 346 | } |
321 | #endif | 347 | #endif |
348 | |||
349 | if (jffs2_sum_active()) { | ||
350 | sm = kmalloc(sizeof(struct jffs2_sum_marker), GFP_KERNEL); | ||
351 | if (!sm) { | ||
352 | return -ENOMEM; | ||
353 | } | ||
354 | |||
355 | err = jffs2_fill_scan_buf(c, (unsigned char *) sm, jeb->offset + c->sector_size - | ||
356 | sizeof(struct jffs2_sum_marker), sizeof(struct jffs2_sum_marker)); | ||
357 | if (err) { | ||
358 | kfree(sm); | ||
359 | return err; | ||
360 | } | ||
361 | |||
362 | if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC ) { | ||
363 | err = jffs2_sum_scan_sumnode(c, jeb, je32_to_cpu(sm->offset), &pseudo_random); | ||
364 | if (err) { | ||
365 | kfree(sm); | ||
366 | return err; | ||
367 | } | ||
368 | } | ||
369 | |||
370 | kfree(sm); | ||
371 | |||
372 | ofs = jeb->offset; | ||
373 | prevofs = jeb->offset - 1; | ||
374 | } | ||
375 | |||
322 | buf_ofs = jeb->offset; | 376 | buf_ofs = jeb->offset; |
323 | 377 | ||
324 | if (!buf_size) { | 378 | if (!buf_size) { |
325 | buf_len = c->sector_size; | 379 | buf_len = c->sector_size; |
380 | |||
381 | if (jffs2_sum_active()) { | ||
382 | /* must reread because of summary test */ | ||
383 | err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len); | ||
384 | if (err) | ||
385 | return err; | ||
386 | } | ||
387 | |||
326 | } else { | 388 | } else { |
327 | buf_len = EMPTY_SCAN_SIZE(c->sector_size); | 389 | buf_len = EMPTY_SCAN_SIZE(c->sector_size); |
328 | err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len); | 390 | err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len); |
329 | if (err) | 391 | if (err) |
330 | return err; | 392 | return err; |
331 | } | 393 | } |
332 | 394 | ||
333 | /* We temporarily use 'ofs' as a pointer into the buffer/jeb */ | 395 | /* We temporarily use 'ofs' as a pointer into the buffer/jeb */ |
334 | ofs = 0; | 396 | ofs = 0; |
335 | 397 | ||
@@ -367,10 +429,12 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo | |||
367 | 429 | ||
368 | noise = 10; | 430 | noise = 10; |
369 | 431 | ||
370 | scan_more: | 432 | dbg_summary("no summary found in jeb 0x%08x. Apply original scan.\n",jeb->offset); |
433 | |||
434 | scan_more: | ||
371 | while(ofs < jeb->offset + c->sector_size) { | 435 | while(ofs < jeb->offset + c->sector_size) { |
372 | 436 | ||
373 | D1(ACCT_PARANOIA_CHECK(jeb)); | 437 | jffs2_dbg_acct_paranoia_check_nolock(c, jeb); |
374 | 438 | ||
375 | cond_resched(); | 439 | cond_resched(); |
376 | 440 | ||
@@ -432,7 +496,7 @@ scan_more: | |||
432 | 496 | ||
433 | /* If we're only checking the beginning of a block with a cleanmarker, | 497 | /* If we're only checking the beginning of a block with a cleanmarker, |
434 | bail now */ | 498 | bail now */ |
435 | if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) && | 499 | if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) && |
436 | c->cleanmarker_size && !jeb->dirty_size && !jeb->first_node->next_phys) { | 500 | c->cleanmarker_size && !jeb->dirty_size && !jeb->first_node->next_phys) { |
437 | D1(printk(KERN_DEBUG "%d bytes at start of block seems clean... assuming all clean\n", EMPTY_SCAN_SIZE(c->sector_size))); | 501 | D1(printk(KERN_DEBUG "%d bytes at start of block seems clean... assuming all clean\n", EMPTY_SCAN_SIZE(c->sector_size))); |
438 | return BLK_STATE_CLEANMARKER; | 502 | return BLK_STATE_CLEANMARKER; |
@@ -441,7 +505,7 @@ scan_more: | |||
441 | /* See how much more there is to read in this eraseblock... */ | 505 | /* See how much more there is to read in this eraseblock... */ |
442 | buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); | 506 | buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); |
443 | if (!buf_len) { | 507 | if (!buf_len) { |
444 | /* No more to read. Break out of main loop without marking | 508 | /* No more to read. Break out of main loop without marking |
445 | this range of empty space as dirty (because it's not) */ | 509 | this range of empty space as dirty (because it's not) */ |
446 | D1(printk(KERN_DEBUG "Empty flash at %08x runs to end of block. Treating as free_space\n", | 510 | D1(printk(KERN_DEBUG "Empty flash at %08x runs to end of block. Treating as free_space\n", |
447 | empty_start)); | 511 | empty_start)); |
@@ -476,8 +540,8 @@ scan_more: | |||
476 | } | 540 | } |
477 | if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) { | 541 | if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) { |
478 | /* OK. We're out of possibilities. Whinge and move on */ | 542 | /* OK. We're out of possibilities. Whinge and move on */ |
479 | noisy_printk(&noise, "jffs2_scan_eraseblock(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n", | 543 | noisy_printk(&noise, "jffs2_scan_eraseblock(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n", |
480 | JFFS2_MAGIC_BITMASK, ofs, | 544 | JFFS2_MAGIC_BITMASK, ofs, |
481 | je16_to_cpu(node->magic)); | 545 | je16_to_cpu(node->magic)); |
482 | DIRTY_SPACE(4); | 546 | DIRTY_SPACE(4); |
483 | ofs += 4; | 547 | ofs += 4; |
@@ -492,7 +556,7 @@ scan_more: | |||
492 | if (hdr_crc != je32_to_cpu(node->hdr_crc)) { | 556 | if (hdr_crc != je32_to_cpu(node->hdr_crc)) { |
493 | noisy_printk(&noise, "jffs2_scan_eraseblock(): Node at 0x%08x {0x%04x, 0x%04x, 0x%08x) has invalid CRC 0x%08x (calculated 0x%08x)\n", | 557 | noisy_printk(&noise, "jffs2_scan_eraseblock(): Node at 0x%08x {0x%04x, 0x%04x, 0x%08x) has invalid CRC 0x%08x (calculated 0x%08x)\n", |
494 | ofs, je16_to_cpu(node->magic), | 558 | ofs, je16_to_cpu(node->magic), |
495 | je16_to_cpu(node->nodetype), | 559 | je16_to_cpu(node->nodetype), |
496 | je32_to_cpu(node->totlen), | 560 | je32_to_cpu(node->totlen), |
497 | je32_to_cpu(node->hdr_crc), | 561 | je32_to_cpu(node->hdr_crc), |
498 | hdr_crc); | 562 | hdr_crc); |
@@ -501,7 +565,7 @@ scan_more: | |||
501 | continue; | 565 | continue; |
502 | } | 566 | } |
503 | 567 | ||
504 | if (ofs + je32_to_cpu(node->totlen) > | 568 | if (ofs + je32_to_cpu(node->totlen) > |
505 | jeb->offset + c->sector_size) { | 569 | jeb->offset + c->sector_size) { |
506 | /* Eep. Node goes over the end of the erase block. */ | 570 | /* Eep. Node goes over the end of the erase block. */ |
507 | printk(KERN_WARNING "Node at 0x%08x with length 0x%08x would run over the end of the erase block\n", | 571 | printk(KERN_WARNING "Node at 0x%08x with length 0x%08x would run over the end of the erase block\n", |
@@ -532,11 +596,11 @@ scan_more: | |||
532 | buf_ofs = ofs; | 596 | buf_ofs = ofs; |
533 | node = (void *)buf; | 597 | node = (void *)buf; |
534 | } | 598 | } |
535 | err = jffs2_scan_inode_node(c, jeb, (void *)node, ofs); | 599 | err = jffs2_scan_inode_node(c, jeb, (void *)node, ofs, s); |
536 | if (err) return err; | 600 | if (err) return err; |
537 | ofs += PAD(je32_to_cpu(node->totlen)); | 601 | ofs += PAD(je32_to_cpu(node->totlen)); |
538 | break; | 602 | break; |
539 | 603 | ||
540 | case JFFS2_NODETYPE_DIRENT: | 604 | case JFFS2_NODETYPE_DIRENT: |
541 | if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) { | 605 | if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) { |
542 | buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); | 606 | buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); |
@@ -548,7 +612,7 @@ scan_more: | |||
548 | buf_ofs = ofs; | 612 | buf_ofs = ofs; |
549 | node = (void *)buf; | 613 | node = (void *)buf; |
550 | } | 614 | } |
551 | err = jffs2_scan_dirent_node(c, jeb, (void *)node, ofs); | 615 | err = jffs2_scan_dirent_node(c, jeb, (void *)node, ofs, s); |
552 | if (err) return err; | 616 | if (err) return err; |
553 | ofs += PAD(je32_to_cpu(node->totlen)); | 617 | ofs += PAD(je32_to_cpu(node->totlen)); |
554 | break; | 618 | break; |
@@ -556,7 +620,7 @@ scan_more: | |||
556 | case JFFS2_NODETYPE_CLEANMARKER: | 620 | case JFFS2_NODETYPE_CLEANMARKER: |
557 | D1(printk(KERN_DEBUG "CLEANMARKER node found at 0x%08x\n", ofs)); | 621 | D1(printk(KERN_DEBUG "CLEANMARKER node found at 0x%08x\n", ofs)); |
558 | if (je32_to_cpu(node->totlen) != c->cleanmarker_size) { | 622 | if (je32_to_cpu(node->totlen) != c->cleanmarker_size) { |
559 | printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n", | 623 | printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n", |
560 | ofs, je32_to_cpu(node->totlen), c->cleanmarker_size); | 624 | ofs, je32_to_cpu(node->totlen), c->cleanmarker_size); |
561 | DIRTY_SPACE(PAD(sizeof(struct jffs2_unknown_node))); | 625 | DIRTY_SPACE(PAD(sizeof(struct jffs2_unknown_node))); |
562 | ofs += PAD(sizeof(struct jffs2_unknown_node)); | 626 | ofs += PAD(sizeof(struct jffs2_unknown_node)); |
@@ -575,13 +639,15 @@ scan_more: | |||
575 | marker_ref->flash_offset = ofs | REF_NORMAL; | 639 | marker_ref->flash_offset = ofs | REF_NORMAL; |
576 | marker_ref->__totlen = c->cleanmarker_size; | 640 | marker_ref->__totlen = c->cleanmarker_size; |
577 | jeb->first_node = jeb->last_node = marker_ref; | 641 | jeb->first_node = jeb->last_node = marker_ref; |
578 | 642 | ||
579 | USED_SPACE(PAD(c->cleanmarker_size)); | 643 | USED_SPACE(PAD(c->cleanmarker_size)); |
580 | ofs += PAD(c->cleanmarker_size); | 644 | ofs += PAD(c->cleanmarker_size); |
581 | } | 645 | } |
582 | break; | 646 | break; |
583 | 647 | ||
584 | case JFFS2_NODETYPE_PADDING: | 648 | case JFFS2_NODETYPE_PADDING: |
649 | if (jffs2_sum_active()) | ||
650 | jffs2_sum_add_padding_mem(s, je32_to_cpu(node->totlen)); | ||
585 | DIRTY_SPACE(PAD(je32_to_cpu(node->totlen))); | 651 | DIRTY_SPACE(PAD(je32_to_cpu(node->totlen))); |
586 | ofs += PAD(je32_to_cpu(node->totlen)); | 652 | ofs += PAD(je32_to_cpu(node->totlen)); |
587 | break; | 653 | break; |
@@ -616,8 +682,15 @@ scan_more: | |||
616 | } | 682 | } |
617 | } | 683 | } |
618 | 684 | ||
685 | if (jffs2_sum_active()) { | ||
686 | if (PAD(s->sum_size + JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size) { | ||
687 | dbg_summary("There is not enough space for " | ||
688 | "summary information, disabling for this jeb!\n"); | ||
689 | jffs2_sum_disable_collecting(s); | ||
690 | } | ||
691 | } | ||
619 | 692 | ||
620 | D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x\n", jeb->offset, | 693 | D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x\n", jeb->offset, |
621 | jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size)); | 694 | jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size)); |
622 | 695 | ||
623 | /* mark_node_obsolete can add to wasted !! */ | 696 | /* mark_node_obsolete can add to wasted !! */ |
@@ -628,24 +701,10 @@ scan_more: | |||
628 | jeb->wasted_size = 0; | 701 | jeb->wasted_size = 0; |
629 | } | 702 | } |
630 | 703 | ||
631 | if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size | 704 | return jffs2_scan_classify_jeb(c, jeb); |
632 | && (!jeb->first_node || !jeb->first_node->next_phys) ) | ||
633 | return BLK_STATE_CLEANMARKER; | ||
634 | |||
635 | /* move blocks with max 4 byte dirty space to cleanlist */ | ||
636 | else if (!ISDIRTY(c->sector_size - (jeb->used_size + jeb->unchecked_size))) { | ||
637 | c->dirty_size -= jeb->dirty_size; | ||
638 | c->wasted_size += jeb->dirty_size; | ||
639 | jeb->wasted_size += jeb->dirty_size; | ||
640 | jeb->dirty_size = 0; | ||
641 | return BLK_STATE_CLEAN; | ||
642 | } else if (jeb->used_size || jeb->unchecked_size) | ||
643 | return BLK_STATE_PARTDIRTY; | ||
644 | else | ||
645 | return BLK_STATE_ALLDIRTY; | ||
646 | } | 705 | } |
647 | 706 | ||
648 | static struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino) | 707 | struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino) |
649 | { | 708 | { |
650 | struct jffs2_inode_cache *ic; | 709 | struct jffs2_inode_cache *ic; |
651 | 710 | ||
@@ -671,8 +730,8 @@ static struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info | |||
671 | return ic; | 730 | return ic; |
672 | } | 731 | } |
673 | 732 | ||
674 | static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | 733 | static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, |
675 | struct jffs2_raw_inode *ri, uint32_t ofs) | 734 | struct jffs2_raw_inode *ri, uint32_t ofs, struct jffs2_summary *s) |
676 | { | 735 | { |
677 | struct jffs2_raw_node_ref *raw; | 736 | struct jffs2_raw_node_ref *raw; |
678 | struct jffs2_inode_cache *ic; | 737 | struct jffs2_inode_cache *ic; |
@@ -681,11 +740,11 @@ static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_erasebloc | |||
681 | D1(printk(KERN_DEBUG "jffs2_scan_inode_node(): Node at 0x%08x\n", ofs)); | 740 | D1(printk(KERN_DEBUG "jffs2_scan_inode_node(): Node at 0x%08x\n", ofs)); |
682 | 741 | ||
683 | /* We do very little here now. Just check the ino# to which we should attribute | 742 | /* We do very little here now. Just check the ino# to which we should attribute |
684 | this node; we can do all the CRC checking etc. later. There's a tradeoff here -- | 743 | this node; we can do all the CRC checking etc. later. There's a tradeoff here -- |
685 | we used to scan the flash once only, reading everything we want from it into | 744 | we used to scan the flash once only, reading everything we want from it into |
686 | memory, then building all our in-core data structures and freeing the extra | 745 | memory, then building all our in-core data structures and freeing the extra |
687 | information. Now we allow the first part of the mount to complete a lot quicker, | 746 | information. Now we allow the first part of the mount to complete a lot quicker, |
688 | but we have to go _back_ to the flash in order to finish the CRC checking, etc. | 747 | but we have to go _back_ to the flash in order to finish the CRC checking, etc. |
689 | Which means that the _full_ amount of time to get to proper write mode with GC | 748 | Which means that the _full_ amount of time to get to proper write mode with GC |
690 | operational may actually be _longer_ than before. Sucks to be me. */ | 749 | operational may actually be _longer_ than before. Sucks to be me. */ |
691 | 750 | ||
@@ -731,7 +790,7 @@ static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_erasebloc | |||
731 | jeb->last_node->next_phys = raw; | 790 | jeb->last_node->next_phys = raw; |
732 | jeb->last_node = raw; | 791 | jeb->last_node = raw; |
733 | 792 | ||
734 | D1(printk(KERN_DEBUG "Node is ino #%u, version %d. Range 0x%x-0x%x\n", | 793 | D1(printk(KERN_DEBUG "Node is ino #%u, version %d. Range 0x%x-0x%x\n", |
735 | je32_to_cpu(ri->ino), je32_to_cpu(ri->version), | 794 | je32_to_cpu(ri->ino), je32_to_cpu(ri->version), |
736 | je32_to_cpu(ri->offset), | 795 | je32_to_cpu(ri->offset), |
737 | je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize))); | 796 | je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize))); |
@@ -739,11 +798,16 @@ static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_erasebloc | |||
739 | pseudo_random += je32_to_cpu(ri->version); | 798 | pseudo_random += je32_to_cpu(ri->version); |
740 | 799 | ||
741 | UNCHECKED_SPACE(PAD(je32_to_cpu(ri->totlen))); | 800 | UNCHECKED_SPACE(PAD(je32_to_cpu(ri->totlen))); |
801 | |||
802 | if (jffs2_sum_active()) { | ||
803 | jffs2_sum_add_inode_mem(s, ri, ofs - jeb->offset); | ||
804 | } | ||
805 | |||
742 | return 0; | 806 | return 0; |
743 | } | 807 | } |
744 | 808 | ||
745 | static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | 809 | static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, |
746 | struct jffs2_raw_dirent *rd, uint32_t ofs) | 810 | struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s) |
747 | { | 811 | { |
748 | struct jffs2_raw_node_ref *raw; | 812 | struct jffs2_raw_node_ref *raw; |
749 | struct jffs2_full_dirent *fd; | 813 | struct jffs2_full_dirent *fd; |
@@ -776,7 +840,7 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo | |||
776 | crc = crc32(0, fd->name, rd->nsize); | 840 | crc = crc32(0, fd->name, rd->nsize); |
777 | if (crc != je32_to_cpu(rd->name_crc)) { | 841 | if (crc != je32_to_cpu(rd->name_crc)) { |
778 | printk(KERN_NOTICE "jffs2_scan_dirent_node(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", | 842 | printk(KERN_NOTICE "jffs2_scan_dirent_node(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", |
779 | ofs, je32_to_cpu(rd->name_crc), crc); | 843 | ofs, je32_to_cpu(rd->name_crc), crc); |
780 | D1(printk(KERN_NOTICE "Name for which CRC failed is (now) '%s', ino #%d\n", fd->name, je32_to_cpu(rd->ino))); | 844 | D1(printk(KERN_NOTICE "Name for which CRC failed is (now) '%s', ino #%d\n", fd->name, je32_to_cpu(rd->ino))); |
781 | jffs2_free_full_dirent(fd); | 845 | jffs2_free_full_dirent(fd); |
782 | /* FIXME: Why do we believe totlen? */ | 846 | /* FIXME: Why do we believe totlen? */ |
@@ -796,7 +860,7 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo | |||
796 | jffs2_free_raw_node_ref(raw); | 860 | jffs2_free_raw_node_ref(raw); |
797 | return -ENOMEM; | 861 | return -ENOMEM; |
798 | } | 862 | } |
799 | 863 | ||
800 | raw->__totlen = PAD(je32_to_cpu(rd->totlen)); | 864 | raw->__totlen = PAD(je32_to_cpu(rd->totlen)); |
801 | raw->flash_offset = ofs | REF_PRISTINE; | 865 | raw->flash_offset = ofs | REF_PRISTINE; |
802 | raw->next_phys = NULL; | 866 | raw->next_phys = NULL; |
@@ -817,6 +881,10 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo | |||
817 | USED_SPACE(PAD(je32_to_cpu(rd->totlen))); | 881 | USED_SPACE(PAD(je32_to_cpu(rd->totlen))); |
818 | jffs2_add_fd_to_list(c, fd, &ic->scan_dents); | 882 | jffs2_add_fd_to_list(c, fd, &ic->scan_dents); |
819 | 883 | ||
884 | if (jffs2_sum_active()) { | ||
885 | jffs2_sum_add_dirent_mem(s, rd, ofs - jeb->offset); | ||
886 | } | ||
887 | |||
820 | return 0; | 888 | return 0; |
821 | } | 889 | } |
822 | 890 | ||
@@ -852,76 +920,34 @@ void jffs2_rotate_lists(struct jffs2_sb_info *c) | |||
852 | x = count_list(&c->clean_list); | 920 | x = count_list(&c->clean_list); |
853 | if (x) { | 921 | if (x) { |
854 | rotateby = pseudo_random % x; | 922 | rotateby = pseudo_random % x; |
855 | D1(printk(KERN_DEBUG "Rotating clean_list by %d\n", rotateby)); | ||
856 | |||
857 | rotate_list((&c->clean_list), rotateby); | 923 | rotate_list((&c->clean_list), rotateby); |
858 | |||
859 | D1(printk(KERN_DEBUG "Erase block at front of clean_list is at %08x\n", | ||
860 | list_entry(c->clean_list.next, struct jffs2_eraseblock, list)->offset)); | ||
861 | } else { | ||
862 | D1(printk(KERN_DEBUG "Not rotating empty clean_list\n")); | ||
863 | } | 924 | } |
864 | 925 | ||
865 | x = count_list(&c->very_dirty_list); | 926 | x = count_list(&c->very_dirty_list); |
866 | if (x) { | 927 | if (x) { |
867 | rotateby = pseudo_random % x; | 928 | rotateby = pseudo_random % x; |
868 | D1(printk(KERN_DEBUG "Rotating very_dirty_list by %d\n", rotateby)); | ||
869 | |||
870 | rotate_list((&c->very_dirty_list), rotateby); | 929 | rotate_list((&c->very_dirty_list), rotateby); |
871 | |||
872 | D1(printk(KERN_DEBUG "Erase block at front of very_dirty_list is at %08x\n", | ||
873 | list_entry(c->very_dirty_list.next, struct jffs2_eraseblock, list)->offset)); | ||
874 | } else { | ||
875 | D1(printk(KERN_DEBUG "Not rotating empty very_dirty_list\n")); | ||
876 | } | 930 | } |
877 | 931 | ||
878 | x = count_list(&c->dirty_list); | 932 | x = count_list(&c->dirty_list); |
879 | if (x) { | 933 | if (x) { |
880 | rotateby = pseudo_random % x; | 934 | rotateby = pseudo_random % x; |
881 | D1(printk(KERN_DEBUG "Rotating dirty_list by %d\n", rotateby)); | ||
882 | |||
883 | rotate_list((&c->dirty_list), rotateby); | 935 | rotate_list((&c->dirty_list), rotateby); |
884 | |||
885 | D1(printk(KERN_DEBUG "Erase block at front of dirty_list is at %08x\n", | ||
886 | list_entry(c->dirty_list.next, struct jffs2_eraseblock, list)->offset)); | ||
887 | } else { | ||
888 | D1(printk(KERN_DEBUG "Not rotating empty dirty_list\n")); | ||
889 | } | 936 | } |
890 | 937 | ||
891 | x = count_list(&c->erasable_list); | 938 | x = count_list(&c->erasable_list); |
892 | if (x) { | 939 | if (x) { |
893 | rotateby = pseudo_random % x; | 940 | rotateby = pseudo_random % x; |
894 | D1(printk(KERN_DEBUG "Rotating erasable_list by %d\n", rotateby)); | ||
895 | |||
896 | rotate_list((&c->erasable_list), rotateby); | 941 | rotate_list((&c->erasable_list), rotateby); |
897 | |||
898 | D1(printk(KERN_DEBUG "Erase block at front of erasable_list is at %08x\n", | ||
899 | list_entry(c->erasable_list.next, struct jffs2_eraseblock, list)->offset)); | ||
900 | } else { | ||
901 | D1(printk(KERN_DEBUG "Not rotating empty erasable_list\n")); | ||
902 | } | 942 | } |
903 | 943 | ||
904 | if (c->nr_erasing_blocks) { | 944 | if (c->nr_erasing_blocks) { |
905 | rotateby = pseudo_random % c->nr_erasing_blocks; | 945 | rotateby = pseudo_random % c->nr_erasing_blocks; |
906 | D1(printk(KERN_DEBUG "Rotating erase_pending_list by %d\n", rotateby)); | ||
907 | |||
908 | rotate_list((&c->erase_pending_list), rotateby); | 946 | rotate_list((&c->erase_pending_list), rotateby); |
909 | |||
910 | D1(printk(KERN_DEBUG "Erase block at front of erase_pending_list is at %08x\n", | ||
911 | list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list)->offset)); | ||
912 | } else { | ||
913 | D1(printk(KERN_DEBUG "Not rotating empty erase_pending_list\n")); | ||
914 | } | 947 | } |
915 | 948 | ||
916 | if (c->nr_free_blocks) { | 949 | if (c->nr_free_blocks) { |
917 | rotateby = pseudo_random % c->nr_free_blocks; | 950 | rotateby = pseudo_random % c->nr_free_blocks; |
918 | D1(printk(KERN_DEBUG "Rotating free_list by %d\n", rotateby)); | ||
919 | |||
920 | rotate_list((&c->free_list), rotateby); | 951 | rotate_list((&c->free_list), rotateby); |
921 | |||
922 | D1(printk(KERN_DEBUG "Erase block at front of free_list is at %08x\n", | ||
923 | list_entry(c->free_list.next, struct jffs2_eraseblock, list)->offset)); | ||
924 | } else { | ||
925 | D1(printk(KERN_DEBUG "Not rotating empty free_list\n")); | ||
926 | } | 952 | } |
927 | } | 953 | } |
diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c new file mode 100644 index 000000000000..fb9cec61fcf2 --- /dev/null +++ b/fs/jffs2/summary.c | |||
@@ -0,0 +1,730 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>, | ||
5 | * Zoltan Sogor <weth@inf.u-szeged.hu>, | ||
6 | * Patrik Kluba <pajko@halom.u-szeged.hu>, | ||
7 | * University of Szeged, Hungary | ||
8 | * | ||
9 | * For licensing information, see the file 'LICENCE' in this directory. | ||
10 | * | ||
11 | * $Id: summary.c,v 1.4 2005/09/26 11:37:21 havasi Exp $ | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/sched.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/mtd/mtd.h> | ||
19 | #include <linux/pagemap.h> | ||
20 | #include <linux/crc32.h> | ||
21 | #include <linux/compiler.h> | ||
22 | #include <linux/vmalloc.h> | ||
23 | #include "nodelist.h" | ||
24 | #include "debug.h" | ||
25 | |||
26 | int jffs2_sum_init(struct jffs2_sb_info *c) | ||
27 | { | ||
28 | c->summary = kmalloc(sizeof(struct jffs2_summary), GFP_KERNEL); | ||
29 | |||
30 | if (!c->summary) { | ||
31 | JFFS2_WARNING("Can't allocate memory for summary information!\n"); | ||
32 | return -ENOMEM; | ||
33 | } | ||
34 | |||
35 | memset(c->summary, 0, sizeof(struct jffs2_summary)); | ||
36 | |||
37 | c->summary->sum_buf = vmalloc(c->sector_size); | ||
38 | |||
39 | if (!c->summary->sum_buf) { | ||
40 | JFFS2_WARNING("Can't allocate buffer for writing out summary information!\n"); | ||
41 | kfree(c->summary); | ||
42 | return -ENOMEM; | ||
43 | } | ||
44 | |||
45 | dbg_summary("returned succesfully\n"); | ||
46 | |||
47 | return 0; | ||
48 | } | ||
49 | |||
50 | void jffs2_sum_exit(struct jffs2_sb_info *c) | ||
51 | { | ||
52 | dbg_summary("called\n"); | ||
53 | |||
54 | jffs2_sum_disable_collecting(c->summary); | ||
55 | |||
56 | vfree(c->summary->sum_buf); | ||
57 | c->summary->sum_buf = NULL; | ||
58 | |||
59 | kfree(c->summary); | ||
60 | c->summary = NULL; | ||
61 | } | ||
62 | |||
63 | static int jffs2_sum_add_mem(struct jffs2_summary *s, union jffs2_sum_mem *item) | ||
64 | { | ||
65 | if (!s->sum_list_head) | ||
66 | s->sum_list_head = (union jffs2_sum_mem *) item; | ||
67 | if (s->sum_list_tail) | ||
68 | s->sum_list_tail->u.next = (union jffs2_sum_mem *) item; | ||
69 | s->sum_list_tail = (union jffs2_sum_mem *) item; | ||
70 | |||
71 | switch (je16_to_cpu(item->u.nodetype)) { | ||
72 | case JFFS2_NODETYPE_INODE: | ||
73 | s->sum_size += JFFS2_SUMMARY_INODE_SIZE; | ||
74 | s->sum_num++; | ||
75 | dbg_summary("inode (%u) added to summary\n", | ||
76 | je32_to_cpu(item->i.inode)); | ||
77 | break; | ||
78 | case JFFS2_NODETYPE_DIRENT: | ||
79 | s->sum_size += JFFS2_SUMMARY_DIRENT_SIZE(item->d.nsize); | ||
80 | s->sum_num++; | ||
81 | dbg_summary("dirent (%u) added to summary\n", | ||
82 | je32_to_cpu(item->d.ino)); | ||
83 | break; | ||
84 | default: | ||
85 | JFFS2_WARNING("UNKNOWN node type %u\n", | ||
86 | je16_to_cpu(item->u.nodetype)); | ||
87 | return 1; | ||
88 | } | ||
89 | return 0; | ||
90 | } | ||
91 | |||
92 | |||
93 | /* The following 3 functions are called from scan.c to collect summary info for not closed jeb */ | ||
94 | |||
95 | int jffs2_sum_add_padding_mem(struct jffs2_summary *s, uint32_t size) | ||
96 | { | ||
97 | dbg_summary("called with %u\n", size); | ||
98 | s->sum_padded += size; | ||
99 | return 0; | ||
100 | } | ||
101 | |||
102 | int jffs2_sum_add_inode_mem(struct jffs2_summary *s, struct jffs2_raw_inode *ri, | ||
103 | uint32_t ofs) | ||
104 | { | ||
105 | struct jffs2_sum_inode_mem *temp = kmalloc(sizeof(struct jffs2_sum_inode_mem), GFP_KERNEL); | ||
106 | |||
107 | if (!temp) | ||
108 | return -ENOMEM; | ||
109 | |||
110 | temp->nodetype = ri->nodetype; | ||
111 | temp->inode = ri->ino; | ||
112 | temp->version = ri->version; | ||
113 | temp->offset = cpu_to_je32(ofs); /* relative offset from the begining of the jeb */ | ||
114 | temp->totlen = ri->totlen; | ||
115 | temp->next = NULL; | ||
116 | |||
117 | return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp); | ||
118 | } | ||
119 | |||
120 | int jffs2_sum_add_dirent_mem(struct jffs2_summary *s, struct jffs2_raw_dirent *rd, | ||
121 | uint32_t ofs) | ||
122 | { | ||
123 | struct jffs2_sum_dirent_mem *temp = | ||
124 | kmalloc(sizeof(struct jffs2_sum_dirent_mem) + rd->nsize, GFP_KERNEL); | ||
125 | |||
126 | if (!temp) | ||
127 | return -ENOMEM; | ||
128 | |||
129 | temp->nodetype = rd->nodetype; | ||
130 | temp->totlen = rd->totlen; | ||
131 | temp->offset = cpu_to_je32(ofs); /* relative from the begining of the jeb */ | ||
132 | temp->pino = rd->pino; | ||
133 | temp->version = rd->version; | ||
134 | temp->ino = rd->ino; | ||
135 | temp->nsize = rd->nsize; | ||
136 | temp->type = rd->type; | ||
137 | temp->next = NULL; | ||
138 | |||
139 | memcpy(temp->name, rd->name, rd->nsize); | ||
140 | |||
141 | return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp); | ||
142 | } | ||
143 | |||
144 | /* Cleanup every collected summary information */ | ||
145 | |||
146 | static void jffs2_sum_clean_collected(struct jffs2_summary *s) | ||
147 | { | ||
148 | union jffs2_sum_mem *temp; | ||
149 | |||
150 | if (!s->sum_list_head) { | ||
151 | dbg_summary("already empty\n"); | ||
152 | } | ||
153 | while (s->sum_list_head) { | ||
154 | temp = s->sum_list_head; | ||
155 | s->sum_list_head = s->sum_list_head->u.next; | ||
156 | kfree(temp); | ||
157 | } | ||
158 | s->sum_list_tail = NULL; | ||
159 | s->sum_padded = 0; | ||
160 | s->sum_num = 0; | ||
161 | } | ||
162 | |||
163 | void jffs2_sum_reset_collected(struct jffs2_summary *s) | ||
164 | { | ||
165 | dbg_summary("called\n"); | ||
166 | jffs2_sum_clean_collected(s); | ||
167 | s->sum_size = 0; | ||
168 | } | ||
169 | |||
170 | void jffs2_sum_disable_collecting(struct jffs2_summary *s) | ||
171 | { | ||
172 | dbg_summary("called\n"); | ||
173 | jffs2_sum_clean_collected(s); | ||
174 | s->sum_size = JFFS2_SUMMARY_NOSUM_SIZE; | ||
175 | } | ||
176 | |||
177 | int jffs2_sum_is_disabled(struct jffs2_summary *s) | ||
178 | { | ||
179 | return (s->sum_size == JFFS2_SUMMARY_NOSUM_SIZE); | ||
180 | } | ||
181 | |||
182 | /* Move the collected summary information into sb (called from scan.c) */ | ||
183 | |||
184 | void jffs2_sum_move_collected(struct jffs2_sb_info *c, struct jffs2_summary *s) | ||
185 | { | ||
186 | dbg_summary("oldsize=0x%x oldnum=%u => newsize=0x%x newnum=%u\n", | ||
187 | c->summary->sum_size, c->summary->sum_num, | ||
188 | s->sum_size, s->sum_num); | ||
189 | |||
190 | c->summary->sum_size = s->sum_size; | ||
191 | c->summary->sum_num = s->sum_num; | ||
192 | c->summary->sum_padded = s->sum_padded; | ||
193 | c->summary->sum_list_head = s->sum_list_head; | ||
194 | c->summary->sum_list_tail = s->sum_list_tail; | ||
195 | |||
196 | s->sum_list_head = s->sum_list_tail = NULL; | ||
197 | } | ||
198 | |||
199 | /* Called from wbuf.c to collect writed node info */ | ||
200 | |||
201 | int jffs2_sum_add_kvec(struct jffs2_sb_info *c, const struct kvec *invecs, | ||
202 | unsigned long count, uint32_t ofs) | ||
203 | { | ||
204 | union jffs2_node_union *node; | ||
205 | struct jffs2_eraseblock *jeb; | ||
206 | |||
207 | node = invecs[0].iov_base; | ||
208 | jeb = &c->blocks[ofs / c->sector_size]; | ||
209 | ofs -= jeb->offset; | ||
210 | |||
211 | switch (je16_to_cpu(node->u.nodetype)) { | ||
212 | case JFFS2_NODETYPE_INODE: { | ||
213 | struct jffs2_sum_inode_mem *temp = | ||
214 | kmalloc(sizeof(struct jffs2_sum_inode_mem), GFP_KERNEL); | ||
215 | |||
216 | if (!temp) | ||
217 | goto no_mem; | ||
218 | |||
219 | temp->nodetype = node->i.nodetype; | ||
220 | temp->inode = node->i.ino; | ||
221 | temp->version = node->i.version; | ||
222 | temp->offset = cpu_to_je32(ofs); | ||
223 | temp->totlen = node->i.totlen; | ||
224 | temp->next = NULL; | ||
225 | |||
226 | return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp); | ||
227 | } | ||
228 | |||
229 | case JFFS2_NODETYPE_DIRENT: { | ||
230 | struct jffs2_sum_dirent_mem *temp = | ||
231 | kmalloc(sizeof(struct jffs2_sum_dirent_mem) + node->d.nsize, GFP_KERNEL); | ||
232 | |||
233 | if (!temp) | ||
234 | goto no_mem; | ||
235 | |||
236 | temp->nodetype = node->d.nodetype; | ||
237 | temp->totlen = node->d.totlen; | ||
238 | temp->offset = cpu_to_je32(ofs); | ||
239 | temp->pino = node->d.pino; | ||
240 | temp->version = node->d.version; | ||
241 | temp->ino = node->d.ino; | ||
242 | temp->nsize = node->d.nsize; | ||
243 | temp->type = node->d.type; | ||
244 | temp->next = NULL; | ||
245 | |||
246 | switch (count) { | ||
247 | case 1: | ||
248 | memcpy(temp->name,node->d.name,node->d.nsize); | ||
249 | break; | ||
250 | |||
251 | case 2: | ||
252 | memcpy(temp->name,invecs[1].iov_base,node->d.nsize); | ||
253 | break; | ||
254 | |||
255 | default: | ||
256 | BUG(); /* impossible count value */ | ||
257 | break; | ||
258 | } | ||
259 | |||
260 | return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp); | ||
261 | } | ||
262 | |||
263 | case JFFS2_NODETYPE_PADDING: | ||
264 | dbg_summary("node PADDING\n"); | ||
265 | c->summary->sum_padded += je32_to_cpu(node->u.totlen); | ||
266 | break; | ||
267 | |||
268 | case JFFS2_NODETYPE_CLEANMARKER: | ||
269 | dbg_summary("node CLEANMARKER\n"); | ||
270 | break; | ||
271 | |||
272 | case JFFS2_NODETYPE_SUMMARY: | ||
273 | dbg_summary("node SUMMARY\n"); | ||
274 | break; | ||
275 | |||
276 | default: | ||
277 | /* If you implement a new node type you should also implement | ||
278 | summary support for it or disable summary. | ||
279 | */ | ||
280 | BUG(); | ||
281 | break; | ||
282 | } | ||
283 | |||
284 | return 0; | ||
285 | |||
286 | no_mem: | ||
287 | JFFS2_WARNING("MEMORY ALLOCATION ERROR!"); | ||
288 | return -ENOMEM; | ||
289 | } | ||
290 | |||
291 | |||
292 | /* Process the stored summary information - helper function for jffs2_sum_scan_sumnode() */ | ||
293 | |||
294 | static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | ||
295 | struct jffs2_raw_summary *summary, uint32_t *pseudo_random) | ||
296 | { | ||
297 | struct jffs2_raw_node_ref *raw; | ||
298 | struct jffs2_inode_cache *ic; | ||
299 | struct jffs2_full_dirent *fd; | ||
300 | void *sp; | ||
301 | int i, ino; | ||
302 | |||
303 | sp = summary->sum; | ||
304 | |||
305 | for (i=0; i<je32_to_cpu(summary->sum_num); i++) { | ||
306 | dbg_summary("processing summary index %d\n", i); | ||
307 | |||
308 | switch (je16_to_cpu(((struct jffs2_sum_unknown_flash *)sp)->nodetype)) { | ||
309 | case JFFS2_NODETYPE_INODE: { | ||
310 | struct jffs2_sum_inode_flash *spi; | ||
311 | spi = sp; | ||
312 | |||
313 | ino = je32_to_cpu(spi->inode); | ||
314 | |||
315 | dbg_summary("Inode at 0x%08x\n", | ||
316 | jeb->offset + je32_to_cpu(spi->offset)); | ||
317 | |||
318 | raw = jffs2_alloc_raw_node_ref(); | ||
319 | if (!raw) { | ||
320 | JFFS2_NOTICE("allocation of node reference failed\n"); | ||
321 | kfree(summary); | ||
322 | return -ENOMEM; | ||
323 | } | ||
324 | |||
325 | ic = jffs2_scan_make_ino_cache(c, ino); | ||
326 | if (!ic) { | ||
327 | JFFS2_NOTICE("scan_make_ino_cache failed\n"); | ||
328 | jffs2_free_raw_node_ref(raw); | ||
329 | kfree(summary); | ||
330 | return -ENOMEM; | ||
331 | } | ||
332 | |||
333 | raw->flash_offset = (jeb->offset + je32_to_cpu(spi->offset)) | REF_UNCHECKED; | ||
334 | raw->__totlen = PAD(je32_to_cpu(spi->totlen)); | ||
335 | raw->next_phys = NULL; | ||
336 | raw->next_in_ino = ic->nodes; | ||
337 | |||
338 | ic->nodes = raw; | ||
339 | if (!jeb->first_node) | ||
340 | jeb->first_node = raw; | ||
341 | if (jeb->last_node) | ||
342 | jeb->last_node->next_phys = raw; | ||
343 | jeb->last_node = raw; | ||
344 | *pseudo_random += je32_to_cpu(spi->version); | ||
345 | |||
346 | UNCHECKED_SPACE(PAD(je32_to_cpu(spi->totlen))); | ||
347 | |||
348 | sp += JFFS2_SUMMARY_INODE_SIZE; | ||
349 | |||
350 | break; | ||
351 | } | ||
352 | |||
353 | case JFFS2_NODETYPE_DIRENT: { | ||
354 | struct jffs2_sum_dirent_flash *spd; | ||
355 | spd = sp; | ||
356 | |||
357 | dbg_summary("Dirent at 0x%08x\n", | ||
358 | jeb->offset + je32_to_cpu(spd->offset)); | ||
359 | |||
360 | fd = jffs2_alloc_full_dirent(spd->nsize+1); | ||
361 | if (!fd) { | ||
362 | kfree(summary); | ||
363 | return -ENOMEM; | ||
364 | } | ||
365 | |||
366 | memcpy(&fd->name, spd->name, spd->nsize); | ||
367 | fd->name[spd->nsize] = 0; | ||
368 | |||
369 | raw = jffs2_alloc_raw_node_ref(); | ||
370 | if (!raw) { | ||
371 | jffs2_free_full_dirent(fd); | ||
372 | JFFS2_NOTICE("allocation of node reference failed\n"); | ||
373 | kfree(summary); | ||
374 | return -ENOMEM; | ||
375 | } | ||
376 | |||
377 | ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(spd->pino)); | ||
378 | if (!ic) { | ||
379 | jffs2_free_full_dirent(fd); | ||
380 | jffs2_free_raw_node_ref(raw); | ||
381 | kfree(summary); | ||
382 | return -ENOMEM; | ||
383 | } | ||
384 | |||
385 | raw->__totlen = PAD(je32_to_cpu(spd->totlen)); | ||
386 | raw->flash_offset = (jeb->offset + je32_to_cpu(spd->offset)) | REF_PRISTINE; | ||
387 | raw->next_phys = NULL; | ||
388 | raw->next_in_ino = ic->nodes; | ||
389 | ic->nodes = raw; | ||
390 | if (!jeb->first_node) | ||
391 | jeb->first_node = raw; | ||
392 | if (jeb->last_node) | ||
393 | jeb->last_node->next_phys = raw; | ||
394 | jeb->last_node = raw; | ||
395 | |||
396 | fd->raw = raw; | ||
397 | fd->next = NULL; | ||
398 | fd->version = je32_to_cpu(spd->version); | ||
399 | fd->ino = je32_to_cpu(spd->ino); | ||
400 | fd->nhash = full_name_hash(fd->name, spd->nsize); | ||
401 | fd->type = spd->type; | ||
402 | USED_SPACE(PAD(je32_to_cpu(spd->totlen))); | ||
403 | jffs2_add_fd_to_list(c, fd, &ic->scan_dents); | ||
404 | |||
405 | *pseudo_random += je32_to_cpu(spd->version); | ||
406 | |||
407 | sp += JFFS2_SUMMARY_DIRENT_SIZE(spd->nsize); | ||
408 | |||
409 | break; | ||
410 | } | ||
411 | |||
412 | default : { | ||
413 | JFFS2_WARNING("Unsupported node type found in summary! Exiting..."); | ||
414 | kfree(summary); | ||
415 | return -EIO; | ||
416 | } | ||
417 | } | ||
418 | } | ||
419 | |||
420 | kfree(summary); | ||
421 | return 0; | ||
422 | } | ||
423 | |||
424 | /* Process the summary node - called from jffs2_scan_eraseblock() */ | ||
425 | |||
426 | int jffs2_sum_scan_sumnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | ||
427 | uint32_t ofs, uint32_t *pseudo_random) | ||
428 | { | ||
429 | struct jffs2_unknown_node crcnode; | ||
430 | struct jffs2_raw_node_ref *cache_ref; | ||
431 | struct jffs2_raw_summary *summary; | ||
432 | int ret, sumsize; | ||
433 | uint32_t crc; | ||
434 | |||
435 | sumsize = c->sector_size - ofs; | ||
436 | ofs += jeb->offset; | ||
437 | |||
438 | dbg_summary("summary found for 0x%08x at 0x%08x (0x%x bytes)\n", | ||
439 | jeb->offset, ofs, sumsize); | ||
440 | |||
441 | summary = kmalloc(sumsize, GFP_KERNEL); | ||
442 | |||
443 | if (!summary) { | ||
444 | return -ENOMEM; | ||
445 | } | ||
446 | |||
447 | ret = jffs2_fill_scan_buf(c, (unsigned char *)summary, ofs, sumsize); | ||
448 | |||
449 | if (ret) { | ||
450 | kfree(summary); | ||
451 | return ret; | ||
452 | } | ||
453 | |||
454 | /* OK, now check for node validity and CRC */ | ||
455 | crcnode.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); | ||
456 | crcnode.nodetype = cpu_to_je16(JFFS2_NODETYPE_SUMMARY); | ||
457 | crcnode.totlen = summary->totlen; | ||
458 | crc = crc32(0, &crcnode, sizeof(crcnode)-4); | ||
459 | |||
460 | if (je32_to_cpu(summary->hdr_crc) != crc) { | ||
461 | dbg_summary("Summary node header is corrupt (bad CRC or " | ||
462 | "no summary at all)\n"); | ||
463 | goto crc_err; | ||
464 | } | ||
465 | |||
466 | if (je32_to_cpu(summary->totlen) != sumsize) { | ||
467 | dbg_summary("Summary node is corrupt (wrong erasesize?)\n"); | ||
468 | goto crc_err; | ||
469 | } | ||
470 | |||
471 | crc = crc32(0, summary, sizeof(struct jffs2_raw_summary)-8); | ||
472 | |||
473 | if (je32_to_cpu(summary->node_crc) != crc) { | ||
474 | dbg_summary("Summary node is corrupt (bad CRC)\n"); | ||
475 | goto crc_err; | ||
476 | } | ||
477 | |||
478 | crc = crc32(0, summary->sum, sumsize - sizeof(struct jffs2_raw_summary)); | ||
479 | |||
480 | if (je32_to_cpu(summary->sum_crc) != crc) { | ||
481 | dbg_summary("Summary node data is corrupt (bad CRC)\n"); | ||
482 | goto crc_err; | ||
483 | } | ||
484 | |||
485 | if ( je32_to_cpu(summary->cln_mkr) ) { | ||
486 | |||
487 | dbg_summary("Summary : CLEANMARKER node \n"); | ||
488 | |||
489 | if (je32_to_cpu(summary->cln_mkr) != c->cleanmarker_size) { | ||
490 | dbg_summary("CLEANMARKER node has totlen 0x%x != normal 0x%x\n", | ||
491 | je32_to_cpu(summary->cln_mkr), c->cleanmarker_size); | ||
492 | UNCHECKED_SPACE(PAD(je32_to_cpu(summary->cln_mkr))); | ||
493 | } else if (jeb->first_node) { | ||
494 | dbg_summary("CLEANMARKER node not first node in block " | ||
495 | "(0x%08x)\n", jeb->offset); | ||
496 | UNCHECKED_SPACE(PAD(je32_to_cpu(summary->cln_mkr))); | ||
497 | } else { | ||
498 | struct jffs2_raw_node_ref *marker_ref = jffs2_alloc_raw_node_ref(); | ||
499 | |||
500 | if (!marker_ref) { | ||
501 | JFFS2_NOTICE("Failed to allocate node ref for clean marker\n"); | ||
502 | kfree(summary); | ||
503 | return -ENOMEM; | ||
504 | } | ||
505 | |||
506 | marker_ref->next_in_ino = NULL; | ||
507 | marker_ref->next_phys = NULL; | ||
508 | marker_ref->flash_offset = jeb->offset | REF_NORMAL; | ||
509 | marker_ref->__totlen = je32_to_cpu(summary->cln_mkr); | ||
510 | jeb->first_node = jeb->last_node = marker_ref; | ||
511 | |||
512 | USED_SPACE( PAD(je32_to_cpu(summary->cln_mkr)) ); | ||
513 | } | ||
514 | } | ||
515 | |||
516 | if (je32_to_cpu(summary->padded)) { | ||
517 | DIRTY_SPACE(je32_to_cpu(summary->padded)); | ||
518 | } | ||
519 | |||
520 | ret = jffs2_sum_process_sum_data(c, jeb, summary, pseudo_random); | ||
521 | if (ret) | ||
522 | return ret; | ||
523 | |||
524 | /* for PARANOIA_CHECK */ | ||
525 | cache_ref = jffs2_alloc_raw_node_ref(); | ||
526 | |||
527 | if (!cache_ref) { | ||
528 | JFFS2_NOTICE("Failed to allocate node ref for cache\n"); | ||
529 | return -ENOMEM; | ||
530 | } | ||
531 | |||
532 | cache_ref->next_in_ino = NULL; | ||
533 | cache_ref->next_phys = NULL; | ||
534 | cache_ref->flash_offset = ofs | REF_NORMAL; | ||
535 | cache_ref->__totlen = sumsize; | ||
536 | |||
537 | if (!jeb->first_node) | ||
538 | jeb->first_node = cache_ref; | ||
539 | if (jeb->last_node) | ||
540 | jeb->last_node->next_phys = cache_ref; | ||
541 | jeb->last_node = cache_ref; | ||
542 | |||
543 | USED_SPACE(sumsize); | ||
544 | |||
545 | jeb->wasted_size += jeb->free_size; | ||
546 | c->wasted_size += jeb->free_size; | ||
547 | c->free_size -= jeb->free_size; | ||
548 | jeb->free_size = 0; | ||
549 | |||
550 | return jffs2_scan_classify_jeb(c, jeb); | ||
551 | |||
552 | crc_err: | ||
553 | JFFS2_WARNING("Summary node crc error, skipping summary information.\n"); | ||
554 | |||
555 | return 0; | ||
556 | } | ||
557 | |||
558 | /* Write summary data to flash - helper function for jffs2_sum_write_sumnode() */ | ||
559 | |||
560 | static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | ||
561 | uint32_t infosize, uint32_t datasize, int padsize) | ||
562 | { | ||
563 | struct jffs2_raw_summary isum; | ||
564 | union jffs2_sum_mem *temp; | ||
565 | struct jffs2_sum_marker *sm; | ||
566 | struct kvec vecs[2]; | ||
567 | void *wpage; | ||
568 | int ret; | ||
569 | size_t retlen; | ||
570 | |||
571 | memset(c->summary->sum_buf, 0xff, datasize); | ||
572 | memset(&isum, 0, sizeof(isum)); | ||
573 | |||
574 | isum.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); | ||
575 | isum.nodetype = cpu_to_je16(JFFS2_NODETYPE_SUMMARY); | ||
576 | isum.totlen = cpu_to_je32(infosize); | ||
577 | isum.hdr_crc = cpu_to_je32(crc32(0, &isum, sizeof(struct jffs2_unknown_node) - 4)); | ||
578 | isum.padded = cpu_to_je32(c->summary->sum_padded); | ||
579 | isum.cln_mkr = cpu_to_je32(c->cleanmarker_size); | ||
580 | isum.sum_num = cpu_to_je32(c->summary->sum_num); | ||
581 | wpage = c->summary->sum_buf; | ||
582 | |||
583 | while (c->summary->sum_num) { | ||
584 | |||
585 | switch (je16_to_cpu(c->summary->sum_list_head->u.nodetype)) { | ||
586 | case JFFS2_NODETYPE_INODE: { | ||
587 | struct jffs2_sum_inode_flash *sino_ptr = wpage; | ||
588 | |||
589 | sino_ptr->nodetype = c->summary->sum_list_head->i.nodetype; | ||
590 | sino_ptr->inode = c->summary->sum_list_head->i.inode; | ||
591 | sino_ptr->version = c->summary->sum_list_head->i.version; | ||
592 | sino_ptr->offset = c->summary->sum_list_head->i.offset; | ||
593 | sino_ptr->totlen = c->summary->sum_list_head->i.totlen; | ||
594 | |||
595 | wpage += JFFS2_SUMMARY_INODE_SIZE; | ||
596 | |||
597 | break; | ||
598 | } | ||
599 | |||
600 | case JFFS2_NODETYPE_DIRENT: { | ||
601 | struct jffs2_sum_dirent_flash *sdrnt_ptr = wpage; | ||
602 | |||
603 | sdrnt_ptr->nodetype = c->summary->sum_list_head->d.nodetype; | ||
604 | sdrnt_ptr->totlen = c->summary->sum_list_head->d.totlen; | ||
605 | sdrnt_ptr->offset = c->summary->sum_list_head->d.offset; | ||
606 | sdrnt_ptr->pino = c->summary->sum_list_head->d.pino; | ||
607 | sdrnt_ptr->version = c->summary->sum_list_head->d.version; | ||
608 | sdrnt_ptr->ino = c->summary->sum_list_head->d.ino; | ||
609 | sdrnt_ptr->nsize = c->summary->sum_list_head->d.nsize; | ||
610 | sdrnt_ptr->type = c->summary->sum_list_head->d.type; | ||
611 | |||
612 | memcpy(sdrnt_ptr->name, c->summary->sum_list_head->d.name, | ||
613 | c->summary->sum_list_head->d.nsize); | ||
614 | |||
615 | wpage += JFFS2_SUMMARY_DIRENT_SIZE(c->summary->sum_list_head->d.nsize); | ||
616 | |||
617 | break; | ||
618 | } | ||
619 | |||
620 | default : { | ||
621 | BUG(); /* unknown node in summary information */ | ||
622 | } | ||
623 | } | ||
624 | |||
625 | temp = c->summary->sum_list_head; | ||
626 | c->summary->sum_list_head = c->summary->sum_list_head->u.next; | ||
627 | kfree(temp); | ||
628 | |||
629 | c->summary->sum_num--; | ||
630 | } | ||
631 | |||
632 | jffs2_sum_reset_collected(c->summary); | ||
633 | |||
634 | wpage += padsize; | ||
635 | |||
636 | sm = wpage; | ||
637 | sm->offset = cpu_to_je32(c->sector_size - jeb->free_size); | ||
638 | sm->magic = cpu_to_je32(JFFS2_SUM_MAGIC); | ||
639 | |||
640 | isum.sum_crc = cpu_to_je32(crc32(0, c->summary->sum_buf, datasize)); | ||
641 | isum.node_crc = cpu_to_je32(crc32(0, &isum, sizeof(isum) - 8)); | ||
642 | |||
643 | vecs[0].iov_base = &isum; | ||
644 | vecs[0].iov_len = sizeof(isum); | ||
645 | vecs[1].iov_base = c->summary->sum_buf; | ||
646 | vecs[1].iov_len = datasize; | ||
647 | |||
648 | dbg_summary("JFFS2: writing out data to flash to pos : 0x%08x\n", | ||
649 | jeb->offset + c->sector_size - jeb->free_size); | ||
650 | |||
651 | spin_unlock(&c->erase_completion_lock); | ||
652 | ret = jffs2_flash_writev(c, vecs, 2, jeb->offset + c->sector_size - | ||
653 | jeb->free_size, &retlen, 0); | ||
654 | spin_lock(&c->erase_completion_lock); | ||
655 | |||
656 | |||
657 | if (ret || (retlen != infosize)) { | ||
658 | JFFS2_WARNING("Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n", | ||
659 | infosize, jeb->offset + c->sector_size - jeb->free_size, ret, retlen); | ||
660 | |||
661 | c->summary->sum_size = JFFS2_SUMMARY_NOSUM_SIZE; | ||
662 | WASTED_SPACE(infosize); | ||
663 | |||
664 | return 1; | ||
665 | } | ||
666 | |||
667 | return 0; | ||
668 | } | ||
669 | |||
670 | /* Write out summary information - called from jffs2_do_reserve_space */ | ||
671 | |||
672 | int jffs2_sum_write_sumnode(struct jffs2_sb_info *c) | ||
673 | { | ||
674 | struct jffs2_raw_node_ref *summary_ref; | ||
675 | int datasize, infosize, padsize, ret; | ||
676 | struct jffs2_eraseblock *jeb; | ||
677 | |||
678 | dbg_summary("called\n"); | ||
679 | |||
680 | jeb = c->nextblock; | ||
681 | |||
682 | if (!c->summary->sum_num || !c->summary->sum_list_head) { | ||
683 | JFFS2_WARNING("Empty summary info!!!\n"); | ||
684 | BUG(); | ||
685 | } | ||
686 | |||
687 | datasize = c->summary->sum_size + sizeof(struct jffs2_sum_marker); | ||
688 | infosize = sizeof(struct jffs2_raw_summary) + datasize; | ||
689 | padsize = jeb->free_size - infosize; | ||
690 | infosize += padsize; | ||
691 | datasize += padsize; | ||
692 | |||
693 | /* Is there enough space for summary? */ | ||
694 | if (padsize < 0) { | ||
695 | /* don't try to write out summary for this jeb */ | ||
696 | jffs2_sum_disable_collecting(c->summary); | ||
697 | |||
698 | JFFS2_WARNING("Not enough space for summary, padsize = %d\n", padsize); | ||
699 | return 0; | ||
700 | } | ||
701 | |||
702 | ret = jffs2_sum_write_data(c, jeb, infosize, datasize, padsize); | ||
703 | if (ret) | ||
704 | return 0; /* can't write out summary, block is marked as NOSUM_SIZE */ | ||
705 | |||
706 | /* for ACCT_PARANOIA_CHECK */ | ||
707 | spin_unlock(&c->erase_completion_lock); | ||
708 | summary_ref = jffs2_alloc_raw_node_ref(); | ||
709 | spin_lock(&c->erase_completion_lock); | ||
710 | |||
711 | if (!summary_ref) { | ||
712 | JFFS2_NOTICE("Failed to allocate node ref for summary\n"); | ||
713 | return -ENOMEM; | ||
714 | } | ||
715 | |||
716 | summary_ref->next_in_ino = NULL; | ||
717 | summary_ref->next_phys = NULL; | ||
718 | summary_ref->flash_offset = (jeb->offset + c->sector_size - jeb->free_size) | REF_NORMAL; | ||
719 | summary_ref->__totlen = infosize; | ||
720 | |||
721 | if (!jeb->first_node) | ||
722 | jeb->first_node = summary_ref; | ||
723 | if (jeb->last_node) | ||
724 | jeb->last_node->next_phys = summary_ref; | ||
725 | jeb->last_node = summary_ref; | ||
726 | |||
727 | USED_SPACE(infosize); | ||
728 | |||
729 | return 0; | ||
730 | } | ||
diff --git a/fs/jffs2/summary.h b/fs/jffs2/summary.h new file mode 100644 index 000000000000..b7a678be1709 --- /dev/null +++ b/fs/jffs2/summary.h | |||
@@ -0,0 +1,183 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>, | ||
5 | * Zoltan Sogor <weth@inf.u-szeged.hu>, | ||
6 | * Patrik Kluba <pajko@halom.u-szeged.hu>, | ||
7 | * University of Szeged, Hungary | ||
8 | * | ||
9 | * For licensing information, see the file 'LICENCE' in this directory. | ||
10 | * | ||
11 | * $Id: summary.h,v 1.2 2005/09/26 11:37:21 havasi Exp $ | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #ifndef JFFS2_SUMMARY_H | ||
16 | #define JFFS2_SUMMARY_H | ||
17 | |||
18 | #include <linux/uio.h> | ||
19 | #include <linux/jffs2.h> | ||
20 | |||
21 | #define DIRTY_SPACE(x) do { typeof(x) _x = (x); \ | ||
22 | c->free_size -= _x; c->dirty_size += _x; \ | ||
23 | jeb->free_size -= _x ; jeb->dirty_size += _x; \ | ||
24 | }while(0) | ||
25 | #define USED_SPACE(x) do { typeof(x) _x = (x); \ | ||
26 | c->free_size -= _x; c->used_size += _x; \ | ||
27 | jeb->free_size -= _x ; jeb->used_size += _x; \ | ||
28 | }while(0) | ||
29 | #define WASTED_SPACE(x) do { typeof(x) _x = (x); \ | ||
30 | c->free_size -= _x; c->wasted_size += _x; \ | ||
31 | jeb->free_size -= _x ; jeb->wasted_size += _x; \ | ||
32 | }while(0) | ||
33 | #define UNCHECKED_SPACE(x) do { typeof(x) _x = (x); \ | ||
34 | c->free_size -= _x; c->unchecked_size += _x; \ | ||
35 | jeb->free_size -= _x ; jeb->unchecked_size += _x; \ | ||
36 | }while(0) | ||
37 | |||
38 | #define BLK_STATE_ALLFF 0 | ||
39 | #define BLK_STATE_CLEAN 1 | ||
40 | #define BLK_STATE_PARTDIRTY 2 | ||
41 | #define BLK_STATE_CLEANMARKER 3 | ||
42 | #define BLK_STATE_ALLDIRTY 4 | ||
43 | #define BLK_STATE_BADBLOCK 5 | ||
44 | |||
45 | #define JFFS2_SUMMARY_NOSUM_SIZE 0xffffffff | ||
46 | #define JFFS2_SUMMARY_INODE_SIZE (sizeof(struct jffs2_sum_inode_flash)) | ||
47 | #define JFFS2_SUMMARY_DIRENT_SIZE(x) (sizeof(struct jffs2_sum_dirent_flash) + (x)) | ||
48 | |||
49 | /* Summary structures used on flash */ | ||
50 | |||
51 | struct jffs2_sum_unknown_flash | ||
52 | { | ||
53 | jint16_t nodetype; /* node type */ | ||
54 | }; | ||
55 | |||
56 | struct jffs2_sum_inode_flash | ||
57 | { | ||
58 | jint16_t nodetype; /* node type */ | ||
59 | jint32_t inode; /* inode number */ | ||
60 | jint32_t version; /* inode version */ | ||
61 | jint32_t offset; /* offset on jeb */ | ||
62 | jint32_t totlen; /* record length */ | ||
63 | } __attribute__((packed)); | ||
64 | |||
65 | struct jffs2_sum_dirent_flash | ||
66 | { | ||
67 | jint16_t nodetype; /* == JFFS_NODETYPE_DIRENT */ | ||
68 | jint32_t totlen; /* record length */ | ||
69 | jint32_t offset; /* offset on jeb */ | ||
70 | jint32_t pino; /* parent inode */ | ||
71 | jint32_t version; /* dirent version */ | ||
72 | jint32_t ino; /* == zero for unlink */ | ||
73 | uint8_t nsize; /* dirent name size */ | ||
74 | uint8_t type; /* dirent type */ | ||
75 | uint8_t name[0]; /* dirent name */ | ||
76 | } __attribute__((packed)); | ||
77 | |||
78 | union jffs2_sum_flash | ||
79 | { | ||
80 | struct jffs2_sum_unknown_flash u; | ||
81 | struct jffs2_sum_inode_flash i; | ||
82 | struct jffs2_sum_dirent_flash d; | ||
83 | }; | ||
84 | |||
85 | /* Summary structures used in the memory */ | ||
86 | |||
87 | struct jffs2_sum_unknown_mem | ||
88 | { | ||
89 | union jffs2_sum_mem *next; | ||
90 | jint16_t nodetype; /* node type */ | ||
91 | }; | ||
92 | |||
93 | struct jffs2_sum_inode_mem | ||
94 | { | ||
95 | union jffs2_sum_mem *next; | ||
96 | jint16_t nodetype; /* node type */ | ||
97 | jint32_t inode; /* inode number */ | ||
98 | jint32_t version; /* inode version */ | ||
99 | jint32_t offset; /* offset on jeb */ | ||
100 | jint32_t totlen; /* record length */ | ||
101 | } __attribute__((packed)); | ||
102 | |||
103 | struct jffs2_sum_dirent_mem | ||
104 | { | ||
105 | union jffs2_sum_mem *next; | ||
106 | jint16_t nodetype; /* == JFFS_NODETYPE_DIRENT */ | ||
107 | jint32_t totlen; /* record length */ | ||
108 | jint32_t offset; /* ofset on jeb */ | ||
109 | jint32_t pino; /* parent inode */ | ||
110 | jint32_t version; /* dirent version */ | ||
111 | jint32_t ino; /* == zero for unlink */ | ||
112 | uint8_t nsize; /* dirent name size */ | ||
113 | uint8_t type; /* dirent type */ | ||
114 | uint8_t name[0]; /* dirent name */ | ||
115 | } __attribute__((packed)); | ||
116 | |||
117 | union jffs2_sum_mem | ||
118 | { | ||
119 | struct jffs2_sum_unknown_mem u; | ||
120 | struct jffs2_sum_inode_mem i; | ||
121 | struct jffs2_sum_dirent_mem d; | ||
122 | }; | ||
123 | |||
124 | /* Summary related information stored in superblock */ | ||
125 | |||
126 | struct jffs2_summary | ||
127 | { | ||
128 | uint32_t sum_size; /* collected summary information for nextblock */ | ||
129 | uint32_t sum_num; | ||
130 | uint32_t sum_padded; | ||
131 | union jffs2_sum_mem *sum_list_head; | ||
132 | union jffs2_sum_mem *sum_list_tail; | ||
133 | |||
134 | jint32_t *sum_buf; /* buffer for writing out summary */ | ||
135 | }; | ||
136 | |||
137 | /* Summary marker is stored at the end of every sumarized erase block */ | ||
138 | |||
139 | struct jffs2_sum_marker | ||
140 | { | ||
141 | jint32_t offset; /* offset of the summary node in the jeb */ | ||
142 | jint32_t magic; /* == JFFS2_SUM_MAGIC */ | ||
143 | }; | ||
144 | |||
145 | #define JFFS2_SUMMARY_FRAME_SIZE (sizeof(struct jffs2_raw_summary) + sizeof(struct jffs2_sum_marker)) | ||
146 | |||
147 | #ifdef CONFIG_JFFS2_SUMMARY /* SUMMARY SUPPORT ENABLED */ | ||
148 | |||
149 | #define jffs2_sum_active() (1) | ||
150 | int jffs2_sum_init(struct jffs2_sb_info *c); | ||
151 | void jffs2_sum_exit(struct jffs2_sb_info *c); | ||
152 | void jffs2_sum_disable_collecting(struct jffs2_summary *s); | ||
153 | int jffs2_sum_is_disabled(struct jffs2_summary *s); | ||
154 | void jffs2_sum_reset_collected(struct jffs2_summary *s); | ||
155 | void jffs2_sum_move_collected(struct jffs2_sb_info *c, struct jffs2_summary *s); | ||
156 | int jffs2_sum_add_kvec(struct jffs2_sb_info *c, const struct kvec *invecs, | ||
157 | unsigned long count, uint32_t to); | ||
158 | int jffs2_sum_write_sumnode(struct jffs2_sb_info *c); | ||
159 | int jffs2_sum_add_padding_mem(struct jffs2_summary *s, uint32_t size); | ||
160 | int jffs2_sum_add_inode_mem(struct jffs2_summary *s, struct jffs2_raw_inode *ri, uint32_t ofs); | ||
161 | int jffs2_sum_add_dirent_mem(struct jffs2_summary *s, struct jffs2_raw_dirent *rd, uint32_t ofs); | ||
162 | int jffs2_sum_scan_sumnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | ||
163 | uint32_t ofs, uint32_t *pseudo_random); | ||
164 | |||
165 | #else /* SUMMARY DISABLED */ | ||
166 | |||
167 | #define jffs2_sum_active() (0) | ||
168 | #define jffs2_sum_init(a) (0) | ||
169 | #define jffs2_sum_exit(a) | ||
170 | #define jffs2_sum_disable_collecting(a) | ||
171 | #define jffs2_sum_is_disabled(a) (0) | ||
172 | #define jffs2_sum_reset_collected(a) | ||
173 | #define jffs2_sum_add_kvec(a,b,c,d) (0) | ||
174 | #define jffs2_sum_move_collected(a,b) | ||
175 | #define jffs2_sum_write_sumnode(a) (0) | ||
176 | #define jffs2_sum_add_padding_mem(a,b) | ||
177 | #define jffs2_sum_add_inode_mem(a,b,c) | ||
178 | #define jffs2_sum_add_dirent_mem(a,b,c) | ||
179 | #define jffs2_sum_scan_sumnode(a,b,c,d) (0) | ||
180 | |||
181 | #endif /* CONFIG_JFFS2_SUMMARY */ | ||
182 | |||
183 | #endif /* JFFS2_SUMMARY_H */ | ||
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c index aaf9475cfb6a..9e0b5458d9c0 100644 --- a/fs/jffs2/super.c +++ b/fs/jffs2/super.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: super.c,v 1.107 2005/07/12 16:37:08 dedekind Exp $ | 10 | * $Id: super.c,v 1.110 2005/11/07 11:14:42 gleixner Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -62,7 +62,7 @@ static int jffs2_sync_fs(struct super_block *sb, int wait) | |||
62 | 62 | ||
63 | down(&c->alloc_sem); | 63 | down(&c->alloc_sem); |
64 | jffs2_flush_wbuf_pad(c); | 64 | jffs2_flush_wbuf_pad(c); |
65 | up(&c->alloc_sem); | 65 | up(&c->alloc_sem); |
66 | return 0; | 66 | return 0; |
67 | } | 67 | } |
68 | 68 | ||
@@ -112,7 +112,7 @@ static int jffs2_sb_set(struct super_block *sb, void *data) | |||
112 | } | 112 | } |
113 | 113 | ||
114 | static struct super_block *jffs2_get_sb_mtd(struct file_system_type *fs_type, | 114 | static struct super_block *jffs2_get_sb_mtd(struct file_system_type *fs_type, |
115 | int flags, const char *dev_name, | 115 | int flags, const char *dev_name, |
116 | void *data, struct mtd_info *mtd) | 116 | void *data, struct mtd_info *mtd) |
117 | { | 117 | { |
118 | struct super_block *sb; | 118 | struct super_block *sb; |
@@ -172,7 +172,7 @@ static struct super_block *jffs2_get_sb_mtd(struct file_system_type *fs_type, | |||
172 | } | 172 | } |
173 | 173 | ||
174 | static struct super_block *jffs2_get_sb_mtdnr(struct file_system_type *fs_type, | 174 | static struct super_block *jffs2_get_sb_mtdnr(struct file_system_type *fs_type, |
175 | int flags, const char *dev_name, | 175 | int flags, const char *dev_name, |
176 | void *data, int mtdnr) | 176 | void *data, int mtdnr) |
177 | { | 177 | { |
178 | struct mtd_info *mtd; | 178 | struct mtd_info *mtd; |
@@ -201,7 +201,7 @@ static struct super_block *jffs2_get_sb(struct file_system_type *fs_type, | |||
201 | 201 | ||
202 | /* The preferred way of mounting in future; especially when | 202 | /* The preferred way of mounting in future; especially when |
203 | CONFIG_BLK_DEV is implemented - we specify the underlying | 203 | CONFIG_BLK_DEV is implemented - we specify the underlying |
204 | MTD device by number or by name, so that we don't require | 204 | MTD device by number or by name, so that we don't require |
205 | block device support to be present in the kernel. */ | 205 | block device support to be present in the kernel. */ |
206 | 206 | ||
207 | /* FIXME: How to do the root fs this way? */ | 207 | /* FIXME: How to do the root fs this way? */ |
@@ -225,7 +225,7 @@ static struct super_block *jffs2_get_sb(struct file_system_type *fs_type, | |||
225 | } else if (isdigit(dev_name[3])) { | 225 | } else if (isdigit(dev_name[3])) { |
226 | /* Mount by MTD device number name */ | 226 | /* Mount by MTD device number name */ |
227 | char *endptr; | 227 | char *endptr; |
228 | 228 | ||
229 | mtdnr = simple_strtoul(dev_name+3, &endptr, 0); | 229 | mtdnr = simple_strtoul(dev_name+3, &endptr, 0); |
230 | if (!*endptr) { | 230 | if (!*endptr) { |
231 | /* It was a valid number */ | 231 | /* It was a valid number */ |
@@ -235,7 +235,7 @@ static struct super_block *jffs2_get_sb(struct file_system_type *fs_type, | |||
235 | } | 235 | } |
236 | } | 236 | } |
237 | 237 | ||
238 | /* Try the old way - the hack where we allowed users to mount | 238 | /* Try the old way - the hack where we allowed users to mount |
239 | /dev/mtdblock$(n) but didn't actually _use_ the blkdev */ | 239 | /dev/mtdblock$(n) but didn't actually _use_ the blkdev */ |
240 | 240 | ||
241 | err = path_lookup(dev_name, LOOKUP_FOLLOW, &nd); | 241 | err = path_lookup(dev_name, LOOKUP_FOLLOW, &nd); |
@@ -282,9 +282,12 @@ static void jffs2_put_super (struct super_block *sb) | |||
282 | down(&c->alloc_sem); | 282 | down(&c->alloc_sem); |
283 | jffs2_flush_wbuf_pad(c); | 283 | jffs2_flush_wbuf_pad(c); |
284 | up(&c->alloc_sem); | 284 | up(&c->alloc_sem); |
285 | |||
286 | jffs2_sum_exit(c); | ||
287 | |||
285 | jffs2_free_ino_caches(c); | 288 | jffs2_free_ino_caches(c); |
286 | jffs2_free_raw_node_refs(c); | 289 | jffs2_free_raw_node_refs(c); |
287 | if (c->mtd->flags & MTD_NO_VIRTBLOCKS) | 290 | if (jffs2_blocks_use_vmalloc(c)) |
288 | vfree(c->blocks); | 291 | vfree(c->blocks); |
289 | else | 292 | else |
290 | kfree(c->blocks); | 293 | kfree(c->blocks); |
@@ -321,6 +324,9 @@ static int __init init_jffs2_fs(void) | |||
321 | #ifdef CONFIG_JFFS2_FS_WRITEBUFFER | 324 | #ifdef CONFIG_JFFS2_FS_WRITEBUFFER |
322 | " (NAND)" | 325 | " (NAND)" |
323 | #endif | 326 | #endif |
327 | #ifdef CONFIG_JFFS2_SUMMARY | ||
328 | " (SUMMARY) " | ||
329 | #endif | ||
324 | " (C) 2001-2003 Red Hat, Inc.\n"); | 330 | " (C) 2001-2003 Red Hat, Inc.\n"); |
325 | 331 | ||
326 | jffs2_inode_cachep = kmem_cache_create("jffs2_i", | 332 | jffs2_inode_cachep = kmem_cache_create("jffs2_i", |
@@ -370,5 +376,5 @@ module_exit(exit_jffs2_fs); | |||
370 | 376 | ||
371 | MODULE_DESCRIPTION("The Journalling Flash File System, v2"); | 377 | MODULE_DESCRIPTION("The Journalling Flash File System, v2"); |
372 | MODULE_AUTHOR("Red Hat, Inc."); | 378 | MODULE_AUTHOR("Red Hat, Inc."); |
373 | MODULE_LICENSE("GPL"); // Actually dual-licensed, but it doesn't matter for | 379 | MODULE_LICENSE("GPL"); // Actually dual-licensed, but it doesn't matter for |
374 | // the sake of this tag. It's Free Software. | 380 | // the sake of this tag. It's Free Software. |
diff --git a/fs/jffs2/symlink.c b/fs/jffs2/symlink.c index 82ef484f5e12..d55754fe8925 100644 --- a/fs/jffs2/symlink.c +++ b/fs/jffs2/symlink.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: symlink.c,v 1.16 2005/03/01 10:50:48 dedekind Exp $ | 10 | * $Id: symlink.c,v 1.19 2005/11/07 11:14:42 gleixner Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -21,7 +21,7 @@ | |||
21 | static void *jffs2_follow_link(struct dentry *dentry, struct nameidata *nd); | 21 | static void *jffs2_follow_link(struct dentry *dentry, struct nameidata *nd); |
22 | 22 | ||
23 | struct inode_operations jffs2_symlink_inode_operations = | 23 | struct inode_operations jffs2_symlink_inode_operations = |
24 | { | 24 | { |
25 | .readlink = generic_readlink, | 25 | .readlink = generic_readlink, |
26 | .follow_link = jffs2_follow_link, | 26 | .follow_link = jffs2_follow_link, |
27 | .setattr = jffs2_setattr | 27 | .setattr = jffs2_setattr |
@@ -30,35 +30,33 @@ struct inode_operations jffs2_symlink_inode_operations = | |||
30 | static void *jffs2_follow_link(struct dentry *dentry, struct nameidata *nd) | 30 | static void *jffs2_follow_link(struct dentry *dentry, struct nameidata *nd) |
31 | { | 31 | { |
32 | struct jffs2_inode_info *f = JFFS2_INODE_INFO(dentry->d_inode); | 32 | struct jffs2_inode_info *f = JFFS2_INODE_INFO(dentry->d_inode); |
33 | char *p = (char *)f->dents; | 33 | char *p = (char *)f->target; |
34 | 34 | ||
35 | /* | 35 | /* |
36 | * We don't acquire the f->sem mutex here since the only data we | 36 | * We don't acquire the f->sem mutex here since the only data we |
37 | * use is f->dents which in case of the symlink inode points to the | 37 | * use is f->target. |
38 | * symlink's target path. | ||
39 | * | 38 | * |
40 | * 1. If we are here the inode has already built and f->dents has | 39 | * 1. If we are here the inode has already built and f->target has |
41 | * to point to the target path. | 40 | * to point to the target path. |
42 | * 2. Nobody uses f->dents (if the inode is symlink's inode). The | 41 | * 2. Nobody uses f->target (if the inode is symlink's inode). The |
43 | * exception is inode freeing function which frees f->dents. But | 42 | * exception is inode freeing function which frees f->target. But |
44 | * it can't be called while we are here and before VFS has | 43 | * it can't be called while we are here and before VFS has |
45 | * stopped using our f->dents string which we provide by means of | 44 | * stopped using our f->target string which we provide by means of |
46 | * nd_set_link() call. | 45 | * nd_set_link() call. |
47 | */ | 46 | */ |
48 | 47 | ||
49 | if (!p) { | 48 | if (!p) { |
50 | printk(KERN_ERR "jffs2_follow_link(): can't find symlink taerget\n"); | 49 | printk(KERN_ERR "jffs2_follow_link(): can't find symlink taerget\n"); |
51 | p = ERR_PTR(-EIO); | 50 | p = ERR_PTR(-EIO); |
52 | } else { | ||
53 | D1(printk(KERN_DEBUG "jffs2_follow_link(): target path is '%s'\n", (char *) f->dents)); | ||
54 | } | 51 | } |
52 | D1(printk(KERN_DEBUG "jffs2_follow_link(): target path is '%s'\n", (char *) f->target)); | ||
55 | 53 | ||
56 | nd_set_link(nd, p); | 54 | nd_set_link(nd, p); |
57 | 55 | ||
58 | /* | 56 | /* |
59 | * We unlock the f->sem mutex but VFS will use the f->dents string. This is safe | 57 | * We will unlock the f->sem mutex but VFS will use the f->target string. This is safe |
60 | * since the only way that may cause f->dents to be changed is iput() operation. | 58 | * since the only way that may cause f->target to be changed is iput() operation. |
61 | * But VFS will not use f->dents after iput() has been called. | 59 | * But VFS will not use f->target after iput() has been called. |
62 | */ | 60 | */ |
63 | return NULL; | 61 | return NULL; |
64 | } | 62 | } |
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c index 316133c626b7..4cebf0e57c46 100644 --- a/fs/jffs2/wbuf.c +++ b/fs/jffs2/wbuf.c | |||
@@ -9,7 +9,7 @@ | |||
9 | * | 9 | * |
10 | * For licensing information, see the file 'LICENCE' in this directory. | 10 | * For licensing information, see the file 'LICENCE' in this directory. |
11 | * | 11 | * |
12 | * $Id: wbuf.c,v 1.92 2005/04/05 12:51:54 dedekind Exp $ | 12 | * $Id: wbuf.c,v 1.100 2005/09/30 13:59:13 dedekind Exp $ |
13 | * | 13 | * |
14 | */ | 14 | */ |
15 | 15 | ||
@@ -30,12 +30,12 @@ | |||
30 | static unsigned char *brokenbuf; | 30 | static unsigned char *brokenbuf; |
31 | #endif | 31 | #endif |
32 | 32 | ||
33 | #define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) ) | ||
34 | #define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) ) | ||
35 | |||
33 | /* max. erase failures before we mark a block bad */ | 36 | /* max. erase failures before we mark a block bad */ |
34 | #define MAX_ERASE_FAILURES 2 | 37 | #define MAX_ERASE_FAILURES 2 |
35 | 38 | ||
36 | /* two seconds timeout for timed wbuf-flushing */ | ||
37 | #define WBUF_FLUSH_TIMEOUT 2 * HZ | ||
38 | |||
39 | struct jffs2_inodirty { | 39 | struct jffs2_inodirty { |
40 | uint32_t ino; | 40 | uint32_t ino; |
41 | struct jffs2_inodirty *next; | 41 | struct jffs2_inodirty *next; |
@@ -139,7 +139,6 @@ static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock | |||
139 | { | 139 | { |
140 | D1(printk("About to refile bad block at %08x\n", jeb->offset)); | 140 | D1(printk("About to refile bad block at %08x\n", jeb->offset)); |
141 | 141 | ||
142 | D2(jffs2_dump_block_lists(c)); | ||
143 | /* File the existing block on the bad_used_list.... */ | 142 | /* File the existing block on the bad_used_list.... */ |
144 | if (c->nextblock == jeb) | 143 | if (c->nextblock == jeb) |
145 | c->nextblock = NULL; | 144 | c->nextblock = NULL; |
@@ -156,7 +155,6 @@ static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock | |||
156 | c->nr_erasing_blocks++; | 155 | c->nr_erasing_blocks++; |
157 | jffs2_erase_pending_trigger(c); | 156 | jffs2_erase_pending_trigger(c); |
158 | } | 157 | } |
159 | D2(jffs2_dump_block_lists(c)); | ||
160 | 158 | ||
161 | /* Adjust its size counts accordingly */ | 159 | /* Adjust its size counts accordingly */ |
162 | c->wasted_size += jeb->free_size; | 160 | c->wasted_size += jeb->free_size; |
@@ -164,8 +162,9 @@ static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock | |||
164 | jeb->wasted_size += jeb->free_size; | 162 | jeb->wasted_size += jeb->free_size; |
165 | jeb->free_size = 0; | 163 | jeb->free_size = 0; |
166 | 164 | ||
167 | ACCT_SANITY_CHECK(c,jeb); | 165 | jffs2_dbg_dump_block_lists_nolock(c); |
168 | D1(ACCT_PARANOIA_CHECK(jeb)); | 166 | jffs2_dbg_acct_sanity_check_nolock(c,jeb); |
167 | jffs2_dbg_acct_paranoia_check_nolock(c, jeb); | ||
169 | } | 168 | } |
170 | 169 | ||
171 | /* Recover from failure to write wbuf. Recover the nodes up to the | 170 | /* Recover from failure to write wbuf. Recover the nodes up to the |
@@ -189,7 +188,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
189 | /* Find the first node to be recovered, by skipping over every | 188 | /* Find the first node to be recovered, by skipping over every |
190 | node which ends before the wbuf starts, or which is obsolete. */ | 189 | node which ends before the wbuf starts, or which is obsolete. */ |
191 | first_raw = &jeb->first_node; | 190 | first_raw = &jeb->first_node; |
192 | while (*first_raw && | 191 | while (*first_raw && |
193 | (ref_obsolete(*first_raw) || | 192 | (ref_obsolete(*first_raw) || |
194 | (ref_offset(*first_raw)+ref_totlen(c, jeb, *first_raw)) < c->wbuf_ofs)) { | 193 | (ref_offset(*first_raw)+ref_totlen(c, jeb, *first_raw)) < c->wbuf_ofs)) { |
195 | D1(printk(KERN_DEBUG "Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n", | 194 | D1(printk(KERN_DEBUG "Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n", |
@@ -238,7 +237,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
238 | ret = c->mtd->read_ecc(c->mtd, start, c->wbuf_ofs - start, &retlen, buf, NULL, c->oobinfo); | 237 | ret = c->mtd->read_ecc(c->mtd, start, c->wbuf_ofs - start, &retlen, buf, NULL, c->oobinfo); |
239 | else | 238 | else |
240 | ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf); | 239 | ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf); |
241 | 240 | ||
242 | if (ret == -EBADMSG && retlen == c->wbuf_ofs - start) { | 241 | if (ret == -EBADMSG && retlen == c->wbuf_ofs - start) { |
243 | /* ECC recovered */ | 242 | /* ECC recovered */ |
244 | ret = 0; | 243 | ret = 0; |
@@ -266,7 +265,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
266 | 265 | ||
267 | 266 | ||
268 | /* ... and get an allocation of space from a shiny new block instead */ | 267 | /* ... and get an allocation of space from a shiny new block instead */ |
269 | ret = jffs2_reserve_space_gc(c, end-start, &ofs, &len); | 268 | ret = jffs2_reserve_space_gc(c, end-start, &ofs, &len, JFFS2_SUMMARY_NOSUM_SIZE); |
270 | if (ret) { | 269 | if (ret) { |
271 | printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n"); | 270 | printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n"); |
272 | kfree(buf); | 271 | kfree(buf); |
@@ -275,15 +274,15 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
275 | if (end-start >= c->wbuf_pagesize) { | 274 | if (end-start >= c->wbuf_pagesize) { |
276 | /* Need to do another write immediately, but it's possible | 275 | /* Need to do another write immediately, but it's possible |
277 | that this is just because the wbuf itself is completely | 276 | that this is just because the wbuf itself is completely |
278 | full, and there's nothing earlier read back from the | 277 | full, and there's nothing earlier read back from the |
279 | flash. Hence 'buf' isn't necessarily what we're writing | 278 | flash. Hence 'buf' isn't necessarily what we're writing |
280 | from. */ | 279 | from. */ |
281 | unsigned char *rewrite_buf = buf?:c->wbuf; | 280 | unsigned char *rewrite_buf = buf?:c->wbuf; |
282 | uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize); | 281 | uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize); |
283 | 282 | ||
284 | D1(printk(KERN_DEBUG "Write 0x%x bytes at 0x%08x in wbuf recover\n", | 283 | D1(printk(KERN_DEBUG "Write 0x%x bytes at 0x%08x in wbuf recover\n", |
285 | towrite, ofs)); | 284 | towrite, ofs)); |
286 | 285 | ||
287 | #ifdef BREAKMEHEADER | 286 | #ifdef BREAKMEHEADER |
288 | static int breakme; | 287 | static int breakme; |
289 | if (breakme++ == 20) { | 288 | if (breakme++ == 20) { |
@@ -327,8 +326,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
327 | c->wbuf_ofs = ofs + towrite; | 326 | c->wbuf_ofs = ofs + towrite; |
328 | memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len); | 327 | memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len); |
329 | /* Don't muck about with c->wbuf_inodes. False positives are harmless. */ | 328 | /* Don't muck about with c->wbuf_inodes. False positives are harmless. */ |
330 | if (buf) | 329 | kfree(buf); |
331 | kfree(buf); | ||
332 | } else { | 330 | } else { |
333 | /* OK, now we're left with the dregs in whichever buffer we're using */ | 331 | /* OK, now we're left with the dregs in whichever buffer we're using */ |
334 | if (buf) { | 332 | if (buf) { |
@@ -392,11 +390,11 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
392 | else | 390 | else |
393 | jeb->last_node = container_of(first_raw, struct jffs2_raw_node_ref, next_phys); | 391 | jeb->last_node = container_of(first_raw, struct jffs2_raw_node_ref, next_phys); |
394 | 392 | ||
395 | ACCT_SANITY_CHECK(c,jeb); | 393 | jffs2_dbg_acct_sanity_check_nolock(c, jeb); |
396 | D1(ACCT_PARANOIA_CHECK(jeb)); | 394 | jffs2_dbg_acct_paranoia_check_nolock(c, jeb); |
397 | 395 | ||
398 | ACCT_SANITY_CHECK(c,new_jeb); | 396 | jffs2_dbg_acct_sanity_check_nolock(c, new_jeb); |
399 | D1(ACCT_PARANOIA_CHECK(new_jeb)); | 397 | jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb); |
400 | 398 | ||
401 | spin_unlock(&c->erase_completion_lock); | 399 | spin_unlock(&c->erase_completion_lock); |
402 | 400 | ||
@@ -435,15 +433,15 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) | |||
435 | this happens, if we have a change to a new block, | 433 | this happens, if we have a change to a new block, |
436 | or if fsync forces us to flush the writebuffer. | 434 | or if fsync forces us to flush the writebuffer. |
437 | if we have a switch to next page, we will not have | 435 | if we have a switch to next page, we will not have |
438 | enough remaining space for this. | 436 | enough remaining space for this. |
439 | */ | 437 | */ |
440 | if (pad && !jffs2_dataflash(c)) { | 438 | if (pad ) { |
441 | c->wbuf_len = PAD(c->wbuf_len); | 439 | c->wbuf_len = PAD(c->wbuf_len); |
442 | 440 | ||
443 | /* Pad with JFFS2_DIRTY_BITMASK initially. this helps out ECC'd NOR | 441 | /* Pad with JFFS2_DIRTY_BITMASK initially. this helps out ECC'd NOR |
444 | with 8 byte page size */ | 442 | with 8 byte page size */ |
445 | memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len); | 443 | memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len); |
446 | 444 | ||
447 | if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) { | 445 | if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) { |
448 | struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len); | 446 | struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len); |
449 | padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); | 447 | padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); |
@@ -454,7 +452,7 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) | |||
454 | } | 452 | } |
455 | /* else jffs2_flash_writev has actually filled in the rest of the | 453 | /* else jffs2_flash_writev has actually filled in the rest of the |
456 | buffer for us, and will deal with the node refs etc. later. */ | 454 | buffer for us, and will deal with the node refs etc. later. */ |
457 | 455 | ||
458 | #ifdef BREAKME | 456 | #ifdef BREAKME |
459 | static int breakme; | 457 | static int breakme; |
460 | if (breakme++ == 20) { | 458 | if (breakme++ == 20) { |
@@ -463,9 +461,9 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) | |||
463 | c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, | 461 | c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, |
464 | &retlen, brokenbuf, NULL, c->oobinfo); | 462 | &retlen, brokenbuf, NULL, c->oobinfo); |
465 | ret = -EIO; | 463 | ret = -EIO; |
466 | } else | 464 | } else |
467 | #endif | 465 | #endif |
468 | 466 | ||
469 | if (jffs2_cleanmarker_oob(c)) | 467 | if (jffs2_cleanmarker_oob(c)) |
470 | ret = c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf, NULL, c->oobinfo); | 468 | ret = c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf, NULL, c->oobinfo); |
471 | else | 469 | else |
@@ -488,7 +486,7 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) | |||
488 | spin_lock(&c->erase_completion_lock); | 486 | spin_lock(&c->erase_completion_lock); |
489 | 487 | ||
490 | /* Adjust free size of the block if we padded. */ | 488 | /* Adjust free size of the block if we padded. */ |
491 | if (pad && !jffs2_dataflash(c)) { | 489 | if (pad) { |
492 | struct jffs2_eraseblock *jeb; | 490 | struct jffs2_eraseblock *jeb; |
493 | 491 | ||
494 | jeb = &c->blocks[c->wbuf_ofs / c->sector_size]; | 492 | jeb = &c->blocks[c->wbuf_ofs / c->sector_size]; |
@@ -496,7 +494,7 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) | |||
496 | D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n", | 494 | D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n", |
497 | (jeb==c->nextblock)?"next":"", jeb->offset)); | 495 | (jeb==c->nextblock)?"next":"", jeb->offset)); |
498 | 496 | ||
499 | /* wbuf_pagesize - wbuf_len is the amount of space that's to be | 497 | /* wbuf_pagesize - wbuf_len is the amount of space that's to be |
500 | padded. If there is less free space in the block than that, | 498 | padded. If there is less free space in the block than that, |
501 | something screwed up */ | 499 | something screwed up */ |
502 | if (jeb->free_size < (c->wbuf_pagesize - c->wbuf_len)) { | 500 | if (jeb->free_size < (c->wbuf_pagesize - c->wbuf_len)) { |
@@ -524,9 +522,9 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) | |||
524 | return 0; | 522 | return 0; |
525 | } | 523 | } |
526 | 524 | ||
527 | /* Trigger garbage collection to flush the write-buffer. | 525 | /* Trigger garbage collection to flush the write-buffer. |
528 | If ino arg is zero, do it if _any_ real (i.e. not GC) writes are | 526 | If ino arg is zero, do it if _any_ real (i.e. not GC) writes are |
529 | outstanding. If ino arg non-zero, do it only if a write for the | 527 | outstanding. If ino arg non-zero, do it only if a write for the |
530 | given inode is outstanding. */ | 528 | given inode is outstanding. */ |
531 | int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino) | 529 | int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino) |
532 | { | 530 | { |
@@ -605,15 +603,6 @@ int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c) | |||
605 | 603 | ||
606 | return ret; | 604 | return ret; |
607 | } | 605 | } |
608 | |||
609 | #ifdef CONFIG_JFFS2_FS_WRITEBUFFER | ||
610 | #define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) ) | ||
611 | #define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) ) | ||
612 | #else | ||
613 | #define PAGE_DIV(x) ( (x) & (~(c->wbuf_pagesize - 1)) ) | ||
614 | #define PAGE_MOD(x) ( (x) & (c->wbuf_pagesize - 1) ) | ||
615 | #endif | ||
616 | |||
617 | int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsigned long count, loff_t to, size_t *retlen, uint32_t ino) | 606 | int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsigned long count, loff_t to, size_t *retlen, uint32_t ino) |
618 | { | 607 | { |
619 | struct kvec outvecs[3]; | 608 | struct kvec outvecs[3]; |
@@ -630,13 +619,13 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig | |||
630 | /* If not NAND flash, don't bother */ | 619 | /* If not NAND flash, don't bother */ |
631 | if (!jffs2_is_writebuffered(c)) | 620 | if (!jffs2_is_writebuffered(c)) |
632 | return jffs2_flash_direct_writev(c, invecs, count, to, retlen); | 621 | return jffs2_flash_direct_writev(c, invecs, count, to, retlen); |
633 | 622 | ||
634 | down_write(&c->wbuf_sem); | 623 | down_write(&c->wbuf_sem); |
635 | 624 | ||
636 | /* If wbuf_ofs is not initialized, set it to target address */ | 625 | /* If wbuf_ofs is not initialized, set it to target address */ |
637 | if (c->wbuf_ofs == 0xFFFFFFFF) { | 626 | if (c->wbuf_ofs == 0xFFFFFFFF) { |
638 | c->wbuf_ofs = PAGE_DIV(to); | 627 | c->wbuf_ofs = PAGE_DIV(to); |
639 | c->wbuf_len = PAGE_MOD(to); | 628 | c->wbuf_len = PAGE_MOD(to); |
640 | memset(c->wbuf,0xff,c->wbuf_pagesize); | 629 | memset(c->wbuf,0xff,c->wbuf_pagesize); |
641 | } | 630 | } |
642 | 631 | ||
@@ -650,10 +639,10 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig | |||
650 | memset(c->wbuf,0xff,c->wbuf_pagesize); | 639 | memset(c->wbuf,0xff,c->wbuf_pagesize); |
651 | } | 640 | } |
652 | } | 641 | } |
653 | 642 | ||
654 | /* Sanity checks on target address. | 643 | /* Sanity checks on target address. |
655 | It's permitted to write at PAD(c->wbuf_len+c->wbuf_ofs), | 644 | It's permitted to write at PAD(c->wbuf_len+c->wbuf_ofs), |
656 | and it's permitted to write at the beginning of a new | 645 | and it's permitted to write at the beginning of a new |
657 | erase block. Anything else, and you die. | 646 | erase block. Anything else, and you die. |
658 | New block starts at xxx000c (0-b = block header) | 647 | New block starts at xxx000c (0-b = block header) |
659 | */ | 648 | */ |
@@ -671,8 +660,8 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig | |||
671 | } | 660 | } |
672 | /* set pointer to new block */ | 661 | /* set pointer to new block */ |
673 | c->wbuf_ofs = PAGE_DIV(to); | 662 | c->wbuf_ofs = PAGE_DIV(to); |
674 | c->wbuf_len = PAGE_MOD(to); | 663 | c->wbuf_len = PAGE_MOD(to); |
675 | } | 664 | } |
676 | 665 | ||
677 | if (to != PAD(c->wbuf_ofs + c->wbuf_len)) { | 666 | if (to != PAD(c->wbuf_ofs + c->wbuf_len)) { |
678 | /* We're not writing immediately after the writebuffer. Bad. */ | 667 | /* We're not writing immediately after the writebuffer. Bad. */ |
@@ -692,21 +681,21 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig | |||
692 | invec = 0; | 681 | invec = 0; |
693 | outvec = 0; | 682 | outvec = 0; |
694 | 683 | ||
695 | /* Fill writebuffer first, if already in use */ | 684 | /* Fill writebuffer first, if already in use */ |
696 | if (c->wbuf_len) { | 685 | if (c->wbuf_len) { |
697 | uint32_t invec_ofs = 0; | 686 | uint32_t invec_ofs = 0; |
698 | 687 | ||
699 | /* adjust alignment offset */ | 688 | /* adjust alignment offset */ |
700 | if (c->wbuf_len != PAGE_MOD(to)) { | 689 | if (c->wbuf_len != PAGE_MOD(to)) { |
701 | c->wbuf_len = PAGE_MOD(to); | 690 | c->wbuf_len = PAGE_MOD(to); |
702 | /* take care of alignment to next page */ | 691 | /* take care of alignment to next page */ |
703 | if (!c->wbuf_len) | 692 | if (!c->wbuf_len) |
704 | c->wbuf_len = c->wbuf_pagesize; | 693 | c->wbuf_len = c->wbuf_pagesize; |
705 | } | 694 | } |
706 | 695 | ||
707 | while(c->wbuf_len < c->wbuf_pagesize) { | 696 | while(c->wbuf_len < c->wbuf_pagesize) { |
708 | uint32_t thislen; | 697 | uint32_t thislen; |
709 | 698 | ||
710 | if (invec == count) | 699 | if (invec == count) |
711 | goto alldone; | 700 | goto alldone; |
712 | 701 | ||
@@ -714,17 +703,17 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig | |||
714 | 703 | ||
715 | if (thislen >= invecs[invec].iov_len) | 704 | if (thislen >= invecs[invec].iov_len) |
716 | thislen = invecs[invec].iov_len; | 705 | thislen = invecs[invec].iov_len; |
717 | 706 | ||
718 | invec_ofs = thislen; | 707 | invec_ofs = thislen; |
719 | 708 | ||
720 | memcpy(c->wbuf + c->wbuf_len, invecs[invec].iov_base, thislen); | 709 | memcpy(c->wbuf + c->wbuf_len, invecs[invec].iov_base, thislen); |
721 | c->wbuf_len += thislen; | 710 | c->wbuf_len += thislen; |
722 | donelen += thislen; | 711 | donelen += thislen; |
723 | /* Get next invec, if actual did not fill the buffer */ | 712 | /* Get next invec, if actual did not fill the buffer */ |
724 | if (c->wbuf_len < c->wbuf_pagesize) | 713 | if (c->wbuf_len < c->wbuf_pagesize) |
725 | invec++; | 714 | invec++; |
726 | } | 715 | } |
727 | 716 | ||
728 | /* write buffer is full, flush buffer */ | 717 | /* write buffer is full, flush buffer */ |
729 | ret = __jffs2_flush_wbuf(c, NOPAD); | 718 | ret = __jffs2_flush_wbuf(c, NOPAD); |
730 | if (ret) { | 719 | if (ret) { |
@@ -783,10 +772,10 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig | |||
783 | 772 | ||
784 | /* We did cross a page boundary, so we write some now */ | 773 | /* We did cross a page boundary, so we write some now */ |
785 | if (jffs2_cleanmarker_oob(c)) | 774 | if (jffs2_cleanmarker_oob(c)) |
786 | ret = c->mtd->writev_ecc(c->mtd, outvecs, splitvec+1, outvec_to, &wbuf_retlen, NULL, c->oobinfo); | 775 | ret = c->mtd->writev_ecc(c->mtd, outvecs, splitvec+1, outvec_to, &wbuf_retlen, NULL, c->oobinfo); |
787 | else | 776 | else |
788 | ret = jffs2_flash_direct_writev(c, outvecs, splitvec+1, outvec_to, &wbuf_retlen); | 777 | ret = jffs2_flash_direct_writev(c, outvecs, splitvec+1, outvec_to, &wbuf_retlen); |
789 | 778 | ||
790 | if (ret < 0 || wbuf_retlen != PAGE_DIV(totlen)) { | 779 | if (ret < 0 || wbuf_retlen != PAGE_DIV(totlen)) { |
791 | /* At this point we have no problem, | 780 | /* At this point we have no problem, |
792 | c->wbuf is empty. However refile nextblock to avoid | 781 | c->wbuf is empty. However refile nextblock to avoid |
@@ -803,7 +792,7 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig | |||
803 | spin_unlock(&c->erase_completion_lock); | 792 | spin_unlock(&c->erase_completion_lock); |
804 | goto exit; | 793 | goto exit; |
805 | } | 794 | } |
806 | 795 | ||
807 | donelen += wbuf_retlen; | 796 | donelen += wbuf_retlen; |
808 | c->wbuf_ofs = PAGE_DIV(outvec_to) + PAGE_DIV(totlen); | 797 | c->wbuf_ofs = PAGE_DIV(outvec_to) + PAGE_DIV(totlen); |
809 | 798 | ||
@@ -837,11 +826,17 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig | |||
837 | alldone: | 826 | alldone: |
838 | *retlen = donelen; | 827 | *retlen = donelen; |
839 | 828 | ||
829 | if (jffs2_sum_active()) { | ||
830 | int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to); | ||
831 | if (res) | ||
832 | return res; | ||
833 | } | ||
834 | |||
840 | if (c->wbuf_len && ino) | 835 | if (c->wbuf_len && ino) |
841 | jffs2_wbuf_dirties_inode(c, ino); | 836 | jffs2_wbuf_dirties_inode(c, ino); |
842 | 837 | ||
843 | ret = 0; | 838 | ret = 0; |
844 | 839 | ||
845 | exit: | 840 | exit: |
846 | up_write(&c->wbuf_sem); | 841 | up_write(&c->wbuf_sem); |
847 | return ret; | 842 | return ret; |
@@ -856,7 +851,7 @@ int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *r | |||
856 | struct kvec vecs[1]; | 851 | struct kvec vecs[1]; |
857 | 852 | ||
858 | if (!jffs2_is_writebuffered(c)) | 853 | if (!jffs2_is_writebuffered(c)) |
859 | return c->mtd->write(c->mtd, ofs, len, retlen, buf); | 854 | return jffs2_flash_direct_write(c, ofs, len, retlen, buf); |
860 | 855 | ||
861 | vecs[0].iov_base = (unsigned char *) buf; | 856 | vecs[0].iov_base = (unsigned char *) buf; |
862 | vecs[0].iov_len = len; | 857 | vecs[0].iov_len = len; |
@@ -884,18 +879,18 @@ int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *re | |||
884 | if ( (ret == -EBADMSG) && (*retlen == len) ) { | 879 | if ( (ret == -EBADMSG) && (*retlen == len) ) { |
885 | printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n", | 880 | printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n", |
886 | len, ofs); | 881 | len, ofs); |
887 | /* | 882 | /* |
888 | * We have the raw data without ECC correction in the buffer, maybe | 883 | * We have the raw data without ECC correction in the buffer, maybe |
889 | * we are lucky and all data or parts are correct. We check the node. | 884 | * we are lucky and all data or parts are correct. We check the node. |
890 | * If data are corrupted node check will sort it out. | 885 | * If data are corrupted node check will sort it out. |
891 | * We keep this block, it will fail on write or erase and the we | 886 | * We keep this block, it will fail on write or erase and the we |
892 | * mark it bad. Or should we do that now? But we should give him a chance. | 887 | * mark it bad. Or should we do that now? But we should give him a chance. |
893 | * Maybe we had a system crash or power loss before the ecc write or | 888 | * Maybe we had a system crash or power loss before the ecc write or |
894 | * a erase was completed. | 889 | * a erase was completed. |
895 | * So we return success. :) | 890 | * So we return success. :) |
896 | */ | 891 | */ |
897 | ret = 0; | 892 | ret = 0; |
898 | } | 893 | } |
899 | 894 | ||
900 | /* if no writebuffer available or write buffer empty, return */ | 895 | /* if no writebuffer available or write buffer empty, return */ |
901 | if (!c->wbuf_pagesize || !c->wbuf_len) | 896 | if (!c->wbuf_pagesize || !c->wbuf_len) |
@@ -910,16 +905,16 @@ int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *re | |||
910 | if (owbf > c->wbuf_len) /* is read beyond write buffer ? */ | 905 | if (owbf > c->wbuf_len) /* is read beyond write buffer ? */ |
911 | goto exit; | 906 | goto exit; |
912 | lwbf = c->wbuf_len - owbf; /* number of bytes to copy */ | 907 | lwbf = c->wbuf_len - owbf; /* number of bytes to copy */ |
913 | if (lwbf > len) | 908 | if (lwbf > len) |
914 | lwbf = len; | 909 | lwbf = len; |
915 | } else { | 910 | } else { |
916 | orbf = (c->wbuf_ofs - ofs); /* offset in read buffer */ | 911 | orbf = (c->wbuf_ofs - ofs); /* offset in read buffer */ |
917 | if (orbf > len) /* is write beyond write buffer ? */ | 912 | if (orbf > len) /* is write beyond write buffer ? */ |
918 | goto exit; | 913 | goto exit; |
919 | lwbf = len - orbf; /* number of bytes to copy */ | 914 | lwbf = len - orbf; /* number of bytes to copy */ |
920 | if (lwbf > c->wbuf_len) | 915 | if (lwbf > c->wbuf_len) |
921 | lwbf = c->wbuf_len; | 916 | lwbf = c->wbuf_len; |
922 | } | 917 | } |
923 | if (lwbf > 0) | 918 | if (lwbf > 0) |
924 | memcpy(buf+orbf,c->wbuf+owbf,lwbf); | 919 | memcpy(buf+orbf,c->wbuf+owbf,lwbf); |
925 | 920 | ||
@@ -947,7 +942,7 @@ int jffs2_check_oob_empty( struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb | |||
947 | printk(KERN_NOTICE "jffs2_check_oob_empty(): allocation of temporary data buffer for oob check failed\n"); | 942 | printk(KERN_NOTICE "jffs2_check_oob_empty(): allocation of temporary data buffer for oob check failed\n"); |
948 | return -ENOMEM; | 943 | return -ENOMEM; |
949 | } | 944 | } |
950 | /* | 945 | /* |
951 | * if mode = 0, we scan for a total empty oob area, else we have | 946 | * if mode = 0, we scan for a total empty oob area, else we have |
952 | * to take care of the cleanmarker in the first page of the block | 947 | * to take care of the cleanmarker in the first page of the block |
953 | */ | 948 | */ |
@@ -956,41 +951,41 @@ int jffs2_check_oob_empty( struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb | |||
956 | D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB failed %d for block at %08x\n", ret, jeb->offset)); | 951 | D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB failed %d for block at %08x\n", ret, jeb->offset)); |
957 | goto out; | 952 | goto out; |
958 | } | 953 | } |
959 | 954 | ||
960 | if (retlen < len) { | 955 | if (retlen < len) { |
961 | D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB return short read " | 956 | D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB return short read " |
962 | "(%zd bytes not %d) for block at %08x\n", retlen, len, jeb->offset)); | 957 | "(%zd bytes not %d) for block at %08x\n", retlen, len, jeb->offset)); |
963 | ret = -EIO; | 958 | ret = -EIO; |
964 | goto out; | 959 | goto out; |
965 | } | 960 | } |
966 | 961 | ||
967 | /* Special check for first page */ | 962 | /* Special check for first page */ |
968 | for(i = 0; i < oob_size ; i++) { | 963 | for(i = 0; i < oob_size ; i++) { |
969 | /* Yeah, we know about the cleanmarker. */ | 964 | /* Yeah, we know about the cleanmarker. */ |
970 | if (mode && i >= c->fsdata_pos && | 965 | if (mode && i >= c->fsdata_pos && |
971 | i < c->fsdata_pos + c->fsdata_len) | 966 | i < c->fsdata_pos + c->fsdata_len) |
972 | continue; | 967 | continue; |
973 | 968 | ||
974 | if (buf[i] != 0xFF) { | 969 | if (buf[i] != 0xFF) { |
975 | D2(printk(KERN_DEBUG "Found %02x at %x in OOB for %08x\n", | 970 | D2(printk(KERN_DEBUG "Found %02x at %x in OOB for %08x\n", |
976 | buf[page+i], page+i, jeb->offset)); | 971 | buf[i], i, jeb->offset)); |
977 | ret = 1; | 972 | ret = 1; |
978 | goto out; | 973 | goto out; |
979 | } | 974 | } |
980 | } | 975 | } |
981 | 976 | ||
982 | /* we know, we are aligned :) */ | 977 | /* we know, we are aligned :) */ |
983 | for (page = oob_size; page < len; page += sizeof(long)) { | 978 | for (page = oob_size; page < len; page += sizeof(long)) { |
984 | unsigned long dat = *(unsigned long *)(&buf[page]); | 979 | unsigned long dat = *(unsigned long *)(&buf[page]); |
985 | if(dat != -1) { | 980 | if(dat != -1) { |
986 | ret = 1; | 981 | ret = 1; |
987 | goto out; | 982 | goto out; |
988 | } | 983 | } |
989 | } | 984 | } |
990 | 985 | ||
991 | out: | 986 | out: |
992 | kfree(buf); | 987 | kfree(buf); |
993 | 988 | ||
994 | return ret; | 989 | return ret; |
995 | } | 990 | } |
996 | 991 | ||
@@ -1072,7 +1067,7 @@ int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_erasebloc | |||
1072 | n.totlen = cpu_to_je32(8); | 1067 | n.totlen = cpu_to_je32(8); |
1073 | 1068 | ||
1074 | ret = jffs2_flash_write_oob(c, jeb->offset + c->fsdata_pos, c->fsdata_len, &retlen, (unsigned char *)&n); | 1069 | ret = jffs2_flash_write_oob(c, jeb->offset + c->fsdata_pos, c->fsdata_len, &retlen, (unsigned char *)&n); |
1075 | 1070 | ||
1076 | if (ret) { | 1071 | if (ret) { |
1077 | D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Write failed for block at %08x: error %d\n", jeb->offset, ret)); | 1072 | D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Write failed for block at %08x: error %d\n", jeb->offset, ret)); |
1078 | return ret; | 1073 | return ret; |
@@ -1084,7 +1079,7 @@ int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_erasebloc | |||
1084 | return 0; | 1079 | return 0; |
1085 | } | 1080 | } |
1086 | 1081 | ||
1087 | /* | 1082 | /* |
1088 | * On NAND we try to mark this block bad. If the block was erased more | 1083 | * On NAND we try to mark this block bad. If the block was erased more |
1089 | * than MAX_ERASE_FAILURES we mark it finaly bad. | 1084 | * than MAX_ERASE_FAILURES we mark it finaly bad. |
1090 | * Don't care about failures. This block remains on the erase-pending | 1085 | * Don't care about failures. This block remains on the erase-pending |
@@ -1105,7 +1100,7 @@ int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock * | |||
1105 | 1100 | ||
1106 | D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Marking bad block at %08x\n", bad_offset)); | 1101 | D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Marking bad block at %08x\n", bad_offset)); |
1107 | ret = c->mtd->block_markbad(c->mtd, bad_offset); | 1102 | ret = c->mtd->block_markbad(c->mtd, bad_offset); |
1108 | 1103 | ||
1109 | if (ret) { | 1104 | if (ret) { |
1110 | D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret)); | 1105 | D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret)); |
1111 | return ret; | 1106 | return ret; |
@@ -1129,7 +1124,7 @@ static int jffs2_nand_set_oobinfo(struct jffs2_sb_info *c) | |||
1129 | /* Do this only, if we have an oob buffer */ | 1124 | /* Do this only, if we have an oob buffer */ |
1130 | if (!c->mtd->oobsize) | 1125 | if (!c->mtd->oobsize) |
1131 | return 0; | 1126 | return 0; |
1132 | 1127 | ||
1133 | /* Cleanmarker is out-of-band, so inline size zero */ | 1128 | /* Cleanmarker is out-of-band, so inline size zero */ |
1134 | c->cleanmarker_size = 0; | 1129 | c->cleanmarker_size = 0; |
1135 | 1130 | ||
@@ -1155,7 +1150,7 @@ static int jffs2_nand_set_oobinfo(struct jffs2_sb_info *c) | |||
1155 | c->fsdata_len = NAND_JFFS2_OOB16_FSDALEN; | 1150 | c->fsdata_len = NAND_JFFS2_OOB16_FSDALEN; |
1156 | c->badblock_pos = 15; | 1151 | c->badblock_pos = 15; |
1157 | break; | 1152 | break; |
1158 | 1153 | ||
1159 | default: | 1154 | default: |
1160 | D1(printk(KERN_DEBUG "JFFS2 on NAND. No autoplacment info found\n")); | 1155 | D1(printk(KERN_DEBUG "JFFS2 on NAND. No autoplacment info found\n")); |
1161 | return -EINVAL; | 1156 | return -EINVAL; |
@@ -1172,7 +1167,7 @@ int jffs2_nand_flash_setup(struct jffs2_sb_info *c) | |||
1172 | init_rwsem(&c->wbuf_sem); | 1167 | init_rwsem(&c->wbuf_sem); |
1173 | c->wbuf_pagesize = c->mtd->oobblock; | 1168 | c->wbuf_pagesize = c->mtd->oobblock; |
1174 | c->wbuf_ofs = 0xFFFFFFFF; | 1169 | c->wbuf_ofs = 0xFFFFFFFF; |
1175 | 1170 | ||
1176 | c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); | 1171 | c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); |
1177 | if (!c->wbuf) | 1172 | if (!c->wbuf) |
1178 | return -ENOMEM; | 1173 | return -ENOMEM; |
@@ -1198,17 +1193,41 @@ void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c) | |||
1198 | 1193 | ||
1199 | int jffs2_dataflash_setup(struct jffs2_sb_info *c) { | 1194 | int jffs2_dataflash_setup(struct jffs2_sb_info *c) { |
1200 | c->cleanmarker_size = 0; /* No cleanmarkers needed */ | 1195 | c->cleanmarker_size = 0; /* No cleanmarkers needed */ |
1201 | 1196 | ||
1202 | /* Initialize write buffer */ | 1197 | /* Initialize write buffer */ |
1203 | init_rwsem(&c->wbuf_sem); | 1198 | init_rwsem(&c->wbuf_sem); |
1204 | c->wbuf_pagesize = c->sector_size; | ||
1205 | c->wbuf_ofs = 0xFFFFFFFF; | ||
1206 | 1199 | ||
1200 | |||
1201 | c->wbuf_pagesize = c->mtd->erasesize; | ||
1202 | |||
1203 | /* Find a suitable c->sector_size | ||
1204 | * - Not too much sectors | ||
1205 | * - Sectors have to be at least 4 K + some bytes | ||
1206 | * - All known dataflashes have erase sizes of 528 or 1056 | ||
1207 | * - we take at least 8 eraseblocks and want to have at least 8K size | ||
1208 | * - The concatenation should be a power of 2 | ||
1209 | */ | ||
1210 | |||
1211 | c->sector_size = 8 * c->mtd->erasesize; | ||
1212 | |||
1213 | while (c->sector_size < 8192) { | ||
1214 | c->sector_size *= 2; | ||
1215 | } | ||
1216 | |||
1217 | /* It may be necessary to adjust the flash size */ | ||
1218 | c->flash_size = c->mtd->size; | ||
1219 | |||
1220 | if ((c->flash_size % c->sector_size) != 0) { | ||
1221 | c->flash_size = (c->flash_size / c->sector_size) * c->sector_size; | ||
1222 | printk(KERN_WARNING "JFFS2 flash size adjusted to %dKiB\n", c->flash_size); | ||
1223 | }; | ||
1224 | |||
1225 | c->wbuf_ofs = 0xFFFFFFFF; | ||
1207 | c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); | 1226 | c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); |
1208 | if (!c->wbuf) | 1227 | if (!c->wbuf) |
1209 | return -ENOMEM; | 1228 | return -ENOMEM; |
1210 | 1229 | ||
1211 | printk(KERN_INFO "JFFS2 write-buffering enabled (%i)\n", c->wbuf_pagesize); | 1230 | printk(KERN_INFO "JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n", c->wbuf_pagesize, c->sector_size); |
1212 | 1231 | ||
1213 | return 0; | 1232 | return 0; |
1214 | } | 1233 | } |
@@ -1236,3 +1255,23 @@ int jffs2_nor_ecc_flash_setup(struct jffs2_sb_info *c) { | |||
1236 | void jffs2_nor_ecc_flash_cleanup(struct jffs2_sb_info *c) { | 1255 | void jffs2_nor_ecc_flash_cleanup(struct jffs2_sb_info *c) { |
1237 | kfree(c->wbuf); | 1256 | kfree(c->wbuf); |
1238 | } | 1257 | } |
1258 | |||
1259 | int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) { | ||
1260 | /* Cleanmarker currently occupies a whole programming region */ | ||
1261 | c->cleanmarker_size = MTD_PROGREGION_SIZE(c->mtd); | ||
1262 | |||
1263 | /* Initialize write buffer */ | ||
1264 | init_rwsem(&c->wbuf_sem); | ||
1265 | c->wbuf_pagesize = MTD_PROGREGION_SIZE(c->mtd); | ||
1266 | c->wbuf_ofs = 0xFFFFFFFF; | ||
1267 | |||
1268 | c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); | ||
1269 | if (!c->wbuf) | ||
1270 | return -ENOMEM; | ||
1271 | |||
1272 | return 0; | ||
1273 | } | ||
1274 | |||
1275 | void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) { | ||
1276 | kfree(c->wbuf); | ||
1277 | } | ||
diff --git a/fs/jffs2/write.c b/fs/jffs2/write.c index 69100615d9ae..1342f0158e9b 100644 --- a/fs/jffs2/write.c +++ b/fs/jffs2/write.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: write.c,v 1.92 2005/04/13 13:22:35 dwmw2 Exp $ | 10 | * $Id: write.c,v 1.97 2005/11/07 11:14:42 gleixner Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -54,35 +54,7 @@ int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint | |||
54 | return 0; | 54 | return 0; |
55 | } | 55 | } |
56 | 56 | ||
57 | #if CONFIG_JFFS2_FS_DEBUG > 0 | 57 | /* jffs2_write_dnode - given a raw_inode, allocate a full_dnode for it, |
58 | static void writecheck(struct jffs2_sb_info *c, uint32_t ofs) | ||
59 | { | ||
60 | unsigned char buf[16]; | ||
61 | size_t retlen; | ||
62 | int ret, i; | ||
63 | |||
64 | ret = jffs2_flash_read(c, ofs, 16, &retlen, buf); | ||
65 | if (ret || (retlen != 16)) { | ||
66 | D1(printk(KERN_DEBUG "read failed or short in writecheck(). ret %d, retlen %zd\n", ret, retlen)); | ||
67 | return; | ||
68 | } | ||
69 | ret = 0; | ||
70 | for (i=0; i<16; i++) { | ||
71 | if (buf[i] != 0xff) | ||
72 | ret = 1; | ||
73 | } | ||
74 | if (ret) { | ||
75 | printk(KERN_WARNING "ARGH. About to write node to 0x%08x on flash, but there are data already there:\n", ofs); | ||
76 | printk(KERN_WARNING "0x%08x: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", | ||
77 | ofs, | ||
78 | buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7], | ||
79 | buf[8], buf[9], buf[10], buf[11], buf[12], buf[13], buf[14], buf[15]); | ||
80 | } | ||
81 | } | ||
82 | #endif | ||
83 | |||
84 | |||
85 | /* jffs2_write_dnode - given a raw_inode, allocate a full_dnode for it, | ||
86 | write it to the flash, link it into the existing inode/fragment list */ | 58 | write it to the flash, link it into the existing inode/fragment list */ |
87 | 59 | ||
88 | struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const unsigned char *data, uint32_t datalen, uint32_t flash_ofs, int alloc_mode) | 60 | struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const unsigned char *data, uint32_t datalen, uint32_t flash_ofs, int alloc_mode) |
@@ -106,7 +78,7 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
106 | vecs[1].iov_base = (unsigned char *)data; | 78 | vecs[1].iov_base = (unsigned char *)data; |
107 | vecs[1].iov_len = datalen; | 79 | vecs[1].iov_len = datalen; |
108 | 80 | ||
109 | D1(writecheck(c, flash_ofs)); | 81 | jffs2_dbg_prewrite_paranoia_check(c, flash_ofs, vecs[0].iov_len + vecs[1].iov_len); |
110 | 82 | ||
111 | if (je32_to_cpu(ri->totlen) != sizeof(*ri) + datalen) { | 83 | if (je32_to_cpu(ri->totlen) != sizeof(*ri) + datalen) { |
112 | printk(KERN_WARNING "jffs2_write_dnode: ri->totlen (0x%08x) != sizeof(*ri) (0x%08zx) + datalen (0x%08x)\n", je32_to_cpu(ri->totlen), sizeof(*ri), datalen); | 84 | printk(KERN_WARNING "jffs2_write_dnode: ri->totlen (0x%08x) != sizeof(*ri) (0x%08zx) + datalen (0x%08x)\n", je32_to_cpu(ri->totlen), sizeof(*ri), datalen); |
@@ -114,7 +86,7 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
114 | raw = jffs2_alloc_raw_node_ref(); | 86 | raw = jffs2_alloc_raw_node_ref(); |
115 | if (!raw) | 87 | if (!raw) |
116 | return ERR_PTR(-ENOMEM); | 88 | return ERR_PTR(-ENOMEM); |
117 | 89 | ||
118 | fn = jffs2_alloc_full_dnode(); | 90 | fn = jffs2_alloc_full_dnode(); |
119 | if (!fn) { | 91 | if (!fn) { |
120 | jffs2_free_raw_node_ref(raw); | 92 | jffs2_free_raw_node_ref(raw); |
@@ -138,7 +110,7 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
138 | if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(ri->version) < f->highest_version)) { | 110 | if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(ri->version) < f->highest_version)) { |
139 | BUG_ON(!retried); | 111 | BUG_ON(!retried); |
140 | D1(printk(KERN_DEBUG "jffs2_write_dnode : dnode_version %d, " | 112 | D1(printk(KERN_DEBUG "jffs2_write_dnode : dnode_version %d, " |
141 | "highest version %d -> updating dnode\n", | 113 | "highest version %d -> updating dnode\n", |
142 | je32_to_cpu(ri->version), f->highest_version)); | 114 | je32_to_cpu(ri->version), f->highest_version)); |
143 | ri->version = cpu_to_je32(++f->highest_version); | 115 | ri->version = cpu_to_je32(++f->highest_version); |
144 | ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); | 116 | ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); |
@@ -148,7 +120,7 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
148 | (alloc_mode==ALLOC_GC)?0:f->inocache->ino); | 120 | (alloc_mode==ALLOC_GC)?0:f->inocache->ino); |
149 | 121 | ||
150 | if (ret || (retlen != sizeof(*ri) + datalen)) { | 122 | if (ret || (retlen != sizeof(*ri) + datalen)) { |
151 | printk(KERN_NOTICE "Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n", | 123 | printk(KERN_NOTICE "Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n", |
152 | sizeof(*ri)+datalen, flash_ofs, ret, retlen); | 124 | sizeof(*ri)+datalen, flash_ofs, ret, retlen); |
153 | 125 | ||
154 | /* Mark the space as dirtied */ | 126 | /* Mark the space as dirtied */ |
@@ -156,10 +128,10 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
156 | /* Doesn't belong to any inode */ | 128 | /* Doesn't belong to any inode */ |
157 | raw->next_in_ino = NULL; | 129 | raw->next_in_ino = NULL; |
158 | 130 | ||
159 | /* Don't change raw->size to match retlen. We may have | 131 | /* Don't change raw->size to match retlen. We may have |
160 | written the node header already, and only the data will | 132 | written the node header already, and only the data will |
161 | seem corrupted, in which case the scan would skip over | 133 | seem corrupted, in which case the scan would skip over |
162 | any node we write before the original intended end of | 134 | any node we write before the original intended end of |
163 | this node */ | 135 | this node */ |
164 | raw->flash_offset |= REF_OBSOLETE; | 136 | raw->flash_offset |= REF_OBSOLETE; |
165 | jffs2_add_physical_node_ref(c, raw); | 137 | jffs2_add_physical_node_ref(c, raw); |
@@ -176,26 +148,28 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
176 | retried = 1; | 148 | retried = 1; |
177 | 149 | ||
178 | D1(printk(KERN_DEBUG "Retrying failed write.\n")); | 150 | D1(printk(KERN_DEBUG "Retrying failed write.\n")); |
179 | 151 | ||
180 | ACCT_SANITY_CHECK(c,jeb); | 152 | jffs2_dbg_acct_sanity_check(c,jeb); |
181 | D1(ACCT_PARANOIA_CHECK(jeb)); | 153 | jffs2_dbg_acct_paranoia_check(c, jeb); |
182 | 154 | ||
183 | if (alloc_mode == ALLOC_GC) { | 155 | if (alloc_mode == ALLOC_GC) { |
184 | ret = jffs2_reserve_space_gc(c, sizeof(*ri) + datalen, &flash_ofs, &dummy); | 156 | ret = jffs2_reserve_space_gc(c, sizeof(*ri) + datalen, &flash_ofs, |
157 | &dummy, JFFS2_SUMMARY_INODE_SIZE); | ||
185 | } else { | 158 | } else { |
186 | /* Locking pain */ | 159 | /* Locking pain */ |
187 | up(&f->sem); | 160 | up(&f->sem); |
188 | jffs2_complete_reservation(c); | 161 | jffs2_complete_reservation(c); |
189 | 162 | ||
190 | ret = jffs2_reserve_space(c, sizeof(*ri) + datalen, &flash_ofs, &dummy, alloc_mode); | 163 | ret = jffs2_reserve_space(c, sizeof(*ri) + datalen, &flash_ofs, |
164 | &dummy, alloc_mode, JFFS2_SUMMARY_INODE_SIZE); | ||
191 | down(&f->sem); | 165 | down(&f->sem); |
192 | } | 166 | } |
193 | 167 | ||
194 | if (!ret) { | 168 | if (!ret) { |
195 | D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs)); | 169 | D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs)); |
196 | 170 | ||
197 | ACCT_SANITY_CHECK(c,jeb); | 171 | jffs2_dbg_acct_sanity_check(c,jeb); |
198 | D1(ACCT_PARANOIA_CHECK(jeb)); | 172 | jffs2_dbg_acct_paranoia_check(c, jeb); |
199 | 173 | ||
200 | goto retry; | 174 | goto retry; |
201 | } | 175 | } |
@@ -207,9 +181,9 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
207 | return ERR_PTR(ret?ret:-EIO); | 181 | return ERR_PTR(ret?ret:-EIO); |
208 | } | 182 | } |
209 | /* Mark the space used */ | 183 | /* Mark the space used */ |
210 | /* If node covers at least a whole page, or if it starts at the | 184 | /* If node covers at least a whole page, or if it starts at the |
211 | beginning of a page and runs to the end of the file, or if | 185 | beginning of a page and runs to the end of the file, or if |
212 | it's a hole node, mark it REF_PRISTINE, else REF_NORMAL. | 186 | it's a hole node, mark it REF_PRISTINE, else REF_NORMAL. |
213 | */ | 187 | */ |
214 | if ((je32_to_cpu(ri->dsize) >= PAGE_CACHE_SIZE) || | 188 | if ((je32_to_cpu(ri->dsize) >= PAGE_CACHE_SIZE) || |
215 | ( ((je32_to_cpu(ri->offset)&(PAGE_CACHE_SIZE-1))==0) && | 189 | ( ((je32_to_cpu(ri->offset)&(PAGE_CACHE_SIZE-1))==0) && |
@@ -227,12 +201,12 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
227 | spin_unlock(&c->erase_completion_lock); | 201 | spin_unlock(&c->erase_completion_lock); |
228 | 202 | ||
229 | D1(printk(KERN_DEBUG "jffs2_write_dnode wrote node at 0x%08x(%d) with dsize 0x%x, csize 0x%x, node_crc 0x%08x, data_crc 0x%08x, totlen 0x%08x\n", | 203 | D1(printk(KERN_DEBUG "jffs2_write_dnode wrote node at 0x%08x(%d) with dsize 0x%x, csize 0x%x, node_crc 0x%08x, data_crc 0x%08x, totlen 0x%08x\n", |
230 | flash_ofs, ref_flags(raw), je32_to_cpu(ri->dsize), | 204 | flash_ofs, ref_flags(raw), je32_to_cpu(ri->dsize), |
231 | je32_to_cpu(ri->csize), je32_to_cpu(ri->node_crc), | 205 | je32_to_cpu(ri->csize), je32_to_cpu(ri->node_crc), |
232 | je32_to_cpu(ri->data_crc), je32_to_cpu(ri->totlen))); | 206 | je32_to_cpu(ri->data_crc), je32_to_cpu(ri->totlen))); |
233 | 207 | ||
234 | if (retried) { | 208 | if (retried) { |
235 | ACCT_SANITY_CHECK(c,NULL); | 209 | jffs2_dbg_acct_sanity_check(c,NULL); |
236 | } | 210 | } |
237 | 211 | ||
238 | return fn; | 212 | return fn; |
@@ -247,10 +221,9 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff | |||
247 | int retried = 0; | 221 | int retried = 0; |
248 | int ret; | 222 | int ret; |
249 | 223 | ||
250 | D1(printk(KERN_DEBUG "jffs2_write_dirent(ino #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x)\n", | 224 | D1(printk(KERN_DEBUG "jffs2_write_dirent(ino #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x)\n", |
251 | je32_to_cpu(rd->pino), name, name, je32_to_cpu(rd->ino), | 225 | je32_to_cpu(rd->pino), name, name, je32_to_cpu(rd->ino), |
252 | je32_to_cpu(rd->name_crc))); | 226 | je32_to_cpu(rd->name_crc))); |
253 | D1(writecheck(c, flash_ofs)); | ||
254 | 227 | ||
255 | D1(if(je32_to_cpu(rd->hdr_crc) != crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)) { | 228 | D1(if(je32_to_cpu(rd->hdr_crc) != crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)) { |
256 | printk(KERN_CRIT "Eep. CRC not correct in jffs2_write_dirent()\n"); | 229 | printk(KERN_CRIT "Eep. CRC not correct in jffs2_write_dirent()\n"); |
@@ -262,7 +235,9 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff | |||
262 | vecs[0].iov_len = sizeof(*rd); | 235 | vecs[0].iov_len = sizeof(*rd); |
263 | vecs[1].iov_base = (unsigned char *)name; | 236 | vecs[1].iov_base = (unsigned char *)name; |
264 | vecs[1].iov_len = namelen; | 237 | vecs[1].iov_len = namelen; |
265 | 238 | ||
239 | jffs2_dbg_prewrite_paranoia_check(c, flash_ofs, vecs[0].iov_len + vecs[1].iov_len); | ||
240 | |||
266 | raw = jffs2_alloc_raw_node_ref(); | 241 | raw = jffs2_alloc_raw_node_ref(); |
267 | 242 | ||
268 | if (!raw) | 243 | if (!raw) |
@@ -301,7 +276,7 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff | |||
301 | ret = jffs2_flash_writev(c, vecs, 2, flash_ofs, &retlen, | 276 | ret = jffs2_flash_writev(c, vecs, 2, flash_ofs, &retlen, |
302 | (alloc_mode==ALLOC_GC)?0:je32_to_cpu(rd->pino)); | 277 | (alloc_mode==ALLOC_GC)?0:je32_to_cpu(rd->pino)); |
303 | if (ret || (retlen != sizeof(*rd) + namelen)) { | 278 | if (ret || (retlen != sizeof(*rd) + namelen)) { |
304 | printk(KERN_NOTICE "Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n", | 279 | printk(KERN_NOTICE "Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n", |
305 | sizeof(*rd)+namelen, flash_ofs, ret, retlen); | 280 | sizeof(*rd)+namelen, flash_ofs, ret, retlen); |
306 | /* Mark the space as dirtied */ | 281 | /* Mark the space as dirtied */ |
307 | if (retlen) { | 282 | if (retlen) { |
@@ -322,24 +297,26 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff | |||
322 | 297 | ||
323 | D1(printk(KERN_DEBUG "Retrying failed write.\n")); | 298 | D1(printk(KERN_DEBUG "Retrying failed write.\n")); |
324 | 299 | ||
325 | ACCT_SANITY_CHECK(c,jeb); | 300 | jffs2_dbg_acct_sanity_check(c,jeb); |
326 | D1(ACCT_PARANOIA_CHECK(jeb)); | 301 | jffs2_dbg_acct_paranoia_check(c, jeb); |
327 | 302 | ||
328 | if (alloc_mode == ALLOC_GC) { | 303 | if (alloc_mode == ALLOC_GC) { |
329 | ret = jffs2_reserve_space_gc(c, sizeof(*rd) + namelen, &flash_ofs, &dummy); | 304 | ret = jffs2_reserve_space_gc(c, sizeof(*rd) + namelen, &flash_ofs, |
305 | &dummy, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); | ||
330 | } else { | 306 | } else { |
331 | /* Locking pain */ | 307 | /* Locking pain */ |
332 | up(&f->sem); | 308 | up(&f->sem); |
333 | jffs2_complete_reservation(c); | 309 | jffs2_complete_reservation(c); |
334 | 310 | ||
335 | ret = jffs2_reserve_space(c, sizeof(*rd) + namelen, &flash_ofs, &dummy, alloc_mode); | 311 | ret = jffs2_reserve_space(c, sizeof(*rd) + namelen, &flash_ofs, |
312 | &dummy, alloc_mode, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); | ||
336 | down(&f->sem); | 313 | down(&f->sem); |
337 | } | 314 | } |
338 | 315 | ||
339 | if (!ret) { | 316 | if (!ret) { |
340 | D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs)); | 317 | D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs)); |
341 | ACCT_SANITY_CHECK(c,jeb); | 318 | jffs2_dbg_acct_sanity_check(c,jeb); |
342 | D1(ACCT_PARANOIA_CHECK(jeb)); | 319 | jffs2_dbg_acct_paranoia_check(c, jeb); |
343 | goto retry; | 320 | goto retry; |
344 | } | 321 | } |
345 | D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret)); | 322 | D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret)); |
@@ -359,7 +336,7 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff | |||
359 | spin_unlock(&c->erase_completion_lock); | 336 | spin_unlock(&c->erase_completion_lock); |
360 | 337 | ||
361 | if (retried) { | 338 | if (retried) { |
362 | ACCT_SANITY_CHECK(c,NULL); | 339 | jffs2_dbg_acct_sanity_check(c,NULL); |
363 | } | 340 | } |
364 | 341 | ||
365 | return fd; | 342 | return fd; |
@@ -369,7 +346,7 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff | |||
369 | we don't have to go digging in struct inode or its equivalent. It should set: | 346 | we don't have to go digging in struct inode or its equivalent. It should set: |
370 | mode, uid, gid, (starting)isize, atime, ctime, mtime */ | 347 | mode, uid, gid, (starting)isize, atime, ctime, mtime */ |
371 | int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | 348 | int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, |
372 | struct jffs2_raw_inode *ri, unsigned char *buf, | 349 | struct jffs2_raw_inode *ri, unsigned char *buf, |
373 | uint32_t offset, uint32_t writelen, uint32_t *retlen) | 350 | uint32_t offset, uint32_t writelen, uint32_t *retlen) |
374 | { | 351 | { |
375 | int ret = 0; | 352 | int ret = 0; |
@@ -377,7 +354,7 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
377 | 354 | ||
378 | D1(printk(KERN_DEBUG "jffs2_write_inode_range(): Ino #%u, ofs 0x%x, len 0x%x\n", | 355 | D1(printk(KERN_DEBUG "jffs2_write_inode_range(): Ino #%u, ofs 0x%x, len 0x%x\n", |
379 | f->inocache->ino, offset, writelen)); | 356 | f->inocache->ino, offset, writelen)); |
380 | 357 | ||
381 | while(writelen) { | 358 | while(writelen) { |
382 | struct jffs2_full_dnode *fn; | 359 | struct jffs2_full_dnode *fn; |
383 | unsigned char *comprbuf = NULL; | 360 | unsigned char *comprbuf = NULL; |
@@ -389,7 +366,8 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
389 | retry: | 366 | retry: |
390 | D2(printk(KERN_DEBUG "jffs2_commit_write() loop: 0x%x to write to 0x%x\n", writelen, offset)); | 367 | D2(printk(KERN_DEBUG "jffs2_commit_write() loop: 0x%x to write to 0x%x\n", writelen, offset)); |
391 | 368 | ||
392 | ret = jffs2_reserve_space(c, sizeof(*ri) + JFFS2_MIN_DATA_LEN, &phys_ofs, &alloclen, ALLOC_NORMAL); | 369 | ret = jffs2_reserve_space(c, sizeof(*ri) + JFFS2_MIN_DATA_LEN, &phys_ofs, |
370 | &alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); | ||
393 | if (ret) { | 371 | if (ret) { |
394 | D1(printk(KERN_DEBUG "jffs2_reserve_space returned %d\n", ret)); | 372 | D1(printk(KERN_DEBUG "jffs2_reserve_space returned %d\n", ret)); |
395 | break; | 373 | break; |
@@ -473,10 +451,11 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str | |||
473 | uint32_t alloclen, phys_ofs; | 451 | uint32_t alloclen, phys_ofs; |
474 | int ret; | 452 | int ret; |
475 | 453 | ||
476 | /* Try to reserve enough space for both node and dirent. | 454 | /* Try to reserve enough space for both node and dirent. |
477 | * Just the node will do for now, though | 455 | * Just the node will do for now, though |
478 | */ | 456 | */ |
479 | ret = jffs2_reserve_space(c, sizeof(*ri), &phys_ofs, &alloclen, ALLOC_NORMAL); | 457 | ret = jffs2_reserve_space(c, sizeof(*ri), &phys_ofs, &alloclen, ALLOC_NORMAL, |
458 | JFFS2_SUMMARY_INODE_SIZE); | ||
480 | D1(printk(KERN_DEBUG "jffs2_do_create(): reserved 0x%x bytes\n", alloclen)); | 459 | D1(printk(KERN_DEBUG "jffs2_do_create(): reserved 0x%x bytes\n", alloclen)); |
481 | if (ret) { | 460 | if (ret) { |
482 | up(&f->sem); | 461 | up(&f->sem); |
@@ -498,15 +477,16 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str | |||
498 | jffs2_complete_reservation(c); | 477 | jffs2_complete_reservation(c); |
499 | return PTR_ERR(fn); | 478 | return PTR_ERR(fn); |
500 | } | 479 | } |
501 | /* No data here. Only a metadata node, which will be | 480 | /* No data here. Only a metadata node, which will be |
502 | obsoleted by the first data write | 481 | obsoleted by the first data write |
503 | */ | 482 | */ |
504 | f->metadata = fn; | 483 | f->metadata = fn; |
505 | 484 | ||
506 | up(&f->sem); | 485 | up(&f->sem); |
507 | jffs2_complete_reservation(c); | 486 | jffs2_complete_reservation(c); |
508 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL); | 487 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, |
509 | 488 | ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); | |
489 | |||
510 | if (ret) { | 490 | if (ret) { |
511 | /* Eep. */ | 491 | /* Eep. */ |
512 | D1(printk(KERN_DEBUG "jffs2_reserve_space() for dirent failed\n")); | 492 | D1(printk(KERN_DEBUG "jffs2_reserve_space() for dirent failed\n")); |
@@ -539,9 +519,9 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str | |||
539 | fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_NORMAL); | 519 | fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_NORMAL); |
540 | 520 | ||
541 | jffs2_free_raw_dirent(rd); | 521 | jffs2_free_raw_dirent(rd); |
542 | 522 | ||
543 | if (IS_ERR(fd)) { | 523 | if (IS_ERR(fd)) { |
544 | /* dirent failed to write. Delete the inode normally | 524 | /* dirent failed to write. Delete the inode normally |
545 | as if it were the final unlink() */ | 525 | as if it were the final unlink() */ |
546 | jffs2_complete_reservation(c); | 526 | jffs2_complete_reservation(c); |
547 | up(&dir_f->sem); | 527 | up(&dir_f->sem); |
@@ -560,14 +540,15 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str | |||
560 | 540 | ||
561 | 541 | ||
562 | int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, | 542 | int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, |
563 | const char *name, int namelen, struct jffs2_inode_info *dead_f) | 543 | const char *name, int namelen, struct jffs2_inode_info *dead_f, |
544 | uint32_t time) | ||
564 | { | 545 | { |
565 | struct jffs2_raw_dirent *rd; | 546 | struct jffs2_raw_dirent *rd; |
566 | struct jffs2_full_dirent *fd; | 547 | struct jffs2_full_dirent *fd; |
567 | uint32_t alloclen, phys_ofs; | 548 | uint32_t alloclen, phys_ofs; |
568 | int ret; | 549 | int ret; |
569 | 550 | ||
570 | if (1 /* alternative branch needs testing */ || | 551 | if (1 /* alternative branch needs testing */ || |
571 | !jffs2_can_mark_obsolete(c)) { | 552 | !jffs2_can_mark_obsolete(c)) { |
572 | /* We can't mark stuff obsolete on the medium. We need to write a deletion dirent */ | 553 | /* We can't mark stuff obsolete on the medium. We need to write a deletion dirent */ |
573 | 554 | ||
@@ -575,7 +556,8 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, | |||
575 | if (!rd) | 556 | if (!rd) |
576 | return -ENOMEM; | 557 | return -ENOMEM; |
577 | 558 | ||
578 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_DELETION); | 559 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, |
560 | ALLOC_DELETION, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); | ||
579 | if (ret) { | 561 | if (ret) { |
580 | jffs2_free_raw_dirent(rd); | 562 | jffs2_free_raw_dirent(rd); |
581 | return ret; | 563 | return ret; |
@@ -588,18 +570,18 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, | |||
588 | rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT); | 570 | rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT); |
589 | rd->totlen = cpu_to_je32(sizeof(*rd) + namelen); | 571 | rd->totlen = cpu_to_je32(sizeof(*rd) + namelen); |
590 | rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)); | 572 | rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)); |
591 | 573 | ||
592 | rd->pino = cpu_to_je32(dir_f->inocache->ino); | 574 | rd->pino = cpu_to_je32(dir_f->inocache->ino); |
593 | rd->version = cpu_to_je32(++dir_f->highest_version); | 575 | rd->version = cpu_to_je32(++dir_f->highest_version); |
594 | rd->ino = cpu_to_je32(0); | 576 | rd->ino = cpu_to_je32(0); |
595 | rd->mctime = cpu_to_je32(get_seconds()); | 577 | rd->mctime = cpu_to_je32(time); |
596 | rd->nsize = namelen; | 578 | rd->nsize = namelen; |
597 | rd->type = DT_UNKNOWN; | 579 | rd->type = DT_UNKNOWN; |
598 | rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); | 580 | rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); |
599 | rd->name_crc = cpu_to_je32(crc32(0, name, namelen)); | 581 | rd->name_crc = cpu_to_je32(crc32(0, name, namelen)); |
600 | 582 | ||
601 | fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_DELETION); | 583 | fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_DELETION); |
602 | 584 | ||
603 | jffs2_free_raw_dirent(rd); | 585 | jffs2_free_raw_dirent(rd); |
604 | 586 | ||
605 | if (IS_ERR(fd)) { | 587 | if (IS_ERR(fd)) { |
@@ -618,7 +600,7 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, | |||
618 | down(&dir_f->sem); | 600 | down(&dir_f->sem); |
619 | 601 | ||
620 | while ((*prev) && (*prev)->nhash <= nhash) { | 602 | while ((*prev) && (*prev)->nhash <= nhash) { |
621 | if ((*prev)->nhash == nhash && | 603 | if ((*prev)->nhash == nhash && |
622 | !memcmp((*prev)->name, name, namelen) && | 604 | !memcmp((*prev)->name, name, namelen) && |
623 | !(*prev)->name[namelen]) { | 605 | !(*prev)->name[namelen]) { |
624 | struct jffs2_full_dirent *this = *prev; | 606 | struct jffs2_full_dirent *this = *prev; |
@@ -639,7 +621,7 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, | |||
639 | /* dead_f is NULL if this was a rename not a real unlink */ | 621 | /* dead_f is NULL if this was a rename not a real unlink */ |
640 | /* Also catch the !f->inocache case, where there was a dirent | 622 | /* Also catch the !f->inocache case, where there was a dirent |
641 | pointing to an inode which didn't exist. */ | 623 | pointing to an inode which didn't exist. */ |
642 | if (dead_f && dead_f->inocache) { | 624 | if (dead_f && dead_f->inocache) { |
643 | 625 | ||
644 | down(&dead_f->sem); | 626 | down(&dead_f->sem); |
645 | 627 | ||
@@ -647,9 +629,9 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, | |||
647 | while (dead_f->dents) { | 629 | while (dead_f->dents) { |
648 | /* There can be only deleted ones */ | 630 | /* There can be only deleted ones */ |
649 | fd = dead_f->dents; | 631 | fd = dead_f->dents; |
650 | 632 | ||
651 | dead_f->dents = fd->next; | 633 | dead_f->dents = fd->next; |
652 | 634 | ||
653 | if (fd->ino) { | 635 | if (fd->ino) { |
654 | printk(KERN_WARNING "Deleting inode #%u with active dentry \"%s\"->ino #%u\n", | 636 | printk(KERN_WARNING "Deleting inode #%u with active dentry \"%s\"->ino #%u\n", |
655 | dead_f->inocache->ino, fd->name, fd->ino); | 637 | dead_f->inocache->ino, fd->name, fd->ino); |
@@ -673,7 +655,7 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, | |||
673 | } | 655 | } |
674 | 656 | ||
675 | 657 | ||
676 | int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino, uint8_t type, const char *name, int namelen) | 658 | int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino, uint8_t type, const char *name, int namelen, uint32_t time) |
677 | { | 659 | { |
678 | struct jffs2_raw_dirent *rd; | 660 | struct jffs2_raw_dirent *rd; |
679 | struct jffs2_full_dirent *fd; | 661 | struct jffs2_full_dirent *fd; |
@@ -684,12 +666,13 @@ int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint | |||
684 | if (!rd) | 666 | if (!rd) |
685 | return -ENOMEM; | 667 | return -ENOMEM; |
686 | 668 | ||
687 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL); | 669 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, |
670 | ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); | ||
688 | if (ret) { | 671 | if (ret) { |
689 | jffs2_free_raw_dirent(rd); | 672 | jffs2_free_raw_dirent(rd); |
690 | return ret; | 673 | return ret; |
691 | } | 674 | } |
692 | 675 | ||
693 | down(&dir_f->sem); | 676 | down(&dir_f->sem); |
694 | 677 | ||
695 | /* Build a deletion node */ | 678 | /* Build a deletion node */ |
@@ -701,7 +684,7 @@ int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint | |||
701 | rd->pino = cpu_to_je32(dir_f->inocache->ino); | 684 | rd->pino = cpu_to_je32(dir_f->inocache->ino); |
702 | rd->version = cpu_to_je32(++dir_f->highest_version); | 685 | rd->version = cpu_to_je32(++dir_f->highest_version); |
703 | rd->ino = cpu_to_je32(ino); | 686 | rd->ino = cpu_to_je32(ino); |
704 | rd->mctime = cpu_to_je32(get_seconds()); | 687 | rd->mctime = cpu_to_je32(time); |
705 | rd->nsize = namelen; | 688 | rd->nsize = namelen; |
706 | 689 | ||
707 | rd->type = type; | 690 | rd->type = type; |
@@ -710,7 +693,7 @@ int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint | |||
710 | rd->name_crc = cpu_to_je32(crc32(0, name, namelen)); | 693 | rd->name_crc = cpu_to_je32(crc32(0, name, namelen)); |
711 | 694 | ||
712 | fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_NORMAL); | 695 | fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_NORMAL); |
713 | 696 | ||
714 | jffs2_free_raw_dirent(rd); | 697 | jffs2_free_raw_dirent(rd); |
715 | 698 | ||
716 | if (IS_ERR(fd)) { | 699 | if (IS_ERR(fd)) { |
diff --git a/fs/jffs2/writev.c b/fs/jffs2/writev.c index f079f8388566..c638ae1008de 100644 --- a/fs/jffs2/writev.c +++ b/fs/jffs2/writev.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: writev.c,v 1.6 2004/11/16 20:36:12 dwmw2 Exp $ | 10 | * $Id: writev.c,v 1.8 2005/09/09 15:11:58 havasi Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -42,9 +42,40 @@ static inline int mtd_fake_writev(struct mtd_info *mtd, const struct kvec *vecs, | |||
42 | int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs, | 42 | int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs, |
43 | unsigned long count, loff_t to, size_t *retlen) | 43 | unsigned long count, loff_t to, size_t *retlen) |
44 | { | 44 | { |
45 | if (!jffs2_is_writebuffered(c)) { | ||
46 | if (jffs2_sum_active()) { | ||
47 | int res; | ||
48 | res = jffs2_sum_add_kvec(c, vecs, count, (uint32_t) to); | ||
49 | if (res) { | ||
50 | return res; | ||
51 | } | ||
52 | } | ||
53 | } | ||
54 | |||
45 | if (c->mtd->writev) | 55 | if (c->mtd->writev) |
46 | return c->mtd->writev(c->mtd, vecs, count, to, retlen); | 56 | return c->mtd->writev(c->mtd, vecs, count, to, retlen); |
47 | else | 57 | else { |
48 | return mtd_fake_writev(c->mtd, vecs, count, to, retlen); | 58 | return mtd_fake_writev(c->mtd, vecs, count, to, retlen); |
59 | } | ||
49 | } | 60 | } |
50 | 61 | ||
62 | int jffs2_flash_direct_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, | ||
63 | size_t *retlen, const u_char *buf) | ||
64 | { | ||
65 | int ret; | ||
66 | ret = c->mtd->write(c->mtd, ofs, len, retlen, buf); | ||
67 | |||
68 | if (jffs2_sum_active()) { | ||
69 | struct kvec vecs[1]; | ||
70 | int res; | ||
71 | |||
72 | vecs[0].iov_base = (unsigned char *) buf; | ||
73 | vecs[0].iov_len = len; | ||
74 | |||
75 | res = jffs2_sum_add_kvec(c, vecs, 1, (uint32_t) ofs); | ||
76 | if (res) { | ||
77 | return res; | ||
78 | } | ||
79 | } | ||
80 | return ret; | ||
81 | } | ||
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c index 87332f30141b..c5a33648e9fd 100644 --- a/fs/lockd/clntproc.c +++ b/fs/lockd/clntproc.c | |||
@@ -112,8 +112,7 @@ static struct nlm_lockowner *nlm_find_lockowner(struct nlm_host *host, fl_owner_ | |||
112 | } | 112 | } |
113 | } | 113 | } |
114 | spin_unlock(&host->h_lock); | 114 | spin_unlock(&host->h_lock); |
115 | if (new != NULL) | 115 | kfree(new); |
116 | kfree(new); | ||
117 | return res; | 116 | return res; |
118 | } | 117 | } |
119 | 118 | ||
diff --git a/fs/mbcache.c b/fs/mbcache.c index 298997f17475..0f1e4530670f 100644 --- a/fs/mbcache.c +++ b/fs/mbcache.c | |||
@@ -301,8 +301,7 @@ fail: | |||
301 | if (cache) { | 301 | if (cache) { |
302 | while (--m >= 0) | 302 | while (--m >= 0) |
303 | kfree(cache->c_indexes_hash[m]); | 303 | kfree(cache->c_indexes_hash[m]); |
304 | if (cache->c_block_hash) | 304 | kfree(cache->c_block_hash); |
305 | kfree(cache->c_block_hash); | ||
306 | kfree(cache); | 305 | kfree(cache); |
307 | } | 306 | } |
308 | return NULL; | 307 | return NULL; |
diff --git a/fs/namei.c b/fs/namei.c index c5769c4fcab1..b3f8a1966c9c 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -1459,7 +1459,7 @@ int may_open(struct nameidata *nd, int acc_mode, int flag) | |||
1459 | if (!error) { | 1459 | if (!error) { |
1460 | DQUOT_INIT(inode); | 1460 | DQUOT_INIT(inode); |
1461 | 1461 | ||
1462 | error = do_truncate(dentry, 0); | 1462 | error = do_truncate(dentry, 0, NULL); |
1463 | } | 1463 | } |
1464 | put_write_access(inode); | 1464 | put_write_access(inode); |
1465 | if (error) | 1465 | if (error) |
diff --git a/fs/namespace.c b/fs/namespace.c index 2fa9fdf7d6f5..caa9187f67e5 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/mount.h> | 24 | #include <linux/mount.h> |
25 | #include <asm/uaccess.h> | 25 | #include <asm/uaccess.h> |
26 | #include <asm/unistd.h> | 26 | #include <asm/unistd.h> |
27 | #include "pnode.h" | ||
27 | 28 | ||
28 | extern int __init init_rootfs(void); | 29 | extern int __init init_rootfs(void); |
29 | 30 | ||
@@ -37,33 +38,39 @@ static inline int sysfs_init(void) | |||
37 | #endif | 38 | #endif |
38 | 39 | ||
39 | /* spinlock for vfsmount related operations, inplace of dcache_lock */ | 40 | /* spinlock for vfsmount related operations, inplace of dcache_lock */ |
40 | __cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock); | 41 | __cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock); |
42 | |||
43 | static int event; | ||
41 | 44 | ||
42 | static struct list_head *mount_hashtable; | 45 | static struct list_head *mount_hashtable; |
43 | static int hash_mask __read_mostly, hash_bits __read_mostly; | 46 | static int hash_mask __read_mostly, hash_bits __read_mostly; |
44 | static kmem_cache_t *mnt_cache; | 47 | static kmem_cache_t *mnt_cache; |
48 | static struct rw_semaphore namespace_sem; | ||
45 | 49 | ||
46 | static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry) | 50 | static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry) |
47 | { | 51 | { |
48 | unsigned long tmp = ((unsigned long) mnt / L1_CACHE_BYTES); | 52 | unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES); |
49 | tmp += ((unsigned long) dentry / L1_CACHE_BYTES); | 53 | tmp += ((unsigned long)dentry / L1_CACHE_BYTES); |
50 | tmp = tmp + (tmp >> hash_bits); | 54 | tmp = tmp + (tmp >> hash_bits); |
51 | return tmp & hash_mask; | 55 | return tmp & hash_mask; |
52 | } | 56 | } |
53 | 57 | ||
54 | struct vfsmount *alloc_vfsmnt(const char *name) | 58 | struct vfsmount *alloc_vfsmnt(const char *name) |
55 | { | 59 | { |
56 | struct vfsmount *mnt = kmem_cache_alloc(mnt_cache, GFP_KERNEL); | 60 | struct vfsmount *mnt = kmem_cache_alloc(mnt_cache, GFP_KERNEL); |
57 | if (mnt) { | 61 | if (mnt) { |
58 | memset(mnt, 0, sizeof(struct vfsmount)); | 62 | memset(mnt, 0, sizeof(struct vfsmount)); |
59 | atomic_set(&mnt->mnt_count,1); | 63 | atomic_set(&mnt->mnt_count, 1); |
60 | INIT_LIST_HEAD(&mnt->mnt_hash); | 64 | INIT_LIST_HEAD(&mnt->mnt_hash); |
61 | INIT_LIST_HEAD(&mnt->mnt_child); | 65 | INIT_LIST_HEAD(&mnt->mnt_child); |
62 | INIT_LIST_HEAD(&mnt->mnt_mounts); | 66 | INIT_LIST_HEAD(&mnt->mnt_mounts); |
63 | INIT_LIST_HEAD(&mnt->mnt_list); | 67 | INIT_LIST_HEAD(&mnt->mnt_list); |
64 | INIT_LIST_HEAD(&mnt->mnt_expire); | 68 | INIT_LIST_HEAD(&mnt->mnt_expire); |
69 | INIT_LIST_HEAD(&mnt->mnt_share); | ||
70 | INIT_LIST_HEAD(&mnt->mnt_slave_list); | ||
71 | INIT_LIST_HEAD(&mnt->mnt_slave); | ||
65 | if (name) { | 72 | if (name) { |
66 | int size = strlen(name)+1; | 73 | int size = strlen(name) + 1; |
67 | char *newname = kmalloc(size, GFP_KERNEL); | 74 | char *newname = kmalloc(size, GFP_KERNEL); |
68 | if (newname) { | 75 | if (newname) { |
69 | memcpy(newname, name, size); | 76 | memcpy(newname, name, size); |
@@ -81,36 +88,65 @@ void free_vfsmnt(struct vfsmount *mnt) | |||
81 | } | 88 | } |
82 | 89 | ||
83 | /* | 90 | /* |
84 | * Now, lookup_mnt increments the ref count before returning | 91 | * find the first or last mount at @dentry on vfsmount @mnt depending on |
85 | * the vfsmount struct. | 92 | * @dir. If @dir is set return the first mount else return the last mount. |
86 | */ | 93 | */ |
87 | struct vfsmount *lookup_mnt(struct vfsmount *mnt, struct dentry *dentry) | 94 | struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry, |
95 | int dir) | ||
88 | { | 96 | { |
89 | struct list_head * head = mount_hashtable + hash(mnt, dentry); | 97 | struct list_head *head = mount_hashtable + hash(mnt, dentry); |
90 | struct list_head * tmp = head; | 98 | struct list_head *tmp = head; |
91 | struct vfsmount *p, *found = NULL; | 99 | struct vfsmount *p, *found = NULL; |
92 | 100 | ||
93 | spin_lock(&vfsmount_lock); | ||
94 | for (;;) { | 101 | for (;;) { |
95 | tmp = tmp->next; | 102 | tmp = dir ? tmp->next : tmp->prev; |
96 | p = NULL; | 103 | p = NULL; |
97 | if (tmp == head) | 104 | if (tmp == head) |
98 | break; | 105 | break; |
99 | p = list_entry(tmp, struct vfsmount, mnt_hash); | 106 | p = list_entry(tmp, struct vfsmount, mnt_hash); |
100 | if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) { | 107 | if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) { |
101 | found = mntget(p); | 108 | found = p; |
102 | break; | 109 | break; |
103 | } | 110 | } |
104 | } | 111 | } |
105 | spin_unlock(&vfsmount_lock); | ||
106 | return found; | 112 | return found; |
107 | } | 113 | } |
108 | 114 | ||
115 | /* | ||
116 | * lookup_mnt increments the ref count before returning | ||
117 | * the vfsmount struct. | ||
118 | */ | ||
119 | struct vfsmount *lookup_mnt(struct vfsmount *mnt, struct dentry *dentry) | ||
120 | { | ||
121 | struct vfsmount *child_mnt; | ||
122 | spin_lock(&vfsmount_lock); | ||
123 | if ((child_mnt = __lookup_mnt(mnt, dentry, 1))) | ||
124 | mntget(child_mnt); | ||
125 | spin_unlock(&vfsmount_lock); | ||
126 | return child_mnt; | ||
127 | } | ||
128 | |||
109 | static inline int check_mnt(struct vfsmount *mnt) | 129 | static inline int check_mnt(struct vfsmount *mnt) |
110 | { | 130 | { |
111 | return mnt->mnt_namespace == current->namespace; | 131 | return mnt->mnt_namespace == current->namespace; |
112 | } | 132 | } |
113 | 133 | ||
134 | static void touch_namespace(struct namespace *ns) | ||
135 | { | ||
136 | if (ns) { | ||
137 | ns->event = ++event; | ||
138 | wake_up_interruptible(&ns->poll); | ||
139 | } | ||
140 | } | ||
141 | |||
142 | static void __touch_namespace(struct namespace *ns) | ||
143 | { | ||
144 | if (ns && ns->event != event) { | ||
145 | ns->event = event; | ||
146 | wake_up_interruptible(&ns->poll); | ||
147 | } | ||
148 | } | ||
149 | |||
114 | static void detach_mnt(struct vfsmount *mnt, struct nameidata *old_nd) | 150 | static void detach_mnt(struct vfsmount *mnt, struct nameidata *old_nd) |
115 | { | 151 | { |
116 | old_nd->dentry = mnt->mnt_mountpoint; | 152 | old_nd->dentry = mnt->mnt_mountpoint; |
@@ -122,13 +158,43 @@ static void detach_mnt(struct vfsmount *mnt, struct nameidata *old_nd) | |||
122 | old_nd->dentry->d_mounted--; | 158 | old_nd->dentry->d_mounted--; |
123 | } | 159 | } |
124 | 160 | ||
161 | void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry, | ||
162 | struct vfsmount *child_mnt) | ||
163 | { | ||
164 | child_mnt->mnt_parent = mntget(mnt); | ||
165 | child_mnt->mnt_mountpoint = dget(dentry); | ||
166 | dentry->d_mounted++; | ||
167 | } | ||
168 | |||
125 | static void attach_mnt(struct vfsmount *mnt, struct nameidata *nd) | 169 | static void attach_mnt(struct vfsmount *mnt, struct nameidata *nd) |
126 | { | 170 | { |
127 | mnt->mnt_parent = mntget(nd->mnt); | 171 | mnt_set_mountpoint(nd->mnt, nd->dentry, mnt); |
128 | mnt->mnt_mountpoint = dget(nd->dentry); | 172 | list_add_tail(&mnt->mnt_hash, mount_hashtable + |
129 | list_add(&mnt->mnt_hash, mount_hashtable+hash(nd->mnt, nd->dentry)); | 173 | hash(nd->mnt, nd->dentry)); |
130 | list_add_tail(&mnt->mnt_child, &nd->mnt->mnt_mounts); | 174 | list_add_tail(&mnt->mnt_child, &nd->mnt->mnt_mounts); |
131 | nd->dentry->d_mounted++; | 175 | } |
176 | |||
177 | /* | ||
178 | * the caller must hold vfsmount_lock | ||
179 | */ | ||
180 | static void commit_tree(struct vfsmount *mnt) | ||
181 | { | ||
182 | struct vfsmount *parent = mnt->mnt_parent; | ||
183 | struct vfsmount *m; | ||
184 | LIST_HEAD(head); | ||
185 | struct namespace *n = parent->mnt_namespace; | ||
186 | |||
187 | BUG_ON(parent == mnt); | ||
188 | |||
189 | list_add_tail(&head, &mnt->mnt_list); | ||
190 | list_for_each_entry(m, &head, mnt_list) | ||
191 | m->mnt_namespace = n; | ||
192 | list_splice(&head, n->list.prev); | ||
193 | |||
194 | list_add_tail(&mnt->mnt_hash, mount_hashtable + | ||
195 | hash(parent, mnt->mnt_mountpoint)); | ||
196 | list_add_tail(&mnt->mnt_child, &parent->mnt_mounts); | ||
197 | touch_namespace(n); | ||
132 | } | 198 | } |
133 | 199 | ||
134 | static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root) | 200 | static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root) |
@@ -147,8 +213,18 @@ static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root) | |||
147 | return list_entry(next, struct vfsmount, mnt_child); | 213 | return list_entry(next, struct vfsmount, mnt_child); |
148 | } | 214 | } |
149 | 215 | ||
150 | static struct vfsmount * | 216 | static struct vfsmount *skip_mnt_tree(struct vfsmount *p) |
151 | clone_mnt(struct vfsmount *old, struct dentry *root) | 217 | { |
218 | struct list_head *prev = p->mnt_mounts.prev; | ||
219 | while (prev != &p->mnt_mounts) { | ||
220 | p = list_entry(prev, struct vfsmount, mnt_child); | ||
221 | prev = p->mnt_mounts.prev; | ||
222 | } | ||
223 | return p; | ||
224 | } | ||
225 | |||
226 | static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root, | ||
227 | int flag) | ||
152 | { | 228 | { |
153 | struct super_block *sb = old->mnt_sb; | 229 | struct super_block *sb = old->mnt_sb; |
154 | struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname); | 230 | struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname); |
@@ -160,19 +236,34 @@ clone_mnt(struct vfsmount *old, struct dentry *root) | |||
160 | mnt->mnt_root = dget(root); | 236 | mnt->mnt_root = dget(root); |
161 | mnt->mnt_mountpoint = mnt->mnt_root; | 237 | mnt->mnt_mountpoint = mnt->mnt_root; |
162 | mnt->mnt_parent = mnt; | 238 | mnt->mnt_parent = mnt; |
163 | mnt->mnt_namespace = current->namespace; | 239 | |
240 | if (flag & CL_SLAVE) { | ||
241 | list_add(&mnt->mnt_slave, &old->mnt_slave_list); | ||
242 | mnt->mnt_master = old; | ||
243 | CLEAR_MNT_SHARED(mnt); | ||
244 | } else { | ||
245 | if ((flag & CL_PROPAGATION) || IS_MNT_SHARED(old)) | ||
246 | list_add(&mnt->mnt_share, &old->mnt_share); | ||
247 | if (IS_MNT_SLAVE(old)) | ||
248 | list_add(&mnt->mnt_slave, &old->mnt_slave); | ||
249 | mnt->mnt_master = old->mnt_master; | ||
250 | } | ||
251 | if (flag & CL_MAKE_SHARED) | ||
252 | set_mnt_shared(mnt); | ||
164 | 253 | ||
165 | /* stick the duplicate mount on the same expiry list | 254 | /* stick the duplicate mount on the same expiry list |
166 | * as the original if that was on one */ | 255 | * as the original if that was on one */ |
167 | spin_lock(&vfsmount_lock); | 256 | if (flag & CL_EXPIRE) { |
168 | if (!list_empty(&old->mnt_expire)) | 257 | spin_lock(&vfsmount_lock); |
169 | list_add(&mnt->mnt_expire, &old->mnt_expire); | 258 | if (!list_empty(&old->mnt_expire)) |
170 | spin_unlock(&vfsmount_lock); | 259 | list_add(&mnt->mnt_expire, &old->mnt_expire); |
260 | spin_unlock(&vfsmount_lock); | ||
261 | } | ||
171 | } | 262 | } |
172 | return mnt; | 263 | return mnt; |
173 | } | 264 | } |
174 | 265 | ||
175 | void __mntput(struct vfsmount *mnt) | 266 | static inline void __mntput(struct vfsmount *mnt) |
176 | { | 267 | { |
177 | struct super_block *sb = mnt->mnt_sb; | 268 | struct super_block *sb = mnt->mnt_sb; |
178 | dput(mnt->mnt_root); | 269 | dput(mnt->mnt_root); |
@@ -180,7 +271,46 @@ void __mntput(struct vfsmount *mnt) | |||
180 | deactivate_super(sb); | 271 | deactivate_super(sb); |
181 | } | 272 | } |
182 | 273 | ||
183 | EXPORT_SYMBOL(__mntput); | 274 | void mntput_no_expire(struct vfsmount *mnt) |
275 | { | ||
276 | repeat: | ||
277 | if (atomic_dec_and_lock(&mnt->mnt_count, &vfsmount_lock)) { | ||
278 | if (likely(!mnt->mnt_pinned)) { | ||
279 | spin_unlock(&vfsmount_lock); | ||
280 | __mntput(mnt); | ||
281 | return; | ||
282 | } | ||
283 | atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count); | ||
284 | mnt->mnt_pinned = 0; | ||
285 | spin_unlock(&vfsmount_lock); | ||
286 | acct_auto_close_mnt(mnt); | ||
287 | security_sb_umount_close(mnt); | ||
288 | goto repeat; | ||
289 | } | ||
290 | } | ||
291 | |||
292 | EXPORT_SYMBOL(mntput_no_expire); | ||
293 | |||
294 | void mnt_pin(struct vfsmount *mnt) | ||
295 | { | ||
296 | spin_lock(&vfsmount_lock); | ||
297 | mnt->mnt_pinned++; | ||
298 | spin_unlock(&vfsmount_lock); | ||
299 | } | ||
300 | |||
301 | EXPORT_SYMBOL(mnt_pin); | ||
302 | |||
303 | void mnt_unpin(struct vfsmount *mnt) | ||
304 | { | ||
305 | spin_lock(&vfsmount_lock); | ||
306 | if (mnt->mnt_pinned) { | ||
307 | atomic_inc(&mnt->mnt_count); | ||
308 | mnt->mnt_pinned--; | ||
309 | } | ||
310 | spin_unlock(&vfsmount_lock); | ||
311 | } | ||
312 | |||
313 | EXPORT_SYMBOL(mnt_unpin); | ||
184 | 314 | ||
185 | /* iterator */ | 315 | /* iterator */ |
186 | static void *m_start(struct seq_file *m, loff_t *pos) | 316 | static void *m_start(struct seq_file *m, loff_t *pos) |
@@ -189,7 +319,7 @@ static void *m_start(struct seq_file *m, loff_t *pos) | |||
189 | struct list_head *p; | 319 | struct list_head *p; |
190 | loff_t l = *pos; | 320 | loff_t l = *pos; |
191 | 321 | ||
192 | down_read(&n->sem); | 322 | down_read(&namespace_sem); |
193 | list_for_each(p, &n->list) | 323 | list_for_each(p, &n->list) |
194 | if (!l--) | 324 | if (!l--) |
195 | return list_entry(p, struct vfsmount, mnt_list); | 325 | return list_entry(p, struct vfsmount, mnt_list); |
@@ -201,13 +331,12 @@ static void *m_next(struct seq_file *m, void *v, loff_t *pos) | |||
201 | struct namespace *n = m->private; | 331 | struct namespace *n = m->private; |
202 | struct list_head *p = ((struct vfsmount *)v)->mnt_list.next; | 332 | struct list_head *p = ((struct vfsmount *)v)->mnt_list.next; |
203 | (*pos)++; | 333 | (*pos)++; |
204 | return p==&n->list ? NULL : list_entry(p, struct vfsmount, mnt_list); | 334 | return p == &n->list ? NULL : list_entry(p, struct vfsmount, mnt_list); |
205 | } | 335 | } |
206 | 336 | ||
207 | static void m_stop(struct seq_file *m, void *v) | 337 | static void m_stop(struct seq_file *m, void *v) |
208 | { | 338 | { |
209 | struct namespace *n = m->private; | 339 | up_read(&namespace_sem); |
210 | up_read(&n->sem); | ||
211 | } | 340 | } |
212 | 341 | ||
213 | static inline void mangle(struct seq_file *m, const char *s) | 342 | static inline void mangle(struct seq_file *m, const char *s) |
@@ -275,35 +404,14 @@ struct seq_operations mounts_op = { | |||
275 | */ | 404 | */ |
276 | int may_umount_tree(struct vfsmount *mnt) | 405 | int may_umount_tree(struct vfsmount *mnt) |
277 | { | 406 | { |
278 | struct list_head *next; | 407 | int actual_refs = 0; |
279 | struct vfsmount *this_parent = mnt; | 408 | int minimum_refs = 0; |
280 | int actual_refs; | 409 | struct vfsmount *p; |
281 | int minimum_refs; | ||
282 | 410 | ||
283 | spin_lock(&vfsmount_lock); | 411 | spin_lock(&vfsmount_lock); |
284 | actual_refs = atomic_read(&mnt->mnt_count); | 412 | for (p = mnt; p; p = next_mnt(p, mnt)) { |
285 | minimum_refs = 2; | ||
286 | repeat: | ||
287 | next = this_parent->mnt_mounts.next; | ||
288 | resume: | ||
289 | while (next != &this_parent->mnt_mounts) { | ||
290 | struct vfsmount *p = list_entry(next, struct vfsmount, mnt_child); | ||
291 | |||
292 | next = next->next; | ||
293 | |||
294 | actual_refs += atomic_read(&p->mnt_count); | 413 | actual_refs += atomic_read(&p->mnt_count); |
295 | minimum_refs += 2; | 414 | minimum_refs += 2; |
296 | |||
297 | if (!list_empty(&p->mnt_mounts)) { | ||
298 | this_parent = p; | ||
299 | goto repeat; | ||
300 | } | ||
301 | } | ||
302 | |||
303 | if (this_parent != mnt) { | ||
304 | next = this_parent->mnt_child.next; | ||
305 | this_parent = this_parent->mnt_parent; | ||
306 | goto resume; | ||
307 | } | 415 | } |
308 | spin_unlock(&vfsmount_lock); | 416 | spin_unlock(&vfsmount_lock); |
309 | 417 | ||
@@ -330,45 +438,67 @@ EXPORT_SYMBOL(may_umount_tree); | |||
330 | */ | 438 | */ |
331 | int may_umount(struct vfsmount *mnt) | 439 | int may_umount(struct vfsmount *mnt) |
332 | { | 440 | { |
333 | if (atomic_read(&mnt->mnt_count) > 2) | 441 | int ret = 0; |
334 | return -EBUSY; | 442 | spin_lock(&vfsmount_lock); |
335 | return 0; | 443 | if (propagate_mount_busy(mnt, 2)) |
444 | ret = -EBUSY; | ||
445 | spin_unlock(&vfsmount_lock); | ||
446 | return ret; | ||
336 | } | 447 | } |
337 | 448 | ||
338 | EXPORT_SYMBOL(may_umount); | 449 | EXPORT_SYMBOL(may_umount); |
339 | 450 | ||
340 | static void umount_tree(struct vfsmount *mnt) | 451 | void release_mounts(struct list_head *head) |
452 | { | ||
453 | struct vfsmount *mnt; | ||
454 | while(!list_empty(head)) { | ||
455 | mnt = list_entry(head->next, struct vfsmount, mnt_hash); | ||
456 | list_del_init(&mnt->mnt_hash); | ||
457 | if (mnt->mnt_parent != mnt) { | ||
458 | struct dentry *dentry; | ||
459 | struct vfsmount *m; | ||
460 | spin_lock(&vfsmount_lock); | ||
461 | dentry = mnt->mnt_mountpoint; | ||
462 | m = mnt->mnt_parent; | ||
463 | mnt->mnt_mountpoint = mnt->mnt_root; | ||
464 | mnt->mnt_parent = mnt; | ||
465 | spin_unlock(&vfsmount_lock); | ||
466 | dput(dentry); | ||
467 | mntput(m); | ||
468 | } | ||
469 | mntput(mnt); | ||
470 | } | ||
471 | } | ||
472 | |||
473 | void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill) | ||
341 | { | 474 | { |
342 | struct vfsmount *p; | 475 | struct vfsmount *p; |
343 | LIST_HEAD(kill); | ||
344 | 476 | ||
345 | for (p = mnt; p; p = next_mnt(p, mnt)) { | 477 | for (p = mnt; p; p = next_mnt(p, mnt)) { |
346 | list_del(&p->mnt_list); | 478 | list_del(&p->mnt_hash); |
347 | list_add(&p->mnt_list, &kill); | 479 | list_add(&p->mnt_hash, kill); |
348 | p->mnt_namespace = NULL; | ||
349 | } | 480 | } |
350 | 481 | ||
351 | while (!list_empty(&kill)) { | 482 | if (propagate) |
352 | mnt = list_entry(kill.next, struct vfsmount, mnt_list); | 483 | propagate_umount(kill); |
353 | list_del_init(&mnt->mnt_list); | 484 | |
354 | list_del_init(&mnt->mnt_expire); | 485 | list_for_each_entry(p, kill, mnt_hash) { |
355 | if (mnt->mnt_parent == mnt) { | 486 | list_del_init(&p->mnt_expire); |
356 | spin_unlock(&vfsmount_lock); | 487 | list_del_init(&p->mnt_list); |
357 | } else { | 488 | __touch_namespace(p->mnt_namespace); |
358 | struct nameidata old_nd; | 489 | p->mnt_namespace = NULL; |
359 | detach_mnt(mnt, &old_nd); | 490 | list_del_init(&p->mnt_child); |
360 | spin_unlock(&vfsmount_lock); | 491 | if (p->mnt_parent != p) |
361 | path_release(&old_nd); | 492 | mnt->mnt_mountpoint->d_mounted--; |
362 | } | 493 | change_mnt_propagation(p, MS_PRIVATE); |
363 | mntput(mnt); | ||
364 | spin_lock(&vfsmount_lock); | ||
365 | } | 494 | } |
366 | } | 495 | } |
367 | 496 | ||
368 | static int do_umount(struct vfsmount *mnt, int flags) | 497 | static int do_umount(struct vfsmount *mnt, int flags) |
369 | { | 498 | { |
370 | struct super_block * sb = mnt->mnt_sb; | 499 | struct super_block *sb = mnt->mnt_sb; |
371 | int retval; | 500 | int retval; |
501 | LIST_HEAD(umount_list); | ||
372 | 502 | ||
373 | retval = security_sb_umount(mnt, flags); | 503 | retval = security_sb_umount(mnt, flags); |
374 | if (retval) | 504 | if (retval) |
@@ -403,7 +533,7 @@ static int do_umount(struct vfsmount *mnt, int flags) | |||
403 | */ | 533 | */ |
404 | 534 | ||
405 | lock_kernel(); | 535 | lock_kernel(); |
406 | if( (flags&MNT_FORCE) && sb->s_op->umount_begin) | 536 | if ((flags & MNT_FORCE) && sb->s_op->umount_begin) |
407 | sb->s_op->umount_begin(sb); | 537 | sb->s_op->umount_begin(sb); |
408 | unlock_kernel(); | 538 | unlock_kernel(); |
409 | 539 | ||
@@ -432,29 +562,21 @@ static int do_umount(struct vfsmount *mnt, int flags) | |||
432 | return retval; | 562 | return retval; |
433 | } | 563 | } |
434 | 564 | ||
435 | down_write(¤t->namespace->sem); | 565 | down_write(&namespace_sem); |
436 | spin_lock(&vfsmount_lock); | 566 | spin_lock(&vfsmount_lock); |
567 | event++; | ||
437 | 568 | ||
438 | if (atomic_read(&sb->s_active) == 1) { | ||
439 | /* last instance - try to be smart */ | ||
440 | spin_unlock(&vfsmount_lock); | ||
441 | lock_kernel(); | ||
442 | DQUOT_OFF(sb); | ||
443 | acct_auto_close(sb); | ||
444 | unlock_kernel(); | ||
445 | security_sb_umount_close(mnt); | ||
446 | spin_lock(&vfsmount_lock); | ||
447 | } | ||
448 | retval = -EBUSY; | 569 | retval = -EBUSY; |
449 | if (atomic_read(&mnt->mnt_count) == 2 || flags & MNT_DETACH) { | 570 | if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) { |
450 | if (!list_empty(&mnt->mnt_list)) | 571 | if (!list_empty(&mnt->mnt_list)) |
451 | umount_tree(mnt); | 572 | umount_tree(mnt, 1, &umount_list); |
452 | retval = 0; | 573 | retval = 0; |
453 | } | 574 | } |
454 | spin_unlock(&vfsmount_lock); | 575 | spin_unlock(&vfsmount_lock); |
455 | if (retval) | 576 | if (retval) |
456 | security_sb_umount_busy(mnt); | 577 | security_sb_umount_busy(mnt); |
457 | up_write(¤t->namespace->sem); | 578 | up_write(&namespace_sem); |
579 | release_mounts(&umount_list); | ||
458 | return retval; | 580 | return retval; |
459 | } | 581 | } |
460 | 582 | ||
@@ -494,12 +616,11 @@ out: | |||
494 | #ifdef __ARCH_WANT_SYS_OLDUMOUNT | 616 | #ifdef __ARCH_WANT_SYS_OLDUMOUNT |
495 | 617 | ||
496 | /* | 618 | /* |
497 | * The 2.0 compatible umount. No flags. | 619 | * The 2.0 compatible umount. No flags. |
498 | */ | 620 | */ |
499 | |||
500 | asmlinkage long sys_oldumount(char __user * name) | 621 | asmlinkage long sys_oldumount(char __user * name) |
501 | { | 622 | { |
502 | return sys_umount(name,0); | 623 | return sys_umount(name, 0); |
503 | } | 624 | } |
504 | 625 | ||
505 | #endif | 626 | #endif |
@@ -522,8 +643,7 @@ static int mount_is_safe(struct nameidata *nd) | |||
522 | #endif | 643 | #endif |
523 | } | 644 | } |
524 | 645 | ||
525 | static int | 646 | static int lives_below_in_same_fs(struct dentry *d, struct dentry *dentry) |
526 | lives_below_in_same_fs(struct dentry *d, struct dentry *dentry) | ||
527 | { | 647 | { |
528 | while (1) { | 648 | while (1) { |
529 | if (d == dentry) | 649 | if (d == dentry) |
@@ -534,12 +654,16 @@ lives_below_in_same_fs(struct dentry *d, struct dentry *dentry) | |||
534 | } | 654 | } |
535 | } | 655 | } |
536 | 656 | ||
537 | static struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry) | 657 | struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry, |
658 | int flag) | ||
538 | { | 659 | { |
539 | struct vfsmount *res, *p, *q, *r, *s; | 660 | struct vfsmount *res, *p, *q, *r, *s; |
540 | struct nameidata nd; | 661 | struct nameidata nd; |
541 | 662 | ||
542 | res = q = clone_mnt(mnt, dentry); | 663 | if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt)) |
664 | return NULL; | ||
665 | |||
666 | res = q = clone_mnt(mnt, dentry, flag); | ||
543 | if (!q) | 667 | if (!q) |
544 | goto Enomem; | 668 | goto Enomem; |
545 | q->mnt_mountpoint = mnt->mnt_mountpoint; | 669 | q->mnt_mountpoint = mnt->mnt_mountpoint; |
@@ -550,6 +674,10 @@ static struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry) | |||
550 | continue; | 674 | continue; |
551 | 675 | ||
552 | for (s = r; s; s = next_mnt(s, r)) { | 676 | for (s = r; s; s = next_mnt(s, r)) { |
677 | if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(s)) { | ||
678 | s = skip_mnt_tree(s); | ||
679 | continue; | ||
680 | } | ||
553 | while (p != s->mnt_parent) { | 681 | while (p != s->mnt_parent) { |
554 | p = p->mnt_parent; | 682 | p = p->mnt_parent; |
555 | q = q->mnt_parent; | 683 | q = q->mnt_parent; |
@@ -557,7 +685,7 @@ static struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry) | |||
557 | p = s; | 685 | p = s; |
558 | nd.mnt = q; | 686 | nd.mnt = q; |
559 | nd.dentry = p->mnt_mountpoint; | 687 | nd.dentry = p->mnt_mountpoint; |
560 | q = clone_mnt(p, p->mnt_root); | 688 | q = clone_mnt(p, p->mnt_root, flag); |
561 | if (!q) | 689 | if (!q) |
562 | goto Enomem; | 690 | goto Enomem; |
563 | spin_lock(&vfsmount_lock); | 691 | spin_lock(&vfsmount_lock); |
@@ -567,15 +695,114 @@ static struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry) | |||
567 | } | 695 | } |
568 | } | 696 | } |
569 | return res; | 697 | return res; |
570 | Enomem: | 698 | Enomem: |
571 | if (res) { | 699 | if (res) { |
700 | LIST_HEAD(umount_list); | ||
572 | spin_lock(&vfsmount_lock); | 701 | spin_lock(&vfsmount_lock); |
573 | umount_tree(res); | 702 | umount_tree(res, 0, &umount_list); |
574 | spin_unlock(&vfsmount_lock); | 703 | spin_unlock(&vfsmount_lock); |
704 | release_mounts(&umount_list); | ||
575 | } | 705 | } |
576 | return NULL; | 706 | return NULL; |
577 | } | 707 | } |
578 | 708 | ||
709 | /* | ||
710 | * @source_mnt : mount tree to be attached | ||
711 | * @nd : place the mount tree @source_mnt is attached | ||
712 | * @parent_nd : if non-null, detach the source_mnt from its parent and | ||
713 | * store the parent mount and mountpoint dentry. | ||
714 | * (done when source_mnt is moved) | ||
715 | * | ||
716 | * NOTE: in the table below explains the semantics when a source mount | ||
717 | * of a given type is attached to a destination mount of a given type. | ||
718 | * --------------------------------------------------------------------------- | ||
719 | * | BIND MOUNT OPERATION | | ||
720 | * |************************************************************************** | ||
721 | * | source-->| shared | private | slave | unbindable | | ||
722 | * | dest | | | | | | ||
723 | * | | | | | | | | ||
724 | * | v | | | | | | ||
725 | * |************************************************************************** | ||
726 | * | shared | shared (++) | shared (+) | shared(+++)| invalid | | ||
727 | * | | | | | | | ||
728 | * |non-shared| shared (+) | private | slave (*) | invalid | | ||
729 | * *************************************************************************** | ||
730 | * A bind operation clones the source mount and mounts the clone on the | ||
731 | * destination mount. | ||
732 | * | ||
733 | * (++) the cloned mount is propagated to all the mounts in the propagation | ||
734 | * tree of the destination mount and the cloned mount is added to | ||
735 | * the peer group of the source mount. | ||
736 | * (+) the cloned mount is created under the destination mount and is marked | ||
737 | * as shared. The cloned mount is added to the peer group of the source | ||
738 | * mount. | ||
739 | * (+++) the mount is propagated to all the mounts in the propagation tree | ||
740 | * of the destination mount and the cloned mount is made slave | ||
741 | * of the same master as that of the source mount. The cloned mount | ||
742 | * is marked as 'shared and slave'. | ||
743 | * (*) the cloned mount is made a slave of the same master as that of the | ||
744 | * source mount. | ||
745 | * | ||
746 | * --------------------------------------------------------------------------- | ||
747 | * | MOVE MOUNT OPERATION | | ||
748 | * |************************************************************************** | ||
749 | * | source-->| shared | private | slave | unbindable | | ||
750 | * | dest | | | | | | ||
751 | * | | | | | | | | ||
752 | * | v | | | | | | ||
753 | * |************************************************************************** | ||
754 | * | shared | shared (+) | shared (+) | shared(+++) | invalid | | ||
755 | * | | | | | | | ||
756 | * |non-shared| shared (+*) | private | slave (*) | unbindable | | ||
757 | * *************************************************************************** | ||
758 | * | ||
759 | * (+) the mount is moved to the destination. And is then propagated to | ||
760 | * all the mounts in the propagation tree of the destination mount. | ||
761 | * (+*) the mount is moved to the destination. | ||
762 | * (+++) the mount is moved to the destination and is then propagated to | ||
763 | * all the mounts belonging to the destination mount's propagation tree. | ||
764 | * the mount is marked as 'shared and slave'. | ||
765 | * (*) the mount continues to be a slave at the new location. | ||
766 | * | ||
767 | * if the source mount is a tree, the operations explained above is | ||
768 | * applied to each mount in the tree. | ||
769 | * Must be called without spinlocks held, since this function can sleep | ||
770 | * in allocations. | ||
771 | */ | ||
772 | static int attach_recursive_mnt(struct vfsmount *source_mnt, | ||
773 | struct nameidata *nd, struct nameidata *parent_nd) | ||
774 | { | ||
775 | LIST_HEAD(tree_list); | ||
776 | struct vfsmount *dest_mnt = nd->mnt; | ||
777 | struct dentry *dest_dentry = nd->dentry; | ||
778 | struct vfsmount *child, *p; | ||
779 | |||
780 | if (propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list)) | ||
781 | return -EINVAL; | ||
782 | |||
783 | if (IS_MNT_SHARED(dest_mnt)) { | ||
784 | for (p = source_mnt; p; p = next_mnt(p, source_mnt)) | ||
785 | set_mnt_shared(p); | ||
786 | } | ||
787 | |||
788 | spin_lock(&vfsmount_lock); | ||
789 | if (parent_nd) { | ||
790 | detach_mnt(source_mnt, parent_nd); | ||
791 | attach_mnt(source_mnt, nd); | ||
792 | touch_namespace(current->namespace); | ||
793 | } else { | ||
794 | mnt_set_mountpoint(dest_mnt, dest_dentry, source_mnt); | ||
795 | commit_tree(source_mnt); | ||
796 | } | ||
797 | |||
798 | list_for_each_entry_safe(child, p, &tree_list, mnt_hash) { | ||
799 | list_del_init(&child->mnt_hash); | ||
800 | commit_tree(child); | ||
801 | } | ||
802 | spin_unlock(&vfsmount_lock); | ||
803 | return 0; | ||
804 | } | ||
805 | |||
579 | static int graft_tree(struct vfsmount *mnt, struct nameidata *nd) | 806 | static int graft_tree(struct vfsmount *mnt, struct nameidata *nd) |
580 | { | 807 | { |
581 | int err; | 808 | int err; |
@@ -596,17 +823,8 @@ static int graft_tree(struct vfsmount *mnt, struct nameidata *nd) | |||
596 | goto out_unlock; | 823 | goto out_unlock; |
597 | 824 | ||
598 | err = -ENOENT; | 825 | err = -ENOENT; |
599 | spin_lock(&vfsmount_lock); | 826 | if (IS_ROOT(nd->dentry) || !d_unhashed(nd->dentry)) |
600 | if (IS_ROOT(nd->dentry) || !d_unhashed(nd->dentry)) { | 827 | err = attach_recursive_mnt(mnt, nd, NULL); |
601 | struct list_head head; | ||
602 | |||
603 | attach_mnt(mnt, nd); | ||
604 | list_add_tail(&head, &mnt->mnt_list); | ||
605 | list_splice(&head, current->namespace->list.prev); | ||
606 | mntget(mnt); | ||
607 | err = 0; | ||
608 | } | ||
609 | spin_unlock(&vfsmount_lock); | ||
610 | out_unlock: | 828 | out_unlock: |
611 | up(&nd->dentry->d_inode->i_sem); | 829 | up(&nd->dentry->d_inode->i_sem); |
612 | if (!err) | 830 | if (!err) |
@@ -615,6 +833,27 @@ out_unlock: | |||
615 | } | 833 | } |
616 | 834 | ||
617 | /* | 835 | /* |
836 | * recursively change the type of the mountpoint. | ||
837 | */ | ||
838 | static int do_change_type(struct nameidata *nd, int flag) | ||
839 | { | ||
840 | struct vfsmount *m, *mnt = nd->mnt; | ||
841 | int recurse = flag & MS_REC; | ||
842 | int type = flag & ~MS_REC; | ||
843 | |||
844 | if (nd->dentry != nd->mnt->mnt_root) | ||
845 | return -EINVAL; | ||
846 | |||
847 | down_write(&namespace_sem); | ||
848 | spin_lock(&vfsmount_lock); | ||
849 | for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL)) | ||
850 | change_mnt_propagation(m, type); | ||
851 | spin_unlock(&vfsmount_lock); | ||
852 | up_write(&namespace_sem); | ||
853 | return 0; | ||
854 | } | ||
855 | |||
856 | /* | ||
618 | * do loopback mount. | 857 | * do loopback mount. |
619 | */ | 858 | */ |
620 | static int do_loopback(struct nameidata *nd, char *old_name, int recurse) | 859 | static int do_loopback(struct nameidata *nd, char *old_name, int recurse) |
@@ -630,32 +869,34 @@ static int do_loopback(struct nameidata *nd, char *old_name, int recurse) | |||
630 | if (err) | 869 | if (err) |
631 | return err; | 870 | return err; |
632 | 871 | ||
633 | down_write(¤t->namespace->sem); | 872 | down_write(&namespace_sem); |
634 | err = -EINVAL; | 873 | err = -EINVAL; |
635 | if (check_mnt(nd->mnt) && (!recurse || check_mnt(old_nd.mnt))) { | 874 | if (IS_MNT_UNBINDABLE(old_nd.mnt)) |
636 | err = -ENOMEM; | 875 | goto out; |
637 | if (recurse) | ||
638 | mnt = copy_tree(old_nd.mnt, old_nd.dentry); | ||
639 | else | ||
640 | mnt = clone_mnt(old_nd.mnt, old_nd.dentry); | ||
641 | } | ||
642 | 876 | ||
643 | if (mnt) { | 877 | if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt)) |
644 | /* stop bind mounts from expiring */ | 878 | goto out; |
879 | |||
880 | err = -ENOMEM; | ||
881 | if (recurse) | ||
882 | mnt = copy_tree(old_nd.mnt, old_nd.dentry, 0); | ||
883 | else | ||
884 | mnt = clone_mnt(old_nd.mnt, old_nd.dentry, 0); | ||
885 | |||
886 | if (!mnt) | ||
887 | goto out; | ||
888 | |||
889 | err = graft_tree(mnt, nd); | ||
890 | if (err) { | ||
891 | LIST_HEAD(umount_list); | ||
645 | spin_lock(&vfsmount_lock); | 892 | spin_lock(&vfsmount_lock); |
646 | list_del_init(&mnt->mnt_expire); | 893 | umount_tree(mnt, 0, &umount_list); |
647 | spin_unlock(&vfsmount_lock); | 894 | spin_unlock(&vfsmount_lock); |
648 | 895 | release_mounts(&umount_list); | |
649 | err = graft_tree(mnt, nd); | ||
650 | if (err) { | ||
651 | spin_lock(&vfsmount_lock); | ||
652 | umount_tree(mnt); | ||
653 | spin_unlock(&vfsmount_lock); | ||
654 | } else | ||
655 | mntput(mnt); | ||
656 | } | 896 | } |
657 | 897 | ||
658 | up_write(¤t->namespace->sem); | 898 | out: |
899 | up_write(&namespace_sem); | ||
659 | path_release(&old_nd); | 900 | path_release(&old_nd); |
660 | return err; | 901 | return err; |
661 | } | 902 | } |
@@ -665,12 +906,11 @@ static int do_loopback(struct nameidata *nd, char *old_name, int recurse) | |||
665 | * If you've mounted a non-root directory somewhere and want to do remount | 906 | * If you've mounted a non-root directory somewhere and want to do remount |
666 | * on it - tough luck. | 907 | * on it - tough luck. |
667 | */ | 908 | */ |
668 | |||
669 | static int do_remount(struct nameidata *nd, int flags, int mnt_flags, | 909 | static int do_remount(struct nameidata *nd, int flags, int mnt_flags, |
670 | void *data) | 910 | void *data) |
671 | { | 911 | { |
672 | int err; | 912 | int err; |
673 | struct super_block * sb = nd->mnt->mnt_sb; | 913 | struct super_block *sb = nd->mnt->mnt_sb; |
674 | 914 | ||
675 | if (!capable(CAP_SYS_ADMIN)) | 915 | if (!capable(CAP_SYS_ADMIN)) |
676 | return -EPERM; | 916 | return -EPERM; |
@@ -684,13 +924,23 @@ static int do_remount(struct nameidata *nd, int flags, int mnt_flags, | |||
684 | down_write(&sb->s_umount); | 924 | down_write(&sb->s_umount); |
685 | err = do_remount_sb(sb, flags, data, 0); | 925 | err = do_remount_sb(sb, flags, data, 0); |
686 | if (!err) | 926 | if (!err) |
687 | nd->mnt->mnt_flags=mnt_flags; | 927 | nd->mnt->mnt_flags = mnt_flags; |
688 | up_write(&sb->s_umount); | 928 | up_write(&sb->s_umount); |
689 | if (!err) | 929 | if (!err) |
690 | security_sb_post_remount(nd->mnt, flags, data); | 930 | security_sb_post_remount(nd->mnt, flags, data); |
691 | return err; | 931 | return err; |
692 | } | 932 | } |
693 | 933 | ||
934 | static inline int tree_contains_unbindable(struct vfsmount *mnt) | ||
935 | { | ||
936 | struct vfsmount *p; | ||
937 | for (p = mnt; p; p = next_mnt(p, mnt)) { | ||
938 | if (IS_MNT_UNBINDABLE(p)) | ||
939 | return 1; | ||
940 | } | ||
941 | return 0; | ||
942 | } | ||
943 | |||
694 | static int do_move_mount(struct nameidata *nd, char *old_name) | 944 | static int do_move_mount(struct nameidata *nd, char *old_name) |
695 | { | 945 | { |
696 | struct nameidata old_nd, parent_nd; | 946 | struct nameidata old_nd, parent_nd; |
@@ -704,8 +954,8 @@ static int do_move_mount(struct nameidata *nd, char *old_name) | |||
704 | if (err) | 954 | if (err) |
705 | return err; | 955 | return err; |
706 | 956 | ||
707 | down_write(¤t->namespace->sem); | 957 | down_write(&namespace_sem); |
708 | while(d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry)) | 958 | while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry)) |
709 | ; | 959 | ; |
710 | err = -EINVAL; | 960 | err = -EINVAL; |
711 | if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt)) | 961 | if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt)) |
@@ -716,39 +966,47 @@ static int do_move_mount(struct nameidata *nd, char *old_name) | |||
716 | if (IS_DEADDIR(nd->dentry->d_inode)) | 966 | if (IS_DEADDIR(nd->dentry->d_inode)) |
717 | goto out1; | 967 | goto out1; |
718 | 968 | ||
719 | spin_lock(&vfsmount_lock); | ||
720 | if (!IS_ROOT(nd->dentry) && d_unhashed(nd->dentry)) | 969 | if (!IS_ROOT(nd->dentry) && d_unhashed(nd->dentry)) |
721 | goto out2; | 970 | goto out1; |
722 | 971 | ||
723 | err = -EINVAL; | 972 | err = -EINVAL; |
724 | if (old_nd.dentry != old_nd.mnt->mnt_root) | 973 | if (old_nd.dentry != old_nd.mnt->mnt_root) |
725 | goto out2; | 974 | goto out1; |
726 | 975 | ||
727 | if (old_nd.mnt == old_nd.mnt->mnt_parent) | 976 | if (old_nd.mnt == old_nd.mnt->mnt_parent) |
728 | goto out2; | 977 | goto out1; |
729 | 978 | ||
730 | if (S_ISDIR(nd->dentry->d_inode->i_mode) != | 979 | if (S_ISDIR(nd->dentry->d_inode->i_mode) != |
731 | S_ISDIR(old_nd.dentry->d_inode->i_mode)) | 980 | S_ISDIR(old_nd.dentry->d_inode->i_mode)) |
732 | goto out2; | 981 | goto out1; |
733 | 982 | /* | |
983 | * Don't move a mount residing in a shared parent. | ||
984 | */ | ||
985 | if (old_nd.mnt->mnt_parent && IS_MNT_SHARED(old_nd.mnt->mnt_parent)) | ||
986 | goto out1; | ||
987 | /* | ||
988 | * Don't move a mount tree containing unbindable mounts to a destination | ||
989 | * mount which is shared. | ||
990 | */ | ||
991 | if (IS_MNT_SHARED(nd->mnt) && tree_contains_unbindable(old_nd.mnt)) | ||
992 | goto out1; | ||
734 | err = -ELOOP; | 993 | err = -ELOOP; |
735 | for (p = nd->mnt; p->mnt_parent!=p; p = p->mnt_parent) | 994 | for (p = nd->mnt; p->mnt_parent != p; p = p->mnt_parent) |
736 | if (p == old_nd.mnt) | 995 | if (p == old_nd.mnt) |
737 | goto out2; | 996 | goto out1; |
738 | err = 0; | ||
739 | 997 | ||
740 | detach_mnt(old_nd.mnt, &parent_nd); | 998 | if ((err = attach_recursive_mnt(old_nd.mnt, nd, &parent_nd))) |
741 | attach_mnt(old_nd.mnt, nd); | 999 | goto out1; |
742 | 1000 | ||
1001 | spin_lock(&vfsmount_lock); | ||
743 | /* if the mount is moved, it should no longer be expire | 1002 | /* if the mount is moved, it should no longer be expire |
744 | * automatically */ | 1003 | * automatically */ |
745 | list_del_init(&old_nd.mnt->mnt_expire); | 1004 | list_del_init(&old_nd.mnt->mnt_expire); |
746 | out2: | ||
747 | spin_unlock(&vfsmount_lock); | 1005 | spin_unlock(&vfsmount_lock); |
748 | out1: | 1006 | out1: |
749 | up(&nd->dentry->d_inode->i_sem); | 1007 | up(&nd->dentry->d_inode->i_sem); |
750 | out: | 1008 | out: |
751 | up_write(¤t->namespace->sem); | 1009 | up_write(&namespace_sem); |
752 | if (!err) | 1010 | if (!err) |
753 | path_release(&parent_nd); | 1011 | path_release(&parent_nd); |
754 | path_release(&old_nd); | 1012 | path_release(&old_nd); |
@@ -787,9 +1045,9 @@ int do_add_mount(struct vfsmount *newmnt, struct nameidata *nd, | |||
787 | { | 1045 | { |
788 | int err; | 1046 | int err; |
789 | 1047 | ||
790 | down_write(¤t->namespace->sem); | 1048 | down_write(&namespace_sem); |
791 | /* Something was mounted here while we slept */ | 1049 | /* Something was mounted here while we slept */ |
792 | while(d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry)) | 1050 | while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry)) |
793 | ; | 1051 | ; |
794 | err = -EINVAL; | 1052 | err = -EINVAL; |
795 | if (!check_mnt(nd->mnt)) | 1053 | if (!check_mnt(nd->mnt)) |
@@ -806,25 +1064,28 @@ int do_add_mount(struct vfsmount *newmnt, struct nameidata *nd, | |||
806 | goto unlock; | 1064 | goto unlock; |
807 | 1065 | ||
808 | newmnt->mnt_flags = mnt_flags; | 1066 | newmnt->mnt_flags = mnt_flags; |
809 | newmnt->mnt_namespace = current->namespace; | 1067 | if ((err = graft_tree(newmnt, nd))) |
810 | err = graft_tree(newmnt, nd); | 1068 | goto unlock; |
811 | 1069 | ||
812 | if (err == 0 && fslist) { | 1070 | if (fslist) { |
813 | /* add to the specified expiration list */ | 1071 | /* add to the specified expiration list */ |
814 | spin_lock(&vfsmount_lock); | 1072 | spin_lock(&vfsmount_lock); |
815 | list_add_tail(&newmnt->mnt_expire, fslist); | 1073 | list_add_tail(&newmnt->mnt_expire, fslist); |
816 | spin_unlock(&vfsmount_lock); | 1074 | spin_unlock(&vfsmount_lock); |
817 | } | 1075 | } |
1076 | up_write(&namespace_sem); | ||
1077 | return 0; | ||
818 | 1078 | ||
819 | unlock: | 1079 | unlock: |
820 | up_write(¤t->namespace->sem); | 1080 | up_write(&namespace_sem); |
821 | mntput(newmnt); | 1081 | mntput(newmnt); |
822 | return err; | 1082 | return err; |
823 | } | 1083 | } |
824 | 1084 | ||
825 | EXPORT_SYMBOL_GPL(do_add_mount); | 1085 | EXPORT_SYMBOL_GPL(do_add_mount); |
826 | 1086 | ||
827 | static void expire_mount(struct vfsmount *mnt, struct list_head *mounts) | 1087 | static void expire_mount(struct vfsmount *mnt, struct list_head *mounts, |
1088 | struct list_head *umounts) | ||
828 | { | 1089 | { |
829 | spin_lock(&vfsmount_lock); | 1090 | spin_lock(&vfsmount_lock); |
830 | 1091 | ||
@@ -841,27 +1102,13 @@ static void expire_mount(struct vfsmount *mnt, struct list_head *mounts) | |||
841 | * Check that it is still dead: the count should now be 2 - as | 1102 | * Check that it is still dead: the count should now be 2 - as |
842 | * contributed by the vfsmount parent and the mntget above | 1103 | * contributed by the vfsmount parent and the mntget above |
843 | */ | 1104 | */ |
844 | if (atomic_read(&mnt->mnt_count) == 2) { | 1105 | if (!propagate_mount_busy(mnt, 2)) { |
845 | struct nameidata old_nd; | ||
846 | |||
847 | /* delete from the namespace */ | 1106 | /* delete from the namespace */ |
1107 | touch_namespace(mnt->mnt_namespace); | ||
848 | list_del_init(&mnt->mnt_list); | 1108 | list_del_init(&mnt->mnt_list); |
849 | mnt->mnt_namespace = NULL; | 1109 | mnt->mnt_namespace = NULL; |
850 | detach_mnt(mnt, &old_nd); | 1110 | umount_tree(mnt, 1, umounts); |
851 | spin_unlock(&vfsmount_lock); | 1111 | spin_unlock(&vfsmount_lock); |
852 | path_release(&old_nd); | ||
853 | |||
854 | /* | ||
855 | * Now lay it to rest if this was the last ref on the superblock | ||
856 | */ | ||
857 | if (atomic_read(&mnt->mnt_sb->s_active) == 1) { | ||
858 | /* last instance - try to be smart */ | ||
859 | lock_kernel(); | ||
860 | DQUOT_OFF(mnt->mnt_sb); | ||
861 | acct_auto_close(mnt->mnt_sb); | ||
862 | unlock_kernel(); | ||
863 | } | ||
864 | mntput(mnt); | ||
865 | } else { | 1112 | } else { |
866 | /* | 1113 | /* |
867 | * Someone brought it back to life whilst we didn't have any | 1114 | * Someone brought it back to life whilst we didn't have any |
@@ -910,6 +1157,7 @@ void mark_mounts_for_expiry(struct list_head *mounts) | |||
910 | * - dispose of the corpse | 1157 | * - dispose of the corpse |
911 | */ | 1158 | */ |
912 | while (!list_empty(&graveyard)) { | 1159 | while (!list_empty(&graveyard)) { |
1160 | LIST_HEAD(umounts); | ||
913 | mnt = list_entry(graveyard.next, struct vfsmount, mnt_expire); | 1161 | mnt = list_entry(graveyard.next, struct vfsmount, mnt_expire); |
914 | list_del_init(&mnt->mnt_expire); | 1162 | list_del_init(&mnt->mnt_expire); |
915 | 1163 | ||
@@ -921,13 +1169,12 @@ void mark_mounts_for_expiry(struct list_head *mounts) | |||
921 | get_namespace(namespace); | 1169 | get_namespace(namespace); |
922 | 1170 | ||
923 | spin_unlock(&vfsmount_lock); | 1171 | spin_unlock(&vfsmount_lock); |
924 | down_write(&namespace->sem); | 1172 | down_write(&namespace_sem); |
925 | expire_mount(mnt, mounts); | 1173 | expire_mount(mnt, mounts, &umounts); |
926 | up_write(&namespace->sem); | 1174 | up_write(&namespace_sem); |
927 | 1175 | release_mounts(&umounts); | |
928 | mntput(mnt); | 1176 | mntput(mnt); |
929 | put_namespace(namespace); | 1177 | put_namespace(namespace); |
930 | |||
931 | spin_lock(&vfsmount_lock); | 1178 | spin_lock(&vfsmount_lock); |
932 | } | 1179 | } |
933 | 1180 | ||
@@ -942,8 +1189,8 @@ EXPORT_SYMBOL_GPL(mark_mounts_for_expiry); | |||
942 | * Note that this function differs from copy_from_user() in that it will oops | 1189 | * Note that this function differs from copy_from_user() in that it will oops |
943 | * on bad values of `to', rather than returning a short copy. | 1190 | * on bad values of `to', rather than returning a short copy. |
944 | */ | 1191 | */ |
945 | static long | 1192 | static long exact_copy_from_user(void *to, const void __user * from, |
946 | exact_copy_from_user(void *to, const void __user *from, unsigned long n) | 1193 | unsigned long n) |
947 | { | 1194 | { |
948 | char *t = to; | 1195 | char *t = to; |
949 | const char __user *f = from; | 1196 | const char __user *f = from; |
@@ -964,12 +1211,12 @@ exact_copy_from_user(void *to, const void __user *from, unsigned long n) | |||
964 | return n; | 1211 | return n; |
965 | } | 1212 | } |
966 | 1213 | ||
967 | int copy_mount_options(const void __user *data, unsigned long *where) | 1214 | int copy_mount_options(const void __user * data, unsigned long *where) |
968 | { | 1215 | { |
969 | int i; | 1216 | int i; |
970 | unsigned long page; | 1217 | unsigned long page; |
971 | unsigned long size; | 1218 | unsigned long size; |
972 | 1219 | ||
973 | *where = 0; | 1220 | *where = 0; |
974 | if (!data) | 1221 | if (!data) |
975 | return 0; | 1222 | return 0; |
@@ -988,7 +1235,7 @@ int copy_mount_options(const void __user *data, unsigned long *where) | |||
988 | 1235 | ||
989 | i = size - exact_copy_from_user((void *)page, data, size); | 1236 | i = size - exact_copy_from_user((void *)page, data, size); |
990 | if (!i) { | 1237 | if (!i) { |
991 | free_page(page); | 1238 | free_page(page); |
992 | return -EFAULT; | 1239 | return -EFAULT; |
993 | } | 1240 | } |
994 | if (i != PAGE_SIZE) | 1241 | if (i != PAGE_SIZE) |
@@ -1011,7 +1258,7 @@ int copy_mount_options(const void __user *data, unsigned long *where) | |||
1011 | * Therefore, if this magic number is present, it carries no information | 1258 | * Therefore, if this magic number is present, it carries no information |
1012 | * and must be discarded. | 1259 | * and must be discarded. |
1013 | */ | 1260 | */ |
1014 | long do_mount(char * dev_name, char * dir_name, char *type_page, | 1261 | long do_mount(char *dev_name, char *dir_name, char *type_page, |
1015 | unsigned long flags, void *data_page) | 1262 | unsigned long flags, void *data_page) |
1016 | { | 1263 | { |
1017 | struct nameidata nd; | 1264 | struct nameidata nd; |
@@ -1039,7 +1286,7 @@ long do_mount(char * dev_name, char * dir_name, char *type_page, | |||
1039 | mnt_flags |= MNT_NODEV; | 1286 | mnt_flags |= MNT_NODEV; |
1040 | if (flags & MS_NOEXEC) | 1287 | if (flags & MS_NOEXEC) |
1041 | mnt_flags |= MNT_NOEXEC; | 1288 | mnt_flags |= MNT_NOEXEC; |
1042 | flags &= ~(MS_NOSUID|MS_NOEXEC|MS_NODEV|MS_ACTIVE); | 1289 | flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE); |
1043 | 1290 | ||
1044 | /* ... and get the mountpoint */ | 1291 | /* ... and get the mountpoint */ |
1045 | retval = path_lookup(dir_name, LOOKUP_FOLLOW, &nd); | 1292 | retval = path_lookup(dir_name, LOOKUP_FOLLOW, &nd); |
@@ -1055,6 +1302,8 @@ long do_mount(char * dev_name, char * dir_name, char *type_page, | |||
1055 | data_page); | 1302 | data_page); |
1056 | else if (flags & MS_BIND) | 1303 | else if (flags & MS_BIND) |
1057 | retval = do_loopback(&nd, dev_name, flags & MS_REC); | 1304 | retval = do_loopback(&nd, dev_name, flags & MS_REC); |
1305 | else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) | ||
1306 | retval = do_change_type(&nd, flags); | ||
1058 | else if (flags & MS_MOVE) | 1307 | else if (flags & MS_MOVE) |
1059 | retval = do_move_mount(&nd, dev_name); | 1308 | retval = do_move_mount(&nd, dev_name); |
1060 | else | 1309 | else |
@@ -1091,14 +1340,16 @@ int copy_namespace(int flags, struct task_struct *tsk) | |||
1091 | goto out; | 1340 | goto out; |
1092 | 1341 | ||
1093 | atomic_set(&new_ns->count, 1); | 1342 | atomic_set(&new_ns->count, 1); |
1094 | init_rwsem(&new_ns->sem); | ||
1095 | INIT_LIST_HEAD(&new_ns->list); | 1343 | INIT_LIST_HEAD(&new_ns->list); |
1344 | init_waitqueue_head(&new_ns->poll); | ||
1345 | new_ns->event = 0; | ||
1096 | 1346 | ||
1097 | down_write(&tsk->namespace->sem); | 1347 | down_write(&namespace_sem); |
1098 | /* First pass: copy the tree topology */ | 1348 | /* First pass: copy the tree topology */ |
1099 | new_ns->root = copy_tree(namespace->root, namespace->root->mnt_root); | 1349 | new_ns->root = copy_tree(namespace->root, namespace->root->mnt_root, |
1350 | CL_COPY_ALL | CL_EXPIRE); | ||
1100 | if (!new_ns->root) { | 1351 | if (!new_ns->root) { |
1101 | up_write(&tsk->namespace->sem); | 1352 | up_write(&namespace_sem); |
1102 | kfree(new_ns); | 1353 | kfree(new_ns); |
1103 | goto out; | 1354 | goto out; |
1104 | } | 1355 | } |
@@ -1132,7 +1383,7 @@ int copy_namespace(int flags, struct task_struct *tsk) | |||
1132 | p = next_mnt(p, namespace->root); | 1383 | p = next_mnt(p, namespace->root); |
1133 | q = next_mnt(q, new_ns->root); | 1384 | q = next_mnt(q, new_ns->root); |
1134 | } | 1385 | } |
1135 | up_write(&tsk->namespace->sem); | 1386 | up_write(&namespace_sem); |
1136 | 1387 | ||
1137 | tsk->namespace = new_ns; | 1388 | tsk->namespace = new_ns; |
1138 | 1389 | ||
@@ -1161,7 +1412,7 @@ asmlinkage long sys_mount(char __user * dev_name, char __user * dir_name, | |||
1161 | unsigned long dev_page; | 1412 | unsigned long dev_page; |
1162 | char *dir_page; | 1413 | char *dir_page; |
1163 | 1414 | ||
1164 | retval = copy_mount_options (type, &type_page); | 1415 | retval = copy_mount_options(type, &type_page); |
1165 | if (retval < 0) | 1416 | if (retval < 0) |
1166 | return retval; | 1417 | return retval; |
1167 | 1418 | ||
@@ -1170,17 +1421,17 @@ asmlinkage long sys_mount(char __user * dev_name, char __user * dir_name, | |||
1170 | if (IS_ERR(dir_page)) | 1421 | if (IS_ERR(dir_page)) |
1171 | goto out1; | 1422 | goto out1; |
1172 | 1423 | ||
1173 | retval = copy_mount_options (dev_name, &dev_page); | 1424 | retval = copy_mount_options(dev_name, &dev_page); |
1174 | if (retval < 0) | 1425 | if (retval < 0) |
1175 | goto out2; | 1426 | goto out2; |
1176 | 1427 | ||
1177 | retval = copy_mount_options (data, &data_page); | 1428 | retval = copy_mount_options(data, &data_page); |
1178 | if (retval < 0) | 1429 | if (retval < 0) |
1179 | goto out3; | 1430 | goto out3; |
1180 | 1431 | ||
1181 | lock_kernel(); | 1432 | lock_kernel(); |
1182 | retval = do_mount((char*)dev_page, dir_page, (char*)type_page, | 1433 | retval = do_mount((char *)dev_page, dir_page, (char *)type_page, |
1183 | flags, (void*)data_page); | 1434 | flags, (void *)data_page); |
1184 | unlock_kernel(); | 1435 | unlock_kernel(); |
1185 | free_page(data_page); | 1436 | free_page(data_page); |
1186 | 1437 | ||
@@ -1249,9 +1500,11 @@ static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd) | |||
1249 | if (fs) { | 1500 | if (fs) { |
1250 | atomic_inc(&fs->count); | 1501 | atomic_inc(&fs->count); |
1251 | task_unlock(p); | 1502 | task_unlock(p); |
1252 | if (fs->root==old_nd->dentry&&fs->rootmnt==old_nd->mnt) | 1503 | if (fs->root == old_nd->dentry |
1504 | && fs->rootmnt == old_nd->mnt) | ||
1253 | set_fs_root(fs, new_nd->mnt, new_nd->dentry); | 1505 | set_fs_root(fs, new_nd->mnt, new_nd->dentry); |
1254 | if (fs->pwd==old_nd->dentry&&fs->pwdmnt==old_nd->mnt) | 1506 | if (fs->pwd == old_nd->dentry |
1507 | && fs->pwdmnt == old_nd->mnt) | ||
1255 | set_fs_pwd(fs, new_nd->mnt, new_nd->dentry); | 1508 | set_fs_pwd(fs, new_nd->mnt, new_nd->dentry); |
1256 | put_fs_struct(fs); | 1509 | put_fs_struct(fs); |
1257 | } else | 1510 | } else |
@@ -1281,8 +1534,8 @@ static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd) | |||
1281 | * though, so you may need to say mount --bind /nfs/my_root /nfs/my_root | 1534 | * though, so you may need to say mount --bind /nfs/my_root /nfs/my_root |
1282 | * first. | 1535 | * first. |
1283 | */ | 1536 | */ |
1284 | 1537 | asmlinkage long sys_pivot_root(const char __user * new_root, | |
1285 | asmlinkage long sys_pivot_root(const char __user *new_root, const char __user *put_old) | 1538 | const char __user * put_old) |
1286 | { | 1539 | { |
1287 | struct vfsmount *tmp; | 1540 | struct vfsmount *tmp; |
1288 | struct nameidata new_nd, old_nd, parent_nd, root_parent, user_nd; | 1541 | struct nameidata new_nd, old_nd, parent_nd, root_parent, user_nd; |
@@ -1293,14 +1546,15 @@ asmlinkage long sys_pivot_root(const char __user *new_root, const char __user *p | |||
1293 | 1546 | ||
1294 | lock_kernel(); | 1547 | lock_kernel(); |
1295 | 1548 | ||
1296 | error = __user_walk(new_root, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, &new_nd); | 1549 | error = __user_walk(new_root, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, |
1550 | &new_nd); | ||
1297 | if (error) | 1551 | if (error) |
1298 | goto out0; | 1552 | goto out0; |
1299 | error = -EINVAL; | 1553 | error = -EINVAL; |
1300 | if (!check_mnt(new_nd.mnt)) | 1554 | if (!check_mnt(new_nd.mnt)) |
1301 | goto out1; | 1555 | goto out1; |
1302 | 1556 | ||
1303 | error = __user_walk(put_old, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, &old_nd); | 1557 | error = __user_walk(put_old, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old_nd); |
1304 | if (error) | 1558 | if (error) |
1305 | goto out1; | 1559 | goto out1; |
1306 | 1560 | ||
@@ -1314,9 +1568,13 @@ asmlinkage long sys_pivot_root(const char __user *new_root, const char __user *p | |||
1314 | user_nd.mnt = mntget(current->fs->rootmnt); | 1568 | user_nd.mnt = mntget(current->fs->rootmnt); |
1315 | user_nd.dentry = dget(current->fs->root); | 1569 | user_nd.dentry = dget(current->fs->root); |
1316 | read_unlock(¤t->fs->lock); | 1570 | read_unlock(¤t->fs->lock); |
1317 | down_write(¤t->namespace->sem); | 1571 | down_write(&namespace_sem); |
1318 | down(&old_nd.dentry->d_inode->i_sem); | 1572 | down(&old_nd.dentry->d_inode->i_sem); |
1319 | error = -EINVAL; | 1573 | error = -EINVAL; |
1574 | if (IS_MNT_SHARED(old_nd.mnt) || | ||
1575 | IS_MNT_SHARED(new_nd.mnt->mnt_parent) || | ||
1576 | IS_MNT_SHARED(user_nd.mnt->mnt_parent)) | ||
1577 | goto out2; | ||
1320 | if (!check_mnt(user_nd.mnt)) | 1578 | if (!check_mnt(user_nd.mnt)) |
1321 | goto out2; | 1579 | goto out2; |
1322 | error = -ENOENT; | 1580 | error = -ENOENT; |
@@ -1356,6 +1614,7 @@ asmlinkage long sys_pivot_root(const char __user *new_root, const char __user *p | |||
1356 | detach_mnt(user_nd.mnt, &root_parent); | 1614 | detach_mnt(user_nd.mnt, &root_parent); |
1357 | attach_mnt(user_nd.mnt, &old_nd); /* mount old root on put_old */ | 1615 | attach_mnt(user_nd.mnt, &old_nd); /* mount old root on put_old */ |
1358 | attach_mnt(new_nd.mnt, &root_parent); /* mount new_root on / */ | 1616 | attach_mnt(new_nd.mnt, &root_parent); /* mount new_root on / */ |
1617 | touch_namespace(current->namespace); | ||
1359 | spin_unlock(&vfsmount_lock); | 1618 | spin_unlock(&vfsmount_lock); |
1360 | chroot_fs_refs(&user_nd, &new_nd); | 1619 | chroot_fs_refs(&user_nd, &new_nd); |
1361 | security_sb_post_pivotroot(&user_nd, &new_nd); | 1620 | security_sb_post_pivotroot(&user_nd, &new_nd); |
@@ -1364,7 +1623,7 @@ asmlinkage long sys_pivot_root(const char __user *new_root, const char __user *p | |||
1364 | path_release(&parent_nd); | 1623 | path_release(&parent_nd); |
1365 | out2: | 1624 | out2: |
1366 | up(&old_nd.dentry->d_inode->i_sem); | 1625 | up(&old_nd.dentry->d_inode->i_sem); |
1367 | up_write(¤t->namespace->sem); | 1626 | up_write(&namespace_sem); |
1368 | path_release(&user_nd); | 1627 | path_release(&user_nd); |
1369 | path_release(&old_nd); | 1628 | path_release(&old_nd); |
1370 | out1: | 1629 | out1: |
@@ -1391,7 +1650,8 @@ static void __init init_mount_tree(void) | |||
1391 | panic("Can't allocate initial namespace"); | 1650 | panic("Can't allocate initial namespace"); |
1392 | atomic_set(&namespace->count, 1); | 1651 | atomic_set(&namespace->count, 1); |
1393 | INIT_LIST_HEAD(&namespace->list); | 1652 | INIT_LIST_HEAD(&namespace->list); |
1394 | init_rwsem(&namespace->sem); | 1653 | init_waitqueue_head(&namespace->poll); |
1654 | namespace->event = 0; | ||
1395 | list_add(&mnt->mnt_list, &namespace->list); | 1655 | list_add(&mnt->mnt_list, &namespace->list); |
1396 | namespace->root = mnt; | 1656 | namespace->root = mnt; |
1397 | mnt->mnt_namespace = namespace; | 1657 | mnt->mnt_namespace = namespace; |
@@ -1414,11 +1674,12 @@ void __init mnt_init(unsigned long mempages) | |||
1414 | unsigned int nr_hash; | 1674 | unsigned int nr_hash; |
1415 | int i; | 1675 | int i; |
1416 | 1676 | ||
1677 | init_rwsem(&namespace_sem); | ||
1678 | |||
1417 | mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount), | 1679 | mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount), |
1418 | 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); | 1680 | 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL, NULL); |
1419 | 1681 | ||
1420 | mount_hashtable = (struct list_head *) | 1682 | mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC); |
1421 | __get_free_page(GFP_ATOMIC); | ||
1422 | 1683 | ||
1423 | if (!mount_hashtable) | 1684 | if (!mount_hashtable) |
1424 | panic("Failed to allocate mount hash table\n"); | 1685 | panic("Failed to allocate mount hash table\n"); |
@@ -1440,7 +1701,7 @@ void __init mnt_init(unsigned long mempages) | |||
1440 | * from the number of bits we can fit. | 1701 | * from the number of bits we can fit. |
1441 | */ | 1702 | */ |
1442 | nr_hash = 1UL << hash_bits; | 1703 | nr_hash = 1UL << hash_bits; |
1443 | hash_mask = nr_hash-1; | 1704 | hash_mask = nr_hash - 1; |
1444 | 1705 | ||
1445 | printk("Mount-cache hash table entries: %d\n", nr_hash); | 1706 | printk("Mount-cache hash table entries: %d\n", nr_hash); |
1446 | 1707 | ||
@@ -1460,12 +1721,14 @@ void __init mnt_init(unsigned long mempages) | |||
1460 | void __put_namespace(struct namespace *namespace) | 1721 | void __put_namespace(struct namespace *namespace) |
1461 | { | 1722 | { |
1462 | struct vfsmount *root = namespace->root; | 1723 | struct vfsmount *root = namespace->root; |
1724 | LIST_HEAD(umount_list); | ||
1463 | namespace->root = NULL; | 1725 | namespace->root = NULL; |
1464 | spin_unlock(&vfsmount_lock); | 1726 | spin_unlock(&vfsmount_lock); |
1465 | down_write(&namespace->sem); | 1727 | down_write(&namespace_sem); |
1466 | spin_lock(&vfsmount_lock); | 1728 | spin_lock(&vfsmount_lock); |
1467 | umount_tree(root); | 1729 | umount_tree(root, 0, &umount_list); |
1468 | spin_unlock(&vfsmount_lock); | 1730 | spin_unlock(&vfsmount_lock); |
1469 | up_write(&namespace->sem); | 1731 | up_write(&namespace_sem); |
1732 | release_mounts(&umount_list); | ||
1470 | kfree(namespace); | 1733 | kfree(namespace); |
1471 | } | 1734 | } |
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 3976c177a7d0..618a327027b3 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c | |||
@@ -149,8 +149,7 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct | |||
149 | } | 149 | } |
150 | } | 150 | } |
151 | spin_unlock(&clp->cl_lock); | 151 | spin_unlock(&clp->cl_lock); |
152 | if (delegation != NULL) | 152 | kfree(delegation); |
153 | kfree(delegation); | ||
154 | return status; | 153 | return status; |
155 | } | 154 | } |
156 | 155 | ||
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 24d2fbf549bd..6391d8964214 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c | |||
@@ -1688,8 +1688,7 @@ static void nfs_kill_super(struct super_block *s) | |||
1688 | 1688 | ||
1689 | rpciod_down(); /* release rpciod */ | 1689 | rpciod_down(); /* release rpciod */ |
1690 | 1690 | ||
1691 | if (server->hostname != NULL) | 1691 | kfree(server->hostname); |
1692 | kfree(server->hostname); | ||
1693 | kfree(server); | 1692 | kfree(server); |
1694 | } | 1693 | } |
1695 | 1694 | ||
@@ -1908,8 +1907,7 @@ nfs_copy_user_string(char *dst, struct nfs_string *src, int maxlen) | |||
1908 | return ERR_PTR(-ENOMEM); | 1907 | return ERR_PTR(-ENOMEM); |
1909 | } | 1908 | } |
1910 | if (copy_from_user(dst, src->data, maxlen)) { | 1909 | if (copy_from_user(dst, src->data, maxlen)) { |
1911 | if (p != NULL) | 1910 | kfree(p); |
1912 | kfree(p); | ||
1913 | return ERR_PTR(-EFAULT); | 1911 | return ERR_PTR(-EFAULT); |
1914 | } | 1912 | } |
1915 | dst[maxlen] = '\0'; | 1913 | dst[maxlen] = '\0'; |
@@ -2000,10 +1998,8 @@ static struct super_block *nfs4_get_sb(struct file_system_type *fs_type, | |||
2000 | out_err: | 1998 | out_err: |
2001 | s = (struct super_block *)p; | 1999 | s = (struct super_block *)p; |
2002 | out_free: | 2000 | out_free: |
2003 | if (server->mnt_path) | 2001 | kfree(server->mnt_path); |
2004 | kfree(server->mnt_path); | 2002 | kfree(server->hostname); |
2005 | if (server->hostname) | ||
2006 | kfree(server->hostname); | ||
2007 | kfree(server); | 2003 | kfree(server); |
2008 | return s; | 2004 | return s; |
2009 | } | 2005 | } |
@@ -2023,8 +2019,7 @@ static void nfs4_kill_super(struct super_block *sb) | |||
2023 | 2019 | ||
2024 | destroy_nfsv4_state(server); | 2020 | destroy_nfsv4_state(server); |
2025 | 2021 | ||
2026 | if (server->hostname != NULL) | 2022 | kfree(server->hostname); |
2027 | kfree(server->hostname); | ||
2028 | kfree(server); | 2023 | kfree(server); |
2029 | } | 2024 | } |
2030 | 2025 | ||
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 52a26baa114c..0675f3215e0a 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c | |||
@@ -69,10 +69,8 @@ init_nfsv4_state(struct nfs_server *server) | |||
69 | void | 69 | void |
70 | destroy_nfsv4_state(struct nfs_server *server) | 70 | destroy_nfsv4_state(struct nfs_server *server) |
71 | { | 71 | { |
72 | if (server->mnt_path) { | 72 | kfree(server->mnt_path); |
73 | kfree(server->mnt_path); | 73 | server->mnt_path = NULL; |
74 | server->mnt_path = NULL; | ||
75 | } | ||
76 | if (server->nfs4_state) { | 74 | if (server->nfs4_state) { |
77 | nfs4_put_client(server->nfs4_state); | 75 | nfs4_put_client(server->nfs4_state); |
78 | server->nfs4_state = NULL; | 76 | server->nfs4_state = NULL; |
@@ -311,8 +309,7 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct | |||
311 | new = NULL; | 309 | new = NULL; |
312 | } | 310 | } |
313 | spin_unlock(&clp->cl_lock); | 311 | spin_unlock(&clp->cl_lock); |
314 | if (new) | 312 | kfree(new); |
315 | kfree(new); | ||
316 | if (sp != NULL) | 313 | if (sp != NULL) |
317 | return sp; | 314 | return sp; |
318 | put_rpccred(cred); | 315 | put_rpccred(cred); |
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c index f732541a3332..d639d172d568 100644 --- a/fs/nfs/unlink.c +++ b/fs/nfs/unlink.c | |||
@@ -52,8 +52,7 @@ nfs_put_unlinkdata(struct nfs_unlinkdata *data) | |||
52 | { | 52 | { |
53 | if (--data->count == 0) { | 53 | if (--data->count == 0) { |
54 | nfs_detach_unlinkdata(data); | 54 | nfs_detach_unlinkdata(data); |
55 | if (data->name.name != NULL) | 55 | kfree(data->name.name); |
56 | kfree(data->name.name); | ||
57 | kfree(data); | 56 | kfree(data); |
58 | } | 57 | } |
59 | } | 58 | } |
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c index 057aff745506..417ec02df44f 100644 --- a/fs/nfsd/export.c +++ b/fs/nfsd/export.c | |||
@@ -190,8 +190,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen) | |||
190 | out: | 190 | out: |
191 | if (dom) | 191 | if (dom) |
192 | auth_domain_put(dom); | 192 | auth_domain_put(dom); |
193 | if (buf) | 193 | kfree(buf); |
194 | kfree(buf); | ||
195 | return err; | 194 | return err; |
196 | } | 195 | } |
197 | 196 | ||
@@ -428,8 +427,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen) | |||
428 | path_release(&nd); | 427 | path_release(&nd); |
429 | if (dom) | 428 | if (dom) |
430 | auth_domain_put(dom); | 429 | auth_domain_put(dom); |
431 | if (buf) | 430 | kfree(buf); |
432 | kfree(buf); | ||
433 | return err; | 431 | return err; |
434 | } | 432 | } |
435 | 433 | ||
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c index e0e134d6baba..9147b8524d05 100644 --- a/fs/nfsd/nfs3xdr.c +++ b/fs/nfsd/nfs3xdr.c | |||
@@ -366,7 +366,8 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, u32 *p, | |||
366 | len = args->len = ntohl(*p++); | 366 | len = args->len = ntohl(*p++); |
367 | 367 | ||
368 | hdr = (void*)p - rqstp->rq_arg.head[0].iov_base; | 368 | hdr = (void*)p - rqstp->rq_arg.head[0].iov_base; |
369 | if (rqstp->rq_arg.len < len + hdr) | 369 | if (rqstp->rq_arg.len < hdr || |
370 | rqstp->rq_arg.len - hdr < len) | ||
370 | return 0; | 371 | return 0; |
371 | 372 | ||
372 | args->vec[0].iov_base = (void*)p; | 373 | args->vec[0].iov_base = (void*)p; |
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 4c4146350236..dcd673186944 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c | |||
@@ -151,8 +151,7 @@ static u32 *read_buf(struct nfsd4_compoundargs *argp, int nbytes) | |||
151 | if (nbytes <= sizeof(argp->tmp)) | 151 | if (nbytes <= sizeof(argp->tmp)) |
152 | p = argp->tmp; | 152 | p = argp->tmp; |
153 | else { | 153 | else { |
154 | if (argp->tmpp) | 154 | kfree(argp->tmpp); |
155 | kfree(argp->tmpp); | ||
156 | p = argp->tmpp = kmalloc(nbytes, GFP_KERNEL); | 155 | p = argp->tmpp = kmalloc(nbytes, GFP_KERNEL); |
157 | if (!p) | 156 | if (!p) |
158 | return NULL; | 157 | return NULL; |
@@ -2476,10 +2475,8 @@ void nfsd4_release_compoundargs(struct nfsd4_compoundargs *args) | |||
2476 | kfree(args->ops); | 2475 | kfree(args->ops); |
2477 | args->ops = args->iops; | 2476 | args->ops = args->iops; |
2478 | } | 2477 | } |
2479 | if (args->tmpp) { | 2478 | kfree(args->tmpp); |
2480 | kfree(args->tmpp); | 2479 | args->tmpp = NULL; |
2481 | args->tmpp = NULL; | ||
2482 | } | ||
2483 | while (args->to_free) { | 2480 | while (args->to_free) { |
2484 | struct tmpbuf *tb = args->to_free; | 2481 | struct tmpbuf *tb = args->to_free; |
2485 | args->to_free = tb->next; | 2482 | args->to_free = tb->next; |
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c index 119e4d4495b8..d852ebb538e3 100644 --- a/fs/nfsd/nfscache.c +++ b/fs/nfsd/nfscache.c | |||
@@ -93,8 +93,7 @@ nfsd_cache_shutdown(void) | |||
93 | 93 | ||
94 | cache_disabled = 1; | 94 | cache_disabled = 1; |
95 | 95 | ||
96 | if (hash_list) | 96 | kfree (hash_list); |
97 | kfree (hash_list); | ||
98 | hash_list = NULL; | 97 | hash_list = NULL; |
99 | } | 98 | } |
100 | 99 | ||
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index 841c562991e8..a0871b3efeb7 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/seq_file.h> | 23 | #include <linux/seq_file.h> |
24 | #include <linux/pagemap.h> | 24 | #include <linux/pagemap.h> |
25 | #include <linux/init.h> | 25 | #include <linux/init.h> |
26 | #include <linux/string.h> | ||
26 | 27 | ||
27 | #include <linux/nfs.h> | 28 | #include <linux/nfs.h> |
28 | #include <linux/nfsd_idmap.h> | 29 | #include <linux/nfsd_idmap.h> |
@@ -35,6 +36,8 @@ | |||
35 | 36 | ||
36 | #include <asm/uaccess.h> | 37 | #include <asm/uaccess.h> |
37 | 38 | ||
39 | unsigned int nfsd_versbits = ~0; | ||
40 | |||
38 | /* | 41 | /* |
39 | * We have a single directory with 9 nodes in it. | 42 | * We have a single directory with 9 nodes in it. |
40 | */ | 43 | */ |
@@ -50,8 +53,15 @@ enum { | |||
50 | NFSD_List, | 53 | NFSD_List, |
51 | NFSD_Fh, | 54 | NFSD_Fh, |
52 | NFSD_Threads, | 55 | NFSD_Threads, |
56 | NFSD_Versions, | ||
57 | /* | ||
58 | * The below MUST come last. Otherwise we leave a hole in nfsd_files[] | ||
59 | * with !CONFIG_NFSD_V4 and simple_fill_super() goes oops | ||
60 | */ | ||
61 | #ifdef CONFIG_NFSD_V4 | ||
53 | NFSD_Leasetime, | 62 | NFSD_Leasetime, |
54 | NFSD_RecoveryDir, | 63 | NFSD_RecoveryDir, |
64 | #endif | ||
55 | }; | 65 | }; |
56 | 66 | ||
57 | /* | 67 | /* |
@@ -66,8 +76,11 @@ static ssize_t write_getfd(struct file *file, char *buf, size_t size); | |||
66 | static ssize_t write_getfs(struct file *file, char *buf, size_t size); | 76 | static ssize_t write_getfs(struct file *file, char *buf, size_t size); |
67 | static ssize_t write_filehandle(struct file *file, char *buf, size_t size); | 77 | static ssize_t write_filehandle(struct file *file, char *buf, size_t size); |
68 | static ssize_t write_threads(struct file *file, char *buf, size_t size); | 78 | static ssize_t write_threads(struct file *file, char *buf, size_t size); |
79 | static ssize_t write_versions(struct file *file, char *buf, size_t size); | ||
80 | #ifdef CONFIG_NFSD_V4 | ||
69 | static ssize_t write_leasetime(struct file *file, char *buf, size_t size); | 81 | static ssize_t write_leasetime(struct file *file, char *buf, size_t size); |
70 | static ssize_t write_recoverydir(struct file *file, char *buf, size_t size); | 82 | static ssize_t write_recoverydir(struct file *file, char *buf, size_t size); |
83 | #endif | ||
71 | 84 | ||
72 | static ssize_t (*write_op[])(struct file *, char *, size_t) = { | 85 | static ssize_t (*write_op[])(struct file *, char *, size_t) = { |
73 | [NFSD_Svc] = write_svc, | 86 | [NFSD_Svc] = write_svc, |
@@ -79,8 +92,11 @@ static ssize_t (*write_op[])(struct file *, char *, size_t) = { | |||
79 | [NFSD_Getfs] = write_getfs, | 92 | [NFSD_Getfs] = write_getfs, |
80 | [NFSD_Fh] = write_filehandle, | 93 | [NFSD_Fh] = write_filehandle, |
81 | [NFSD_Threads] = write_threads, | 94 | [NFSD_Threads] = write_threads, |
95 | [NFSD_Versions] = write_versions, | ||
96 | #ifdef CONFIG_NFSD_V4 | ||
82 | [NFSD_Leasetime] = write_leasetime, | 97 | [NFSD_Leasetime] = write_leasetime, |
83 | [NFSD_RecoveryDir] = write_recoverydir, | 98 | [NFSD_RecoveryDir] = write_recoverydir, |
99 | #endif | ||
84 | }; | 100 | }; |
85 | 101 | ||
86 | static ssize_t nfsctl_transaction_write(struct file *file, const char __user *buf, size_t size, loff_t *pos) | 102 | static ssize_t nfsctl_transaction_write(struct file *file, const char __user *buf, size_t size, loff_t *pos) |
@@ -104,9 +120,23 @@ static ssize_t nfsctl_transaction_write(struct file *file, const char __user *bu | |||
104 | return rv; | 120 | return rv; |
105 | } | 121 | } |
106 | 122 | ||
123 | static ssize_t nfsctl_transaction_read(struct file *file, char __user *buf, size_t size, loff_t *pos) | ||
124 | { | ||
125 | if (! file->private_data) { | ||
126 | /* An attempt to read a transaction file without writing | ||
127 | * causes a 0-byte write so that the file can return | ||
128 | * state information | ||
129 | */ | ||
130 | ssize_t rv = nfsctl_transaction_write(file, buf, 0, pos); | ||
131 | if (rv < 0) | ||
132 | return rv; | ||
133 | } | ||
134 | return simple_transaction_read(file, buf, size, pos); | ||
135 | } | ||
136 | |||
107 | static struct file_operations transaction_ops = { | 137 | static struct file_operations transaction_ops = { |
108 | .write = nfsctl_transaction_write, | 138 | .write = nfsctl_transaction_write, |
109 | .read = simple_transaction_read, | 139 | .read = nfsctl_transaction_read, |
110 | .release = simple_transaction_release, | 140 | .release = simple_transaction_release, |
111 | }; | 141 | }; |
112 | 142 | ||
@@ -329,6 +359,70 @@ static ssize_t write_threads(struct file *file, char *buf, size_t size) | |||
329 | return strlen(buf); | 359 | return strlen(buf); |
330 | } | 360 | } |
331 | 361 | ||
362 | static ssize_t write_versions(struct file *file, char *buf, size_t size) | ||
363 | { | ||
364 | /* | ||
365 | * Format: | ||
366 | * [-/+]vers [-/+]vers ... | ||
367 | */ | ||
368 | char *mesg = buf; | ||
369 | char *vers, sign; | ||
370 | int len, num; | ||
371 | ssize_t tlen = 0; | ||
372 | char *sep; | ||
373 | |||
374 | if (size>0) { | ||
375 | if (nfsd_serv) | ||
376 | return -EBUSY; | ||
377 | if (buf[size-1] != '\n') | ||
378 | return -EINVAL; | ||
379 | buf[size-1] = 0; | ||
380 | |||
381 | vers = mesg; | ||
382 | len = qword_get(&mesg, vers, size); | ||
383 | if (len <= 0) return -EINVAL; | ||
384 | do { | ||
385 | sign = *vers; | ||
386 | if (sign == '+' || sign == '-') | ||
387 | num = simple_strtol((vers+1), NULL, 0); | ||
388 | else | ||
389 | num = simple_strtol(vers, NULL, 0); | ||
390 | switch(num) { | ||
391 | case 2: | ||
392 | case 3: | ||
393 | case 4: | ||
394 | if (sign != '-') | ||
395 | NFSCTL_VERSET(nfsd_versbits, num); | ||
396 | else | ||
397 | NFSCTL_VERUNSET(nfsd_versbits, num); | ||
398 | break; | ||
399 | default: | ||
400 | return -EINVAL; | ||
401 | } | ||
402 | vers += len + 1; | ||
403 | tlen += len; | ||
404 | } while ((len = qword_get(&mesg, vers, size)) > 0); | ||
405 | /* If all get turned off, turn them back on, as | ||
406 | * having no versions is BAD | ||
407 | */ | ||
408 | if ((nfsd_versbits & NFSCTL_VERALL)==0) | ||
409 | nfsd_versbits = NFSCTL_VERALL; | ||
410 | } | ||
411 | /* Now write current state into reply buffer */ | ||
412 | len = 0; | ||
413 | sep = ""; | ||
414 | for (num=2 ; num <= 4 ; num++) | ||
415 | if (NFSCTL_VERISSET(NFSCTL_VERALL, num)) { | ||
416 | len += sprintf(buf+len, "%s%c%d", sep, | ||
417 | NFSCTL_VERISSET(nfsd_versbits, num)?'+':'-', | ||
418 | num); | ||
419 | sep = " "; | ||
420 | } | ||
421 | len += sprintf(buf+len, "\n"); | ||
422 | return len; | ||
423 | } | ||
424 | |||
425 | #ifdef CONFIG_NFSD_V4 | ||
332 | extern time_t nfs4_leasetime(void); | 426 | extern time_t nfs4_leasetime(void); |
333 | 427 | ||
334 | static ssize_t write_leasetime(struct file *file, char *buf, size_t size) | 428 | static ssize_t write_leasetime(struct file *file, char *buf, size_t size) |
@@ -370,6 +464,7 @@ static ssize_t write_recoverydir(struct file *file, char *buf, size_t size) | |||
370 | status = nfs4_reset_recoverydir(recdir); | 464 | status = nfs4_reset_recoverydir(recdir); |
371 | return strlen(buf); | 465 | return strlen(buf); |
372 | } | 466 | } |
467 | #endif | ||
373 | 468 | ||
374 | /*----------------------------------------------------------------------------*/ | 469 | /*----------------------------------------------------------------------------*/ |
375 | /* | 470 | /* |
@@ -389,6 +484,7 @@ static int nfsd_fill_super(struct super_block * sb, void * data, int silent) | |||
389 | [NFSD_List] = {"exports", &exports_operations, S_IRUGO}, | 484 | [NFSD_List] = {"exports", &exports_operations, S_IRUGO}, |
390 | [NFSD_Fh] = {"filehandle", &transaction_ops, S_IWUSR|S_IRUSR}, | 485 | [NFSD_Fh] = {"filehandle", &transaction_ops, S_IWUSR|S_IRUSR}, |
391 | [NFSD_Threads] = {"threads", &transaction_ops, S_IWUSR|S_IRUSR}, | 486 | [NFSD_Threads] = {"threads", &transaction_ops, S_IWUSR|S_IRUSR}, |
487 | [NFSD_Versions] = {"versions", &transaction_ops, S_IWUSR|S_IRUSR}, | ||
392 | #ifdef CONFIG_NFSD_V4 | 488 | #ifdef CONFIG_NFSD_V4 |
393 | [NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR}, | 489 | [NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR}, |
394 | [NFSD_RecoveryDir] = {"nfsv4recoverydir", &transaction_ops, S_IWUSR|S_IRUSR}, | 490 | [NFSD_RecoveryDir] = {"nfsv4recoverydir", &transaction_ops, S_IWUSR|S_IRUSR}, |
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index 1697539a7171..89ed04696865 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/nfsd/nfsd.h> | 30 | #include <linux/nfsd/nfsd.h> |
31 | #include <linux/nfsd/stats.h> | 31 | #include <linux/nfsd/stats.h> |
32 | #include <linux/nfsd/cache.h> | 32 | #include <linux/nfsd/cache.h> |
33 | #include <linux/nfsd/syscall.h> | ||
33 | #include <linux/lockd/bind.h> | 34 | #include <linux/lockd/bind.h> |
34 | #include <linux/nfsacl.h> | 35 | #include <linux/nfsacl.h> |
35 | 36 | ||
@@ -52,7 +53,7 @@ | |||
52 | extern struct svc_program nfsd_program; | 53 | extern struct svc_program nfsd_program; |
53 | static void nfsd(struct svc_rqst *rqstp); | 54 | static void nfsd(struct svc_rqst *rqstp); |
54 | struct timeval nfssvc_boot; | 55 | struct timeval nfssvc_boot; |
55 | static struct svc_serv *nfsd_serv; | 56 | struct svc_serv *nfsd_serv; |
56 | static atomic_t nfsd_busy; | 57 | static atomic_t nfsd_busy; |
57 | static unsigned long nfsd_last_call; | 58 | static unsigned long nfsd_last_call; |
58 | static DEFINE_SPINLOCK(nfsd_call_lock); | 59 | static DEFINE_SPINLOCK(nfsd_call_lock); |
@@ -63,6 +64,31 @@ struct nfsd_list { | |||
63 | }; | 64 | }; |
64 | static struct list_head nfsd_list = LIST_HEAD_INIT(nfsd_list); | 65 | static struct list_head nfsd_list = LIST_HEAD_INIT(nfsd_list); |
65 | 66 | ||
67 | static struct svc_version * nfsd_version[] = { | ||
68 | [2] = &nfsd_version2, | ||
69 | #if defined(CONFIG_NFSD_V3) | ||
70 | [3] = &nfsd_version3, | ||
71 | #endif | ||
72 | #if defined(CONFIG_NFSD_V4) | ||
73 | [4] = &nfsd_version4, | ||
74 | #endif | ||
75 | }; | ||
76 | |||
77 | #define NFSD_MINVERS 2 | ||
78 | #define NFSD_NRVERS (sizeof(nfsd_version)/sizeof(nfsd_version[0])) | ||
79 | static struct svc_version *nfsd_versions[NFSD_NRVERS]; | ||
80 | |||
81 | struct svc_program nfsd_program = { | ||
82 | .pg_prog = NFS_PROGRAM, /* program number */ | ||
83 | .pg_nvers = NFSD_NRVERS, /* nr of entries in nfsd_version */ | ||
84 | .pg_vers = nfsd_versions, /* version table */ | ||
85 | .pg_name = "nfsd", /* program name */ | ||
86 | .pg_class = "nfsd", /* authentication class */ | ||
87 | .pg_stats = &nfsd_svcstats, /* version table */ | ||
88 | .pg_authenticate = &svc_set_client, /* export authentication */ | ||
89 | |||
90 | }; | ||
91 | |||
66 | /* | 92 | /* |
67 | * Maximum number of nfsd processes | 93 | * Maximum number of nfsd processes |
68 | */ | 94 | */ |
@@ -80,11 +106,12 @@ int | |||
80 | nfsd_svc(unsigned short port, int nrservs) | 106 | nfsd_svc(unsigned short port, int nrservs) |
81 | { | 107 | { |
82 | int error; | 108 | int error; |
83 | int none_left; | 109 | int none_left, found_one, i; |
84 | struct list_head *victim; | 110 | struct list_head *victim; |
85 | 111 | ||
86 | lock_kernel(); | 112 | lock_kernel(); |
87 | dprintk("nfsd: creating service\n"); | 113 | dprintk("nfsd: creating service: vers 0x%x\n", |
114 | nfsd_versbits); | ||
88 | error = -EINVAL; | 115 | error = -EINVAL; |
89 | if (nrservs <= 0) | 116 | if (nrservs <= 0) |
90 | nrservs = 0; | 117 | nrservs = 0; |
@@ -99,6 +126,27 @@ nfsd_svc(unsigned short port, int nrservs) | |||
99 | if (error<0) | 126 | if (error<0) |
100 | goto out; | 127 | goto out; |
101 | if (!nfsd_serv) { | 128 | if (!nfsd_serv) { |
129 | /* | ||
130 | * Use the nfsd_ctlbits to define which | ||
131 | * versions that will be advertised. | ||
132 | * If nfsd_ctlbits doesn't list any version, | ||
133 | * export them all. | ||
134 | */ | ||
135 | found_one = 0; | ||
136 | |||
137 | for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) { | ||
138 | if (NFSCTL_VERISSET(nfsd_versbits, i)) { | ||
139 | nfsd_program.pg_vers[i] = nfsd_version[i]; | ||
140 | found_one = 1; | ||
141 | } else | ||
142 | nfsd_program.pg_vers[i] = NULL; | ||
143 | } | ||
144 | |||
145 | if (!found_one) { | ||
146 | for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) | ||
147 | nfsd_program.pg_vers[i] = nfsd_version[i]; | ||
148 | } | ||
149 | |||
102 | atomic_set(&nfsd_busy, 0); | 150 | atomic_set(&nfsd_busy, 0); |
103 | error = -ENOMEM; | 151 | error = -ENOMEM; |
104 | nfsd_serv = svc_create(&nfsd_program, NFSD_BUFSIZE); | 152 | nfsd_serv = svc_create(&nfsd_program, NFSD_BUFSIZE); |
@@ -379,6 +427,7 @@ static struct svc_program nfsd_acl_program = { | |||
379 | .pg_name = "nfsd", | 427 | .pg_name = "nfsd", |
380 | .pg_class = "nfsd", | 428 | .pg_class = "nfsd", |
381 | .pg_stats = &nfsd_acl_svcstats, | 429 | .pg_stats = &nfsd_acl_svcstats, |
430 | .pg_authenticate = &svc_set_client, | ||
382 | }; | 431 | }; |
383 | 432 | ||
384 | static struct svc_stat nfsd_acl_svcstats = { | 433 | static struct svc_stat nfsd_acl_svcstats = { |
@@ -389,28 +438,3 @@ static struct svc_stat nfsd_acl_svcstats = { | |||
389 | #else | 438 | #else |
390 | #define nfsd_acl_program_p NULL | 439 | #define nfsd_acl_program_p NULL |
391 | #endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */ | 440 | #endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */ |
392 | |||
393 | extern struct svc_version nfsd_version2, nfsd_version3, nfsd_version4; | ||
394 | |||
395 | static struct svc_version * nfsd_version[] = { | ||
396 | [2] = &nfsd_version2, | ||
397 | #if defined(CONFIG_NFSD_V3) | ||
398 | [3] = &nfsd_version3, | ||
399 | #endif | ||
400 | #if defined(CONFIG_NFSD_V4) | ||
401 | [4] = &nfsd_version4, | ||
402 | #endif | ||
403 | }; | ||
404 | |||
405 | #define NFSD_NRVERS (sizeof(nfsd_version)/sizeof(nfsd_version[0])) | ||
406 | struct svc_program nfsd_program = { | ||
407 | .pg_next = nfsd_acl_program_p, | ||
408 | .pg_prog = NFS_PROGRAM, /* program number */ | ||
409 | .pg_nvers = NFSD_NRVERS, /* nr of entries in nfsd_version */ | ||
410 | .pg_vers = nfsd_version, /* version table */ | ||
411 | .pg_name = "nfsd", /* program name */ | ||
412 | .pg_class = "nfsd", /* authentication class */ | ||
413 | .pg_stats = &nfsd_svcstats, /* version table */ | ||
414 | .pg_authenticate = &svc_set_client, /* export authentication */ | ||
415 | |||
416 | }; | ||
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 4f2cd3d27566..af7c3c3074b0 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c | |||
@@ -254,12 +254,19 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, | |||
254 | 254 | ||
255 | /* Get inode */ | 255 | /* Get inode */ |
256 | err = fh_verify(rqstp, fhp, ftype, accmode); | 256 | err = fh_verify(rqstp, fhp, ftype, accmode); |
257 | if (err || !iap->ia_valid) | 257 | if (err) |
258 | goto out; | 258 | goto out; |
259 | 259 | ||
260 | dentry = fhp->fh_dentry; | 260 | dentry = fhp->fh_dentry; |
261 | inode = dentry->d_inode; | 261 | inode = dentry->d_inode; |
262 | 262 | ||
263 | /* Ignore any mode updates on symlinks */ | ||
264 | if (S_ISLNK(inode->i_mode)) | ||
265 | iap->ia_valid &= ~ATTR_MODE; | ||
266 | |||
267 | if (!iap->ia_valid) | ||
268 | goto out; | ||
269 | |||
263 | /* NFSv2 does not differentiate between "set-[ac]time-to-now" | 270 | /* NFSv2 does not differentiate between "set-[ac]time-to-now" |
264 | * which only requires access, and "set-[ac]time-to-X" which | 271 | * which only requires access, and "set-[ac]time-to-X" which |
265 | * requires ownership. | 272 | * requires ownership. |
@@ -194,7 +194,7 @@ out: | |||
194 | return error; | 194 | return error; |
195 | } | 195 | } |
196 | 196 | ||
197 | int do_truncate(struct dentry *dentry, loff_t length) | 197 | int do_truncate(struct dentry *dentry, loff_t length, struct file *filp) |
198 | { | 198 | { |
199 | int err; | 199 | int err; |
200 | struct iattr newattrs; | 200 | struct iattr newattrs; |
@@ -205,6 +205,10 @@ int do_truncate(struct dentry *dentry, loff_t length) | |||
205 | 205 | ||
206 | newattrs.ia_size = length; | 206 | newattrs.ia_size = length; |
207 | newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME; | 207 | newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME; |
208 | if (filp) { | ||
209 | newattrs.ia_file = filp; | ||
210 | newattrs.ia_valid |= ATTR_FILE; | ||
211 | } | ||
208 | 212 | ||
209 | down(&dentry->d_inode->i_sem); | 213 | down(&dentry->d_inode->i_sem); |
210 | err = notify_change(dentry, &newattrs); | 214 | err = notify_change(dentry, &newattrs); |
@@ -262,7 +266,7 @@ static inline long do_sys_truncate(const char __user * path, loff_t length) | |||
262 | error = locks_verify_truncate(inode, NULL, length); | 266 | error = locks_verify_truncate(inode, NULL, length); |
263 | if (!error) { | 267 | if (!error) { |
264 | DQUOT_INIT(inode); | 268 | DQUOT_INIT(inode); |
265 | error = do_truncate(nd.dentry, length); | 269 | error = do_truncate(nd.dentry, length, NULL); |
266 | } | 270 | } |
267 | put_write_access(inode); | 271 | put_write_access(inode); |
268 | 272 | ||
@@ -314,7 +318,7 @@ static inline long do_sys_ftruncate(unsigned int fd, loff_t length, int small) | |||
314 | 318 | ||
315 | error = locks_verify_truncate(inode, file, length); | 319 | error = locks_verify_truncate(inode, file, length); |
316 | if (!error) | 320 | if (!error) |
317 | error = do_truncate(dentry, length); | 321 | error = do_truncate(dentry, length, file); |
318 | out_putf: | 322 | out_putf: |
319 | fput(file); | 323 | fput(file); |
320 | out: | 324 | out: |
@@ -887,6 +891,10 @@ struct file *nameidata_to_filp(struct nameidata *nd, int flags) | |||
887 | return filp; | 891 | return filp; |
888 | } | 892 | } |
889 | 893 | ||
894 | /* | ||
895 | * dentry_open() will have done dput(dentry) and mntput(mnt) if it returns an | ||
896 | * error. | ||
897 | */ | ||
890 | struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags) | 898 | struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags) |
891 | { | 899 | { |
892 | int error; | 900 | int error; |
@@ -894,8 +902,11 @@ struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags) | |||
894 | 902 | ||
895 | error = -ENFILE; | 903 | error = -ENFILE; |
896 | f = get_empty_filp(); | 904 | f = get_empty_filp(); |
897 | if (f == NULL) | 905 | if (f == NULL) { |
906 | dput(dentry); | ||
907 | mntput(mnt); | ||
898 | return ERR_PTR(error); | 908 | return ERR_PTR(error); |
909 | } | ||
899 | 910 | ||
900 | return __dentry_open(dentry, mnt, flags, f, NULL); | 911 | return __dentry_open(dentry, mnt, flags, f, NULL); |
901 | } | 912 | } |
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c index 1be11ce96b0f..aeb0106890e4 100644 --- a/fs/openpromfs/inode.c +++ b/fs/openpromfs/inode.c | |||
@@ -1088,8 +1088,7 @@ static void __exit exit_openprom_fs(void) | |||
1088 | unregister_filesystem(&openprom_fs_type); | 1088 | unregister_filesystem(&openprom_fs_type); |
1089 | free_pages ((unsigned long)nodes, alloced); | 1089 | free_pages ((unsigned long)nodes, alloced); |
1090 | for (i = 0; i < aliases_nodes; i++) | 1090 | for (i = 0; i < aliases_nodes; i++) |
1091 | if (alias_names [i]) | 1091 | kfree (alias_names [i]); |
1092 | kfree (alias_names [i]); | ||
1093 | nodes = NULL; | 1092 | nodes = NULL; |
1094 | } | 1093 | } |
1095 | 1094 | ||
diff --git a/fs/partitions/ibm.c b/fs/partitions/ibm.c index d59dcbf2bd4a..6327bcb2d73d 100644 --- a/fs/partitions/ibm.c +++ b/fs/partitions/ibm.c | |||
@@ -29,7 +29,7 @@ | |||
29 | * cyl-cyl-head-head structure | 29 | * cyl-cyl-head-head structure |
30 | */ | 30 | */ |
31 | static inline int | 31 | static inline int |
32 | cchh2blk (cchh_t *ptr, struct hd_geometry *geo) { | 32 | cchh2blk (struct vtoc_cchh *ptr, struct hd_geometry *geo) { |
33 | return ptr->cc * geo->heads * geo->sectors + | 33 | return ptr->cc * geo->heads * geo->sectors + |
34 | ptr->hh * geo->sectors; | 34 | ptr->hh * geo->sectors; |
35 | } | 35 | } |
@@ -40,7 +40,7 @@ cchh2blk (cchh_t *ptr, struct hd_geometry *geo) { | |||
40 | * cyl-cyl-head-head-block structure | 40 | * cyl-cyl-head-head-block structure |
41 | */ | 41 | */ |
42 | static inline int | 42 | static inline int |
43 | cchhb2blk (cchhb_t *ptr, struct hd_geometry *geo) { | 43 | cchhb2blk (struct vtoc_cchhb *ptr, struct hd_geometry *geo) { |
44 | return ptr->cc * geo->heads * geo->sectors + | 44 | return ptr->cc * geo->heads * geo->sectors + |
45 | ptr->hh * geo->sectors + | 45 | ptr->hh * geo->sectors + |
46 | ptr->b; | 46 | ptr->b; |
@@ -56,7 +56,7 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev) | |||
56 | struct hd_geometry *geo; | 56 | struct hd_geometry *geo; |
57 | char type[5] = {0,}; | 57 | char type[5] = {0,}; |
58 | char name[7] = {0,}; | 58 | char name[7] = {0,}; |
59 | volume_label_t *vlabel; | 59 | struct vtoc_volume_label *vlabel; |
60 | unsigned char *data; | 60 | unsigned char *data; |
61 | Sector sect; | 61 | Sector sect; |
62 | 62 | ||
@@ -64,7 +64,8 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev) | |||
64 | goto out_noinfo; | 64 | goto out_noinfo; |
65 | if ((geo = kmalloc(sizeof(struct hd_geometry), GFP_KERNEL)) == NULL) | 65 | if ((geo = kmalloc(sizeof(struct hd_geometry), GFP_KERNEL)) == NULL) |
66 | goto out_nogeo; | 66 | goto out_nogeo; |
67 | if ((vlabel = kmalloc(sizeof(volume_label_t), GFP_KERNEL)) == NULL) | 67 | if ((vlabel = kmalloc(sizeof(struct vtoc_volume_label), |
68 | GFP_KERNEL)) == NULL) | ||
68 | goto out_novlab; | 69 | goto out_novlab; |
69 | 70 | ||
70 | if (ioctl_by_bdev(bdev, BIODASDINFO, (unsigned long)info) != 0 || | 71 | if (ioctl_by_bdev(bdev, BIODASDINFO, (unsigned long)info) != 0 || |
@@ -86,7 +87,7 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev) | |||
86 | strncpy(name, data + 8, 6); | 87 | strncpy(name, data + 8, 6); |
87 | else | 88 | else |
88 | strncpy(name, data + 4, 6); | 89 | strncpy(name, data + 4, 6); |
89 | memcpy (vlabel, data, sizeof(volume_label_t)); | 90 | memcpy (vlabel, data, sizeof(struct vtoc_volume_label)); |
90 | put_dev_sector(sect); | 91 | put_dev_sector(sect); |
91 | 92 | ||
92 | EBCASC(type, 4); | 93 | EBCASC(type, 4); |
@@ -129,9 +130,9 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev) | |||
129 | counter = 0; | 130 | counter = 0; |
130 | while ((data = read_dev_sector(bdev, blk*(blocksize/512), | 131 | while ((data = read_dev_sector(bdev, blk*(blocksize/512), |
131 | §)) != NULL) { | 132 | §)) != NULL) { |
132 | format1_label_t f1; | 133 | struct vtoc_format1_label f1; |
133 | 134 | ||
134 | memcpy(&f1, data, sizeof(format1_label_t)); | 135 | memcpy(&f1, data, sizeof(struct vtoc_format1_label)); |
135 | put_dev_sector(sect); | 136 | put_dev_sector(sect); |
136 | 137 | ||
137 | /* skip FMT4 / FMT5 / FMT7 labels */ | 138 | /* skip FMT4 / FMT5 / FMT7 labels */ |
diff --git a/fs/pnode.c b/fs/pnode.c new file mode 100644 index 000000000000..aeeec8ba8dd2 --- /dev/null +++ b/fs/pnode.c | |||
@@ -0,0 +1,305 @@ | |||
1 | /* | ||
2 | * linux/fs/pnode.c | ||
3 | * | ||
4 | * (C) Copyright IBM Corporation 2005. | ||
5 | * Released under GPL v2. | ||
6 | * Author : Ram Pai (linuxram@us.ibm.com) | ||
7 | * | ||
8 | */ | ||
9 | #include <linux/namespace.h> | ||
10 | #include <linux/mount.h> | ||
11 | #include <linux/fs.h> | ||
12 | #include "pnode.h" | ||
13 | |||
14 | /* return the next shared peer mount of @p */ | ||
15 | static inline struct vfsmount *next_peer(struct vfsmount *p) | ||
16 | { | ||
17 | return list_entry(p->mnt_share.next, struct vfsmount, mnt_share); | ||
18 | } | ||
19 | |||
20 | static inline struct vfsmount *first_slave(struct vfsmount *p) | ||
21 | { | ||
22 | return list_entry(p->mnt_slave_list.next, struct vfsmount, mnt_slave); | ||
23 | } | ||
24 | |||
25 | static inline struct vfsmount *next_slave(struct vfsmount *p) | ||
26 | { | ||
27 | return list_entry(p->mnt_slave.next, struct vfsmount, mnt_slave); | ||
28 | } | ||
29 | |||
30 | static int do_make_slave(struct vfsmount *mnt) | ||
31 | { | ||
32 | struct vfsmount *peer_mnt = mnt, *master = mnt->mnt_master; | ||
33 | struct vfsmount *slave_mnt; | ||
34 | |||
35 | /* | ||
36 | * slave 'mnt' to a peer mount that has the | ||
37 | * same root dentry. If none is available than | ||
38 | * slave it to anything that is available. | ||
39 | */ | ||
40 | while ((peer_mnt = next_peer(peer_mnt)) != mnt && | ||
41 | peer_mnt->mnt_root != mnt->mnt_root) ; | ||
42 | |||
43 | if (peer_mnt == mnt) { | ||
44 | peer_mnt = next_peer(mnt); | ||
45 | if (peer_mnt == mnt) | ||
46 | peer_mnt = NULL; | ||
47 | } | ||
48 | list_del_init(&mnt->mnt_share); | ||
49 | |||
50 | if (peer_mnt) | ||
51 | master = peer_mnt; | ||
52 | |||
53 | if (master) { | ||
54 | list_for_each_entry(slave_mnt, &mnt->mnt_slave_list, mnt_slave) | ||
55 | slave_mnt->mnt_master = master; | ||
56 | list_del(&mnt->mnt_slave); | ||
57 | list_add(&mnt->mnt_slave, &master->mnt_slave_list); | ||
58 | list_splice(&mnt->mnt_slave_list, master->mnt_slave_list.prev); | ||
59 | INIT_LIST_HEAD(&mnt->mnt_slave_list); | ||
60 | } else { | ||
61 | struct list_head *p = &mnt->mnt_slave_list; | ||
62 | while (!list_empty(p)) { | ||
63 | slave_mnt = list_entry(p->next, | ||
64 | struct vfsmount, mnt_slave); | ||
65 | list_del_init(&slave_mnt->mnt_slave); | ||
66 | slave_mnt->mnt_master = NULL; | ||
67 | } | ||
68 | } | ||
69 | mnt->mnt_master = master; | ||
70 | CLEAR_MNT_SHARED(mnt); | ||
71 | INIT_LIST_HEAD(&mnt->mnt_slave_list); | ||
72 | return 0; | ||
73 | } | ||
74 | |||
75 | void change_mnt_propagation(struct vfsmount *mnt, int type) | ||
76 | { | ||
77 | if (type == MS_SHARED) { | ||
78 | set_mnt_shared(mnt); | ||
79 | return; | ||
80 | } | ||
81 | do_make_slave(mnt); | ||
82 | if (type != MS_SLAVE) { | ||
83 | list_del_init(&mnt->mnt_slave); | ||
84 | mnt->mnt_master = NULL; | ||
85 | if (type == MS_UNBINDABLE) | ||
86 | mnt->mnt_flags |= MNT_UNBINDABLE; | ||
87 | } | ||
88 | } | ||
89 | |||
90 | /* | ||
91 | * get the next mount in the propagation tree. | ||
92 | * @m: the mount seen last | ||
93 | * @origin: the original mount from where the tree walk initiated | ||
94 | */ | ||
95 | static struct vfsmount *propagation_next(struct vfsmount *m, | ||
96 | struct vfsmount *origin) | ||
97 | { | ||
98 | /* are there any slaves of this mount? */ | ||
99 | if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list)) | ||
100 | return first_slave(m); | ||
101 | |||
102 | while (1) { | ||
103 | struct vfsmount *next; | ||
104 | struct vfsmount *master = m->mnt_master; | ||
105 | |||
106 | if ( master == origin->mnt_master ) { | ||
107 | next = next_peer(m); | ||
108 | return ((next == origin) ? NULL : next); | ||
109 | } else if (m->mnt_slave.next != &master->mnt_slave_list) | ||
110 | return next_slave(m); | ||
111 | |||
112 | /* back at master */ | ||
113 | m = master; | ||
114 | } | ||
115 | } | ||
116 | |||
117 | /* | ||
118 | * return the source mount to be used for cloning | ||
119 | * | ||
120 | * @dest the current destination mount | ||
121 | * @last_dest the last seen destination mount | ||
122 | * @last_src the last seen source mount | ||
123 | * @type return CL_SLAVE if the new mount has to be | ||
124 | * cloned as a slave. | ||
125 | */ | ||
126 | static struct vfsmount *get_source(struct vfsmount *dest, | ||
127 | struct vfsmount *last_dest, | ||
128 | struct vfsmount *last_src, | ||
129 | int *type) | ||
130 | { | ||
131 | struct vfsmount *p_last_src = NULL; | ||
132 | struct vfsmount *p_last_dest = NULL; | ||
133 | *type = CL_PROPAGATION;; | ||
134 | |||
135 | if (IS_MNT_SHARED(dest)) | ||
136 | *type |= CL_MAKE_SHARED; | ||
137 | |||
138 | while (last_dest != dest->mnt_master) { | ||
139 | p_last_dest = last_dest; | ||
140 | p_last_src = last_src; | ||
141 | last_dest = last_dest->mnt_master; | ||
142 | last_src = last_src->mnt_master; | ||
143 | } | ||
144 | |||
145 | if (p_last_dest) { | ||
146 | do { | ||
147 | p_last_dest = next_peer(p_last_dest); | ||
148 | } while (IS_MNT_NEW(p_last_dest)); | ||
149 | } | ||
150 | |||
151 | if (dest != p_last_dest) { | ||
152 | *type |= CL_SLAVE; | ||
153 | return last_src; | ||
154 | } else | ||
155 | return p_last_src; | ||
156 | } | ||
157 | |||
158 | /* | ||
159 | * mount 'source_mnt' under the destination 'dest_mnt' at | ||
160 | * dentry 'dest_dentry'. And propagate that mount to | ||
161 | * all the peer and slave mounts of 'dest_mnt'. | ||
162 | * Link all the new mounts into a propagation tree headed at | ||
163 | * source_mnt. Also link all the new mounts using ->mnt_list | ||
164 | * headed at source_mnt's ->mnt_list | ||
165 | * | ||
166 | * @dest_mnt: destination mount. | ||
167 | * @dest_dentry: destination dentry. | ||
168 | * @source_mnt: source mount. | ||
169 | * @tree_list : list of heads of trees to be attached. | ||
170 | */ | ||
171 | int propagate_mnt(struct vfsmount *dest_mnt, struct dentry *dest_dentry, | ||
172 | struct vfsmount *source_mnt, struct list_head *tree_list) | ||
173 | { | ||
174 | struct vfsmount *m, *child; | ||
175 | int ret = 0; | ||
176 | struct vfsmount *prev_dest_mnt = dest_mnt; | ||
177 | struct vfsmount *prev_src_mnt = source_mnt; | ||
178 | LIST_HEAD(tmp_list); | ||
179 | LIST_HEAD(umount_list); | ||
180 | |||
181 | for (m = propagation_next(dest_mnt, dest_mnt); m; | ||
182 | m = propagation_next(m, dest_mnt)) { | ||
183 | int type; | ||
184 | struct vfsmount *source; | ||
185 | |||
186 | if (IS_MNT_NEW(m)) | ||
187 | continue; | ||
188 | |||
189 | source = get_source(m, prev_dest_mnt, prev_src_mnt, &type); | ||
190 | |||
191 | if (!(child = copy_tree(source, source->mnt_root, type))) { | ||
192 | ret = -ENOMEM; | ||
193 | list_splice(tree_list, tmp_list.prev); | ||
194 | goto out; | ||
195 | } | ||
196 | |||
197 | if (is_subdir(dest_dentry, m->mnt_root)) { | ||
198 | mnt_set_mountpoint(m, dest_dentry, child); | ||
199 | list_add_tail(&child->mnt_hash, tree_list); | ||
200 | } else { | ||
201 | /* | ||
202 | * This can happen if the parent mount was bind mounted | ||
203 | * on some subdirectory of a shared/slave mount. | ||
204 | */ | ||
205 | list_add_tail(&child->mnt_hash, &tmp_list); | ||
206 | } | ||
207 | prev_dest_mnt = m; | ||
208 | prev_src_mnt = child; | ||
209 | } | ||
210 | out: | ||
211 | spin_lock(&vfsmount_lock); | ||
212 | while (!list_empty(&tmp_list)) { | ||
213 | child = list_entry(tmp_list.next, struct vfsmount, mnt_hash); | ||
214 | list_del_init(&child->mnt_hash); | ||
215 | umount_tree(child, 0, &umount_list); | ||
216 | } | ||
217 | spin_unlock(&vfsmount_lock); | ||
218 | release_mounts(&umount_list); | ||
219 | return ret; | ||
220 | } | ||
221 | |||
222 | /* | ||
223 | * return true if the refcount is greater than count | ||
224 | */ | ||
225 | static inline int do_refcount_check(struct vfsmount *mnt, int count) | ||
226 | { | ||
227 | int mycount = atomic_read(&mnt->mnt_count); | ||
228 | return (mycount > count); | ||
229 | } | ||
230 | |||
231 | /* | ||
232 | * check if the mount 'mnt' can be unmounted successfully. | ||
233 | * @mnt: the mount to be checked for unmount | ||
234 | * NOTE: unmounting 'mnt' would naturally propagate to all | ||
235 | * other mounts its parent propagates to. | ||
236 | * Check if any of these mounts that **do not have submounts** | ||
237 | * have more references than 'refcnt'. If so return busy. | ||
238 | */ | ||
239 | int propagate_mount_busy(struct vfsmount *mnt, int refcnt) | ||
240 | { | ||
241 | struct vfsmount *m, *child; | ||
242 | struct vfsmount *parent = mnt->mnt_parent; | ||
243 | int ret = 0; | ||
244 | |||
245 | if (mnt == parent) | ||
246 | return do_refcount_check(mnt, refcnt); | ||
247 | |||
248 | /* | ||
249 | * quickly check if the current mount can be unmounted. | ||
250 | * If not, we don't have to go checking for all other | ||
251 | * mounts | ||
252 | */ | ||
253 | if (!list_empty(&mnt->mnt_mounts) || do_refcount_check(mnt, refcnt)) | ||
254 | return 1; | ||
255 | |||
256 | for (m = propagation_next(parent, parent); m; | ||
257 | m = propagation_next(m, parent)) { | ||
258 | child = __lookup_mnt(m, mnt->mnt_mountpoint, 0); | ||
259 | if (child && list_empty(&child->mnt_mounts) && | ||
260 | (ret = do_refcount_check(child, 1))) | ||
261 | break; | ||
262 | } | ||
263 | return ret; | ||
264 | } | ||
265 | |||
266 | /* | ||
267 | * NOTE: unmounting 'mnt' naturally propagates to all other mounts its | ||
268 | * parent propagates to. | ||
269 | */ | ||
270 | static void __propagate_umount(struct vfsmount *mnt) | ||
271 | { | ||
272 | struct vfsmount *parent = mnt->mnt_parent; | ||
273 | struct vfsmount *m; | ||
274 | |||
275 | BUG_ON(parent == mnt); | ||
276 | |||
277 | for (m = propagation_next(parent, parent); m; | ||
278 | m = propagation_next(m, parent)) { | ||
279 | |||
280 | struct vfsmount *child = __lookup_mnt(m, | ||
281 | mnt->mnt_mountpoint, 0); | ||
282 | /* | ||
283 | * umount the child only if the child has no | ||
284 | * other children | ||
285 | */ | ||
286 | if (child && list_empty(&child->mnt_mounts)) { | ||
287 | list_del(&child->mnt_hash); | ||
288 | list_add_tail(&child->mnt_hash, &mnt->mnt_hash); | ||
289 | } | ||
290 | } | ||
291 | } | ||
292 | |||
293 | /* | ||
294 | * collect all mounts that receive propagation from the mount in @list, | ||
295 | * and return these additional mounts in the same list. | ||
296 | * @list: the list of mounts to be unmounted. | ||
297 | */ | ||
298 | int propagate_umount(struct list_head *list) | ||
299 | { | ||
300 | struct vfsmount *mnt; | ||
301 | |||
302 | list_for_each_entry(mnt, list, mnt_hash) | ||
303 | __propagate_umount(mnt); | ||
304 | return 0; | ||
305 | } | ||
diff --git a/fs/pnode.h b/fs/pnode.h new file mode 100644 index 000000000000..020e1bb60fdb --- /dev/null +++ b/fs/pnode.h | |||
@@ -0,0 +1,37 @@ | |||
1 | /* | ||
2 | * linux/fs/pnode.h | ||
3 | * | ||
4 | * (C) Copyright IBM Corporation 2005. | ||
5 | * Released under GPL v2. | ||
6 | * | ||
7 | */ | ||
8 | #ifndef _LINUX_PNODE_H | ||
9 | #define _LINUX_PNODE_H | ||
10 | |||
11 | #include <linux/list.h> | ||
12 | #include <linux/mount.h> | ||
13 | |||
14 | #define IS_MNT_SHARED(mnt) (mnt->mnt_flags & MNT_SHARED) | ||
15 | #define IS_MNT_SLAVE(mnt) (mnt->mnt_master) | ||
16 | #define IS_MNT_NEW(mnt) (!mnt->mnt_namespace) | ||
17 | #define CLEAR_MNT_SHARED(mnt) (mnt->mnt_flags &= ~MNT_SHARED) | ||
18 | #define IS_MNT_UNBINDABLE(mnt) (mnt->mnt_flags & MNT_UNBINDABLE) | ||
19 | |||
20 | #define CL_EXPIRE 0x01 | ||
21 | #define CL_SLAVE 0x02 | ||
22 | #define CL_COPY_ALL 0x04 | ||
23 | #define CL_MAKE_SHARED 0x08 | ||
24 | #define CL_PROPAGATION 0x10 | ||
25 | |||
26 | static inline void set_mnt_shared(struct vfsmount *mnt) | ||
27 | { | ||
28 | mnt->mnt_flags &= ~MNT_PNODE_MASK; | ||
29 | mnt->mnt_flags |= MNT_SHARED; | ||
30 | } | ||
31 | |||
32 | void change_mnt_propagation(struct vfsmount *, int); | ||
33 | int propagate_mnt(struct vfsmount *, struct dentry *, struct vfsmount *, | ||
34 | struct list_head *); | ||
35 | int propagate_umount(struct list_head *); | ||
36 | int propagate_mount_busy(struct vfsmount *, int); | ||
37 | #endif /* _LINUX_PNODE_H */ | ||
diff --git a/fs/proc/base.c b/fs/proc/base.c index a170450aadb1..634355e16986 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
@@ -70,6 +70,7 @@ | |||
70 | #include <linux/seccomp.h> | 70 | #include <linux/seccomp.h> |
71 | #include <linux/cpuset.h> | 71 | #include <linux/cpuset.h> |
72 | #include <linux/audit.h> | 72 | #include <linux/audit.h> |
73 | #include <linux/poll.h> | ||
73 | #include "internal.h" | 74 | #include "internal.h" |
74 | 75 | ||
75 | /* | 76 | /* |
@@ -660,26 +661,38 @@ static struct file_operations proc_smaps_operations = { | |||
660 | #endif | 661 | #endif |
661 | 662 | ||
662 | extern struct seq_operations mounts_op; | 663 | extern struct seq_operations mounts_op; |
664 | struct proc_mounts { | ||
665 | struct seq_file m; | ||
666 | int event; | ||
667 | }; | ||
668 | |||
663 | static int mounts_open(struct inode *inode, struct file *file) | 669 | static int mounts_open(struct inode *inode, struct file *file) |
664 | { | 670 | { |
665 | struct task_struct *task = proc_task(inode); | 671 | struct task_struct *task = proc_task(inode); |
666 | int ret = seq_open(file, &mounts_op); | 672 | struct namespace *namespace; |
673 | struct proc_mounts *p; | ||
674 | int ret = -EINVAL; | ||
667 | 675 | ||
668 | if (!ret) { | 676 | task_lock(task); |
669 | struct seq_file *m = file->private_data; | 677 | namespace = task->namespace; |
670 | struct namespace *namespace; | 678 | if (namespace) |
671 | task_lock(task); | 679 | get_namespace(namespace); |
672 | namespace = task->namespace; | 680 | task_unlock(task); |
673 | if (namespace) | 681 | |
674 | get_namespace(namespace); | 682 | if (namespace) { |
675 | task_unlock(task); | 683 | ret = -ENOMEM; |
676 | 684 | p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL); | |
677 | if (namespace) | 685 | if (p) { |
678 | m->private = namespace; | 686 | file->private_data = &p->m; |
679 | else { | 687 | ret = seq_open(file, &mounts_op); |
680 | seq_release(inode, file); | 688 | if (!ret) { |
681 | ret = -EINVAL; | 689 | p->m.private = namespace; |
690 | p->event = namespace->event; | ||
691 | return 0; | ||
692 | } | ||
693 | kfree(p); | ||
682 | } | 694 | } |
695 | put_namespace(namespace); | ||
683 | } | 696 | } |
684 | return ret; | 697 | return ret; |
685 | } | 698 | } |
@@ -692,11 +705,30 @@ static int mounts_release(struct inode *inode, struct file *file) | |||
692 | return seq_release(inode, file); | 705 | return seq_release(inode, file); |
693 | } | 706 | } |
694 | 707 | ||
708 | static unsigned mounts_poll(struct file *file, poll_table *wait) | ||
709 | { | ||
710 | struct proc_mounts *p = file->private_data; | ||
711 | struct namespace *ns = p->m.private; | ||
712 | unsigned res = 0; | ||
713 | |||
714 | poll_wait(file, &ns->poll, wait); | ||
715 | |||
716 | spin_lock(&vfsmount_lock); | ||
717 | if (p->event != ns->event) { | ||
718 | p->event = ns->event; | ||
719 | res = POLLERR; | ||
720 | } | ||
721 | spin_unlock(&vfsmount_lock); | ||
722 | |||
723 | return res; | ||
724 | } | ||
725 | |||
695 | static struct file_operations proc_mounts_operations = { | 726 | static struct file_operations proc_mounts_operations = { |
696 | .open = mounts_open, | 727 | .open = mounts_open, |
697 | .read = seq_read, | 728 | .read = seq_read, |
698 | .llseek = seq_lseek, | 729 | .llseek = seq_lseek, |
699 | .release = mounts_release, | 730 | .release = mounts_release, |
731 | .poll = mounts_poll, | ||
700 | }; | 732 | }; |
701 | 733 | ||
702 | #define PROC_BLOCK_SIZE (3*1024) /* 4K page size but our output routines use some slack for overruns */ | 734 | #define PROC_BLOCK_SIZE (3*1024) /* 4K page size but our output routines use some slack for overruns */ |
diff --git a/fs/quota.c b/fs/quota.c index 1df7832b4e08..612e04db4b93 100644 --- a/fs/quota.c +++ b/fs/quota.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/security.h> | 15 | #include <linux/security.h> |
16 | #include <linux/syscalls.h> | 16 | #include <linux/syscalls.h> |
17 | #include <linux/buffer_head.h> | 17 | #include <linux/buffer_head.h> |
18 | #include <linux/quotaops.h> | ||
18 | 19 | ||
19 | /* Check validity of generic quotactl commands */ | 20 | /* Check validity of generic quotactl commands */ |
20 | static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id) | 21 | static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id) |
diff --git a/fs/seq_file.c b/fs/seq_file.c index 38ef913767ff..7c40570b71dc 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c | |||
@@ -28,13 +28,17 @@ | |||
28 | */ | 28 | */ |
29 | int seq_open(struct file *file, struct seq_operations *op) | 29 | int seq_open(struct file *file, struct seq_operations *op) |
30 | { | 30 | { |
31 | struct seq_file *p = kmalloc(sizeof(*p), GFP_KERNEL); | 31 | struct seq_file *p = file->private_data; |
32 | if (!p) | 32 | |
33 | return -ENOMEM; | 33 | if (!p) { |
34 | p = kmalloc(sizeof(*p), GFP_KERNEL); | ||
35 | if (!p) | ||
36 | return -ENOMEM; | ||
37 | file->private_data = p; | ||
38 | } | ||
34 | memset(p, 0, sizeof(*p)); | 39 | memset(p, 0, sizeof(*p)); |
35 | sema_init(&p->sem, 1); | 40 | sema_init(&p->sem, 1); |
36 | p->op = op; | 41 | p->op = op; |
37 | file->private_data = p; | ||
38 | 42 | ||
39 | /* | 43 | /* |
40 | * Wrappers around seq_open(e.g. swaps_open) need to be | 44 | * Wrappers around seq_open(e.g. swaps_open) need to be |
diff --git a/fs/smbfs/request.c b/fs/smbfs/request.c index 2d85dd7415bb..a0f296d9928a 100644 --- a/fs/smbfs/request.c +++ b/fs/smbfs/request.c | |||
@@ -786,8 +786,7 @@ int smb_request_recv(struct smb_sb_info *server) | |||
786 | /* We should never be called with any of these states */ | 786 | /* We should never be called with any of these states */ |
787 | case SMB_RECV_END: | 787 | case SMB_RECV_END: |
788 | case SMB_RECV_REQUEST: | 788 | case SMB_RECV_REQUEST: |
789 | server->rstate = SMB_RECV_END; | 789 | BUG(); |
790 | break; | ||
791 | } | 790 | } |
792 | 791 | ||
793 | if (result < 0) { | 792 | if (result < 0) { |
diff --git a/fs/smbfs/symlink.c b/fs/smbfs/symlink.c index 0c64bc3a0127..cdc53c4fb381 100644 --- a/fs/smbfs/symlink.c +++ b/fs/smbfs/symlink.c | |||
@@ -45,7 +45,7 @@ static void *smb_follow_link(struct dentry *dentry, struct nameidata *nd) | |||
45 | int len = smb_proc_read_link(server_from_dentry(dentry), | 45 | int len = smb_proc_read_link(server_from_dentry(dentry), |
46 | dentry, link, PATH_MAX - 1); | 46 | dentry, link, PATH_MAX - 1); |
47 | if (len < 0) { | 47 | if (len < 0) { |
48 | putname(link); | 48 | __putname(link); |
49 | link = ERR_PTR(len); | 49 | link = ERR_PTR(len); |
50 | } else { | 50 | } else { |
51 | link[len] = 0; | 51 | link[len] = 0; |
@@ -59,7 +59,7 @@ static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p) | |||
59 | { | 59 | { |
60 | char *s = nd_get_link(nd); | 60 | char *s = nd_get_link(nd); |
61 | if (!IS_ERR(s)) | 61 | if (!IS_ERR(s)) |
62 | putname(s); | 62 | __putname(s); |
63 | } | 63 | } |
64 | 64 | ||
65 | struct inode_operations smb_link_inode_operations = | 65 | struct inode_operations smb_link_inode_operations = |
diff --git a/fs/super.c b/fs/super.c index f60155ec7780..6689dded3c84 100644 --- a/fs/super.c +++ b/fs/super.c | |||
@@ -171,6 +171,7 @@ void deactivate_super(struct super_block *s) | |||
171 | if (atomic_dec_and_lock(&s->s_active, &sb_lock)) { | 171 | if (atomic_dec_and_lock(&s->s_active, &sb_lock)) { |
172 | s->s_count -= S_BIAS-1; | 172 | s->s_count -= S_BIAS-1; |
173 | spin_unlock(&sb_lock); | 173 | spin_unlock(&sb_lock); |
174 | DQUOT_OFF(s); | ||
174 | down_write(&s->s_umount); | 175 | down_write(&s->s_umount); |
175 | fs->kill_sb(s); | 176 | fs->kill_sb(s); |
176 | put_filesystem(fs); | 177 | put_filesystem(fs); |
@@ -474,8 +475,6 @@ rescan: | |||
474 | return NULL; | 475 | return NULL; |
475 | } | 476 | } |
476 | 477 | ||
477 | EXPORT_SYMBOL(user_get_super); | ||
478 | |||
479 | asmlinkage long sys_ustat(unsigned dev, struct ustat __user * ubuf) | 478 | asmlinkage long sys_ustat(unsigned dev, struct ustat __user * ubuf) |
480 | { | 479 | { |
481 | struct super_block *s; | 480 | struct super_block *s; |
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h index 0e54922daa09..663669810be6 100644 --- a/fs/udf/udf_sb.h +++ b/fs/udf/udf_sb.h | |||
@@ -39,8 +39,7 @@ static inline struct udf_sb_info *UDF_SB(struct super_block *sb) | |||
39 | {\ | 39 | {\ |
40 | if (UDF_SB(X))\ | 40 | if (UDF_SB(X))\ |
41 | {\ | 41 | {\ |
42 | if (UDF_SB_PARTMAPS(X))\ | 42 | kfree(UDF_SB_PARTMAPS(X));\ |
43 | kfree(UDF_SB_PARTMAPS(X));\ | ||
44 | UDF_SB_PARTMAPS(X) = NULL;\ | 43 | UDF_SB_PARTMAPS(X) = NULL;\ |
45 | }\ | 44 | }\ |
46 | } | 45 | } |
diff --git a/fs/ufs/super.c b/fs/ufs/super.c index f036d694ba5a..54828ebcf1ba 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c | |||
@@ -472,13 +472,14 @@ static int ufs_read_cylinder_structures (struct super_block *sb) { | |||
472 | return 1; | 472 | return 1; |
473 | 473 | ||
474 | failed: | 474 | failed: |
475 | if (base) kfree (base); | 475 | kfree (base); |
476 | if (sbi->s_ucg) { | 476 | if (sbi->s_ucg) { |
477 | for (i = 0; i < uspi->s_ncg; i++) | 477 | for (i = 0; i < uspi->s_ncg; i++) |
478 | if (sbi->s_ucg[i]) brelse (sbi->s_ucg[i]); | 478 | if (sbi->s_ucg[i]) |
479 | brelse (sbi->s_ucg[i]); | ||
479 | kfree (sbi->s_ucg); | 480 | kfree (sbi->s_ucg); |
480 | for (i = 0; i < UFS_MAX_GROUP_LOADED; i++) | 481 | for (i = 0; i < UFS_MAX_GROUP_LOADED; i++) |
481 | if (sbi->s_ucpi[i]) kfree (sbi->s_ucpi[i]); | 482 | kfree (sbi->s_ucpi[i]); |
482 | } | 483 | } |
483 | UFSD(("EXIT (FAILED)\n")) | 484 | UFSD(("EXIT (FAILED)\n")) |
484 | return 0; | 485 | return 0; |
@@ -981,9 +982,10 @@ magic_found: | |||
981 | dalloc_failed: | 982 | dalloc_failed: |
982 | iput(inode); | 983 | iput(inode); |
983 | failed: | 984 | failed: |
984 | if (ubh) ubh_brelse_uspi (uspi); | 985 | if (ubh) |
985 | if (uspi) kfree (uspi); | 986 | ubh_brelse_uspi (uspi); |
986 | if (sbi) kfree(sbi); | 987 | kfree (uspi); |
988 | kfree(sbi); | ||
987 | sb->s_fs_info = NULL; | 989 | sb->s_fs_info = NULL; |
988 | UFSD(("EXIT (FAILED)\n")) | 990 | UFSD(("EXIT (FAILED)\n")) |
989 | return -EINVAL; | 991 | return -EINVAL; |
diff --git a/fs/xattr.c b/fs/xattr.c index f6e00c0e114f..a9db22557998 100644 --- a/fs/xattr.c +++ b/fs/xattr.c | |||
@@ -74,8 +74,7 @@ setxattr(struct dentry *d, char __user *name, void __user *value, | |||
74 | } | 74 | } |
75 | out: | 75 | out: |
76 | up(&d->d_inode->i_sem); | 76 | up(&d->d_inode->i_sem); |
77 | if (kvalue) | 77 | kfree(kvalue); |
78 | kfree(kvalue); | ||
79 | return error; | 78 | return error; |
80 | } | 79 | } |
81 | 80 | ||
@@ -173,8 +172,7 @@ getxattr(struct dentry *d, char __user *name, void __user *value, size_t size) | |||
173 | error = -E2BIG; | 172 | error = -E2BIG; |
174 | } | 173 | } |
175 | out: | 174 | out: |
176 | if (kvalue) | 175 | kfree(kvalue); |
177 | kfree(kvalue); | ||
178 | return error; | 176 | return error; |
179 | } | 177 | } |
180 | 178 | ||
@@ -259,8 +257,7 @@ listxattr(struct dentry *d, char __user *list, size_t size) | |||
259 | error = -E2BIG; | 257 | error = -E2BIG; |
260 | } | 258 | } |
261 | out: | 259 | out: |
262 | if (klist) | 260 | kfree(klist); |
263 | kfree(klist); | ||
264 | return error; | 261 | return error; |
265 | } | 262 | } |
266 | 263 | ||
diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h index 8f82c1a20dc5..c64a29cdfff3 100644 --- a/fs/xfs/linux-2.6/kmem.h +++ b/fs/xfs/linux-2.6/kmem.h | |||
@@ -30,8 +30,8 @@ | |||
30 | #define KM_NOFS 0x0004u | 30 | #define KM_NOFS 0x0004u |
31 | #define KM_MAYFAIL 0x0008u | 31 | #define KM_MAYFAIL 0x0008u |
32 | 32 | ||
33 | #define kmem_zone kmem_cache_s | 33 | #define kmem_zone kmem_cache |
34 | #define kmem_zone_t kmem_cache_t | 34 | #define kmem_zone_t struct kmem_cache |
35 | 35 | ||
36 | typedef unsigned long xfs_pflags_t; | 36 | typedef unsigned long xfs_pflags_t; |
37 | 37 | ||