author | Linus Torvalds <torvalds@g5.osdl.org> | 2005-11-07 13:24:08 -0500
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2005-11-07 13:24:08 -0500
commit | b3ce1debe2685383a9ad6ace9c49869c3968c013 (patch)
tree | dcb606fac467d6ce78a9c608a1e0d2323af44f2b /fs/jffs2
parent | 5b2f7ffcb734d3046144dfbd5ac6d76254a9e522 (diff)
parent | c2965f1129ee54afcc4ef293ff0f25fa3a7e7392 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/tglx/mtd-2.6
Some manual fixups for clashing kfree() cleanups etc.
Diffstat (limited to 'fs/jffs2')
36 files changed, 4529 insertions, 2133 deletions
diff --git a/fs/jffs2/Makefile b/fs/jffs2/Makefile
index f1afe681ecd6..77dc5561a04e 100644
--- a/fs/jffs2/Makefile
+++ b/fs/jffs2/Makefile
@@ -1,7 +1,7 @@ | |||
1 | # | 1 | # |
2 | # Makefile for the Linux Journalling Flash File System v2 (JFFS2) | 2 | # Makefile for the Linux Journalling Flash File System v2 (JFFS2) |
3 | # | 3 | # |
4 | # $Id: Makefile.common,v 1.9 2005/02/09 09:23:53 pavlov Exp $ | 4 | # $Id: Makefile.common,v 1.11 2005/09/07 08:34:53 havasi Exp $ |
5 | # | 5 | # |
6 | 6 | ||
7 | obj-$(CONFIG_JFFS2_FS) += jffs2.o | 7 | obj-$(CONFIG_JFFS2_FS) += jffs2.o |
@@ -9,9 +9,10 @@ obj-$(CONFIG_JFFS2_FS) += jffs2.o | |||
9 | jffs2-y := compr.o dir.o file.o ioctl.o nodelist.o malloc.o | 9 | jffs2-y := compr.o dir.o file.o ioctl.o nodelist.o malloc.o |
10 | jffs2-y += read.o nodemgmt.o readinode.o write.o scan.o gc.o | 10 | jffs2-y += read.o nodemgmt.o readinode.o write.o scan.o gc.o |
11 | jffs2-y += symlink.o build.o erase.o background.o fs.o writev.o | 11 | jffs2-y += symlink.o build.o erase.o background.o fs.o writev.o |
12 | jffs2-y += super.o | 12 | jffs2-y += super.o debug.o |
13 | 13 | ||
14 | jffs2-$(CONFIG_JFFS2_FS_WRITEBUFFER) += wbuf.o | 14 | jffs2-$(CONFIG_JFFS2_FS_WRITEBUFFER) += wbuf.o |
15 | jffs2-$(CONFIG_JFFS2_RUBIN) += compr_rubin.o | 15 | jffs2-$(CONFIG_JFFS2_RUBIN) += compr_rubin.o |
16 | jffs2-$(CONFIG_JFFS2_RTIME) += compr_rtime.o | 16 | jffs2-$(CONFIG_JFFS2_RTIME) += compr_rtime.o |
17 | jffs2-$(CONFIG_JFFS2_ZLIB) += compr_zlib.o | 17 | jffs2-$(CONFIG_JFFS2_ZLIB) += compr_zlib.o |
18 | jffs2-$(CONFIG_JFFS2_SUMMARY) += summary.o | ||
diff --git a/fs/jffs2/TODO b/fs/jffs2/TODO
index 2bff82fd221f..d0e23b26fa50 100644
--- a/fs/jffs2/TODO
+++ b/fs/jffs2/TODO
@@ -1,5 +1,11 @@ | |||
1 | $Id: TODO,v 1.10 2002/09/09 16:31:21 dwmw2 Exp $ | 1 | $Id: TODO,v 1.18 2005/09/22 11:24:56 dedekind Exp $ |
2 | 2 | ||
3 | - support asynchronous operation -- add a per-fs 'reserved_space' count, | ||
4 | let each outstanding write reserve the _maximum_ amount of physical | ||
5 | space it could take. Let GC flush the outstanding writes because the | ||
6 | reservations will necessarily be pessimistic. With this we could even | ||
7 | do shared writable mmap, if we can have a fs hook for do_wp_page() to | ||
8 | make the reservation. | ||
3 | - disable compression in commit_write()? | 9 | - disable compression in commit_write()? |
4 | - fine-tune the allocation / GC thresholds | 10 | - fine-tune the allocation / GC thresholds |
5 | - chattr support - turning on/off and tuning compression per-inode | 11 | - chattr support - turning on/off and tuning compression per-inode |
@@ -11,26 +17,15 @@ $Id: TODO,v 1.10 2002/09/09 16:31:21 dwmw2 Exp $ | |||
11 | - test, test, test | 17 | - test, test, test |
12 | 18 | ||
13 | - NAND flash support: | 19 | - NAND flash support: |
14 | - flush_wbuf using GC to fill it, don't just pad. | 20 | - almost done :) |
15 | - Deal with write errors. Data don't get lost - we just have to write | 21 | - use bad block check instead of the hardwired byte check |
16 | the affected node(s) out again somewhere else. | ||
17 | - make fsync flush only if actually required | ||
18 | - make sys_sync() work. | ||
19 | - reboot notifier | ||
20 | - timed flush of old wbuf | ||
21 | - fix magical second arg of jffs2_flush_wbuf(). Split into two or more functions instead. | ||
22 | |||
23 | 22 | ||
24 | - Optimisations: | 23 | - Optimisations: |
25 | - Stop GC from decompressing and immediately recompressing nodes which could | 24 | - Split writes so they go to two separate blocks rather than just c->nextblock. |
26 | just be copied intact. (We now keep track of REF_PRISTINE flag. Easy now.) | 25 | By writing _new_ nodes to one block, and garbage-collected REF_PRISTINE |
27 | - Furthermore, in the case where it could be copied intact we don't even need | 26 | nodes to a different one, we can separate clean nodes from those which |
28 | to call iget() for it -- if we use (raw_node_raw->flash_offset & 2) as a flag | 27 | are likely to become dirty, and end up with blocks which are each far |
29 | to show a node can be copied intact and it's _not_ in icache, we could just do | 28 | closer to 100% or 0% clean, hence speeding up later GC progress dramatically. |
30 | it, fix up the next_in_ino list and move on. We would need a way to find out | ||
31 | _whether_ it's in icache though -- if it's in icache we also need to do the | ||
32 | fragment lists, etc. P'raps a flag or pointer in the jffs2_inode_cache could | ||
33 | help. (We have half of this now.) | ||
34 | - Stop keeping name in-core with struct jffs2_full_dirent. If we keep the hash in | 29 | - Stop keeping name in-core with struct jffs2_full_dirent. If we keep the hash in |
35 | the full dirent, we only need to go to the flash in lookup() when we think we've | 30 | the full dirent, we only need to go to the flash in lookup() when we think we've |
36 | got a match, and in readdir(). | 31 | got a match, and in readdir(). |
@@ -38,3 +33,8 @@ $Id: TODO,v 1.10 2002/09/09 16:31:21 dwmw2 Exp $ | |||
38 | - Remove totlen from jffs2_raw_node_ref? Need to have totlen passed into | 33 | - Remove totlen from jffs2_raw_node_ref? Need to have totlen passed into |
39 | jffs2_mark_node_obsolete(). Can all callers work it out? | 34 | jffs2_mark_node_obsolete(). Can all callers work it out? |
40 | - Remove size from jffs2_raw_node_frag. | 35 | - Remove size from jffs2_raw_node_frag. |
36 | |||
37 | dedekind: | ||
38 | 1. __jffs2_flush_wbuf() has a strange 'pad' parameter. Eliminate. | ||
39 | 2. get_sb()->build_fs()->scan() path... Why get_sb() removes scan()'s crap in | ||
40 | case of failure? scan() does not clean everything. Fix. | ||
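The new TODO entry at the top of this file describes pessimistic space reservation for asynchronous writes: each outstanding write reserves the maximum physical space it could take, and GC can flush outstanding writes because the reservations are pessimistic. Below is a minimal user-space sketch of that accounting idea; the struct and function names (toy_fs, toy_reserve, toy_commit) are invented for illustration and do not exist in JFFS2.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Toy model of the per-fs 'reserved_space' idea from the TODO entry:
 * every outstanding write claims its worst-case on-flash footprint up
 * front, and gives back the unused part once the node is actually written.
 * All names here are hypothetical. */
struct toy_fs {
	uint32_t free_size;      /* bytes believed writable */
	uint32_t reserved_space; /* worst-case bytes claimed by in-flight writes */
};

/* Reserve the maximum space a write could possibly take. */
static int toy_reserve(struct toy_fs *fs, uint32_t worst_case)
{
	if (fs->free_size - fs->reserved_space < worst_case)
		return -1; /* caller would have to wait for GC to flush/free space */
	fs->reserved_space += worst_case;
	return 0;
}

/* The write completed using 'actual' bytes (actual <= worst_case). */
static void toy_commit(struct toy_fs *fs, uint32_t worst_case, uint32_t actual)
{
	assert(actual <= worst_case);
	fs->reserved_space -= worst_case;
	fs->free_size -= actual;
}

int main(void)
{
	struct toy_fs fs = { .free_size = 65536, .reserved_space = 0 };

	if (toy_reserve(&fs, 4096) == 0)
		toy_commit(&fs, 4096, 1300); /* compressed node turned out smaller */

	printf("free=%u reserved=%u\n", fs.free_size, fs.reserved_space);
	return 0;
}
```
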
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c
index 8210ac16a368..7b77a9541125 100644
--- a/fs/jffs2/background.c
+++ b/fs/jffs2/background.c
@@ -51,7 +51,7 @@ int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c) | |||
51 | D1(printk(KERN_DEBUG "JFFS2: Garbage collect thread is pid %d\n", pid)); | 51 | D1(printk(KERN_DEBUG "JFFS2: Garbage collect thread is pid %d\n", pid)); |
52 | wait_for_completion(&c->gc_thread_start); | 52 | wait_for_completion(&c->gc_thread_start); |
53 | } | 53 | } |
54 | 54 | ||
55 | return ret; | 55 | return ret; |
56 | } | 56 | } |
57 | 57 | ||
@@ -101,7 +101,7 @@ static int jffs2_garbage_collect_thread(void *_c) | |||
101 | 101 | ||
102 | cond_resched(); | 102 | cond_resched(); |
103 | 103 | ||
104 | /* Put_super will send a SIGKILL and then wait on the sem. | 104 | /* Put_super will send a SIGKILL and then wait on the sem. |
105 | */ | 105 | */ |
106 | while (signal_pending(current)) { | 106 | while (signal_pending(current)) { |
107 | siginfo_t info; | 107 | siginfo_t info; |
diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c
index 97dc39796e2c..fff108bb118b 100644
--- a/fs/jffs2/build.c
+++ b/fs/jffs2/build.c
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: build.c,v 1.71 2005/07/12 16:37:08 dedekind Exp $ | 10 | * $Id: build.c,v 1.85 2005/11/07 11:14:38 gleixner Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -18,7 +18,8 @@ | |||
18 | #include <linux/mtd/mtd.h> | 18 | #include <linux/mtd/mtd.h> |
19 | #include "nodelist.h" | 19 | #include "nodelist.h" |
20 | 20 | ||
21 | static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *, struct jffs2_inode_cache *, struct jffs2_full_dirent **); | 21 | static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *, |
22 | struct jffs2_inode_cache *, struct jffs2_full_dirent **); | ||
22 | 23 | ||
23 | static inline struct jffs2_inode_cache * | 24 | static inline struct jffs2_inode_cache * |
24 | first_inode_chain(int *i, struct jffs2_sb_info *c) | 25 | first_inode_chain(int *i, struct jffs2_sb_info *c) |
@@ -46,11 +47,12 @@ next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c) | |||
46 | ic = next_inode(&i, ic, (c))) | 47 | ic = next_inode(&i, ic, (c))) |
47 | 48 | ||
48 | 49 | ||
49 | static inline void jffs2_build_inode_pass1(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) | 50 | static inline void jffs2_build_inode_pass1(struct jffs2_sb_info *c, |
51 | struct jffs2_inode_cache *ic) | ||
50 | { | 52 | { |
51 | struct jffs2_full_dirent *fd; | 53 | struct jffs2_full_dirent *fd; |
52 | 54 | ||
53 | D1(printk(KERN_DEBUG "jffs2_build_inode building directory inode #%u\n", ic->ino)); | 55 | dbg_fsbuild("building directory inode #%u\n", ic->ino); |
54 | 56 | ||
55 | /* For each child, increase nlink */ | 57 | /* For each child, increase nlink */ |
56 | for(fd = ic->scan_dents; fd; fd = fd->next) { | 58 | for(fd = ic->scan_dents; fd; fd = fd->next) { |
@@ -58,26 +60,23 @@ static inline void jffs2_build_inode_pass1(struct jffs2_sb_info *c, struct jffs2 | |||
58 | if (!fd->ino) | 60 | if (!fd->ino) |
59 | continue; | 61 | continue; |
60 | 62 | ||
61 | /* XXX: Can get high latency here with huge directories */ | 63 | /* we can get high latency here with huge directories */ |
62 | 64 | ||
63 | child_ic = jffs2_get_ino_cache(c, fd->ino); | 65 | child_ic = jffs2_get_ino_cache(c, fd->ino); |
64 | if (!child_ic) { | 66 | if (!child_ic) { |
65 | printk(KERN_NOTICE "Eep. Child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n", | 67 | dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n", |
66 | fd->name, fd->ino, ic->ino); | 68 | fd->name, fd->ino, ic->ino); |
67 | jffs2_mark_node_obsolete(c, fd->raw); | 69 | jffs2_mark_node_obsolete(c, fd->raw); |
68 | continue; | 70 | continue; |
69 | } | 71 | } |
70 | 72 | ||
71 | if (child_ic->nlink++ && fd->type == DT_DIR) { | 73 | if (child_ic->nlink++ && fd->type == DT_DIR) { |
72 | printk(KERN_NOTICE "Child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n", fd->name, fd->ino, ic->ino); | 74 | JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n", |
73 | if (fd->ino == 1 && ic->ino == 1) { | 75 | fd->name, fd->ino, ic->ino); |
74 | printk(KERN_NOTICE "This is mostly harmless, and probably caused by creating a JFFS2 image\n"); | 76 | /* TODO: What do we do about it? */ |
75 | printk(KERN_NOTICE "using a buggy version of mkfs.jffs2. Use at least v1.17.\n"); | ||
76 | } | ||
77 | /* What do we do about it? */ | ||
78 | } | 77 | } |
79 | D1(printk(KERN_DEBUG "Increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino)); | 78 | dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino); |
80 | /* Can't free them. We might need them in pass 2 */ | 79 | /* Can't free scan_dents so far. We might need them in pass 2 */ |
81 | } | 80 | } |
82 | } | 81 | } |
83 | 82 | ||
@@ -94,6 +93,8 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c) | |||
94 | struct jffs2_full_dirent *fd; | 93 | struct jffs2_full_dirent *fd; |
95 | struct jffs2_full_dirent *dead_fds = NULL; | 94 | struct jffs2_full_dirent *dead_fds = NULL; |
96 | 95 | ||
96 | dbg_fsbuild("build FS data structures\n"); | ||
97 | |||
97 | /* First, scan the medium and build all the inode caches with | 98 | /* First, scan the medium and build all the inode caches with |
98 | lists of physical nodes */ | 99 | lists of physical nodes */ |
99 | 100 | ||
@@ -103,60 +104,54 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c) | |||
103 | if (ret) | 104 | if (ret) |
104 | goto exit; | 105 | goto exit; |
105 | 106 | ||
106 | D1(printk(KERN_DEBUG "Scanned flash completely\n")); | 107 | dbg_fsbuild("scanned flash completely\n"); |
107 | D2(jffs2_dump_block_lists(c)); | 108 | jffs2_dbg_dump_block_lists_nolock(c); |
108 | 109 | ||
110 | dbg_fsbuild("pass 1 starting\n"); | ||
109 | c->flags |= JFFS2_SB_FLAG_BUILDING; | 111 | c->flags |= JFFS2_SB_FLAG_BUILDING; |
110 | /* Now scan the directory tree, increasing nlink according to every dirent found. */ | 112 | /* Now scan the directory tree, increasing nlink according to every dirent found. */ |
111 | for_each_inode(i, c, ic) { | 113 | for_each_inode(i, c, ic) { |
112 | D1(printk(KERN_DEBUG "Pass 1: ino #%u\n", ic->ino)); | ||
113 | |||
114 | D1(BUG_ON(ic->ino > c->highest_ino)); | ||
115 | |||
116 | if (ic->scan_dents) { | 114 | if (ic->scan_dents) { |
117 | jffs2_build_inode_pass1(c, ic); | 115 | jffs2_build_inode_pass1(c, ic); |
118 | cond_resched(); | 116 | cond_resched(); |
119 | } | 117 | } |
120 | } | 118 | } |
121 | 119 | ||
122 | D1(printk(KERN_DEBUG "Pass 1 complete\n")); | 120 | dbg_fsbuild("pass 1 complete\n"); |
123 | 121 | ||
124 | /* Next, scan for inodes with nlink == 0 and remove them. If | 122 | /* Next, scan for inodes with nlink == 0 and remove them. If |
125 | they were directories, then decrement the nlink of their | 123 | they were directories, then decrement the nlink of their |
126 | children too, and repeat the scan. As that's going to be | 124 | children too, and repeat the scan. As that's going to be |
127 | a fairly uncommon occurrence, it's not so evil to do it this | 125 | a fairly uncommon occurrence, it's not so evil to do it this |
128 | way. Recursion bad. */ | 126 | way. Recursion bad. */ |
129 | D1(printk(KERN_DEBUG "Pass 2 starting\n")); | 127 | dbg_fsbuild("pass 2 starting\n"); |
130 | 128 | ||
131 | for_each_inode(i, c, ic) { | 129 | for_each_inode(i, c, ic) { |
132 | D1(printk(KERN_DEBUG "Pass 2: ino #%u, nlink %d, ic %p, nodes %p\n", ic->ino, ic->nlink, ic, ic->nodes)); | ||
133 | if (ic->nlink) | 130 | if (ic->nlink) |
134 | continue; | 131 | continue; |
135 | 132 | ||
136 | jffs2_build_remove_unlinked_inode(c, ic, &dead_fds); | 133 | jffs2_build_remove_unlinked_inode(c, ic, &dead_fds); |
137 | cond_resched(); | 134 | cond_resched(); |
138 | } | 135 | } |
139 | 136 | ||
140 | D1(printk(KERN_DEBUG "Pass 2a starting\n")); | 137 | dbg_fsbuild("pass 2a starting\n"); |
141 | 138 | ||
142 | while (dead_fds) { | 139 | while (dead_fds) { |
143 | fd = dead_fds; | 140 | fd = dead_fds; |
144 | dead_fds = fd->next; | 141 | dead_fds = fd->next; |
145 | 142 | ||
146 | ic = jffs2_get_ino_cache(c, fd->ino); | 143 | ic = jffs2_get_ino_cache(c, fd->ino); |
147 | D1(printk(KERN_DEBUG "Removing dead_fd ino #%u (\"%s\"), ic at %p\n", fd->ino, fd->name, ic)); | ||
148 | 144 | ||
149 | if (ic) | 145 | if (ic) |
150 | jffs2_build_remove_unlinked_inode(c, ic, &dead_fds); | 146 | jffs2_build_remove_unlinked_inode(c, ic, &dead_fds); |
151 | jffs2_free_full_dirent(fd); | 147 | jffs2_free_full_dirent(fd); |
152 | } | 148 | } |
153 | 149 | ||
154 | D1(printk(KERN_DEBUG "Pass 2 complete\n")); | 150 | dbg_fsbuild("pass 2a complete\n"); |
155 | 151 | dbg_fsbuild("freeing temporary data structures\n"); | |
152 | |||
156 | /* Finally, we can scan again and free the dirent structs */ | 153 | /* Finally, we can scan again and free the dirent structs */ |
157 | for_each_inode(i, c, ic) { | 154 | for_each_inode(i, c, ic) { |
158 | D1(printk(KERN_DEBUG "Pass 3: ino #%u, ic %p, nodes %p\n", ic->ino, ic, ic->nodes)); | ||
159 | |||
160 | while(ic->scan_dents) { | 155 | while(ic->scan_dents) { |
161 | fd = ic->scan_dents; | 156 | fd = ic->scan_dents; |
162 | ic->scan_dents = fd->next; | 157 | ic->scan_dents = fd->next; |
@@ -166,9 +161,8 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c) | |||
166 | cond_resched(); | 161 | cond_resched(); |
167 | } | 162 | } |
168 | c->flags &= ~JFFS2_SB_FLAG_BUILDING; | 163 | c->flags &= ~JFFS2_SB_FLAG_BUILDING; |
169 | 164 | ||
170 | D1(printk(KERN_DEBUG "Pass 3 complete\n")); | 165 | dbg_fsbuild("FS build complete\n"); |
171 | D2(jffs2_dump_block_lists(c)); | ||
172 | 166 | ||
173 | /* Rotate the lists by some number to ensure wear levelling */ | 167 | /* Rotate the lists by some number to ensure wear levelling */ |
174 | jffs2_rotate_lists(c); | 168 | jffs2_rotate_lists(c); |
@@ -189,24 +183,26 @@ exit: | |||
189 | return ret; | 183 | return ret; |
190 | } | 184 | } |
191 | 185 | ||
192 | static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, struct jffs2_full_dirent **dead_fds) | 186 | static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c, |
187 | struct jffs2_inode_cache *ic, | ||
188 | struct jffs2_full_dirent **dead_fds) | ||
193 | { | 189 | { |
194 | struct jffs2_raw_node_ref *raw; | 190 | struct jffs2_raw_node_ref *raw; |
195 | struct jffs2_full_dirent *fd; | 191 | struct jffs2_full_dirent *fd; |
196 | 192 | ||
197 | D1(printk(KERN_DEBUG "JFFS2: Removing ino #%u with nlink == zero.\n", ic->ino)); | 193 | dbg_fsbuild("removing ino #%u with nlink == zero.\n", ic->ino); |
198 | 194 | ||
199 | raw = ic->nodes; | 195 | raw = ic->nodes; |
200 | while (raw != (void *)ic) { | 196 | while (raw != (void *)ic) { |
201 | struct jffs2_raw_node_ref *next = raw->next_in_ino; | 197 | struct jffs2_raw_node_ref *next = raw->next_in_ino; |
202 | D1(printk(KERN_DEBUG "obsoleting node at 0x%08x\n", ref_offset(raw))); | 198 | dbg_fsbuild("obsoleting node at 0x%08x\n", ref_offset(raw)); |
203 | jffs2_mark_node_obsolete(c, raw); | 199 | jffs2_mark_node_obsolete(c, raw); |
204 | raw = next; | 200 | raw = next; |
205 | } | 201 | } |
206 | 202 | ||
207 | if (ic->scan_dents) { | 203 | if (ic->scan_dents) { |
208 | int whinged = 0; | 204 | int whinged = 0; |
209 | D1(printk(KERN_DEBUG "Inode #%u was a directory which may have children...\n", ic->ino)); | 205 | dbg_fsbuild("inode #%u was a directory which may have children...\n", ic->ino); |
210 | 206 | ||
211 | while(ic->scan_dents) { | 207 | while(ic->scan_dents) { |
212 | struct jffs2_inode_cache *child_ic; | 208 | struct jffs2_inode_cache *child_ic; |
@@ -216,45 +212,43 @@ static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c, struct jf | |||
216 | 212 | ||
217 | if (!fd->ino) { | 213 | if (!fd->ino) { |
218 | /* It's a deletion dirent. Ignore it */ | 214 | /* It's a deletion dirent. Ignore it */ |
219 | D1(printk(KERN_DEBUG "Child \"%s\" is a deletion dirent, skipping...\n", fd->name)); | 215 | dbg_fsbuild("child \"%s\" is a deletion dirent, skipping...\n", fd->name); |
220 | jffs2_free_full_dirent(fd); | 216 | jffs2_free_full_dirent(fd); |
221 | continue; | 217 | continue; |
222 | } | 218 | } |
223 | if (!whinged) { | 219 | if (!whinged) |
224 | whinged = 1; | 220 | whinged = 1; |
225 | printk(KERN_NOTICE "Inode #%u was a directory with children - removing those too...\n", ic->ino); | ||
226 | } | ||
227 | 221 | ||
228 | D1(printk(KERN_DEBUG "Removing child \"%s\", ino #%u\n", | 222 | dbg_fsbuild("removing child \"%s\", ino #%u\n", fd->name, fd->ino); |
229 | fd->name, fd->ino)); | 223 | |
230 | |||
231 | child_ic = jffs2_get_ino_cache(c, fd->ino); | 224 | child_ic = jffs2_get_ino_cache(c, fd->ino); |
232 | if (!child_ic) { | 225 | if (!child_ic) { |
233 | printk(KERN_NOTICE "Cannot remove child \"%s\", ino #%u, because it doesn't exist\n", fd->name, fd->ino); | 226 | dbg_fsbuild("cannot remove child \"%s\", ino #%u, because it doesn't exist\n", |
227 | fd->name, fd->ino); | ||
234 | jffs2_free_full_dirent(fd); | 228 | jffs2_free_full_dirent(fd); |
235 | continue; | 229 | continue; |
236 | } | 230 | } |
237 | 231 | ||
238 | /* Reduce nlink of the child. If it's now zero, stick it on the | 232 | /* Reduce nlink of the child. If it's now zero, stick it on the |
239 | dead_fds list to be cleaned up later. Else just free the fd */ | 233 | dead_fds list to be cleaned up later. Else just free the fd */ |
240 | 234 | ||
241 | child_ic->nlink--; | 235 | child_ic->nlink--; |
242 | 236 | ||
243 | if (!child_ic->nlink) { | 237 | if (!child_ic->nlink) { |
244 | D1(printk(KERN_DEBUG "Inode #%u (\"%s\") has now got zero nlink. Adding to dead_fds list.\n", | 238 | dbg_fsbuild("inode #%u (\"%s\") has now got zero nlink, adding to dead_fds list.\n", |
245 | fd->ino, fd->name)); | 239 | fd->ino, fd->name); |
246 | fd->next = *dead_fds; | 240 | fd->next = *dead_fds; |
247 | *dead_fds = fd; | 241 | *dead_fds = fd; |
248 | } else { | 242 | } else { |
249 | D1(printk(KERN_DEBUG "Inode #%u (\"%s\") has now got nlink %d. Ignoring.\n", | 243 | dbg_fsbuild("inode #%u (\"%s\") has now got nlink %d. Ignoring.\n", |
250 | fd->ino, fd->name, child_ic->nlink)); | 244 | fd->ino, fd->name, child_ic->nlink); |
251 | jffs2_free_full_dirent(fd); | 245 | jffs2_free_full_dirent(fd); |
252 | } | 246 | } |
253 | } | 247 | } |
254 | } | 248 | } |
255 | 249 | ||
256 | /* | 250 | /* |
257 | We don't delete the inocache from the hash list and free it yet. | 251 | We don't delete the inocache from the hash list and free it yet. |
258 | The erase code will do that, when all the nodes are completely gone. | 252 | The erase code will do that, when all the nodes are completely gone. |
259 | */ | 253 | */ |
260 | } | 254 | } |
@@ -268,7 +262,7 @@ static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c) | |||
268 | because there's not enough free space... */ | 262 | because there's not enough free space... */ |
269 | c->resv_blocks_deletion = 2; | 263 | c->resv_blocks_deletion = 2; |
270 | 264 | ||
271 | /* Be conservative about how much space we need before we allow writes. | 265 | /* Be conservative about how much space we need before we allow writes. |
272 | On top of that which is required for deletia, require an extra 2% | 266 | On top of that which is required for deletia, require an extra 2% |
273 | of the medium to be available, for overhead caused by nodes being | 267 | of the medium to be available, for overhead caused by nodes being |
274 | split across blocks, etc. */ | 268 | split across blocks, etc. */ |
@@ -283,7 +277,7 @@ static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c) | |||
283 | 277 | ||
284 | c->resv_blocks_gctrigger = c->resv_blocks_write + 1; | 278 | c->resv_blocks_gctrigger = c->resv_blocks_write + 1; |
285 | 279 | ||
286 | /* When do we allow garbage collection to merge nodes to make | 280 | /* When do we allow garbage collection to merge nodes to make |
287 | long-term progress at the expense of short-term space exhaustion? */ | 281 | long-term progress at the expense of short-term space exhaustion? */ |
288 | c->resv_blocks_gcmerge = c->resv_blocks_deletion + 1; | 282 | c->resv_blocks_gcmerge = c->resv_blocks_deletion + 1; |
289 | 283 | ||
@@ -295,45 +289,45 @@ static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c) | |||
295 | trying to GC to make more space. It'll be a fruitless task */ | 289 | trying to GC to make more space. It'll be a fruitless task */ |
296 | c->nospc_dirty_size = c->sector_size + (c->flash_size / 100); | 290 | c->nospc_dirty_size = c->sector_size + (c->flash_size / 100); |
297 | 291 | ||
298 | D1(printk(KERN_DEBUG "JFFS2 trigger levels (size %d KiB, block size %d KiB, %d blocks)\n", | 292 | dbg_fsbuild("JFFS2 trigger levels (size %d KiB, block size %d KiB, %d blocks)\n", |
299 | c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks)); | 293 | c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks); |
300 | D1(printk(KERN_DEBUG "Blocks required to allow deletion: %d (%d KiB)\n", | 294 | dbg_fsbuild("Blocks required to allow deletion: %d (%d KiB)\n", |
301 | c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024)); | 295 | c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024); |
302 | D1(printk(KERN_DEBUG "Blocks required to allow writes: %d (%d KiB)\n", | 296 | dbg_fsbuild("Blocks required to allow writes: %d (%d KiB)\n", |
303 | c->resv_blocks_write, c->resv_blocks_write*c->sector_size/1024)); | 297 | c->resv_blocks_write, c->resv_blocks_write*c->sector_size/1024); |
304 | D1(printk(KERN_DEBUG "Blocks required to quiesce GC thread: %d (%d KiB)\n", | 298 | dbg_fsbuild("Blocks required to quiesce GC thread: %d (%d KiB)\n", |
305 | c->resv_blocks_gctrigger, c->resv_blocks_gctrigger*c->sector_size/1024)); | 299 | c->resv_blocks_gctrigger, c->resv_blocks_gctrigger*c->sector_size/1024); |
306 | D1(printk(KERN_DEBUG "Blocks required to allow GC merges: %d (%d KiB)\n", | 300 | dbg_fsbuild("Blocks required to allow GC merges: %d (%d KiB)\n", |
307 | c->resv_blocks_gcmerge, c->resv_blocks_gcmerge*c->sector_size/1024)); | 301 | c->resv_blocks_gcmerge, c->resv_blocks_gcmerge*c->sector_size/1024); |
308 | D1(printk(KERN_DEBUG "Blocks required to GC bad blocks: %d (%d KiB)\n", | 302 | dbg_fsbuild("Blocks required to GC bad blocks: %d (%d KiB)\n", |
309 | c->resv_blocks_gcbad, c->resv_blocks_gcbad*c->sector_size/1024)); | 303 | c->resv_blocks_gcbad, c->resv_blocks_gcbad*c->sector_size/1024); |
310 | D1(printk(KERN_DEBUG "Amount of dirty space required to GC: %d bytes\n", | 304 | dbg_fsbuild("Amount of dirty space required to GC: %d bytes\n", |
311 | c->nospc_dirty_size)); | 305 | c->nospc_dirty_size); |
312 | } | 306 | } |
313 | 307 | ||
314 | int jffs2_do_mount_fs(struct jffs2_sb_info *c) | 308 | int jffs2_do_mount_fs(struct jffs2_sb_info *c) |
315 | { | 309 | { |
310 | int ret; | ||
316 | int i; | 311 | int i; |
312 | int size; | ||
317 | 313 | ||
318 | c->free_size = c->flash_size; | 314 | c->free_size = c->flash_size; |
319 | c->nr_blocks = c->flash_size / c->sector_size; | 315 | c->nr_blocks = c->flash_size / c->sector_size; |
320 | if (c->mtd->flags & MTD_NO_VIRTBLOCKS) | 316 | size = sizeof(struct jffs2_eraseblock) * c->nr_blocks; |
321 | c->blocks = vmalloc(sizeof(struct jffs2_eraseblock) * c->nr_blocks); | 317 | #ifndef __ECOS |
318 | if (jffs2_blocks_use_vmalloc(c)) | ||
319 | c->blocks = vmalloc(size); | ||
322 | else | 320 | else |
323 | c->blocks = kmalloc(sizeof(struct jffs2_eraseblock) * c->nr_blocks, GFP_KERNEL); | 321 | #endif |
322 | c->blocks = kmalloc(size, GFP_KERNEL); | ||
324 | if (!c->blocks) | 323 | if (!c->blocks) |
325 | return -ENOMEM; | 324 | return -ENOMEM; |
325 | |||
326 | memset(c->blocks, 0, size); | ||
326 | for (i=0; i<c->nr_blocks; i++) { | 327 | for (i=0; i<c->nr_blocks; i++) { |
327 | INIT_LIST_HEAD(&c->blocks[i].list); | 328 | INIT_LIST_HEAD(&c->blocks[i].list); |
328 | c->blocks[i].offset = i * c->sector_size; | 329 | c->blocks[i].offset = i * c->sector_size; |
329 | c->blocks[i].free_size = c->sector_size; | 330 | c->blocks[i].free_size = c->sector_size; |
330 | c->blocks[i].dirty_size = 0; | ||
331 | c->blocks[i].wasted_size = 0; | ||
332 | c->blocks[i].unchecked_size = 0; | ||
333 | c->blocks[i].used_size = 0; | ||
334 | c->blocks[i].first_node = NULL; | ||
335 | c->blocks[i].last_node = NULL; | ||
336 | c->blocks[i].bad_count = 0; | ||
337 | } | 331 | } |
338 | 332 | ||
339 | INIT_LIST_HEAD(&c->clean_list); | 333 | INIT_LIST_HEAD(&c->clean_list); |
@@ -348,16 +342,23 @@ int jffs2_do_mount_fs(struct jffs2_sb_info *c) | |||
348 | INIT_LIST_HEAD(&c->bad_list); | 342 | INIT_LIST_HEAD(&c->bad_list); |
349 | INIT_LIST_HEAD(&c->bad_used_list); | 343 | INIT_LIST_HEAD(&c->bad_used_list); |
350 | c->highest_ino = 1; | 344 | c->highest_ino = 1; |
345 | c->summary = NULL; | ||
346 | |||
347 | ret = jffs2_sum_init(c); | ||
348 | if (ret) | ||
349 | return ret; | ||
351 | 350 | ||
352 | if (jffs2_build_filesystem(c)) { | 351 | if (jffs2_build_filesystem(c)) { |
353 | D1(printk(KERN_DEBUG "build_fs failed\n")); | 352 | dbg_fsbuild("build_fs failed\n"); |
354 | jffs2_free_ino_caches(c); | 353 | jffs2_free_ino_caches(c); |
355 | jffs2_free_raw_node_refs(c); | 354 | jffs2_free_raw_node_refs(c); |
356 | if (c->mtd->flags & MTD_NO_VIRTBLOCKS) { | 355 | #ifndef __ECOS |
356 | if (jffs2_blocks_use_vmalloc(c)) | ||
357 | vfree(c->blocks); | 357 | vfree(c->blocks); |
358 | } else { | 358 | else |
359 | #endif | ||
359 | kfree(c->blocks); | 360 | kfree(c->blocks); |
360 | } | 361 | |
361 | return -EIO; | 362 | return -EIO; |
362 | } | 363 | } |
363 | 364 | ||
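The jffs2_calc_trigger_levels() hunks above derive the reserved-block thresholds from one another: deletion needs 2 blocks, writes additionally need roughly 2% of the medium, the GC trigger is one block above the write threshold, GC merges are allowed one block above the deletion threshold, and GC is only worthwhile once a block's worth plus 1% of the medium is dirty. The stand-alone calculation below works that out for a hypothetical 16 MiB device with 64 KiB erase blocks; the exact resv_blocks_write formula is not visible in this hunk, so the "+2%" computation is an assumption based on the comment, not copied from the source.

```c
#include <stdint.h>
#include <stdio.h>

/* Rough model of the trigger levels discussed in jffs2_calc_trigger_levels().
 * The resv_write formula is an assumption based on the "extra 2% of the
 * medium" comment; the other relations appear verbatim in the diff. */
int main(void)
{
	uint32_t flash_size  = 16 * 1024 * 1024; /* 16 MiB, hypothetical device */
	uint32_t sector_size = 64 * 1024;        /* 64 KiB erase blocks */
	uint32_t nr_blocks   = flash_size / sector_size;

	uint32_t resv_deletion  = 2;
	uint32_t resv_write     = resv_deletion + (flash_size / 50) / sector_size; /* +~2%, assumed */
	uint32_t resv_gctrigger = resv_write + 1;
	uint32_t resv_gcmerge   = resv_deletion + 1;
	uint32_t nospc_dirty    = sector_size + flash_size / 100;

	printf("%u blocks of %u KiB\n", nr_blocks, sector_size / 1024);
	printf("deletion %u, write %u, gctrigger %u, gcmerge %u blocks\n",
	       resv_deletion, resv_write, resv_gctrigger, resv_gcmerge);
	printf("dirty space needed before GC is worthwhile: %u bytes\n", nospc_dirty);
	return 0;
}
```
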
diff --git a/fs/jffs2/compr.c b/fs/jffs2/compr.c
index af922a9618ac..e7944e665b9f 100644
--- a/fs/jffs2/compr.c
+++ b/fs/jffs2/compr.c
@@ -9,7 +9,7 @@ | |||
9 | * | 9 | * |
10 | * For licensing information, see the file 'LICENCE' in this directory. | 10 | * For licensing information, see the file 'LICENCE' in this directory. |
11 | * | 11 | * |
12 | * $Id: compr.c,v 1.42 2004/08/07 21:56:08 dwmw2 Exp $ | 12 | * $Id: compr.c,v 1.46 2005/11/07 11:14:38 gleixner Exp $ |
13 | * | 13 | * |
14 | */ | 14 | */ |
15 | 15 | ||
@@ -36,16 +36,16 @@ static uint32_t none_stat_compr_blocks=0,none_stat_decompr_blocks=0,none_stat_co | |||
36 | * data. | 36 | * data. |
37 | * | 37 | * |
38 | * Returns: Lower byte to be stored with data indicating compression type used. | 38 | * Returns: Lower byte to be stored with data indicating compression type used. |
39 | * Zero is used to show that the data could not be compressed - the | 39 | * Zero is used to show that the data could not be compressed - the |
40 | * compressed version was actually larger than the original. | 40 | * compressed version was actually larger than the original. |
41 | * Upper byte will be used later. (soon) | 41 | * Upper byte will be used later. (soon) |
42 | * | 42 | * |
43 | * If the cdata buffer isn't large enough to hold all the uncompressed data, | 43 | * If the cdata buffer isn't large enough to hold all the uncompressed data, |
44 | * jffs2_compress should compress as much as will fit, and should set | 44 | * jffs2_compress should compress as much as will fit, and should set |
45 | * *datalen accordingly to show the amount of data which were compressed. | 45 | * *datalen accordingly to show the amount of data which were compressed. |
46 | */ | 46 | */ |
47 | uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | 47 | uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f, |
48 | unsigned char *data_in, unsigned char **cpage_out, | 48 | unsigned char *data_in, unsigned char **cpage_out, |
49 | uint32_t *datalen, uint32_t *cdatalen) | 49 | uint32_t *datalen, uint32_t *cdatalen) |
50 | { | 50 | { |
51 | int ret = JFFS2_COMPR_NONE; | 51 | int ret = JFFS2_COMPR_NONE; |
@@ -164,7 +164,7 @@ uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
164 | } | 164 | } |
165 | 165 | ||
166 | int jffs2_decompress(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | 166 | int jffs2_decompress(struct jffs2_sb_info *c, struct jffs2_inode_info *f, |
167 | uint16_t comprtype, unsigned char *cdata_in, | 167 | uint16_t comprtype, unsigned char *cdata_in, |
168 | unsigned char *data_out, uint32_t cdatalen, uint32_t datalen) | 168 | unsigned char *data_out, uint32_t cdatalen, uint32_t datalen) |
169 | { | 169 | { |
170 | struct jffs2_compressor *this; | 170 | struct jffs2_compressor *this; |
@@ -298,7 +298,7 @@ char *jffs2_stats(void) | |||
298 | 298 | ||
299 | act_buf += sprintf(act_buf,"JFFS2 compressor statistics:\n"); | 299 | act_buf += sprintf(act_buf,"JFFS2 compressor statistics:\n"); |
300 | act_buf += sprintf(act_buf,"%10s ","none"); | 300 | act_buf += sprintf(act_buf,"%10s ","none"); |
301 | act_buf += sprintf(act_buf,"compr: %d blocks (%d) decompr: %d blocks\n", none_stat_compr_blocks, | 301 | act_buf += sprintf(act_buf,"compr: %d blocks (%d) decompr: %d blocks\n", none_stat_compr_blocks, |
302 | none_stat_compr_size, none_stat_decompr_blocks); | 302 | none_stat_compr_size, none_stat_decompr_blocks); |
303 | spin_lock(&jffs2_compressor_list_lock); | 303 | spin_lock(&jffs2_compressor_list_lock); |
304 | list_for_each_entry(this, &jffs2_compressor_list, list) { | 304 | list_for_each_entry(this, &jffs2_compressor_list, list) { |
@@ -307,8 +307,8 @@ char *jffs2_stats(void) | |||
307 | act_buf += sprintf(act_buf,"- "); | 307 | act_buf += sprintf(act_buf,"- "); |
308 | else | 308 | else |
309 | act_buf += sprintf(act_buf,"+ "); | 309 | act_buf += sprintf(act_buf,"+ "); |
310 | act_buf += sprintf(act_buf,"compr: %d blocks (%d/%d) decompr: %d blocks ", this->stat_compr_blocks, | 310 | act_buf += sprintf(act_buf,"compr: %d blocks (%d/%d) decompr: %d blocks ", this->stat_compr_blocks, |
311 | this->stat_compr_new_size, this->stat_compr_orig_size, | 311 | this->stat_compr_new_size, this->stat_compr_orig_size, |
312 | this->stat_decompr_blocks); | 312 | this->stat_decompr_blocks); |
313 | act_buf += sprintf(act_buf,"\n"); | 313 | act_buf += sprintf(act_buf,"\n"); |
314 | } | 314 | } |
@@ -317,7 +317,7 @@ char *jffs2_stats(void) | |||
317 | return buf; | 317 | return buf; |
318 | } | 318 | } |
319 | 319 | ||
320 | char *jffs2_get_compression_mode_name(void) | 320 | char *jffs2_get_compression_mode_name(void) |
321 | { | 321 | { |
322 | switch (jffs2_compression_mode) { | 322 | switch (jffs2_compression_mode) { |
323 | case JFFS2_COMPR_MODE_NONE: | 323 | case JFFS2_COMPR_MODE_NONE: |
@@ -330,7 +330,7 @@ char *jffs2_get_compression_mode_name(void) | |||
330 | return "unkown"; | 330 | return "unkown"; |
331 | } | 331 | } |
332 | 332 | ||
333 | int jffs2_set_compression_mode_name(const char *name) | 333 | int jffs2_set_compression_mode_name(const char *name) |
334 | { | 334 | { |
335 | if (!strcmp("none",name)) { | 335 | if (!strcmp("none",name)) { |
336 | jffs2_compression_mode = JFFS2_COMPR_MODE_NONE; | 336 | jffs2_compression_mode = JFFS2_COMPR_MODE_NONE; |
@@ -355,7 +355,7 @@ static int jffs2_compressor_Xable(const char *name, int disabled) | |||
355 | if (!strcmp(this->name, name)) { | 355 | if (!strcmp(this->name, name)) { |
356 | this->disabled = disabled; | 356 | this->disabled = disabled; |
357 | spin_unlock(&jffs2_compressor_list_lock); | 357 | spin_unlock(&jffs2_compressor_list_lock); |
358 | return 0; | 358 | return 0; |
359 | } | 359 | } |
360 | } | 360 | } |
361 | spin_unlock(&jffs2_compressor_list_lock); | 361 | spin_unlock(&jffs2_compressor_list_lock); |
@@ -385,7 +385,7 @@ int jffs2_set_compressor_priority(const char *name, int priority) | |||
385 | } | 385 | } |
386 | } | 386 | } |
387 | spin_unlock(&jffs2_compressor_list_lock); | 387 | spin_unlock(&jffs2_compressor_list_lock); |
388 | printk(KERN_WARNING "JFFS2: compressor %s not found.\n",name); | 388 | printk(KERN_WARNING "JFFS2: compressor %s not found.\n",name); |
389 | return 1; | 389 | return 1; |
390 | reinsert: | 390 | reinsert: |
391 | /* list is sorted in the order of priority, so if | 391 | /* list is sorted in the order of priority, so if |
@@ -412,7 +412,7 @@ void jffs2_free_comprbuf(unsigned char *comprbuf, unsigned char *orig) | |||
412 | kfree(comprbuf); | 412 | kfree(comprbuf); |
413 | } | 413 | } |
414 | 414 | ||
415 | int jffs2_compressors_init(void) | 415 | int jffs2_compressors_init(void) |
416 | { | 416 | { |
417 | /* Registering compressors */ | 417 | /* Registering compressors */ |
418 | #ifdef CONFIG_JFFS2_ZLIB | 418 | #ifdef CONFIG_JFFS2_ZLIB |
@@ -425,12 +425,6 @@ int jffs2_compressors_init(void) | |||
425 | jffs2_rubinmips_init(); | 425 | jffs2_rubinmips_init(); |
426 | jffs2_dynrubin_init(); | 426 | jffs2_dynrubin_init(); |
427 | #endif | 427 | #endif |
428 | #ifdef CONFIG_JFFS2_LZARI | ||
429 | jffs2_lzari_init(); | ||
430 | #endif | ||
431 | #ifdef CONFIG_JFFS2_LZO | ||
432 | jffs2_lzo_init(); | ||
433 | #endif | ||
434 | /* Setting default compression mode */ | 428 | /* Setting default compression mode */ |
435 | #ifdef CONFIG_JFFS2_CMODE_NONE | 429 | #ifdef CONFIG_JFFS2_CMODE_NONE |
436 | jffs2_compression_mode = JFFS2_COMPR_MODE_NONE; | 430 | jffs2_compression_mode = JFFS2_COMPR_MODE_NONE; |
@@ -446,15 +440,9 @@ int jffs2_compressors_init(void) | |||
446 | return 0; | 440 | return 0; |
447 | } | 441 | } |
448 | 442 | ||
449 | int jffs2_compressors_exit(void) | 443 | int jffs2_compressors_exit(void) |
450 | { | 444 | { |
451 | /* Unregistering compressors */ | 445 | /* Unregistering compressors */ |
452 | #ifdef CONFIG_JFFS2_LZO | ||
453 | jffs2_lzo_exit(); | ||
454 | #endif | ||
455 | #ifdef CONFIG_JFFS2_LZARI | ||
456 | jffs2_lzari_exit(); | ||
457 | #endif | ||
458 | #ifdef CONFIG_JFFS2_RUBIN | 446 | #ifdef CONFIG_JFFS2_RUBIN |
459 | jffs2_dynrubin_exit(); | 447 | jffs2_dynrubin_exit(); |
460 | jffs2_rubinmips_exit(); | 448 | jffs2_rubinmips_exit(); |
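The jffs2_set_compressor_priority() hunk above relies on the compressor list being kept sorted by priority ("list is sorted in the order of priority, so if ... reinsert"), and simply re-inserts an entry after its priority changes. Here is a generic user-space sketch of that sorted-reinsert pattern; struct entry and the function names are stand-ins invented for illustration, not the real struct jffs2_compressor API.

```c
#include <stdio.h>

/* Hypothetical stand-in for a descriptor kept in a list sorted by
 * descending priority, mirroring the reinsert pattern referred to in the
 * diff's "list is sorted in the order of priority" comment. */
struct entry {
	const char *name;
	int priority;
	struct entry *next;
};

/* Insert so that higher priorities come first. */
static void insert_sorted(struct entry **head, struct entry *e)
{
	struct entry **p = head;

	while (*p && (*p)->priority >= e->priority)
		p = &(*p)->next;
	e->next = *p;
	*p = e;
}

/* Changing a priority means: unlink, then reinsert in the right place. */
static void set_priority(struct entry **head, struct entry *e, int prio)
{
	struct entry **p = head;

	while (*p && *p != e)
		p = &(*p)->next;
	if (*p)
		*p = e->next; /* unlink */
	e->priority = prio;
	insert_sorted(head, e);
}

int main(void)
{
	struct entry zlib = { "zlib", 60 }, rtime = { "rtime", 50 };
	struct entry *head = NULL;

	insert_sorted(&head, &zlib);
	insert_sorted(&head, &rtime);
	set_priority(&head, &rtime, 70); /* rtime now listed first */

	for (struct entry *e = head; e; e = e->next)
		printf("%s (%d)\n", e->name, e->priority);
	return 0;
}
```
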
diff --git a/fs/jffs2/compr.h b/fs/jffs2/compr.h
index 89ceeed201eb..a77e830d85c5 100644
--- a/fs/jffs2/compr.h
+++ b/fs/jffs2/compr.h
@@ -4,10 +4,10 @@ | |||
4 | * Copyright (C) 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>, | 4 | * Copyright (C) 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>, |
5 | * University of Szeged, Hungary | 5 | * University of Szeged, Hungary |
6 | * | 6 | * |
7 | * For licensing information, see the file 'LICENCE' in the | 7 | * For licensing information, see the file 'LICENCE' in the |
8 | * jffs2 directory. | 8 | * jffs2 directory. |
9 | * | 9 | * |
10 | * $Id: compr.h,v 1.6 2004/07/16 15:17:57 dwmw2 Exp $ | 10 | * $Id: compr.h,v 1.9 2005/11/07 11:14:38 gleixner Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -103,13 +103,5 @@ void jffs2_rtime_exit(void); | |||
103 | int jffs2_zlib_init(void); | 103 | int jffs2_zlib_init(void); |
104 | void jffs2_zlib_exit(void); | 104 | void jffs2_zlib_exit(void); |
105 | #endif | 105 | #endif |
106 | #ifdef CONFIG_JFFS2_LZARI | ||
107 | int jffs2_lzari_init(void); | ||
108 | void jffs2_lzari_exit(void); | ||
109 | #endif | ||
110 | #ifdef CONFIG_JFFS2_LZO | ||
111 | int jffs2_lzo_init(void); | ||
112 | void jffs2_lzo_exit(void); | ||
113 | #endif | ||
114 | 106 | ||
115 | #endif /* __JFFS2_COMPR_H__ */ | 107 | #endif /* __JFFS2_COMPR_H__ */ |
diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
index 393129418666..2eb1b7428d16 100644
--- a/fs/jffs2/compr_rtime.c
+++ b/fs/jffs2/compr_rtime.c
@@ -24,8 +24,8 @@ | |||
24 | #include <linux/kernel.h> | 24 | #include <linux/kernel.h> |
25 | #include <linux/types.h> | 25 | #include <linux/types.h> |
26 | #include <linux/errno.h> | 26 | #include <linux/errno.h> |
27 | #include <linux/string.h> | 27 | #include <linux/string.h> |
28 | #include <linux/jffs2.h> | 28 | #include <linux/jffs2.h> |
29 | #include "compr.h" | 29 | #include "compr.h" |
30 | 30 | ||
31 | /* _compress returns the compressed size, -1 if bigger */ | 31 | /* _compress returns the compressed size, -1 if bigger */ |
@@ -38,19 +38,19 @@ static int jffs2_rtime_compress(unsigned char *data_in, | |||
38 | int outpos = 0; | 38 | int outpos = 0; |
39 | int pos=0; | 39 | int pos=0; |
40 | 40 | ||
41 | memset(positions,0,sizeof(positions)); | 41 | memset(positions,0,sizeof(positions)); |
42 | 42 | ||
43 | while (pos < (*sourcelen) && outpos <= (*dstlen)-2) { | 43 | while (pos < (*sourcelen) && outpos <= (*dstlen)-2) { |
44 | int backpos, runlen=0; | 44 | int backpos, runlen=0; |
45 | unsigned char value; | 45 | unsigned char value; |
46 | 46 | ||
47 | value = data_in[pos]; | 47 | value = data_in[pos]; |
48 | 48 | ||
49 | cpage_out[outpos++] = data_in[pos++]; | 49 | cpage_out[outpos++] = data_in[pos++]; |
50 | 50 | ||
51 | backpos = positions[value]; | 51 | backpos = positions[value]; |
52 | positions[value]=pos; | 52 | positions[value]=pos; |
53 | 53 | ||
54 | while ((backpos < pos) && (pos < (*sourcelen)) && | 54 | while ((backpos < pos) && (pos < (*sourcelen)) && |
55 | (data_in[pos]==data_in[backpos++]) && (runlen<255)) { | 55 | (data_in[pos]==data_in[backpos++]) && (runlen<255)) { |
56 | pos++; | 56 | pos++; |
@@ -63,12 +63,12 @@ static int jffs2_rtime_compress(unsigned char *data_in, | |||
63 | /* We failed */ | 63 | /* We failed */ |
64 | return -1; | 64 | return -1; |
65 | } | 65 | } |
66 | 66 | ||
67 | /* Tell the caller how much we managed to compress, and how much space it took */ | 67 | /* Tell the caller how much we managed to compress, and how much space it took */ |
68 | *sourcelen = pos; | 68 | *sourcelen = pos; |
69 | *dstlen = outpos; | 69 | *dstlen = outpos; |
70 | return 0; | 70 | return 0; |
71 | } | 71 | } |
72 | 72 | ||
73 | 73 | ||
74 | static int jffs2_rtime_decompress(unsigned char *data_in, | 74 | static int jffs2_rtime_decompress(unsigned char *data_in, |
@@ -79,19 +79,19 @@ static int jffs2_rtime_decompress(unsigned char *data_in, | |||
79 | short positions[256]; | 79 | short positions[256]; |
80 | int outpos = 0; | 80 | int outpos = 0; |
81 | int pos=0; | 81 | int pos=0; |
82 | 82 | ||
83 | memset(positions,0,sizeof(positions)); | 83 | memset(positions,0,sizeof(positions)); |
84 | 84 | ||
85 | while (outpos<destlen) { | 85 | while (outpos<destlen) { |
86 | unsigned char value; | 86 | unsigned char value; |
87 | int backoffs; | 87 | int backoffs; |
88 | int repeat; | 88 | int repeat; |
89 | 89 | ||
90 | value = data_in[pos++]; | 90 | value = data_in[pos++]; |
91 | cpage_out[outpos++] = value; /* first the verbatim copied byte */ | 91 | cpage_out[outpos++] = value; /* first the verbatim copied byte */ |
92 | repeat = data_in[pos++]; | 92 | repeat = data_in[pos++]; |
93 | backoffs = positions[value]; | 93 | backoffs = positions[value]; |
94 | 94 | ||
95 | positions[value]=outpos; | 95 | positions[value]=outpos; |
96 | if (repeat) { | 96 | if (repeat) { |
97 | if (backoffs + repeat >= outpos) { | 97 | if (backoffs + repeat >= outpos) { |
@@ -101,12 +101,12 @@ static int jffs2_rtime_decompress(unsigned char *data_in, | |||
101 | } | 101 | } |
102 | } else { | 102 | } else { |
103 | memcpy(&cpage_out[outpos],&cpage_out[backoffs],repeat); | 103 | memcpy(&cpage_out[outpos],&cpage_out[backoffs],repeat); |
104 | outpos+=repeat; | 104 | outpos+=repeat; |
105 | } | 105 | } |
106 | } | 106 | } |
107 | } | 107 | } |
108 | return 0; | 108 | return 0; |
109 | } | 109 | } |
110 | 110 | ||
111 | static struct jffs2_compressor jffs2_rtime_comp = { | 111 | static struct jffs2_compressor jffs2_rtime_comp = { |
112 | .priority = JFFS2_RTIME_PRIORITY, | 112 | .priority = JFFS2_RTIME_PRIORITY, |
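The rtime scheme shown above is compact: each literal byte is followed by the length of the run that repeats the data seen at the previous position of that byte value. The sketch below is a stand-alone user-space rendering of the compress/decompress logic visible in the diff, for experimentation only; it is not the kernel code, and the overlapping-copy branch of the decompressor is reconstructed from context since that part falls outside the hunk.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* User-space sketch of the jffs2 'rtime' idea: literal byte, then a
 * run length measured against the last position that byte was seen. */
static int rtime_compress(const uint8_t *in, uint8_t *out,
			  uint32_t *srclen, uint32_t *dstlen)
{
	uint16_t positions[256] = { 0 };
	uint32_t pos = 0, outpos = 0;

	while (pos < *srclen && outpos <= *dstlen - 2) {
		uint8_t value = in[pos];
		uint32_t backpos;
		int runlen = 0;

		out[outpos++] = in[pos++];
		backpos = positions[value];
		positions[value] = pos;
		while (backpos < pos && pos < *srclen &&
		       in[pos] == in[backpos++] && runlen < 255) {
			pos++;
			runlen++;
		}
		out[outpos++] = runlen;
	}
	if (outpos >= pos)
		return -1; /* didn't actually shrink anything */
	*srclen = pos;
	*dstlen = outpos;
	return 0;
}

static void rtime_decompress(const uint8_t *in, uint8_t *out, uint32_t destlen)
{
	uint16_t positions[256] = { 0 };
	uint32_t pos = 0, outpos = 0;

	while (outpos < destlen) {
		uint8_t value = in[pos++];
		uint32_t repeat, backoffs;

		out[outpos++] = value;
		repeat = in[pos++];
		backoffs = positions[value];
		positions[value] = outpos;
		if (!repeat)
			continue;
		if (backoffs + repeat >= outpos) {
			/* overlapping run: copy byte by byte (reconstructed branch) */
			while (repeat--)
				out[outpos++] = out[backoffs++];
		} else {
			memcpy(&out[outpos], &out[backoffs], repeat);
			outpos += repeat;
		}
	}
}

int main(void)
{
	uint8_t in[] = "aaaaaaaaaaaaaaaabbbbbbbbbbbbbbbb";
	uint8_t comp[64], decomp[64] = { 0 };
	uint32_t srclen = sizeof(in), dstlen = sizeof(comp);

	if (rtime_compress(in, comp, &srclen, &dstlen) == 0) {
		rtime_decompress(comp, decomp, srclen);
		printf("%u -> %u bytes, roundtrip %s\n", srclen, dstlen,
		       memcmp(in, decomp, srclen) ? "FAILED" : "ok");
	}
	return 0;
}
```
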
diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
index 09422388fb96..e792e675d624 100644
--- a/fs/jffs2/compr_rubin.c
+++ b/fs/jffs2/compr_rubin.c
@@ -11,7 +11,6 @@ | |||
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
14 | |||
15 | #include <linux/string.h> | 14 | #include <linux/string.h> |
16 | #include <linux/types.h> | 15 | #include <linux/types.h> |
17 | #include <linux/jffs2.h> | 16 | #include <linux/jffs2.h> |
@@ -20,7 +19,7 @@ | |||
20 | #include "compr.h" | 19 | #include "compr.h" |
21 | 20 | ||
22 | static void init_rubin(struct rubin_state *rs, int div, int *bits) | 21 | static void init_rubin(struct rubin_state *rs, int div, int *bits) |
23 | { | 22 | { |
24 | int c; | 23 | int c; |
25 | 24 | ||
26 | rs->q = 0; | 25 | rs->q = 0; |
@@ -40,7 +39,7 @@ static int encode(struct rubin_state *rs, long A, long B, int symbol) | |||
40 | 39 | ||
41 | while ((rs->q >= UPPER_BIT_RUBIN) || ((rs->p + rs->q) <= UPPER_BIT_RUBIN)) { | 40 | while ((rs->q >= UPPER_BIT_RUBIN) || ((rs->p + rs->q) <= UPPER_BIT_RUBIN)) { |
42 | rs->bit_number++; | 41 | rs->bit_number++; |
43 | 42 | ||
44 | ret = pushbit(&rs->pp, (rs->q & UPPER_BIT_RUBIN) ? 1 : 0, 0); | 43 | ret = pushbit(&rs->pp, (rs->q & UPPER_BIT_RUBIN) ? 1 : 0, 0); |
45 | if (ret) | 44 | if (ret) |
46 | return ret; | 45 | return ret; |
@@ -68,7 +67,7 @@ static int encode(struct rubin_state *rs, long A, long B, int symbol) | |||
68 | 67 | ||
69 | 68 | ||
70 | static void end_rubin(struct rubin_state *rs) | 69 | static void end_rubin(struct rubin_state *rs) |
71 | { | 70 | { |
72 | 71 | ||
73 | int i; | 72 | int i; |
74 | 73 | ||
@@ -82,7 +81,7 @@ static void end_rubin(struct rubin_state *rs) | |||
82 | 81 | ||
83 | static void init_decode(struct rubin_state *rs, int div, int *bits) | 82 | static void init_decode(struct rubin_state *rs, int div, int *bits) |
84 | { | 83 | { |
85 | init_rubin(rs, div, bits); | 84 | init_rubin(rs, div, bits); |
86 | 85 | ||
87 | /* behalve lower */ | 86 | /* behalve lower */ |
88 | rs->rec_q = 0; | 87 | rs->rec_q = 0; |
@@ -188,7 +187,7 @@ static int in_byte(struct rubin_state *rs) | |||
188 | 187 | ||
189 | 188 | ||
190 | 189 | ||
191 | static int rubin_do_compress(int bit_divider, int *bits, unsigned char *data_in, | 190 | static int rubin_do_compress(int bit_divider, int *bits, unsigned char *data_in, |
192 | unsigned char *cpage_out, uint32_t *sourcelen, uint32_t *dstlen) | 191 | unsigned char *cpage_out, uint32_t *sourcelen, uint32_t *dstlen) |
193 | { | 192 | { |
194 | int outpos = 0; | 193 | int outpos = 0; |
@@ -198,31 +197,31 @@ static int rubin_do_compress(int bit_divider, int *bits, unsigned char *data_in, | |||
198 | init_pushpull(&rs.pp, cpage_out, *dstlen * 8, 0, 32); | 197 | init_pushpull(&rs.pp, cpage_out, *dstlen * 8, 0, 32); |
199 | 198 | ||
200 | init_rubin(&rs, bit_divider, bits); | 199 | init_rubin(&rs, bit_divider, bits); |
201 | 200 | ||
202 | while (pos < (*sourcelen) && !out_byte(&rs, data_in[pos])) | 201 | while (pos < (*sourcelen) && !out_byte(&rs, data_in[pos])) |
203 | pos++; | 202 | pos++; |
204 | 203 | ||
205 | end_rubin(&rs); | 204 | end_rubin(&rs); |
206 | 205 | ||
207 | if (outpos > pos) { | 206 | if (outpos > pos) { |
208 | /* We failed */ | 207 | /* We failed */ |
209 | return -1; | 208 | return -1; |
210 | } | 209 | } |
211 | 210 | ||
212 | /* Tell the caller how much we managed to compress, | 211 | /* Tell the caller how much we managed to compress, |
213 | * and how much space it took */ | 212 | * and how much space it took */ |
214 | 213 | ||
215 | outpos = (pushedbits(&rs.pp)+7)/8; | 214 | outpos = (pushedbits(&rs.pp)+7)/8; |
216 | 215 | ||
217 | if (outpos >= pos) | 216 | if (outpos >= pos) |
218 | return -1; /* We didn't actually compress */ | 217 | return -1; /* We didn't actually compress */ |
219 | *sourcelen = pos; | 218 | *sourcelen = pos; |
220 | *dstlen = outpos; | 219 | *dstlen = outpos; |
221 | return 0; | 220 | return 0; |
222 | } | 221 | } |
223 | #if 0 | 222 | #if 0 |
224 | /* _compress returns the compressed size, -1 if bigger */ | 223 | /* _compress returns the compressed size, -1 if bigger */ |
225 | int jffs2_rubinmips_compress(unsigned char *data_in, unsigned char *cpage_out, | 224 | int jffs2_rubinmips_compress(unsigned char *data_in, unsigned char *cpage_out, |
226 | uint32_t *sourcelen, uint32_t *dstlen, void *model) | 225 | uint32_t *sourcelen, uint32_t *dstlen, void *model) |
227 | { | 226 | { |
228 | return rubin_do_compress(BIT_DIVIDER_MIPS, bits_mips, data_in, cpage_out, sourcelen, dstlen); | 227 | return rubin_do_compress(BIT_DIVIDER_MIPS, bits_mips, data_in, cpage_out, sourcelen, dstlen); |
@@ -277,7 +276,7 @@ static int jffs2_dynrubin_compress(unsigned char *data_in, | |||
277 | } | 276 | } |
278 | 277 | ||
279 | ret = rubin_do_compress(256, bits, data_in, cpage_out+8, &mysrclen, &mydstlen); | 278 | ret = rubin_do_compress(256, bits, data_in, cpage_out+8, &mysrclen, &mydstlen); |
280 | if (ret) | 279 | if (ret) |
281 | return ret; | 280 | return ret; |
282 | 281 | ||
283 | /* Add back the 8 bytes we took for the probabilities */ | 282 | /* Add back the 8 bytes we took for the probabilities */ |
@@ -293,19 +292,19 @@ static int jffs2_dynrubin_compress(unsigned char *data_in, | |||
293 | return 0; | 292 | return 0; |
294 | } | 293 | } |
295 | 294 | ||
296 | static void rubin_do_decompress(int bit_divider, int *bits, unsigned char *cdata_in, | 295 | static void rubin_do_decompress(int bit_divider, int *bits, unsigned char *cdata_in, |
297 | unsigned char *page_out, uint32_t srclen, uint32_t destlen) | 296 | unsigned char *page_out, uint32_t srclen, uint32_t destlen) |
298 | { | 297 | { |
299 | int outpos = 0; | 298 | int outpos = 0; |
300 | struct rubin_state rs; | 299 | struct rubin_state rs; |
301 | 300 | ||
302 | init_pushpull(&rs.pp, cdata_in, srclen, 0, 0); | 301 | init_pushpull(&rs.pp, cdata_in, srclen, 0, 0); |
303 | init_decode(&rs, bit_divider, bits); | 302 | init_decode(&rs, bit_divider, bits); |
304 | 303 | ||
305 | while (outpos < destlen) { | 304 | while (outpos < destlen) { |
306 | page_out[outpos++] = in_byte(&rs); | 305 | page_out[outpos++] = in_byte(&rs); |
307 | } | 306 | } |
308 | } | 307 | } |
309 | 308 | ||
310 | 309 | ||
311 | static int jffs2_rubinmips_decompress(unsigned char *data_in, | 310 | static int jffs2_rubinmips_decompress(unsigned char *data_in, |
diff --git a/fs/jffs2/compr_rubin.h b/fs/jffs2/compr_rubin.h
index cf51e34f6574..bf1a93451621 100644
--- a/fs/jffs2/compr_rubin.h
+++ b/fs/jffs2/compr_rubin.h
@@ -1,7 +1,7 @@ | |||
1 | /* Rubin encoder/decoder header */ | 1 | /* Rubin encoder/decoder header */ |
2 | /* work started at : aug 3, 1994 */ | 2 | /* work started at : aug 3, 1994 */ |
3 | /* last modification : aug 15, 1994 */ | 3 | /* last modification : aug 15, 1994 */ |
4 | /* $Id: compr_rubin.h,v 1.6 2002/01/25 01:49:26 dwmw2 Exp $ */ | 4 | /* $Id: compr_rubin.h,v 1.7 2005/11/07 11:14:38 gleixner Exp $ */ |
5 | 5 | ||
6 | #include "pushpull.h" | 6 | #include "pushpull.h" |
7 | 7 | ||
@@ -11,8 +11,8 @@ | |||
11 | 11 | ||
12 | 12 | ||
13 | struct rubin_state { | 13 | struct rubin_state { |
14 | unsigned long p; | 14 | unsigned long p; |
15 | unsigned long q; | 15 | unsigned long q; |
16 | unsigned long rec_q; | 16 | unsigned long rec_q; |
17 | long bit_number; | 17 | long bit_number; |
18 | struct pushpull pp; | 18 | struct pushpull pp; |
diff --git a/fs/jffs2/compr_zlib.c b/fs/jffs2/compr_zlib.c
index 83f7e0788fd0..4db8be8e90cc 100644
--- a/fs/jffs2/compr_zlib.c
+++ b/fs/jffs2/compr_zlib.c
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: compr_zlib.c,v 1.31 2005/05/20 19:30:06 gleixner Exp $ | 10 | * $Id: compr_zlib.c,v 1.32 2005/11/07 11:14:38 gleixner Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -24,11 +24,11 @@ | |||
24 | #include "nodelist.h" | 24 | #include "nodelist.h" |
25 | #include "compr.h" | 25 | #include "compr.h" |
26 | 26 | ||
27 | /* Plan: call deflate() with avail_in == *sourcelen, | 27 | /* Plan: call deflate() with avail_in == *sourcelen, |
28 | avail_out = *dstlen - 12 and flush == Z_FINISH. | 28 | avail_out = *dstlen - 12 and flush == Z_FINISH. |
29 | If it doesn't manage to finish, call it again with | 29 | If it doesn't manage to finish, call it again with |
30 | avail_in == 0 and avail_out set to the remaining 12 | 30 | avail_in == 0 and avail_out set to the remaining 12 |
31 | bytes for it to clean up. | 31 | bytes for it to clean up. |
32 | Q: Is 12 bytes sufficient? | 32 | Q: Is 12 bytes sufficient? |
33 | */ | 33 | */ |
34 | #define STREAM_END_SPACE 12 | 34 | #define STREAM_END_SPACE 12 |
@@ -89,7 +89,7 @@ static int jffs2_zlib_compress(unsigned char *data_in, | |||
89 | 89 | ||
90 | def_strm.next_in = data_in; | 90 | def_strm.next_in = data_in; |
91 | def_strm.total_in = 0; | 91 | def_strm.total_in = 0; |
92 | 92 | ||
93 | def_strm.next_out = cpage_out; | 93 | def_strm.next_out = cpage_out; |
94 | def_strm.total_out = 0; | 94 | def_strm.total_out = 0; |
95 | 95 | ||
@@ -99,7 +99,7 @@ static int jffs2_zlib_compress(unsigned char *data_in, | |||
99 | D1(printk(KERN_DEBUG "calling deflate with avail_in %d, avail_out %d\n", | 99 | D1(printk(KERN_DEBUG "calling deflate with avail_in %d, avail_out %d\n", |
100 | def_strm.avail_in, def_strm.avail_out)); | 100 | def_strm.avail_in, def_strm.avail_out)); |
101 | ret = zlib_deflate(&def_strm, Z_PARTIAL_FLUSH); | 101 | ret = zlib_deflate(&def_strm, Z_PARTIAL_FLUSH); |
102 | D1(printk(KERN_DEBUG "deflate returned with avail_in %d, avail_out %d, total_in %ld, total_out %ld\n", | 102 | D1(printk(KERN_DEBUG "deflate returned with avail_in %d, avail_out %d, total_in %ld, total_out %ld\n", |
103 | def_strm.avail_in, def_strm.avail_out, def_strm.total_in, def_strm.total_out)); | 103 | def_strm.avail_in, def_strm.avail_out, def_strm.total_in, def_strm.total_out)); |
104 | if (ret != Z_OK) { | 104 | if (ret != Z_OK) { |
105 | D1(printk(KERN_DEBUG "deflate in loop returned %d\n", ret)); | 105 | D1(printk(KERN_DEBUG "deflate in loop returned %d\n", ret)); |
@@ -150,7 +150,7 @@ static int jffs2_zlib_decompress(unsigned char *data_in, | |||
150 | inf_strm.next_in = data_in; | 150 | inf_strm.next_in = data_in; |
151 | inf_strm.avail_in = srclen; | 151 | inf_strm.avail_in = srclen; |
152 | inf_strm.total_in = 0; | 152 | inf_strm.total_in = 0; |
153 | 153 | ||
154 | inf_strm.next_out = cpage_out; | 154 | inf_strm.next_out = cpage_out; |
155 | inf_strm.avail_out = destlen; | 155 | inf_strm.avail_out = destlen; |
156 | inf_strm.total_out = 0; | 156 | inf_strm.total_out = 0; |
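The "Plan" comment at the top of compr_zlib.c describes holding back STREAM_END_SPACE (12) output bytes so deflate can always finish the stream: hand it all the input with Z_FINISH, and if it cannot finish, call it again with the reserved 12 bytes. Below is a user-space sketch of that two-step pattern using the stock zlib API (deflateInit/deflate/deflateEnd) rather than the kernel's zlib_deflate wrappers; the function name is made up for illustration.

```c
#include <stdio.h>
#include <string.h>
#include <zlib.h> /* link with -lz */

#define STREAM_END_SPACE 12 /* as in the comment above */

/* Give deflate all the input but hold back STREAM_END_SPACE output bytes,
 * then let it finish into that reserved tail if it needs to.
 * Returns the compressed length, or -1 on failure. */
static int compress_with_reserve(const unsigned char *in, uLong inlen,
				 unsigned char *out, uLong outlen)
{
	z_stream s;
	int ret, outsize;

	memset(&s, 0, sizeof(s));
	if (deflateInit(&s, Z_DEFAULT_COMPRESSION) != Z_OK)
		return -1;

	s.next_in = (unsigned char *)in;
	s.avail_in = inlen;
	s.next_out = out;
	s.avail_out = outlen - STREAM_END_SPACE;

	ret = deflate(&s, Z_FINISH);
	if (ret == Z_OK) {
		/* Ran out of unreserved space: let it wrap up the stream
		   in the 12 bytes we held back. */
		s.avail_out += STREAM_END_SPACE;
		ret = deflate(&s, Z_FINISH);
	}
	if (ret != Z_STREAM_END) {
		deflateEnd(&s);
		return -1;
	}
	outsize = (int)s.total_out;
	deflateEnd(&s);
	return outsize;
}

int main(void)
{
	unsigned char in[4096], out[4096];
	int n;

	memset(in, 'x', sizeof(in));
	n = compress_with_reserve(in, sizeof(in), out, sizeof(out));
	printf("compressed %zu bytes into %d\n", sizeof(in), n);
	return 0;
}
```
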
diff --git a/fs/jffs2/comprtest.c b/fs/jffs2/comprtest.c
index cf51f091d0e7..f0fb8be7740c 100644
--- a/fs/jffs2/comprtest.c
+++ b/fs/jffs2/comprtest.c
@@ -1,4 +1,4 @@ | |||
1 | /* $Id: comprtest.c,v 1.5 2002/01/03 15:20:44 dwmw2 Exp $ */ | 1 | /* $Id: comprtest.c,v 1.6 2005/11/07 11:14:38 gleixner Exp $ */ |
2 | 2 | ||
3 | #include <linux/kernel.h> | 3 | #include <linux/kernel.h> |
4 | #include <linux/string.h> | 4 | #include <linux/string.h> |
@@ -265,9 +265,9 @@ static unsigned char testdata[TESTDATA_LEN] = { | |||
265 | static unsigned char comprbuf[TESTDATA_LEN]; | 265 | static unsigned char comprbuf[TESTDATA_LEN]; |
266 | static unsigned char decomprbuf[TESTDATA_LEN]; | 266 | static unsigned char decomprbuf[TESTDATA_LEN]; |
267 | 267 | ||
268 | int jffs2_decompress(unsigned char comprtype, unsigned char *cdata_in, | 268 | int jffs2_decompress(unsigned char comprtype, unsigned char *cdata_in, |
269 | unsigned char *data_out, uint32_t cdatalen, uint32_t datalen); | 269 | unsigned char *data_out, uint32_t cdatalen, uint32_t datalen); |
270 | unsigned char jffs2_compress(unsigned char *data_in, unsigned char *cpage_out, | 270 | unsigned char jffs2_compress(unsigned char *data_in, unsigned char *cpage_out, |
271 | uint32_t *datalen, uint32_t *cdatalen); | 271 | uint32_t *datalen, uint32_t *cdatalen); |
272 | 272 | ||
273 | int init_module(void ) { | 273 | int init_module(void ) { |
@@ -276,10 +276,10 @@ int init_module(void ) { | |||
276 | int ret; | 276 | int ret; |
277 | 277 | ||
278 | printk("Original data: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", | 278 | printk("Original data: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", |
279 | testdata[0],testdata[1],testdata[2],testdata[3], | 279 | testdata[0],testdata[1],testdata[2],testdata[3], |
280 | testdata[4],testdata[5],testdata[6],testdata[7], | 280 | testdata[4],testdata[5],testdata[6],testdata[7], |
281 | testdata[8],testdata[9],testdata[10],testdata[11], | 281 | testdata[8],testdata[9],testdata[10],testdata[11], |
282 | testdata[12],testdata[13],testdata[14],testdata[15]); | 282 | testdata[12],testdata[13],testdata[14],testdata[15]); |
283 | d = TESTDATA_LEN; | 283 | d = TESTDATA_LEN; |
284 | c = TESTDATA_LEN; | 284 | c = TESTDATA_LEN; |
285 | comprtype = jffs2_compress(testdata, comprbuf, &d, &c); | 285 | comprtype = jffs2_compress(testdata, comprbuf, &d, &c); |
@@ -287,18 +287,18 @@ int init_module(void ) { | |||
287 | printk("jffs2_compress used compression type %d. Compressed size %d, uncompressed size %d\n", | 287 | printk("jffs2_compress used compression type %d. Compressed size %d, uncompressed size %d\n", |
288 | comprtype, c, d); | 288 | comprtype, c, d); |
289 | printk("Compressed data: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", | 289 | printk("Compressed data: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", |
290 | comprbuf[0],comprbuf[1],comprbuf[2],comprbuf[3], | 290 | comprbuf[0],comprbuf[1],comprbuf[2],comprbuf[3], |
291 | comprbuf[4],comprbuf[5],comprbuf[6],comprbuf[7], | 291 | comprbuf[4],comprbuf[5],comprbuf[6],comprbuf[7], |
292 | comprbuf[8],comprbuf[9],comprbuf[10],comprbuf[11], | 292 | comprbuf[8],comprbuf[9],comprbuf[10],comprbuf[11], |
293 | comprbuf[12],comprbuf[13],comprbuf[14],comprbuf[15]); | 293 | comprbuf[12],comprbuf[13],comprbuf[14],comprbuf[15]); |
294 | 294 | ||
295 | ret = jffs2_decompress(comprtype, comprbuf, decomprbuf, c, d); | 295 | ret = jffs2_decompress(comprtype, comprbuf, decomprbuf, c, d); |
296 | printk("jffs2_decompress returned %d\n", ret); | 296 | printk("jffs2_decompress returned %d\n", ret); |
297 | printk("Decompressed data: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", | 297 | printk("Decompressed data: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", |
298 | decomprbuf[0],decomprbuf[1],decomprbuf[2],decomprbuf[3], | 298 | decomprbuf[0],decomprbuf[1],decomprbuf[2],decomprbuf[3], |
299 | decomprbuf[4],decomprbuf[5],decomprbuf[6],decomprbuf[7], | 299 | decomprbuf[4],decomprbuf[5],decomprbuf[6],decomprbuf[7], |
300 | decomprbuf[8],decomprbuf[9],decomprbuf[10],decomprbuf[11], | 300 | decomprbuf[8],decomprbuf[9],decomprbuf[10],decomprbuf[11], |
301 | decomprbuf[12],decomprbuf[13],decomprbuf[14],decomprbuf[15]); | 301 | decomprbuf[12],decomprbuf[13],decomprbuf[14],decomprbuf[15]); |
302 | if (memcmp(decomprbuf, testdata, d)) | 302 | if (memcmp(decomprbuf, testdata, d)) |
303 | printk("Compression and decompression corrupted data\n"); | 303 | printk("Compression and decompression corrupted data\n"); |
304 | else | 304 | else |
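The test-module hunk above drives a compressor round trip: both length arguments to jffs2_compress() are in/out, so after the call 'd' holds the uncompressed size actually handled and 'c' the compressed length, which are then fed straight back into jffs2_decompress(). A minimal sketch of that check, using only the two prototypes quoted above (TESTDATA_LEN and the static buffers are the test module's own):

	uint32_t d = TESTDATA_LEN;	/* in: bytes to compress; out: bytes actually compressed */
	uint32_t c = TESTDATA_LEN;	/* in: room in comprbuf; out: compressed length */
	unsigned char type;

	type = jffs2_compress(testdata, comprbuf, &d, &c);
	jffs2_decompress(type, comprbuf, decomprbuf, c, d);

	if (memcmp(decomprbuf, testdata, d))
		printk("Compression and decompression corrupted data\n");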
diff --git a/fs/jffs2/debug.c b/fs/jffs2/debug.c new file mode 100644 index 000000000000..1fe17de713e8 --- /dev/null +++ b/fs/jffs2/debug.c | |||
@@ -0,0 +1,705 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2001-2003 Red Hat, Inc. | ||
5 | * | ||
6 | * Created by David Woodhouse <dwmw2@infradead.org> | ||
7 | * | ||
8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
9 | * | ||
10 | * $Id: debug.c,v 1.12 2005/11/07 11:14:39 gleixner Exp $ | ||
11 | * | ||
12 | */ | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/types.h> | ||
15 | #include <linux/pagemap.h> | ||
16 | #include <linux/crc32.h> | ||
17 | #include <linux/jffs2.h> | ||
18 | #include <linux/mtd/mtd.h> | ||
19 | #include "nodelist.h" | ||
20 | #include "debug.h" | ||
21 | |||
22 | #ifdef JFFS2_DBG_SANITY_CHECKS | ||
23 | |||
24 | void | ||
25 | __jffs2_dbg_acct_sanity_check_nolock(struct jffs2_sb_info *c, | ||
26 | struct jffs2_eraseblock *jeb) | ||
27 | { | ||
28 | if (unlikely(jeb && jeb->used_size + jeb->dirty_size + | ||
29 | jeb->free_size + jeb->wasted_size + | ||
30 | jeb->unchecked_size != c->sector_size)) { | ||
31 | JFFS2_ERROR("eeep, space accounting for block at 0x%08x is screwed.\n", jeb->offset); | ||
32 | JFFS2_ERROR("free %#08x + dirty %#08x + used %#08x + wasted %#08x + unchecked %#08x != total %#08x.\n", | ||
33 | jeb->free_size, jeb->dirty_size, jeb->used_size, | ||
34 | jeb->wasted_size, jeb->unchecked_size, c->sector_size); | ||
35 | BUG(); | ||
36 | } | ||
37 | |||
38 | if (unlikely(c->used_size + c->dirty_size + c->free_size + c->erasing_size + c->bad_size | ||
39 | + c->wasted_size + c->unchecked_size != c->flash_size)) { | ||
40 | JFFS2_ERROR("eeep, space accounting superblock info is screwed.\n"); | ||
41 | JFFS2_ERROR("free %#08x + dirty %#08x + used %#08x + erasing %#08x + bad %#08x + wasted %#08x + unchecked %#08x != total %#08x.\n", | ||
42 | c->free_size, c->dirty_size, c->used_size, c->erasing_size, c->bad_size, | ||
43 | c->wasted_size, c->unchecked_size, c->flash_size); | ||
44 | BUG(); | ||
45 | } | ||
46 | } | ||
47 | |||
48 | void | ||
49 | __jffs2_dbg_acct_sanity_check(struct jffs2_sb_info *c, | ||
50 | struct jffs2_eraseblock *jeb) | ||
51 | { | ||
52 | spin_lock(&c->erase_completion_lock); | ||
53 | jffs2_dbg_acct_sanity_check_nolock(c, jeb); | ||
54 | spin_unlock(&c->erase_completion_lock); | ||
55 | } | ||
56 | |||
57 | #endif /* JFFS2_DBG_SANITY_CHECKS */ | ||
58 | |||
59 | #ifdef JFFS2_DBG_PARANOIA_CHECKS | ||
60 | /* | ||
61 | * Check the fragtree. | ||
62 | */ | ||
63 | void | ||
64 | __jffs2_dbg_fragtree_paranoia_check(struct jffs2_inode_info *f) | ||
65 | { | ||
66 | down(&f->sem); | ||
67 | __jffs2_dbg_fragtree_paranoia_check_nolock(f); | ||
68 | up(&f->sem); | ||
69 | } | ||
70 | |||
71 | void | ||
72 | __jffs2_dbg_fragtree_paranoia_check_nolock(struct jffs2_inode_info *f) | ||
73 | { | ||
74 | struct jffs2_node_frag *frag; | ||
75 | int bitched = 0; | ||
76 | |||
77 | for (frag = frag_first(&f->fragtree); frag; frag = frag_next(frag)) { | ||
78 | struct jffs2_full_dnode *fn = frag->node; | ||
79 | |||
80 | if (!fn || !fn->raw) | ||
81 | continue; | ||
82 | |||
83 | if (ref_flags(fn->raw) == REF_PRISTINE) { | ||
84 | if (fn->frags > 1) { | ||
85 | JFFS2_ERROR("REF_PRISTINE node at 0x%08x had %d frags. Tell dwmw2.\n", | ||
86 | ref_offset(fn->raw), fn->frags); | ||
87 | bitched = 1; | ||
88 | } | ||
89 | |||
90 | /* A hole node which isn't multi-page should be garbage-collected | ||
91 | and merged anyway, so we just check for the frag size here, | ||
92 | rather than mucking around with actually reading the node | ||
93 | and checking the compression type, which is the real way | ||
94 | to tell a hole node. */ | ||
95 | if (frag->ofs & (PAGE_CACHE_SIZE-1) && frag_prev(frag) | ||
96 | && frag_prev(frag)->size < PAGE_CACHE_SIZE && frag_prev(frag)->node) { | ||
97 | JFFS2_ERROR("REF_PRISTINE node at 0x%08x had a previous non-hole frag in the same page. Tell dwmw2.\n", | ||
98 | ref_offset(fn->raw)); | ||
99 | bitched = 1; | ||
100 | } | ||
101 | |||
102 | if ((frag->ofs+frag->size) & (PAGE_CACHE_SIZE-1) && frag_next(frag) | ||
103 | && frag_next(frag)->size < PAGE_CACHE_SIZE && frag_next(frag)->node) { | ||
104 | JFFS2_ERROR("REF_PRISTINE node at 0x%08x (%08x-%08x) had a following non-hole frag in the same page. Tell dwmw2.\n", | ||
105 | ref_offset(fn->raw), frag->ofs, frag->ofs+frag->size); | ||
106 | bitched = 1; | ||
107 | } | ||
108 | } | ||
109 | } | ||
110 | |||
111 | if (bitched) { | ||
112 | JFFS2_ERROR("fragtree is corrupted.\n"); | ||
113 | __jffs2_dbg_dump_fragtree_nolock(f); | ||
114 | BUG(); | ||
115 | } | ||
116 | } | ||
117 | |||
118 | /* | ||
119 | * Check if the flash contains all 0xFF before we start writing. | ||
120 | */ | ||
121 | void | ||
122 | __jffs2_dbg_prewrite_paranoia_check(struct jffs2_sb_info *c, | ||
123 | uint32_t ofs, int len) | ||
124 | { | ||
125 | size_t retlen; | ||
126 | int ret, i; | ||
127 | unsigned char *buf; | ||
128 | |||
129 | buf = kmalloc(len, GFP_KERNEL); | ||
130 | if (!buf) | ||
131 | return; | ||
132 | |||
133 | ret = jffs2_flash_read(c, ofs, len, &retlen, buf); | ||
134 | if (ret || (retlen != len)) { | ||
135 | JFFS2_WARNING("read %d bytes failed or short. ret %d, retlen %zd.\n", | ||
136 | len, ret, retlen); | ||
137 | kfree(buf); | ||
138 | return; | ||
139 | } | ||
140 | |||
141 | ret = 0; | ||
142 | for (i = 0; i < len; i++) | ||
143 | if (buf[i] != 0xff) | ||
144 | ret = 1; | ||
145 | |||
146 | if (ret) { | ||
147 | JFFS2_ERROR("argh, about to write node to %#08x on flash, but there are data already there. The first corrupted byte is at %#08x offset.\n", | ||
148 | ofs, ofs + i); | ||
149 | __jffs2_dbg_dump_buffer(buf, len, ofs); | ||
150 | kfree(buf); | ||
151 | BUG(); | ||
152 | } | ||
153 | |||
154 | kfree(buf); | ||
155 | } | ||
156 | |||
157 | /* | ||
158 | * Check the space accounting and node_ref list correctness for the JFFS2 eraseblock 'jeb'. | ||
159 | */ | ||
160 | void | ||
161 | __jffs2_dbg_acct_paranoia_check(struct jffs2_sb_info *c, | ||
162 | struct jffs2_eraseblock *jeb) | ||
163 | { | ||
164 | spin_lock(&c->erase_completion_lock); | ||
165 | __jffs2_dbg_acct_paranoia_check_nolock(c, jeb); | ||
166 | spin_unlock(&c->erase_completion_lock); | ||
167 | } | ||
168 | |||
169 | void | ||
170 | __jffs2_dbg_acct_paranoia_check_nolock(struct jffs2_sb_info *c, | ||
171 | struct jffs2_eraseblock *jeb) | ||
172 | { | ||
173 | uint32_t my_used_size = 0; | ||
174 | uint32_t my_unchecked_size = 0; | ||
175 | uint32_t my_dirty_size = 0; | ||
176 | struct jffs2_raw_node_ref *ref2 = jeb->first_node; | ||
177 | |||
178 | while (ref2) { | ||
179 | uint32_t totlen = ref_totlen(c, jeb, ref2); | ||
180 | |||
181 | if (ref2->flash_offset < jeb->offset || | ||
182 | ref2->flash_offset > jeb->offset + c->sector_size) { | ||
183 | JFFS2_ERROR("node_ref %#08x shouldn't be in block at %#08x.\n", | ||
184 | ref_offset(ref2), jeb->offset); | ||
185 | goto error; | ||
186 | |||
187 | } | ||
188 | if (ref_flags(ref2) == REF_UNCHECKED) | ||
189 | my_unchecked_size += totlen; | ||
190 | else if (!ref_obsolete(ref2)) | ||
191 | my_used_size += totlen; | ||
192 | else | ||
193 | my_dirty_size += totlen; | ||
194 | |||
195 | if ((!ref2->next_phys) != (ref2 == jeb->last_node)) { | ||
196 | JFFS2_ERROR("node_ref for node at %#08x (mem %p) has next_phys at %#08x (mem %p), last_node is at %#08x (mem %p).\n", | ||
197 | ref_offset(ref2), ref2, ref_offset(ref2->next_phys), ref2->next_phys, | ||
198 | ref_offset(jeb->last_node), jeb->last_node); | ||
199 | goto error; | ||
200 | } | ||
201 | ref2 = ref2->next_phys; | ||
202 | } | ||
203 | |||
204 | if (my_used_size != jeb->used_size) { | ||
205 | JFFS2_ERROR("Calculated used size %#08x != stored used size %#08x.\n", | ||
206 | my_used_size, jeb->used_size); | ||
207 | goto error; | ||
208 | } | ||
209 | |||
210 | if (my_unchecked_size != jeb->unchecked_size) { | ||
211 | JFFS2_ERROR("Calculated unchecked size %#08x != stored unchecked size %#08x.\n", | ||
212 | my_unchecked_size, jeb->unchecked_size); | ||
213 | goto error; | ||
214 | } | ||
215 | |||
216 | #if 0 | ||
217 | /* This should work when we implement ref->__totlen elimination */ | ||
218 | if (my_dirty_size != jeb->dirty_size + jeb->wasted_size) { | ||
219 | JFFS2_ERROR("Calculated dirty+wasted size %#08x != stored dirty + wasted size %#08x\n", | ||
220 | my_dirty_size, jeb->dirty_size + jeb->wasted_size); | ||
221 | goto error; | ||
222 | } | ||
223 | |||
224 | if (jeb->free_size == 0 | ||
225 | && my_used_size + my_unchecked_size + my_dirty_size != c->sector_size) { | ||
226 | JFFS2_ERROR("The sum of all nodes in block (%#x) != size of block (%#x)\n", | ||
227 | my_used_size + my_unchecked_size + my_dirty_size, | ||
228 | c->sector_size); | ||
229 | goto error; | ||
230 | } | ||
231 | #endif | ||
232 | |||
233 | return; | ||
234 | |||
235 | error: | ||
236 | __jffs2_dbg_dump_node_refs_nolock(c, jeb); | ||
237 | __jffs2_dbg_dump_jeb_nolock(jeb); | ||
238 | __jffs2_dbg_dump_block_lists_nolock(c); | ||
239 | BUG(); | ||
240 | |||
241 | } | ||
242 | #endif /* JFFS2_DBG_PARANOIA_CHECKS */ | ||
243 | |||
244 | #if defined(JFFS2_DBG_DUMPS) || defined(JFFS2_DBG_PARANOIA_CHECKS) | ||
245 | /* | ||
246 | * Dump the node_refs of the 'jeb' JFFS2 eraseblock. | ||
247 | */ | ||
248 | void | ||
249 | __jffs2_dbg_dump_node_refs(struct jffs2_sb_info *c, | ||
250 | struct jffs2_eraseblock *jeb) | ||
251 | { | ||
252 | spin_lock(&c->erase_completion_lock); | ||
253 | __jffs2_dbg_dump_node_refs_nolock(c, jeb); | ||
254 | spin_unlock(&c->erase_completion_lock); | ||
255 | } | ||
256 | |||
257 | void | ||
258 | __jffs2_dbg_dump_node_refs_nolock(struct jffs2_sb_info *c, | ||
259 | struct jffs2_eraseblock *jeb) | ||
260 | { | ||
261 | struct jffs2_raw_node_ref *ref; | ||
262 | int i = 0; | ||
263 | |||
264 | printk(JFFS2_DBG_MSG_PREFIX " Dump node_refs of the eraseblock %#08x\n", jeb->offset); | ||
265 | if (!jeb->first_node) { | ||
266 | printk(JFFS2_DBG_MSG_PREFIX " no nodes in the eraseblock %#08x\n", jeb->offset); | ||
267 | return; | ||
268 | } | ||
269 | |||
270 | printk(JFFS2_DBG); | ||
271 | for (ref = jeb->first_node; ; ref = ref->next_phys) { | ||
272 | printk("%#08x(%#x)", ref_offset(ref), ref->__totlen); | ||
273 | if (ref->next_phys) | ||
274 | printk("->"); | ||
275 | else | ||
276 | break; | ||
277 | if (++i == 4) { | ||
278 | i = 0; | ||
279 | printk("\n" JFFS2_DBG); | ||
280 | } | ||
281 | } | ||
282 | printk("\n"); | ||
283 | } | ||
284 | |||
285 | /* | ||
286 | * Dump an eraseblock's space accounting. | ||
287 | */ | ||
288 | void | ||
289 | __jffs2_dbg_dump_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) | ||
290 | { | ||
291 | spin_lock(&c->erase_completion_lock); | ||
292 | __jffs2_dbg_dump_jeb_nolock(jeb); | ||
293 | spin_unlock(&c->erase_completion_lock); | ||
294 | } | ||
295 | |||
296 | void | ||
297 | __jffs2_dbg_dump_jeb_nolock(struct jffs2_eraseblock *jeb) | ||
298 | { | ||
299 | if (!jeb) | ||
300 | return; | ||
301 | |||
302 | printk(JFFS2_DBG_MSG_PREFIX " dump space accounting for the eraseblock at %#08x:\n", | ||
303 | jeb->offset); | ||
304 | |||
305 | printk(JFFS2_DBG "used_size: %#08x\n", jeb->used_size); | ||
306 | printk(JFFS2_DBG "dirty_size: %#08x\n", jeb->dirty_size); | ||
307 | printk(JFFS2_DBG "wasted_size: %#08x\n", jeb->wasted_size); | ||
308 | printk(JFFS2_DBG "unchecked_size: %#08x\n", jeb->unchecked_size); | ||
309 | printk(JFFS2_DBG "free_size: %#08x\n", jeb->free_size); | ||
310 | } | ||
311 | |||
312 | void | ||
313 | __jffs2_dbg_dump_block_lists(struct jffs2_sb_info *c) | ||
314 | { | ||
315 | spin_lock(&c->erase_completion_lock); | ||
316 | __jffs2_dbg_dump_block_lists_nolock(c); | ||
317 | spin_unlock(&c->erase_completion_lock); | ||
318 | } | ||
319 | |||
320 | void | ||
321 | __jffs2_dbg_dump_block_lists_nolock(struct jffs2_sb_info *c) | ||
322 | { | ||
323 | printk(JFFS2_DBG_MSG_PREFIX " dump JFFS2 blocks lists:\n"); | ||
324 | |||
325 | printk(JFFS2_DBG "flash_size: %#08x\n", c->flash_size); | ||
326 | printk(JFFS2_DBG "used_size: %#08x\n", c->used_size); | ||
327 | printk(JFFS2_DBG "dirty_size: %#08x\n", c->dirty_size); | ||
328 | printk(JFFS2_DBG "wasted_size: %#08x\n", c->wasted_size); | ||
329 | printk(JFFS2_DBG "unchecked_size: %#08x\n", c->unchecked_size); | ||
330 | printk(JFFS2_DBG "free_size: %#08x\n", c->free_size); | ||
331 | printk(JFFS2_DBG "erasing_size: %#08x\n", c->erasing_size); | ||
332 | printk(JFFS2_DBG "bad_size: %#08x\n", c->bad_size); | ||
333 | printk(JFFS2_DBG "sector_size: %#08x\n", c->sector_size); | ||
334 | printk(JFFS2_DBG "jffs2_reserved_blocks size: %#08x\n", | ||
335 | c->sector_size * c->resv_blocks_write); | ||
336 | |||
337 | if (c->nextblock) | ||
338 | printk(JFFS2_DBG "nextblock: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | ||
339 | c->nextblock->offset, c->nextblock->used_size, | ||
340 | c->nextblock->dirty_size, c->nextblock->wasted_size, | ||
341 | c->nextblock->unchecked_size, c->nextblock->free_size); | ||
342 | else | ||
343 | printk(JFFS2_DBG "nextblock: NULL\n"); | ||
344 | |||
345 | if (c->gcblock) | ||
346 | printk(JFFS2_DBG "gcblock: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | ||
347 | c->gcblock->offset, c->gcblock->used_size, c->gcblock->dirty_size, | ||
348 | c->gcblock->wasted_size, c->gcblock->unchecked_size, c->gcblock->free_size); | ||
349 | else | ||
350 | printk(JFFS2_DBG "gcblock: NULL\n"); | ||
351 | |||
352 | if (list_empty(&c->clean_list)) { | ||
353 | printk(JFFS2_DBG "clean_list: empty\n"); | ||
354 | } else { | ||
355 | struct list_head *this; | ||
356 | int numblocks = 0; | ||
357 | uint32_t dirty = 0; | ||
358 | |||
359 | list_for_each(this, &c->clean_list) { | ||
360 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
361 | numblocks ++; | ||
362 | dirty += jeb->wasted_size; | ||
363 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { | ||
364 | printk(JFFS2_DBG "clean_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | ||
365 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, | ||
366 | jeb->unchecked_size, jeb->free_size); | ||
367 | } | ||
368 | } | ||
369 | |||
370 | printk (JFFS2_DBG "Contains %d blocks with total wasted size %u, average wasted size: %u\n", | ||
371 | numblocks, dirty, dirty / numblocks); | ||
372 | } | ||
373 | |||
374 | if (list_empty(&c->very_dirty_list)) { | ||
375 | printk(JFFS2_DBG "very_dirty_list: empty\n"); | ||
376 | } else { | ||
377 | struct list_head *this; | ||
378 | int numblocks = 0; | ||
379 | uint32_t dirty = 0; | ||
380 | |||
381 | list_for_each(this, &c->very_dirty_list) { | ||
382 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
383 | |||
384 | numblocks ++; | ||
385 | dirty += jeb->dirty_size; | ||
386 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { | ||
387 | printk(JFFS2_DBG "very_dirty_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | ||
388 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, | ||
389 | jeb->unchecked_size, jeb->free_size); | ||
390 | } | ||
391 | } | ||
392 | |||
393 | printk (JFFS2_DBG "Contains %d blocks with total dirty size %u, average dirty size: %u\n", | ||
394 | numblocks, dirty, dirty / numblocks); | ||
395 | } | ||
396 | |||
397 | if (list_empty(&c->dirty_list)) { | ||
398 | printk(JFFS2_DBG "dirty_list: empty\n"); | ||
399 | } else { | ||
400 | struct list_head *this; | ||
401 | int numblocks = 0; | ||
402 | uint32_t dirty = 0; | ||
403 | |||
404 | list_for_each(this, &c->dirty_list) { | ||
405 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
406 | |||
407 | numblocks ++; | ||
408 | dirty += jeb->dirty_size; | ||
409 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { | ||
410 | printk(JFFS2_DBG "dirty_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | ||
411 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, | ||
412 | jeb->unchecked_size, jeb->free_size); | ||
413 | } | ||
414 | } | ||
415 | |||
416 | printk (JFFS2_DBG "contains %d blocks with total dirty size %u, average dirty size: %u\n", | ||
417 | numblocks, dirty, dirty / numblocks); | ||
418 | } | ||
419 | |||
420 | if (list_empty(&c->erasable_list)) { | ||
421 | printk(JFFS2_DBG "erasable_list: empty\n"); | ||
422 | } else { | ||
423 | struct list_head *this; | ||
424 | |||
425 | list_for_each(this, &c->erasable_list) { | ||
426 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
427 | |||
428 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { | ||
429 | printk(JFFS2_DBG "erasable_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | ||
430 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, | ||
431 | jeb->unchecked_size, jeb->free_size); | ||
432 | } | ||
433 | } | ||
434 | } | ||
435 | |||
436 | if (list_empty(&c->erasing_list)) { | ||
437 | printk(JFFS2_DBG "erasing_list: empty\n"); | ||
438 | } else { | ||
439 | struct list_head *this; | ||
440 | |||
441 | list_for_each(this, &c->erasing_list) { | ||
442 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
443 | |||
444 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { | ||
445 | printk(JFFS2_DBG "erasing_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | ||
446 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, | ||
447 | jeb->unchecked_size, jeb->free_size); | ||
448 | } | ||
449 | } | ||
450 | } | ||
451 | |||
452 | if (list_empty(&c->erase_pending_list)) { | ||
453 | printk(JFFS2_DBG "erase_pending_list: empty\n"); | ||
454 | } else { | ||
455 | struct list_head *this; | ||
456 | |||
457 | list_for_each(this, &c->erase_pending_list) { | ||
458 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
459 | |||
460 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { | ||
461 | printk(JFFS2_DBG "erase_pending_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | ||
462 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, | ||
463 | jeb->unchecked_size, jeb->free_size); | ||
464 | } | ||
465 | } | ||
466 | } | ||
467 | |||
468 | if (list_empty(&c->erasable_pending_wbuf_list)) { | ||
469 | printk(JFFS2_DBG "erasable_pending_wbuf_list: empty\n"); | ||
470 | } else { | ||
471 | struct list_head *this; | ||
472 | |||
473 | list_for_each(this, &c->erasable_pending_wbuf_list) { | ||
474 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
475 | |||
476 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { | ||
477 | printk(JFFS2_DBG "erasable_pending_wbuf_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | ||
478 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, | ||
479 | jeb->unchecked_size, jeb->free_size); | ||
480 | } | ||
481 | } | ||
482 | } | ||
483 | |||
484 | if (list_empty(&c->free_list)) { | ||
485 | printk(JFFS2_DBG "free_list: empty\n"); | ||
486 | } else { | ||
487 | struct list_head *this; | ||
488 | |||
489 | list_for_each(this, &c->free_list) { | ||
490 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
491 | |||
492 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { | ||
493 | printk(JFFS2_DBG "free_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | ||
494 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, | ||
495 | jeb->unchecked_size, jeb->free_size); | ||
496 | } | ||
497 | } | ||
498 | } | ||
499 | |||
500 | if (list_empty(&c->bad_list)) { | ||
501 | printk(JFFS2_DBG "bad_list: empty\n"); | ||
502 | } else { | ||
503 | struct list_head *this; | ||
504 | |||
505 | list_for_each(this, &c->bad_list) { | ||
506 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
507 | |||
508 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { | ||
509 | printk(JFFS2_DBG "bad_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | ||
510 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, | ||
511 | jeb->unchecked_size, jeb->free_size); | ||
512 | } | ||
513 | } | ||
514 | } | ||
515 | |||
516 | if (list_empty(&c->bad_used_list)) { | ||
517 | printk(JFFS2_DBG "bad_used_list: empty\n"); | ||
518 | } else { | ||
519 | struct list_head *this; | ||
520 | |||
521 | list_for_each(this, &c->bad_used_list) { | ||
522 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
523 | |||
524 | if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { | ||
525 | printk(JFFS2_DBG "bad_used_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", | ||
526 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, | ||
527 | jeb->unchecked_size, jeb->free_size); | ||
528 | } | ||
529 | } | ||
530 | } | ||
531 | } | ||
532 | |||
533 | void | ||
534 | __jffs2_dbg_dump_fragtree(struct jffs2_inode_info *f) | ||
535 | { | ||
536 | down(&f->sem); | ||
537 | jffs2_dbg_dump_fragtree_nolock(f); | ||
538 | up(&f->sem); | ||
539 | } | ||
540 | |||
541 | void | ||
542 | __jffs2_dbg_dump_fragtree_nolock(struct jffs2_inode_info *f) | ||
543 | { | ||
544 | struct jffs2_node_frag *this = frag_first(&f->fragtree); | ||
545 | uint32_t lastofs = 0; | ||
546 | int buggy = 0; | ||
547 | |||
548 | printk(JFFS2_DBG_MSG_PREFIX " dump fragtree of ino #%u\n", f->inocache->ino); | ||
549 | while(this) { | ||
550 | if (this->node) | ||
551 | printk(JFFS2_DBG "frag %#04x-%#04x: %#08x(%d) on flash (*%p), left (%p), right (%p), parent (%p)\n", | ||
552 | this->ofs, this->ofs+this->size, ref_offset(this->node->raw), | ||
553 | ref_flags(this->node->raw), this, frag_left(this), frag_right(this), | ||
554 | frag_parent(this)); | ||
555 | else | ||
556 | printk(JFFS2_DBG "frag %#04x-%#04x: hole (*%p). left (%p), right (%p), parent (%p)\n", | ||
557 | this->ofs, this->ofs+this->size, this, frag_left(this), | ||
558 | frag_right(this), frag_parent(this)); | ||
559 | if (this->ofs != lastofs) | ||
560 | buggy = 1; | ||
561 | lastofs = this->ofs + this->size; | ||
562 | this = frag_next(this); | ||
563 | } | ||
564 | |||
565 | if (f->metadata) | ||
566 | printk(JFFS2_DBG "metadata at 0x%08x\n", ref_offset(f->metadata->raw)); | ||
567 | |||
568 | if (buggy) { | ||
569 | JFFS2_ERROR("frag tree got a hole in it.\n"); | ||
570 | BUG(); | ||
571 | } | ||
572 | } | ||
573 | |||
574 | #define JFFS2_BUFDUMP_BYTES_PER_LINE 32 | ||
575 | void | ||
576 | __jffs2_dbg_dump_buffer(unsigned char *buf, int len, uint32_t offs) | ||
577 | { | ||
578 | int skip; | ||
579 | int i; | ||
580 | |||
581 | printk(JFFS2_DBG_MSG_PREFIX " dump from offset %#08x to offset %#08x (%x bytes).\n", | ||
582 | offs, offs + len, len); | ||
583 | i = skip = offs % JFFS2_BUFDUMP_BYTES_PER_LINE; | ||
584 | offs = offs & ~(JFFS2_BUFDUMP_BYTES_PER_LINE - 1); | ||
585 | |||
586 | if (skip != 0) | ||
587 | printk(JFFS2_DBG "%#08x: ", offs); | ||
588 | |||
589 | while (skip--) | ||
590 | printk(" "); | ||
591 | |||
592 | while (i < len) { | ||
593 | if ((i % JFFS2_BUFDUMP_BYTES_PER_LINE) == 0 && i != len -1) { | ||
594 | if (i != 0) | ||
595 | printk("\n"); | ||
596 | offs += JFFS2_BUFDUMP_BYTES_PER_LINE; | ||
597 | printk(JFFS2_DBG "%0#8x: ", offs); | ||
598 | } | ||
599 | |||
600 | printk("%02x ", buf[i]); | ||
601 | |||
602 | i += 1; | ||
603 | } | ||
604 | |||
605 | printk("\n"); | ||
606 | } | ||
607 | |||
608 | /* | ||
609 | * Dump a JFFS2 node. | ||
610 | */ | ||
611 | void | ||
612 | __jffs2_dbg_dump_node(struct jffs2_sb_info *c, uint32_t ofs) | ||
613 | { | ||
614 | union jffs2_node_union node; | ||
615 | int len = sizeof(union jffs2_node_union); | ||
616 | size_t retlen; | ||
617 | uint32_t crc; | ||
618 | int ret; | ||
619 | |||
620 | printk(JFFS2_DBG_MSG_PREFIX " dump node at offset %#08x.\n", ofs); | ||
621 | |||
622 | ret = jffs2_flash_read(c, ofs, len, &retlen, (unsigned char *)&node); | ||
623 | if (ret || (retlen != len)) { | ||
624 | JFFS2_ERROR("read %d bytes failed or short. ret %d, retlen %zd.\n", | ||
625 | len, ret, retlen); | ||
626 | return; | ||
627 | } | ||
628 | |||
629 | printk(JFFS2_DBG "magic:\t%#04x\n", je16_to_cpu(node.u.magic)); | ||
630 | printk(JFFS2_DBG "nodetype:\t%#04x\n", je16_to_cpu(node.u.nodetype)); | ||
631 | printk(JFFS2_DBG "totlen:\t%#08x\n", je32_to_cpu(node.u.totlen)); | ||
632 | printk(JFFS2_DBG "hdr_crc:\t%#08x\n", je32_to_cpu(node.u.hdr_crc)); | ||
633 | |||
634 | crc = crc32(0, &node.u, sizeof(node.u) - 4); | ||
635 | if (crc != je32_to_cpu(node.u.hdr_crc)) { | ||
636 | JFFS2_ERROR("wrong common header CRC.\n"); | ||
637 | return; | ||
638 | } | ||
639 | |||
640 | if (je16_to_cpu(node.u.magic) != JFFS2_MAGIC_BITMASK && | ||
641 | je16_to_cpu(node.u.magic) != JFFS2_OLD_MAGIC_BITMASK) | ||
642 | { | ||
643 | JFFS2_ERROR("wrong node magic: %#04x instead of %#04x.\n", | ||
644 | je16_to_cpu(node.u.magic), JFFS2_MAGIC_BITMASK); | ||
645 | return; | ||
646 | } | ||
647 | |||
648 | switch(je16_to_cpu(node.u.nodetype)) { | ||
649 | |||
650 | case JFFS2_NODETYPE_INODE: | ||
651 | |||
652 | printk(JFFS2_DBG "the node is inode node\n"); | ||
653 | printk(JFFS2_DBG "ino:\t%#08x\n", je32_to_cpu(node.i.ino)); | ||
654 | printk(JFFS2_DBG "version:\t%#08x\n", je32_to_cpu(node.i.version)); | ||
655 | printk(JFFS2_DBG "mode:\t%#08x\n", node.i.mode.m); | ||
656 | printk(JFFS2_DBG "uid:\t%#04x\n", je16_to_cpu(node.i.uid)); | ||
657 | printk(JFFS2_DBG "gid:\t%#04x\n", je16_to_cpu(node.i.gid)); | ||
658 | printk(JFFS2_DBG "isize:\t%#08x\n", je32_to_cpu(node.i.isize)); | ||
659 | printk(JFFS2_DBG "atime:\t%#08x\n", je32_to_cpu(node.i.atime)); | ||
660 | printk(JFFS2_DBG "mtime:\t%#08x\n", je32_to_cpu(node.i.mtime)); | ||
661 | printk(JFFS2_DBG "ctime:\t%#08x\n", je32_to_cpu(node.i.ctime)); | ||
662 | printk(JFFS2_DBG "offset:\t%#08x\n", je32_to_cpu(node.i.offset)); | ||
663 | printk(JFFS2_DBG "csize:\t%#08x\n", je32_to_cpu(node.i.csize)); | ||
664 | printk(JFFS2_DBG "dsize:\t%#08x\n", je32_to_cpu(node.i.dsize)); | ||
665 | printk(JFFS2_DBG "compr:\t%#02x\n", node.i.compr); | ||
666 | printk(JFFS2_DBG "usercompr:\t%#02x\n", node.i.usercompr); | ||
667 | printk(JFFS2_DBG "flags:\t%#04x\n", je16_to_cpu(node.i.flags)); | ||
668 | printk(JFFS2_DBG "data_crc:\t%#08x\n", je32_to_cpu(node.i.data_crc)); | ||
669 | printk(JFFS2_DBG "node_crc:\t%#08x\n", je32_to_cpu(node.i.node_crc)); | ||
670 | |||
671 | crc = crc32(0, &node.i, sizeof(node.i) - 8); | ||
672 | if (crc != je32_to_cpu(node.i.node_crc)) { | ||
673 | JFFS2_ERROR("wrong node header CRC.\n"); | ||
674 | return; | ||
675 | } | ||
676 | break; | ||
677 | |||
678 | case JFFS2_NODETYPE_DIRENT: | ||
679 | |||
680 | printk(JFFS2_DBG "the node is dirent node\n"); | ||
681 | printk(JFFS2_DBG "pino:\t%#08x\n", je32_to_cpu(node.d.pino)); | ||
682 | printk(JFFS2_DBG "version:\t%#08x\n", je32_to_cpu(node.d.version)); | ||
683 | printk(JFFS2_DBG "ino:\t%#08x\n", je32_to_cpu(node.d.ino)); | ||
684 | printk(JFFS2_DBG "mctime:\t%#08x\n", je32_to_cpu(node.d.mctime)); | ||
685 | printk(JFFS2_DBG "nsize:\t%#02x\n", node.d.nsize); | ||
686 | printk(JFFS2_DBG "type:\t%#02x\n", node.d.type); | ||
687 | printk(JFFS2_DBG "node_crc:\t%#08x\n", je32_to_cpu(node.d.node_crc)); | ||
688 | printk(JFFS2_DBG "name_crc:\t%#08x\n", je32_to_cpu(node.d.name_crc)); | ||
689 | |||
690 | node.d.name[node.d.nsize] = '\0'; | ||
691 | printk(JFFS2_DBG "name:\t\"%s\"\n", node.d.name); | ||
692 | |||
693 | crc = crc32(0, &node.d, sizeof(node.d) - 8); | ||
694 | if (crc != je32_to_cpu(node.d.node_crc)) { | ||
695 | JFFS2_ERROR("wrong node header CRC.\n"); | ||
696 | return; | ||
697 | } | ||
698 | break; | ||
699 | |||
700 | default: | ||
701 | printk(JFFS2_DBG "node type is unknown\n"); | ||
702 | break; | ||
703 | } | ||
704 | } | ||
705 | #endif /* JFFS2_DBG_DUMPS || JFFS2_DBG_PARANOIA_CHECKS */ | ||
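__jffs2_dbg_prewrite_paranoia_check() above reads the target range back and BUG()s unless it is still all 0xFF, i.e. still erased. A hedged sketch of how a write path would use it through the jffs2_dbg_prewrite_paranoia_check() wrapper; the surrounding function and its arguments are invented for illustration, and jffs2_flash_write() is assumed to be the usual JFFS2 flash-write helper:

	/* Illustrative only -- not part of the patch. */
	static int example_write_node(struct jffs2_sb_info *c, uint32_t ofs,
				      const unsigned char *node, uint32_t len)
	{
		size_t retlen;

		/* Compiled out unless CONFIG_JFFS2_FS_DEBUG > 0 */
		jffs2_dbg_prewrite_paranoia_check(c, ofs, len);

		return jffs2_flash_write(c, ofs, len, &retlen, node);
	}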
diff --git a/fs/jffs2/debug.h b/fs/jffs2/debug.h new file mode 100644 index 000000000000..f193d43a8a59 --- /dev/null +++ b/fs/jffs2/debug.h | |||
@@ -0,0 +1,279 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2001-2003 Red Hat, Inc. | ||
5 | * | ||
6 | * Created by David Woodhouse <dwmw2@infradead.org> | ||
7 | * | ||
8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
9 | * | ||
10 | * $Id: debug.h,v 1.21 2005/11/07 11:14:39 gleixner Exp $ | ||
11 | * | ||
12 | */ | ||
13 | #ifndef _JFFS2_DEBUG_H_ | ||
14 | #define _JFFS2_DEBUG_H_ | ||
15 | |||
16 | #include <linux/config.h> | ||
17 | |||
18 | #ifndef CONFIG_JFFS2_FS_DEBUG | ||
19 | #define CONFIG_JFFS2_FS_DEBUG 0 | ||
20 | #endif | ||
21 | |||
22 | #if CONFIG_JFFS2_FS_DEBUG > 0 | ||
23 | /* Enable "paranoia" checks and dumps */ | ||
24 | #define JFFS2_DBG_PARANOIA_CHECKS | ||
25 | #define JFFS2_DBG_DUMPS | ||
26 | |||
27 | /* | ||
28 | * By defining/undefining the below macros one may select debugging messages | ||
29 | * for specific JFFS2 subsystems. | ||
30 | */ | ||
31 | #define JFFS2_DBG_READINODE_MESSAGES | ||
32 | #define JFFS2_DBG_FRAGTREE_MESSAGES | ||
33 | #define JFFS2_DBG_DENTLIST_MESSAGES | ||
34 | #define JFFS2_DBG_NODEREF_MESSAGES | ||
35 | #define JFFS2_DBG_INOCACHE_MESSAGES | ||
36 | #define JFFS2_DBG_SUMMARY_MESSAGES | ||
37 | #define JFFS2_DBG_FSBUILD_MESSAGES | ||
38 | #endif | ||
39 | |||
40 | #if CONFIG_JFFS2_FS_DEBUG > 1 | ||
41 | #define JFFS2_DBG_FRAGTREE2_MESSAGES | ||
42 | #define JFFS2_DBG_MEMALLOC_MESSAGES | ||
43 | #endif | ||
44 | |||
45 | /* Sanity checks are supposed to be light-weight and enabled by default */ | ||
46 | #define JFFS2_DBG_SANITY_CHECKS | ||
47 | |||
48 | /* | ||
49 | * Dx() are mainly used for debugging messages; they must eventually go away | ||
50 | * and be superseded by nicer dbg_xxx() macros... | ||
51 | */ | ||
52 | #if CONFIG_JFFS2_FS_DEBUG > 0 | ||
53 | #define D1(x) x | ||
54 | #else | ||
55 | #define D1(x) | ||
56 | #endif | ||
57 | |||
58 | #if CONFIG_JFFS2_FS_DEBUG > 1 | ||
59 | #define D2(x) x | ||
60 | #else | ||
61 | #define D2(x) | ||
62 | #endif | ||
63 | |||
64 | /* The prefixes of JFFS2 messages */ | ||
65 | #define JFFS2_DBG_PREFIX "[JFFS2 DBG]" | ||
66 | #define JFFS2_ERR_PREFIX "JFFS2 error:" | ||
67 | #define JFFS2_WARN_PREFIX "JFFS2 warning:" | ||
68 | #define JFFS2_NOTICE_PREFIX "JFFS2 notice:" | ||
69 | |||
70 | #define JFFS2_ERR KERN_ERR | ||
71 | #define JFFS2_WARN KERN_WARNING | ||
72 | #define JFFS2_NOT KERN_NOTICE | ||
73 | #define JFFS2_DBG KERN_DEBUG | ||
74 | |||
75 | #define JFFS2_DBG_MSG_PREFIX JFFS2_DBG JFFS2_DBG_PREFIX | ||
76 | #define JFFS2_ERR_MSG_PREFIX JFFS2_ERR JFFS2_ERR_PREFIX | ||
77 | #define JFFS2_WARN_MSG_PREFIX JFFS2_WARN JFFS2_WARN_PREFIX | ||
78 | #define JFFS2_NOTICE_MSG_PREFIX JFFS2_NOT JFFS2_NOTICE_PREFIX | ||
79 | |||
80 | /* JFFS2 message macros */ | ||
81 | #define JFFS2_ERROR(fmt, ...) \ | ||
82 | do { \ | ||
83 | printk(JFFS2_ERR_MSG_PREFIX \ | ||
84 | " (%d) %s: " fmt, current->pid, \ | ||
85 | __FUNCTION__, ##__VA_ARGS__); \ | ||
86 | } while(0) | ||
87 | |||
88 | #define JFFS2_WARNING(fmt, ...) \ | ||
89 | do { \ | ||
90 | printk(JFFS2_WARN_MSG_PREFIX \ | ||
91 | " (%d) %s: " fmt, current->pid, \ | ||
92 | __FUNCTION__, ##__VA_ARGS__); \ | ||
93 | } while(0) | ||
94 | |||
95 | #define JFFS2_NOTICE(fmt, ...) \ | ||
96 | do { \ | ||
97 | printk(JFFS2_NOTICE_MSG_PREFIX \ | ||
98 | " (%d) %s: " fmt, current->pid, \ | ||
99 | __FUNCTION__, ##__VA_ARGS__); \ | ||
100 | } while(0) | ||
101 | |||
102 | #define JFFS2_DEBUG(fmt, ...) \ | ||
103 | do { \ | ||
104 | printk(JFFS2_DBG_MSG_PREFIX \ | ||
105 | " (%d) %s: " fmt, current->pid, \ | ||
106 | __FUNCTION__, ##__VA_ARGS__); \ | ||
107 | } while(0) | ||
108 | |||
109 | /* | ||
110 | * We split our debugging messages into several parts, depending on the JFFS2 | ||
111 | * subsystem the message belongs to. | ||
112 | */ | ||
113 | /* Read inode debugging messages */ | ||
114 | #ifdef JFFS2_DBG_READINODE_MESSAGES | ||
115 | #define dbg_readinode(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) | ||
116 | #else | ||
117 | #define dbg_readinode(fmt, ...) | ||
118 | #endif | ||
119 | |||
120 | /* Fragtree build debugging messages */ | ||
121 | #ifdef JFFS2_DBG_FRAGTREE_MESSAGES | ||
122 | #define dbg_fragtree(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) | ||
123 | #else | ||
124 | #define dbg_fragtree(fmt, ...) | ||
125 | #endif | ||
126 | #ifdef JFFS2_DBG_FRAGTREE2_MESSAGES | ||
127 | #define dbg_fragtree2(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) | ||
128 | #else | ||
129 | #define dbg_fragtree2(fmt, ...) | ||
130 | #endif | ||
131 | |||
132 | /* Directory entry list manipulation debugging messages */ | ||
133 | #ifdef JFFS2_DBG_DENTLIST_MESSAGES | ||
134 | #define dbg_dentlist(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) | ||
135 | #else | ||
136 | #define dbg_dentlist(fmt, ...) | ||
137 | #endif | ||
138 | |||
139 | /* Print the messages about manipulating node_refs */ | ||
140 | #ifdef JFFS2_DBG_NODEREF_MESSAGES | ||
141 | #define dbg_noderef(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) | ||
142 | #else | ||
143 | #define dbg_noderef(fmt, ...) | ||
144 | #endif | ||
145 | |||
146 | /* Manipulations with the list of inodes (JFFS2 inocache) */ | ||
147 | #ifdef JFFS2_DBG_INOCACHE_MESSAGES | ||
148 | #define dbg_inocache(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) | ||
149 | #else | ||
150 | #define dbg_inocache(fmt, ...) | ||
151 | #endif | ||
152 | |||
153 | /* Summary debugging messages */ | ||
154 | #ifdef JFFS2_DBG_SUMMARY_MESSAGES | ||
155 | #define dbg_summary(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) | ||
156 | #else | ||
157 | #define dbg_summary(fmt, ...) | ||
158 | #endif | ||
159 | |||
160 | /* File system build messages */ | ||
161 | #ifdef JFFS2_DBG_FSBUILD_MESSAGES | ||
162 | #define dbg_fsbuild(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) | ||
163 | #else | ||
164 | #define dbg_fsbuild(fmt, ...) | ||
165 | #endif | ||
166 | |||
167 | /* Watch the object allocations */ | ||
168 | #ifdef JFFS2_DBG_MEMALLOC_MESSAGES | ||
169 | #define dbg_memalloc(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) | ||
170 | #else | ||
171 | #define dbg_memalloc(fmt, ...) | ||
172 | #endif | ||
173 | |||
174 | |||
175 | /* "Sanity" checks */ | ||
176 | void | ||
177 | __jffs2_dbg_acct_sanity_check_nolock(struct jffs2_sb_info *c, | ||
178 | struct jffs2_eraseblock *jeb); | ||
179 | void | ||
180 | __jffs2_dbg_acct_sanity_check(struct jffs2_sb_info *c, | ||
181 | struct jffs2_eraseblock *jeb); | ||
182 | |||
183 | /* "Paranoia" checks */ | ||
184 | void | ||
185 | __jffs2_dbg_fragtree_paranoia_check(struct jffs2_inode_info *f); | ||
186 | void | ||
187 | __jffs2_dbg_fragtree_paranoia_check_nolock(struct jffs2_inode_info *f); | ||
188 | void | ||
189 | __jffs2_dbg_acct_paranoia_check(struct jffs2_sb_info *c, | ||
190 | struct jffs2_eraseblock *jeb); | ||
191 | void | ||
192 | __jffs2_dbg_acct_paranoia_check_nolock(struct jffs2_sb_info *c, | ||
193 | struct jffs2_eraseblock *jeb); | ||
194 | void | ||
195 | __jffs2_dbg_prewrite_paranoia_check(struct jffs2_sb_info *c, | ||
196 | uint32_t ofs, int len); | ||
197 | |||
198 | /* "Dump" functions */ | ||
199 | void | ||
200 | __jffs2_dbg_dump_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); | ||
201 | void | ||
202 | __jffs2_dbg_dump_jeb_nolock(struct jffs2_eraseblock *jeb); | ||
203 | void | ||
204 | __jffs2_dbg_dump_block_lists(struct jffs2_sb_info *c); | ||
205 | void | ||
206 | __jffs2_dbg_dump_block_lists_nolock(struct jffs2_sb_info *c); | ||
207 | void | ||
208 | __jffs2_dbg_dump_node_refs(struct jffs2_sb_info *c, | ||
209 | struct jffs2_eraseblock *jeb); | ||
210 | void | ||
211 | __jffs2_dbg_dump_node_refs_nolock(struct jffs2_sb_info *c, | ||
212 | struct jffs2_eraseblock *jeb); | ||
213 | void | ||
214 | __jffs2_dbg_dump_fragtree(struct jffs2_inode_info *f); | ||
215 | void | ||
216 | __jffs2_dbg_dump_fragtree_nolock(struct jffs2_inode_info *f); | ||
217 | void | ||
218 | __jffs2_dbg_dump_buffer(unsigned char *buf, int len, uint32_t offs); | ||
219 | void | ||
220 | __jffs2_dbg_dump_node(struct jffs2_sb_info *c, uint32_t ofs); | ||
221 | |||
222 | #ifdef JFFS2_DBG_PARANOIA_CHECKS | ||
223 | #define jffs2_dbg_fragtree_paranoia_check(f) \ | ||
224 | __jffs2_dbg_fragtree_paranoia_check(f) | ||
225 | #define jffs2_dbg_fragtree_paranoia_check_nolock(f) \ | ||
226 | __jffs2_dbg_fragtree_paranoia_check_nolock(f) | ||
227 | #define jffs2_dbg_acct_paranoia_check(c, jeb) \ | ||
228 | __jffs2_dbg_acct_paranoia_check(c,jeb) | ||
229 | #define jffs2_dbg_acct_paranoia_check_nolock(c, jeb) \ | ||
230 | __jffs2_dbg_acct_paranoia_check_nolock(c,jeb) | ||
231 | #define jffs2_dbg_prewrite_paranoia_check(c, ofs, len) \ | ||
232 | __jffs2_dbg_prewrite_paranoia_check(c, ofs, len) | ||
233 | #else | ||
234 | #define jffs2_dbg_fragtree_paranoia_check(f) | ||
235 | #define jffs2_dbg_fragtree_paranoia_check_nolock(f) | ||
236 | #define jffs2_dbg_acct_paranoia_check(c, jeb) | ||
237 | #define jffs2_dbg_acct_paranoia_check_nolock(c, jeb) | ||
238 | #define jffs2_dbg_prewrite_paranoia_check(c, ofs, len) | ||
239 | #endif /* !JFFS2_DBG_PARANOIA_CHECKS */ | ||
240 | |||
241 | #ifdef JFFS2_DBG_DUMPS | ||
242 | #define jffs2_dbg_dump_jeb(c, jeb) \ | ||
243 | __jffs2_dbg_dump_jeb(c, jeb); | ||
244 | #define jffs2_dbg_dump_jeb_nolock(jeb) \ | ||
245 | __jffs2_dbg_dump_jeb_nolock(jeb); | ||
246 | #define jffs2_dbg_dump_block_lists(c) \ | ||
247 | __jffs2_dbg_dump_block_lists(c) | ||
248 | #define jffs2_dbg_dump_block_lists_nolock(c) \ | ||
249 | __jffs2_dbg_dump_block_lists_nolock(c) | ||
250 | #define jffs2_dbg_dump_fragtree(f) \ | ||
251 | __jffs2_dbg_dump_fragtree(f); | ||
252 | #define jffs2_dbg_dump_fragtree_nolock(f) \ | ||
253 | __jffs2_dbg_dump_fragtree_nolock(f); | ||
254 | #define jffs2_dbg_dump_buffer(buf, len, offs) \ | ||
255 | 	__jffs2_dbg_dump_buffer(buf, len, offs); | ||
256 | #define jffs2_dbg_dump_node(c, ofs) \ | ||
257 | __jffs2_dbg_dump_node(c, ofs); | ||
258 | #else | ||
259 | #define jffs2_dbg_dump_jeb(c, jeb) | ||
260 | #define jffs2_dbg_dump_jeb_nolock(jeb) | ||
261 | #define jffs2_dbg_dump_block_lists(c) | ||
262 | #define jffs2_dbg_dump_block_lists_nolock(c) | ||
263 | #define jffs2_dbg_dump_fragtree(f) | ||
264 | #define jffs2_dbg_dump_fragtree_nolock(f) | ||
265 | #define jffs2_dbg_dump_buffer(buf, len, offs) | ||
266 | #define jffs2_dbg_dump_node(c, ofs) | ||
267 | #endif /* !JFFS2_DBG_DUMPS */ | ||
268 | |||
269 | #ifdef JFFS2_DBG_SANITY_CHECKS | ||
270 | #define jffs2_dbg_acct_sanity_check(c, jeb) \ | ||
271 | __jffs2_dbg_acct_sanity_check(c, jeb) | ||
272 | #define jffs2_dbg_acct_sanity_check_nolock(c, jeb) \ | ||
273 | __jffs2_dbg_acct_sanity_check_nolock(c, jeb) | ||
274 | #else | ||
275 | #define jffs2_dbg_acct_sanity_check(c, jeb) | ||
276 | #define jffs2_dbg_acct_sanity_check_nolock(c, jeb) | ||
277 | #endif /* !JFFS2_DBG_SANITY_CHECKS */ | ||
278 | |||
279 | #endif /* _JFFS2_DEBUG_H_ */ | ||
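The header above gives callers two layers: always-on JFFS2_ERROR/WARNING/NOTICE/DEBUG message macros, and per-subsystem dbg_*() messages plus check/dump wrappers that compile to nothing when the corresponding JFFS2_DBG_* symbol is not defined. A minimal usage sketch; the function and its arguments are hypothetical, and only the macro names come from the header:

	/* Illustrative only -- not part of the patch. */
	static void example_after_scan(struct jffs2_sb_info *c,
				       struct jffs2_eraseblock *jeb, uint32_t ino)
	{
		/* disappears unless JFFS2_DBG_READINODE_MESSAGES is defined */
		dbg_readinode("building inode #%u\n", ino);

		jffs2_dbg_acct_sanity_check(c, jeb);	/* cheap, on by default */
		jffs2_dbg_acct_paranoia_check(c, jeb);	/* heavier, CONFIG_JFFS2_FS_DEBUG > 0 only */

		if (!jeb->first_node)
			JFFS2_WARNING("eraseblock at %#08x is empty\n", jeb->offset);
	}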
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c index 3ca0d25eef1d..a7bf9cb2567f 100644 --- a/fs/jffs2/dir.c +++ b/fs/jffs2/dir.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: dir.c,v 1.86 2005/07/06 12:13:09 dwmw2 Exp $ | 10 | * $Id: dir.c,v 1.90 2005/11/07 11:14:39 gleixner Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -64,7 +64,7 @@ struct inode_operations jffs2_dir_inode_operations = | |||
64 | 64 | ||
65 | 65 | ||
66 | /* We keep the dirent list sorted in increasing order of name hash, | 66 | /* We keep the dirent list sorted in increasing order of name hash, |
67 | and we use the same hash function as the dentries. Makes this | 67 | and we use the same hash function as the dentries. Makes this |
68 | nice and simple | 68 | nice and simple |
69 | */ | 69 | */ |
70 | static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target, | 70 | static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target, |
@@ -85,7 +85,7 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target, | |||
85 | 85 | ||
86 | /* NB: The 2.2 backport will need to explicitly check for '.' and '..' here */ | 86 | /* NB: The 2.2 backport will need to explicitly check for '.' and '..' here */ |
87 | for (fd_list = dir_f->dents; fd_list && fd_list->nhash <= target->d_name.hash; fd_list = fd_list->next) { | 87 | for (fd_list = dir_f->dents; fd_list && fd_list->nhash <= target->d_name.hash; fd_list = fd_list->next) { |
88 | if (fd_list->nhash == target->d_name.hash && | 88 | if (fd_list->nhash == target->d_name.hash && |
89 | (!fd || fd_list->version > fd->version) && | 89 | (!fd || fd_list->version > fd->version) && |
90 | strlen(fd_list->name) == target->d_name.len && | 90 | strlen(fd_list->name) == target->d_name.len && |
91 | !strncmp(fd_list->name, target->d_name.name, target->d_name.len)) { | 91 | !strncmp(fd_list->name, target->d_name.name, target->d_name.len)) { |
@@ -147,7 +147,7 @@ static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir) | |||
147 | curofs++; | 147 | curofs++; |
148 | /* First loop: curofs = 2; offset = 2 */ | 148 | /* First loop: curofs = 2; offset = 2 */ |
149 | if (curofs < offset) { | 149 | if (curofs < offset) { |
150 | D2(printk(KERN_DEBUG "Skipping dirent: \"%s\", ino #%u, type %d, because curofs %ld < offset %ld\n", | 150 | D2(printk(KERN_DEBUG "Skipping dirent: \"%s\", ino #%u, type %d, because curofs %ld < offset %ld\n", |
151 | fd->name, fd->ino, fd->type, curofs, offset)); | 151 | fd->name, fd->ino, fd->type, curofs, offset)); |
152 | continue; | 152 | continue; |
153 | } | 153 | } |
@@ -182,7 +182,7 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode, | |||
182 | ri = jffs2_alloc_raw_inode(); | 182 | ri = jffs2_alloc_raw_inode(); |
183 | if (!ri) | 183 | if (!ri) |
184 | return -ENOMEM; | 184 | return -ENOMEM; |
185 | 185 | ||
186 | c = JFFS2_SB_INFO(dir_i->i_sb); | 186 | c = JFFS2_SB_INFO(dir_i->i_sb); |
187 | 187 | ||
188 | D1(printk(KERN_DEBUG "jffs2_create()\n")); | 188 | D1(printk(KERN_DEBUG "jffs2_create()\n")); |
@@ -203,7 +203,7 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode, | |||
203 | f = JFFS2_INODE_INFO(inode); | 203 | f = JFFS2_INODE_INFO(inode); |
204 | dir_f = JFFS2_INODE_INFO(dir_i); | 204 | dir_f = JFFS2_INODE_INFO(dir_i); |
205 | 205 | ||
206 | ret = jffs2_do_create(c, dir_f, f, ri, | 206 | ret = jffs2_do_create(c, dir_f, f, ri, |
207 | dentry->d_name.name, dentry->d_name.len); | 207 | dentry->d_name.name, dentry->d_name.len); |
208 | 208 | ||
209 | if (ret) { | 209 | if (ret) { |
@@ -232,11 +232,14 @@ static int jffs2_unlink(struct inode *dir_i, struct dentry *dentry) | |||
232 | struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i); | 232 | struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i); |
233 | struct jffs2_inode_info *dead_f = JFFS2_INODE_INFO(dentry->d_inode); | 233 | struct jffs2_inode_info *dead_f = JFFS2_INODE_INFO(dentry->d_inode); |
234 | int ret; | 234 | int ret; |
235 | uint32_t now = get_seconds(); | ||
235 | 236 | ||
236 | ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name, | 237 | ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name, |
237 | dentry->d_name.len, dead_f); | 238 | dentry->d_name.len, dead_f, now); |
238 | if (dead_f->inocache) | 239 | if (dead_f->inocache) |
239 | dentry->d_inode->i_nlink = dead_f->inocache->nlink; | 240 | dentry->d_inode->i_nlink = dead_f->inocache->nlink; |
241 | if (!ret) | ||
242 | dir_i->i_mtime = dir_i->i_ctime = ITIME(now); | ||
240 | return ret; | 243 | return ret; |
241 | } | 244 | } |
242 | /***********************************************************************/ | 245 | /***********************************************************************/ |
@@ -249,6 +252,7 @@ static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct de | |||
249 | struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i); | 252 | struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i); |
250 | int ret; | 253 | int ret; |
251 | uint8_t type; | 254 | uint8_t type; |
255 | uint32_t now; | ||
252 | 256 | ||
253 | /* Don't let people make hard links to bad inodes. */ | 257 | /* Don't let people make hard links to bad inodes. */ |
254 | if (!f->inocache) | 258 | if (!f->inocache) |
@@ -261,13 +265,15 @@ static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct de | |||
261 | type = (old_dentry->d_inode->i_mode & S_IFMT) >> 12; | 265 | type = (old_dentry->d_inode->i_mode & S_IFMT) >> 12; |
262 | if (!type) type = DT_REG; | 266 | if (!type) type = DT_REG; |
263 | 267 | ||
264 | ret = jffs2_do_link(c, dir_f, f->inocache->ino, type, dentry->d_name.name, dentry->d_name.len); | 268 | now = get_seconds(); |
269 | ret = jffs2_do_link(c, dir_f, f->inocache->ino, type, dentry->d_name.name, dentry->d_name.len, now); | ||
265 | 270 | ||
266 | if (!ret) { | 271 | if (!ret) { |
267 | down(&f->sem); | 272 | down(&f->sem); |
268 | old_dentry->d_inode->i_nlink = ++f->inocache->nlink; | 273 | old_dentry->d_inode->i_nlink = ++f->inocache->nlink; |
269 | up(&f->sem); | 274 | up(&f->sem); |
270 | d_instantiate(dentry, old_dentry->d_inode); | 275 | d_instantiate(dentry, old_dentry->d_inode); |
276 | dir_i->i_mtime = dir_i->i_ctime = ITIME(now); | ||
271 | atomic_inc(&old_dentry->d_inode->i_count); | 277 | atomic_inc(&old_dentry->d_inode->i_count); |
272 | } | 278 | } |
273 | return ret; | 279 | return ret; |
@@ -297,14 +303,15 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char | |||
297 | 303 | ||
298 | if (!ri) | 304 | if (!ri) |
299 | return -ENOMEM; | 305 | return -ENOMEM; |
300 | 306 | ||
301 | c = JFFS2_SB_INFO(dir_i->i_sb); | 307 | c = JFFS2_SB_INFO(dir_i->i_sb); |
302 | 308 | ||
303 | /* Try to reserve enough space for both node and dirent. | 309 | /* Try to reserve enough space for both node and dirent. |
304 | * Just the node will do for now, though | 310 | * Just the node will do for now, though |
305 | */ | 311 | */ |
306 | namelen = dentry->d_name.len; | 312 | namelen = dentry->d_name.len; |
307 | ret = jffs2_reserve_space(c, sizeof(*ri) + targetlen, &phys_ofs, &alloclen, ALLOC_NORMAL); | 313 | ret = jffs2_reserve_space(c, sizeof(*ri) + targetlen, &phys_ofs, &alloclen, |
314 | ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); | ||
308 | 315 | ||
309 | if (ret) { | 316 | if (ret) { |
310 | jffs2_free_raw_inode(ri); | 317 | jffs2_free_raw_inode(ri); |
@@ -331,7 +338,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char | |||
331 | ri->compr = JFFS2_COMPR_NONE; | 338 | ri->compr = JFFS2_COMPR_NONE; |
332 | ri->data_crc = cpu_to_je32(crc32(0, target, targetlen)); | 339 | ri->data_crc = cpu_to_je32(crc32(0, target, targetlen)); |
333 | ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); | 340 | ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); |
334 | 341 | ||
335 | fn = jffs2_write_dnode(c, f, ri, target, targetlen, phys_ofs, ALLOC_NORMAL); | 342 | fn = jffs2_write_dnode(c, f, ri, target, targetlen, phys_ofs, ALLOC_NORMAL); |
336 | 343 | ||
337 | jffs2_free_raw_inode(ri); | 344 | jffs2_free_raw_inode(ri); |
@@ -344,9 +351,9 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char | |||
344 | return PTR_ERR(fn); | 351 | return PTR_ERR(fn); |
345 | } | 352 | } |
346 | 353 | ||
347 | /* We use f->dents field to store the target path. */ | 354 | /* We use f->target field to store the target path. */ |
348 | f->dents = kmalloc(targetlen + 1, GFP_KERNEL); | 355 | f->target = kmalloc(targetlen + 1, GFP_KERNEL); |
349 | if (!f->dents) { | 356 | if (!f->target) { |
350 | printk(KERN_WARNING "Can't allocate %d bytes of memory\n", targetlen + 1); | 357 | printk(KERN_WARNING "Can't allocate %d bytes of memory\n", targetlen + 1); |
351 | up(&f->sem); | 358 | up(&f->sem); |
352 | jffs2_complete_reservation(c); | 359 | jffs2_complete_reservation(c); |
@@ -354,17 +361,18 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char | |||
354 | return -ENOMEM; | 361 | return -ENOMEM; |
355 | } | 362 | } |
356 | 363 | ||
357 | memcpy(f->dents, target, targetlen + 1); | 364 | memcpy(f->target, target, targetlen + 1); |
358 | D1(printk(KERN_DEBUG "jffs2_symlink: symlink's target '%s' cached\n", (char *)f->dents)); | 365 | D1(printk(KERN_DEBUG "jffs2_symlink: symlink's target '%s' cached\n", (char *)f->target)); |
359 | 366 | ||
360 | /* No data here. Only a metadata node, which will be | 367 | /* No data here. Only a metadata node, which will be |
361 | obsoleted by the first data write | 368 | obsoleted by the first data write |
362 | */ | 369 | */ |
363 | f->metadata = fn; | 370 | f->metadata = fn; |
364 | up(&f->sem); | 371 | up(&f->sem); |
365 | 372 | ||
366 | jffs2_complete_reservation(c); | 373 | jffs2_complete_reservation(c); |
367 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL); | 374 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, |
375 | ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); | ||
368 | if (ret) { | 376 | if (ret) { |
369 | /* Eep. */ | 377 | /* Eep. */ |
370 | jffs2_clear_inode(inode); | 378 | jffs2_clear_inode(inode); |
@@ -399,7 +407,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char | |||
399 | fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL); | 407 | fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL); |
400 | 408 | ||
401 | if (IS_ERR(fd)) { | 409 | if (IS_ERR(fd)) { |
402 | /* dirent failed to write. Delete the inode normally | 410 | /* dirent failed to write. Delete the inode normally |
403 | as if it were the final unlink() */ | 411 | as if it were the final unlink() */ |
404 | jffs2_complete_reservation(c); | 412 | jffs2_complete_reservation(c); |
405 | jffs2_free_raw_dirent(rd); | 413 | jffs2_free_raw_dirent(rd); |
@@ -442,14 +450,15 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode) | |||
442 | ri = jffs2_alloc_raw_inode(); | 450 | ri = jffs2_alloc_raw_inode(); |
443 | if (!ri) | 451 | if (!ri) |
444 | return -ENOMEM; | 452 | return -ENOMEM; |
445 | 453 | ||
446 | c = JFFS2_SB_INFO(dir_i->i_sb); | 454 | c = JFFS2_SB_INFO(dir_i->i_sb); |
447 | 455 | ||
448 | /* Try to reserve enough space for both node and dirent. | 456 | /* Try to reserve enough space for both node and dirent. |
449 | * Just the node will do for now, though | 457 | * Just the node will do for now, though |
450 | */ | 458 | */ |
451 | namelen = dentry->d_name.len; | 459 | namelen = dentry->d_name.len; |
452 | ret = jffs2_reserve_space(c, sizeof(*ri), &phys_ofs, &alloclen, ALLOC_NORMAL); | 460 | ret = jffs2_reserve_space(c, sizeof(*ri), &phys_ofs, &alloclen, ALLOC_NORMAL, |
461 | JFFS2_SUMMARY_INODE_SIZE); | ||
453 | 462 | ||
454 | if (ret) { | 463 | if (ret) { |
455 | jffs2_free_raw_inode(ri); | 464 | jffs2_free_raw_inode(ri); |
@@ -473,7 +482,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode) | |||
473 | 482 | ||
474 | ri->data_crc = cpu_to_je32(0); | 483 | ri->data_crc = cpu_to_je32(0); |
475 | ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); | 484 | ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); |
476 | 485 | ||
477 | fn = jffs2_write_dnode(c, f, ri, NULL, 0, phys_ofs, ALLOC_NORMAL); | 486 | fn = jffs2_write_dnode(c, f, ri, NULL, 0, phys_ofs, ALLOC_NORMAL); |
478 | 487 | ||
479 | jffs2_free_raw_inode(ri); | 488 | jffs2_free_raw_inode(ri); |
@@ -485,20 +494,21 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode) | |||
485 | jffs2_clear_inode(inode); | 494 | jffs2_clear_inode(inode); |
486 | return PTR_ERR(fn); | 495 | return PTR_ERR(fn); |
487 | } | 496 | } |
488 | /* No data here. Only a metadata node, which will be | 497 | /* No data here. Only a metadata node, which will be |
489 | obsoleted by the first data write | 498 | obsoleted by the first data write |
490 | */ | 499 | */ |
491 | f->metadata = fn; | 500 | f->metadata = fn; |
492 | up(&f->sem); | 501 | up(&f->sem); |
493 | 502 | ||
494 | jffs2_complete_reservation(c); | 503 | jffs2_complete_reservation(c); |
495 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL); | 504 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, |
505 | ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); | ||
496 | if (ret) { | 506 | if (ret) { |
497 | /* Eep. */ | 507 | /* Eep. */ |
498 | jffs2_clear_inode(inode); | 508 | jffs2_clear_inode(inode); |
499 | return ret; | 509 | return ret; |
500 | } | 510 | } |
501 | 511 | ||
502 | rd = jffs2_alloc_raw_dirent(); | 512 | rd = jffs2_alloc_raw_dirent(); |
503 | if (!rd) { | 513 | if (!rd) { |
504 | /* Argh. Now we treat it like a normal delete */ | 514 | /* Argh. Now we treat it like a normal delete */ |
@@ -525,9 +535,9 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode) | |||
525 | rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen)); | 535 | rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen)); |
526 | 536 | ||
527 | fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL); | 537 | fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL); |
528 | 538 | ||
529 | if (IS_ERR(fd)) { | 539 | if (IS_ERR(fd)) { |
530 | /* dirent failed to write. Delete the inode normally | 540 | /* dirent failed to write. Delete the inode normally |
531 | as if it were the final unlink() */ | 541 | as if it were the final unlink() */ |
532 | jffs2_complete_reservation(c); | 542 | jffs2_complete_reservation(c); |
533 | jffs2_free_raw_dirent(rd); | 543 | jffs2_free_raw_dirent(rd); |
@@ -589,19 +599,20 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de | |||
589 | ri = jffs2_alloc_raw_inode(); | 599 | ri = jffs2_alloc_raw_inode(); |
590 | if (!ri) | 600 | if (!ri) |
591 | return -ENOMEM; | 601 | return -ENOMEM; |
592 | 602 | ||
593 | c = JFFS2_SB_INFO(dir_i->i_sb); | 603 | c = JFFS2_SB_INFO(dir_i->i_sb); |
594 | 604 | ||
595 | if (S_ISBLK(mode) || S_ISCHR(mode)) { | 605 | if (S_ISBLK(mode) || S_ISCHR(mode)) { |
596 | dev = cpu_to_je16(old_encode_dev(rdev)); | 606 | dev = cpu_to_je16(old_encode_dev(rdev)); |
597 | devlen = sizeof(dev); | 607 | devlen = sizeof(dev); |
598 | } | 608 | } |
599 | 609 | ||
600 | /* Try to reserve enough space for both node and dirent. | 610 | /* Try to reserve enough space for both node and dirent. |
601 | * Just the node will do for now, though | 611 | * Just the node will do for now, though |
602 | */ | 612 | */ |
603 | namelen = dentry->d_name.len; | 613 | namelen = dentry->d_name.len; |
604 | ret = jffs2_reserve_space(c, sizeof(*ri) + devlen, &phys_ofs, &alloclen, ALLOC_NORMAL); | 614 | ret = jffs2_reserve_space(c, sizeof(*ri) + devlen, &phys_ofs, &alloclen, |
615 | ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); | ||
605 | 616 | ||
606 | if (ret) { | 617 | if (ret) { |
607 | jffs2_free_raw_inode(ri); | 618 | jffs2_free_raw_inode(ri); |
@@ -627,7 +638,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de | |||
627 | ri->compr = JFFS2_COMPR_NONE; | 638 | ri->compr = JFFS2_COMPR_NONE; |
628 | ri->data_crc = cpu_to_je32(crc32(0, &dev, devlen)); | 639 | ri->data_crc = cpu_to_je32(crc32(0, &dev, devlen)); |
629 | ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); | 640 | ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); |
630 | 641 | ||
631 | fn = jffs2_write_dnode(c, f, ri, (char *)&dev, devlen, phys_ofs, ALLOC_NORMAL); | 642 | fn = jffs2_write_dnode(c, f, ri, (char *)&dev, devlen, phys_ofs, ALLOC_NORMAL); |
632 | 643 | ||
633 | jffs2_free_raw_inode(ri); | 644 | jffs2_free_raw_inode(ri); |
@@ -639,14 +650,15 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de | |||
639 | jffs2_clear_inode(inode); | 650 | jffs2_clear_inode(inode); |
640 | return PTR_ERR(fn); | 651 | return PTR_ERR(fn); |
641 | } | 652 | } |
642 | /* No data here. Only a metadata node, which will be | 653 | /* No data here. Only a metadata node, which will be |
643 | obsoleted by the first data write | 654 | obsoleted by the first data write |
644 | */ | 655 | */ |
645 | f->metadata = fn; | 656 | f->metadata = fn; |
646 | up(&f->sem); | 657 | up(&f->sem); |
647 | 658 | ||
648 | jffs2_complete_reservation(c); | 659 | jffs2_complete_reservation(c); |
649 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL); | 660 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, |
661 | ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); | ||
650 | if (ret) { | 662 | if (ret) { |
651 | /* Eep. */ | 663 | /* Eep. */ |
652 | jffs2_clear_inode(inode); | 664 | jffs2_clear_inode(inode); |
@@ -682,9 +694,9 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de | |||
682 | rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen)); | 694 | rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen)); |
683 | 695 | ||
684 | fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL); | 696 | fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL); |
685 | 697 | ||
686 | if (IS_ERR(fd)) { | 698 | if (IS_ERR(fd)) { |
687 | /* dirent failed to write. Delete the inode normally | 699 | /* dirent failed to write. Delete the inode normally |
688 | as if it were the final unlink() */ | 700 | as if it were the final unlink() */ |
689 | jffs2_complete_reservation(c); | 701 | jffs2_complete_reservation(c); |
690 | jffs2_free_raw_dirent(rd); | 702 | jffs2_free_raw_dirent(rd); |
@@ -716,8 +728,9 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, | |||
716 | struct jffs2_sb_info *c = JFFS2_SB_INFO(old_dir_i->i_sb); | 728 | struct jffs2_sb_info *c = JFFS2_SB_INFO(old_dir_i->i_sb); |
717 | struct jffs2_inode_info *victim_f = NULL; | 729 | struct jffs2_inode_info *victim_f = NULL; |
718 | uint8_t type; | 730 | uint8_t type; |
731 | uint32_t now; | ||
719 | 732 | ||
720 | /* The VFS will check for us and prevent trying to rename a | 733 | /* The VFS will check for us and prevent trying to rename a |
721 | * file over a directory and vice versa, but if it's a directory, | 734 | * file over a directory and vice versa, but if it's a directory, |
722 | * the VFS can't check whether the victim is empty. The filesystem | 735 | * the VFS can't check whether the victim is empty. The filesystem |
723 | * needs to do that for itself. | 736 | * needs to do that for itself. |
@@ -739,19 +752,20 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, | |||
739 | } | 752 | } |
740 | 753 | ||
741 | /* XXX: We probably ought to alloc enough space for | 754 | /* XXX: We probably ought to alloc enough space for |
742 | both nodes at the same time. Writing the new link, | 755 | both nodes at the same time. Writing the new link, |
743 | then getting -ENOSPC, is quite bad :) | 756 | then getting -ENOSPC, is quite bad :) |
744 | */ | 757 | */ |
745 | 758 | ||
746 | /* Make a hard link */ | 759 | /* Make a hard link */ |
747 | 760 | ||
748 | /* XXX: This is ugly */ | 761 | /* XXX: This is ugly */ |
749 | type = (old_dentry->d_inode->i_mode & S_IFMT) >> 12; | 762 | type = (old_dentry->d_inode->i_mode & S_IFMT) >> 12; |
750 | if (!type) type = DT_REG; | 763 | if (!type) type = DT_REG; |
751 | 764 | ||
752 | ret = jffs2_do_link(c, JFFS2_INODE_INFO(new_dir_i), | 765 | now = get_seconds(); |
766 | ret = jffs2_do_link(c, JFFS2_INODE_INFO(new_dir_i), | ||
753 | old_dentry->d_inode->i_ino, type, | 767 | old_dentry->d_inode->i_ino, type, |
754 | new_dentry->d_name.name, new_dentry->d_name.len); | 768 | new_dentry->d_name.name, new_dentry->d_name.len, now); |
755 | 769 | ||
756 | if (ret) | 770 | if (ret) |
757 | return ret; | 771 | return ret; |
@@ -768,14 +782,14 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, | |||
768 | } | 782 | } |
769 | } | 783 | } |
770 | 784 | ||
771 | /* If it was a directory we moved, and there was no victim, | 785 | /* If it was a directory we moved, and there was no victim, |
772 | increase i_nlink on its new parent */ | 786 | increase i_nlink on its new parent */ |
773 | if (S_ISDIR(old_dentry->d_inode->i_mode) && !victim_f) | 787 | if (S_ISDIR(old_dentry->d_inode->i_mode) && !victim_f) |
774 | new_dir_i->i_nlink++; | 788 | new_dir_i->i_nlink++; |
775 | 789 | ||
776 | /* Unlink the original */ | 790 | /* Unlink the original */ |
777 | ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i), | 791 | ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i), |
778 | old_dentry->d_name.name, old_dentry->d_name.len, NULL); | 792 | old_dentry->d_name.name, old_dentry->d_name.len, NULL, now); |
779 | 793 | ||
780 | /* We don't touch inode->i_nlink */ | 794 | /* We don't touch inode->i_nlink */ |
781 | 795 | ||
@@ -792,12 +806,15 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, | |||
792 | /* Might as well let the VFS know */ | 806 | /* Might as well let the VFS know */ |
793 | d_instantiate(new_dentry, old_dentry->d_inode); | 807 | d_instantiate(new_dentry, old_dentry->d_inode); |
794 | atomic_inc(&old_dentry->d_inode->i_count); | 808 | atomic_inc(&old_dentry->d_inode->i_count); |
809 | new_dir_i->i_mtime = new_dir_i->i_ctime = ITIME(now); | ||
795 | return ret; | 810 | return ret; |
796 | } | 811 | } |
797 | 812 | ||
798 | if (S_ISDIR(old_dentry->d_inode->i_mode)) | 813 | if (S_ISDIR(old_dentry->d_inode->i_mode)) |
799 | old_dir_i->i_nlink--; | 814 | old_dir_i->i_nlink--; |
800 | 815 | ||
816 | new_dir_i->i_mtime = new_dir_i->i_ctime = old_dir_i->i_mtime = old_dir_i->i_ctime = ITIME(now); | ||
817 | |||
801 | return 0; | 818 | return 0; |
802 | } | 819 | } |
803 | 820 | ||
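The dir.c hunks above follow one pattern: each jffs2_reserve_space() call gains a trailing summary-size hint (JFFS2_SUMMARY_INODE_SIZE for inode nodes, JFFS2_SUMMARY_DIRENT_SIZE(namelen) for dirents) so the new erase-block summary code can account for its entry up front, and jffs2_rename() now takes a single get_seconds() timestamp and hands it to both jffs2_do_link() and jffs2_do_unlink() so the two parent directories end up with identical mtime/ctime via ITIME(now). Below is a minimal sketch of the new dirent-writing call sequence, using jffs2_reserve_space(), jffs2_write_dirent(), jffs2_free_raw_dirent() and jffs2_complete_reservation() with the signatures shown in the hunks; the wrapper itself is hypothetical and omits the timestamp and nlink bookkeeping the real callers do.

/* Hypothetical helper illustrating the post-merge calling convention in
 * fs/jffs2/dir.c (would live inside fs/jffs2 with "nodelist.h" in scope). */
static int example_write_dirent(struct jffs2_sb_info *c,
				struct jffs2_inode_info *dir_f,
				struct jffs2_raw_dirent *rd,
				const unsigned char *name, uint32_t namelen)
{
	struct jffs2_full_dirent *fd;
	uint32_t phys_ofs, alloclen;
	int ret;

	/* Reserve flash space for the dirent node, including a worst-case
	 * estimate of its erase-block summary entry. */
	ret = jffs2_reserve_space(c, sizeof(*rd) + namelen, &phys_ofs,
				  &alloclen, ALLOC_NORMAL,
				  JFFS2_SUMMARY_DIRENT_SIZE(namelen));
	if (ret)
		return ret;

	fd = jffs2_write_dirent(c, dir_f, rd, name, namelen,
				phys_ofs, ALLOC_NORMAL);
	jffs2_free_raw_dirent(rd);
	jffs2_complete_reservation(c);

	if (IS_ERR(fd))
		return PTR_ERR(fd);

	/* The real callers then link fd into the directory and update its
	 * timestamps with ITIME(now). */
	return 0;
}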
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c index 787d84ac2bcd..dad68fdffe9e 100644 --- a/fs/jffs2/erase.c +++ b/fs/jffs2/erase.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: erase.c,v 1.80 2005/07/14 19:46:24 joern Exp $ | 10 | * $Id: erase.c,v 1.85 2005/09/20 14:53:15 dedekind Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -24,7 +24,7 @@ struct erase_priv_struct { | |||
24 | struct jffs2_eraseblock *jeb; | 24 | struct jffs2_eraseblock *jeb; |
25 | struct jffs2_sb_info *c; | 25 | struct jffs2_sb_info *c; |
26 | }; | 26 | }; |
27 | 27 | ||
28 | #ifndef __ECOS | 28 | #ifndef __ECOS |
29 | static void jffs2_erase_callback(struct erase_info *); | 29 | static void jffs2_erase_callback(struct erase_info *); |
30 | #endif | 30 | #endif |
@@ -48,7 +48,8 @@ static void jffs2_erase_block(struct jffs2_sb_info *c, | |||
48 | #else /* Linux */ | 48 | #else /* Linux */ |
49 | struct erase_info *instr; | 49 | struct erase_info *instr; |
50 | 50 | ||
51 | D1(printk(KERN_DEBUG "jffs2_erase_block(): erase block %#x (range %#x-%#x)\n", jeb->offset, jeb->offset, jeb->offset + c->sector_size)); | 51 | D1(printk(KERN_DEBUG "jffs2_erase_block(): erase block %#08x (range %#08x-%#08x)\n", |
52 | jeb->offset, jeb->offset, jeb->offset + c->sector_size)); | ||
52 | instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL); | 53 | instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL); |
53 | if (!instr) { | 54 | if (!instr) { |
54 | printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n"); | 55 | printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n"); |
@@ -70,7 +71,7 @@ static void jffs2_erase_block(struct jffs2_sb_info *c, | |||
70 | instr->callback = jffs2_erase_callback; | 71 | instr->callback = jffs2_erase_callback; |
71 | instr->priv = (unsigned long)(&instr[1]); | 72 | instr->priv = (unsigned long)(&instr[1]); |
72 | instr->fail_addr = 0xffffffff; | 73 | instr->fail_addr = 0xffffffff; |
73 | 74 | ||
74 | ((struct erase_priv_struct *)instr->priv)->jeb = jeb; | 75 | ((struct erase_priv_struct *)instr->priv)->jeb = jeb; |
75 | ((struct erase_priv_struct *)instr->priv)->c = c; | 76 | ((struct erase_priv_struct *)instr->priv)->c = c; |
76 | 77 | ||
@@ -95,7 +96,7 @@ static void jffs2_erase_block(struct jffs2_sb_info *c, | |||
95 | return; | 96 | return; |
96 | } | 97 | } |
97 | 98 | ||
98 | if (ret == -EROFS) | 99 | if (ret == -EROFS) |
99 | printk(KERN_WARNING "Erase at 0x%08x failed immediately: -EROFS. Is the sector locked?\n", jeb->offset); | 100 | printk(KERN_WARNING "Erase at 0x%08x failed immediately: -EROFS. Is the sector locked?\n", jeb->offset); |
100 | else | 101 | else |
101 | printk(KERN_WARNING "Erase at 0x%08x failed immediately: errno %d\n", jeb->offset, ret); | 102 | printk(KERN_WARNING "Erase at 0x%08x failed immediately: errno %d\n", jeb->offset, ret); |
@@ -196,7 +197,7 @@ static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock | |||
196 | c->nr_erasing_blocks--; | 197 | c->nr_erasing_blocks--; |
197 | spin_unlock(&c->erase_completion_lock); | 198 | spin_unlock(&c->erase_completion_lock); |
198 | wake_up(&c->erase_wait); | 199 | wake_up(&c->erase_wait); |
199 | } | 200 | } |
200 | 201 | ||
201 | #ifndef __ECOS | 202 | #ifndef __ECOS |
202 | static void jffs2_erase_callback(struct erase_info *instr) | 203 | static void jffs2_erase_callback(struct erase_info *instr) |
@@ -208,7 +209,7 @@ static void jffs2_erase_callback(struct erase_info *instr) | |||
208 | jffs2_erase_failed(priv->c, priv->jeb, instr->fail_addr); | 209 | jffs2_erase_failed(priv->c, priv->jeb, instr->fail_addr); |
209 | } else { | 210 | } else { |
210 | jffs2_erase_succeeded(priv->c, priv->jeb); | 211 | jffs2_erase_succeeded(priv->c, priv->jeb); |
211 | } | 212 | } |
212 | kfree(instr); | 213 | kfree(instr); |
213 | } | 214 | } |
214 | #endif /* !__ECOS */ | 215 | #endif /* !__ECOS */ |
@@ -226,13 +227,13 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c, | |||
226 | /* Walk the inode's list once, removing any nodes from this eraseblock */ | 227 | /* Walk the inode's list once, removing any nodes from this eraseblock */ |
227 | while (1) { | 228 | while (1) { |
228 | if (!(*prev)->next_in_ino) { | 229 | if (!(*prev)->next_in_ino) { |
229 | /* We're looking at the jffs2_inode_cache, which is | 230 | /* We're looking at the jffs2_inode_cache, which is |
230 | at the end of the linked list. Stash it and continue | 231 | at the end of the linked list. Stash it and continue |
231 | from the beginning of the list */ | 232 | from the beginning of the list */ |
232 | ic = (struct jffs2_inode_cache *)(*prev); | 233 | ic = (struct jffs2_inode_cache *)(*prev); |
233 | prev = &ic->nodes; | 234 | prev = &ic->nodes; |
234 | continue; | 235 | continue; |
235 | } | 236 | } |
236 | 237 | ||
237 | if (SECTOR_ADDR((*prev)->flash_offset) == jeb->offset) { | 238 | if (SECTOR_ADDR((*prev)->flash_offset) == jeb->offset) { |
238 | /* It's in the block we're erasing */ | 239 | /* It's in the block we're erasing */ |
@@ -266,7 +267,7 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c, | |||
266 | printk(KERN_DEBUG "After remove_node_refs_from_ino_list: \n" KERN_DEBUG); | 267 | printk(KERN_DEBUG "After remove_node_refs_from_ino_list: \n" KERN_DEBUG); |
267 | 268 | ||
268 | this = ic->nodes; | 269 | this = ic->nodes; |
269 | 270 | ||
270 | while(this) { | 271 | while(this) { |
271 | printk( "0x%08x(%d)->", ref_offset(this), ref_flags(this)); | 272 | printk( "0x%08x(%d)->", ref_offset(this), ref_flags(this)); |
272 | if (++i == 5) { | 273 | if (++i == 5) { |
@@ -289,7 +290,7 @@ static void jffs2_free_all_node_refs(struct jffs2_sb_info *c, struct jffs2_erase | |||
289 | while(jeb->first_node) { | 290 | while(jeb->first_node) { |
290 | ref = jeb->first_node; | 291 | ref = jeb->first_node; |
291 | jeb->first_node = ref->next_phys; | 292 | jeb->first_node = ref->next_phys; |
292 | 293 | ||
293 | /* Remove from the inode-list */ | 294 | /* Remove from the inode-list */ |
294 | if (ref->next_in_ino) | 295 | if (ref->next_in_ino) |
295 | jffs2_remove_node_refs_from_ino_list(c, ref, jeb); | 296 | jffs2_remove_node_refs_from_ino_list(c, ref, jeb); |
@@ -306,7 +307,7 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl | |||
306 | uint32_t ofs; | 307 | uint32_t ofs; |
307 | size_t retlen; | 308 | size_t retlen; |
308 | int ret = -EIO; | 309 | int ret = -EIO; |
309 | 310 | ||
310 | ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL); | 311 | ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL); |
311 | if (!ebuf) { | 312 | if (!ebuf) { |
312 | printk(KERN_WARNING "Failed to allocate page buffer for verifying erase at 0x%08x. Refiling\n", jeb->offset); | 313 | printk(KERN_WARNING "Failed to allocate page buffer for verifying erase at 0x%08x. Refiling\n", jeb->offset); |
@@ -360,7 +361,7 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb | |||
360 | case -EIO: goto filebad; | 361 | case -EIO: goto filebad; |
361 | } | 362 | } |
362 | 363 | ||
363 | /* Write the erase complete marker */ | 364 | /* Write the erase complete marker */ |
364 | D1(printk(KERN_DEBUG "Writing erased marker to block at 0x%08x\n", jeb->offset)); | 365 | D1(printk(KERN_DEBUG "Writing erased marker to block at 0x%08x\n", jeb->offset)); |
365 | bad_offset = jeb->offset; | 366 | bad_offset = jeb->offset; |
366 | 367 | ||
@@ -398,7 +399,7 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb | |||
398 | vecs[0].iov_base = (unsigned char *) &marker; | 399 | vecs[0].iov_base = (unsigned char *) &marker; |
399 | vecs[0].iov_len = sizeof(marker); | 400 | vecs[0].iov_len = sizeof(marker); |
400 | ret = jffs2_flash_direct_writev(c, vecs, 1, jeb->offset, &retlen); | 401 | ret = jffs2_flash_direct_writev(c, vecs, 1, jeb->offset, &retlen); |
401 | 402 | ||
402 | if (ret || retlen != sizeof(marker)) { | 403 | if (ret || retlen != sizeof(marker)) { |
403 | if (ret) | 404 | if (ret) |
404 | printk(KERN_WARNING "Write clean marker to block at 0x%08x failed: %d\n", | 405 | printk(KERN_WARNING "Write clean marker to block at 0x%08x failed: %d\n", |
@@ -415,9 +416,9 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb | |||
415 | marker_ref->next_phys = NULL; | 416 | marker_ref->next_phys = NULL; |
416 | marker_ref->flash_offset = jeb->offset | REF_NORMAL; | 417 | marker_ref->flash_offset = jeb->offset | REF_NORMAL; |
417 | marker_ref->__totlen = c->cleanmarker_size; | 418 | marker_ref->__totlen = c->cleanmarker_size; |
418 | 419 | ||
419 | jeb->first_node = jeb->last_node = marker_ref; | 420 | jeb->first_node = jeb->last_node = marker_ref; |
420 | 421 | ||
421 | jeb->free_size = c->sector_size - c->cleanmarker_size; | 422 | jeb->free_size = c->sector_size - c->cleanmarker_size; |
422 | jeb->used_size = c->cleanmarker_size; | 423 | jeb->used_size = c->cleanmarker_size; |
423 | jeb->dirty_size = 0; | 424 | jeb->dirty_size = 0; |
@@ -429,8 +430,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb | |||
429 | c->free_size += jeb->free_size; | 430 | c->free_size += jeb->free_size; |
430 | c->used_size += jeb->used_size; | 431 | c->used_size += jeb->used_size; |
431 | 432 | ||
432 | ACCT_SANITY_CHECK(c,jeb); | 433 | jffs2_dbg_acct_sanity_check_nolock(c,jeb); |
433 | D1(ACCT_PARANOIA_CHECK(jeb)); | 434 | jffs2_dbg_acct_paranoia_check_nolock(c, jeb); |
434 | 435 | ||
435 | list_add_tail(&jeb->list, &c->free_list); | 436 | list_add_tail(&jeb->list, &c->free_list); |
436 | c->nr_erasing_blocks--; | 437 | c->nr_erasing_blocks--; |
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c index 8279bf0133ff..935f273dc57b 100644 --- a/fs/jffs2/file.c +++ b/fs/jffs2/file.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: file.c,v 1.102 2005/07/06 12:13:09 dwmw2 Exp $ | 10 | * $Id: file.c,v 1.104 2005/10/18 23:29:35 tpoynor Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -34,8 +34,8 @@ int jffs2_fsync(struct file *filp, struct dentry *dentry, int datasync) | |||
34 | 34 | ||
35 | /* Trigger GC to flush any pending writes for this inode */ | 35 | /* Trigger GC to flush any pending writes for this inode */ |
36 | jffs2_flush_wbuf_gc(c, inode->i_ino); | 36 | jffs2_flush_wbuf_gc(c, inode->i_ino); |
37 | 37 | ||
38 | return 0; | 38 | return 0; |
39 | } | 39 | } |
40 | 40 | ||
41 | struct file_operations jffs2_file_operations = | 41 | struct file_operations jffs2_file_operations = |
@@ -107,7 +107,7 @@ static int jffs2_readpage (struct file *filp, struct page *pg) | |||
107 | { | 107 | { |
108 | struct jffs2_inode_info *f = JFFS2_INODE_INFO(pg->mapping->host); | 108 | struct jffs2_inode_info *f = JFFS2_INODE_INFO(pg->mapping->host); |
109 | int ret; | 109 | int ret; |
110 | 110 | ||
111 | down(&f->sem); | 111 | down(&f->sem); |
112 | ret = jffs2_do_readpage_unlock(pg->mapping->host, pg); | 112 | ret = jffs2_do_readpage_unlock(pg->mapping->host, pg); |
113 | up(&f->sem); | 113 | up(&f->sem); |
@@ -130,11 +130,12 @@ static int jffs2_prepare_write (struct file *filp, struct page *pg, | |||
130 | struct jffs2_raw_inode ri; | 130 | struct jffs2_raw_inode ri; |
131 | struct jffs2_full_dnode *fn; | 131 | struct jffs2_full_dnode *fn; |
132 | uint32_t phys_ofs, alloc_len; | 132 | uint32_t phys_ofs, alloc_len; |
133 | 133 | ||
134 | D1(printk(KERN_DEBUG "Writing new hole frag 0x%x-0x%x between current EOF and new page\n", | 134 | D1(printk(KERN_DEBUG "Writing new hole frag 0x%x-0x%x between current EOF and new page\n", |
135 | (unsigned int)inode->i_size, pageofs)); | 135 | (unsigned int)inode->i_size, pageofs)); |
136 | 136 | ||
137 | ret = jffs2_reserve_space(c, sizeof(ri), &phys_ofs, &alloc_len, ALLOC_NORMAL); | 137 | ret = jffs2_reserve_space(c, sizeof(ri), &phys_ofs, &alloc_len, |
138 | ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); | ||
138 | if (ret) | 139 | if (ret) |
139 | return ret; | 140 | return ret; |
140 | 141 | ||
@@ -159,7 +160,7 @@ static int jffs2_prepare_write (struct file *filp, struct page *pg, | |||
159 | ri.compr = JFFS2_COMPR_ZERO; | 160 | ri.compr = JFFS2_COMPR_ZERO; |
160 | ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); | 161 | ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); |
161 | ri.data_crc = cpu_to_je32(0); | 162 | ri.data_crc = cpu_to_je32(0); |
162 | 163 | ||
163 | fn = jffs2_write_dnode(c, f, &ri, NULL, 0, phys_ofs, ALLOC_NORMAL); | 164 | fn = jffs2_write_dnode(c, f, &ri, NULL, 0, phys_ofs, ALLOC_NORMAL); |
164 | 165 | ||
165 | if (IS_ERR(fn)) { | 166 | if (IS_ERR(fn)) { |
@@ -186,7 +187,7 @@ static int jffs2_prepare_write (struct file *filp, struct page *pg, | |||
186 | inode->i_size = pageofs; | 187 | inode->i_size = pageofs; |
187 | up(&f->sem); | 188 | up(&f->sem); |
188 | } | 189 | } |
189 | 190 | ||
190 | /* Read in the page if it wasn't already present, unless it's a whole page */ | 191 | /* Read in the page if it wasn't already present, unless it's a whole page */ |
191 | if (!PageUptodate(pg) && (start || end < PAGE_CACHE_SIZE)) { | 192 | if (!PageUptodate(pg) && (start || end < PAGE_CACHE_SIZE)) { |
192 | down(&f->sem); | 193 | down(&f->sem); |
@@ -217,7 +218,7 @@ static int jffs2_commit_write (struct file *filp, struct page *pg, | |||
217 | if (!start && end == PAGE_CACHE_SIZE) { | 218 | if (!start && end == PAGE_CACHE_SIZE) { |
218 | /* We need to avoid deadlock with page_cache_read() in | 219 | /* We need to avoid deadlock with page_cache_read() in |
219 | jffs2_garbage_collect_pass(). So we have to mark the | 220 | jffs2_garbage_collect_pass(). So we have to mark the |
220 | page up to date, to prevent page_cache_read() from | 221 | page up to date, to prevent page_cache_read() from |
221 | trying to re-lock it. */ | 222 | trying to re-lock it. */ |
222 | SetPageUptodate(pg); | 223 | SetPageUptodate(pg); |
223 | } | 224 | } |
@@ -251,7 +252,7 @@ static int jffs2_commit_write (struct file *filp, struct page *pg, | |||
251 | /* There was an error writing. */ | 252 | /* There was an error writing. */ |
252 | SetPageError(pg); | 253 | SetPageError(pg); |
253 | } | 254 | } |
254 | 255 | ||
255 | /* Adjust writtenlen for the padding we did, so we don't confuse our caller */ | 256 | /* Adjust writtenlen for the padding we did, so we don't confuse our caller */ |
256 | if (writtenlen < (start&3)) | 257 | if (writtenlen < (start&3)) |
257 | writtenlen = 0; | 258 | writtenlen = 0; |
@@ -262,7 +263,7 @@ static int jffs2_commit_write (struct file *filp, struct page *pg, | |||
262 | if (inode->i_size < (pg->index << PAGE_CACHE_SHIFT) + start + writtenlen) { | 263 | if (inode->i_size < (pg->index << PAGE_CACHE_SHIFT) + start + writtenlen) { |
263 | inode->i_size = (pg->index << PAGE_CACHE_SHIFT) + start + writtenlen; | 264 | inode->i_size = (pg->index << PAGE_CACHE_SHIFT) + start + writtenlen; |
264 | inode->i_blocks = (inode->i_size + 511) >> 9; | 265 | inode->i_blocks = (inode->i_size + 511) >> 9; |
265 | 266 | ||
266 | inode->i_ctime = inode->i_mtime = ITIME(je32_to_cpu(ri->ctime)); | 267 | inode->i_ctime = inode->i_mtime = ITIME(je32_to_cpu(ri->ctime)); |
267 | } | 268 | } |
268 | } | 269 | } |
@@ -271,13 +272,13 @@ static int jffs2_commit_write (struct file *filp, struct page *pg, | |||
271 | 272 | ||
272 | if (start+writtenlen < end) { | 273 | if (start+writtenlen < end) { |
273 | /* generic_file_write has written more to the page cache than we've | 274 | /* generic_file_write has written more to the page cache than we've |
274 | actually written to the medium. Mark the page !Uptodate so that | 275 | actually written to the medium. Mark the page !Uptodate so that |
275 | it gets reread */ | 276 | it gets reread */ |
276 | D1(printk(KERN_DEBUG "jffs2_commit_write(): Not all bytes written. Marking page !uptodate\n")); | 277 | D1(printk(KERN_DEBUG "jffs2_commit_write(): Not all bytes written. Marking page !uptodate\n")); |
277 | SetPageError(pg); | 278 | SetPageError(pg); |
278 | ClearPageUptodate(pg); | 279 | ClearPageUptodate(pg); |
279 | } | 280 | } |
280 | 281 | ||
281 | D1(printk(KERN_DEBUG "jffs2_commit_write() returning %d\n",writtenlen?writtenlen:ret)); | 282 | D1(printk(KERN_DEBUG "jffs2_commit_write() returning %d\n",start+writtenlen==end?0:ret)); |
282 | return writtenlen?writtenlen:ret; | 283 | return start+writtenlen==end?0:ret; |
283 | } | 284 | } |
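Two changes stand out in file.c: jffs2_prepare_write() now passes JFFS2_SUMMARY_INODE_SIZE when reserving room for the hole node it writes between the old EOF and the new page, and jffs2_commit_write() no longer returns a byte count -- it returns 0 only when the whole start..end range reached the medium, and the write error otherwise. A condensed restatement of that new tail, assuming the local names used in the hunk above (pg, start, end, writtenlen, ret); this is not the complete function:

	if (start + writtenlen < end) {
		/* generic_file_write() put more into the page cache than was
		 * written to the medium; mark the page !Uptodate so it gets
		 * re-read from flash. */
		SetPageError(pg);
		ClearPageUptodate(pg);
	}
	/* Success now means "the whole requested range is on the medium". */
	return (start + writtenlen == end) ? 0 : ret;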
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c index 5687c3f42002..543420665c5b 100644 --- a/fs/jffs2/fs.c +++ b/fs/jffs2/fs.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: fs.c,v 1.56 2005/07/06 12:13:09 dwmw2 Exp $ | 10 | * $Id: fs.c,v 1.66 2005/09/27 13:17:29 dedekind Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -40,7 +40,7 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) | |||
40 | int ret; | 40 | int ret; |
41 | D1(printk(KERN_DEBUG "jffs2_setattr(): ino #%lu\n", inode->i_ino)); | 41 | D1(printk(KERN_DEBUG "jffs2_setattr(): ino #%lu\n", inode->i_ino)); |
42 | ret = inode_change_ok(inode, iattr); | 42 | ret = inode_change_ok(inode, iattr); |
43 | if (ret) | 43 | if (ret) |
44 | return ret; | 44 | return ret; |
45 | 45 | ||
46 | /* Special cases - we don't want more than one data node | 46 | /* Special cases - we don't want more than one data node |
@@ -73,8 +73,9 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) | |||
73 | kfree(mdata); | 73 | kfree(mdata); |
74 | return -ENOMEM; | 74 | return -ENOMEM; |
75 | } | 75 | } |
76 | 76 | ||
77 | ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &phys_ofs, &alloclen, ALLOC_NORMAL); | 77 | ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &phys_ofs, &alloclen, |
78 | ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); | ||
78 | if (ret) { | 79 | if (ret) { |
79 | jffs2_free_raw_inode(ri); | 80 | jffs2_free_raw_inode(ri); |
80 | if (S_ISLNK(inode->i_mode & S_IFMT)) | 81 | if (S_ISLNK(inode->i_mode & S_IFMT)) |
@@ -83,7 +84,7 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) | |||
83 | } | 84 | } |
84 | down(&f->sem); | 85 | down(&f->sem); |
85 | ivalid = iattr->ia_valid; | 86 | ivalid = iattr->ia_valid; |
86 | 87 | ||
87 | ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); | 88 | ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); |
88 | ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE); | 89 | ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE); |
89 | ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen); | 90 | ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen); |
@@ -99,7 +100,7 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) | |||
99 | if (iattr->ia_mode & S_ISGID && | 100 | if (iattr->ia_mode & S_ISGID && |
100 | !in_group_p(je16_to_cpu(ri->gid)) && !capable(CAP_FSETID)) | 101 | !in_group_p(je16_to_cpu(ri->gid)) && !capable(CAP_FSETID)) |
101 | ri->mode = cpu_to_jemode(iattr->ia_mode & ~S_ISGID); | 102 | ri->mode = cpu_to_jemode(iattr->ia_mode & ~S_ISGID); |
102 | else | 103 | else |
103 | ri->mode = cpu_to_jemode(iattr->ia_mode); | 104 | ri->mode = cpu_to_jemode(iattr->ia_mode); |
104 | else | 105 | else |
105 | ri->mode = cpu_to_jemode(inode->i_mode); | 106 | ri->mode = cpu_to_jemode(inode->i_mode); |
@@ -128,7 +129,7 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) | |||
128 | new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, phys_ofs, ALLOC_NORMAL); | 129 | new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, phys_ofs, ALLOC_NORMAL); |
129 | if (S_ISLNK(inode->i_mode)) | 130 | if (S_ISLNK(inode->i_mode)) |
130 | kfree(mdata); | 131 | kfree(mdata); |
131 | 132 | ||
132 | if (IS_ERR(new_metadata)) { | 133 | if (IS_ERR(new_metadata)) { |
133 | jffs2_complete_reservation(c); | 134 | jffs2_complete_reservation(c); |
134 | jffs2_free_raw_inode(ri); | 135 | jffs2_free_raw_inode(ri); |
@@ -147,7 +148,7 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) | |||
147 | old_metadata = f->metadata; | 148 | old_metadata = f->metadata; |
148 | 149 | ||
149 | if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) | 150 | if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) |
150 | jffs2_truncate_fraglist (c, &f->fragtree, iattr->ia_size); | 151 | jffs2_truncate_fragtree (c, &f->fragtree, iattr->ia_size); |
151 | 152 | ||
152 | if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) { | 153 | if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) { |
153 | jffs2_add_full_dnode_to_inode(c, f, new_metadata); | 154 | jffs2_add_full_dnode_to_inode(c, f, new_metadata); |
@@ -166,7 +167,7 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) | |||
166 | jffs2_complete_reservation(c); | 167 | jffs2_complete_reservation(c); |
167 | 168 | ||
168 | /* We have to do the vmtruncate() without f->sem held, since | 169 | /* We have to do the vmtruncate() without f->sem held, since |
169 | some pages may be locked and waiting for it in readpage(). | 170 | some pages may be locked and waiting for it in readpage(). |
170 | We are protected from a simultaneous write() extending i_size | 171 | We are protected from a simultaneous write() extending i_size |
171 | back past iattr->ia_size, because do_truncate() holds the | 172 | back past iattr->ia_size, because do_truncate() holds the |
172 | generic inode semaphore. */ | 173 | generic inode semaphore. */ |
@@ -194,31 +195,27 @@ int jffs2_statfs(struct super_block *sb, struct kstatfs *buf) | |||
194 | buf->f_namelen = JFFS2_MAX_NAME_LEN; | 195 | buf->f_namelen = JFFS2_MAX_NAME_LEN; |
195 | 196 | ||
196 | spin_lock(&c->erase_completion_lock); | 197 | spin_lock(&c->erase_completion_lock); |
197 | |||
198 | avail = c->dirty_size + c->free_size; | 198 | avail = c->dirty_size + c->free_size; |
199 | if (avail > c->sector_size * c->resv_blocks_write) | 199 | if (avail > c->sector_size * c->resv_blocks_write) |
200 | avail -= c->sector_size * c->resv_blocks_write; | 200 | avail -= c->sector_size * c->resv_blocks_write; |
201 | else | 201 | else |
202 | avail = 0; | 202 | avail = 0; |
203 | spin_unlock(&c->erase_completion_lock); | ||
203 | 204 | ||
204 | buf->f_bavail = buf->f_bfree = avail >> PAGE_SHIFT; | 205 | buf->f_bavail = buf->f_bfree = avail >> PAGE_SHIFT; |
205 | 206 | ||
206 | D2(jffs2_dump_block_lists(c)); | ||
207 | |||
208 | spin_unlock(&c->erase_completion_lock); | ||
209 | |||
210 | return 0; | 207 | return 0; |
211 | } | 208 | } |
212 | 209 | ||
213 | 210 | ||
214 | void jffs2_clear_inode (struct inode *inode) | 211 | void jffs2_clear_inode (struct inode *inode) |
215 | { | 212 | { |
216 | /* We can forget about this inode for now - drop all | 213 | /* We can forget about this inode for now - drop all |
217 | * the nodelists associated with it, etc. | 214 | * the nodelists associated with it, etc. |
218 | */ | 215 | */ |
219 | struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); | 216 | struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); |
220 | struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); | 217 | struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); |
221 | 218 | ||
222 | D1(printk(KERN_DEBUG "jffs2_clear_inode(): ino #%lu mode %o\n", inode->i_ino, inode->i_mode)); | 219 | D1(printk(KERN_DEBUG "jffs2_clear_inode(): ino #%lu mode %o\n", inode->i_ino, inode->i_mode)); |
223 | 220 | ||
224 | jffs2_do_clear_inode(c, f); | 221 | jffs2_do_clear_inode(c, f); |
@@ -237,7 +234,7 @@ void jffs2_read_inode (struct inode *inode) | |||
237 | c = JFFS2_SB_INFO(inode->i_sb); | 234 | c = JFFS2_SB_INFO(inode->i_sb); |
238 | 235 | ||
239 | jffs2_init_inode_info(f); | 236 | jffs2_init_inode_info(f); |
240 | 237 | ||
241 | ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node); | 238 | ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node); |
242 | 239 | ||
243 | if (ret) { | 240 | if (ret) { |
@@ -257,14 +254,14 @@ void jffs2_read_inode (struct inode *inode) | |||
257 | 254 | ||
258 | inode->i_blksize = PAGE_SIZE; | 255 | inode->i_blksize = PAGE_SIZE; |
259 | inode->i_blocks = (inode->i_size + 511) >> 9; | 256 | inode->i_blocks = (inode->i_size + 511) >> 9; |
260 | 257 | ||
261 | switch (inode->i_mode & S_IFMT) { | 258 | switch (inode->i_mode & S_IFMT) { |
262 | jint16_t rdev; | 259 | jint16_t rdev; |
263 | 260 | ||
264 | case S_IFLNK: | 261 | case S_IFLNK: |
265 | inode->i_op = &jffs2_symlink_inode_operations; | 262 | inode->i_op = &jffs2_symlink_inode_operations; |
266 | break; | 263 | break; |
267 | 264 | ||
268 | case S_IFDIR: | 265 | case S_IFDIR: |
269 | { | 266 | { |
270 | struct jffs2_full_dirent *fd; | 267 | struct jffs2_full_dirent *fd; |
@@ -301,7 +298,7 @@ void jffs2_read_inode (struct inode *inode) | |||
301 | jffs2_do_clear_inode(c, f); | 298 | jffs2_do_clear_inode(c, f); |
302 | make_bad_inode(inode); | 299 | make_bad_inode(inode); |
303 | return; | 300 | return; |
304 | } | 301 | } |
305 | 302 | ||
306 | case S_IFSOCK: | 303 | case S_IFSOCK: |
307 | case S_IFIFO: | 304 | case S_IFIFO: |
@@ -357,11 +354,11 @@ int jffs2_remount_fs (struct super_block *sb, int *flags, char *data) | |||
357 | down(&c->alloc_sem); | 354 | down(&c->alloc_sem); |
358 | jffs2_flush_wbuf_pad(c); | 355 | jffs2_flush_wbuf_pad(c); |
359 | up(&c->alloc_sem); | 356 | up(&c->alloc_sem); |
360 | } | 357 | } |
361 | 358 | ||
362 | if (!(*flags & MS_RDONLY)) | 359 | if (!(*flags & MS_RDONLY)) |
363 | jffs2_start_garbage_collect_thread(c); | 360 | jffs2_start_garbage_collect_thread(c); |
364 | 361 | ||
365 | *flags |= MS_NOATIME; | 362 | *flags |= MS_NOATIME; |
366 | 363 | ||
367 | return 0; | 364 | return 0; |
@@ -395,9 +392,9 @@ struct inode *jffs2_new_inode (struct inode *dir_i, int mode, struct jffs2_raw_i | |||
395 | D1(printk(KERN_DEBUG "jffs2_new_inode(): dir_i %ld, mode 0x%x\n", dir_i->i_ino, mode)); | 392 | D1(printk(KERN_DEBUG "jffs2_new_inode(): dir_i %ld, mode 0x%x\n", dir_i->i_ino, mode)); |
396 | 393 | ||
397 | c = JFFS2_SB_INFO(sb); | 394 | c = JFFS2_SB_INFO(sb); |
398 | 395 | ||
399 | inode = new_inode(sb); | 396 | inode = new_inode(sb); |
400 | 397 | ||
401 | if (!inode) | 398 | if (!inode) |
402 | return ERR_PTR(-ENOMEM); | 399 | return ERR_PTR(-ENOMEM); |
403 | 400 | ||
@@ -461,40 +458,24 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent) | |||
461 | #endif | 458 | #endif |
462 | 459 | ||
463 | c->flash_size = c->mtd->size; | 460 | c->flash_size = c->mtd->size; |
464 | 461 | c->sector_size = c->mtd->erasesize; | |
465 | /* | ||
466 | * Check, if we have to concatenate physical blocks to larger virtual blocks | ||
467 | * to reduce the memorysize for c->blocks. (kmalloc allows max. 128K allocation) | ||
468 | */ | ||
469 | c->sector_size = c->mtd->erasesize; | ||
470 | blocks = c->flash_size / c->sector_size; | 462 | blocks = c->flash_size / c->sector_size; |
471 | if (!(c->mtd->flags & MTD_NO_VIRTBLOCKS)) { | ||
472 | while ((blocks * sizeof (struct jffs2_eraseblock)) > (128 * 1024)) { | ||
473 | blocks >>= 1; | ||
474 | c->sector_size <<= 1; | ||
475 | } | ||
476 | } | ||
477 | 463 | ||
478 | /* | 464 | /* |
479 | * Size alignment check | 465 | * Size alignment check |
480 | */ | 466 | */ |
481 | if ((c->sector_size * blocks) != c->flash_size) { | 467 | if ((c->sector_size * blocks) != c->flash_size) { |
482 | c->flash_size = c->sector_size * blocks; | 468 | c->flash_size = c->sector_size * blocks; |
483 | printk(KERN_INFO "jffs2: Flash size not aligned to erasesize, reducing to %dKiB\n", | 469 | printk(KERN_INFO "jffs2: Flash size not aligned to erasesize, reducing to %dKiB\n", |
484 | c->flash_size / 1024); | 470 | c->flash_size / 1024); |
485 | } | 471 | } |
486 | 472 | ||
487 | if (c->sector_size != c->mtd->erasesize) | ||
488 | printk(KERN_INFO "jffs2: Erase block size too small (%dKiB). Using virtual blocks size (%dKiB) instead\n", | ||
489 | c->mtd->erasesize / 1024, c->sector_size / 1024); | ||
490 | |||
491 | if (c->flash_size < 5*c->sector_size) { | 473 | if (c->flash_size < 5*c->sector_size) { |
492 | printk(KERN_ERR "jffs2: Too few erase blocks (%d)\n", c->flash_size / c->sector_size); | 474 | printk(KERN_ERR "jffs2: Too few erase blocks (%d)\n", c->flash_size / c->sector_size); |
493 | return -EINVAL; | 475 | return -EINVAL; |
494 | } | 476 | } |
495 | 477 | ||
496 | c->cleanmarker_size = sizeof(struct jffs2_unknown_node); | 478 | c->cleanmarker_size = sizeof(struct jffs2_unknown_node); |
497 | /* Joern -- stick alignment for weird 8-byte-page flash here */ | ||
498 | 479 | ||
499 | /* NAND (or other bizarre) flash... do setup accordingly */ | 480 | /* NAND (or other bizarre) flash... do setup accordingly */ |
500 | ret = jffs2_flash_setup(c); | 481 | ret = jffs2_flash_setup(c); |
@@ -517,7 +498,7 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent) | |||
517 | root_i = iget(sb, 1); | 498 | root_i = iget(sb, 1); |
518 | if (is_bad_inode(root_i)) { | 499 | if (is_bad_inode(root_i)) { |
519 | D1(printk(KERN_WARNING "get root inode failed\n")); | 500 | D1(printk(KERN_WARNING "get root inode failed\n")); |
520 | goto out_nodes; | 501 | goto out_root_i; |
521 | } | 502 | } |
522 | 503 | ||
523 | D1(printk(KERN_DEBUG "jffs2_do_fill_super(): d_alloc_root()\n")); | 504 | D1(printk(KERN_DEBUG "jffs2_do_fill_super(): d_alloc_root()\n")); |
@@ -535,10 +516,9 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent) | |||
535 | 516 | ||
536 | out_root_i: | 517 | out_root_i: |
537 | iput(root_i); | 518 | iput(root_i); |
538 | out_nodes: | ||
539 | jffs2_free_ino_caches(c); | 519 | jffs2_free_ino_caches(c); |
540 | jffs2_free_raw_node_refs(c); | 520 | jffs2_free_raw_node_refs(c); |
541 | if (c->mtd->flags & MTD_NO_VIRTBLOCKS) | 521 | if (jffs2_blocks_use_vmalloc(c)) |
542 | vfree(c->blocks); | 522 | vfree(c->blocks); |
543 | else | 523 | else |
544 | kfree(c->blocks); | 524 | kfree(c->blocks); |
@@ -563,16 +543,16 @@ struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c, | |||
563 | struct jffs2_inode_cache *ic; | 543 | struct jffs2_inode_cache *ic; |
564 | if (!nlink) { | 544 | if (!nlink) { |
565 | /* The inode has zero nlink but its nodes weren't yet marked | 545 | /* The inode has zero nlink but its nodes weren't yet marked |
566 | obsolete. This has to be because we're still waiting for | 546 | obsolete. This has to be because we're still waiting for |
567 | the final (close() and) iput() to happen. | 547 | the final (close() and) iput() to happen. |
568 | 548 | ||
569 | There's a possibility that the final iput() could have | 549 | There's a possibility that the final iput() could have |
570 | happened while we were contemplating. In order to ensure | 550 | happened while we were contemplating. In order to ensure |
571 | that we don't cause a new read_inode() (which would fail) | 551 | that we don't cause a new read_inode() (which would fail) |
572 | for the inode in question, we use ilookup() in this case | 552 | for the inode in question, we use ilookup() in this case |
573 | instead of iget(). | 553 | instead of iget(). |
574 | 554 | ||
575 | The nlink can't _become_ zero at this point because we're | 555 | The nlink can't _become_ zero at this point because we're |
576 | holding the alloc_sem, and jffs2_do_unlink() would also | 556 | holding the alloc_sem, and jffs2_do_unlink() would also |
577 | need that while decrementing nlink on any inode. | 557 | need that while decrementing nlink on any inode. |
578 | */ | 558 | */ |
@@ -619,19 +599,19 @@ struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c, | |||
619 | return JFFS2_INODE_INFO(inode); | 599 | return JFFS2_INODE_INFO(inode); |
620 | } | 600 | } |
621 | 601 | ||
622 | unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c, | 602 | unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c, |
623 | struct jffs2_inode_info *f, | 603 | struct jffs2_inode_info *f, |
624 | unsigned long offset, | 604 | unsigned long offset, |
625 | unsigned long *priv) | 605 | unsigned long *priv) |
626 | { | 606 | { |
627 | struct inode *inode = OFNI_EDONI_2SFFJ(f); | 607 | struct inode *inode = OFNI_EDONI_2SFFJ(f); |
628 | struct page *pg; | 608 | struct page *pg; |
629 | 609 | ||
630 | pg = read_cache_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT, | 610 | pg = read_cache_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT, |
631 | (void *)jffs2_do_readpage_unlock, inode); | 611 | (void *)jffs2_do_readpage_unlock, inode); |
632 | if (IS_ERR(pg)) | 612 | if (IS_ERR(pg)) |
633 | return (void *)pg; | 613 | return (void *)pg; |
634 | 614 | ||
635 | *priv = (unsigned long)pg; | 615 | *priv = (unsigned long)pg; |
636 | return kmap(pg); | 616 | return kmap(pg); |
637 | } | 617 | } |
@@ -648,7 +628,7 @@ void jffs2_gc_release_page(struct jffs2_sb_info *c, | |||
648 | 628 | ||
649 | static int jffs2_flash_setup(struct jffs2_sb_info *c) { | 629 | static int jffs2_flash_setup(struct jffs2_sb_info *c) { |
650 | int ret = 0; | 630 | int ret = 0; |
651 | 631 | ||
652 | if (jffs2_cleanmarker_oob(c)) { | 632 | if (jffs2_cleanmarker_oob(c)) { |
653 | /* NAND flash... do setup accordingly */ | 633 | /* NAND flash... do setup accordingly */ |
654 | ret = jffs2_nand_flash_setup(c); | 634 | ret = jffs2_nand_flash_setup(c); |
@@ -662,14 +642,21 @@ static int jffs2_flash_setup(struct jffs2_sb_info *c) { | |||
662 | if (ret) | 642 | if (ret) |
663 | return ret; | 643 | return ret; |
664 | } | 644 | } |
665 | 645 | ||
666 | /* and Dataflash */ | 646 | /* and Dataflash */ |
667 | if (jffs2_dataflash(c)) { | 647 | if (jffs2_dataflash(c)) { |
668 | ret = jffs2_dataflash_setup(c); | 648 | ret = jffs2_dataflash_setup(c); |
669 | if (ret) | 649 | if (ret) |
670 | return ret; | 650 | return ret; |
671 | } | 651 | } |
672 | 652 | ||
653 | /* and Intel "Sibley" flash */ | ||
654 | if (jffs2_nor_wbuf_flash(c)) { | ||
655 | ret = jffs2_nor_wbuf_flash_setup(c); | ||
656 | if (ret) | ||
657 | return ret; | ||
658 | } | ||
659 | |||
673 | return ret; | 660 | return ret; |
674 | } | 661 | } |
675 | 662 | ||
@@ -683,9 +670,14 @@ void jffs2_flash_cleanup(struct jffs2_sb_info *c) { | |||
683 | if (jffs2_nor_ecc(c)) { | 670 | if (jffs2_nor_ecc(c)) { |
684 | jffs2_nor_ecc_flash_cleanup(c); | 671 | jffs2_nor_ecc_flash_cleanup(c); |
685 | } | 672 | } |
686 | 673 | ||
687 | /* and DataFlash */ | 674 | /* and DataFlash */ |
688 | if (jffs2_dataflash(c)) { | 675 | if (jffs2_dataflash(c)) { |
689 | jffs2_dataflash_cleanup(c); | 676 | jffs2_dataflash_cleanup(c); |
690 | } | 677 | } |
678 | |||
679 | /* and Intel "Sibley" flash */ | ||
680 | if (jffs2_nor_wbuf_flash(c)) { | ||
681 | jffs2_nor_wbuf_flash_cleanup(c); | ||
682 | } | ||
691 | } | 683 | } |
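The fs.c changes remove the old virtual-eraseblock concatenation (c->sector_size is now always the MTD erase size) and extend the flash setup/cleanup paths to cover Intel "Sibley" write-buffered NOR flash next to NAND, NOR-ECC and DataFlash. A minimal sketch of the setup dispatch, using only the predicates and setup calls visible in the hunks above; the NOR-ECC setup branch is elided because only its cleanup counterpart appears in this diff:

static int jffs2_flash_setup_sketch(struct jffs2_sb_info *c)
{
	int ret;

	if (jffs2_cleanmarker_oob(c)) {		/* NAND */
		ret = jffs2_nand_flash_setup(c);
		if (ret)
			return ret;
	}
	/* ... NOR-ECC branch in the real function ... */
	if (jffs2_dataflash(c)) {		/* Atmel DataFlash */
		ret = jffs2_dataflash_setup(c);
		if (ret)
			return ret;
	}
	if (jffs2_nor_wbuf_flash(c)) {		/* Intel "Sibley" NOR, new in this merge */
		ret = jffs2_nor_wbuf_flash_setup(c);
		if (ret)
			return ret;
	}
	return 0;
}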
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c index 7086cd634503..f9ffece453a3 100644 --- a/fs/jffs2/gc.c +++ b/fs/jffs2/gc.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: gc.c,v 1.148 2005/04/09 10:47:00 dedekind Exp $ | 10 | * $Id: gc.c,v 1.155 2005/11/07 11:14:39 gleixner Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -21,14 +21,14 @@ | |||
21 | #include "nodelist.h" | 21 | #include "nodelist.h" |
22 | #include "compr.h" | 22 | #include "compr.h" |
23 | 23 | ||
24 | static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, | 24 | static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, |
25 | struct jffs2_inode_cache *ic, | 25 | struct jffs2_inode_cache *ic, |
26 | struct jffs2_raw_node_ref *raw); | 26 | struct jffs2_raw_node_ref *raw); |
27 | static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | 27 | static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, |
28 | struct jffs2_inode_info *f, struct jffs2_full_dnode *fd); | 28 | struct jffs2_inode_info *f, struct jffs2_full_dnode *fd); |
29 | static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | 29 | static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, |
30 | struct jffs2_inode_info *f, struct jffs2_full_dirent *fd); | 30 | struct jffs2_inode_info *f, struct jffs2_full_dirent *fd); |
31 | static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | 31 | static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, |
32 | struct jffs2_inode_info *f, struct jffs2_full_dirent *fd); | 32 | struct jffs2_inode_info *f, struct jffs2_full_dirent *fd); |
33 | static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | 33 | static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, |
34 | struct jffs2_inode_info *f, struct jffs2_full_dnode *fn, | 34 | struct jffs2_inode_info *f, struct jffs2_full_dnode *fn, |
@@ -55,7 +55,7 @@ again: | |||
55 | D1(printk(KERN_DEBUG "Picking block from bad_used_list to GC next\n")); | 55 | D1(printk(KERN_DEBUG "Picking block from bad_used_list to GC next\n")); |
56 | nextlist = &c->bad_used_list; | 56 | nextlist = &c->bad_used_list; |
57 | } else if (n < 50 && !list_empty(&c->erasable_list)) { | 57 | } else if (n < 50 && !list_empty(&c->erasable_list)) { |
58 | /* Note that most of them will have gone directly to be erased. | 58 | /* Note that most of them will have gone directly to be erased. |
59 | So don't favour the erasable_list _too_ much. */ | 59 | So don't favour the erasable_list _too_ much. */ |
60 | D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next\n")); | 60 | D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next\n")); |
61 | nextlist = &c->erasable_list; | 61 | nextlist = &c->erasable_list; |
@@ -101,7 +101,7 @@ again: | |||
101 | printk(KERN_WARNING "Eep. ret->gc_node for block at 0x%08x is NULL\n", ret->offset); | 101 | printk(KERN_WARNING "Eep. ret->gc_node for block at 0x%08x is NULL\n", ret->offset); |
102 | BUG(); | 102 | BUG(); |
103 | } | 103 | } |
104 | 104 | ||
105 | /* Have we accidentally picked a clean block with wasted space ? */ | 105 | /* Have we accidentally picked a clean block with wasted space ? */ |
106 | if (ret->wasted_size) { | 106 | if (ret->wasted_size) { |
107 | D1(printk(KERN_DEBUG "Converting wasted_size %08x to dirty_size\n", ret->wasted_size)); | 107 | D1(printk(KERN_DEBUG "Converting wasted_size %08x to dirty_size\n", ret->wasted_size)); |
@@ -111,7 +111,6 @@ again: | |||
111 | ret->wasted_size = 0; | 111 | ret->wasted_size = 0; |
112 | } | 112 | } |
113 | 113 | ||
114 | D2(jffs2_dump_block_lists(c)); | ||
115 | return ret; | 114 | return ret; |
116 | } | 115 | } |
117 | 116 | ||
@@ -137,12 +136,12 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
137 | 136 | ||
138 | /* We can't start doing GC yet. We haven't finished checking | 137 | /* We can't start doing GC yet. We haven't finished checking |
139 | the node CRCs etc. Do it now. */ | 138 | the node CRCs etc. Do it now. */ |
140 | 139 | ||
141 | /* checked_ino is protected by the alloc_sem */ | 140 | /* checked_ino is protected by the alloc_sem */ |
142 | if (c->checked_ino > c->highest_ino) { | 141 | if (c->checked_ino > c->highest_ino) { |
143 | printk(KERN_CRIT "Checked all inodes but still 0x%x bytes of unchecked space?\n", | 142 | printk(KERN_CRIT "Checked all inodes but still 0x%x bytes of unchecked space?\n", |
144 | c->unchecked_size); | 143 | c->unchecked_size); |
145 | D2(jffs2_dump_block_lists(c)); | 144 | jffs2_dbg_dump_block_lists_nolock(c); |
146 | spin_unlock(&c->erase_completion_lock); | 145 | spin_unlock(&c->erase_completion_lock); |
147 | BUG(); | 146 | BUG(); |
148 | } | 147 | } |
@@ -179,7 +178,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
179 | 178 | ||
180 | case INO_STATE_READING: | 179 | case INO_STATE_READING: |
181 | /* We need to wait for it to finish, lest we move on | 180 | /* We need to wait for it to finish, lest we move on |
182 | and trigger the BUG() above while we haven't yet | 181 | and trigger the BUG() above while we haven't yet |
183 | finished checking all its nodes */ | 182 | finished checking all its nodes */ |
184 | D1(printk(KERN_DEBUG "Waiting for ino #%u to finish reading\n", ic->ino)); | 183 | D1(printk(KERN_DEBUG "Waiting for ino #%u to finish reading\n", ic->ino)); |
185 | up(&c->alloc_sem); | 184 | up(&c->alloc_sem); |
@@ -229,13 +228,13 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
229 | } | 228 | } |
230 | 229 | ||
231 | raw = jeb->gc_node; | 230 | raw = jeb->gc_node; |
232 | 231 | ||
233 | while(ref_obsolete(raw)) { | 232 | while(ref_obsolete(raw)) { |
234 | D1(printk(KERN_DEBUG "Node at 0x%08x is obsolete... skipping\n", ref_offset(raw))); | 233 | D1(printk(KERN_DEBUG "Node at 0x%08x is obsolete... skipping\n", ref_offset(raw))); |
235 | raw = raw->next_phys; | 234 | raw = raw->next_phys; |
236 | if (unlikely(!raw)) { | 235 | if (unlikely(!raw)) { |
237 | printk(KERN_WARNING "eep. End of raw list while still supposedly nodes to GC\n"); | 236 | printk(KERN_WARNING "eep. End of raw list while still supposedly nodes to GC\n"); |
238 | printk(KERN_WARNING "erase block at 0x%08x. free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x\n", | 237 | printk(KERN_WARNING "erase block at 0x%08x. free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x\n", |
239 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size); | 238 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size); |
240 | jeb->gc_node = raw; | 239 | jeb->gc_node = raw; |
241 | spin_unlock(&c->erase_completion_lock); | 240 | spin_unlock(&c->erase_completion_lock); |
@@ -260,7 +259,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
260 | ic = jffs2_raw_ref_to_ic(raw); | 259 | ic = jffs2_raw_ref_to_ic(raw); |
261 | 260 | ||
262 | /* We need to hold the inocache. Either the erase_completion_lock or | 261 | /* We need to hold the inocache. Either the erase_completion_lock or |
263 | the inocache_lock are sufficient; we trade down since the inocache_lock | 262 | the inocache_lock are sufficient; we trade down since the inocache_lock |
264 | causes less contention. */ | 263 | causes less contention. */ |
265 | spin_lock(&c->inocache_lock); | 264 | spin_lock(&c->inocache_lock); |
266 | 265 | ||
@@ -279,14 +278,14 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
279 | 278 | ||
280 | switch(ic->state) { | 279 | switch(ic->state) { |
281 | case INO_STATE_CHECKEDABSENT: | 280 | case INO_STATE_CHECKEDABSENT: |
282 | /* It's been checked, but it's not currently in-core. | 281 | /* It's been checked, but it's not currently in-core. |
283 | We can just copy any pristine nodes, but have | 282 | We can just copy any pristine nodes, but have |
284 | to prevent anyone else from doing read_inode() while | 283 | to prevent anyone else from doing read_inode() while |
285 | we're at it, so we set the state accordingly */ | 284 | we're at it, so we set the state accordingly */ |
286 | if (ref_flags(raw) == REF_PRISTINE) | 285 | if (ref_flags(raw) == REF_PRISTINE) |
287 | ic->state = INO_STATE_GC; | 286 | ic->state = INO_STATE_GC; |
288 | else { | 287 | else { |
289 | D1(printk(KERN_DEBUG "Ino #%u is absent but node not REF_PRISTINE. Reading.\n", | 288 | D1(printk(KERN_DEBUG "Ino #%u is absent but node not REF_PRISTINE. Reading.\n", |
290 | ic->ino)); | 289 | ic->ino)); |
291 | } | 290 | } |
292 | break; | 291 | break; |
@@ -299,8 +298,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
299 | case INO_STATE_CHECKING: | 298 | case INO_STATE_CHECKING: |
300 | case INO_STATE_GC: | 299 | case INO_STATE_GC: |
301 | /* Should never happen. We should have finished checking | 300 | /* Should never happen. We should have finished checking |
302 | by the time we actually start doing any GC, and since | 301 | by the time we actually start doing any GC, and since |
303 | we're holding the alloc_sem, no other garbage collection | 302 | we're holding the alloc_sem, no other garbage collection |
304 | can happen. | 303 | can happen. |
305 | */ | 304 | */ |
306 | printk(KERN_CRIT "Inode #%u already in state %d in jffs2_garbage_collect_pass()!\n", | 305 | printk(KERN_CRIT "Inode #%u already in state %d in jffs2_garbage_collect_pass()!\n", |
@@ -320,21 +319,21 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
320 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() waiting for ino #%u in state %d\n", | 319 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() waiting for ino #%u in state %d\n", |
321 | ic->ino, ic->state)); | 320 | ic->ino, ic->state)); |
322 | sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); | 321 | sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); |
323 | /* And because we dropped the alloc_sem we must start again from the | 322 | /* And because we dropped the alloc_sem we must start again from the |
324 | beginning. Ponder chance of livelock here -- we're returning success | 323 | beginning. Ponder chance of livelock here -- we're returning success |
325 | without actually making any progress. | 324 | without actually making any progress. |
326 | 325 | ||
327 | Q: What are the chances that the inode is back in INO_STATE_READING | 326 | Q: What are the chances that the inode is back in INO_STATE_READING |
328 | again by the time we next enter this function? And that this happens | 327 | again by the time we next enter this function? And that this happens |
329 | enough times to cause a real delay? | 328 | enough times to cause a real delay? |
330 | 329 | ||
331 | A: Small enough that I don't care :) | 330 | A: Small enough that I don't care :) |
332 | */ | 331 | */ |
333 | return 0; | 332 | return 0; |
334 | } | 333 | } |
335 | 334 | ||
336 | /* OK. Now if the inode is in state INO_STATE_GC, we are going to copy the | 335 | /* OK. Now if the inode is in state INO_STATE_GC, we are going to copy the |
337 | node intact, and we don't have to muck about with the fragtree etc. | 336 | node intact, and we don't have to muck about with the fragtree etc. |
338 | because we know it's not in-core. If it _was_ in-core, we go through | 337 | because we know it's not in-core. If it _was_ in-core, we go through |
339 | all the iget() crap anyway */ | 338 | all the iget() crap anyway */ |
340 | 339 | ||
@@ -454,7 +453,7 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_era | |||
454 | if (!ret) { | 453 | if (!ret) { |
455 | /* Urgh. Return it sensibly. */ | 454 | /* Urgh. Return it sensibly. */ |
456 | frag->node->raw = f->inocache->nodes; | 455 | frag->node->raw = f->inocache->nodes; |
457 | } | 456 | } |
458 | if (ret != -EBADFD) | 457 | if (ret != -EBADFD) |
459 | goto upnout; | 458 | goto upnout; |
460 | } | 459 | } |
@@ -468,7 +467,7 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_era | |||
468 | } | 467 | } |
469 | goto upnout; | 468 | goto upnout; |
470 | } | 469 | } |
471 | 470 | ||
472 | /* Wasn't a dnode. Try dirent */ | 471 | /* Wasn't a dnode. Try dirent */ |
473 | for (fd = f->dents; fd; fd=fd->next) { | 472 | for (fd = f->dents; fd; fd=fd->next) { |
474 | if (fd->raw == raw) | 473 | if (fd->raw == raw) |
@@ -485,7 +484,8 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_era | |||
485 | if (ref_obsolete(raw)) { | 484 | if (ref_obsolete(raw)) { |
486 | printk(KERN_WARNING "But it's obsolete so we don't mind too much\n"); | 485 | printk(KERN_WARNING "But it's obsolete so we don't mind too much\n"); |
487 | } else { | 486 | } else { |
488 | ret = -EIO; | 487 | jffs2_dbg_dump_node(c, ref_offset(raw)); |
488 | BUG(); | ||
489 | } | 489 | } |
490 | } | 490 | } |
491 | upnout: | 491 | upnout: |
@@ -494,7 +494,7 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_era | |||
494 | return ret; | 494 | return ret; |
495 | } | 495 | } |
496 | 496 | ||
497 | static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, | 497 | static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, |
498 | struct jffs2_inode_cache *ic, | 498 | struct jffs2_inode_cache *ic, |
499 | struct jffs2_raw_node_ref *raw) | 499 | struct jffs2_raw_node_ref *raw) |
500 | { | 500 | { |
@@ -513,8 +513,11 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, | |||
513 | /* Ask for a small amount of space (or the totlen if smaller) because we | 513 | /* Ask for a small amount of space (or the totlen if smaller) because we |
514 | don't want to force wastage of the end of a block if splitting would | 514 | don't want to force wastage of the end of a block if splitting would |
515 | work. */ | 515 | work. */ |
516 | ret = jffs2_reserve_space_gc(c, min_t(uint32_t, sizeof(struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN, | 516 | ret = jffs2_reserve_space_gc(c, min_t(uint32_t, sizeof(struct jffs2_raw_inode) + |
517 | rawlen), &phys_ofs, &alloclen); | 517 | JFFS2_MIN_DATA_LEN, rawlen), &phys_ofs, &alloclen, rawlen); |
518 | /* this is not the exact summary size of it, | ||
519 | it is only an upper estimation */ | ||
520 | |||
518 | if (ret) | 521 | if (ret) |
519 | return ret; | 522 | return ret; |
520 | 523 | ||
@@ -577,7 +580,7 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, | |||
577 | } | 580 | } |
578 | break; | 581 | break; |
579 | default: | 582 | default: |
580 | printk(KERN_WARNING "Unknown node type for REF_PRISTINE node at 0x%08x: 0x%04x\n", | 583 | printk(KERN_WARNING "Unknown node type for REF_PRISTINE node at 0x%08x: 0x%04x\n", |
581 | ref_offset(raw), je16_to_cpu(node->u.nodetype)); | 584 | ref_offset(raw), je16_to_cpu(node->u.nodetype)); |
582 | goto bail; | 585 | goto bail; |
583 | } | 586 | } |
@@ -618,17 +621,19 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, | |||
618 | retried = 1; | 621 | retried = 1; |
619 | 622 | ||
620 | D1(printk(KERN_DEBUG "Retrying failed write of REF_PRISTINE node.\n")); | 623 | D1(printk(KERN_DEBUG "Retrying failed write of REF_PRISTINE node.\n")); |
621 | |||
622 | ACCT_SANITY_CHECK(c,jeb); | ||
623 | D1(ACCT_PARANOIA_CHECK(jeb)); | ||
624 | 624 | ||
625 | ret = jffs2_reserve_space_gc(c, rawlen, &phys_ofs, &dummy); | 625 | jffs2_dbg_acct_sanity_check(c,jeb); |
626 | jffs2_dbg_acct_paranoia_check(c, jeb); | ||
627 | |||
628 | ret = jffs2_reserve_space_gc(c, rawlen, &phys_ofs, &dummy, rawlen); | ||
629 | /* this is not the exact summary size; | ||
630 | it is only an upper estimate */ | ||
626 | 631 | ||
627 | if (!ret) { | 632 | if (!ret) { |
628 | D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", phys_ofs)); | 633 | D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", phys_ofs)); |
629 | 634 | ||
630 | ACCT_SANITY_CHECK(c,jeb); | 635 | jffs2_dbg_acct_sanity_check(c,jeb); |
631 | D1(ACCT_PARANOIA_CHECK(jeb)); | 636 | jffs2_dbg_acct_paranoia_check(c, jeb); |
632 | 637 | ||
633 | goto retry; | 638 | goto retry; |
634 | } | 639 | } |
@@ -664,7 +669,7 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, | |||
664 | goto out_node; | 669 | goto out_node; |
665 | } | 670 | } |
666 | 671 | ||
667 | static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | 672 | static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, |
668 | struct jffs2_inode_info *f, struct jffs2_full_dnode *fn) | 673 | struct jffs2_inode_info *f, struct jffs2_full_dnode *fn) |
669 | { | 674 | { |
670 | struct jffs2_full_dnode *new_fn; | 675 | struct jffs2_full_dnode *new_fn; |
@@ -679,7 +684,7 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_ | |||
679 | S_ISCHR(JFFS2_F_I_MODE(f)) ) { | 684 | S_ISCHR(JFFS2_F_I_MODE(f)) ) { |
680 | /* For these, we don't actually need to read the old node */ | 685 | /* For these, we don't actually need to read the old node */ |
681 | /* FIXME: for minor or major > 255. */ | 686 | /* FIXME: for minor or major > 255. */ |
682 | dev = cpu_to_je16(((JFFS2_F_I_RDEV_MAJ(f) << 8) | | 687 | dev = cpu_to_je16(((JFFS2_F_I_RDEV_MAJ(f) << 8) | |
683 | JFFS2_F_I_RDEV_MIN(f))); | 688 | JFFS2_F_I_RDEV_MIN(f))); |
684 | mdata = (char *)&dev; | 689 | mdata = (char *)&dev; |
685 | mdatalen = sizeof(dev); | 690 | mdatalen = sizeof(dev); |
@@ -700,14 +705,15 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_ | |||
700 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bytes of symlink target\n", mdatalen)); | 705 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bytes of symlink target\n", mdatalen)); |
701 | 706 | ||
702 | } | 707 | } |
703 | 708 | ||
704 | ret = jffs2_reserve_space_gc(c, sizeof(ri) + mdatalen, &phys_ofs, &alloclen); | 709 | ret = jffs2_reserve_space_gc(c, sizeof(ri) + mdatalen, &phys_ofs, &alloclen, |
710 | JFFS2_SUMMARY_INODE_SIZE); | ||
705 | if (ret) { | 711 | if (ret) { |
706 | printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_metadata failed: %d\n", | 712 | printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_metadata failed: %d\n", |
707 | sizeof(ri)+ mdatalen, ret); | 713 | sizeof(ri)+ mdatalen, ret); |
708 | goto out; | 714 | goto out; |
709 | } | 715 | } |
710 | 716 | ||
711 | last_frag = frag_last(&f->fragtree); | 717 | last_frag = frag_last(&f->fragtree); |
712 | if (last_frag) | 718 | if (last_frag) |
713 | /* Fetch the inode length from the fragtree rather than | 719 | /* Fetch the inode length from the fragtree rather than |
@@ -715,7 +721,7 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_ | |||
715 | ilen = last_frag->ofs + last_frag->size; | 721 | ilen = last_frag->ofs + last_frag->size; |
716 | else | 722 | else |
717 | ilen = JFFS2_F_I_SIZE(f); | 723 | ilen = JFFS2_F_I_SIZE(f); |
718 | 724 | ||
719 | memset(&ri, 0, sizeof(ri)); | 725 | memset(&ri, 0, sizeof(ri)); |
720 | ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); | 726 | ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); |
721 | ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE); | 727 | ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE); |
@@ -754,7 +760,7 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_ | |||
754 | return ret; | 760 | return ret; |
755 | } | 761 | } |
756 | 762 | ||
757 | static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | 763 | static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, |
758 | struct jffs2_inode_info *f, struct jffs2_full_dirent *fd) | 764 | struct jffs2_inode_info *f, struct jffs2_full_dirent *fd) |
759 | { | 765 | { |
760 | struct jffs2_full_dirent *new_fd; | 766 | struct jffs2_full_dirent *new_fd; |
@@ -771,12 +777,18 @@ static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_er | |||
771 | rd.pino = cpu_to_je32(f->inocache->ino); | 777 | rd.pino = cpu_to_je32(f->inocache->ino); |
772 | rd.version = cpu_to_je32(++f->highest_version); | 778 | rd.version = cpu_to_je32(++f->highest_version); |
773 | rd.ino = cpu_to_je32(fd->ino); | 779 | rd.ino = cpu_to_je32(fd->ino); |
774 | rd.mctime = cpu_to_je32(max(JFFS2_F_I_MTIME(f), JFFS2_F_I_CTIME(f))); | 780 | /* If the times on this inode were set by explicit utime() they can be different, |
781 | so refrain from splatting them. */ | ||
782 | if (JFFS2_F_I_MTIME(f) == JFFS2_F_I_CTIME(f)) | ||
783 | rd.mctime = cpu_to_je32(JFFS2_F_I_MTIME(f)); | ||
784 | else | ||
785 | rd.mctime = cpu_to_je32(0); | ||
775 | rd.type = fd->type; | 786 | rd.type = fd->type; |
776 | rd.node_crc = cpu_to_je32(crc32(0, &rd, sizeof(rd)-8)); | 787 | rd.node_crc = cpu_to_je32(crc32(0, &rd, sizeof(rd)-8)); |
777 | rd.name_crc = cpu_to_je32(crc32(0, fd->name, rd.nsize)); | 788 | rd.name_crc = cpu_to_je32(crc32(0, fd->name, rd.nsize)); |
778 | 789 | ||
779 | ret = jffs2_reserve_space_gc(c, sizeof(rd)+rd.nsize, &phys_ofs, &alloclen); | 790 | ret = jffs2_reserve_space_gc(c, sizeof(rd)+rd.nsize, &phys_ofs, &alloclen, |
791 | JFFS2_SUMMARY_DIRENT_SIZE(rd.nsize)); | ||
780 | if (ret) { | 792 | if (ret) { |
781 | printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dirent failed: %d\n", | 793 | printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dirent failed: %d\n", |
782 | sizeof(rd)+rd.nsize, ret); | 794 | sizeof(rd)+rd.nsize, ret); |
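The dirent hunk above stops blindly writing max(mtime, ctime) into the garbage-collected dirent: it keeps a meaningful mctime only while the two timestamps still agree. A standalone illustration of that decision, with the kernel types simplified to plain integers:

	#include <stdint.h>
	#include <stdio.h>

	/* A GC'd dirent carries a single mctime field.  Once an explicit utime()
	   has made mtime and ctime diverge, recording either one could clobber
	   the other on a later mount, so the replacement dirent records 0. */
	static uint32_t gc_dirent_mctime(uint32_t mtime, uint32_t ctime)
	{
		return (mtime == ctime) ? mtime : 0;
	}

	int main(void)
	{
		printf("%u\n", (unsigned)gc_dirent_mctime(1000, 1000));	/* times agree: keep them */
		printf("%u\n", (unsigned)gc_dirent_mctime(1000, 1500));	/* utime() ran: write 0 */
		return 0;
	}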
@@ -792,7 +804,7 @@ static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_er | |||
792 | return 0; | 804 | return 0; |
793 | } | 805 | } |
794 | 806 | ||
795 | static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | 807 | static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, |
796 | struct jffs2_inode_info *f, struct jffs2_full_dirent *fd) | 808 | struct jffs2_inode_info *f, struct jffs2_full_dirent *fd) |
797 | { | 809 | { |
798 | struct jffs2_full_dirent **fdp = &f->dents; | 810 | struct jffs2_full_dirent **fdp = &f->dents; |
@@ -831,7 +843,7 @@ static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct | |||
831 | if (ref_totlen(c, NULL, raw) != rawlen) | 843 | if (ref_totlen(c, NULL, raw) != rawlen) |
832 | continue; | 844 | continue; |
833 | 845 | ||
834 | /* Doesn't matter if there's one in the same erase block. We're going to | 846 | /* Doesn't matter if there's one in the same erase block. We're going to |
835 | delete it too at the same time. */ | 847 | delete it too at the same time. */ |
836 | if (SECTOR_ADDR(raw->flash_offset) == SECTOR_ADDR(fd->raw->flash_offset)) | 848 | if (SECTOR_ADDR(raw->flash_offset) == SECTOR_ADDR(fd->raw->flash_offset)) |
837 | continue; | 849 | continue; |
@@ -883,6 +895,9 @@ static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct | |||
883 | kfree(rd); | 895 | kfree(rd); |
884 | } | 896 | } |
885 | 897 | ||
898 | /* FIXME: If we're deleting a dirent which contains the current mtime and ctime, | ||
899 | we should update the metadata node with those times accordingly */ | ||
900 | |||
886 | /* No need for it any more. Just mark it obsolete and remove it from the list */ | 901 | /* No need for it any more. Just mark it obsolete and remove it from the list */ |
887 | while (*fdp) { | 902 | while (*fdp) { |
888 | if ((*fdp) == fd) { | 903 | if ((*fdp) == fd) { |
@@ -912,13 +927,13 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras | |||
912 | 927 | ||
913 | D1(printk(KERN_DEBUG "Writing replacement hole node for ino #%u from offset 0x%x to 0x%x\n", | 928 | D1(printk(KERN_DEBUG "Writing replacement hole node for ino #%u from offset 0x%x to 0x%x\n", |
914 | f->inocache->ino, start, end)); | 929 | f->inocache->ino, start, end)); |
915 | 930 | ||
916 | memset(&ri, 0, sizeof(ri)); | 931 | memset(&ri, 0, sizeof(ri)); |
917 | 932 | ||
918 | if(fn->frags > 1) { | 933 | if(fn->frags > 1) { |
919 | size_t readlen; | 934 | size_t readlen; |
920 | uint32_t crc; | 935 | uint32_t crc; |
921 | /* It's partially obsoleted by a later write. So we have to | 936 | /* It's partially obsoleted by a later write. So we have to |
922 | write it out again with the _same_ version as before */ | 937 | write it out again with the _same_ version as before */ |
923 | ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(ri), &readlen, (char *)&ri); | 938 | ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(ri), &readlen, (char *)&ri); |
924 | if (readlen != sizeof(ri) || ret) { | 939 | if (readlen != sizeof(ri) || ret) { |
@@ -940,16 +955,16 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras | |||
940 | crc = crc32(0, &ri, sizeof(ri)-8); | 955 | crc = crc32(0, &ri, sizeof(ri)-8); |
941 | if (crc != je32_to_cpu(ri.node_crc)) { | 956 | if (crc != je32_to_cpu(ri.node_crc)) { |
942 | printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had CRC 0x%08x which doesn't match calculated CRC 0x%08x\n", | 957 | printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had CRC 0x%08x which doesn't match calculated CRC 0x%08x\n", |
943 | ref_offset(fn->raw), | 958 | ref_offset(fn->raw), |
944 | je32_to_cpu(ri.node_crc), crc); | 959 | je32_to_cpu(ri.node_crc), crc); |
945 | /* FIXME: We could possibly deal with this by writing new holes for each frag */ | 960 | /* FIXME: We could possibly deal with this by writing new holes for each frag */ |
946 | printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n", | 961 | printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n", |
947 | start, end, f->inocache->ino); | 962 | start, end, f->inocache->ino); |
948 | goto fill; | 963 | goto fill; |
949 | } | 964 | } |
950 | if (ri.compr != JFFS2_COMPR_ZERO) { | 965 | if (ri.compr != JFFS2_COMPR_ZERO) { |
951 | printk(KERN_WARNING "jffs2_garbage_collect_hole: Node 0x%08x wasn't a hole node!\n", ref_offset(fn->raw)); | 966 | printk(KERN_WARNING "jffs2_garbage_collect_hole: Node 0x%08x wasn't a hole node!\n", ref_offset(fn->raw)); |
952 | printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n", | 967 | printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n", |
953 | start, end, f->inocache->ino); | 968 | start, end, f->inocache->ino); |
954 | goto fill; | 969 | goto fill; |
955 | } | 970 | } |
@@ -967,7 +982,7 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras | |||
967 | ri.csize = cpu_to_je32(0); | 982 | ri.csize = cpu_to_je32(0); |
968 | ri.compr = JFFS2_COMPR_ZERO; | 983 | ri.compr = JFFS2_COMPR_ZERO; |
969 | } | 984 | } |
970 | 985 | ||
971 | frag = frag_last(&f->fragtree); | 986 | frag = frag_last(&f->fragtree); |
972 | if (frag) | 987 | if (frag) |
973 | /* Fetch the inode length from the fragtree rather than | 988 | /* Fetch the inode length from the fragtree rather than |
@@ -986,7 +1001,8 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras | |||
986 | ri.data_crc = cpu_to_je32(0); | 1001 | ri.data_crc = cpu_to_je32(0); |
987 | ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); | 1002 | ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); |
988 | 1003 | ||
989 | ret = jffs2_reserve_space_gc(c, sizeof(ri), &phys_ofs, &alloclen); | 1004 | ret = jffs2_reserve_space_gc(c, sizeof(ri), &phys_ofs, &alloclen, |
1005 | JFFS2_SUMMARY_INODE_SIZE); | ||
990 | if (ret) { | 1006 | if (ret) { |
991 | printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_hole failed: %d\n", | 1007 | printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_hole failed: %d\n", |
992 | sizeof(ri), ret); | 1008 | sizeof(ri), ret); |
@@ -1008,10 +1024,10 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras | |||
1008 | return 0; | 1024 | return 0; |
1009 | } | 1025 | } |
1010 | 1026 | ||
1011 | /* | 1027 | /* |
1012 | * We should only get here in the case where the node we are | 1028 | * We should only get here in the case where the node we are |
1013 | * replacing had more than one frag, so we kept the same version | 1029 | * replacing had more than one frag, so we kept the same version |
1014 | * number as before. (Except in case of error -- see 'goto fill;' | 1030 | * number as before. (Except in case of error -- see 'goto fill;' |
1015 | * above.) | 1031 | * above.) |
1016 | */ | 1032 | */ |
1017 | D1(if(unlikely(fn->frags <= 1)) { | 1033 | D1(if(unlikely(fn->frags <= 1)) { |
@@ -1023,7 +1039,7 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras | |||
1023 | /* This is a partially-overlapped hole node. Mark it REF_NORMAL not REF_PRISTINE */ | 1039 | /* This is a partially-overlapped hole node. Mark it REF_NORMAL not REF_PRISTINE */ |
1024 | mark_ref_normal(new_fn->raw); | 1040 | mark_ref_normal(new_fn->raw); |
1025 | 1041 | ||
1026 | for (frag = jffs2_lookup_node_frag(&f->fragtree, fn->ofs); | 1042 | for (frag = jffs2_lookup_node_frag(&f->fragtree, fn->ofs); |
1027 | frag; frag = frag_next(frag)) { | 1043 | frag; frag = frag_next(frag)) { |
1028 | if (frag->ofs > fn->size + fn->ofs) | 1044 | if (frag->ofs > fn->size + fn->ofs) |
1029 | break; | 1045 | break; |
@@ -1041,10 +1057,10 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras | |||
1041 | printk(KERN_WARNING "jffs2_garbage_collect_hole: New node has no frags!\n"); | 1057 | printk(KERN_WARNING "jffs2_garbage_collect_hole: New node has no frags!\n"); |
1042 | BUG(); | 1058 | BUG(); |
1043 | } | 1059 | } |
1044 | 1060 | ||
1045 | jffs2_mark_node_obsolete(c, fn->raw); | 1061 | jffs2_mark_node_obsolete(c, fn->raw); |
1046 | jffs2_free_full_dnode(fn); | 1062 | jffs2_free_full_dnode(fn); |
1047 | 1063 | ||
1048 | return 0; | 1064 | return 0; |
1049 | } | 1065 | } |
1050 | 1066 | ||
@@ -1054,12 +1070,12 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era | |||
1054 | { | 1070 | { |
1055 | struct jffs2_full_dnode *new_fn; | 1071 | struct jffs2_full_dnode *new_fn; |
1056 | struct jffs2_raw_inode ri; | 1072 | struct jffs2_raw_inode ri; |
1057 | uint32_t alloclen, phys_ofs, offset, orig_end, orig_start; | 1073 | uint32_t alloclen, phys_ofs, offset, orig_end, orig_start; |
1058 | int ret = 0; | 1074 | int ret = 0; |
1059 | unsigned char *comprbuf = NULL, *writebuf; | 1075 | unsigned char *comprbuf = NULL, *writebuf; |
1060 | unsigned long pg; | 1076 | unsigned long pg; |
1061 | unsigned char *pg_ptr; | 1077 | unsigned char *pg_ptr; |
1062 | 1078 | ||
1063 | memset(&ri, 0, sizeof(ri)); | 1079 | memset(&ri, 0, sizeof(ri)); |
1064 | 1080 | ||
1065 | D1(printk(KERN_DEBUG "Writing replacement dnode for ino #%u from offset 0x%x to 0x%x\n", | 1081 | D1(printk(KERN_DEBUG "Writing replacement dnode for ino #%u from offset 0x%x to 0x%x\n", |
@@ -1071,8 +1087,8 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era | |||
1071 | if (c->nr_free_blocks + c->nr_erasing_blocks > c->resv_blocks_gcmerge) { | 1087 | if (c->nr_free_blocks + c->nr_erasing_blocks > c->resv_blocks_gcmerge) { |
1072 | /* Attempt to do some merging. But only expand to cover logically | 1088 | /* Attempt to do some merging. But only expand to cover logically |
1073 | adjacent frags if the block containing them is already considered | 1089 | adjacent frags if the block containing them is already considered |
1074 | to be dirty. Otherwise we end up with GC just going round in | 1090 | to be dirty. Otherwise we end up with GC just going round in |
1075 | circles dirtying the nodes it already wrote out, especially | 1091 | circles dirtying the nodes it already wrote out, especially |
1076 | on NAND where we have small eraseblocks and hence a much higher | 1092 | on NAND where we have small eraseblocks and hence a much higher |
1077 | chance of nodes having to be split to cross boundaries. */ | 1093 | chance of nodes having to be split to cross boundaries. */ |
1078 | 1094 | ||
@@ -1106,7 +1122,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era | |||
1106 | break; | 1122 | break; |
1107 | } else { | 1123 | } else { |
1108 | 1124 | ||
1109 | /* OK, it's a frag which extends to the beginning of the page. Does it live | 1125 | /* OK, it's a frag which extends to the beginning of the page. Does it live |
1110 | in a block which is still considered clean? If so, don't obsolete it. | 1126 | in a block which is still considered clean? If so, don't obsolete it. |
1111 | If not, cover it anyway. */ | 1127 | If not, cover it anyway. */ |
1112 | 1128 | ||
@@ -1156,7 +1172,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era | |||
1156 | break; | 1172 | break; |
1157 | } else { | 1173 | } else { |
1158 | 1174 | ||
1159 | /* OK, it's a frag which extends to the beginning of the page. Does it live | 1175 | /* OK, it's a frag which extends to the beginning of the page. Does it live |
1160 | in a block which is still considered clean? If so, don't obsolete it. | 1176 | in a block which is still considered clean? If so, don't obsolete it. |
1161 | If not, cover it anyway. */ | 1177 | If not, cover it anyway. */ |
1162 | 1178 | ||
@@ -1183,14 +1199,14 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era | |||
1183 | break; | 1199 | break; |
1184 | } | 1200 | } |
1185 | } | 1201 | } |
1186 | D1(printk(KERN_DEBUG "Expanded dnode to write from (0x%x-0x%x) to (0x%x-0x%x)\n", | 1202 | D1(printk(KERN_DEBUG "Expanded dnode to write from (0x%x-0x%x) to (0x%x-0x%x)\n", |
1187 | orig_start, orig_end, start, end)); | 1203 | orig_start, orig_end, start, end)); |
1188 | 1204 | ||
1189 | D1(BUG_ON(end > frag_last(&f->fragtree)->ofs + frag_last(&f->fragtree)->size)); | 1205 | D1(BUG_ON(end > frag_last(&f->fragtree)->ofs + frag_last(&f->fragtree)->size)); |
1190 | BUG_ON(end < orig_end); | 1206 | BUG_ON(end < orig_end); |
1191 | BUG_ON(start > orig_start); | 1207 | BUG_ON(start > orig_start); |
1192 | } | 1208 | } |
1193 | 1209 | ||
1194 | /* First, use readpage() to read the appropriate page into the page cache */ | 1210 | /* First, use readpage() to read the appropriate page into the page cache */ |
1195 | /* Q: What happens if we actually try to GC the _same_ page for which commit_write() | 1211 | /* Q: What happens if we actually try to GC the _same_ page for which commit_write() |
1196 | * triggered garbage collection in the first place? | 1212 | * triggered garbage collection in the first place? |
@@ -1211,7 +1227,8 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era | |||
1211 | uint32_t cdatalen; | 1227 | uint32_t cdatalen; |
1212 | uint16_t comprtype = JFFS2_COMPR_NONE; | 1228 | uint16_t comprtype = JFFS2_COMPR_NONE; |
1213 | 1229 | ||
1214 | ret = jffs2_reserve_space_gc(c, sizeof(ri) + JFFS2_MIN_DATA_LEN, &phys_ofs, &alloclen); | 1230 | ret = jffs2_reserve_space_gc(c, sizeof(ri) + JFFS2_MIN_DATA_LEN, &phys_ofs, |
1231 | &alloclen, JFFS2_SUMMARY_INODE_SIZE); | ||
1215 | 1232 | ||
1216 | if (ret) { | 1233 | if (ret) { |
1217 | printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dnode failed: %d\n", | 1234 | printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dnode failed: %d\n", |
@@ -1246,7 +1263,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era | |||
1246 | ri.usercompr = (comprtype >> 8) & 0xff; | 1263 | ri.usercompr = (comprtype >> 8) & 0xff; |
1247 | ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); | 1264 | ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); |
1248 | ri.data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen)); | 1265 | ri.data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen)); |
1249 | 1266 | ||
1250 | new_fn = jffs2_write_dnode(c, f, &ri, comprbuf, cdatalen, phys_ofs, ALLOC_GC); | 1267 | new_fn = jffs2_write_dnode(c, f, &ri, comprbuf, cdatalen, phys_ofs, ALLOC_GC); |
1251 | 1268 | ||
1252 | jffs2_free_comprbuf(comprbuf, writebuf); | 1269 | jffs2_free_comprbuf(comprbuf, writebuf); |
@@ -1268,4 +1285,3 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era | |||
1268 | jffs2_gc_release_page(c, pg_ptr, &pg); | 1285 | jffs2_gc_release_page(c, pg_ptr, &pg); |
1269 | return ret; | 1286 | return ret; |
1270 | } | 1287 | } |
1271 | |||
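Every jffs2_reserve_space_gc() call in this file now passes a fifth argument: an upper bound on the summary record the new node may need (JFFS2_SUMMARY_INODE_SIZE, JFFS2_SUMMARY_DIRENT_SIZE(nsize), or rawlen when only a rough estimate exists). Inferred from these call sites, the helper's shape is roughly as follows; the authoritative prototype lives in the JFFS2 headers, not here:

	#include <stdint.h>

	struct jffs2_sb_info;	/* opaque here; sketch only */

	/* minsize:  smallest usable allocation, as before.
	 * phys_ofs: returned flash offset of the reservation.
	 * len:      returned length of the reservation.
	 * sumsize:  pessimistic size of the summary entry for the node about to
	 *           be written, so the allocator can keep room for the
	 *           per-eraseblock summary. */
	int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
				   uint32_t *phys_ofs, uint32_t *len,
				   uint32_t sumsize);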
diff --git a/fs/jffs2/histo.h b/fs/jffs2/histo.h index 84f184f0836f..22a93a08210c 100644 --- a/fs/jffs2/histo.h +++ b/fs/jffs2/histo.h | |||
@@ -1,3 +1,3 @@ | |||
1 | /* This file provides the bit-probabilities for the input file */ | 1 | /* This file provides the bit-probabilities for the input file */ |
2 | #define BIT_DIVIDER 629 | 2 | #define BIT_DIVIDER 629 |
3 | static int bits[9] = { 179,167,183,165,159,198,178,119,}; /* ia32 .so files */ | 3 | static int bits[9] = { 179,167,183,165,159,198,178,119,}; /* ia32 .so files */ |
diff --git a/fs/jffs2/histo_mips.h b/fs/jffs2/histo_mips.h index 9a443268d885..fa3dac19a109 100644 --- a/fs/jffs2/histo_mips.h +++ b/fs/jffs2/histo_mips.h | |||
@@ -1,2 +1,2 @@ | |||
1 | #define BIT_DIVIDER_MIPS 1043 | 1 | #define BIT_DIVIDER_MIPS 1043 |
2 | static int bits_mips[8] = { 277,249,290,267,229,341,212,241}; /* mips32 */ | 2 | static int bits_mips[8] = { 277,249,290,267,229,341,212,241}; /* mips32 */ |
diff --git a/fs/jffs2/ioctl.c b/fs/jffs2/ioctl.c index 238c7992064c..69099835de1c 100644 --- a/fs/jffs2/ioctl.c +++ b/fs/jffs2/ioctl.c | |||
@@ -7,17 +7,17 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: ioctl.c,v 1.9 2004/11/16 20:36:11 dwmw2 Exp $ | 10 | * $Id: ioctl.c,v 1.10 2005/11/07 11:14:40 gleixner Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/fs.h> | 14 | #include <linux/fs.h> |
15 | 15 | ||
16 | int jffs2_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, | 16 | int jffs2_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, |
17 | unsigned long arg) | 17 | unsigned long arg) |
18 | { | 18 | { |
19 | /* Later, this will provide for lsattr.jffs2 and chattr.jffs2, which | 19 | /* Later, this will provide for lsattr.jffs2 and chattr.jffs2, which |
20 | will include compression support etc. */ | 20 | will include compression support etc. */ |
21 | return -ENOTTY; | 21 | return -ENOTTY; |
22 | } | 22 | } |
23 | 23 | ||
diff --git a/fs/jffs2/malloc.c b/fs/jffs2/malloc.c index 5abb431c2a00..036cbd11c004 100644 --- a/fs/jffs2/malloc.c +++ b/fs/jffs2/malloc.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: malloc.c,v 1.28 2004/11/16 20:36:11 dwmw2 Exp $ | 10 | * $Id: malloc.c,v 1.31 2005/11/07 11:14:40 gleixner Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -17,15 +17,6 @@ | |||
17 | #include <linux/jffs2.h> | 17 | #include <linux/jffs2.h> |
18 | #include "nodelist.h" | 18 | #include "nodelist.h" |
19 | 19 | ||
20 | #if 0 | ||
21 | #define JFFS2_SLAB_POISON SLAB_POISON | ||
22 | #else | ||
23 | #define JFFS2_SLAB_POISON 0 | ||
24 | #endif | ||
25 | |||
26 | // replace this by #define D3 (x) x for cache debugging | ||
27 | #define D3(x) | ||
28 | |||
29 | /* These are initialised to NULL in the kernel startup code. | 20 | /* These are initialised to NULL in the kernel startup code. |
30 | If you're porting to other operating systems, beware */ | 21 | If you're porting to other operating systems, beware */ |
31 | static kmem_cache_t *full_dnode_slab; | 22 | static kmem_cache_t *full_dnode_slab; |
@@ -38,45 +29,45 @@ static kmem_cache_t *inode_cache_slab; | |||
38 | 29 | ||
39 | int __init jffs2_create_slab_caches(void) | 30 | int __init jffs2_create_slab_caches(void) |
40 | { | 31 | { |
41 | full_dnode_slab = kmem_cache_create("jffs2_full_dnode", | 32 | full_dnode_slab = kmem_cache_create("jffs2_full_dnode", |
42 | sizeof(struct jffs2_full_dnode), | 33 | sizeof(struct jffs2_full_dnode), |
43 | 0, JFFS2_SLAB_POISON, NULL, NULL); | 34 | 0, 0, NULL, NULL); |
44 | if (!full_dnode_slab) | 35 | if (!full_dnode_slab) |
45 | goto err; | 36 | goto err; |
46 | 37 | ||
47 | raw_dirent_slab = kmem_cache_create("jffs2_raw_dirent", | 38 | raw_dirent_slab = kmem_cache_create("jffs2_raw_dirent", |
48 | sizeof(struct jffs2_raw_dirent), | 39 | sizeof(struct jffs2_raw_dirent), |
49 | 0, JFFS2_SLAB_POISON, NULL, NULL); | 40 | 0, 0, NULL, NULL); |
50 | if (!raw_dirent_slab) | 41 | if (!raw_dirent_slab) |
51 | goto err; | 42 | goto err; |
52 | 43 | ||
53 | raw_inode_slab = kmem_cache_create("jffs2_raw_inode", | 44 | raw_inode_slab = kmem_cache_create("jffs2_raw_inode", |
54 | sizeof(struct jffs2_raw_inode), | 45 | sizeof(struct jffs2_raw_inode), |
55 | 0, JFFS2_SLAB_POISON, NULL, NULL); | 46 | 0, 0, NULL, NULL); |
56 | if (!raw_inode_slab) | 47 | if (!raw_inode_slab) |
57 | goto err; | 48 | goto err; |
58 | 49 | ||
59 | tmp_dnode_info_slab = kmem_cache_create("jffs2_tmp_dnode", | 50 | tmp_dnode_info_slab = kmem_cache_create("jffs2_tmp_dnode", |
60 | sizeof(struct jffs2_tmp_dnode_info), | 51 | sizeof(struct jffs2_tmp_dnode_info), |
61 | 0, JFFS2_SLAB_POISON, NULL, NULL); | 52 | 0, 0, NULL, NULL); |
62 | if (!tmp_dnode_info_slab) | 53 | if (!tmp_dnode_info_slab) |
63 | goto err; | 54 | goto err; |
64 | 55 | ||
65 | raw_node_ref_slab = kmem_cache_create("jffs2_raw_node_ref", | 56 | raw_node_ref_slab = kmem_cache_create("jffs2_raw_node_ref", |
66 | sizeof(struct jffs2_raw_node_ref), | 57 | sizeof(struct jffs2_raw_node_ref), |
67 | 0, JFFS2_SLAB_POISON, NULL, NULL); | 58 | 0, 0, NULL, NULL); |
68 | if (!raw_node_ref_slab) | 59 | if (!raw_node_ref_slab) |
69 | goto err; | 60 | goto err; |
70 | 61 | ||
71 | node_frag_slab = kmem_cache_create("jffs2_node_frag", | 62 | node_frag_slab = kmem_cache_create("jffs2_node_frag", |
72 | sizeof(struct jffs2_node_frag), | 63 | sizeof(struct jffs2_node_frag), |
73 | 0, JFFS2_SLAB_POISON, NULL, NULL); | 64 | 0, 0, NULL, NULL); |
74 | if (!node_frag_slab) | 65 | if (!node_frag_slab) |
75 | goto err; | 66 | goto err; |
76 | 67 | ||
77 | inode_cache_slab = kmem_cache_create("jffs2_inode_cache", | 68 | inode_cache_slab = kmem_cache_create("jffs2_inode_cache", |
78 | sizeof(struct jffs2_inode_cache), | 69 | sizeof(struct jffs2_inode_cache), |
79 | 0, JFFS2_SLAB_POISON, NULL, NULL); | 70 | 0, 0, NULL, NULL); |
80 | if (inode_cache_slab) | 71 | if (inode_cache_slab) |
81 | return 0; | 72 | return 0; |
82 | err: | 73 | err: |
@@ -104,102 +95,113 @@ void jffs2_destroy_slab_caches(void) | |||
104 | 95 | ||
105 | struct jffs2_full_dirent *jffs2_alloc_full_dirent(int namesize) | 96 | struct jffs2_full_dirent *jffs2_alloc_full_dirent(int namesize) |
106 | { | 97 | { |
107 | return kmalloc(sizeof(struct jffs2_full_dirent) + namesize, GFP_KERNEL); | 98 | struct jffs2_full_dirent *ret; |
99 | ret = kmalloc(sizeof(struct jffs2_full_dirent) + namesize, GFP_KERNEL); | ||
100 | dbg_memalloc("%p\n", ret); | ||
101 | return ret; | ||
108 | } | 102 | } |
109 | 103 | ||
110 | void jffs2_free_full_dirent(struct jffs2_full_dirent *x) | 104 | void jffs2_free_full_dirent(struct jffs2_full_dirent *x) |
111 | { | 105 | { |
106 | dbg_memalloc("%p\n", x); | ||
112 | kfree(x); | 107 | kfree(x); |
113 | } | 108 | } |
114 | 109 | ||
115 | struct jffs2_full_dnode *jffs2_alloc_full_dnode(void) | 110 | struct jffs2_full_dnode *jffs2_alloc_full_dnode(void) |
116 | { | 111 | { |
117 | struct jffs2_full_dnode *ret = kmem_cache_alloc(full_dnode_slab, GFP_KERNEL); | 112 | struct jffs2_full_dnode *ret; |
118 | D3 (printk (KERN_DEBUG "alloc_full_dnode at %p\n", ret)); | 113 | ret = kmem_cache_alloc(full_dnode_slab, GFP_KERNEL); |
114 | dbg_memalloc("%p\n", ret); | ||
119 | return ret; | 115 | return ret; |
120 | } | 116 | } |
121 | 117 | ||
122 | void jffs2_free_full_dnode(struct jffs2_full_dnode *x) | 118 | void jffs2_free_full_dnode(struct jffs2_full_dnode *x) |
123 | { | 119 | { |
124 | D3 (printk (KERN_DEBUG "free full_dnode at %p\n", x)); | 120 | dbg_memalloc("%p\n", x); |
125 | kmem_cache_free(full_dnode_slab, x); | 121 | kmem_cache_free(full_dnode_slab, x); |
126 | } | 122 | } |
127 | 123 | ||
128 | struct jffs2_raw_dirent *jffs2_alloc_raw_dirent(void) | 124 | struct jffs2_raw_dirent *jffs2_alloc_raw_dirent(void) |
129 | { | 125 | { |
130 | struct jffs2_raw_dirent *ret = kmem_cache_alloc(raw_dirent_slab, GFP_KERNEL); | 126 | struct jffs2_raw_dirent *ret; |
131 | D3 (printk (KERN_DEBUG "alloc_raw_dirent\n", ret)); | 127 | ret = kmem_cache_alloc(raw_dirent_slab, GFP_KERNEL); |
128 | dbg_memalloc("%p\n", ret); | ||
132 | return ret; | 129 | return ret; |
133 | } | 130 | } |
134 | 131 | ||
135 | void jffs2_free_raw_dirent(struct jffs2_raw_dirent *x) | 132 | void jffs2_free_raw_dirent(struct jffs2_raw_dirent *x) |
136 | { | 133 | { |
137 | D3 (printk (KERN_DEBUG "free_raw_dirent at %p\n", x)); | 134 | dbg_memalloc("%p\n", x); |
138 | kmem_cache_free(raw_dirent_slab, x); | 135 | kmem_cache_free(raw_dirent_slab, x); |
139 | } | 136 | } |
140 | 137 | ||
141 | struct jffs2_raw_inode *jffs2_alloc_raw_inode(void) | 138 | struct jffs2_raw_inode *jffs2_alloc_raw_inode(void) |
142 | { | 139 | { |
143 | struct jffs2_raw_inode *ret = kmem_cache_alloc(raw_inode_slab, GFP_KERNEL); | 140 | struct jffs2_raw_inode *ret; |
144 | D3 (printk (KERN_DEBUG "alloc_raw_inode at %p\n", ret)); | 141 | ret = kmem_cache_alloc(raw_inode_slab, GFP_KERNEL); |
142 | dbg_memalloc("%p\n", ret); | ||
145 | return ret; | 143 | return ret; |
146 | } | 144 | } |
147 | 145 | ||
148 | void jffs2_free_raw_inode(struct jffs2_raw_inode *x) | 146 | void jffs2_free_raw_inode(struct jffs2_raw_inode *x) |
149 | { | 147 | { |
150 | D3 (printk (KERN_DEBUG "free_raw_inode at %p\n", x)); | 148 | dbg_memalloc("%p\n", x); |
151 | kmem_cache_free(raw_inode_slab, x); | 149 | kmem_cache_free(raw_inode_slab, x); |
152 | } | 150 | } |
153 | 151 | ||
154 | struct jffs2_tmp_dnode_info *jffs2_alloc_tmp_dnode_info(void) | 152 | struct jffs2_tmp_dnode_info *jffs2_alloc_tmp_dnode_info(void) |
155 | { | 153 | { |
156 | struct jffs2_tmp_dnode_info *ret = kmem_cache_alloc(tmp_dnode_info_slab, GFP_KERNEL); | 154 | struct jffs2_tmp_dnode_info *ret; |
157 | D3 (printk (KERN_DEBUG "alloc_tmp_dnode_info at %p\n", ret)); | 155 | ret = kmem_cache_alloc(tmp_dnode_info_slab, GFP_KERNEL); |
156 | dbg_memalloc("%p\n", | ||
157 | ret); | ||
158 | return ret; | 158 | return ret; |
159 | } | 159 | } |
160 | 160 | ||
161 | void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *x) | 161 | void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *x) |
162 | { | 162 | { |
163 | D3 (printk (KERN_DEBUG "free_tmp_dnode_info at %p\n", x)); | 163 | dbg_memalloc("%p\n", x); |
164 | kmem_cache_free(tmp_dnode_info_slab, x); | 164 | kmem_cache_free(tmp_dnode_info_slab, x); |
165 | } | 165 | } |
166 | 166 | ||
167 | struct jffs2_raw_node_ref *jffs2_alloc_raw_node_ref(void) | 167 | struct jffs2_raw_node_ref *jffs2_alloc_raw_node_ref(void) |
168 | { | 168 | { |
169 | struct jffs2_raw_node_ref *ret = kmem_cache_alloc(raw_node_ref_slab, GFP_KERNEL); | 169 | struct jffs2_raw_node_ref *ret; |
170 | D3 (printk (KERN_DEBUG "alloc_raw_node_ref at %p\n", ret)); | 170 | ret = kmem_cache_alloc(raw_node_ref_slab, GFP_KERNEL); |
171 | dbg_memalloc("%p\n", ret); | ||
171 | return ret; | 172 | return ret; |
172 | } | 173 | } |
173 | 174 | ||
174 | void jffs2_free_raw_node_ref(struct jffs2_raw_node_ref *x) | 175 | void jffs2_free_raw_node_ref(struct jffs2_raw_node_ref *x) |
175 | { | 176 | { |
176 | D3 (printk (KERN_DEBUG "free_raw_node_ref at %p\n", x)); | 177 | dbg_memalloc("%p\n", x); |
177 | kmem_cache_free(raw_node_ref_slab, x); | 178 | kmem_cache_free(raw_node_ref_slab, x); |
178 | } | 179 | } |
179 | 180 | ||
180 | struct jffs2_node_frag *jffs2_alloc_node_frag(void) | 181 | struct jffs2_node_frag *jffs2_alloc_node_frag(void) |
181 | { | 182 | { |
182 | struct jffs2_node_frag *ret = kmem_cache_alloc(node_frag_slab, GFP_KERNEL); | 183 | struct jffs2_node_frag *ret; |
183 | D3 (printk (KERN_DEBUG "alloc_node_frag at %p\n", ret)); | 184 | ret = kmem_cache_alloc(node_frag_slab, GFP_KERNEL); |
185 | dbg_memalloc("%p\n", ret); | ||
184 | return ret; | 186 | return ret; |
185 | } | 187 | } |
186 | 188 | ||
187 | void jffs2_free_node_frag(struct jffs2_node_frag *x) | 189 | void jffs2_free_node_frag(struct jffs2_node_frag *x) |
188 | { | 190 | { |
189 | D3 (printk (KERN_DEBUG "free_node_frag at %p\n", x)); | 191 | dbg_memalloc("%p\n", x); |
190 | kmem_cache_free(node_frag_slab, x); | 192 | kmem_cache_free(node_frag_slab, x); |
191 | } | 193 | } |
192 | 194 | ||
193 | struct jffs2_inode_cache *jffs2_alloc_inode_cache(void) | 195 | struct jffs2_inode_cache *jffs2_alloc_inode_cache(void) |
194 | { | 196 | { |
195 | struct jffs2_inode_cache *ret = kmem_cache_alloc(inode_cache_slab, GFP_KERNEL); | 197 | struct jffs2_inode_cache *ret; |
196 | D3 (printk(KERN_DEBUG "Allocated inocache at %p\n", ret)); | 198 | ret = kmem_cache_alloc(inode_cache_slab, GFP_KERNEL); |
199 | dbg_memalloc("%p\n", ret); | ||
197 | return ret; | 200 | return ret; |
198 | } | 201 | } |
199 | 202 | ||
200 | void jffs2_free_inode_cache(struct jffs2_inode_cache *x) | 203 | void jffs2_free_inode_cache(struct jffs2_inode_cache *x) |
201 | { | 204 | { |
202 | D3 (printk(KERN_DEBUG "Freeing inocache at %p\n", x)); | 205 | dbg_memalloc("%p\n", x); |
203 | kmem_cache_free(inode_cache_slab, x); | 206 | kmem_cache_free(inode_cache_slab, x); |
204 | } | 207 | } |
205 | |||
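The D3() allocation printks in this file give way to dbg_memalloc(), which is not defined in this hunk (presumably it comes from the JFFS2 debug infrastructure added elsewhere in this merge). A minimal user-space sketch of the pattern, assuming a compile-time switch comparable to the old D3() wrapper; the real macro and its exact output format live in the JFFS2 debug header, not here:

	#include <stdio.h>
	#include <stdlib.h>

	/* Assumed shape only: print the calling allocator's name and the pointer
	   when memory-allocation debugging is compiled in, cost nothing otherwise. */
	#ifdef DEBUG_MEMALLOC
	#define dbg_memalloc(fmt, ...) \
		fprintf(stderr, "[memalloc] %s: " fmt, __func__, ##__VA_ARGS__)
	#else
	#define dbg_memalloc(fmt, ...) do { } while (0)
	#endif

	static void *alloc_example(size_t size)
	{
		void *ret = malloc(size);

		dbg_memalloc("%p\n", ret);	/* same call shape as the hunks above */
		return ret;
	}

	int main(void)
	{
		free(alloc_example(64));
		return 0;
	}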
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c index 4991c348f6ec..c79eebb8ab32 100644 --- a/fs/jffs2/nodelist.c +++ b/fs/jffs2/nodelist.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: nodelist.c,v 1.98 2005/07/10 15:15:32 dedekind Exp $ | 10 | * $Id: nodelist.c,v 1.115 2005/11/07 11:14:40 gleixner Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -24,469 +24,832 @@ | |||
24 | void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list) | 24 | void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list) |
25 | { | 25 | { |
26 | struct jffs2_full_dirent **prev = list; | 26 | struct jffs2_full_dirent **prev = list; |
27 | D1(printk(KERN_DEBUG "jffs2_add_fd_to_list( %p, %p (->%p))\n", new, list, *list)); | 27 | |
28 | dbg_dentlist("add dirent \"%s\", ino #%u\n", new->name, new->ino); | ||
28 | 29 | ||
29 | while ((*prev) && (*prev)->nhash <= new->nhash) { | 30 | while ((*prev) && (*prev)->nhash <= new->nhash) { |
30 | if ((*prev)->nhash == new->nhash && !strcmp((*prev)->name, new->name)) { | 31 | if ((*prev)->nhash == new->nhash && !strcmp((*prev)->name, new->name)) { |
31 | /* Duplicate. Free one */ | 32 | /* Duplicate. Free one */ |
32 | if (new->version < (*prev)->version) { | 33 | if (new->version < (*prev)->version) { |
33 | D1(printk(KERN_DEBUG "Eep! Marking new dirent node obsolete\n")); | 34 | dbg_dentlist("Eep! Marking new dirent node is obsolete, old is \"%s\", ino #%u\n", |
34 | D1(printk(KERN_DEBUG "New dirent is \"%s\"->ino #%u. Old is \"%s\"->ino #%u\n", new->name, new->ino, (*prev)->name, (*prev)->ino)); | 35 | (*prev)->name, (*prev)->ino); |
35 | jffs2_mark_node_obsolete(c, new->raw); | 36 | jffs2_mark_node_obsolete(c, new->raw); |
36 | jffs2_free_full_dirent(new); | 37 | jffs2_free_full_dirent(new); |
37 | } else { | 38 | } else { |
38 | D1(printk(KERN_DEBUG "Marking old dirent node (ino #%u) obsolete\n", (*prev)->ino)); | 39 | dbg_dentlist("marking old dirent \"%s\", ino #%u bsolete\n", |
40 | (*prev)->name, (*prev)->ino); | ||
39 | new->next = (*prev)->next; | 41 | new->next = (*prev)->next; |
40 | jffs2_mark_node_obsolete(c, ((*prev)->raw)); | 42 | jffs2_mark_node_obsolete(c, ((*prev)->raw)); |
41 | jffs2_free_full_dirent(*prev); | 43 | jffs2_free_full_dirent(*prev); |
42 | *prev = new; | 44 | *prev = new; |
43 | } | 45 | } |
44 | goto out; | 46 | return; |
45 | } | 47 | } |
46 | prev = &((*prev)->next); | 48 | prev = &((*prev)->next); |
47 | } | 49 | } |
48 | new->next = *prev; | 50 | new->next = *prev; |
49 | *prev = new; | 51 | *prev = new; |
52 | } | ||
53 | |||
54 | void jffs2_truncate_fragtree(struct jffs2_sb_info *c, struct rb_root *list, uint32_t size) | ||
55 | { | ||
56 | struct jffs2_node_frag *frag = jffs2_lookup_node_frag(list, size); | ||
57 | |||
58 | dbg_fragtree("truncating fragtree to 0x%08x bytes\n", size); | ||
59 | |||
60 | /* We know frag->ofs <= size. That's what lookup does for us */ | ||
61 | if (frag && frag->ofs != size) { | ||
62 | if (frag->ofs+frag->size > size) { | ||
63 | frag->size = size - frag->ofs; | ||
64 | } | ||
65 | frag = frag_next(frag); | ||
66 | } | ||
67 | while (frag && frag->ofs >= size) { | ||
68 | struct jffs2_node_frag *next = frag_next(frag); | ||
69 | |||
70 | frag_erase(frag, list); | ||
71 | jffs2_obsolete_node_frag(c, frag); | ||
72 | frag = next; | ||
73 | } | ||
50 | 74 | ||
51 | out: | 75 | if (size == 0) |
52 | D2(while(*list) { | 76 | return; |
53 | printk(KERN_DEBUG "Dirent \"%s\" (hash 0x%08x, ino #%u\n", (*list)->name, (*list)->nhash, (*list)->ino); | 77 | |
54 | list = &(*list)->next; | 78 | /* |
55 | }); | 79 | * If the last fragment starts at the RAM page boundary, it is |
80 | * REF_PRISTINE irrespective of its size. | ||
81 | */ | ||
82 | frag = frag_last(list); | ||
83 | if (frag->node && (frag->ofs & (PAGE_CACHE_SIZE - 1)) == 0) { | ||
84 | dbg_fragtree2("marking the last fragment 0x%08x-0x%08x REF_PRISTINE.\n", | ||
85 | frag->ofs, frag->ofs + frag->size); | ||
86 | frag->node->raw->flash_offset = ref_offset(frag->node->raw) | REF_PRISTINE; | ||
87 | } | ||
56 | } | 88 | } |
57 | 89 | ||
58 | /* | 90 | void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *this) |
59 | * Put a new tmp_dnode_info into the temporary RB-tree, keeping the list in | ||
60 | * order of increasing version. | ||
61 | */ | ||
62 | static void jffs2_add_tn_to_tree(struct jffs2_tmp_dnode_info *tn, struct rb_root *list) | ||
63 | { | 91 | { |
64 | struct rb_node **p = &list->rb_node; | 92 | if (this->node) { |
65 | struct rb_node * parent = NULL; | 93 | this->node->frags--; |
66 | struct jffs2_tmp_dnode_info *this; | 94 | if (!this->node->frags) { |
67 | 95 | /* The node has no valid frags left. It's totally obsoleted */ | |
68 | while (*p) { | 96 | dbg_fragtree2("marking old node @0x%08x (0x%04x-0x%04x) obsolete\n", |
69 | parent = *p; | 97 | ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size); |
70 | this = rb_entry(parent, struct jffs2_tmp_dnode_info, rb); | 98 | jffs2_mark_node_obsolete(c, this->node->raw); |
71 | 99 | jffs2_free_full_dnode(this->node); | |
72 | /* There may actually be a collision here, but it doesn't | 100 | } else { |
73 | actually matter. As long as the two nodes with the same | 101 | dbg_fragtree2("marking old node @0x%08x (0x%04x-0x%04x) REF_NORMAL. frags is %d\n", |
74 | version are together, it's all fine. */ | 102 | ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size, this->node->frags); |
75 | if (tn->version < this->version) | 103 | mark_ref_normal(this->node->raw); |
76 | p = &(*p)->rb_left; | 104 | } |
77 | else | ||
78 | p = &(*p)->rb_right; | ||
79 | } | ||
80 | 105 | ||
81 | rb_link_node(&tn->rb, parent, p); | 106 | } |
82 | rb_insert_color(&tn->rb, list); | 107 | jffs2_free_node_frag(this); |
83 | } | 108 | } |
84 | 109 | ||
85 | static void jffs2_free_tmp_dnode_info_list(struct rb_root *list) | 110 | static void jffs2_fragtree_insert(struct jffs2_node_frag *newfrag, struct jffs2_node_frag *base) |
86 | { | 111 | { |
87 | struct rb_node *this; | 112 | struct rb_node *parent = &base->rb; |
88 | struct jffs2_tmp_dnode_info *tn; | 113 | struct rb_node **link = &parent; |
89 | 114 | ||
90 | this = list->rb_node; | 115 | dbg_fragtree2("insert frag (0x%04x-0x%04x)\n", newfrag->ofs, newfrag->ofs + newfrag->size); |
91 | 116 | ||
92 | /* Now at bottom of tree */ | 117 | while (*link) { |
93 | while (this) { | 118 | parent = *link; |
94 | if (this->rb_left) | 119 | base = rb_entry(parent, struct jffs2_node_frag, rb); |
95 | this = this->rb_left; | 120 | |
96 | else if (this->rb_right) | 121 | if (newfrag->ofs > base->ofs) |
97 | this = this->rb_right; | 122 | link = &base->rb.rb_right; |
123 | else if (newfrag->ofs < base->ofs) | ||
124 | link = &base->rb.rb_left; | ||
98 | else { | 125 | else { |
99 | tn = rb_entry(this, struct jffs2_tmp_dnode_info, rb); | 126 | JFFS2_ERROR("duplicate frag at %08x (%p,%p)\n", newfrag->ofs, newfrag, base); |
100 | jffs2_free_full_dnode(tn->fn); | 127 | BUG(); |
101 | jffs2_free_tmp_dnode_info(tn); | ||
102 | |||
103 | this = this->rb_parent; | ||
104 | if (!this) | ||
105 | break; | ||
106 | |||
107 | if (this->rb_left == &tn->rb) | ||
108 | this->rb_left = NULL; | ||
109 | else if (this->rb_right == &tn->rb) | ||
110 | this->rb_right = NULL; | ||
111 | else BUG(); | ||
112 | } | 128 | } |
113 | } | 129 | } |
114 | list->rb_node = NULL; | 130 | |
131 | rb_link_node(&newfrag->rb, &base->rb, link); | ||
115 | } | 132 | } |
116 | 133 | ||
117 | static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd) | 134 | /* |
135 | * Allocates and initializes a new fragment. | ||
136 | */ | ||
137 | static inline struct jffs2_node_frag * new_fragment(struct jffs2_full_dnode *fn, uint32_t ofs, uint32_t size) | ||
118 | { | 138 | { |
119 | struct jffs2_full_dirent *next; | 139 | struct jffs2_node_frag *newfrag; |
120 | 140 | ||
121 | while (fd) { | 141 | newfrag = jffs2_alloc_node_frag(); |
122 | next = fd->next; | 142 | if (likely(newfrag)) { |
123 | jffs2_free_full_dirent(fd); | 143 | newfrag->ofs = ofs; |
124 | fd = next; | 144 | newfrag->size = size; |
145 | newfrag->node = fn; | ||
146 | } else { | ||
147 | JFFS2_ERROR("cannot allocate a jffs2_node_frag object\n"); | ||
125 | } | 148 | } |
149 | |||
150 | return newfrag; | ||
126 | } | 151 | } |
127 | 152 | ||
128 | /* Returns first valid node after 'ref'. May return 'ref' */ | 153 | /* |
129 | static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref) | 154 | * Called when no overlapping fragment exists. Inserts a hole before the new |
155 | * fragment and inserts the new fragment into the fragtree. | ||
156 | */ | ||
157 | static int no_overlapping_node(struct jffs2_sb_info *c, struct rb_root *root, | ||
158 | struct jffs2_node_frag *newfrag, | ||
159 | struct jffs2_node_frag *this, uint32_t lastend) | ||
130 | { | 160 | { |
131 | while (ref && ref->next_in_ino) { | 161 | if (lastend < newfrag->node->ofs) { |
132 | if (!ref_obsolete(ref)) | 162 | /* put a hole in before the new fragment */ |
133 | return ref; | 163 | struct jffs2_node_frag *holefrag; |
134 | D1(printk(KERN_DEBUG "node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref))); | 164 | |
135 | ref = ref->next_in_ino; | 165 | holefrag= new_fragment(NULL, lastend, newfrag->node->ofs - lastend); |
166 | if (unlikely(!holefrag)) { | ||
167 | jffs2_free_node_frag(newfrag); | ||
168 | return -ENOMEM; | ||
169 | } | ||
170 | |||
171 | if (this) { | ||
172 | /* By definition, the 'this' node has no right-hand child, | ||
173 | because there are no frags with offset greater than it. | ||
174 | So that's where we want to put the hole */ | ||
175 | dbg_fragtree2("add hole frag %#04x-%#04x on the right of the new frag.\n", | ||
176 | holefrag->ofs, holefrag->ofs + holefrag->size); | ||
177 | rb_link_node(&holefrag->rb, &this->rb, &this->rb.rb_right); | ||
178 | } else { | ||
179 | dbg_fragtree2("Add hole frag %#04x-%#04x to the root of the tree.\n", | ||
180 | holefrag->ofs, holefrag->ofs + holefrag->size); | ||
181 | rb_link_node(&holefrag->rb, NULL, &root->rb_node); | ||
182 | } | ||
183 | rb_insert_color(&holefrag->rb, root); | ||
184 | this = holefrag; | ||
185 | } | ||
186 | |||
187 | if (this) { | ||
188 | /* By definition, the 'this' node has no right-hand child, | ||
189 | because there are no frags with offset greater than it. | ||
190 | So that's where we want to put the new fragment */ | ||
191 | dbg_fragtree2("add the new node at the right\n"); | ||
192 | rb_link_node(&newfrag->rb, &this->rb, &this->rb.rb_right); | ||
193 | } else { | ||
194 | dbg_fragtree2("insert the new node at the root of the tree\n"); | ||
195 | rb_link_node(&newfrag->rb, NULL, &root->rb_node); | ||
136 | } | 196 | } |
137 | return NULL; | 197 | rb_insert_color(&newfrag->rb, root); |
198 | |||
199 | return 0; | ||
138 | } | 200 | } |
139 | 201 | ||
140 | /* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated | 202 | /* Doesn't set inode->i_size */ |
141 | with this ino, returning the former in order of version */ | 203 | static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *root, struct jffs2_node_frag *newfrag) |
204 | { | ||
205 | struct jffs2_node_frag *this; | ||
206 | uint32_t lastend; | ||
207 | |||
208 | /* Skip all the nodes which are completed before this one starts */ | ||
209 | this = jffs2_lookup_node_frag(root, newfrag->node->ofs); | ||
210 | |||
211 | if (this) { | ||
212 | dbg_fragtree2("lookup gave frag 0x%04x-0x%04x; phys 0x%08x (*%p)\n", | ||
213 | this->ofs, this->ofs+this->size, this->node?(ref_offset(this->node->raw)):0xffffffff, this); | ||
214 | lastend = this->ofs + this->size; | ||
215 | } else { | ||
216 | dbg_fragtree2("lookup gave no frag\n"); | ||
217 | lastend = 0; | ||
218 | } | ||
219 | |||
220 | /* See if we ran off the end of the fragtree */ | ||
221 | if (lastend <= newfrag->ofs) { | ||
222 | /* We did */ | ||
223 | |||
224 | /* Check if 'this' node was on the same page as the new node. | ||
225 | If so, both 'this' and the new node get marked REF_NORMAL so | ||
226 | the GC can take a look. | ||
227 | */ | ||
228 | if (lastend && (lastend-1) >> PAGE_CACHE_SHIFT == newfrag->ofs >> PAGE_CACHE_SHIFT) { | ||
229 | if (this->node) | ||
230 | mark_ref_normal(this->node->raw); | ||
231 | mark_ref_normal(newfrag->node->raw); | ||
232 | } | ||
233 | |||
234 | return no_overlapping_node(c, root, newfrag, this, lastend); | ||
235 | } | ||
142 | 236 | ||
143 | int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | 237 | if (this->node) |
144 | struct rb_root *tnp, struct jffs2_full_dirent **fdp, | 238 | dbg_fragtree2("dealing with frag %u-%u, phys %#08x(%d).\n", |
145 | uint32_t *highest_version, uint32_t *latest_mctime, | 239 | this->ofs, this->ofs + this->size, |
146 | uint32_t *mctime_ver) | 240 | ref_offset(this->node->raw), ref_flags(this->node->raw)); |
241 | else | ||
242 | dbg_fragtree2("dealing with hole frag %u-%u.\n", | ||
243 | this->ofs, this->ofs + this->size); | ||
244 | |||
245 | /* OK. 'this' is pointing at the first frag that newfrag->ofs at least partially obsoletes, | ||
246 | * - i.e. newfrag->ofs < this->ofs+this->size && newfrag->ofs >= this->ofs | ||
247 | */ | ||
248 | if (newfrag->ofs > this->ofs) { | ||
249 | /* This node isn't completely obsoleted. The start of it remains valid */ | ||
250 | |||
251 | /* Mark the new node and the partially covered node REF_NORMAL -- let | ||
252 | the GC take a look at them */ | ||
253 | mark_ref_normal(newfrag->node->raw); | ||
254 | if (this->node) | ||
255 | mark_ref_normal(this->node->raw); | ||
256 | |||
257 | if (this->ofs + this->size > newfrag->ofs + newfrag->size) { | ||
258 | /* The new node splits 'this' frag into two */ | ||
259 | struct jffs2_node_frag *newfrag2; | ||
260 | |||
261 | if (this->node) | ||
262 | dbg_fragtree2("split old frag 0x%04x-0x%04x, phys 0x%08x\n", | ||
263 | this->ofs, this->ofs+this->size, ref_offset(this->node->raw)); | ||
264 | else | ||
265 | dbg_fragtree2("split old hole frag 0x%04x-0x%04x\n", | ||
266 | this->ofs, this->ofs+this->size); | ||
267 | |||
268 | /* New second frag pointing to this's node */ | ||
269 | newfrag2 = new_fragment(this->node, newfrag->ofs + newfrag->size, | ||
270 | this->ofs + this->size - newfrag->ofs - newfrag->size); | ||
271 | if (unlikely(!newfrag2)) | ||
272 | return -ENOMEM; | ||
273 | if (this->node) | ||
274 | this->node->frags++; | ||
275 | |||
276 | /* Adjust size of original 'this' */ | ||
277 | this->size = newfrag->ofs - this->ofs; | ||
278 | |||
279 | /* Now, we know there's no node with offset | ||
280 | greater than this->ofs but smaller than | ||
281 | newfrag2->ofs or newfrag->ofs, for obvious | ||
282 | reasons. So we can do a tree insert from | ||
283 | 'this' to insert newfrag, and a tree insert | ||
284 | from newfrag to insert newfrag2. */ | ||
285 | jffs2_fragtree_insert(newfrag, this); | ||
286 | rb_insert_color(&newfrag->rb, root); | ||
287 | |||
288 | jffs2_fragtree_insert(newfrag2, newfrag); | ||
289 | rb_insert_color(&newfrag2->rb, root); | ||
290 | |||
291 | return 0; | ||
292 | } | ||
293 | /* New node just reduces 'this' frag in size, doesn't split it */ | ||
294 | this->size = newfrag->ofs - this->ofs; | ||
295 | |||
296 | /* Again, we know it lives down here in the tree */ | ||
297 | jffs2_fragtree_insert(newfrag, this); | ||
298 | rb_insert_color(&newfrag->rb, root); | ||
299 | } else { | ||
300 | /* New frag starts at the same point as 'this' used to. Replace | ||
301 | it in the tree without doing a delete and insertion */ | ||
302 | dbg_fragtree2("inserting newfrag (*%p),%d-%d in before 'this' (*%p),%d-%d\n", | ||
303 | newfrag, newfrag->ofs, newfrag->ofs+newfrag->size, this, this->ofs, this->ofs+this->size); | ||
304 | |||
305 | rb_replace_node(&this->rb, &newfrag->rb, root); | ||
306 | |||
307 | if (newfrag->ofs + newfrag->size >= this->ofs+this->size) { | ||
308 | dbg_fragtree2("obsoleting node frag %p (%x-%x)\n", this, this->ofs, this->ofs+this->size); | ||
309 | jffs2_obsolete_node_frag(c, this); | ||
310 | } else { | ||
311 | this->ofs += newfrag->size; | ||
312 | this->size -= newfrag->size; | ||
313 | |||
314 | jffs2_fragtree_insert(this, newfrag); | ||
315 | rb_insert_color(&this->rb, root); | ||
316 | return 0; | ||
317 | } | ||
318 | } | ||
319 | /* OK, now we have newfrag added in the correct place in the tree, but | ||
320 | frag_next(newfrag) may be a fragment which is overlapped by it | ||
321 | */ | ||
322 | while ((this = frag_next(newfrag)) && newfrag->ofs + newfrag->size >= this->ofs + this->size) { | ||
323 | /* 'this' frag is obsoleted completely. */ | ||
324 | dbg_fragtree2("obsoleting node frag %p (%x-%x) and removing from tree\n", | ||
325 | this, this->ofs, this->ofs+this->size); | ||
326 | rb_erase(&this->rb, root); | ||
327 | jffs2_obsolete_node_frag(c, this); | ||
328 | } | ||
329 | /* Now we're pointing at the first frag which isn't totally obsoleted by | ||
330 | the new frag */ | ||
331 | |||
332 | if (!this || newfrag->ofs + newfrag->size == this->ofs) | ||
333 | return 0; | ||
334 | |||
335 | /* Still some overlap but we don't need to move it in the tree */ | ||
336 | this->size = (this->ofs + this->size) - (newfrag->ofs + newfrag->size); | ||
337 | this->ofs = newfrag->ofs + newfrag->size; | ||
338 | |||
339 | /* And mark them REF_NORMAL so the GC takes a look at them */ | ||
340 | if (this->node) | ||
341 | mark_ref_normal(this->node->raw); | ||
342 | mark_ref_normal(newfrag->node->raw); | ||
343 | |||
344 | return 0; | ||
345 | } | ||
346 | |||
347 | /* | ||
348 | * Given an inode, probably with existing tree of fragments, add the new node | ||
349 | * to the fragment tree. | ||
350 | */ | ||
351 | int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn) | ||
147 | { | 352 | { |
148 | struct jffs2_raw_node_ref *ref, *valid_ref; | 353 | int ret; |
149 | struct jffs2_tmp_dnode_info *tn; | 354 | struct jffs2_node_frag *newfrag; |
150 | struct rb_root ret_tn = RB_ROOT; | ||
151 | struct jffs2_full_dirent *fd, *ret_fd = NULL; | ||
152 | union jffs2_node_union node; | ||
153 | size_t retlen; | ||
154 | int err; | ||
155 | |||
156 | *mctime_ver = 0; | ||
157 | |||
158 | D1(printk(KERN_DEBUG "jffs2_get_inode_nodes(): ino #%u\n", f->inocache->ino)); | ||
159 | 355 | ||
160 | spin_lock(&c->erase_completion_lock); | 356 | if (unlikely(!fn->size)) |
357 | return 0; | ||
161 | 358 | ||
162 | valid_ref = jffs2_first_valid_node(f->inocache->nodes); | 359 | newfrag = new_fragment(fn, fn->ofs, fn->size); |
360 | if (unlikely(!newfrag)) | ||
361 | return -ENOMEM; | ||
362 | newfrag->node->frags = 1; | ||
163 | 363 | ||
164 | if (!valid_ref && (f->inocache->ino != 1)) | 364 | dbg_fragtree("adding node %#04x-%#04x @0x%08x on flash, newfrag *%p\n", |
165 | printk(KERN_WARNING "Eep. No valid nodes for ino #%u\n", f->inocache->ino); | 365 | fn->ofs, fn->ofs+fn->size, ref_offset(fn->raw), newfrag); |
166 | 366 | ||
167 | while (valid_ref) { | 367 | ret = jffs2_add_frag_to_fragtree(c, &f->fragtree, newfrag); |
168 | /* We can hold a pointer to a non-obsolete node without the spinlock, | 368 | if (unlikely(ret)) |
169 | but _obsolete_ nodes may disappear at any time, if the block | 369 | return ret; |
170 | they're in gets erased. So if we mark 'ref' obsolete while we're | ||
171 | not holding the lock, it can go away immediately. For that reason, | ||
172 | we find the next valid node first, before processing 'ref'. | ||
173 | */ | ||
174 | ref = valid_ref; | ||
175 | valid_ref = jffs2_first_valid_node(ref->next_in_ino); | ||
176 | spin_unlock(&c->erase_completion_lock); | ||
177 | 370 | ||
178 | cond_resched(); | 371 | /* If we now share a page with other nodes, mark either previous |
372 | or next node REF_NORMAL, as appropriate. */ | ||
373 | if (newfrag->ofs & (PAGE_CACHE_SIZE-1)) { | ||
374 | struct jffs2_node_frag *prev = frag_prev(newfrag); | ||
375 | |||
376 | mark_ref_normal(fn->raw); | ||
377 | /* If we don't start at zero there's _always_ a previous */ | ||
378 | if (prev->node) | ||
379 | mark_ref_normal(prev->node->raw); | ||
380 | } | ||
381 | |||
382 | if ((newfrag->ofs+newfrag->size) & (PAGE_CACHE_SIZE-1)) { | ||
383 | struct jffs2_node_frag *next = frag_next(newfrag); | ||
384 | |||
385 | if (next) { | ||
386 | mark_ref_normal(fn->raw); | ||
387 | if (next->node) | ||
388 | mark_ref_normal(next->node->raw); | ||
389 | } | ||
390 | } | ||
391 | jffs2_dbg_fragtree_paranoia_check_nolock(f); | ||
392 | |||
393 | return 0; | ||
394 | } | ||
395 | |||
396 | /* | ||
397 | * Check the data CRC of the node. | ||
398 | * | ||
399 | * Returns: 0 if the data CRC is correct; | ||
400 | * 1 - if incorrect; | ||
401 | * error code if an error occurred. | ||
402 | */ | ||
403 | static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn) | ||
404 | { | ||
405 | struct jffs2_raw_node_ref *ref = tn->fn->raw; | ||
406 | int err = 0, pointed = 0; | ||
407 | struct jffs2_eraseblock *jeb; | ||
408 | unsigned char *buffer; | ||
409 | uint32_t crc, ofs, retlen, len; | ||
410 | |||
411 | BUG_ON(tn->csize == 0); | ||
412 | |||
413 | if (!jffs2_is_writebuffered(c)) | ||
414 | goto adj_acc; | ||
415 | |||
416 | /* Calculate how many bytes were already checked */ | ||
417 | ofs = ref_offset(ref) + sizeof(struct jffs2_raw_inode); | ||
418 | len = ofs % c->wbuf_pagesize; | ||
419 | if (likely(len)) | ||
420 | len = c->wbuf_pagesize - len; | ||
421 | |||
422 | if (len >= tn->csize) { | ||
423 | dbg_readinode("no need to check node at %#08x, data length %u, data starts at %#08x - it has already been checked.\n", | ||
424 | ref_offset(ref), tn->csize, ofs); | ||
425 | goto adj_acc; | ||
426 | } | ||
427 | |||
428 | ofs += len; | ||
429 | len = tn->csize - len; | ||
430 | |||
431 | dbg_readinode("check node at %#08x, data length %u, partial CRC %#08x, correct CRC %#08x, data starts at %#08x, start checking from %#08x - %u bytes.\n", | ||
432 | ref_offset(ref), tn->csize, tn->partial_crc, tn->data_crc, ofs - len, ofs, len); | ||
433 | |||
434 | #ifndef __ECOS | ||
435 | /* TODO: instead, encapsulate the point() stuff in jffs2_flash_read(), | ||
436 | * adding a jffs2_flash_read_end() interface. */ | ||
437 | if (c->mtd->point) { | ||
438 | err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer); | ||
439 | if (!err && retlen < tn->csize) { | ||
440 | JFFS2_WARNING("MTD point returned len too short: %u instead of %u.\n", retlen, tn->csize); | ||
441 | c->mtd->unpoint(c->mtd, buffer, ofs, len); | ||
442 | } else if (err) | ||
443 | JFFS2_WARNING("MTD point failed: error code %d.\n", err); | ||
444 | else | ||
445 | pointed = 1; /* successfully pointed to device */ | ||
446 | } | ||
447 | #endif | ||
448 | |||
449 | if (!pointed) { | ||
450 | buffer = kmalloc(len, GFP_KERNEL); | ||
451 | if (unlikely(!buffer)) | ||
452 | return -ENOMEM; | ||
179 | 453 | ||
180 | /* FIXME: point() */ | 454 | /* TODO: this is a very frequent pattern, make it a separate |
181 | err = jffs2_flash_read(c, (ref_offset(ref)), | 455 | * routine */ |
182 | min_t(uint32_t, ref_totlen(c, NULL, ref), sizeof(node)), | 456 | err = jffs2_flash_read(c, ofs, len, &retlen, buffer); |
183 | &retlen, (void *)&node); | ||
184 | if (err) { | 457 | if (err) { |
185 | printk(KERN_WARNING "error %d reading node at 0x%08x in get_inode_nodes()\n", err, ref_offset(ref)); | 458 | JFFS2_ERROR("can not read %d bytes from 0x%08x, error code: %d.\n", len, ofs, err); |
186 | goto free_out; | 459 | goto free_out; |
187 | } | 460 | } |
188 | |||
189 | 461 | ||
190 | /* Check we've managed to read at least the common node header */ | 462 | if (retlen != len) { |
191 | if (retlen < min_t(uint32_t, ref_totlen(c, NULL, ref), sizeof(node.u))) { | 463 | JFFS2_ERROR("short read at %#08x: %d instead of %d.\n", ofs, retlen, len); |
192 | printk(KERN_WARNING "short read in get_inode_nodes()\n"); | ||
193 | err = -EIO; | 464 | err = -EIO; |
194 | goto free_out; | 465 | goto free_out; |
195 | } | 466 | } |
196 | 467 | } | |
197 | switch (je16_to_cpu(node.u.nodetype)) { | ||
198 | case JFFS2_NODETYPE_DIRENT: | ||
199 | D1(printk(KERN_DEBUG "Node at %08x (%d) is a dirent node\n", ref_offset(ref), ref_flags(ref))); | ||
200 | if (ref_flags(ref) == REF_UNCHECKED) { | ||
201 | printk(KERN_WARNING "BUG: Dirent node at 0x%08x never got checked? How?\n", ref_offset(ref)); | ||
202 | BUG(); | ||
203 | } | ||
204 | if (retlen < sizeof(node.d)) { | ||
205 | printk(KERN_WARNING "short read in get_inode_nodes()\n"); | ||
206 | err = -EIO; | ||
207 | goto free_out; | ||
208 | } | ||
209 | /* sanity check */ | ||
210 | if (PAD((node.d.nsize + sizeof (node.d))) != PAD(je32_to_cpu (node.d.totlen))) { | ||
211 | printk(KERN_NOTICE "jffs2_get_inode_nodes(): Illegal nsize in node at 0x%08x: nsize 0x%02x, totlen %04x\n", | ||
212 | ref_offset(ref), node.d.nsize, je32_to_cpu(node.d.totlen)); | ||
213 | jffs2_mark_node_obsolete(c, ref); | ||
214 | spin_lock(&c->erase_completion_lock); | ||
215 | continue; | ||
216 | } | ||
217 | if (je32_to_cpu(node.d.version) > *highest_version) | ||
218 | *highest_version = je32_to_cpu(node.d.version); | ||
219 | if (ref_obsolete(ref)) { | ||
220 | /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */ | ||
221 | printk(KERN_ERR "Dirent node at 0x%08x became obsolete while we weren't looking\n", | ||
222 | ref_offset(ref)); | ||
223 | BUG(); | ||
224 | } | ||
225 | |||
226 | fd = jffs2_alloc_full_dirent(node.d.nsize+1); | ||
227 | if (!fd) { | ||
228 | err = -ENOMEM; | ||
229 | goto free_out; | ||
230 | } | ||
231 | fd->raw = ref; | ||
232 | fd->version = je32_to_cpu(node.d.version); | ||
233 | fd->ino = je32_to_cpu(node.d.ino); | ||
234 | fd->type = node.d.type; | ||
235 | |||
236 | /* Pick out the mctime of the latest dirent */ | ||
237 | if(fd->version > *mctime_ver) { | ||
238 | *mctime_ver = fd->version; | ||
239 | *latest_mctime = je32_to_cpu(node.d.mctime); | ||
240 | } | ||
241 | 468 | ||
242 | /* memcpy as much of the name as possible from the raw | 469 | /* Continue calculating CRC */ |
243 | dirent we've already read from the flash | 470 | crc = crc32(tn->partial_crc, buffer, len); |
244 | */ | 471 | if(!pointed) |
245 | if (retlen > sizeof(struct jffs2_raw_dirent)) | 472 | kfree(buffer); |
246 | memcpy(&fd->name[0], &node.d.name[0], min_t(uint32_t, node.d.nsize, (retlen-sizeof(struct jffs2_raw_dirent)))); | 473 | #ifndef __ECOS |
247 | 474 | else | |
248 | /* Do we need to copy any more of the name directly | 475 | c->mtd->unpoint(c->mtd, buffer, ofs, len); |
249 | from the flash? | 476 | #endif |
250 | */ | ||
251 | if (node.d.nsize + sizeof(struct jffs2_raw_dirent) > retlen) { | ||
252 | /* FIXME: point() */ | ||
253 | int already = retlen - sizeof(struct jffs2_raw_dirent); | ||
254 | |||
255 | err = jffs2_flash_read(c, (ref_offset(ref)) + retlen, | ||
256 | node.d.nsize - already, &retlen, &fd->name[already]); | ||
257 | if (!err && retlen != node.d.nsize - already) | ||
258 | err = -EIO; | ||
259 | |||
260 | if (err) { | ||
261 | printk(KERN_WARNING "Read remainder of name in jffs2_get_inode_nodes(): error %d\n", err); | ||
262 | jffs2_free_full_dirent(fd); | ||
263 | goto free_out; | ||
264 | } | ||
265 | } | ||
266 | fd->nhash = full_name_hash(fd->name, node.d.nsize); | ||
267 | fd->next = NULL; | ||
268 | fd->name[node.d.nsize] = '\0'; | ||
269 | /* Wheee. We now have a complete jffs2_full_dirent structure, with | ||
270 | the name in it and everything. Link it into the list | ||
271 | */ | ||
272 | D1(printk(KERN_DEBUG "Adding fd \"%s\", ino #%u\n", fd->name, fd->ino)); | ||
273 | jffs2_add_fd_to_list(c, fd, &ret_fd); | ||
274 | break; | ||
275 | |||
276 | case JFFS2_NODETYPE_INODE: | ||
277 | D1(printk(KERN_DEBUG "Node at %08x (%d) is a data node\n", ref_offset(ref), ref_flags(ref))); | ||
278 | if (retlen < sizeof(node.i)) { | ||
279 | printk(KERN_WARNING "read too short for dnode\n"); | ||
280 | err = -EIO; | ||
281 | goto free_out; | ||
282 | } | ||
283 | if (je32_to_cpu(node.i.version) > *highest_version) | ||
284 | *highest_version = je32_to_cpu(node.i.version); | ||
285 | D1(printk(KERN_DEBUG "version %d, highest_version now %d\n", je32_to_cpu(node.i.version), *highest_version)); | ||
286 | |||
287 | if (ref_obsolete(ref)) { | ||
288 | /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */ | ||
289 | printk(KERN_ERR "Inode node at 0x%08x became obsolete while we weren't looking\n", | ||
290 | ref_offset(ref)); | ||
291 | BUG(); | ||
292 | } | ||
293 | 477 | ||
294 | /* If we've never checked the CRCs on this node, check them now. */ | 478 | if (crc != tn->data_crc) { |
295 | if (ref_flags(ref) == REF_UNCHECKED) { | 479 | JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n", |
296 | uint32_t crc, len; | 480 | ofs, tn->data_crc, crc); |
297 | struct jffs2_eraseblock *jeb; | 481 | return 1; |
298 | 482 | } | |
299 | crc = crc32(0, &node, sizeof(node.i)-8); | ||
300 | if (crc != je32_to_cpu(node.i.node_crc)) { | ||
301 | printk(KERN_NOTICE "jffs2_get_inode_nodes(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", | ||
302 | ref_offset(ref), je32_to_cpu(node.i.node_crc), crc); | ||
303 | jffs2_mark_node_obsolete(c, ref); | ||
304 | spin_lock(&c->erase_completion_lock); | ||
305 | continue; | ||
306 | } | ||
307 | |||
308 | /* sanity checks */ | ||
309 | if ( je32_to_cpu(node.i.offset) > je32_to_cpu(node.i.isize) || | ||
310 | PAD(je32_to_cpu(node.i.csize) + sizeof (node.i)) != PAD(je32_to_cpu(node.i.totlen))) { | ||
311 | printk(KERN_NOTICE "jffs2_get_inode_nodes(): Inode corrupted at 0x%08x, totlen %d, #ino %d, version %d, isize %d, csize %d, dsize %d \n", | ||
312 | ref_offset(ref), je32_to_cpu(node.i.totlen), je32_to_cpu(node.i.ino), | ||
313 | je32_to_cpu(node.i.version), je32_to_cpu(node.i.isize), | ||
314 | je32_to_cpu(node.i.csize), je32_to_cpu(node.i.dsize)); | ||
315 | jffs2_mark_node_obsolete(c, ref); | ||
316 | spin_lock(&c->erase_completion_lock); | ||
317 | continue; | ||
318 | } | ||
319 | 483 | ||
320 | if (node.i.compr != JFFS2_COMPR_ZERO && je32_to_cpu(node.i.csize)) { | 484 | adj_acc: |
321 | unsigned char *buf=NULL; | 485 | jeb = &c->blocks[ref->flash_offset / c->sector_size]; |
322 | uint32_t pointed = 0; | 486 | len = ref_totlen(c, jeb, ref); |
323 | #ifndef __ECOS | 487 | |
324 | if (c->mtd->point) { | 488 | /* |
325 | err = c->mtd->point (c->mtd, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize), | 489 | * Mark the node as having been checked and fix the |
326 | &retlen, &buf); | 490 | * accounting accordingly. |
327 | if (!err && retlen < je32_to_cpu(node.i.csize)) { | 491 | */ |
328 | D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", retlen)); | 492 | spin_lock(&c->erase_completion_lock); |
329 | c->mtd->unpoint(c->mtd, buf, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize)); | 493 | jeb->used_size += len; |
330 | } else if (err){ | 494 | jeb->unchecked_size -= len; |
331 | D1(printk(KERN_DEBUG "MTD point failed %d\n", err)); | 495 | c->used_size += len; |
332 | } else | 496 | c->unchecked_size -= len; |
333 | pointed = 1; /* successfully pointed to device */ | 497 | spin_unlock(&c->erase_completion_lock); |
334 | } | 498 | |
335 | #endif | 499 | return 0; |
336 | if(!pointed){ | 500 | |
337 | buf = kmalloc(je32_to_cpu(node.i.csize), GFP_KERNEL); | 501 | free_out: |
338 | if (!buf) | 502 | if(!pointed) |
339 | return -ENOMEM; | 503 | kfree(buffer); |
340 | |||
341 | err = jffs2_flash_read(c, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize), | ||
342 | &retlen, buf); | ||
343 | if (!err && retlen != je32_to_cpu(node.i.csize)) | ||
344 | err = -EIO; | ||
345 | if (err) { | ||
346 | kfree(buf); | ||
347 | return err; | ||
348 | } | ||
349 | } | ||
350 | crc = crc32(0, buf, je32_to_cpu(node.i.csize)); | ||
351 | if(!pointed) | ||
352 | kfree(buf); | ||
353 | #ifndef __ECOS | 504 | #ifndef __ECOS |
354 | else | 505 | else |
355 | c->mtd->unpoint(c->mtd, buf, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize)); | 506 | c->mtd->unpoint(c->mtd, buffer, ofs, len); |
356 | #endif | 507 | #endif |
508 | return err; | ||
509 | } | ||
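
check_node_data() only re-reads the part of the node's data that was not already CRC-summed when the write buffer was flushed: everything from the start of the data up to the next wbuf page boundary is covered by tn->partial_crc. A standalone sketch of that arithmetic, assuming a 512-byte write-buffer page, a 68-byte raw inode header, and made-up offsets:

    #include <stdio.h>

    int main(void)
    {
            unsigned wbuf_pagesize = 512;      /* assumed c->wbuf_pagesize          */
            unsigned data_ofs = 0x1234 + 68;   /* ref_offset + assumed 68-byte hdr  */
            unsigned csize    = 900;           /* (compressed) data length          */

            /* Bytes from data_ofs up to the next wbuf page boundary were already
             * CRC-summed when the write buffer was flushed (tn->partial_crc).    */
            unsigned already = data_ofs % wbuf_pagesize;
            if (already)
                    already = wbuf_pagesize - already;

            if (already >= csize)
                    printf("whole node already checked, nothing to read back\n");
            else
                    printf("skip %u checked bytes, read %u bytes from 0x%08x\n",
                           already, csize - already, data_ofs + already);
            return 0;
    }
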
357 | 510 | ||
358 | if (crc != je32_to_cpu(node.i.data_crc)) { | 511 | /* |
359 | printk(KERN_NOTICE "jffs2_get_inode_nodes(): Data CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", | 512 | * Helper function for jffs2_add_older_frag_to_fragtree(). |
360 | ref_offset(ref), je32_to_cpu(node.i.data_crc), crc); | 513 | * |
361 | jffs2_mark_node_obsolete(c, ref); | 514 | * Checks the node if we are in the checking stage. |
362 | spin_lock(&c->erase_completion_lock); | 515 | */ |
363 | continue; | 516 | static inline int check_node(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_tmp_dnode_info *tn) |
364 | } | 517 | { |
365 | 518 | int ret; | |
366 | } | ||
367 | 519 | ||
368 | /* Mark the node as having been checked and fix the accounting accordingly */ | 520 | BUG_ON(ref_obsolete(tn->fn->raw)); |
369 | spin_lock(&c->erase_completion_lock); | 521 | |
370 | jeb = &c->blocks[ref->flash_offset / c->sector_size]; | 522 | /* We only check the data CRC of unchecked nodes */ |
371 | len = ref_totlen(c, jeb, ref); | 523 | if (ref_flags(tn->fn->raw) != REF_UNCHECKED) |
372 | 524 | return 0; | |
373 | jeb->used_size += len; | 525 | |
374 | jeb->unchecked_size -= len; | 526 | dbg_fragtree2("check node %#04x-%#04x, phys offs %#08x.\n", |
375 | c->used_size += len; | 527 | tn->fn->ofs, tn->fn->ofs + tn->fn->size, ref_offset(tn->fn->raw)); |
376 | c->unchecked_size -= len; | 528 | |
377 | 529 | ret = check_node_data(c, tn); | |
378 | /* If node covers at least a whole page, or if it starts at the | 530 | if (unlikely(ret < 0)) { |
379 | beginning of a page and runs to the end of the file, or if | 531 | JFFS2_ERROR("check_node_data() returned error: %d.\n", |
380 | it's a hole node, mark it REF_PRISTINE, else REF_NORMAL. | 532 | ret); |
381 | 533 | } else if (unlikely(ret > 0)) { | |
382 | If it's actually overlapped, it'll get made NORMAL (or OBSOLETE) | 534 | dbg_fragtree2("CRC error, mark it obsolete.\n"); |
383 | when the overlapping node(s) get added to the tree anyway. | 535 | jffs2_mark_node_obsolete(c, tn->fn->raw); |
384 | */ | 536 | } |
385 | if ((je32_to_cpu(node.i.dsize) >= PAGE_CACHE_SIZE) || | 537 | |
386 | ( ((je32_to_cpu(node.i.offset)&(PAGE_CACHE_SIZE-1))==0) && | 538 | return ret; |
387 | (je32_to_cpu(node.i.dsize)+je32_to_cpu(node.i.offset) == je32_to_cpu(node.i.isize)))) { | 539 | } |
388 | D1(printk(KERN_DEBUG "Marking node at 0x%08x REF_PRISTINE\n", ref_offset(ref))); | 540 | |
389 | ref->flash_offset = ref_offset(ref) | REF_PRISTINE; | 541 | /* |
390 | } else { | 542 | * Helper function for jffs2_add_older_frag_to_fragtree(). |
391 | D1(printk(KERN_DEBUG "Marking node at 0x%08x REF_NORMAL\n", ref_offset(ref))); | 543 | * |
392 | ref->flash_offset = ref_offset(ref) | REF_NORMAL; | 544 | * Called when the new fragment that is being inserted |
393 | } | 545 | * splits a hole fragment. |
394 | spin_unlock(&c->erase_completion_lock); | 546 | */ |
547 | static int split_hole(struct jffs2_sb_info *c, struct rb_root *root, | ||
548 | struct jffs2_node_frag *newfrag, struct jffs2_node_frag *hole) | ||
549 | { | ||
550 | dbg_fragtree2("fragment %#04x-%#04x splits the hole %#04x-%#04x\n", | ||
551 | newfrag->ofs, newfrag->ofs + newfrag->size, hole->ofs, hole->ofs + hole->size); | ||
552 | |||
553 | if (hole->ofs == newfrag->ofs) { | ||
554 | /* | ||
555 | * Well, the new fragment actually starts at the same offset as | ||
556 | * the hole. | ||
557 | */ | ||
558 | if (hole->ofs + hole->size > newfrag->ofs + newfrag->size) { | ||
559 | /* | ||
560 | * We replace the overlapped left part of the hole by | ||
561 | * the new node. | ||
562 | */ | ||
563 | |||
564 | dbg_fragtree2("insert fragment %#04x-%#04x and cut the left part of the hole\n", | ||
565 | newfrag->ofs, newfrag->ofs + newfrag->size); | ||
566 | rb_replace_node(&hole->rb, &newfrag->rb, root); | ||
567 | |||
568 | hole->ofs += newfrag->size; | ||
569 | hole->size -= newfrag->size; | ||
570 | |||
571 | /* | ||
572 | * We know that 'hole' should be the right hand | ||
573 | * fragment. | ||
574 | */ | ||
575 | jffs2_fragtree_insert(hole, newfrag); | ||
576 | rb_insert_color(&hole->rb, root); | ||
577 | } else { | ||
578 | /* | ||
579 | * Ah, the new fragment is of the same size as the hole. | ||
580 | * Replace the hole with it. | ||
581 | */ | ||
582 | dbg_fragtree2("insert fragment %#04x-%#04x and overwrite hole\n", | ||
583 | newfrag->ofs, newfrag->ofs + newfrag->size); | ||
584 | rb_replace_node(&hole->rb, &newfrag->rb, root); | ||
585 | jffs2_free_node_frag(hole); | ||
586 | } | ||
587 | } else { | ||
588 | /* The new fragment leaves some hole space on the left */ | ||
589 | |||
590 | struct jffs2_node_frag * newfrag2 = NULL; | ||
591 | |||
592 | if (hole->ofs + hole->size > newfrag->ofs + newfrag->size) { | ||
593 | /* The new frag also leaves some space on the right */ | ||
594 | newfrag2 = new_fragment(NULL, newfrag->ofs + | ||
595 | newfrag->size, hole->ofs + hole->size | ||
596 | - newfrag->ofs - newfrag->size); | ||
597 | if (unlikely(!newfrag2)) { | ||
598 | jffs2_free_node_frag(newfrag); | ||
599 | return -ENOMEM; | ||
395 | } | 600 | } |
601 | } | ||
602 | |||
603 | hole->size = newfrag->ofs - hole->ofs; | ||
604 | dbg_fragtree2("left the hole %#04x-%#04x at the left and inserted fragment %#04x-%#04x\n", | ||
605 | hole->ofs, hole->ofs + hole->size, newfrag->ofs, newfrag->ofs + newfrag->size); | ||
606 | |||
607 | jffs2_fragtree_insert(newfrag, hole); | ||
608 | rb_insert_color(&newfrag->rb, root); | ||
609 | |||
610 | if (newfrag2) { | ||
611 | dbg_fragtree2("left the hole %#04x-%#04x at the right\n", | ||
612 | newfrag2->ofs, newfrag2->ofs + newfrag2->size); | ||
613 | jffs2_fragtree_insert(newfrag2, newfrag); | ||
614 | rb_insert_color(&newfrag2->rb, root); | ||
615 | } | ||
616 | } | ||
617 | |||
618 | return 0; | ||
619 | } | ||
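
split_hole() is plain interval arithmetic: depending on where the new fragment falls inside the hole, a piece of the hole survives on the left, on the right, on both sides, or not at all. A standalone sketch of the cases, with made-up offsets and no rb-tree bookkeeping:

    #include <stdio.h>

    static void split(unsigned hole_ofs, unsigned hole_end,
                      unsigned new_ofs,  unsigned new_end)
    {
            printf("hole %#06x-%#06x, new %#06x-%#06x:", hole_ofs, hole_end, new_ofs, new_end);

            if (new_ofs == hole_ofs && new_end == hole_end)
                    printf(" new fragment replaces the hole exactly");
            if (new_ofs > hole_ofs)                 /* hole piece survives on the left  */
                    printf(" left hole %#06x-%#06x", hole_ofs, new_ofs);
            if (new_end < hole_end)                 /* hole piece survives on the right */
                    printf(" right hole %#06x-%#06x", new_end, hole_end);
            printf("\n");
    }

    int main(void)
    {
            split(0x1000, 0x3000, 0x1000, 0x3000);  /* exact replacement             */
            split(0x1000, 0x3000, 0x1000, 0x2000);  /* cut the left part of the hole */
            split(0x1000, 0x3000, 0x1800, 0x2800);  /* hole survives on both sides   */
            return 0;
    }
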
620 | |||
621 | /* | ||
622 | * This function is used when we build an inode. It expects the nodes to be | ||
623 | * passed in decreasing version order. The whole point of this is to improve | ||
624 | * inode checking on NAND: we check the nodes' data CRC only when they are not | ||
625 | * obsoleted. Previously, the add_frag_to_fragtree() function was used and | ||
626 | * nodes were passed to it in increasing version order, and the CRCs of all | ||
627 | * nodes were checked. | ||
628 | * | ||
629 | * Note: tn->fn->size shouldn't be zero. | ||
630 | * | ||
631 | * Returns 0 if the node was inserted | ||
632 | * 1 if it wasn't inserted (since it is obsolete) | ||
633 | * < 0 an if error occured | ||
634 | */ | ||
635 | int jffs2_add_older_frag_to_fragtree(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | ||
636 | struct jffs2_tmp_dnode_info *tn) | ||
637 | { | ||
638 | struct jffs2_node_frag *this, *newfrag; | ||
639 | uint32_t lastend; | ||
640 | struct jffs2_full_dnode *fn = tn->fn; | ||
641 | struct rb_root *root = &f->fragtree; | ||
642 | uint32_t fn_size = fn->size, fn_ofs = fn->ofs; | ||
643 | int err, checked = 0; | ||
644 | int ref_flag; | ||
645 | |||
646 | dbg_fragtree("insert fragment %#04x-%#04x, ver %u\n", fn_ofs, fn_ofs + fn_size, tn->version); | ||
647 | |||
648 | /* Skip all the nodes which are completed before this one starts */ | ||
649 | this = jffs2_lookup_node_frag(root, fn_ofs); | ||
650 | if (this) | ||
651 | dbg_fragtree2("'this' found %#04x-%#04x (%s)\n", this->ofs, this->ofs + this->size, this->node ? "data" : "hole"); | ||
652 | |||
653 | if (this) | ||
654 | lastend = this->ofs + this->size; | ||
655 | else | ||
656 | lastend = 0; | ||
657 | |||
658 | /* Detect the preliminary type of node */ | ||
659 | if (fn->size >= PAGE_CACHE_SIZE) | ||
660 | ref_flag = REF_PRISTINE; | ||
661 | else | ||
662 | ref_flag = REF_NORMAL; | ||
663 | |||
664 | /* See if we ran off the end of the root */ | ||
665 | if (lastend <= fn_ofs) { | ||
666 | /* We did */ | ||
667 | |||
668 | /* | ||
669 | * We are going to insert the new node into the | ||
670 | * fragment tree, so check it. | ||
671 | */ | ||
672 | err = check_node(c, f, tn); | ||
673 | if (err != 0) | ||
674 | return err; | ||
675 | |||
676 | fn->frags = 1; | ||
677 | |||
678 | newfrag = new_fragment(fn, fn_ofs, fn_size); | ||
679 | if (unlikely(!newfrag)) | ||
680 | return -ENOMEM; | ||
681 | |||
682 | err = no_overlapping_node(c, root, newfrag, this, lastend); | ||
683 | if (unlikely(err != 0)) { | ||
684 | jffs2_free_node_frag(newfrag); | ||
685 | return err; | ||
686 | } | ||
687 | |||
688 | goto out_ok; | ||
689 | } | ||
396 | 690 | ||
397 | tn = jffs2_alloc_tmp_dnode_info(); | 691 | fn->frags = 0; |
398 | if (!tn) { | 692 | |
399 | D1(printk(KERN_DEBUG "alloc tn failed\n")); | 693 | while (1) { |
400 | err = -ENOMEM; | 694 | /* |
401 | goto free_out; | 695 | * Here we have: |
696 | * fn_ofs < this->ofs + this->size && fn_ofs >= this->ofs. | ||
697 | * | ||
698 | * Remember, 'this' has a higher version; any non-hole node | ||
699 | * which is already in the fragtree is newer than the newly | ||
700 | * inserted one. | ||
701 | */ | ||
702 | if (!this->node) { | ||
703 | /* | ||
704 | * 'this' is the hole fragment, so at least the | ||
705 | * beginning of the new fragment is valid. | ||
706 | */ | ||
707 | |||
708 | /* | ||
709 | * We are going to insert the new node into the | ||
710 | * fragment tree, so check it. | ||
711 | */ | ||
712 | if (!checked) { | ||
713 | err = check_node(c, f, tn); | ||
714 | if (unlikely(err != 0)) | ||
715 | return err; | ||
716 | checked = 1; | ||
402 | } | 717 | } |
403 | 718 | ||
404 | tn->fn = jffs2_alloc_full_dnode(); | 719 | if (this->ofs + this->size >= fn_ofs + fn_size) { |
405 | if (!tn->fn) { | 720 | /* We split the hole on two parts */ |
406 | D1(printk(KERN_DEBUG "alloc fn failed\n")); | 721 | |
407 | err = -ENOMEM; | 722 | fn->frags += 1; |
408 | jffs2_free_tmp_dnode_info(tn); | 723 | newfrag = new_fragment(fn, fn_ofs, fn_size); |
409 | goto free_out; | 724 | if (unlikely(!newfrag)) |
725 | return -ENOMEM; | ||
726 | |||
727 | err = split_hole(c, root, newfrag, this); | ||
728 | if (unlikely(err)) | ||
729 | return err; | ||
730 | goto out_ok; | ||
410 | } | 731 | } |
411 | tn->version = je32_to_cpu(node.i.version); | 732 | |
412 | tn->fn->ofs = je32_to_cpu(node.i.offset); | 733 | /* |
413 | /* There was a bug where we wrote hole nodes out with | 734 | * The beginning of the new fragment is valid since it |
414 | csize/dsize swapped. Deal with it */ | 735 | * overlaps the hole node. |
415 | if (node.i.compr == JFFS2_COMPR_ZERO && !je32_to_cpu(node.i.dsize) && je32_to_cpu(node.i.csize)) | 736 | */ |
416 | tn->fn->size = je32_to_cpu(node.i.csize); | 737 | |
417 | else // normal case... | 738 | ref_flag = REF_NORMAL; |
418 | tn->fn->size = je32_to_cpu(node.i.dsize); | 739 | |
419 | tn->fn->raw = ref; | 740 | fn->frags += 1; |
420 | D1(printk(KERN_DEBUG "dnode @%08x: ver %u, offset %04x, dsize %04x\n", | 741 | newfrag = new_fragment(fn, fn_ofs, |
421 | ref_offset(ref), je32_to_cpu(node.i.version), | 742 | this->ofs + this->size - fn_ofs); |
422 | je32_to_cpu(node.i.offset), je32_to_cpu(node.i.dsize))); | 743 | if (unlikely(!newfrag)) |
423 | jffs2_add_tn_to_tree(tn, &ret_tn); | 744 | return -ENOMEM; |
424 | break; | 745 | |
425 | 746 | if (fn_ofs == this->ofs) { | |
426 | default: | 747 | /* |
427 | if (ref_flags(ref) == REF_UNCHECKED) { | 748 | * The new node starts at the same offset as |
428 | struct jffs2_eraseblock *jeb; | 749 | * the hole and supersedes it. |
429 | uint32_t len; | 750 | */ |
430 | 751 | dbg_fragtree2("add the new fragment instead of hole %#04x-%#04x, refcnt %d\n", | |
431 | printk(KERN_ERR "Eep. Unknown node type %04x at %08x was marked REF_UNCHECKED\n", | 752 | fn_ofs, fn_ofs + this->ofs + this->size - fn_ofs, fn->frags); |
432 | je16_to_cpu(node.u.nodetype), ref_offset(ref)); | 753 | |
433 | 754 | rb_replace_node(&this->rb, &newfrag->rb, root); | |
434 | /* Mark the node as having been checked and fix the accounting accordingly */ | 755 | jffs2_free_node_frag(this); |
435 | spin_lock(&c->erase_completion_lock); | 756 | } else { |
436 | jeb = &c->blocks[ref->flash_offset / c->sector_size]; | 757 | /* |
437 | len = ref_totlen(c, jeb, ref); | 758 | * The hole becomes shorter as its right part |
438 |   759 | * is superseded by the new fragment. |
439 | jeb->used_size += len; | 760 | */ |
440 | jeb->unchecked_size -= len; | 761 | dbg_fragtree2("reduce size of hole %#04x-%#04x to %#04x-%#04x\n", |
441 | c->used_size += len; | 762 | this->ofs, this->ofs + this->size, this->ofs, this->ofs + this->size - newfrag->size); |
442 | c->unchecked_size -= len; | 763 | |
443 | 764 | dbg_fragtree2("add new fragment %#04x-%#04x, refcnt %d\n", fn_ofs, | |
444 | mark_ref_normal(ref); | 765 | fn_ofs + this->ofs + this->size - fn_ofs, fn->frags); |
445 | spin_unlock(&c->erase_completion_lock); | 766 | |
767 | this->size -= newfrag->size; | ||
768 | jffs2_fragtree_insert(newfrag, this); | ||
769 | rb_insert_color(&newfrag->rb, root); | ||
446 | } | 770 | } |
447 | node.u.nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(node.u.nodetype)); | 771 | |
448 | if (crc32(0, &node, sizeof(struct jffs2_unknown_node)-4) != je32_to_cpu(node.u.hdr_crc)) { | 772 | fn_ofs += newfrag->size; |
449 | /* Hmmm. This should have been caught at scan time. */ | 773 | fn_size -= newfrag->size; |
450 | printk(KERN_ERR "Node header CRC failed at %08x. But it must have been OK earlier.\n", | 774 | this = rb_entry(rb_next(&newfrag->rb), |
451 | ref_offset(ref)); | 775 | struct jffs2_node_frag, rb); |
452 | printk(KERN_ERR "Node was: { %04x, %04x, %08x, %08x }\n", | 776 | |
453 | je16_to_cpu(node.u.magic), je16_to_cpu(node.u.nodetype), je32_to_cpu(node.u.totlen), | 777 | dbg_fragtree2("switch to the next 'this' fragment: %#04x-%#04x %s\n", |
454 | je32_to_cpu(node.u.hdr_crc)); | 778 | this->ofs, this->ofs + this->size, this->node ? "(data)" : "(hole)"); |
455 | jffs2_mark_node_obsolete(c, ref); | 779 | } |
456 | } else switch(je16_to_cpu(node.u.nodetype) & JFFS2_COMPAT_MASK) { | 780 | |
457 | case JFFS2_FEATURE_INCOMPAT: | 781 | /* |
458 | printk(KERN_NOTICE "Unknown INCOMPAT nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref)); | 782 | * 'This' node is not the hole so it obsoletes the new fragment |
459 | /* EEP */ | 783 | * either fully or partially. |
460 | BUG(); | 784 | */ |
461 | break; | 785 | if (this->ofs + this->size >= fn_ofs + fn_size) { |
462 | case JFFS2_FEATURE_ROCOMPAT: | 786 | /* The new node is obsolete, drop it */ |
463 | printk(KERN_NOTICE "Unknown ROCOMPAT nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref)); | 787 | if (fn->frags == 0) { |
464 | if (!(c->flags & JFFS2_SB_FLAG_RO)) | 788 | dbg_fragtree2("%#04x-%#04x is obsolete, mark it obsolete\n", fn_ofs, fn_ofs + fn_size); |
465 | BUG(); | 789 | ref_flag = REF_OBSOLETE; |
466 | break; | ||
467 | case JFFS2_FEATURE_RWCOMPAT_COPY: | ||
468 | printk(KERN_NOTICE "Unknown RWCOMPAT_COPY nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref)); | ||
469 | break; | ||
470 | case JFFS2_FEATURE_RWCOMPAT_DELETE: | ||
471 | printk(KERN_NOTICE "Unknown RWCOMPAT_DELETE nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref)); | ||
472 | jffs2_mark_node_obsolete(c, ref); | ||
473 | break; | ||
474 | } | 790 | } |
791 | goto out_ok; | ||
792 | } else { | ||
793 | struct jffs2_node_frag *new_this; | ||
794 | |||
795 | /* 'This' node obsoletes the beginning of the new node */ | ||
796 | dbg_fragtree2("the beginning %#04x-%#04x is obsolete\n", fn_ofs, this->ofs + this->size); | ||
797 | |||
798 | ref_flag = REF_NORMAL; | ||
799 | |||
800 | fn_size -= this->ofs + this->size - fn_ofs; | ||
801 | fn_ofs = this->ofs + this->size; | ||
802 | dbg_fragtree2("now considering %#04x-%#04x\n", fn_ofs, fn_ofs + fn_size); | ||
803 | |||
804 | new_this = rb_entry(rb_next(&this->rb), struct jffs2_node_frag, rb); | ||
805 | if (!new_this) { | ||
806 | /* | ||
807 | * There is no next fragment. Add the rest of | ||
808 | * the new node as the right-hand child. | ||
809 | */ | ||
810 | if (!checked) { | ||
811 | err = check_node(c, f, tn); | ||
812 | if (unlikely(err != 0)) | ||
813 | return err; | ||
814 | checked = 1; | ||
815 | } | ||
475 | 816 | ||
817 | fn->frags += 1; | ||
818 | newfrag = new_fragment(fn, fn_ofs, fn_size); | ||
819 | if (unlikely(!newfrag)) | ||
820 | return -ENOMEM; | ||
821 | |||
822 | dbg_fragtree2("there are no more fragments, insert %#04x-%#04x\n", | ||
823 | newfrag->ofs, newfrag->ofs + newfrag->size); | ||
824 | rb_link_node(&newfrag->rb, &this->rb, &this->rb.rb_right); | ||
825 | rb_insert_color(&newfrag->rb, root); | ||
826 | goto out_ok; | ||
827 | } else { | ||
828 | this = new_this; | ||
829 | dbg_fragtree2("switch to the next 'this' fragment: %#04x-%#04x %s\n", | ||
830 | this->ofs, this->ofs + this->size, this->node ? "(data)" : "(hole)"); | ||
831 | } | ||
476 | } | 832 | } |
477 | spin_lock(&c->erase_completion_lock); | 833 | } |
834 | |||
835 | out_ok: | ||
836 | BUG_ON(fn->size < PAGE_CACHE_SIZE && ref_flag == REF_PRISTINE); | ||
478 | 837 | ||
838 | if (ref_flag == REF_OBSOLETE) { | ||
839 | dbg_fragtree2("the node is obsolete now\n"); | ||
840 | /* jffs2_mark_node_obsolete() will adjust space accounting */ | ||
841 | jffs2_mark_node_obsolete(c, fn->raw); | ||
842 | return 1; | ||
479 | } | 843 | } |
844 | |||
845 | dbg_fragtree2("the node is \"%s\" now\n", ref_flag == REF_NORMAL ? "REF_NORMAL" : "REF_PRISTINE"); | ||
846 | |||
847 | /* Space accounting was adjusted at check_node_data() */ | ||
848 | spin_lock(&c->erase_completion_lock); | ||
849 | fn->raw->flash_offset = ref_offset(fn->raw) | ref_flag; | ||
480 | spin_unlock(&c->erase_completion_lock); | 850 | spin_unlock(&c->erase_completion_lock); |
481 | *tnp = ret_tn; | ||
482 | *fdp = ret_fd; | ||
483 | 851 | ||
484 | return 0; | 852 | return 0; |
485 | |||
486 | free_out: | ||
487 | jffs2_free_tmp_dnode_info_list(&ret_tn); | ||
488 | jffs2_free_full_dirent_list(ret_fd); | ||
489 | return err; | ||
490 | } | 853 | } |
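
The decreasing-version ordering described in the comment above jffs2_add_older_frag_to_fragtree() is what makes the CRC skipping possible: by the time an older node is considered, every byte it covers may already be supplied by newer nodes, in which case it is obsolete and its data is never read back. A standalone model of that idea, with made-up versions and offsets and a byte map standing in for the fragment tree:

    #include <stdio.h>

    #define FILE_SZ 16

    struct node { unsigned version, ofs, size; };

    int main(void)
    {
            /* Newest first, i.e. decreasing version order. */
            struct node nodes[] = {
                    { 5, 4, 8 },    /* newest: covers bytes 4..11     */
                    { 4, 0, 6 },    /* bytes 0..3 still uncovered     */
                    { 3, 5, 3 },    /* fully covered -> obsolete      */
            };
            unsigned char covered[FILE_SZ] = { 0 };

            for (int i = 0; i < 3; i++) {
                    unsigned fresh = 0;
                    for (unsigned b = nodes[i].ofs; b < nodes[i].ofs + nodes[i].size; b++)
                            if (!covered[b]) {
                                    covered[b] = 1;
                                    fresh++;
                            }
                    printf("ver %u: %s (%u fresh bytes)\n", nodes[i].version,
                           fresh ? "insert, data CRC must be checked"
                                 : "obsolete, CRC check skipped", fresh);
            }
            return 0;
    }
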
491 | 854 | ||
492 | void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state) | 855 | void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state) |
@@ -499,24 +862,21 @@ void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache | |||
499 | 862 | ||
500 | /* During mount, this needs no locking. During normal operation, its | 863 | /* During mount, this needs no locking. During normal operation, its |
501 | callers want to do other stuff while still holding the inocache_lock. | 864 | callers want to do other stuff while still holding the inocache_lock. |
502 | Rather than introducing special case get_ino_cache functions or | 865 | Rather than introducing special case get_ino_cache functions or |
503 | callbacks, we just let the caller do the locking itself. */ | 866 | callbacks, we just let the caller do the locking itself. */ |
504 | 867 | ||
505 | struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, uint32_t ino) | 868 | struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, uint32_t ino) |
506 | { | 869 | { |
507 | struct jffs2_inode_cache *ret; | 870 | struct jffs2_inode_cache *ret; |
508 | 871 | ||
509 | D2(printk(KERN_DEBUG "jffs2_get_ino_cache(): ino %u\n", ino)); | ||
510 | |||
511 | ret = c->inocache_list[ino % INOCACHE_HASHSIZE]; | 872 | ret = c->inocache_list[ino % INOCACHE_HASHSIZE]; |
512 | while (ret && ret->ino < ino) { | 873 | while (ret && ret->ino < ino) { |
513 | ret = ret->next; | 874 | ret = ret->next; |
514 | } | 875 | } |
515 | 876 | ||
516 | if (ret && ret->ino != ino) | 877 | if (ret && ret->ino != ino) |
517 | ret = NULL; | 878 | ret = NULL; |
518 | 879 | ||
519 | D2(printk(KERN_DEBUG "jffs2_get_ino_cache found %p for ino %u\n", ret, ino)); | ||
520 | return ret; | 880 | return ret; |
521 | } | 881 | } |
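
The inocache hash buckets are singly linked lists kept sorted by inode number, which is why the walk above can stop at the first entry whose ino is not below the target. A standalone sketch of the same lookup rule; the struct and hash size here are illustrative, not the kernel definitions:

    #include <stdio.h>

    #define HASHSZ 128      /* illustrative, like INOCACHE_HASHSIZE */

    struct icache { unsigned ino; struct icache *next; };

    static struct icache *lookup(struct icache *buckets[HASHSZ], unsigned ino)
    {
            struct icache *p = buckets[ino % HASHSZ];

            while (p && p->ino < ino)       /* bucket is sorted: stop at first >= ino */
                    p = p->next;
            return (p && p->ino == ino) ? p : NULL;
    }

    int main(void)
    {
            /* inos 2, 130 and 258 all hash to bucket 2 */
            struct icache c3 = { 258, NULL }, c2 = { 130, &c3 }, c1 = { 2, &c2 };
            struct icache *buckets[HASHSZ] = { NULL };

            buckets[2] = &c1;
            printf("ino 130: %s\n", lookup(buckets, 130) ? "found" : "missing");
            printf("ino 386: %s\n", lookup(buckets, 386) ? "found" : "missing");
            return 0;
    }
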
522 | 882 | ||
@@ -528,7 +888,7 @@ void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new | |||
528 | if (!new->ino) | 888 | if (!new->ino) |
529 | new->ino = ++c->highest_ino; | 889 | new->ino = ++c->highest_ino; |
530 | 890 | ||
531 | D2(printk(KERN_DEBUG "jffs2_add_ino_cache: Add %p (ino #%u)\n", new, new->ino)); | 891 | dbg_inocache("add %p (ino #%u)\n", new, new->ino); |
532 | 892 | ||
533 | prev = &c->inocache_list[new->ino % INOCACHE_HASHSIZE]; | 893 | prev = &c->inocache_list[new->ino % INOCACHE_HASHSIZE]; |
534 | 894 | ||
@@ -544,11 +904,12 @@ void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new | |||
544 | void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old) | 904 | void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old) |
545 | { | 905 | { |
546 | struct jffs2_inode_cache **prev; | 906 | struct jffs2_inode_cache **prev; |
547 | D1(printk(KERN_DEBUG "jffs2_del_ino_cache: Del %p (ino #%u)\n", old, old->ino)); | 907 | |
908 | dbg_inocache("del %p (ino #%u)\n", old, old->ino); | ||
548 | spin_lock(&c->inocache_lock); | 909 | spin_lock(&c->inocache_lock); |
549 | 910 | ||
550 | prev = &c->inocache_list[old->ino % INOCACHE_HASHSIZE]; | 911 | prev = &c->inocache_list[old->ino % INOCACHE_HASHSIZE]; |
551 | 912 | ||
552 | while ((*prev) && (*prev)->ino < old->ino) { | 913 | while ((*prev) && (*prev)->ino < old->ino) { |
553 | prev = &(*prev)->next; | 914 | prev = &(*prev)->next; |
554 | } | 915 | } |
@@ -558,7 +919,7 @@ void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old) | |||
558 | 919 | ||
559 | /* Free it now unless it's in READING or CLEARING state, which | 920 | /* Free it now unless it's in READING or CLEARING state, which |
560 | are the transitions upon read_inode() and clear_inode(). The | 921 | are the transitions upon read_inode() and clear_inode(). The |
561 | rest of the time we know nobody else is looking at it, and | 922 | rest of the time we know nobody else is looking at it, and |
562 | if it's held by read_inode() or clear_inode() they'll free it | 923 | if it's held by read_inode() or clear_inode() they'll free it |
563 | for themselves. */ | 924 | for themselves. */ |
564 | if (old->state != INO_STATE_READING && old->state != INO_STATE_CLEARING) | 925 | if (old->state != INO_STATE_READING && old->state != INO_STATE_CLEARING) |
@@ -571,7 +932,7 @@ void jffs2_free_ino_caches(struct jffs2_sb_info *c) | |||
571 | { | 932 | { |
572 | int i; | 933 | int i; |
573 | struct jffs2_inode_cache *this, *next; | 934 | struct jffs2_inode_cache *this, *next; |
574 | 935 | ||
575 | for (i=0; i<INOCACHE_HASHSIZE; i++) { | 936 | for (i=0; i<INOCACHE_HASHSIZE; i++) { |
576 | this = c->inocache_list[i]; | 937 | this = c->inocache_list[i]; |
577 | while (this) { | 938 | while (this) { |
@@ -598,38 +959,30 @@ void jffs2_free_raw_node_refs(struct jffs2_sb_info *c) | |||
598 | c->blocks[i].first_node = c->blocks[i].last_node = NULL; | 959 | c->blocks[i].first_node = c->blocks[i].last_node = NULL; |
599 | } | 960 | } |
600 | } | 961 | } |
601 | 962 | ||
602 | struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset) | 963 | struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset) |
603 | { | 964 | { |
604 | /* The common case in lookup is that there will be a node | 965 | /* The common case in lookup is that there will be a node |
605 | which precisely matches. So we go looking for that first */ | 966 | which precisely matches. So we go looking for that first */ |
606 | struct rb_node *next; | 967 | struct rb_node *next; |
607 | struct jffs2_node_frag *prev = NULL; | 968 | struct jffs2_node_frag *prev = NULL; |
608 | struct jffs2_node_frag *frag = NULL; | 969 | struct jffs2_node_frag *frag = NULL; |
609 | 970 | ||
610 | D2(printk(KERN_DEBUG "jffs2_lookup_node_frag(%p, %d)\n", fragtree, offset)); | 971 | dbg_fragtree2("root %p, offset %d\n", fragtree, offset); |
611 | 972 | ||
612 | next = fragtree->rb_node; | 973 | next = fragtree->rb_node; |
613 | 974 | ||
614 | while(next) { | 975 | while(next) { |
615 | frag = rb_entry(next, struct jffs2_node_frag, rb); | 976 | frag = rb_entry(next, struct jffs2_node_frag, rb); |
616 | 977 | ||
617 | D2(printk(KERN_DEBUG "Considering frag %d-%d (%p). left %p, right %p\n", | ||
618 | frag->ofs, frag->ofs+frag->size, frag, frag->rb.rb_left, frag->rb.rb_right)); | ||
619 | if (frag->ofs + frag->size <= offset) { | 978 | if (frag->ofs + frag->size <= offset) { |
620 | D2(printk(KERN_DEBUG "Going right from frag %d-%d, before the region we care about\n", | ||
621 | frag->ofs, frag->ofs+frag->size)); | ||
622 | /* Remember the closest smaller match on the way down */ | 979 | /* Remember the closest smaller match on the way down */ |
623 | if (!prev || frag->ofs > prev->ofs) | 980 | if (!prev || frag->ofs > prev->ofs) |
624 | prev = frag; | 981 | prev = frag; |
625 | next = frag->rb.rb_right; | 982 | next = frag->rb.rb_right; |
626 | } else if (frag->ofs > offset) { | 983 | } else if (frag->ofs > offset) { |
627 | D2(printk(KERN_DEBUG "Going left from frag %d-%d, after the region we care about\n", | ||
628 | frag->ofs, frag->ofs+frag->size)); | ||
629 | next = frag->rb.rb_left; | 984 | next = frag->rb.rb_left; |
630 | } else { | 985 | } else { |
631 | D2(printk(KERN_DEBUG "Returning frag %d,%d, matched\n", | ||
632 | frag->ofs, frag->ofs+frag->size)); | ||
633 | return frag; | 986 | return frag; |
634 | } | 987 | } |
635 | } | 988 | } |
@@ -638,11 +991,11 @@ struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_ | |||
638 | and return the closest smaller one */ | 991 | and return the closest smaller one */ |
639 | 992 | ||
640 | if (prev) | 993 | if (prev) |
641 | D2(printk(KERN_DEBUG "No match. Returning frag %d,%d, closest previous\n", | 994 | dbg_fragtree2("no match. Returning frag %#04x-%#04x, closest previous\n", |
642 | prev->ofs, prev->ofs+prev->size)); | 995 | prev->ofs, prev->ofs+prev->size); |
643 | else | 996 | else |
644 | D2(printk(KERN_DEBUG "Returning NULL, empty fragtree\n")); | 997 | dbg_fragtree2("returning NULL, empty fragtree\n"); |
645 | 998 | ||
646 | return prev; | 999 | return prev; |
647 | } | 1000 | } |
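
The lookup above returns either the fragment containing the offset or, failing that, the closest fragment that starts below it. A standalone model of that rule, with a sorted array standing in for the rb-tree and made-up fragment bounds:

    #include <stdio.h>

    struct frag { unsigned ofs, size; };

    static const struct frag *lookup(const struct frag *f, int n, unsigned offset)
    {
            const struct frag *prev = NULL;

            for (int i = 0; i < n; i++) {
                    if (f[i].ofs + f[i].size <= offset)
                            prev = &f[i];           /* closest smaller so far   */
                    else if (f[i].ofs <= offset)
                            return &f[i];           /* fragment contains offset */
            }
            return prev;                            /* NULL if empty or offset too low */
    }

    int main(void)
    {
            const struct frag tree[] = { { 0x000, 0x200 }, { 0x200, 0x100 }, { 0x800, 0x400 } };
            const struct frag *hit = lookup(tree, 3, 0x500);   /* falls in the 0x300-0x800 gap */

            if (hit)
                    printf("offset 0x500 -> closest previous frag %#05x-%#05x\n",
                           hit->ofs, hit->ofs + hit->size);
            return 0;
    }
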
648 | 1001 | ||
@@ -656,39 +1009,32 @@ void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c) | |||
656 | if (!root->rb_node) | 1009 | if (!root->rb_node) |
657 | return; | 1010 | return; |
658 | 1011 | ||
659 | frag = (rb_entry(root->rb_node, struct jffs2_node_frag, rb)); | 1012 | dbg_fragtree("killing\n"); |
660 | 1013 | ||
1014 | frag = (rb_entry(root->rb_node, struct jffs2_node_frag, rb)); | ||
661 | while(frag) { | 1015 | while(frag) { |
662 | if (frag->rb.rb_left) { | 1016 | if (frag->rb.rb_left) { |
663 | D2(printk(KERN_DEBUG "Going left from frag (%p) %d-%d\n", | ||
664 | frag, frag->ofs, frag->ofs+frag->size)); | ||
665 | frag = frag_left(frag); | 1017 | frag = frag_left(frag); |
666 | continue; | 1018 | continue; |
667 | } | 1019 | } |
668 | if (frag->rb.rb_right) { | 1020 | if (frag->rb.rb_right) { |
669 | D2(printk(KERN_DEBUG "Going right from frag (%p) %d-%d\n", | ||
670 | frag, frag->ofs, frag->ofs+frag->size)); | ||
671 | frag = frag_right(frag); | 1021 | frag = frag_right(frag); |
672 | continue; | 1022 | continue; |
673 | } | 1023 | } |
674 | 1024 | ||
675 | D2(printk(KERN_DEBUG "jffs2_kill_fragtree: frag at 0x%x-0x%x: node %p, frags %d--\n", | ||
676 | frag->ofs, frag->ofs+frag->size, frag->node, | ||
677 | frag->node?frag->node->frags:0)); | ||
678 | |||
679 | if (frag->node && !(--frag->node->frags)) { | 1025 | if (frag->node && !(--frag->node->frags)) { |
680 | /* Not a hole, and it's the final remaining frag | 1026 | /* Not a hole, and it's the final remaining frag |
681 | of this node. Free the node */ | 1027 | of this node. Free the node */ |
682 | if (c) | 1028 | if (c) |
683 | jffs2_mark_node_obsolete(c, frag->node->raw); | 1029 | jffs2_mark_node_obsolete(c, frag->node->raw); |
684 | 1030 | ||
685 | jffs2_free_full_dnode(frag->node); | 1031 | jffs2_free_full_dnode(frag->node); |
686 | } | 1032 | } |
687 | parent = frag_parent(frag); | 1033 | parent = frag_parent(frag); |
688 | if (parent) { | 1034 | if (parent) { |
689 | if (frag_left(parent) == frag) | 1035 | if (frag_left(parent) == frag) |
690 | parent->rb.rb_left = NULL; | 1036 | parent->rb.rb_left = NULL; |
691 | else | 1037 | else |
692 | parent->rb.rb_right = NULL; | 1038 | parent->rb.rb_right = NULL; |
693 | } | 1039 | } |
694 | 1040 | ||
@@ -698,29 +1044,3 @@ void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c) | |||
698 | cond_resched(); | 1044 | cond_resched(); |
699 | } | 1045 | } |
700 | } | 1046 | } |
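
The teardown above avoids recursion: it keeps descending to a left or right child, frees the leaf it reaches, clears the parent's link so the node is never revisited, and climbs back up. A standalone sketch of the same traversal on a toy binary tree (not struct jffs2_node_frag):

    #include <stdio.h>
    #include <stdlib.h>

    struct tnode { struct tnode *left, *right, *parent; int id; };

    static struct tnode *mknode(int id, struct tnode *parent)
    {
            struct tnode *n = calloc(1, sizeof(*n));
            n->id = id;
            n->parent = parent;
            return n;
    }

    static void kill_tree(struct tnode *root)
    {
            struct tnode *n = root;

            while (n) {
                    if (n->left)  { n = n->left;  continue; }
                    if (n->right) { n = n->right; continue; }

                    struct tnode *parent = n->parent;
                    if (parent) {
                            if (parent->left == n)
                                    parent->left = NULL;    /* never revisit this child */
                            else
                                    parent->right = NULL;
                    }
                    printf("freeing node %d\n", n->id);
                    free(n);
                    n = parent;
            }
    }

    int main(void)
    {
            struct tnode *root = mknode(1, NULL);
            root->left  = mknode(2, root);
            root->right = mknode(3, root);
            root->left->right = mknode(4, root->left);
            kill_tree(root);        /* frees 4, 2, 3, 1 */
            return 0;
    }
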
701 | |||
702 | void jffs2_fragtree_insert(struct jffs2_node_frag *newfrag, struct jffs2_node_frag *base) | ||
703 | { | ||
704 | struct rb_node *parent = &base->rb; | ||
705 | struct rb_node **link = &parent; | ||
706 | |||
707 | D2(printk(KERN_DEBUG "jffs2_fragtree_insert(%p; %d-%d, %p)\n", newfrag, | ||
708 | newfrag->ofs, newfrag->ofs+newfrag->size, base)); | ||
709 | |||
710 | while (*link) { | ||
711 | parent = *link; | ||
712 | base = rb_entry(parent, struct jffs2_node_frag, rb); | ||
713 | |||
714 | D2(printk(KERN_DEBUG "fragtree_insert considering frag at 0x%x\n", base->ofs)); | ||
715 | if (newfrag->ofs > base->ofs) | ||
716 | link = &base->rb.rb_right; | ||
717 | else if (newfrag->ofs < base->ofs) | ||
718 | link = &base->rb.rb_left; | ||
719 | else { | ||
720 | printk(KERN_CRIT "Duplicate frag at %08x (%p,%p)\n", newfrag->ofs, newfrag, base); | ||
721 | BUG(); | ||
722 | } | ||
723 | } | ||
724 | |||
725 | rb_link_node(&newfrag->rb, &base->rb, link); | ||
726 | } | ||
diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h index b34c397909ef..23a67bb3052f 100644 --- a/fs/jffs2/nodelist.h +++ b/fs/jffs2/nodelist.h | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: nodelist.h,v 1.131 2005/07/05 21:03:07 dwmw2 Exp $ | 10 | * $Id: nodelist.h,v 1.140 2005/09/07 08:34:54 havasi Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -20,30 +20,15 @@ | |||
20 | #include <linux/jffs2.h> | 20 | #include <linux/jffs2.h> |
21 | #include <linux/jffs2_fs_sb.h> | 21 | #include <linux/jffs2_fs_sb.h> |
22 | #include <linux/jffs2_fs_i.h> | 22 | #include <linux/jffs2_fs_i.h> |
23 | #include "summary.h" | ||
23 | 24 | ||
24 | #ifdef __ECOS | 25 | #ifdef __ECOS |
25 | #include "os-ecos.h" | 26 | #include "os-ecos.h" |
26 | #else | 27 | #else |
27 | #include <linux/mtd/compatmac.h> /* For min/max in older kernels */ | 28 | #include <linux/mtd/compatmac.h> /* For compatibility with older kernels */ |
28 | #include "os-linux.h" | 29 | #include "os-linux.h" |
29 | #endif | 30 | #endif |
30 | 31 | ||
31 | #ifndef CONFIG_JFFS2_FS_DEBUG | ||
32 | #define CONFIG_JFFS2_FS_DEBUG 1 | ||
33 | #endif | ||
34 | |||
35 | #if CONFIG_JFFS2_FS_DEBUG > 0 | ||
36 | #define D1(x) x | ||
37 | #else | ||
38 | #define D1(x) | ||
39 | #endif | ||
40 | |||
41 | #if CONFIG_JFFS2_FS_DEBUG > 1 | ||
42 | #define D2(x) x | ||
43 | #else | ||
44 | #define D2(x) | ||
45 | #endif | ||
46 | |||
47 | #define JFFS2_NATIVE_ENDIAN | 32 | #define JFFS2_NATIVE_ENDIAN |
48 | 33 | ||
49 | /* Note we handle mode bits conversion from JFFS2 (i.e. Linux) to/from | 34 | /* Note we handle mode bits conversion from JFFS2 (i.e. Linux) to/from |
@@ -73,14 +58,17 @@ | |||
73 | #define je16_to_cpu(x) (le16_to_cpu(x.v16)) | 58 | #define je16_to_cpu(x) (le16_to_cpu(x.v16)) |
74 | #define je32_to_cpu(x) (le32_to_cpu(x.v32)) | 59 | #define je32_to_cpu(x) (le32_to_cpu(x.v32)) |
75 | #define jemode_to_cpu(x) (le32_to_cpu(jffs2_to_os_mode((x).m))) | 60 | #define jemode_to_cpu(x) (le32_to_cpu(jffs2_to_os_mode((x).m))) |
76 | #else | 61 | #else |
77 | #error wibble | 62 | #error wibble |
78 | #endif | 63 | #endif |
79 | 64 | ||
65 | /* The minimal node header size */ | ||
66 | #define JFFS2_MIN_NODE_HEADER sizeof(struct jffs2_raw_dirent) | ||
67 | |||
80 | /* | 68 | /* |
81 | This is all we need to keep in-core for each raw node during normal | 69 | This is all we need to keep in-core for each raw node during normal |
82 | operation. As and when we do read_inode on a particular inode, we can | 70 | operation. As and when we do read_inode on a particular inode, we can |
83 | scan the nodes which are listed for it and build up a proper map of | 71 | scan the nodes which are listed for it and build up a proper map of |
84 | which nodes are currently valid. JFFSv1 always used to keep that whole | 72 | which nodes are currently valid. JFFSv1 always used to keep that whole |
85 | map in core for each inode. | 73 | map in core for each inode. |
86 | */ | 74 | */ |
@@ -97,7 +85,7 @@ struct jffs2_raw_node_ref | |||
97 | 85 | ||
98 | /* flash_offset & 3 always has to be zero, because nodes are | 86 | /* flash_offset & 3 always has to be zero, because nodes are |
99 | always aligned at 4 bytes. So we have a couple of extra bits | 87 | always aligned at 4 bytes. So we have a couple of extra bits |
100 | to play with, which indicate the node's status; see below: */ | 88 | to play with, which indicate the node's status; see below: */ |
101 | #define REF_UNCHECKED 0 /* We haven't yet checked the CRC or built its inode */ | 89 | #define REF_UNCHECKED 0 /* We haven't yet checked the CRC or built its inode */ |
102 | #define REF_OBSOLETE 1 /* Obsolete, can be completely ignored */ | 90 | #define REF_OBSOLETE 1 /* Obsolete, can be completely ignored */ |
103 | #define REF_PRISTINE 2 /* Completely clean. GC without looking */ | 91 | #define REF_PRISTINE 2 /* Completely clean. GC without looking */ |
@@ -110,7 +98,7 @@ struct jffs2_raw_node_ref | |||
110 | /* For each inode in the filesystem, we need to keep a record of | 98 | /* For each inode in the filesystem, we need to keep a record of |
111 | nlink, because it would be a PITA to scan the whole directory tree | 99 | nlink, because it would be a PITA to scan the whole directory tree |
112 | at read_inode() time to calculate it, and to keep sufficient information | 100 | at read_inode() time to calculate it, and to keep sufficient information |
113 | in the raw_node_ref (basically both parent and child inode number for | 101 | in the raw_node_ref (basically both parent and child inode number for |
114 | dirent nodes) would take more space than this does. We also keep | 102 | dirent nodes) would take more space than this does. We also keep |
115 | a pointer to the first physical node which is part of this inode, too. | 103 | a pointer to the first physical node which is part of this inode, too. |
116 | */ | 104 | */ |
@@ -140,7 +128,7 @@ struct jffs2_inode_cache { | |||
140 | #define INOCACHE_HASHSIZE 128 | 128 | #define INOCACHE_HASHSIZE 128 |
141 | 129 | ||
142 | /* | 130 | /* |
143 | Larger representation of a raw node, kept in-core only when the | 131 | Larger representation of a raw node, kept in-core only when the |
144 | struct inode for this particular ino is instantiated. | 132 | struct inode for this particular ino is instantiated. |
145 | */ | 133 | */ |
146 | 134 | ||
@@ -150,11 +138,11 @@ struct jffs2_full_dnode | |||
150 | uint32_t ofs; /* The offset to which the data of this node belongs */ | 138 | uint32_t ofs; /* The offset to which the data of this node belongs */ |
151 | uint32_t size; | 139 | uint32_t size; |
152 | uint32_t frags; /* Number of fragments which currently refer | 140 | uint32_t frags; /* Number of fragments which currently refer |
153 | to this node. When this reaches zero, | 141 | to this node. When this reaches zero, |
154 | the node is obsolete. */ | 142 | the node is obsolete. */ |
155 | }; | 143 | }; |
156 | 144 | ||
157 | /* | 145 | /* |
158 | Even larger representation of a raw node, kept in-core only while | 146 | Even larger representation of a raw node, kept in-core only while |
159 | we're actually building up the original map of which nodes go where, | 147 | we're actually building up the original map of which nodes go where, |
160 | in read_inode() | 148 | in read_inode() |
@@ -164,7 +152,10 @@ struct jffs2_tmp_dnode_info | |||
164 | struct rb_node rb; | 152 | struct rb_node rb; |
165 | struct jffs2_full_dnode *fn; | 153 | struct jffs2_full_dnode *fn; |
166 | uint32_t version; | 154 | uint32_t version; |
167 | }; | 155 | uint32_t data_crc; |
156 | uint32_t partial_crc; | ||
157 | uint32_t csize; | ||
158 | }; | ||
168 | 159 | ||
169 | struct jffs2_full_dirent | 160 | struct jffs2_full_dirent |
170 | { | 161 | { |
@@ -178,7 +169,7 @@ struct jffs2_full_dirent | |||
178 | }; | 169 | }; |
179 | 170 | ||
180 | /* | 171 | /* |
181 | Fragments - used to build a map of which raw node to obtain | 172 | Fragments - used to build a map of which raw node to obtain |
182 | data from for each part of the ino | 173 | data from for each part of the ino |
183 | */ | 174 | */ |
184 | struct jffs2_node_frag | 175 | struct jffs2_node_frag |
@@ -207,86 +198,18 @@ struct jffs2_eraseblock | |||
207 | struct jffs2_raw_node_ref *gc_node; /* Next node to be garbage collected */ | 198 | struct jffs2_raw_node_ref *gc_node; /* Next node to be garbage collected */ |
208 | }; | 199 | }; |
209 | 200 | ||
210 | #define ACCT_SANITY_CHECK(c, jeb) do { \ | 201 | static inline int jffs2_blocks_use_vmalloc(struct jffs2_sb_info *c) |
211 | struct jffs2_eraseblock *___j = jeb; \ | ||
212 | if ((___j) && ___j->used_size + ___j->dirty_size + ___j->free_size + ___j->wasted_size + ___j->unchecked_size != c->sector_size) { \ | ||
213 | printk(KERN_NOTICE "Eeep. Space accounting for block at 0x%08x is screwed\n", ___j->offset); \ | ||
214 | printk(KERN_NOTICE "free 0x%08x + dirty 0x%08x + used %08x + wasted %08x + unchecked %08x != total %08x\n", \ | ||
215 | ___j->free_size, ___j->dirty_size, ___j->used_size, ___j->wasted_size, ___j->unchecked_size, c->sector_size); \ | ||
216 | BUG(); \ | ||
217 | } \ | ||
218 | if (c->used_size + c->dirty_size + c->free_size + c->erasing_size + c->bad_size + c->wasted_size + c->unchecked_size != c->flash_size) { \ | ||
219 | printk(KERN_NOTICE "Eeep. Space accounting superblock info is screwed\n"); \ | ||
220 | printk(KERN_NOTICE "free 0x%08x + dirty 0x%08x + used %08x + erasing %08x + bad %08x + wasted %08x + unchecked %08x != total %08x\n", \ | ||
221 | c->free_size, c->dirty_size, c->used_size, c->erasing_size, c->bad_size, c->wasted_size, c->unchecked_size, c->flash_size); \ | ||
222 | BUG(); \ | ||
223 | } \ | ||
224 | } while(0) | ||
225 | |||
226 | static inline void paranoia_failed_dump(struct jffs2_eraseblock *jeb) | ||
227 | { | 202 | { |
228 | struct jffs2_raw_node_ref *ref; | 203 | return ((c->flash_size / c->sector_size) * sizeof (struct jffs2_eraseblock)) > (128 * 1024); |
229 | int i=0; | ||
230 | |||
231 | printk(KERN_NOTICE); | ||
232 | for (ref = jeb->first_node; ref; ref = ref->next_phys) { | ||
233 | printk("%08x->", ref_offset(ref)); | ||
234 | if (++i == 8) { | ||
235 | i = 0; | ||
236 | printk("\n" KERN_NOTICE); | ||
237 | } | ||
238 | } | ||
239 | printk("\n"); | ||
240 | } | 204 | } |
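
jffs2_blocks_use_vmalloc() simply asks whether the per-eraseblock array would exceed 128 KiB, in which case it is vmalloc()ed rather than kmalloc()ed. A standalone arithmetic sketch, assuming a 48-byte struct jffs2_eraseblock and two made-up flash geometries:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long sizeof_jeb = 48;   /* assumed sizeof(struct jffs2_eraseblock) */
            struct { unsigned long long flash, sector; } fs[] = {
                    {  32ULL << 20, 64 << 10 },   /* 32 MiB NOR,   64 KiB eraseblocks */
                    { 256ULL << 20, 16 << 10 },   /* 256 MiB NAND, 16 KiB eraseblocks */
            };

            for (int i = 0; i < 2; i++) {
                    unsigned long long nblocks = fs[i].flash / fs[i].sector;
                    unsigned long long array   = nblocks * sizeof_jeb;

                    printf("%llu blocks -> %llu-byte array -> %s\n",
                           nblocks, array, array > 128 * 1024 ? "vmalloc" : "kmalloc");
            }
            return 0;
    }
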
241 | 205 | ||
242 | |||
243 | #define ACCT_PARANOIA_CHECK(jeb) do { \ | ||
244 | uint32_t my_used_size = 0; \ | ||
245 | uint32_t my_unchecked_size = 0; \ | ||
246 | struct jffs2_raw_node_ref *ref2 = jeb->first_node; \ | ||
247 | while (ref2) { \ | ||
248 | if (unlikely(ref2->flash_offset < jeb->offset || \ | ||
249 | ref2->flash_offset > jeb->offset + c->sector_size)) { \ | ||
250 | printk(KERN_NOTICE "Node %08x shouldn't be in block at %08x!\n", \ | ||
251 | ref_offset(ref2), jeb->offset); \ | ||
252 | paranoia_failed_dump(jeb); \ | ||
253 | BUG(); \ | ||
254 | } \ | ||
255 | if (ref_flags(ref2) == REF_UNCHECKED) \ | ||
256 | my_unchecked_size += ref_totlen(c, jeb, ref2); \ | ||
257 | else if (!ref_obsolete(ref2)) \ | ||
258 | my_used_size += ref_totlen(c, jeb, ref2); \ | ||
259 | if (unlikely((!ref2->next_phys) != (ref2 == jeb->last_node))) { \ | ||
260 | if (!ref2->next_phys) \ | ||
261 | printk("ref for node at %p (phys %08x) has next_phys->%p (----), last_node->%p (phys %08x)\n", \ | ||
262 | ref2, ref_offset(ref2), ref2->next_phys, \ | ||
263 | jeb->last_node, ref_offset(jeb->last_node)); \ | ||
264 | else \ | ||
265 | printk("ref for node at %p (phys %08x) has next_phys->%p (%08x), last_node->%p (phys %08x)\n", \ | ||
266 | ref2, ref_offset(ref2), ref2->next_phys, ref_offset(ref2->next_phys), \ | ||
267 | jeb->last_node, ref_offset(jeb->last_node)); \ | ||
268 | paranoia_failed_dump(jeb); \ | ||
269 | BUG(); \ | ||
270 | } \ | ||
271 | ref2 = ref2->next_phys; \ | ||
272 | } \ | ||
273 | if (my_used_size != jeb->used_size) { \ | ||
274 | printk(KERN_NOTICE "Calculated used size %08x != stored used size %08x\n", my_used_size, jeb->used_size); \ | ||
275 | BUG(); \ | ||
276 | } \ | ||
277 | if (my_unchecked_size != jeb->unchecked_size) { \ | ||
278 | printk(KERN_NOTICE "Calculated unchecked size %08x != stored unchecked size %08x\n", my_unchecked_size, jeb->unchecked_size); \ | ||
279 | BUG(); \ | ||
280 | } \ | ||
281 | } while(0) | ||
282 | |||
283 | /* Calculate totlen from surrounding nodes or eraseblock */ | 206 | /* Calculate totlen from surrounding nodes or eraseblock */ |
284 | static inline uint32_t __ref_totlen(struct jffs2_sb_info *c, | 207 | static inline uint32_t __ref_totlen(struct jffs2_sb_info *c, |
285 | struct jffs2_eraseblock *jeb, | 208 | struct jffs2_eraseblock *jeb, |
286 | struct jffs2_raw_node_ref *ref) | 209 | struct jffs2_raw_node_ref *ref) |
287 | { | 210 | { |
288 | uint32_t ref_end; | 211 | uint32_t ref_end; |
289 | 212 | ||
290 | if (ref->next_phys) | 213 | if (ref->next_phys) |
291 | ref_end = ref_offset(ref->next_phys); | 214 | ref_end = ref_offset(ref->next_phys); |
292 | else { | 215 | else { |
@@ -306,11 +229,13 @@ static inline uint32_t ref_totlen(struct jffs2_sb_info *c, | |||
306 | { | 229 | { |
307 | uint32_t ret; | 230 | uint32_t ret; |
308 | 231 | ||
309 | D1(if (jeb && jeb != &c->blocks[ref->flash_offset / c->sector_size]) { | 232 | #if CONFIG_JFFS2_FS_DEBUG > 0 |
233 | if (jeb && jeb != &c->blocks[ref->flash_offset / c->sector_size]) { | ||
310 | printk(KERN_CRIT "ref_totlen called with wrong block -- at 0x%08x instead of 0x%08x; ref 0x%08x\n", | 234 | printk(KERN_CRIT "ref_totlen called with wrong block -- at 0x%08x instead of 0x%08x; ref 0x%08x\n", |
311 | jeb->offset, c->blocks[ref->flash_offset / c->sector_size].offset, ref_offset(ref)); | 235 | jeb->offset, c->blocks[ref->flash_offset / c->sector_size].offset, ref_offset(ref)); |
312 | BUG(); | 236 | BUG(); |
313 | }) | 237 | } |
238 | #endif | ||
314 | 239 | ||
315 | #if 1 | 240 | #if 1 |
316 | ret = ref->__totlen; | 241 | ret = ref->__totlen; |
@@ -323,14 +248,13 @@ static inline uint32_t ref_totlen(struct jffs2_sb_info *c, | |||
323 | ret, ref->__totlen); | 248 | ret, ref->__totlen); |
324 | if (!jeb) | 249 | if (!jeb) |
325 | jeb = &c->blocks[ref->flash_offset / c->sector_size]; | 250 | jeb = &c->blocks[ref->flash_offset / c->sector_size]; |
326 | paranoia_failed_dump(jeb); | 251 | jffs2_dbg_dump_node_refs_nolock(c, jeb); |
327 | BUG(); | 252 | BUG(); |
328 | } | 253 | } |
329 | #endif | 254 | #endif |
330 | return ret; | 255 | return ret; |
331 | } | 256 | } |
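
__ref_totlen() recovers a node's total length from the flash layout alone: the distance to the next node in the same eraseblock, or, for the last node, to the start of the block's free space (the else branch is elided in this hunk, so that detail is an assumption here). A standalone sketch with made-up offsets:

    #include <stdio.h>

    int main(void)
    {
            unsigned node_ofs[]  = { 0x10000, 0x10044, 0x10230 };  /* sorted flash offsets */
            int      nnodes      = 3;
            unsigned block_ofs   = 0x10000, sector_size = 0x4000;
            unsigned free_size   = 0x3d00;                         /* unused tail of the block */

            for (int i = 0; i < nnodes; i++) {
                    unsigned ref_end;

                    if (i + 1 < nnodes)
                            ref_end = node_ofs[i + 1];             /* next node in the block */
                    else
                            ref_end = block_ofs + sector_size - free_size;  /* end of used area */

                    printf("node @%#07x: totlen %#05x\n", node_ofs[i], ref_end - node_ofs[i]);
            }
            return 0;
    }
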
332 | 257 | ||
333 | |||
334 | #define ALLOC_NORMAL 0 /* Normal allocation */ | 258 | #define ALLOC_NORMAL 0 /* Normal allocation */ |
335 | #define ALLOC_DELETION 1 /* Deletion node. Best to allow it */ | 259 | #define ALLOC_DELETION 1 /* Deletion node. Best to allow it */ |
336 | #define ALLOC_GC 2 /* Space requested for GC. Give it or die */ | 260 | #define ALLOC_GC 2 /* Space requested for GC. Give it or die */ |
@@ -340,7 +264,7 @@ static inline uint32_t ref_totlen(struct jffs2_sb_info *c, | |||
340 | #define VERYDIRTY(c, size) ((size) >= ((c)->sector_size / 2)) | 264 | #define VERYDIRTY(c, size) ((size) >= ((c)->sector_size / 2)) |
341 | 265 | ||
342 | /* check if dirty space is more than 255 Byte */ | 266 | /* check if dirty space is more than 255 Byte */ |
343 | #define ISDIRTY(size) ((size) > sizeof (struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN) | 267 | #define ISDIRTY(size) ((size) > sizeof (struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN) |
344 | 268 | ||
345 | #define PAD(x) (((x)+3)&~3) | 269 | #define PAD(x) (((x)+3)&~3) |
346 | 270 | ||
@@ -384,12 +308,7 @@ static inline struct jffs2_node_frag *frag_last(struct rb_root *root) | |||
384 | #define frag_erase(frag, list) rb_erase(&frag->rb, list); | 308 | #define frag_erase(frag, list) rb_erase(&frag->rb, list); |
385 | 309 | ||
386 | /* nodelist.c */ | 310 | /* nodelist.c */ |
387 | D2(void jffs2_print_frag_list(struct jffs2_inode_info *f)); | ||
388 | void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list); | 311 | void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list); |
389 | int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | ||
390 | struct rb_root *tnp, struct jffs2_full_dirent **fdp, | ||
391 | uint32_t *highest_version, uint32_t *latest_mctime, | ||
392 | uint32_t *mctime_ver); | ||
393 | void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state); | 312 | void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state); |
394 | struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, uint32_t ino); | 313 | struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, uint32_t ino); |
395 | void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new); | 314 | void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new); |
@@ -398,19 +317,23 @@ void jffs2_free_ino_caches(struct jffs2_sb_info *c); | |||
398 | void jffs2_free_raw_node_refs(struct jffs2_sb_info *c); | 317 | void jffs2_free_raw_node_refs(struct jffs2_sb_info *c); |
399 | struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset); | 318 | struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset); |
400 | void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c_delete); | 319 | void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c_delete); |
401 | void jffs2_fragtree_insert(struct jffs2_node_frag *newfrag, struct jffs2_node_frag *base); | ||
402 | struct rb_node *rb_next(struct rb_node *); | 320 | struct rb_node *rb_next(struct rb_node *); |
403 | struct rb_node *rb_prev(struct rb_node *); | 321 | struct rb_node *rb_prev(struct rb_node *); |
404 | void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_root *root); | 322 | void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_root *root); |
323 | void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *this); | ||
324 | int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn); | ||
325 | void jffs2_truncate_fragtree (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size); | ||
326 | int jffs2_add_older_frag_to_fragtree(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_tmp_dnode_info *tn); | ||
405 | 327 | ||
406 | /* nodemgmt.c */ | 328 | /* nodemgmt.c */ |
407 | int jffs2_thread_should_wake(struct jffs2_sb_info *c); | 329 | int jffs2_thread_should_wake(struct jffs2_sb_info *c); |
408 | int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, int prio); | 330 | int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, |
409 | int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len); | 331 | uint32_t *len, int prio, uint32_t sumsize); |
332 | int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, | ||
333 | uint32_t *len, uint32_t sumsize); | ||
410 | int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new); | 334 | int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new); |
411 | void jffs2_complete_reservation(struct jffs2_sb_info *c); | 335 | void jffs2_complete_reservation(struct jffs2_sb_info *c); |
412 | void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *raw); | 336 | void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *raw); |
413 | void jffs2_dump_block_lists(struct jffs2_sb_info *c); | ||
414 | 337 | ||
415 | /* write.c */ | 338 | /* write.c */ |
416 | int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint32_t mode, struct jffs2_raw_inode *ri); | 339 | int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint32_t mode, struct jffs2_raw_inode *ri); |
@@ -418,17 +341,15 @@ int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint | |||
418 | struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const unsigned char *data, uint32_t datalen, uint32_t flash_ofs, int alloc_mode); | 341 | struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const unsigned char *data, uint32_t datalen, uint32_t flash_ofs, int alloc_mode); |
419 | struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_dirent *rd, const unsigned char *name, uint32_t namelen, uint32_t flash_ofs, int alloc_mode); | 342 | struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_dirent *rd, const unsigned char *name, uint32_t namelen, uint32_t flash_ofs, int alloc_mode); |
420 | int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | 343 | int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, |
421 | struct jffs2_raw_inode *ri, unsigned char *buf, | 344 | struct jffs2_raw_inode *ri, unsigned char *buf, |
422 | uint32_t offset, uint32_t writelen, uint32_t *retlen); | 345 | uint32_t offset, uint32_t writelen, uint32_t *retlen); |
423 | int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const char *name, int namelen); | 346 | int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const char *name, int namelen); |
424 | int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, const char *name, int namelen, struct jffs2_inode_info *dead_f); | 347 | int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, const char *name, int namelen, struct jffs2_inode_info *dead_f, uint32_t time); |
425 | int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino, uint8_t type, const char *name, int namelen); | 348 | int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino, uint8_t type, const char *name, int namelen, uint32_t time); |
426 | 349 | ||
427 | 350 | ||
428 | /* readinode.c */ | 351 | /* readinode.c */ |
429 | void jffs2_truncate_fraglist (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size); | 352 | int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, |
430 | int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn); | ||
431 | int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | ||
432 | uint32_t ino, struct jffs2_raw_inode *latest_node); | 353 | uint32_t ino, struct jffs2_raw_inode *latest_node); |
433 | int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic); | 354 | int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic); |
434 | void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f); | 355 | void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f); |
@@ -468,6 +389,10 @@ char *jffs2_getlink(struct jffs2_sb_info *c, struct jffs2_inode_info *f); | |||
468 | /* scan.c */ | 389 | /* scan.c */ |
469 | int jffs2_scan_medium(struct jffs2_sb_info *c); | 390 | int jffs2_scan_medium(struct jffs2_sb_info *c); |
470 | void jffs2_rotate_lists(struct jffs2_sb_info *c); | 391 | void jffs2_rotate_lists(struct jffs2_sb_info *c); |
392 | int jffs2_fill_scan_buf(struct jffs2_sb_info *c, void *buf, | ||
393 | uint32_t ofs, uint32_t len); | ||
394 | struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino); | ||
395 | int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); | ||
471 | 396 | ||
472 | /* build.c */ | 397 | /* build.c */ |
473 | int jffs2_do_mount_fs(struct jffs2_sb_info *c); | 398 | int jffs2_do_mount_fs(struct jffs2_sb_info *c); |
@@ -483,4 +408,6 @@ int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_erasebloc | |||
483 | int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); | 408 | int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); |
484 | #endif | 409 | #endif |
485 | 410 | ||
411 | #include "debug.h" | ||
412 | |||
486 | #endif /* __JFFS2_NODELIST_H__ */ | 413 | #endif /* __JFFS2_NODELIST_H__ */ |
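Note on the header changes above: the reservation API in nodelist.h now carries a summary-size hint, so jffs2_reserve_space() and jffs2_reserve_space_gc() take an extra sumsize argument, and jffs2_do_unlink()/jffs2_do_link() gain a timestamp. A minimal caller sketch against the new prototypes; the node size and the JFFS2_SUMMARY_INODE_SIZE / JFFS2_SUMMARY_NOSUM_SIZE values are illustrative, not taken from a specific call site in this merge:

    uint32_t phys_ofs, alloclen;
    int ret;

    /* Ask for room for one raw inode plus its data, and also reserve room
       for its entry in the eraseblock summary.  A caller that does not want
       a summary entry would pass JFFS2_SUMMARY_NOSUM_SIZE instead. */
    ret = jffs2_reserve_space(c, sizeof(struct jffs2_raw_inode) + datalen,
                              &phys_ofs, &alloclen, ALLOC_NORMAL,
                              JFFS2_SUMMARY_INODE_SIZE);
    if (ret)
            return ret;

    /* ... write the node at phys_ofs ... */

    jffs2_complete_reservation(c);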
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c index c1d8b5ed9ab9..49127a1f0458 100644 --- a/fs/jffs2/nodemgmt.c +++ b/fs/jffs2/nodemgmt.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: nodemgmt.c,v 1.122 2005/05/06 09:30:27 dedekind Exp $ | 10 | * $Id: nodemgmt.c,v 1.127 2005/09/20 15:49:12 dedekind Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/compiler.h> | 17 | #include <linux/compiler.h> |
18 | #include <linux/sched.h> /* For cond_resched() */ | 18 | #include <linux/sched.h> /* For cond_resched() */ |
19 | #include "nodelist.h" | 19 | #include "nodelist.h" |
20 | #include "debug.h" | ||
20 | 21 | ||
21 | /** | 22 | /** |
22 | * jffs2_reserve_space - request physical space to write nodes to flash | 23 | * jffs2_reserve_space - request physical space to write nodes to flash |
@@ -38,9 +39,11 @@ | |||
38 | * for the requested allocation. | 39 | * for the requested allocation. |
39 | */ | 40 | */ |
40 | 41 | ||
41 | static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len); | 42 | static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, |
43 | uint32_t *ofs, uint32_t *len, uint32_t sumsize); | ||
42 | 44 | ||
43 | int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, int prio) | 45 | int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, |
46 | uint32_t *len, int prio, uint32_t sumsize) | ||
44 | { | 47 | { |
45 | int ret = -EAGAIN; | 48 | int ret = -EAGAIN; |
46 | int blocksneeded = c->resv_blocks_write; | 49 | int blocksneeded = c->resv_blocks_write; |
@@ -85,12 +88,12 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs | |||
85 | up(&c->alloc_sem); | 88 | up(&c->alloc_sem); |
86 | return -ENOSPC; | 89 | return -ENOSPC; |
87 | } | 90 | } |
88 | 91 | ||
89 | /* Calc possibly available space. Possibly available means that we | 92 | /* Calc possibly available space. Possibly available means that we |
90 | * don't know, if unchecked size contains obsoleted nodes, which could give us some | 93 | * don't know, if unchecked size contains obsoleted nodes, which could give us some |
91 | * more usable space. This will affect the sum only once, as gc first finishes checking | 94 | * more usable space. This will affect the sum only once, as gc first finishes checking |
92 | * of nodes. | 95 | * of nodes. |
93 | + Return -ENOSPC, if the maximum possibly available space is less or equal than | 96 | + Return -ENOSPC, if the maximum possibly available space is less or equal than |
94 | * blocksneeded * sector_size. | 97 | * blocksneeded * sector_size. |
95 | * This blocks endless gc looping on a filesystem, which is nearly full, even if | 98 | * This blocks endless gc looping on a filesystem, which is nearly full, even if |
96 | * the check above passes. | 99 | * the check above passes. |
@@ -115,7 +118,7 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs | |||
115 | c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size, | 118 | c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size, |
116 | c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size)); | 119 | c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size)); |
117 | spin_unlock(&c->erase_completion_lock); | 120 | spin_unlock(&c->erase_completion_lock); |
118 | 121 | ||
119 | ret = jffs2_garbage_collect_pass(c); | 122 | ret = jffs2_garbage_collect_pass(c); |
120 | if (ret) | 123 | if (ret) |
121 | return ret; | 124 | return ret; |
@@ -129,7 +132,7 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs | |||
129 | spin_lock(&c->erase_completion_lock); | 132 | spin_lock(&c->erase_completion_lock); |
130 | } | 133 | } |
131 | 134 | ||
132 | ret = jffs2_do_reserve_space(c, minsize, ofs, len); | 135 | ret = jffs2_do_reserve_space(c, minsize, ofs, len, sumsize); |
133 | if (ret) { | 136 | if (ret) { |
134 | D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret)); | 137 | D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret)); |
135 | } | 138 | } |
@@ -140,7 +143,8 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs | |||
140 | return ret; | 143 | return ret; |
141 | } | 144 | } |
142 | 145 | ||
143 | int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len) | 146 | int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, |
147 | uint32_t *len, uint32_t sumsize) | ||
144 | { | 148 | { |
145 | int ret = -EAGAIN; | 149 | int ret = -EAGAIN; |
146 | minsize = PAD(minsize); | 150 | minsize = PAD(minsize); |
@@ -149,7 +153,7 @@ int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t * | |||
149 | 153 | ||
150 | spin_lock(&c->erase_completion_lock); | 154 | spin_lock(&c->erase_completion_lock); |
151 | while(ret == -EAGAIN) { | 155 | while(ret == -EAGAIN) { |
152 | ret = jffs2_do_reserve_space(c, minsize, ofs, len); | 156 | ret = jffs2_do_reserve_space(c, minsize, ofs, len, sumsize); |
153 | if (ret) { | 157 | if (ret) { |
154 | D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret)); | 158 | D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret)); |
155 | } | 159 | } |
@@ -158,105 +162,185 @@ int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t * | |||
158 | return ret; | 162 | return ret; |
159 | } | 163 | } |
160 | 164 | ||
161 | /* Called with alloc sem _and_ erase_completion_lock */ | 165 | |
162 | static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len) | 166 | /* Classify nextblock (clean, dirty or verydirty) and force to select another one */ |
167 | |||
168 | static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) | ||
163 | { | 169 | { |
164 | struct jffs2_eraseblock *jeb = c->nextblock; | 170 | |
165 | 171 | /* Check, if we have a dirty block now, or if it was dirty already */ | |
166 | restart: | 172 | if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) { |
167 | if (jeb && minsize > jeb->free_size) { | 173 | c->dirty_size += jeb->wasted_size; |
168 | /* Skip the end of this block and file it as having some dirty space */ | 174 | c->wasted_size -= jeb->wasted_size; |
169 | /* If there's a pending write to it, flush now */ | 175 | jeb->dirty_size += jeb->wasted_size; |
170 | if (jffs2_wbuf_dirty(c)) { | 176 | jeb->wasted_size = 0; |
177 | if (VERYDIRTY(c, jeb->dirty_size)) { | ||
178 | D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", | ||
179 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); | ||
180 | list_add_tail(&jeb->list, &c->very_dirty_list); | ||
181 | } else { | ||
182 | D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", | ||
183 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); | ||
184 | list_add_tail(&jeb->list, &c->dirty_list); | ||
185 | } | ||
186 | } else { | ||
187 | D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", | ||
188 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); | ||
189 | list_add_tail(&jeb->list, &c->clean_list); | ||
190 | } | ||
191 | c->nextblock = NULL; | ||
192 | |||
193 | } | ||
194 | |||
195 | /* Select a new jeb for nextblock */ | ||
196 | |||
197 | static int jffs2_find_nextblock(struct jffs2_sb_info *c) | ||
198 | { | ||
199 | struct list_head *next; | ||
200 | |||
201 | /* Take the next block off the 'free' list */ | ||
202 | |||
203 | if (list_empty(&c->free_list)) { | ||
204 | |||
205 | if (!c->nr_erasing_blocks && | ||
206 | !list_empty(&c->erasable_list)) { | ||
207 | struct jffs2_eraseblock *ejeb; | ||
208 | |||
209 | ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list); | ||
210 | list_del(&ejeb->list); | ||
211 | list_add_tail(&ejeb->list, &c->erase_pending_list); | ||
212 | c->nr_erasing_blocks++; | ||
213 | jffs2_erase_pending_trigger(c); | ||
214 | D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n", | ||
215 | ejeb->offset)); | ||
216 | } | ||
217 | |||
218 | if (!c->nr_erasing_blocks && | ||
219 | !list_empty(&c->erasable_pending_wbuf_list)) { | ||
220 | D1(printk(KERN_DEBUG "jffs2_find_nextblock: Flushing write buffer\n")); | ||
221 | /* c->nextblock is NULL, no update to c->nextblock allowed */ | ||
171 | spin_unlock(&c->erase_completion_lock); | 222 | spin_unlock(&c->erase_completion_lock); |
172 | D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n")); | ||
173 | jffs2_flush_wbuf_pad(c); | 223 | jffs2_flush_wbuf_pad(c); |
174 | spin_lock(&c->erase_completion_lock); | 224 | spin_lock(&c->erase_completion_lock); |
175 | jeb = c->nextblock; | 225 | /* Have another go. It'll be on the erasable_list now */ |
176 | goto restart; | 226 | return -EAGAIN; |
177 | } | 227 | } |
178 | c->wasted_size += jeb->free_size; | 228 | |
179 | c->free_size -= jeb->free_size; | 229 | if (!c->nr_erasing_blocks) { |
180 | jeb->wasted_size += jeb->free_size; | 230 | /* Ouch. We're in GC, or we wouldn't have got here. |
181 | jeb->free_size = 0; | 231 | And there's no space left. At all. */ |
182 | 232 | printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n", | |
183 | /* Check, if we have a dirty block now, or if it was dirty already */ | 233 | c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no", |
184 | if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) { | 234 | list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no"); |
185 | c->dirty_size += jeb->wasted_size; | 235 | return -ENOSPC; |
186 | c->wasted_size -= jeb->wasted_size; | ||
187 | jeb->dirty_size += jeb->wasted_size; | ||
188 | jeb->wasted_size = 0; | ||
189 | if (VERYDIRTY(c, jeb->dirty_size)) { | ||
190 | D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", | ||
191 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); | ||
192 | list_add_tail(&jeb->list, &c->very_dirty_list); | ||
193 | } else { | ||
194 | D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", | ||
195 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); | ||
196 | list_add_tail(&jeb->list, &c->dirty_list); | ||
197 | } | ||
198 | } else { | ||
199 | D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", | ||
200 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); | ||
201 | list_add_tail(&jeb->list, &c->clean_list); | ||
202 | } | 236 | } |
203 | c->nextblock = jeb = NULL; | 237 | |
238 | spin_unlock(&c->erase_completion_lock); | ||
239 | /* Don't wait for it; just erase one right now */ | ||
240 | jffs2_erase_pending_blocks(c, 1); | ||
241 | spin_lock(&c->erase_completion_lock); | ||
242 | |||
243 | /* An erase may have failed, decreasing the | ||
244 | amount of free space available. So we must | ||
245 | restart from the beginning */ | ||
246 | return -EAGAIN; | ||
204 | } | 247 | } |
205 | |||
206 | if (!jeb) { | ||
207 | struct list_head *next; | ||
208 | /* Take the next block off the 'free' list */ | ||
209 | 248 | ||
210 | if (list_empty(&c->free_list)) { | 249 | next = c->free_list.next; |
250 | list_del(next); | ||
251 | c->nextblock = list_entry(next, struct jffs2_eraseblock, list); | ||
252 | c->nr_free_blocks--; | ||
211 | 253 | ||
212 | if (!c->nr_erasing_blocks && | 254 | jffs2_sum_reset_collected(c->summary); /* reset collected summary */ |
213 | !list_empty(&c->erasable_list)) { | ||
214 | struct jffs2_eraseblock *ejeb; | ||
215 | 255 | ||
216 | ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list); | 256 | D1(printk(KERN_DEBUG "jffs2_find_nextblock(): new nextblock = 0x%08x\n", c->nextblock->offset)); |
217 | list_del(&ejeb->list); | 257 | |
218 | list_add_tail(&ejeb->list, &c->erase_pending_list); | 258 | return 0; |
219 | c->nr_erasing_blocks++; | 259 | } |
220 | jffs2_erase_pending_trigger(c); | 260 | |
221 | D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Triggering erase of erasable block at 0x%08x\n", | 261 | /* Called with alloc sem _and_ erase_completion_lock */ |
222 | ejeb->offset)); | 262 | static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, uint32_t sumsize) |
263 | { | ||
264 | struct jffs2_eraseblock *jeb = c->nextblock; | ||
265 | uint32_t reserved_size; /* for summary information at the end of the jeb */ | ||
266 | int ret; | ||
267 | |||
268 | restart: | ||
269 | reserved_size = 0; | ||
270 | |||
271 | if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) { | ||
272 | /* NOSUM_SIZE means not to generate summary */ | ||
273 | |||
274 | if (jeb) { | ||
275 | reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE); | ||
276 | dbg_summary("minsize=%d , jeb->free=%d ," | ||
277 | "summary->size=%d , sumsize=%d\n", | ||
278 | minsize, jeb->free_size, | ||
279 | c->summary->sum_size, sumsize); | ||
280 | } | ||
281 | |||
282 | /* Is there enough space for writing out the current node, or we have to | ||
283 | write out summary information now, close this jeb and select new nextblock? */ | ||
284 | if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize + | ||
285 | JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) { | ||
286 | |||
287 | /* Has summary been disabled for this jeb? */ | ||
288 | if (jffs2_sum_is_disabled(c->summary)) { | ||
289 | sumsize = JFFS2_SUMMARY_NOSUM_SIZE; | ||
290 | goto restart; | ||
223 | } | 291 | } |
224 | 292 | ||
225 | if (!c->nr_erasing_blocks && | 293 | /* Writing out the collected summary information */ |
226 | !list_empty(&c->erasable_pending_wbuf_list)) { | 294 | dbg_summary("generating summary for 0x%08x.\n", jeb->offset); |
227 | D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n")); | 295 | ret = jffs2_sum_write_sumnode(c); |
228 | /* c->nextblock is NULL, no update to c->nextblock allowed */ | 296 | |
297 | if (ret) | ||
298 | return ret; | ||
299 | |||
300 | if (jffs2_sum_is_disabled(c->summary)) { | ||
301 | /* jffs2_write_sumnode() couldn't write out the summary information | ||
302 | disabling summary for this jeb and freeing the collected information | ||
303 | */ | ||
304 | sumsize = JFFS2_SUMMARY_NOSUM_SIZE; | ||
305 | goto restart; | ||
306 | } | ||
307 | |||
308 | jffs2_close_nextblock(c, jeb); | ||
309 | jeb = NULL; | ||
310 | /* keep always valid value in reserved_size */ | ||
311 | reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE); | ||
312 | } | ||
313 | } else { | ||
314 | if (jeb && minsize > jeb->free_size) { | ||
315 | /* Skip the end of this block and file it as having some dirty space */ | ||
316 | /* If there's a pending write to it, flush now */ | ||
317 | |||
318 | if (jffs2_wbuf_dirty(c)) { | ||
229 | spin_unlock(&c->erase_completion_lock); | 319 | spin_unlock(&c->erase_completion_lock); |
320 | D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n")); | ||
230 | jffs2_flush_wbuf_pad(c); | 321 | jffs2_flush_wbuf_pad(c); |
231 | spin_lock(&c->erase_completion_lock); | 322 | spin_lock(&c->erase_completion_lock); |
232 | /* Have another go. It'll be on the erasable_list now */ | 323 | jeb = c->nextblock; |
233 | return -EAGAIN; | 324 | goto restart; |
234 | } | 325 | } |
235 | 326 | ||
236 | if (!c->nr_erasing_blocks) { | 327 | c->wasted_size += jeb->free_size; |
237 | /* Ouch. We're in GC, or we wouldn't have got here. | 328 | c->free_size -= jeb->free_size; |
238 | And there's no space left. At all. */ | 329 | jeb->wasted_size += jeb->free_size; |
239 | printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n", | 330 | jeb->free_size = 0; |
240 | c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no", | ||
241 | list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no"); | ||
242 | return -ENOSPC; | ||
243 | } | ||
244 | |||
245 | spin_unlock(&c->erase_completion_lock); | ||
246 | /* Don't wait for it; just erase one right now */ | ||
247 | jffs2_erase_pending_blocks(c, 1); | ||
248 | spin_lock(&c->erase_completion_lock); | ||
249 | 331 | ||
250 | /* An erase may have failed, decreasing the | 332 | jffs2_close_nextblock(c, jeb); |
251 | amount of free space available. So we must | 333 | jeb = NULL; |
252 | restart from the beginning */ | ||
253 | return -EAGAIN; | ||
254 | } | 334 | } |
335 | } | ||
336 | |||
337 | if (!jeb) { | ||
255 | 338 | ||
256 | next = c->free_list.next; | 339 | ret = jffs2_find_nextblock(c); |
257 | list_del(next); | 340 | if (ret) |
258 | c->nextblock = jeb = list_entry(next, struct jffs2_eraseblock, list); | 341 | return ret; |
259 | c->nr_free_blocks--; | 342 | |
343 | jeb = c->nextblock; | ||
260 | 344 | ||
261 | if (jeb->free_size != c->sector_size - c->cleanmarker_size) { | 345 | if (jeb->free_size != c->sector_size - c->cleanmarker_size) { |
262 | printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size); | 346 | printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size); |
@@ -266,13 +350,13 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, ui | |||
266 | /* OK, jeb (==c->nextblock) is now pointing at a block which definitely has | 350 | /* OK, jeb (==c->nextblock) is now pointing at a block which definitely has |
267 | enough space */ | 351 | enough space */ |
268 | *ofs = jeb->offset + (c->sector_size - jeb->free_size); | 352 | *ofs = jeb->offset + (c->sector_size - jeb->free_size); |
269 | *len = jeb->free_size; | 353 | *len = jeb->free_size - reserved_size; |
270 | 354 | ||
271 | if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size && | 355 | if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size && |
272 | !jeb->first_node->next_in_ino) { | 356 | !jeb->first_node->next_in_ino) { |
273 | /* Only node in it beforehand was a CLEANMARKER node (we think). | 357 | /* Only node in it beforehand was a CLEANMARKER node (we think). |
274 | So mark it obsolete now that there's going to be another node | 358 | So mark it obsolete now that there's going to be another node |
275 | in the block. This will reduce used_size to zero but We've | 359 | in the block. This will reduce used_size to zero but We've |
276 | already set c->nextblock so that jffs2_mark_node_obsolete() | 360 | already set c->nextblock so that jffs2_mark_node_obsolete() |
277 | won't try to refile it to the dirty_list. | 361 | won't try to refile it to the dirty_list. |
278 | */ | 362 | */ |
@@ -292,12 +376,12 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, ui | |||
292 | * @len: length of this physical node | 376 | * @len: length of this physical node |
293 | * @dirty: dirty flag for new node | 377 | * @dirty: dirty flag for new node |
294 | * | 378 | * |
295 | * Should only be used to report nodes for which space has been allocated | 379 | * Should only be used to report nodes for which space has been allocated |
296 | * by jffs2_reserve_space. | 380 | * by jffs2_reserve_space. |
297 | * | 381 | * |
298 | * Must be called with the alloc_sem held. | 382 | * Must be called with the alloc_sem held. |
299 | */ | 383 | */ |
300 | 384 | ||
301 | int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new) | 385 | int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new) |
302 | { | 386 | { |
303 | struct jffs2_eraseblock *jeb; | 387 | struct jffs2_eraseblock *jeb; |
@@ -349,8 +433,8 @@ int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_r | |||
349 | list_add_tail(&jeb->list, &c->clean_list); | 433 | list_add_tail(&jeb->list, &c->clean_list); |
350 | c->nextblock = NULL; | 434 | c->nextblock = NULL; |
351 | } | 435 | } |
352 | ACCT_SANITY_CHECK(c,jeb); | 436 | jffs2_dbg_acct_sanity_check_nolock(c,jeb); |
353 | D1(ACCT_PARANOIA_CHECK(jeb)); | 437 | jffs2_dbg_acct_paranoia_check_nolock(c, jeb); |
354 | 438 | ||
355 | spin_unlock(&c->erase_completion_lock); | 439 | spin_unlock(&c->erase_completion_lock); |
356 | 440 | ||
@@ -404,8 +488,8 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
404 | 488 | ||
405 | if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) && | 489 | if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) && |
406 | !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) { | 490 | !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) { |
407 | /* Hm. This may confuse static lock analysis. If any of the above | 491 | /* Hm. This may confuse static lock analysis. If any of the above |
408 | three conditions is false, we're going to return from this | 492 | three conditions is false, we're going to return from this |
409 | function without actually obliterating any nodes or freeing | 493 | function without actually obliterating any nodes or freeing |
410 | any jffs2_raw_node_refs. So we don't need to stop erases from | 494 | any jffs2_raw_node_refs. So we don't need to stop erases from |
411 | happening, or protect against people holding an obsolete | 495 | happening, or protect against people holding an obsolete |
@@ -430,7 +514,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
430 | ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size); | 514 | ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size); |
431 | BUG(); | 515 | BUG(); |
432 | }) | 516 | }) |
433 | D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref))); | 517 | D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), ref_totlen(c, jeb, ref))); |
434 | jeb->used_size -= ref_totlen(c, jeb, ref); | 518 | jeb->used_size -= ref_totlen(c, jeb, ref); |
435 | c->used_size -= ref_totlen(c, jeb, ref); | 519 | c->used_size -= ref_totlen(c, jeb, ref); |
436 | } | 520 | } |
@@ -462,18 +546,17 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
462 | D1(printk(KERN_DEBUG "Wasting\n")); | 546 | D1(printk(KERN_DEBUG "Wasting\n")); |
463 | addedsize = 0; | 547 | addedsize = 0; |
464 | jeb->wasted_size += ref_totlen(c, jeb, ref); | 548 | jeb->wasted_size += ref_totlen(c, jeb, ref); |
465 | c->wasted_size += ref_totlen(c, jeb, ref); | 549 | c->wasted_size += ref_totlen(c, jeb, ref); |
466 | } | 550 | } |
467 | ref->flash_offset = ref_offset(ref) | REF_OBSOLETE; | 551 | ref->flash_offset = ref_offset(ref) | REF_OBSOLETE; |
468 | |||
469 | ACCT_SANITY_CHECK(c, jeb); | ||
470 | 552 | ||
471 | D1(ACCT_PARANOIA_CHECK(jeb)); | 553 | jffs2_dbg_acct_sanity_check_nolock(c, jeb); |
554 | jffs2_dbg_acct_paranoia_check_nolock(c, jeb); | ||
472 | 555 | ||
473 | if (c->flags & JFFS2_SB_FLAG_SCANNING) { | 556 | if (c->flags & JFFS2_SB_FLAG_SCANNING) { |
474 | /* Flash scanning is in progress. Don't muck about with the block | 557 | /* Flash scanning is in progress. Don't muck about with the block |
475 | lists because they're not ready yet, and don't actually | 558 | lists because they're not ready yet, and don't actually |
476 | obliterate nodes that look obsolete. If they weren't | 559 | obliterate nodes that look obsolete. If they weren't |
477 | marked obsolete on the flash at the time they _became_ | 560 | marked obsolete on the flash at the time they _became_ |
478 | obsolete, there was probably a reason for that. */ | 561 | obsolete, there was probably a reason for that. */ |
479 | spin_unlock(&c->erase_completion_lock); | 562 | spin_unlock(&c->erase_completion_lock); |
@@ -507,7 +590,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
507 | immediately reused, and we spread the load a bit. */ | 590 | immediately reused, and we spread the load a bit. */ |
508 | D1(printk(KERN_DEBUG "...and adding to erasable_list\n")); | 591 | D1(printk(KERN_DEBUG "...and adding to erasable_list\n")); |
509 | list_add_tail(&jeb->list, &c->erasable_list); | 592 | list_add_tail(&jeb->list, &c->erasable_list); |
510 | } | 593 | } |
511 | } | 594 | } |
512 | D1(printk(KERN_DEBUG "Done OK\n")); | 595 | D1(printk(KERN_DEBUG "Done OK\n")); |
513 | } else if (jeb == c->gcblock) { | 596 | } else if (jeb == c->gcblock) { |
@@ -525,8 +608,8 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
525 | list_add_tail(&jeb->list, &c->very_dirty_list); | 608 | list_add_tail(&jeb->list, &c->very_dirty_list); |
526 | } else { | 609 | } else { |
527 | D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n", | 610 | D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n", |
528 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); | 611 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); |
529 | } | 612 | } |
530 | 613 | ||
531 | spin_unlock(&c->erase_completion_lock); | 614 | spin_unlock(&c->erase_completion_lock); |
532 | 615 | ||
@@ -573,11 +656,11 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
573 | 656 | ||
574 | /* Nodes which have been marked obsolete no longer need to be | 657 | /* Nodes which have been marked obsolete no longer need to be |
575 | associated with any inode. Remove them from the per-inode list. | 658 | associated with any inode. Remove them from the per-inode list. |
576 | 659 | ||
577 | Note we can't do this for NAND at the moment because we need | 660 | Note we can't do this for NAND at the moment because we need |
578 | obsolete dirent nodes to stay on the lists, because of the | 661 | obsolete dirent nodes to stay on the lists, because of the |
579 | horridness in jffs2_garbage_collect_deletion_dirent(). Also | 662 | horridness in jffs2_garbage_collect_deletion_dirent(). Also |
580 | because we delete the inocache, and on NAND we need that to | 663 | because we delete the inocache, and on NAND we need that to |
581 | stay around until all the nodes are actually erased, in order | 664 | stay around until all the nodes are actually erased, in order |
582 | to stop us from giving the same inode number to another newly | 665 | to stop us from giving the same inode number to another newly |
583 | created inode. */ | 666 | created inode. */ |
@@ -606,7 +689,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
606 | if (ref->next_phys && ref_obsolete(ref->next_phys) && | 689 | if (ref->next_phys && ref_obsolete(ref->next_phys) && |
607 | !ref->next_phys->next_in_ino) { | 690 | !ref->next_phys->next_in_ino) { |
608 | struct jffs2_raw_node_ref *n = ref->next_phys; | 691 | struct jffs2_raw_node_ref *n = ref->next_phys; |
609 | 692 | ||
610 | spin_lock(&c->erase_completion_lock); | 693 | spin_lock(&c->erase_completion_lock); |
611 | 694 | ||
612 | ref->__totlen += n->__totlen; | 695 | ref->__totlen += n->__totlen; |
@@ -620,7 +703,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
620 | 703 | ||
621 | jffs2_free_raw_node_ref(n); | 704 | jffs2_free_raw_node_ref(n); |
622 | } | 705 | } |
623 | 706 | ||
624 | /* Also merge with the previous node in the list, if there is one | 707 | /* Also merge with the previous node in the list, if there is one |
625 | and that one is obsolete */ | 708 | and that one is obsolete */ |
626 | if (ref != jeb->first_node ) { | 709 | if (ref != jeb->first_node ) { |
@@ -630,7 +713,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
630 | 713 | ||
631 | while (p->next_phys != ref) | 714 | while (p->next_phys != ref) |
632 | p = p->next_phys; | 715 | p = p->next_phys; |
633 | 716 | ||
634 | if (ref_obsolete(p) && !ref->next_in_ino) { | 717 | if (ref_obsolete(p) && !ref->next_in_ino) { |
635 | p->__totlen += ref->__totlen; | 718 | p->__totlen += ref->__totlen; |
636 | if (jeb->last_node == ref) { | 719 | if (jeb->last_node == ref) { |
@@ -649,164 +732,6 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
649 | up(&c->erase_free_sem); | 732 | up(&c->erase_free_sem); |
650 | } | 733 | } |
651 | 734 | ||
652 | #if CONFIG_JFFS2_FS_DEBUG >= 2 | ||
653 | void jffs2_dump_block_lists(struct jffs2_sb_info *c) | ||
654 | { | ||
655 | |||
656 | |||
657 | printk(KERN_DEBUG "jffs2_dump_block_lists:\n"); | ||
658 | printk(KERN_DEBUG "flash_size: %08x\n", c->flash_size); | ||
659 | printk(KERN_DEBUG "used_size: %08x\n", c->used_size); | ||
660 | printk(KERN_DEBUG "dirty_size: %08x\n", c->dirty_size); | ||
661 | printk(KERN_DEBUG "wasted_size: %08x\n", c->wasted_size); | ||
662 | printk(KERN_DEBUG "unchecked_size: %08x\n", c->unchecked_size); | ||
663 | printk(KERN_DEBUG "free_size: %08x\n", c->free_size); | ||
664 | printk(KERN_DEBUG "erasing_size: %08x\n", c->erasing_size); | ||
665 | printk(KERN_DEBUG "bad_size: %08x\n", c->bad_size); | ||
666 | printk(KERN_DEBUG "sector_size: %08x\n", c->sector_size); | ||
667 | printk(KERN_DEBUG "jffs2_reserved_blocks size: %08x\n",c->sector_size * c->resv_blocks_write); | ||
668 | |||
669 | if (c->nextblock) { | ||
670 | printk(KERN_DEBUG "nextblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", | ||
671 | c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->unchecked_size, c->nextblock->free_size); | ||
672 | } else { | ||
673 | printk(KERN_DEBUG "nextblock: NULL\n"); | ||
674 | } | ||
675 | if (c->gcblock) { | ||
676 | printk(KERN_DEBUG "gcblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", | ||
677 | c->gcblock->offset, c->gcblock->used_size, c->gcblock->dirty_size, c->gcblock->wasted_size, c->gcblock->unchecked_size, c->gcblock->free_size); | ||
678 | } else { | ||
679 | printk(KERN_DEBUG "gcblock: NULL\n"); | ||
680 | } | ||
681 | if (list_empty(&c->clean_list)) { | ||
682 | printk(KERN_DEBUG "clean_list: empty\n"); | ||
683 | } else { | ||
684 | struct list_head *this; | ||
685 | int numblocks = 0; | ||
686 | uint32_t dirty = 0; | ||
687 | |||
688 | list_for_each(this, &c->clean_list) { | ||
689 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
690 | numblocks ++; | ||
691 | dirty += jeb->wasted_size; | ||
692 | printk(KERN_DEBUG "clean_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); | ||
693 | } | ||
694 | printk (KERN_DEBUG "Contains %d blocks with total wasted size %u, average wasted size: %u\n", numblocks, dirty, dirty / numblocks); | ||
695 | } | ||
696 | if (list_empty(&c->very_dirty_list)) { | ||
697 | printk(KERN_DEBUG "very_dirty_list: empty\n"); | ||
698 | } else { | ||
699 | struct list_head *this; | ||
700 | int numblocks = 0; | ||
701 | uint32_t dirty = 0; | ||
702 | |||
703 | list_for_each(this, &c->very_dirty_list) { | ||
704 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
705 | numblocks ++; | ||
706 | dirty += jeb->dirty_size; | ||
707 | printk(KERN_DEBUG "very_dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", | ||
708 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); | ||
709 | } | ||
710 | printk (KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n", | ||
711 | numblocks, dirty, dirty / numblocks); | ||
712 | } | ||
713 | if (list_empty(&c->dirty_list)) { | ||
714 | printk(KERN_DEBUG "dirty_list: empty\n"); | ||
715 | } else { | ||
716 | struct list_head *this; | ||
717 | int numblocks = 0; | ||
718 | uint32_t dirty = 0; | ||
719 | |||
720 | list_for_each(this, &c->dirty_list) { | ||
721 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
722 | numblocks ++; | ||
723 | dirty += jeb->dirty_size; | ||
724 | printk(KERN_DEBUG "dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", | ||
725 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); | ||
726 | } | ||
727 | printk (KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n", | ||
728 | numblocks, dirty, dirty / numblocks); | ||
729 | } | ||
730 | if (list_empty(&c->erasable_list)) { | ||
731 | printk(KERN_DEBUG "erasable_list: empty\n"); | ||
732 | } else { | ||
733 | struct list_head *this; | ||
734 | |||
735 | list_for_each(this, &c->erasable_list) { | ||
736 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
737 | printk(KERN_DEBUG "erasable_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", | ||
738 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); | ||
739 | } | ||
740 | } | ||
741 | if (list_empty(&c->erasing_list)) { | ||
742 | printk(KERN_DEBUG "erasing_list: empty\n"); | ||
743 | } else { | ||
744 | struct list_head *this; | ||
745 | |||
746 | list_for_each(this, &c->erasing_list) { | ||
747 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
748 | printk(KERN_DEBUG "erasing_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", | ||
749 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); | ||
750 | } | ||
751 | } | ||
752 | if (list_empty(&c->erase_pending_list)) { | ||
753 | printk(KERN_DEBUG "erase_pending_list: empty\n"); | ||
754 | } else { | ||
755 | struct list_head *this; | ||
756 | |||
757 | list_for_each(this, &c->erase_pending_list) { | ||
758 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
759 | printk(KERN_DEBUG "erase_pending_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", | ||
760 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); | ||
761 | } | ||
762 | } | ||
763 | if (list_empty(&c->erasable_pending_wbuf_list)) { | ||
764 | printk(KERN_DEBUG "erasable_pending_wbuf_list: empty\n"); | ||
765 | } else { | ||
766 | struct list_head *this; | ||
767 | |||
768 | list_for_each(this, &c->erasable_pending_wbuf_list) { | ||
769 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
770 | printk(KERN_DEBUG "erasable_pending_wbuf_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", | ||
771 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); | ||
772 | } | ||
773 | } | ||
774 | if (list_empty(&c->free_list)) { | ||
775 | printk(KERN_DEBUG "free_list: empty\n"); | ||
776 | } else { | ||
777 | struct list_head *this; | ||
778 | |||
779 | list_for_each(this, &c->free_list) { | ||
780 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
781 | printk(KERN_DEBUG "free_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", | ||
782 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); | ||
783 | } | ||
784 | } | ||
785 | if (list_empty(&c->bad_list)) { | ||
786 | printk(KERN_DEBUG "bad_list: empty\n"); | ||
787 | } else { | ||
788 | struct list_head *this; | ||
789 | |||
790 | list_for_each(this, &c->bad_list) { | ||
791 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
792 | printk(KERN_DEBUG "bad_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", | ||
793 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); | ||
794 | } | ||
795 | } | ||
796 | if (list_empty(&c->bad_used_list)) { | ||
797 | printk(KERN_DEBUG "bad_used_list: empty\n"); | ||
798 | } else { | ||
799 | struct list_head *this; | ||
800 | |||
801 | list_for_each(this, &c->bad_used_list) { | ||
802 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
803 | printk(KERN_DEBUG "bad_used_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", | ||
804 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); | ||
805 | } | ||
806 | } | ||
807 | } | ||
808 | #endif /* CONFIG_JFFS2_FS_DEBUG */ | ||
809 | |||
810 | int jffs2_thread_should_wake(struct jffs2_sb_info *c) | 735 | int jffs2_thread_should_wake(struct jffs2_sb_info *c) |
811 | { | 736 | { |
812 | int ret = 0; | 737 | int ret = 0; |
@@ -828,11 +753,11 @@ int jffs2_thread_should_wake(struct jffs2_sb_info *c) | |||
828 | */ | 753 | */ |
829 | dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size; | 754 | dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size; |
830 | 755 | ||
831 | if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger && | 756 | if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger && |
832 | (dirty > c->nospc_dirty_size)) | 757 | (dirty > c->nospc_dirty_size)) |
833 | ret = 1; | 758 | ret = 1; |
834 | 759 | ||
835 | D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n", | 760 | D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n", |
836 | c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no")); | 761 | c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no")); |
837 | 762 | ||
838 | return ret; | 763 | return ret; |
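The jffs2_do_reserve_space() rework above splits out jffs2_close_nextblock() and jffs2_find_nextblock(), and, when summary collection is active, holds back room at the end of nextblock for the summary node that will eventually be written there. A rough sketch of that space check, using the same PAD()/JFFS2_SUMMARY_FRAME_SIZE macros as the hunk above; the helper name is purely illustrative:

    /* Illustrative restatement of the check in jffs2_do_reserve_space():
       close the current nextblock and pick a new one when the node being
       written plus the (growing) summary record would no longer fit. */
    static int summary_wont_fit(struct jffs2_sb_info *c,
                                struct jffs2_eraseblock *jeb,
                                uint32_t minsize, uint32_t sumsize)
    {
            uint32_t summary_room = PAD(c->summary->sum_size + sumsize +
                                        JFFS2_SUMMARY_FRAME_SIZE);

            return PAD(minsize) + summary_room > jeb->free_size;
    }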
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h index d900c8929b09..59e7a393200c 100644 --- a/fs/jffs2/os-linux.h +++ b/fs/jffs2/os-linux.h | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: os-linux.h,v 1.58 2005/07/12 02:34:35 tpoynor Exp $ | 10 | * $Id: os-linux.h,v 1.64 2005/09/30 13:59:13 dedekind Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -57,6 +57,7 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f) | |||
57 | f->fragtree = RB_ROOT; | 57 | f->fragtree = RB_ROOT; |
58 | f->metadata = NULL; | 58 | f->metadata = NULL; |
59 | f->dents = NULL; | 59 | f->dents = NULL; |
60 | f->target = NULL; | ||
60 | f->flags = 0; | 61 | f->flags = 0; |
61 | f->usercompr = 0; | 62 | f->usercompr = 0; |
62 | } | 63 | } |
@@ -64,17 +65,24 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f) | |||
64 | 65 | ||
65 | #define jffs2_is_readonly(c) (OFNI_BS_2SFFJ(c)->s_flags & MS_RDONLY) | 66 | #define jffs2_is_readonly(c) (OFNI_BS_2SFFJ(c)->s_flags & MS_RDONLY) |
66 | 67 | ||
68 | #define SECTOR_ADDR(x) ( (((unsigned long)(x) / c->sector_size) * c->sector_size) ) | ||
67 | #ifndef CONFIG_JFFS2_FS_WRITEBUFFER | 69 | #ifndef CONFIG_JFFS2_FS_WRITEBUFFER |
68 | #define SECTOR_ADDR(x) ( ((unsigned long)(x) & ~(c->sector_size-1)) ) | 70 | |
71 | |||
72 | #ifdef CONFIG_JFFS2_SUMMARY | ||
73 | #define jffs2_can_mark_obsolete(c) (0) | ||
74 | #else | ||
69 | #define jffs2_can_mark_obsolete(c) (1) | 75 | #define jffs2_can_mark_obsolete(c) (1) |
76 | #endif | ||
77 | |||
70 | #define jffs2_is_writebuffered(c) (0) | 78 | #define jffs2_is_writebuffered(c) (0) |
71 | #define jffs2_cleanmarker_oob(c) (0) | 79 | #define jffs2_cleanmarker_oob(c) (0) |
72 | #define jffs2_write_nand_cleanmarker(c,jeb) (-EIO) | 80 | #define jffs2_write_nand_cleanmarker(c,jeb) (-EIO) |
73 | 81 | ||
74 | #define jffs2_flash_write(c, ofs, len, retlen, buf) ((c)->mtd->write((c)->mtd, ofs, len, retlen, buf)) | 82 | #define jffs2_flash_write(c, ofs, len, retlen, buf) jffs2_flash_direct_write(c, ofs, len, retlen, buf) |
75 | #define jffs2_flash_read(c, ofs, len, retlen, buf) ((c)->mtd->read((c)->mtd, ofs, len, retlen, buf)) | 83 | #define jffs2_flash_read(c, ofs, len, retlen, buf) ((c)->mtd->read((c)->mtd, ofs, len, retlen, buf)) |
76 | #define jffs2_flush_wbuf_pad(c) ({ (void)(c), 0; }) | 84 | #define jffs2_flush_wbuf_pad(c) ({ do{} while(0); (void)(c), 0; }) |
77 | #define jffs2_flush_wbuf_gc(c, i) ({ (void)(c), (void) i, 0; }) | 85 | #define jffs2_flush_wbuf_gc(c, i) ({ do{} while(0); (void)(c), (void) i, 0; }) |
78 | #define jffs2_write_nand_badblock(c,jeb,bad_offset) (1) | 86 | #define jffs2_write_nand_badblock(c,jeb,bad_offset) (1) |
79 | #define jffs2_nand_flash_setup(c) (0) | 87 | #define jffs2_nand_flash_setup(c) (0) |
80 | #define jffs2_nand_flash_cleanup(c) do {} while(0) | 88 | #define jffs2_nand_flash_cleanup(c) do {} while(0) |
@@ -84,16 +92,26 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f) | |||
84 | #define jffs2_wbuf_process NULL | 92 | #define jffs2_wbuf_process NULL |
85 | #define jffs2_nor_ecc(c) (0) | 93 | #define jffs2_nor_ecc(c) (0) |
86 | #define jffs2_dataflash(c) (0) | 94 | #define jffs2_dataflash(c) (0) |
95 | #define jffs2_nor_wbuf_flash(c) (0) | ||
87 | #define jffs2_nor_ecc_flash_setup(c) (0) | 96 | #define jffs2_nor_ecc_flash_setup(c) (0) |
88 | #define jffs2_nor_ecc_flash_cleanup(c) do {} while (0) | 97 | #define jffs2_nor_ecc_flash_cleanup(c) do {} while (0) |
89 | #define jffs2_dataflash_setup(c) (0) | 98 | #define jffs2_dataflash_setup(c) (0) |
90 | #define jffs2_dataflash_cleanup(c) do {} while (0) | 99 | #define jffs2_dataflash_cleanup(c) do {} while (0) |
100 | #define jffs2_nor_wbuf_flash_setup(c) (0) | ||
101 | #define jffs2_nor_wbuf_flash_cleanup(c) do {} while (0) | ||
91 | 102 | ||
92 | #else /* NAND and/or ECC'd NOR support present */ | 103 | #else /* NAND and/or ECC'd NOR support present */ |
93 | 104 | ||
94 | #define jffs2_is_writebuffered(c) (c->wbuf != NULL) | 105 | #define jffs2_is_writebuffered(c) (c->wbuf != NULL) |
95 | #define SECTOR_ADDR(x) ( ((unsigned long)(x) / (unsigned long)(c->sector_size)) * c->sector_size ) | 106 | |
96 | #define jffs2_can_mark_obsolete(c) ((c->mtd->type == MTD_NORFLASH && !(c->mtd->flags & MTD_ECC)) || c->mtd->type == MTD_RAM) | 107 | #ifdef CONFIG_JFFS2_SUMMARY |
108 | #define jffs2_can_mark_obsolete(c) (0) | ||
109 | #else | ||
110 | #define jffs2_can_mark_obsolete(c) \ | ||
111 | ((c->mtd->type == MTD_NORFLASH && !(c->mtd->flags & (MTD_ECC|MTD_PROGRAM_REGIONS))) || \ | ||
112 | c->mtd->type == MTD_RAM) | ||
113 | #endif | ||
114 | |||
97 | #define jffs2_cleanmarker_oob(c) (c->mtd->type == MTD_NANDFLASH) | 115 | #define jffs2_cleanmarker_oob(c) (c->mtd->type == MTD_NANDFLASH) |
98 | 116 | ||
99 | #define jffs2_flash_write_oob(c, ofs, len, retlen, buf) ((c)->mtd->write_oob((c)->mtd, ofs, len, retlen, buf)) | 117 | #define jffs2_flash_write_oob(c, ofs, len, retlen, buf) ((c)->mtd->write_oob((c)->mtd, ofs, len, retlen, buf)) |
@@ -123,6 +141,10 @@ void jffs2_nor_ecc_flash_cleanup(struct jffs2_sb_info *c); | |||
123 | int jffs2_dataflash_setup(struct jffs2_sb_info *c); | 141 | int jffs2_dataflash_setup(struct jffs2_sb_info *c); |
124 | void jffs2_dataflash_cleanup(struct jffs2_sb_info *c); | 142 | void jffs2_dataflash_cleanup(struct jffs2_sb_info *c); |
125 | 143 | ||
144 | #define jffs2_nor_wbuf_flash(c) (c->mtd->type == MTD_NORFLASH && (c->mtd->flags & MTD_PROGRAM_REGIONS)) | ||
145 | int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c); | ||
146 | void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c); | ||
147 | |||
126 | #endif /* WRITEBUFFER */ | 148 | #endif /* WRITEBUFFER */ |
127 | 149 | ||
128 | /* erase.c */ | 150 | /* erase.c */ |
@@ -169,20 +191,21 @@ void jffs2_gc_release_inode(struct jffs2_sb_info *c, | |||
169 | struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c, | 191 | struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c, |
170 | int inum, int nlink); | 192 | int inum, int nlink); |
171 | 193 | ||
172 | unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c, | 194 | unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c, |
173 | struct jffs2_inode_info *f, | 195 | struct jffs2_inode_info *f, |
174 | unsigned long offset, | 196 | unsigned long offset, |
175 | unsigned long *priv); | 197 | unsigned long *priv); |
176 | void jffs2_gc_release_page(struct jffs2_sb_info *c, | 198 | void jffs2_gc_release_page(struct jffs2_sb_info *c, |
177 | unsigned char *pg, | 199 | unsigned char *pg, |
178 | unsigned long *priv); | 200 | unsigned long *priv); |
179 | void jffs2_flash_cleanup(struct jffs2_sb_info *c); | 201 | void jffs2_flash_cleanup(struct jffs2_sb_info *c); |
180 | 202 | ||
181 | 203 | ||
182 | /* writev.c */ | 204 | /* writev.c */ |
183 | int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs, | 205 | int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs, |
184 | unsigned long count, loff_t to, size_t *retlen); | 206 | unsigned long count, loff_t to, size_t *retlen); |
185 | 207 | int jffs2_flash_direct_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, | |
208 | size_t *retlen, const u_char *buf); | ||
186 | 209 | ||
187 | #endif /* __JFFS2_OS_LINUX_H__ */ | 210 | #endif /* __JFFS2_OS_LINUX_H__ */ |
188 | 211 | ||
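One detail worth calling out in the os-linux.h hunk above: SECTOR_ADDR() now rounds down with a divide and multiply instead of masking, which stays correct when the erase block size is not a power of two (as on DataFlash). A small illustrative check, with the 0x2100 sector size chosen only as an example:

    /* With a non-power-of-two sector size, masking with (sector_size - 1)
       would not give the block start; divide-and-multiply does. */
    unsigned long sector_size = 0x2100;      /* example only */
    unsigned long ofs = 0x4321;
    unsigned long start = (ofs / sector_size) * sector_size;   /* 0x4200 */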
diff --git a/fs/jffs2/read.c b/fs/jffs2/read.c index c7f9068907cf..f3b86da833ba 100644 --- a/fs/jffs2/read.c +++ b/fs/jffs2/read.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: read.c,v 1.39 2005/03/01 10:34:03 dedekind Exp $ | 10 | * $Id: read.c,v 1.42 2005/11/07 11:14:41 gleixner Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -43,7 +43,7 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
43 | } | 43 | } |
44 | if (readlen != sizeof(*ri)) { | 44 | if (readlen != sizeof(*ri)) { |
45 | jffs2_free_raw_inode(ri); | 45 | jffs2_free_raw_inode(ri); |
46 | printk(KERN_WARNING "Short read from 0x%08x: wanted 0x%zx bytes, got 0x%zx\n", | 46 | printk(KERN_WARNING "Short read from 0x%08x: wanted 0x%zx bytes, got 0x%zx\n", |
47 | ref_offset(fd->raw), sizeof(*ri), readlen); | 47 | ref_offset(fd->raw), sizeof(*ri), readlen); |
48 | return -EIO; | 48 | return -EIO; |
49 | } | 49 | } |
@@ -61,7 +61,7 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
61 | } | 61 | } |
62 | /* There was a bug where we wrote hole nodes out with csize/dsize | 62 | /* There was a bug where we wrote hole nodes out with csize/dsize |
63 | swapped. Deal with it */ | 63 | swapped. Deal with it */ |
64 | if (ri->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(ri->dsize) && | 64 | if (ri->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(ri->dsize) && |
65 | je32_to_cpu(ri->csize)) { | 65 | je32_to_cpu(ri->csize)) { |
66 | ri->dsize = ri->csize; | 66 | ri->dsize = ri->csize; |
67 | ri->csize = cpu_to_je32(0); | 67 | ri->csize = cpu_to_je32(0); |
@@ -74,7 +74,7 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
74 | goto out_ri; | 74 | goto out_ri; |
75 | }); | 75 | }); |
76 | 76 | ||
77 | 77 | ||
78 | if (ri->compr == JFFS2_COMPR_ZERO) { | 78 | if (ri->compr == JFFS2_COMPR_ZERO) { |
79 | memset(buf, 0, len); | 79 | memset(buf, 0, len); |
80 | goto out_ri; | 80 | goto out_ri; |
@@ -82,8 +82,8 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
82 | 82 | ||
83 | /* Cases: | 83 | /* Cases: |
84 | Reading whole node and it's uncompressed - read directly to buffer provided, check CRC. | 84 | Reading whole node and it's uncompressed - read directly to buffer provided, check CRC. |
85 | Reading whole node and it's compressed - read into comprbuf, check CRC and decompress to buffer provided | 85 | Reading whole node and it's compressed - read into comprbuf, check CRC and decompress to buffer provided |
86 | Reading partial node and it's uncompressed - read into readbuf, check CRC, and copy | 86 | Reading partial node and it's uncompressed - read into readbuf, check CRC, and copy |
87 | Reading partial node and it's compressed - read into readbuf, check checksum, decompress to decomprbuf and copy | 87 | Reading partial node and it's compressed - read into readbuf, check checksum, decompress to decomprbuf and copy |
88 | */ | 88 | */ |
89 | if (ri->compr == JFFS2_COMPR_NONE && len == je32_to_cpu(ri->dsize)) { | 89 | if (ri->compr == JFFS2_COMPR_NONE && len == je32_to_cpu(ri->dsize)) { |
@@ -129,7 +129,7 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
129 | D2(printk(KERN_DEBUG "Data CRC matches calculated CRC %08x\n", crc)); | 129 | D2(printk(KERN_DEBUG "Data CRC matches calculated CRC %08x\n", crc)); |
130 | if (ri->compr != JFFS2_COMPR_NONE) { | 130 | if (ri->compr != JFFS2_COMPR_NONE) { |
131 | D2(printk(KERN_DEBUG "Decompress %d bytes from %p to %d bytes at %p\n", | 131 | D2(printk(KERN_DEBUG "Decompress %d bytes from %p to %d bytes at %p\n", |
132 | je32_to_cpu(ri->csize), readbuf, je32_to_cpu(ri->dsize), decomprbuf)); | 132 | je32_to_cpu(ri->csize), readbuf, je32_to_cpu(ri->dsize), decomprbuf)); |
133 | ret = jffs2_decompress(c, f, ri->compr | (ri->usercompr << 8), readbuf, decomprbuf, je32_to_cpu(ri->csize), je32_to_cpu(ri->dsize)); | 133 | ret = jffs2_decompress(c, f, ri->compr | (ri->usercompr << 8), readbuf, decomprbuf, je32_to_cpu(ri->csize), je32_to_cpu(ri->dsize)); |
134 | if (ret) { | 134 | if (ret) { |
135 | printk(KERN_WARNING "Error: jffs2_decompress returned %d\n", ret); | 135 | printk(KERN_WARNING "Error: jffs2_decompress returned %d\n", ret); |
@@ -174,7 +174,6 @@ int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
174 | if (frag) { | 174 | if (frag) { |
175 | D1(printk(KERN_NOTICE "Eep. Hole in ino #%u fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", f->inocache->ino, frag->ofs, offset)); | 175 | D1(printk(KERN_NOTICE "Eep. Hole in ino #%u fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", f->inocache->ino, frag->ofs, offset)); |
176 | holesize = min(holesize, frag->ofs - offset); | 176 | holesize = min(holesize, frag->ofs - offset); |
177 | D2(jffs2_print_frag_list(f)); | ||
178 | } | 177 | } |
179 | D1(printk(KERN_DEBUG "Filling non-frag hole from %d-%d\n", offset, offset+holesize)); | 178 | D1(printk(KERN_DEBUG "Filling non-frag hole from %d-%d\n", offset, offset+holesize)); |
180 | memset(buf, 0, holesize); | 179 | memset(buf, 0, holesize); |
@@ -192,7 +191,7 @@ int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
192 | } else { | 191 | } else { |
193 | uint32_t readlen; | 192 | uint32_t readlen; |
194 | uint32_t fragofs; /* offset within the frag to start reading */ | 193 | uint32_t fragofs; /* offset within the frag to start reading */ |
195 | 194 | ||
196 | fragofs = offset - frag->ofs; | 195 | fragofs = offset - frag->ofs; |
197 | readlen = min(frag->size - fragofs, end - offset); | 196 | readlen = min(frag->size - fragofs, end - offset); |
198 | D1(printk(KERN_DEBUG "Reading %d-%d from node at 0x%08x (%d)\n", | 197 | D1(printk(KERN_DEBUG "Reading %d-%d from node at 0x%08x (%d)\n", |
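A worked example of the fragofs/readlen arithmetic in jffs2_read_inode_range() above (the numbers are illustrative only): suppose a frag covers 0x1000-0x1800 of the file and the caller asked for offset = 0x1200 up to end = 0x2000. Then fragofs = 0x1200 - 0x1000 = 0x200, frag->size - fragofs = 0x600 and end - offset = 0xe00, so readlen = min(0x600, 0xe00) = 0x600 and the next iteration resumes exactly at the frag boundary, offset 0x1800.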
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c index 1a96903e3ef3..5f0652df5d47 100644 --- a/fs/jffs2/readinode.c +++ b/fs/jffs2/readinode.c | |||
@@ -7,11 +7,12 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: readinode.c,v 1.125 2005/07/10 13:13:55 dedekind Exp $ | 10 | * $Id: readinode.c,v 1.143 2005/11/07 11:14:41 gleixner Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
15 | #include <linux/sched.h> | ||
15 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
16 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
17 | #include <linux/crc32.h> | 18 | #include <linux/crc32.h> |
@@ -20,502 +21,631 @@ | |||
20 | #include <linux/compiler.h> | 21 | #include <linux/compiler.h> |
21 | #include "nodelist.h" | 22 | #include "nodelist.h" |
22 | 23 | ||
23 | static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *list, struct jffs2_node_frag *newfrag); | 24 | /* |
24 | 25 | * Put a new tmp_dnode_info into the temporary RB-tree, keeping the list in | |
25 | #if CONFIG_JFFS2_FS_DEBUG >= 2 | 26 | * order of increasing version. |
26 | static void jffs2_print_fragtree(struct rb_root *list, int permitbug) | 27 | */ |
28 | static void jffs2_add_tn_to_tree(struct jffs2_tmp_dnode_info *tn, struct rb_root *list) | ||
27 | { | 29 | { |
28 | struct jffs2_node_frag *this = frag_first(list); | 30 | struct rb_node **p = &list->rb_node; |
29 | uint32_t lastofs = 0; | 31 | struct rb_node * parent = NULL; |
30 | int buggy = 0; | 32 | struct jffs2_tmp_dnode_info *this; |
31 | 33 | ||
32 | while(this) { | 34 | while (*p) { |
33 | if (this->node) | 35 | parent = *p; |
34 | printk(KERN_DEBUG "frag %04x-%04x: 0x%08x(%d) on flash (*%p). left (%p), right (%p), parent (%p)\n", | 36 | this = rb_entry(parent, struct jffs2_tmp_dnode_info, rb); |
35 | this->ofs, this->ofs+this->size, ref_offset(this->node->raw), ref_flags(this->node->raw), | 37 | |
36 | this, frag_left(this), frag_right(this), frag_parent(this)); | 38 | /* There may actually be a collision here, but it doesn't |
37 | else | 39 | actually matter. As long as the two nodes with the same |
38 | printk(KERN_DEBUG "frag %04x-%04x: hole (*%p). left (%p} right (%p), parent (%p)\n", this->ofs, | 40 | version are together, it's all fine. */ |
39 | this->ofs+this->size, this, frag_left(this), frag_right(this), frag_parent(this)); | 41 | if (tn->version > this->version) |
40 | if (this->ofs != lastofs) | 42 | p = &(*p)->rb_left; |
41 | buggy = 1; | 43 | else |
42 | lastofs = this->ofs+this->size; | 44 | p = &(*p)->rb_right; |
43 | this = frag_next(this); | ||
44 | } | 45 | } |
45 | if (buggy && !permitbug) { | 46 | |
46 | printk(KERN_CRIT "Frag tree got a hole in it\n"); | 47 | rb_link_node(&tn->rb, parent, p); |
47 | BUG(); | 48 | rb_insert_color(&tn->rb, list); |
49 | } | ||
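The comparison above decides which side of the tree each tmp_dnode_info lands on, so a plain in-order walk hands the nodes back sorted by version, with equal versions adjacent. A minimal sketch of that walk (the same rb_first()/rb_entry()/rb_next() calls the read_inode path below relies on; tn_list stands in for the caller's tree root, and the loop body is only a placeholder):

    struct rb_root tn_list = RB_ROOT;   /* filled by jffs2_add_tn_to_tree() */
    struct rb_node *rb;
    struct jffs2_tmp_dnode_info *tn;

    for (rb = rb_first(&tn_list); rb; rb = rb_next(rb)) {
            tn = rb_entry(rb, struct jffs2_tmp_dnode_info, rb);
            /* tn->version and tn->fn come out strictly in version order;
               that ordering is all jffs2_do_read_inode_internal() needs */
    }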
50 | |||
51 | static void jffs2_free_tmp_dnode_info_list(struct rb_root *list) | ||
52 | { | ||
53 | struct rb_node *this; | ||
54 | struct jffs2_tmp_dnode_info *tn; | ||
55 | |||
56 | this = list->rb_node; | ||
57 | |||
58 | /* Now at bottom of tree */ | ||
59 | while (this) { | ||
60 | if (this->rb_left) | ||
61 | this = this->rb_left; | ||
62 | else if (this->rb_right) | ||
63 | this = this->rb_right; | ||
64 | else { | ||
65 | tn = rb_entry(this, struct jffs2_tmp_dnode_info, rb); | ||
66 | jffs2_free_full_dnode(tn->fn); | ||
67 | jffs2_free_tmp_dnode_info(tn); | ||
68 | |||
69 | this = this->rb_parent; | ||
70 | if (!this) | ||
71 | break; | ||
72 | |||
73 | if (this->rb_left == &tn->rb) | ||
74 | this->rb_left = NULL; | ||
75 | else if (this->rb_right == &tn->rb) | ||
76 | this->rb_right = NULL; | ||
77 | else BUG(); | ||
78 | } | ||
48 | } | 79 | } |
80 | list->rb_node = NULL; | ||
49 | } | 81 | } |
50 | 82 | ||
51 | void jffs2_print_frag_list(struct jffs2_inode_info *f) | 83 | static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd) |
52 | { | 84 | { |
53 | jffs2_print_fragtree(&f->fragtree, 0); | 85 | struct jffs2_full_dirent *next; |
54 | 86 | ||
55 | if (f->metadata) { | 87 | while (fd) { |
56 | printk(KERN_DEBUG "metadata at 0x%08x\n", ref_offset(f->metadata->raw)); | 88 | next = fd->next; |
89 | jffs2_free_full_dirent(fd); | ||
90 | fd = next; | ||
57 | } | 91 | } |
58 | } | 92 | } |
59 | #endif | ||
60 | 93 | ||
61 | #if CONFIG_JFFS2_FS_DEBUG >= 1 | 94 | /* Returns first valid node after 'ref'. May return 'ref' */ |
62 | static int jffs2_sanitycheck_fragtree(struct jffs2_inode_info *f) | 95 | static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref) |
63 | { | 96 | { |
64 | struct jffs2_node_frag *frag; | 97 | while (ref && ref->next_in_ino) { |
65 | int bitched = 0; | 98 | if (!ref_obsolete(ref)) |
66 | 99 | return ref; | |
67 | for (frag = frag_first(&f->fragtree); frag; frag = frag_next(frag)) { | 100 | dbg_noderef("node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref)); |
101 | ref = ref->next_in_ino; | ||
102 | } | ||
103 | return NULL; | ||
104 | } | ||
68 | 105 | ||
69 | struct jffs2_full_dnode *fn = frag->node; | 106 | /* |
70 | if (!fn || !fn->raw) | 107 | * Helper function for jffs2_get_inode_nodes(). |
71 | continue; | 108 | * It is called every time a directory entry node is found. |
109 | * | ||
110 | * Returns: 0 on success; | ||
111 | * 1 if the node should be marked obsolete; | ||
112 | * negative error code on failure. | ||
113 | */ | ||
114 | static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, | ||
115 | struct jffs2_raw_dirent *rd, uint32_t read, struct jffs2_full_dirent **fdp, | ||
116 | uint32_t *latest_mctime, uint32_t *mctime_ver) | ||
117 | { | ||
118 | struct jffs2_full_dirent *fd; | ||
119 | |||
120 | /* The direntry nodes are checked during the flash scanning */ | ||
121 | BUG_ON(ref_flags(ref) == REF_UNCHECKED); | ||
122 | /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */ | ||
123 | BUG_ON(ref_obsolete(ref)); | ||
124 | |||
125 | /* Sanity check */ | ||
126 | if (unlikely(PAD((rd->nsize + sizeof(*rd))) != PAD(je32_to_cpu(rd->totlen)))) { | ||
127 | JFFS2_ERROR("illegal nsize in node at %#08x: nsize %#02x, totlen %#04x\n", | ||
128 | ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen)); | ||
129 | return 1; | ||
130 | } | ||
72 | 131 | ||
73 | if (ref_flags(fn->raw) == REF_PRISTINE) { | 132 | fd = jffs2_alloc_full_dirent(rd->nsize + 1); |
133 | if (unlikely(!fd)) | ||
134 | return -ENOMEM; | ||
74 | 135 | ||
75 | if (fn->frags > 1) { | 136 | fd->raw = ref; |
76 | printk(KERN_WARNING "REF_PRISTINE node at 0x%08x had %d frags. Tell dwmw2\n", ref_offset(fn->raw), fn->frags); | 137 | fd->version = je32_to_cpu(rd->version); |
77 | bitched = 1; | 138 | fd->ino = je32_to_cpu(rd->ino); |
78 | } | 139 | fd->type = rd->type; |
79 | /* A hole node which isn't multi-page should be garbage-collected | ||
80 | and merged anyway, so we just check for the frag size here, | ||
81 | rather than mucking around with actually reading the node | ||
82 | and checking the compression type, which is the real way | ||
83 | to tell a hole node. */ | ||
84 | if (frag->ofs & (PAGE_CACHE_SIZE-1) && frag_prev(frag) && frag_prev(frag)->size < PAGE_CACHE_SIZE && frag_prev(frag)->node) { | ||
85 | printk(KERN_WARNING "REF_PRISTINE node at 0x%08x had a previous non-hole frag in the same page. Tell dwmw2\n", | ||
86 | ref_offset(fn->raw)); | ||
87 | bitched = 1; | ||
88 | } | ||
89 | 140 | ||
90 | if ((frag->ofs+frag->size) & (PAGE_CACHE_SIZE-1) && frag_next(frag) && frag_next(frag)->size < PAGE_CACHE_SIZE && frag_next(frag)->node) { | 141 | /* Pick out the mctime of the latest dirent */ |
91 | printk(KERN_WARNING "REF_PRISTINE node at 0x%08x (%08x-%08x) had a following non-hole frag in the same page. Tell dwmw2\n", | 142 | if(fd->version > *mctime_ver && je32_to_cpu(rd->mctime)) { |
92 | ref_offset(fn->raw), frag->ofs, frag->ofs+frag->size); | 143 | *mctime_ver = fd->version; |
93 | bitched = 1; | 144 | *latest_mctime = je32_to_cpu(rd->mctime); |
94 | } | ||
95 | } | ||
96 | } | 145 | } |
97 | |||
98 | if (bitched) { | ||
99 | struct jffs2_node_frag *thisfrag; | ||
100 | |||
101 | printk(KERN_WARNING "Inode is #%u\n", f->inocache->ino); | ||
102 | thisfrag = frag_first(&f->fragtree); | ||
103 | while (thisfrag) { | ||
104 | if (!thisfrag->node) { | ||
105 | printk("Frag @0x%x-0x%x; node-less hole\n", | ||
106 | thisfrag->ofs, thisfrag->size + thisfrag->ofs); | ||
107 | } else if (!thisfrag->node->raw) { | ||
108 | printk("Frag @0x%x-0x%x; raw-less hole\n", | ||
109 | thisfrag->ofs, thisfrag->size + thisfrag->ofs); | ||
110 | } else { | ||
111 | printk("Frag @0x%x-0x%x; raw at 0x%08x(%d) (0x%x-0x%x)\n", | ||
112 | thisfrag->ofs, thisfrag->size + thisfrag->ofs, | ||
113 | ref_offset(thisfrag->node->raw), ref_flags(thisfrag->node->raw), | ||
114 | thisfrag->node->ofs, thisfrag->node->ofs+thisfrag->node->size); | ||
115 | } | ||
116 | thisfrag = frag_next(thisfrag); | ||
117 | } | ||
118 | } | ||
119 | return bitched; | ||
120 | } | ||
121 | #endif /* D1 */ | ||
122 | 146 | ||
123 | static void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *this) | 147 | /* |
124 | { | 148 | * Copy as much of the name as possible from the raw |
125 | if (this->node) { | 149 | * dirent we've already read from the flash. |
126 | this->node->frags--; | 150 | */ |
127 | if (!this->node->frags) { | 151 | if (read > sizeof(*rd)) |
128 | /* The node has no valid frags left. It's totally obsoleted */ | 152 | memcpy(&fd->name[0], &rd->name[0], |
129 | D2(printk(KERN_DEBUG "Marking old node @0x%08x (0x%04x-0x%04x) obsolete\n", | 153 | min_t(uint32_t, rd->nsize, (read - sizeof(*rd)) )); |
130 | ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size)); | 154 | |
131 | jffs2_mark_node_obsolete(c, this->node->raw); | 155 | /* Do we need to copy any more of the name directly from the flash? */ |
132 | jffs2_free_full_dnode(this->node); | 156 | if (rd->nsize + sizeof(*rd) > read) { |
133 | } else { | 157 | /* FIXME: point() */ |
134 | D2(printk(KERN_DEBUG "Marking old node @0x%08x (0x%04x-0x%04x) REF_NORMAL. frags is %d\n", | 158 | int err; |
135 | ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size, | 159 | int already = read - sizeof(*rd); |
136 | this->node->frags)); | 160 | |
137 | mark_ref_normal(this->node->raw); | 161 | err = jffs2_flash_read(c, (ref_offset(ref)) + read, |
162 | rd->nsize - already, &read, &fd->name[already]); | ||
163 | if (unlikely(read != rd->nsize - already) && likely(!err)) | ||
164 | return -EIO; | ||
165 | |||
166 | if (unlikely(err)) { | ||
167 | JFFS2_ERROR("read remainder of name: error %d\n", err); | ||
168 | jffs2_free_full_dirent(fd); | ||
169 | return -EIO; | ||
138 | } | 170 | } |
139 | |||
140 | } | 171 | } |
141 | jffs2_free_node_frag(this); | 172 | |
173 | fd->nhash = full_name_hash(fd->name, rd->nsize); | ||
174 | fd->next = NULL; | ||
175 | fd->name[rd->nsize] = '\0'; | ||
176 | |||
177 | /* | ||
178 | * Wheee. We now have a complete jffs2_full_dirent structure, with | ||
179 | * the name in it and everything. Link it into the list | ||
180 | */ | ||
181 | jffs2_add_fd_to_list(c, fd, fdp); | ||
182 | |||
183 | return 0; | ||
142 | } | 184 | } |
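The PAD()-based sanity check at the top of read_direntry() is easiest to see with numbers (the 40-byte sizeof(struct jffs2_raw_dirent) and the 4-byte PAD() rounding are quoted from memory of the on-media format, so treat them as illustrative): a dirent carrying a 5-character name occupies 40 + 5 = 45 bytes of payload, and PAD(45) = 48. If the node's totlen does not also round up to 48, the two lengths cannot both be right, so the function returns 1 and the caller marks the node obsolete rather than trusting it.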
143 | 185 | ||
144 | /* Given an inode, probably with existing list of fragments, add the new node | 186 | /* |
145 | * to the fragment list. | 187 | * Helper function for jffs2_get_inode_nodes(). |
188 | * It is called every time an inode node is found. | ||
189 | * | ||
190 | * Returns: 0 on success; | ||
191 | * 1 if the node should be marked obsolete; | ||
192 | * negative error code on failure. | ||
146 | */ | 193 | */ |
147 | int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn) | 194 | static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, |
195 | struct jffs2_raw_inode *rd, struct rb_root *tnp, int rdlen, | ||
196 | uint32_t *latest_mctime, uint32_t *mctime_ver) | ||
148 | { | 197 | { |
149 | int ret; | 198 | struct jffs2_tmp_dnode_info *tn; |
150 | struct jffs2_node_frag *newfrag; | 199 | uint32_t len, csize; |
151 | 200 | int ret = 1; | |
152 | D1(printk(KERN_DEBUG "jffs2_add_full_dnode_to_inode(ino #%u, f %p, fn %p)\n", f->inocache->ino, f, fn)); | ||
153 | 201 | ||
154 | if (unlikely(!fn->size)) | 202 | /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */ |
155 | return 0; | 203 | BUG_ON(ref_obsolete(ref)); |
156 | 204 | ||
157 | newfrag = jffs2_alloc_node_frag(); | 205 | tn = jffs2_alloc_tmp_dnode_info(); |
158 | if (unlikely(!newfrag)) | 206 | if (!tn) { |
207 | JFFS2_ERROR("failed to allocate tn (%d bytes).\n", sizeof(*tn)); | ||
159 | return -ENOMEM; | 208 | return -ENOMEM; |
209 | } | ||
160 | 210 | ||
161 | D2(printk(KERN_DEBUG "adding node %04x-%04x @0x%08x on flash, newfrag *%p\n", | 211 | tn->partial_crc = 0; |
162 | fn->ofs, fn->ofs+fn->size, ref_offset(fn->raw), newfrag)); | 212 | csize = je32_to_cpu(rd->csize); |
163 | |||
164 | newfrag->ofs = fn->ofs; | ||
165 | newfrag->size = fn->size; | ||
166 | newfrag->node = fn; | ||
167 | newfrag->node->frags = 1; | ||
168 | 213 | ||
169 | ret = jffs2_add_frag_to_fragtree(c, &f->fragtree, newfrag); | 214 | /* If we've never checked the CRCs on this node, check them now */ |
170 | if (ret) | 215 | if (ref_flags(ref) == REF_UNCHECKED) { |
171 | return ret; | 216 | uint32_t crc; |
172 | 217 | ||
173 | /* If we now share a page with other nodes, mark either previous | 218 | crc = crc32(0, rd, sizeof(*rd) - 8); |
174 | or next node REF_NORMAL, as appropriate. */ | 219 | if (unlikely(crc != je32_to_cpu(rd->node_crc))) { |
175 | if (newfrag->ofs & (PAGE_CACHE_SIZE-1)) { | 220 | JFFS2_NOTICE("header CRC failed on node at %#08x: read %#08x, calculated %#08x\n", |
176 | struct jffs2_node_frag *prev = frag_prev(newfrag); | 221 | ref_offset(ref), je32_to_cpu(rd->node_crc), crc); |
222 | goto free_out; | ||
223 | } | ||
177 | 224 | ||
178 | mark_ref_normal(fn->raw); | 225 | /* Sanity checks */ |
179 | /* If we don't start at zero there's _always_ a previous */ | 226 | if (unlikely(je32_to_cpu(rd->offset) > je32_to_cpu(rd->isize)) || |
180 | if (prev->node) | 227 | unlikely(PAD(je32_to_cpu(rd->csize) + sizeof(*rd)) != PAD(je32_to_cpu(rd->totlen)))) { |
181 | mark_ref_normal(prev->node->raw); | 228 | JFFS2_WARNING("inode node header CRC is corrupted at %#08x\n", ref_offset(ref)); |
182 | } | 229 | jffs2_dbg_dump_node(c, ref_offset(ref)); |
230 | goto free_out; | ||
231 | } | ||
183 | 232 | ||
184 | if ((newfrag->ofs+newfrag->size) & (PAGE_CACHE_SIZE-1)) { | 233 | if (jffs2_is_writebuffered(c) && csize != 0) { |
185 | struct jffs2_node_frag *next = frag_next(newfrag); | 234 | /* At this point we are supposed to check the data CRC |
186 | 235 | * of our unchecked node. But thus far, we do not | |
187 | if (next) { | 236 | * know whether the node is valid or obsolete. To |
188 | mark_ref_normal(fn->raw); | 237 | * figure this out, we need to walk all the nodes of |
189 | if (next->node) | 238 | * the inode and build the inode fragtree. We don't |
190 | mark_ref_normal(next->node->raw); | 239 | * want to spend time checking data of nodes which may |
240 | * later be found to be obsolete. So we put off the full | ||
241 | * data CRC checking until we have read all the inode | ||
242 | * nodes and have started building the fragtree. | ||
243 | * | ||
244 | * The fragtree is being built starting with nodes | ||
245 | * having the highest version number, so we'll be able | ||
246 | * to detect whether a node is valid (i.e., it is not | ||
247 | * overlapped by a node with higher version) or not. | ||
248 | * And we'll be able to check only those nodes, which | ||
249 | * are not obsolete. | ||
250 | * | ||
251 | * Of course, this optimization only makes sense in case | ||
252 | * of NAND flashes (or other flashes with | ||
253 | * !jffs2_can_mark_obsolete()), since on NOR flashes | ||
254 | * nodes are marked obsolete physically. | ||
255 | * | ||
256 | * Since NAND flashes (or other flashes with | ||
257 | * jffs2_is_writebuffered(c)) are anyway read by | ||
258 | * fractions of c->wbuf_pagesize, and we have just read | ||
259 | * the node header, it is likely that the starting part | ||
260 | * of the node data is also read when we read the | ||
261 | * header. So we don't mind checking the CRC of the | ||
262 | * starting part of the data of the node now, and check | ||
263 | * the second part later (in jffs2_check_node_data()). | ||
264 | * Of course, we will not need to re-read and re-check | ||
265 | * the NAND page which we have just read. This is why we | ||
266 | * read the whole NAND page at jffs2_get_inode_nodes(), | ||
267 | * while we needed only the node header. | ||
268 | */ | ||
269 | unsigned char *buf; | ||
270 | |||
271 | /* 'buf' will point to the start of data */ | ||
272 | buf = (unsigned char *)rd + sizeof(*rd); | ||
273 | /* len will be the read data length */ | ||
274 | len = min_t(uint32_t, rdlen - sizeof(*rd), csize); | ||
275 | tn->partial_crc = crc32(0, buf, len); | ||
276 | |||
277 | dbg_readinode("Calculates CRC (%#08x) for %d bytes, csize %d\n", tn->partial_crc, len, csize); | ||
278 | |||
279 | /* If we actually calculated the whole data CRC | ||
280 | * and it is wrong, drop the node. */ | ||
281 | if (len >= csize && unlikely(tn->partial_crc != je32_to_cpu(rd->data_crc))) { | ||
282 | JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n", | ||
283 | ref_offset(ref), tn->partial_crc, je32_to_cpu(rd->data_crc)); | ||
284 | goto free_out; | ||
285 | } | ||
286 | |||
287 | } else if (csize == 0) { | ||
288 | /* | ||
289 | * We checked the header CRC. If the node has no data, adjust | ||
290 | * the space accounting now. For other nodes this will be done | ||
291 | * later either when the node is marked obsolete or when its | ||
292 | * data is checked. | ||
293 | */ | ||
294 | struct jffs2_eraseblock *jeb; | ||
295 | |||
296 | dbg_readinode("the node has no data.\n"); | ||
297 | jeb = &c->blocks[ref->flash_offset / c->sector_size]; | ||
298 | len = ref_totlen(c, jeb, ref); | ||
299 | |||
300 | spin_lock(&c->erase_completion_lock); | ||
301 | jeb->used_size += len; | ||
302 | jeb->unchecked_size -= len; | ||
303 | c->used_size += len; | ||
304 | c->unchecked_size -= len; | ||
305 | ref->flash_offset = ref_offset(ref) | REF_NORMAL; | ||
306 | spin_unlock(&c->erase_completion_lock); | ||
191 | } | 307 | } |
192 | } | 308 | } |
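The partial_crc scheme above relies on CRC32 being resumable: running the remainder of the data through crc32() with the earlier result as the seed gives the same answer as one pass over everything, which is what lets jffs2_check_node_data() finish the job later without re-reading the first wbuf. A small self-contained illustration of that property (userspace C; crc32_sketch() is a stand-in with the same seed-in/seed-out convention as the kernel's crc32()):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Bitwise reflected CRC32 with no implicit initial or final inversion,
       so a previous result can simply be passed back in as the seed. */
    static uint32_t crc32_sketch(uint32_t crc, const unsigned char *buf, size_t len)
    {
            while (len--) {
                    crc ^= *buf++;
                    for (int i = 0; i < 8; i++)
                            crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320 : 0);
            }
            return crc;
    }

    int main(void)
    {
            unsigned char data[64];
            for (int i = 0; i < 64; i++)
                    data[i] = (unsigned char)i;

            /* read_dnode(): checksum the part of the data that arrived
               along with the header... */
            uint32_t partial = crc32_sketch(0, data, 20);
            /* ...jffs2_check_node_data(), later: continue from that seed. */
            uint32_t full = crc32_sketch(partial, data + 20, sizeof(data) - 20);

            /* Same answer as one pass over the whole node data. */
            assert(full == crc32_sketch(0, data, sizeof(data)));
            printf("data CRC 0x%08x\n", full);
            return 0;
    }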
193 | D2(if (jffs2_sanitycheck_fragtree(f)) { | 309 | |
194 | printk(KERN_WARNING "Just added node %04x-%04x @0x%08x on flash, newfrag *%p\n", | 310 | tn->fn = jffs2_alloc_full_dnode(); |
195 | fn->ofs, fn->ofs+fn->size, ref_offset(fn->raw), newfrag); | 311 | if (!tn->fn) { |
196 | return 0; | 312 | JFFS2_ERROR("alloc fn failed\n"); |
197 | }) | 313 | ret = -ENOMEM; |
198 | D2(jffs2_print_frag_list(f)); | 314 | goto free_out; |
315 | } | ||
316 | |||
317 | tn->version = je32_to_cpu(rd->version); | ||
318 | tn->fn->ofs = je32_to_cpu(rd->offset); | ||
319 | tn->data_crc = je32_to_cpu(rd->data_crc); | ||
320 | tn->csize = csize; | ||
321 | tn->fn->raw = ref; | ||
322 | |||
323 | /* There was a bug where we wrote hole nodes out with | ||
324 | csize/dsize swapped. Deal with it */ | ||
325 | if (rd->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(rd->dsize) && csize) | ||
326 | tn->fn->size = csize; | ||
327 | else // normal case... | ||
328 | tn->fn->size = je32_to_cpu(rd->dsize); | ||
329 | |||
330 | dbg_readinode("dnode @%08x: ver %u, offset %#04x, dsize %#04x, csize %#04x\n", | ||
331 | ref_offset(ref), je32_to_cpu(rd->version), je32_to_cpu(rd->offset), je32_to_cpu(rd->dsize), csize); | ||
332 | |||
333 | jffs2_add_tn_to_tree(tn, tnp); | ||
334 | |||
199 | return 0; | 335 | return 0; |
336 | |||
337 | free_out: | ||
338 | jffs2_free_tmp_dnode_info(tn); | ||
339 | return ret; | ||
200 | } | 340 | } |
201 | 341 | ||
202 | /* Doesn't set inode->i_size */ | 342 | /* |
203 | static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *list, struct jffs2_node_frag *newfrag) | 343 | * Helper function for jffs2_get_inode_nodes(). |
344 | * It is called every time an unknown node is found. | ||
345 | * | ||
346 | * Returns: 0 on success; | ||
347 | * 1 if the node should be marked obsolete; | ||
348 | * negative error code on failure. | ||
349 | */ | ||
350 | static inline int read_unknown(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, struct jffs2_unknown_node *un) | ||
204 | { | 351 | { |
205 | struct jffs2_node_frag *this; | 352 | /* We don't mark unknown nodes as REF_UNCHECKED */ |
206 | uint32_t lastend; | 353 | BUG_ON(ref_flags(ref) == REF_UNCHECKED); |
207 | 354 | ||
208 | /* Skip all the nodes which are completed before this one starts */ | 355 | un->nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(un->nodetype)); |
209 | this = jffs2_lookup_node_frag(list, newfrag->node->ofs); | ||
210 | 356 | ||
211 | if (this) { | 357 | if (crc32(0, un, sizeof(struct jffs2_unknown_node) - 4) != je32_to_cpu(un->hdr_crc)) { |
212 | D2(printk(KERN_DEBUG "j_a_f_d_t_f: Lookup gave frag 0x%04x-0x%04x; phys 0x%08x (*%p)\n", | 358 | /* Hmmm. This should have been caught at scan time. */ |
213 | this->ofs, this->ofs+this->size, this->node?(ref_offset(this->node->raw)):0xffffffff, this)); | 359 | JFFS2_NOTICE("node header CRC failed at %#08x. But it must have been OK earlier.\n", ref_offset(ref)); |
214 | lastend = this->ofs + this->size; | 360 | jffs2_dbg_dump_node(c, ref_offset(ref)); |
361 | return 1; | ||
215 | } else { | 362 | } else { |
216 | D2(printk(KERN_DEBUG "j_a_f_d_t_f: Lookup gave no frag\n")); | 363 | switch(je16_to_cpu(un->nodetype) & JFFS2_COMPAT_MASK) { |
217 | lastend = 0; | ||
218 | } | ||
219 | |||
220 | /* See if we ran off the end of the list */ | ||
221 | if (lastend <= newfrag->ofs) { | ||
222 | /* We did */ | ||
223 | |||
224 | /* Check if 'this' node was on the same page as the new node. | ||
225 | If so, both 'this' and the new node get marked REF_NORMAL so | ||
226 | the GC can take a look. | ||
227 | */ | ||
228 | if (lastend && (lastend-1) >> PAGE_CACHE_SHIFT == newfrag->ofs >> PAGE_CACHE_SHIFT) { | ||
229 | if (this->node) | ||
230 | mark_ref_normal(this->node->raw); | ||
231 | mark_ref_normal(newfrag->node->raw); | ||
232 | } | ||
233 | 364 | ||
234 | if (lastend < newfrag->node->ofs) { | 365 | case JFFS2_FEATURE_INCOMPAT: |
235 | /* ... and we need to put a hole in before the new node */ | 366 | JFFS2_ERROR("unknown INCOMPAT nodetype %#04X at %#08x\n", |
236 | struct jffs2_node_frag *holefrag = jffs2_alloc_node_frag(); | 367 | je16_to_cpu(un->nodetype), ref_offset(ref)); |
237 | if (!holefrag) { | 368 | /* EEP */ |
238 | jffs2_free_node_frag(newfrag); | 369 | BUG(); |
239 | return -ENOMEM; | 370 | break; |
240 | } | 371 | |
241 | holefrag->ofs = lastend; | 372 | case JFFS2_FEATURE_ROCOMPAT: |
242 | holefrag->size = newfrag->node->ofs - lastend; | 373 | JFFS2_ERROR("unknown ROCOMPAT nodetype %#04X at %#08x\n", |
243 | holefrag->node = NULL; | 374 | je16_to_cpu(un->nodetype), ref_offset(ref)); |
244 | if (this) { | 375 | BUG_ON(!(c->flags & JFFS2_SB_FLAG_RO)); |
245 | /* By definition, the 'this' node has no right-hand child, | 376 | break; |
246 | because there are no frags with offset greater than it. | 377 | |
247 | So that's where we want to put the hole */ | 378 | case JFFS2_FEATURE_RWCOMPAT_COPY: |
248 | D2(printk(KERN_DEBUG "Adding hole frag (%p) on right of node at (%p)\n", holefrag, this)); | 379 | JFFS2_NOTICE("unknown RWCOMPAT_COPY nodetype %#04X at %#08x\n", |
249 | rb_link_node(&holefrag->rb, &this->rb, &this->rb.rb_right); | 380 | je16_to_cpu(un->nodetype), ref_offset(ref)); |
250 | } else { | 381 | break; |
251 | D2(printk(KERN_DEBUG "Adding hole frag (%p) at root of tree\n", holefrag)); | 382 | |
252 | rb_link_node(&holefrag->rb, NULL, &list->rb_node); | 383 | case JFFS2_FEATURE_RWCOMPAT_DELETE: |
253 | } | 384 | JFFS2_NOTICE("unknown RWCOMPAT_DELETE nodetype %#04X at %#08x\n", |
254 | rb_insert_color(&holefrag->rb, list); | 385 | je16_to_cpu(un->nodetype), ref_offset(ref)); |
255 | this = holefrag; | 386 | return 1; |
256 | } | ||
257 | if (this) { | ||
258 | /* By definition, the 'this' node has no right-hand child, | ||
259 | because there are no frags with offset greater than it. | ||
260 | So that's where we want to put the hole */ | ||
261 | D2(printk(KERN_DEBUG "Adding new frag (%p) on right of node at (%p)\n", newfrag, this)); | ||
262 | rb_link_node(&newfrag->rb, &this->rb, &this->rb.rb_right); | ||
263 | } else { | ||
264 | D2(printk(KERN_DEBUG "Adding new frag (%p) at root of tree\n", newfrag)); | ||
265 | rb_link_node(&newfrag->rb, NULL, &list->rb_node); | ||
266 | } | 387 | } |
267 | rb_insert_color(&newfrag->rb, list); | ||
268 | return 0; | ||
269 | } | 388 | } |
270 | 389 | ||
271 | D2(printk(KERN_DEBUG "j_a_f_d_t_f: dealing with frag 0x%04x-0x%04x; phys 0x%08x (*%p)\n", | 390 | return 0; |
272 | this->ofs, this->ofs+this->size, this->node?(ref_offset(this->node->raw)):0xffffffff, this)); | 391 | } |
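For the switch above it helps to spell the masking out (constants quoted from include/linux/jffs2.h of this era; double-check against the header): JFFS2_COMPAT_MASK is 0xc000, i.e. the two top bits of the 16-bit node type, and the classes are INCOMPAT 0xc000, ROCOMPAT 0x8000, RWCOMPAT_COPY 0x4000 and RWCOMPAT_DELETE 0x0000, with 0x2000 being the JFFS2_NODE_ACCURATE bit that was just OR-ed back in. So JFFS2_NODETYPE_DIRENT = INCOMPAT | ACCURATE | 1 = 0xe001 masks to 0xc000 and would land in the INCOMPAT arm if it ever reached read_unknown(), while an unrecognised type of the form 0x0xxx or 0x2xxx masks to RWCOMPAT_DELETE and is quietly marked obsolete.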
273 | 392 | ||
274 | /* OK. 'this' is pointing at the first frag that newfrag->ofs at least partially obsoletes, | 393 | /* |
275 | * - i.e. newfrag->ofs < this->ofs+this->size && newfrag->ofs >= this->ofs | 394 | * Helper function for jffs2_get_inode_nodes(). |
276 | */ | 395 | * The function detects whether more data should be read and reads it if yes. |
277 | if (newfrag->ofs > this->ofs) { | 396 | * |
278 | /* This node isn't completely obsoleted. The start of it remains valid */ | 397 | * Returns: 0 on succes; |
279 | 398 | * negative error code on failure. | |
280 | /* Mark the new node and the partially covered node REF_NORMAL -- let | 399 | */ |
281 | the GC take a look at them */ | 400 | static int read_more(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, |
282 | mark_ref_normal(newfrag->node->raw); | 401 | int right_size, int *rdlen, unsigned char *buf, unsigned char *bufstart) |
283 | if (this->node) | 402 | { |
284 | mark_ref_normal(this->node->raw); | 403 | int right_len, err, len; |
285 | 404 | size_t retlen; | |
286 | if (this->ofs + this->size > newfrag->ofs + newfrag->size) { | 405 | uint32_t offs; |
287 | /* The new node splits 'this' frag into two */ | ||
288 | struct jffs2_node_frag *newfrag2 = jffs2_alloc_node_frag(); | ||
289 | if (!newfrag2) { | ||
290 | jffs2_free_node_frag(newfrag); | ||
291 | return -ENOMEM; | ||
292 | } | ||
293 | D2(printk(KERN_DEBUG "split old frag 0x%04x-0x%04x -->", this->ofs, this->ofs+this->size); | ||
294 | if (this->node) | ||
295 | printk("phys 0x%08x\n", ref_offset(this->node->raw)); | ||
296 | else | ||
297 | printk("hole\n"); | ||
298 | ) | ||
299 | |||
300 | /* New second frag pointing to this's node */ | ||
301 | newfrag2->ofs = newfrag->ofs + newfrag->size; | ||
302 | newfrag2->size = (this->ofs+this->size) - newfrag2->ofs; | ||
303 | newfrag2->node = this->node; | ||
304 | if (this->node) | ||
305 | this->node->frags++; | ||
306 | |||
307 | /* Adjust size of original 'this' */ | ||
308 | this->size = newfrag->ofs - this->ofs; | ||
309 | |||
310 | /* Now, we know there's no node with offset | ||
311 | greater than this->ofs but smaller than | ||
312 | newfrag2->ofs or newfrag->ofs, for obvious | ||
313 | reasons. So we can do a tree insert from | ||
314 | 'this' to insert newfrag, and a tree insert | ||
315 | from newfrag to insert newfrag2. */ | ||
316 | jffs2_fragtree_insert(newfrag, this); | ||
317 | rb_insert_color(&newfrag->rb, list); | ||
318 | |||
319 | jffs2_fragtree_insert(newfrag2, newfrag); | ||
320 | rb_insert_color(&newfrag2->rb, list); | ||
321 | |||
322 | return 0; | ||
323 | } | ||
324 | /* New node just reduces 'this' frag in size, doesn't split it */ | ||
325 | this->size = newfrag->ofs - this->ofs; | ||
326 | 406 | ||
327 | /* Again, we know it lives down here in the tree */ | 407 | if (jffs2_is_writebuffered(c)) { |
328 | jffs2_fragtree_insert(newfrag, this); | 408 | right_len = c->wbuf_pagesize - (bufstart - buf); |
329 | rb_insert_color(&newfrag->rb, list); | 409 | if (right_size + (int)(bufstart - buf) > c->wbuf_pagesize) |
330 | } else { | 410 | right_len += c->wbuf_pagesize; |
331 | /* New frag starts at the same point as 'this' used to. Replace | 411 | } else |
332 | it in the tree without doing a delete and insertion */ | 412 | right_len = right_size; |
333 | D2(printk(KERN_DEBUG "Inserting newfrag (*%p),%d-%d in before 'this' (*%p),%d-%d\n", | ||
334 | newfrag, newfrag->ofs, newfrag->ofs+newfrag->size, | ||
335 | this, this->ofs, this->ofs+this->size)); | ||
336 | |||
337 | rb_replace_node(&this->rb, &newfrag->rb, list); | ||
338 | |||
339 | if (newfrag->ofs + newfrag->size >= this->ofs+this->size) { | ||
340 | D2(printk(KERN_DEBUG "Obsoleting node frag %p (%x-%x)\n", this, this->ofs, this->ofs+this->size)); | ||
341 | jffs2_obsolete_node_frag(c, this); | ||
342 | } else { | ||
343 | this->ofs += newfrag->size; | ||
344 | this->size -= newfrag->size; | ||
345 | 413 | ||
346 | jffs2_fragtree_insert(this, newfrag); | 414 | if (*rdlen == right_len) |
347 | rb_insert_color(&this->rb, list); | 415 | return 0; |
348 | return 0; | 416 | |
349 | } | 417 | /* We need to read more data */ |
418 | offs = ref_offset(ref) + *rdlen; | ||
419 | if (jffs2_is_writebuffered(c)) { | ||
420 | bufstart = buf + c->wbuf_pagesize; | ||
421 | len = c->wbuf_pagesize; | ||
422 | } else { | ||
423 | bufstart = buf + *rdlen; | ||
424 | len = right_size - *rdlen; | ||
350 | } | 425 | } |
351 | /* OK, now we have newfrag added in the correct place in the tree, but | 426 | |
352 | frag_next(newfrag) may be a fragment which is overlapped by it | 427 | dbg_readinode("read more %d bytes\n", len); |
353 | */ | 428 | |
354 | while ((this = frag_next(newfrag)) && newfrag->ofs + newfrag->size >= this->ofs + this->size) { | 429 | err = jffs2_flash_read(c, offs, len, &retlen, bufstart); |
355 | /* 'this' frag is obsoleted completely. */ | 430 | if (err) { |
356 | D2(printk(KERN_DEBUG "Obsoleting node frag %p (%x-%x) and removing from tree\n", this, this->ofs, this->ofs+this->size)); | 431 | JFFS2_ERROR("can not read %d bytes from 0x%08x, " |
357 | rb_erase(&this->rb, list); | 432 | "error code: %d.\n", len, offs, err); |
358 | jffs2_obsolete_node_frag(c, this); | 433 | return err; |
359 | } | 434 | } |
360 | /* Now we're pointing at the first frag which isn't totally obsoleted by | ||
361 | the new frag */ | ||
362 | 435 | ||
363 | if (!this || newfrag->ofs + newfrag->size == this->ofs) { | 436 | if (retlen < len) { |
364 | return 0; | 437 | JFFS2_ERROR("short read at %#08x: %d instead of %d.\n", |
438 | offs, retlen, len); | ||
439 | return -EIO; | ||
365 | } | 440 | } |
366 | /* Still some overlap but we don't need to move it in the tree */ | ||
367 | this->size = (this->ofs + this->size) - (newfrag->ofs + newfrag->size); | ||
368 | this->ofs = newfrag->ofs + newfrag->size; | ||
369 | 441 | ||
370 | /* And mark them REF_NORMAL so the GC takes a look at them */ | 442 | *rdlen = right_len; |
371 | if (this->node) | ||
372 | mark_ref_normal(this->node->raw); | ||
373 | mark_ref_normal(newfrag->node->raw); | ||
374 | 443 | ||
375 | return 0; | 444 | return 0; |
376 | } | 445 | } |
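A worked example of the wbuf arithmetic shared by read_more() and the scan loop below (the 512-byte page and 70-byte header are made-up figures): with c->wbuf_pagesize = 512 and a node starting at flash offset 0x101f0, bufstart = buf + (0x101f0 % 512) = buf + 0x1f0, so only 512 - 0x1f0 = 16 bytes of that wbuf belong to the node. A 70-byte header does not fit, since 70 + 0x1f0 > 512, so the second wbuf is read as well and len becomes 16 + 512 = 528; read_more() repeats the same test once the node type is known and the real, type-specific header turns out to be bigger than what is already sitting in the buffer.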
377 | 446 | ||
378 | void jffs2_truncate_fraglist (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size) | 447 | /* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated |
448 | with this ino, returning the former in order of version */ | ||
449 | static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | ||
450 | struct rb_root *tnp, struct jffs2_full_dirent **fdp, | ||
451 | uint32_t *highest_version, uint32_t *latest_mctime, | ||
452 | uint32_t *mctime_ver) | ||
379 | { | 453 | { |
380 | struct jffs2_node_frag *frag = jffs2_lookup_node_frag(list, size); | 454 | struct jffs2_raw_node_ref *ref, *valid_ref; |
455 | struct rb_root ret_tn = RB_ROOT; | ||
456 | struct jffs2_full_dirent *ret_fd = NULL; | ||
457 | unsigned char *buf = NULL; | ||
458 | union jffs2_node_union *node; | ||
459 | size_t retlen; | ||
460 | int len, err; | ||
461 | |||
462 | *mctime_ver = 0; | ||
463 | |||
464 | dbg_readinode("ino #%u\n", f->inocache->ino); | ||
465 | |||
466 | if (jffs2_is_writebuffered(c)) { | ||
467 | /* | ||
468 | * If we have the write buffer, we assume the minimal I/O unit | ||
469 | * is c->wbuf_pagesize. We implement some optimizations which in | ||
470 | * this case and we need a temporary buffer of size = | ||
471 | * 2*c->wbuf_pagesize bytes (see comments in read_dnode()). | ||
472 | * Basically, we want to read not only the node header, but the | ||
473 | * whole wbuf (NAND page in case of NAND) or 2, if the node | ||
474 | * header overlaps the border between the 2 wbufs. | ||
475 | */ | ||
476 | len = 2*c->wbuf_pagesize; | ||
477 | } else { | ||
478 | /* | ||
479 | * When there is no write buffer, the size of the temporary | ||
480 | * buffer is the size of the largest node header. | ||
481 | */ | ||
482 | len = sizeof(union jffs2_node_union); | ||
483 | } | ||
381 | 484 | ||
382 | D1(printk(KERN_DEBUG "Truncating fraglist to 0x%08x bytes\n", size)); | 485 | /* FIXME: in case of NOR and available ->point() this |
486 | * needs to be fixed. */ | ||
487 | buf = kmalloc(len, GFP_KERNEL); | ||
488 | if (!buf) | ||
489 | return -ENOMEM; | ||
383 | 490 | ||
384 | /* We know frag->ofs <= size. That's what lookup does for us */ | 491 | spin_lock(&c->erase_completion_lock); |
385 | if (frag && frag->ofs != size) { | 492 | valid_ref = jffs2_first_valid_node(f->inocache->nodes); |
386 | if (frag->ofs+frag->size >= size) { | 493 | if (!valid_ref && f->inocache->ino != 1) |
387 | D1(printk(KERN_DEBUG "Truncating frag 0x%08x-0x%08x\n", frag->ofs, frag->ofs+frag->size)); | 494 | JFFS2_WARNING("Eep. No valid nodes for ino #%u.\n", f->inocache->ino); |
388 | frag->size = size - frag->ofs; | 495 | while (valid_ref) { |
496 | unsigned char *bufstart; | ||
497 | |||
498 | /* We can hold a pointer to a non-obsolete node without the spinlock, | ||
499 | but _obsolete_ nodes may disappear at any time, if the block | ||
500 | they're in gets erased. So if we mark 'ref' obsolete while we're | ||
501 | not holding the lock, it can go away immediately. For that reason, | ||
502 | we find the next valid node first, before processing 'ref'. | ||
503 | */ | ||
504 | ref = valid_ref; | ||
505 | valid_ref = jffs2_first_valid_node(ref->next_in_ino); | ||
506 | spin_unlock(&c->erase_completion_lock); | ||
507 | |||
508 | cond_resched(); | ||
509 | |||
510 | /* | ||
511 | * At this point we don't know the type of the node we're going | ||
512 | * to read, so we do not know the size of its header. In order | ||
513 | * to minimize the amount of flash IO we assume the node has | ||
514 | * size = JFFS2_MIN_NODE_HEADER. | ||
515 | */ | ||
516 | if (jffs2_is_writebuffered(c)) { | ||
517 | /* | ||
518 | * We treat 'buf' as 2 adjacent wbufs. We want to | ||
519 | * adjust bufstart so that it points to the | ||
520 | * beginning of the node within this wbuf. | ||
521 | */ | ||
522 | bufstart = buf + (ref_offset(ref) % c->wbuf_pagesize); | ||
523 | /* We will read either one wbuf or 2 wbufs. */ | ||
524 | len = c->wbuf_pagesize - (bufstart - buf); | ||
525 | if (JFFS2_MIN_NODE_HEADER + (int)(bufstart - buf) > c->wbuf_pagesize) { | ||
526 | /* The header spans the border of the first wbuf */ | ||
527 | len += c->wbuf_pagesize; | ||
528 | } | ||
529 | } else { | ||
530 | bufstart = buf; | ||
531 | len = JFFS2_MIN_NODE_HEADER; | ||
389 | } | 532 | } |
390 | frag = frag_next(frag); | ||
391 | } | ||
392 | while (frag && frag->ofs >= size) { | ||
393 | struct jffs2_node_frag *next = frag_next(frag); | ||
394 | 533 | ||
395 | D1(printk(KERN_DEBUG "Removing frag 0x%08x-0x%08x\n", frag->ofs, frag->ofs+frag->size)); | 534 | dbg_readinode("read %d bytes at %#08x(%d).\n", len, ref_offset(ref), ref_flags(ref)); |
396 | frag_erase(frag, list); | ||
397 | jffs2_obsolete_node_frag(c, frag); | ||
398 | frag = next; | ||
399 | } | ||
400 | } | ||
401 | 535 | ||
402 | /* Scan the list of all nodes present for this ino, build map of versions, etc. */ | 536 | /* FIXME: point() */ |
537 | err = jffs2_flash_read(c, ref_offset(ref), len, | ||
538 | &retlen, bufstart); | ||
539 | if (err) { | ||
540 | JFFS2_ERROR("can not read %d bytes from 0x%08x, " "error code: %d.\n", len, ref_offset(ref), err); | ||
541 | goto free_out; | ||
542 | } | ||
403 | 543 | ||
404 | static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, | 544 | if (retlen < len) { |
405 | struct jffs2_inode_info *f, | 545 | JFFS2_ERROR("short read at %#08x: %d instead of %d.\n", ref_offset(ref), retlen, len); |
406 | struct jffs2_raw_inode *latest_node); | 546 | err = -EIO; |
547 | goto free_out; | ||
548 | } | ||
407 | 549 | ||
408 | int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | 550 | node = (union jffs2_node_union *)bufstart; |
409 | uint32_t ino, struct jffs2_raw_inode *latest_node) | ||
410 | { | ||
411 | D2(printk(KERN_DEBUG "jffs2_do_read_inode(): getting inocache\n")); | ||
412 | 551 | ||
413 | retry_inocache: | 552 | switch (je16_to_cpu(node->u.nodetype)) { |
414 | spin_lock(&c->inocache_lock); | ||
415 | f->inocache = jffs2_get_ino_cache(c, ino); | ||
416 | 553 | ||
417 | D2(printk(KERN_DEBUG "jffs2_do_read_inode(): Got inocache at %p\n", f->inocache)); | 554 | case JFFS2_NODETYPE_DIRENT: |
555 | |||
556 | if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_dirent)) { | ||
557 | err = read_more(c, ref, sizeof(struct jffs2_raw_dirent), &len, buf, bufstart); | ||
558 | if (unlikely(err)) | ||
559 | goto free_out; | ||
560 | } | ||
561 | |||
562 | err = read_direntry(c, ref, &node->d, retlen, &ret_fd, latest_mctime, mctime_ver); | ||
563 | if (err == 1) { | ||
564 | jffs2_mark_node_obsolete(c, ref); | ||
565 | break; | ||
566 | } else if (unlikely(err)) | ||
567 | goto free_out; | ||
568 | |||
569 | if (je32_to_cpu(node->d.version) > *highest_version) | ||
570 | *highest_version = je32_to_cpu(node->d.version); | ||
418 | 571 | ||
419 | if (f->inocache) { | ||
420 | /* Check its state. We may need to wait before we can use it */ | ||
421 | switch(f->inocache->state) { | ||
422 | case INO_STATE_UNCHECKED: | ||
423 | case INO_STATE_CHECKEDABSENT: | ||
424 | f->inocache->state = INO_STATE_READING; | ||
425 | break; | 572 | break; |
426 | |||
427 | case INO_STATE_CHECKING: | ||
428 | case INO_STATE_GC: | ||
429 | /* If it's in either of these states, we need | ||
430 | to wait for whoever's got it to finish and | ||
431 | put it back. */ | ||
432 | D1(printk(KERN_DEBUG "jffs2_get_ino_cache_read waiting for ino #%u in state %d\n", | ||
433 | ino, f->inocache->state)); | ||
434 | sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); | ||
435 | goto retry_inocache; | ||
436 | 573 | ||
437 | case INO_STATE_READING: | 574 | case JFFS2_NODETYPE_INODE: |
438 | case INO_STATE_PRESENT: | 575 | |
439 | /* Eep. This should never happen. It can | 576 | if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_inode)) { |
440 | happen if Linux calls read_inode() again | 577 | err = read_more(c, ref, sizeof(struct jffs2_raw_inode), &len, buf, bufstart); |
441 | before clear_inode() has finished though. */ | 578 | if (unlikely(err)) |
442 | printk(KERN_WARNING "Eep. Trying to read_inode #%u when it's already in state %d!\n", ino, f->inocache->state); | 579 | goto free_out; |
443 | /* Fail. That's probably better than allowing it to succeed */ | 580 | } |
444 | f->inocache = NULL; | 581 | |
582 | err = read_dnode(c, ref, &node->i, &ret_tn, len, latest_mctime, mctime_ver); | ||
583 | if (err == 1) { | ||
584 | jffs2_mark_node_obsolete(c, ref); | ||
585 | break; | ||
586 | } else if (unlikely(err)) | ||
587 | goto free_out; | ||
588 | |||
589 | if (je32_to_cpu(node->i.version) > *highest_version) | ||
590 | *highest_version = je32_to_cpu(node->i.version); | ||
591 | |||
445 | break; | 592 | break; |
446 | 593 | ||
447 | default: | 594 | default: |
448 | BUG(); | 595 | if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_unknown_node)) { |
449 | } | 596 | err = read_more(c, ref, sizeof(struct jffs2_unknown_node), &len, buf, bufstart); |
450 | } | 597 | if (unlikely(err)) |
451 | spin_unlock(&c->inocache_lock); | 598 | goto free_out; |
599 | } | ||
600 | |||
601 | err = read_unknown(c, ref, &node->u); | ||
602 | if (err == 1) { | ||
603 | jffs2_mark_node_obsolete(c, ref); | ||
604 | break; | ||
605 | } else if (unlikely(err)) | ||
606 | goto free_out; | ||
452 | 607 | ||
453 | if (!f->inocache && ino == 1) { | ||
454 | /* Special case - no root inode on medium */ | ||
455 | f->inocache = jffs2_alloc_inode_cache(); | ||
456 | if (!f->inocache) { | ||
457 | printk(KERN_CRIT "jffs2_do_read_inode(): Cannot allocate inocache for root inode\n"); | ||
458 | return -ENOMEM; | ||
459 | } | 608 | } |
460 | D1(printk(KERN_DEBUG "jffs2_do_read_inode(): Creating inocache for root inode\n")); | 609 | spin_lock(&c->erase_completion_lock); |
461 | memset(f->inocache, 0, sizeof(struct jffs2_inode_cache)); | ||
462 | f->inocache->ino = f->inocache->nlink = 1; | ||
463 | f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache; | ||
464 | f->inocache->state = INO_STATE_READING; | ||
465 | jffs2_add_ino_cache(c, f->inocache); | ||
466 | } | 610 | } |
467 | if (!f->inocache) { | ||
468 | printk(KERN_WARNING "jffs2_do_read_inode() on nonexistent ino %u\n", ino); | ||
469 | return -ENOENT; | ||
470 | } | ||
471 | |||
472 | return jffs2_do_read_inode_internal(c, f, latest_node); | ||
473 | } | ||
474 | 611 | ||
475 | int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) | 612 | spin_unlock(&c->erase_completion_lock); |
476 | { | 613 | *tnp = ret_tn; |
477 | struct jffs2_raw_inode n; | 614 | *fdp = ret_fd; |
478 | struct jffs2_inode_info *f = kmalloc(sizeof(*f), GFP_KERNEL); | 615 | kfree(buf); |
479 | int ret; | ||
480 | 616 | ||
481 | if (!f) | 617 | dbg_readinode("nodes of inode #%u were read, the highest version is %u, latest_mctime %u, mctime_ver %u.\n", |
482 | return -ENOMEM; | 618 | f->inocache->ino, *highest_version, *latest_mctime, *mctime_ver); |
483 | 619 | return 0; | |
484 | memset(f, 0, sizeof(*f)); | ||
485 | init_MUTEX_LOCKED(&f->sem); | ||
486 | f->inocache = ic; | ||
487 | 620 | ||
488 | ret = jffs2_do_read_inode_internal(c, f, &n); | 621 | free_out: |
489 | if (!ret) { | 622 | jffs2_free_tmp_dnode_info_list(&ret_tn); |
490 | up(&f->sem); | 623 | jffs2_free_full_dirent_list(ret_fd); |
491 | jffs2_do_clear_inode(c, f); | 624 | kfree(buf); |
492 | } | 625 | return err; |
493 | kfree(f); | ||
494 | return ret; | ||
495 | } | 626 | } |
496 | 627 | ||
497 | static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, | 628 | static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, |
498 | struct jffs2_inode_info *f, | 629 | struct jffs2_inode_info *f, |
499 | struct jffs2_raw_inode *latest_node) | 630 | struct jffs2_raw_inode *latest_node) |
500 | { | 631 | { |
501 | struct jffs2_tmp_dnode_info *tn = NULL; | 632 | struct jffs2_tmp_dnode_info *tn; |
502 | struct rb_root tn_list; | 633 | struct rb_root tn_list; |
503 | struct rb_node *rb, *repl_rb; | 634 | struct rb_node *rb, *repl_rb; |
504 | struct jffs2_full_dirent *fd_list; | 635 | struct jffs2_full_dirent *fd_list; |
505 | struct jffs2_full_dnode *fn = NULL; | 636 | struct jffs2_full_dnode *fn, *first_fn = NULL; |
506 | uint32_t crc; | 637 | uint32_t crc; |
507 | uint32_t latest_mctime, mctime_ver; | 638 | uint32_t latest_mctime, mctime_ver; |
508 | uint32_t mdata_ver = 0; | ||
509 | size_t retlen; | 639 | size_t retlen; |
510 | int ret; | 640 | int ret; |
511 | 641 | ||
512 | D1(printk(KERN_DEBUG "jffs2_do_read_inode_internal(): ino #%u nlink is %d\n", f->inocache->ino, f->inocache->nlink)); | 642 | dbg_readinode("ino #%u nlink is %d\n", f->inocache->ino, f->inocache->nlink); |
513 | 643 | ||
514 | /* Grab all nodes relevant to this ino */ | 644 | /* Grab all nodes relevant to this ino */ |
515 | ret = jffs2_get_inode_nodes(c, f, &tn_list, &fd_list, &f->highest_version, &latest_mctime, &mctime_ver); | 645 | ret = jffs2_get_inode_nodes(c, f, &tn_list, &fd_list, &f->highest_version, &latest_mctime, &mctime_ver); |
516 | 646 | ||
517 | if (ret) { | 647 | if (ret) { |
518 | printk(KERN_CRIT "jffs2_get_inode_nodes() for ino %u returned %d\n", f->inocache->ino, ret); | 648 | JFFS2_ERROR("cannot read nodes for ino %u, returned error is %d\n", f->inocache->ino, ret); |
519 | if (f->inocache->state == INO_STATE_READING) | 649 | if (f->inocache->state == INO_STATE_READING) |
520 | jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT); | 650 | jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT); |
521 | return ret; | 651 | return ret; |
@@ -525,42 +655,33 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, | |||
525 | rb = rb_first(&tn_list); | 655 | rb = rb_first(&tn_list); |
526 | 656 | ||
527 | while (rb) { | 657 | while (rb) { |
658 | cond_resched(); | ||
528 | tn = rb_entry(rb, struct jffs2_tmp_dnode_info, rb); | 659 | tn = rb_entry(rb, struct jffs2_tmp_dnode_info, rb); |
529 | fn = tn->fn; | 660 | fn = tn->fn; |
530 | 661 | ret = 1; | |
531 | if (f->metadata) { | 662 | dbg_readinode("consider node ver %u, phys offset " |
532 | if (likely(tn->version >= mdata_ver)) { | 663 | "%#08x(%d), range %u-%u.\n", tn->version, |
533 | D1(printk(KERN_DEBUG "Obsoleting old metadata at 0x%08x\n", ref_offset(f->metadata->raw))); | 664 | ref_offset(fn->raw), ref_flags(fn->raw), |
534 | jffs2_mark_node_obsolete(c, f->metadata->raw); | 665 | fn->ofs, fn->ofs + fn->size); |
535 | jffs2_free_full_dnode(f->metadata); | ||
536 | f->metadata = NULL; | ||
537 | |||
538 | mdata_ver = 0; | ||
539 | } else { | ||
540 | /* This should never happen. */ | ||
541 | printk(KERN_WARNING "Er. New metadata at 0x%08x with ver %d is actually older than previous ver %d at 0x%08x\n", | ||
542 | ref_offset(fn->raw), tn->version, mdata_ver, ref_offset(f->metadata->raw)); | ||
543 | jffs2_mark_node_obsolete(c, fn->raw); | ||
544 | jffs2_free_full_dnode(fn); | ||
545 | /* Fill in latest_node from the metadata, not this one we're about to free... */ | ||
546 | fn = f->metadata; | ||
547 | goto next_tn; | ||
548 | } | ||
549 | } | ||
550 | 666 | ||
551 | if (fn->size) { | 667 | if (fn->size) { |
552 | jffs2_add_full_dnode_to_inode(c, f, fn); | 668 | ret = jffs2_add_older_frag_to_fragtree(c, f, tn); |
553 | } else { | 669 | /* TODO: the error code isn't checked, check it */ |
554 | /* Zero-sized node at end of version list. Just a metadata update */ | 670 | jffs2_dbg_fragtree_paranoia_check_nolock(f); |
555 | D1(printk(KERN_DEBUG "metadata @%08x: ver %d\n", ref_offset(fn->raw), tn->version)); | 671 | BUG_ON(ret < 0); |
672 | if (!first_fn && ret == 0) | ||
673 | first_fn = fn; | ||
674 | } else if (!first_fn) { | ||
675 | first_fn = fn; | ||
556 | f->metadata = fn; | 676 | f->metadata = fn; |
557 | mdata_ver = tn->version; | 677 | ret = 0; /* Prevent freeing the metadata update node */ |
558 | } | 678 | } else |
559 | next_tn: | 679 | jffs2_mark_node_obsolete(c, fn->raw); |
680 | |||
560 | BUG_ON(rb->rb_left); | 681 | BUG_ON(rb->rb_left); |
561 | if (rb->rb_parent && rb->rb_parent->rb_left == rb) { | 682 | if (rb->rb_parent && rb->rb_parent->rb_left == rb) { |
562 | /* We were then left-hand child of our parent. We need | 683 | /* We were then left-hand child of our parent. We need |
563 | to move our own right-hand child into our place. */ | 684 | * to move our own right-hand child into our place. */ |
564 | repl_rb = rb->rb_right; | 685 | repl_rb = rb->rb_right; |
565 | if (repl_rb) | 686 | if (repl_rb) |
566 | repl_rb->rb_parent = rb->rb_parent; | 687 | repl_rb->rb_parent = rb->rb_parent; |
@@ -570,7 +691,7 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, | |||
570 | rb = rb_next(rb); | 691 | rb = rb_next(rb); |
571 | 692 | ||
572 | /* Remove the spent tn from the tree; don't bother rebalancing | 693 | /* Remove the spent tn from the tree; don't bother rebalancing |
573 | but put our right-hand child in our own place. */ | 694 | * but put our right-hand child in our own place. */ |
574 | if (tn->rb.rb_parent) { | 695 | if (tn->rb.rb_parent) { |
575 | if (tn->rb.rb_parent->rb_left == &tn->rb) | 696 | if (tn->rb.rb_parent->rb_left == &tn->rb) |
576 | tn->rb.rb_parent->rb_left = repl_rb; | 697 | tn->rb.rb_parent->rb_left = repl_rb; |
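The pointer surgery in this loop works because rb_first() always returns the leftmost node, which by construction has no left child: its right subtree can simply take its place, and no rebalancing is needed because the whole temporary tree is being torn down anyway. A self-contained sketch of that idea on a plain binary search tree (ordinary left/right/parent pointers here, not the kernel's struct rb_node):

    #include <stddef.h>

    struct node {
            struct node *left, *right, *parent;
    };

    /* Unlink the leftmost node of a tree that is about to be destroyed.
       The leftmost node never has a left child, so its right subtree is
       spliced into its place and balance is simply ignored - the same
       shortcut the loop above takes with the tmp_dnode_info tree. */
    static struct node *take_leftmost(struct node **root)
    {
            struct node *n = *root;

            if (!n)
                    return NULL;
            while (n->left)
                    n = n->left;

            if (n->right)
                    n->right->parent = n->parent;
            if (!n->parent)
                    *root = n->right;            /* n was the root itself */
            else
                    n->parent->left = n->right;  /* n is always its parent's left child */

            return n;
    }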
@@ -581,19 +702,27 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, | |||
581 | tn->rb.rb_right->rb_parent = NULL; | 702 | tn->rb.rb_right->rb_parent = NULL; |
582 | 703 | ||
583 | jffs2_free_tmp_dnode_info(tn); | 704 | jffs2_free_tmp_dnode_info(tn); |
705 | if (ret) { | ||
706 | dbg_readinode("delete dnode %u-%u.\n", | ||
707 | fn->ofs, fn->ofs + fn->size); | ||
708 | jffs2_free_full_dnode(fn); | ||
709 | } | ||
584 | } | 710 | } |
585 | D1(jffs2_sanitycheck_fragtree(f)); | 711 | jffs2_dbg_fragtree_paranoia_check_nolock(f); |
586 | 712 | ||
587 | if (!fn) { | 713 | BUG_ON(first_fn && ref_obsolete(first_fn->raw)); |
714 | |||
715 | fn = first_fn; | ||
716 | if (unlikely(!first_fn)) { | ||
588 | /* No data nodes for this inode. */ | 717 | /* No data nodes for this inode. */ |
589 | if (f->inocache->ino != 1) { | 718 | if (f->inocache->ino != 1) { |
590 | printk(KERN_WARNING "jffs2_do_read_inode(): No data nodes found for ino #%u\n", f->inocache->ino); | 719 | JFFS2_WARNING("no data nodes found for ino #%u\n", f->inocache->ino); |
591 | if (!fd_list) { | 720 | if (!fd_list) { |
592 | if (f->inocache->state == INO_STATE_READING) | 721 | if (f->inocache->state == INO_STATE_READING) |
593 | jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT); | 722 | jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT); |
594 | return -EIO; | 723 | return -EIO; |
595 | } | 724 | } |
596 | printk(KERN_WARNING "jffs2_do_read_inode(): But it has children so we fake some modes for it\n"); | 725 | JFFS2_NOTICE("but it has children so we fake some modes for it\n"); |
597 | } | 726 | } |
598 | latest_node->mode = cpu_to_jemode(S_IFDIR|S_IRUGO|S_IWUSR|S_IXUGO); | 727 | latest_node->mode = cpu_to_jemode(S_IFDIR|S_IRUGO|S_IWUSR|S_IXUGO); |
599 | latest_node->version = cpu_to_je32(0); | 728 | latest_node->version = cpu_to_je32(0); |
@@ -608,8 +737,8 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, | |||
608 | 737 | ||
609 | ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(*latest_node), &retlen, (void *)latest_node); | 738 | ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(*latest_node), &retlen, (void *)latest_node); |
610 | if (ret || retlen != sizeof(*latest_node)) { | 739 | if (ret || retlen != sizeof(*latest_node)) { |
611 | printk(KERN_NOTICE "MTD read in jffs2_do_read_inode() failed: Returned %d, %zd of %zd bytes read\n", | 740 | JFFS2_ERROR("failed to read from flash: error %d, %zd of %zd bytes read\n", |
612 | ret, retlen, sizeof(*latest_node)); | 741 | ret, retlen, sizeof(*latest_node)); |
613 | /* FIXME: If this fails, there seems to be a memory leak. Find it. */ | 742 | /* FIXME: If this fails, there seems to be a memory leak. Find it. */ |
614 | up(&f->sem); | 743 | up(&f->sem); |
615 | jffs2_do_clear_inode(c, f); | 744 | jffs2_do_clear_inode(c, f); |
@@ -618,7 +747,8 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, | |||
618 | 747 | ||
619 | crc = crc32(0, latest_node, sizeof(*latest_node)-8); | 748 | crc = crc32(0, latest_node, sizeof(*latest_node)-8); |
620 | if (crc != je32_to_cpu(latest_node->node_crc)) { | 749 | if (crc != je32_to_cpu(latest_node->node_crc)) { |
621 | printk(KERN_NOTICE "CRC failed for read_inode of inode %u at physical location 0x%x\n", f->inocache->ino, ref_offset(fn->raw)); | 750 | JFFS2_ERROR("CRC failed for read_inode of inode %u at physical location 0x%x\n", |
751 | f->inocache->ino, ref_offset(fn->raw)); | ||
622 | up(&f->sem); | 752 | up(&f->sem); |
623 | jffs2_do_clear_inode(c, f); | 753 | jffs2_do_clear_inode(c, f); |
624 | return -EIO; | 754 | return -EIO; |
@@ -633,10 +763,10 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, | |||
633 | } | 763 | } |
634 | break; | 764 | break; |
635 | 765 | ||
636 | 766 | ||
637 | case S_IFREG: | 767 | case S_IFREG: |
638 | /* If it was a regular file, truncate it to the latest node's isize */ | 768 | /* If it was a regular file, truncate it to the latest node's isize */ |
639 | jffs2_truncate_fraglist(c, &f->fragtree, je32_to_cpu(latest_node->isize)); | 769 | jffs2_truncate_fragtree(c, &f->fragtree, je32_to_cpu(latest_node->isize)); |
640 | break; | 770 | break; |
641 | 771 | ||
642 | case S_IFLNK: | 772 | case S_IFLNK: |
@@ -649,37 +779,33 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, | |||
649 | 779 | ||
650 | if (f->inocache->state != INO_STATE_CHECKING) { | 780 | if (f->inocache->state != INO_STATE_CHECKING) { |
651 | /* Symlink's inode data is the target path. Read it and | 781 | /* Symlink's inode data is the target path. Read it and |
652 | * keep in RAM to facilitate quick follow symlink operation. | 782 | * keep in RAM to facilitate quick follow symlink |
653 | * We use f->dents field to store the target path, which | 783 | * operation. */ |
654 | * is somewhat ugly. */ | 784 | f->target = kmalloc(je32_to_cpu(latest_node->csize) + 1, GFP_KERNEL); |
655 | f->dents = kmalloc(je32_to_cpu(latest_node->csize) + 1, GFP_KERNEL); | 785 | if (!f->target) { |
656 | if (!f->dents) { | 786 | JFFS2_ERROR("can't allocate %d bytes of memory for the symlink target path cache\n", je32_to_cpu(latest_node->csize)); |
657 | printk(KERN_WARNING "Can't allocate %d bytes of memory " | ||
658 | "for the symlink target path cache\n", | ||
659 | je32_to_cpu(latest_node->csize)); | ||
660 | up(&f->sem); | 787 | up(&f->sem); |
661 | jffs2_do_clear_inode(c, f); | 788 | jffs2_do_clear_inode(c, f); |
662 | return -ENOMEM; | 789 | return -ENOMEM; |
663 | } | 790 | } |
664 | 791 | ||
665 | ret = jffs2_flash_read(c, ref_offset(fn->raw) + sizeof(*latest_node), | 792 | ret = jffs2_flash_read(c, ref_offset(fn->raw) + sizeof(*latest_node), |
666 | je32_to_cpu(latest_node->csize), &retlen, (char *)f->dents); | 793 | je32_to_cpu(latest_node->csize), &retlen, (char *)f->target); |
667 | 794 | ||
668 | if (ret || retlen != je32_to_cpu(latest_node->csize)) { | 795 | if (ret || retlen != je32_to_cpu(latest_node->csize)) { |
669 | if (retlen != je32_to_cpu(latest_node->csize)) | 796 | if (retlen != je32_to_cpu(latest_node->csize)) |
670 | ret = -EIO; | 797 | ret = -EIO; |
671 | kfree(f->dents); | 798 | kfree(f->target); |
672 | f->dents = NULL; | 799 | f->target = NULL; |
673 | up(&f->sem); | 800 | up(&f->sem); |
674 | jffs2_do_clear_inode(c, f); | 801 | jffs2_do_clear_inode(c, f); |
675 | return -ret; | 802 | return -ret; |
676 | } | 803 | } |
677 | 804 | ||
678 | ((char *)f->dents)[je32_to_cpu(latest_node->csize)] = '\0'; | 805 | f->target[je32_to_cpu(latest_node->csize)] = '\0'; |
679 | D1(printk(KERN_DEBUG "jffs2_do_read_inode(): symlink's target '%s' cached\n", | 806 | dbg_readinode("symlink's target '%s' cached\n", f->target); |
680 | (char *)f->dents)); | ||
681 | } | 807 | } |
682 | 808 | ||
683 | /* fall through... */ | 809 | /* fall through... */ |
684 | 810 | ||
685 | case S_IFBLK: | 811 | case S_IFBLK: |
@@ -687,14 +813,14 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, | |||
687 | /* Certain inode types should have only one data node, and it's | 813 | /* Certain inode types should have only one data node, and it's |
688 | kept as the metadata node */ | 814 | kept as the metadata node */ |
689 | if (f->metadata) { | 815 | if (f->metadata) { |
690 | printk(KERN_WARNING "Argh. Special inode #%u with mode 0%o had metadata node\n", | 816 | JFFS2_ERROR("Argh. Special inode #%u with mode 0%o had metadata node\n", |
691 | f->inocache->ino, jemode_to_cpu(latest_node->mode)); | 817 | f->inocache->ino, jemode_to_cpu(latest_node->mode)); |
692 | up(&f->sem); | 818 | up(&f->sem); |
693 | jffs2_do_clear_inode(c, f); | 819 | jffs2_do_clear_inode(c, f); |
694 | return -EIO; | 820 | return -EIO; |
695 | } | 821 | } |
696 | if (!frag_first(&f->fragtree)) { | 822 | if (!frag_first(&f->fragtree)) { |
697 | printk(KERN_WARNING "Argh. Special inode #%u with mode 0%o has no fragments\n", | 823 | JFFS2_ERROR("Argh. Special inode #%u with mode 0%o has no fragments\n", |
698 | f->inocache->ino, jemode_to_cpu(latest_node->mode)); | 824 | f->inocache->ino, jemode_to_cpu(latest_node->mode)); |
699 | up(&f->sem); | 825 | up(&f->sem); |
700 | jffs2_do_clear_inode(c, f); | 826 | jffs2_do_clear_inode(c, f); |
@@ -702,7 +828,7 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, | |||
702 | } | 828 | } |
703 | /* ASSERT: f->fraglist != NULL */ | 829 | /* ASSERT: f->fraglist != NULL */ |
704 | if (frag_next(frag_first(&f->fragtree))) { | 830 | if (frag_next(frag_first(&f->fragtree))) { |
705 | printk(KERN_WARNING "Argh. Special inode #%u with mode 0x%x had more than one node\n", | 831 | JFFS2_ERROR("Argh. Special inode #%u with mode 0x%x had more than one node\n", |
706 | f->inocache->ino, jemode_to_cpu(latest_node->mode)); | 832 | f->inocache->ino, jemode_to_cpu(latest_node->mode)); |
707 | /* FIXME: Deal with it - check crc32, check for duplicate node, check times and discard the older one */ | 833 | /* FIXME: Deal with it - check crc32, check for duplicate node, check times and discard the older one */ |
708 | up(&f->sem); | 834 | up(&f->sem); |
@@ -721,6 +847,93 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, | |||
721 | return 0; | 847 | return 0; |
722 | } | 848 | } |
723 | 849 | ||
850 | /* Scan the list of all nodes present for this ino, build map of versions, etc. */ | ||
851 | int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | ||
852 | uint32_t ino, struct jffs2_raw_inode *latest_node) | ||
853 | { | ||
854 | dbg_readinode("read inode #%u\n", ino); | ||
855 | |||
856 | retry_inocache: | ||
857 | spin_lock(&c->inocache_lock); | ||
858 | f->inocache = jffs2_get_ino_cache(c, ino); | ||
859 | |||
860 | if (f->inocache) { | ||
861 | /* Check its state. We may need to wait before we can use it */ | ||
862 | switch(f->inocache->state) { | ||
863 | case INO_STATE_UNCHECKED: | ||
864 | case INO_STATE_CHECKEDABSENT: | ||
865 | f->inocache->state = INO_STATE_READING; | ||
866 | break; | ||
867 | |||
868 | case INO_STATE_CHECKING: | ||
869 | case INO_STATE_GC: | ||
870 | /* If it's in either of these states, we need | ||
871 | to wait for whoever's got it to finish and | ||
872 | put it back. */ | ||
873 | dbg_readinode("waiting for ino #%u in state %d\n", ino, f->inocache->state); | ||
874 | sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); | ||
875 | goto retry_inocache; | ||
876 | |||
877 | case INO_STATE_READING: | ||
878 | case INO_STATE_PRESENT: | ||
879 | /* Eep. This should never happen. It can | ||
880 | happen if Linux calls read_inode() again | ||
881 | before clear_inode() has finished though. */ | ||
882 | JFFS2_ERROR("Eep. Trying to read_inode #%u when it's already in state %d!\n", ino, f->inocache->state); | ||
883 | /* Fail. That's probably better than allowing it to succeed */ | ||
884 | f->inocache = NULL; | ||
885 | break; | ||
886 | |||
887 | default: | ||
888 | BUG(); | ||
889 | } | ||
890 | } | ||
891 | spin_unlock(&c->inocache_lock); | ||
892 | |||
893 | if (!f->inocache && ino == 1) { | ||
894 | /* Special case - no root inode on medium */ | ||
895 | f->inocache = jffs2_alloc_inode_cache(); | ||
896 | if (!f->inocache) { | ||
897 | JFFS2_ERROR("cannot allocate inocache for root inode\n"); | ||
898 | return -ENOMEM; | ||
899 | } | ||
900 | dbg_readinode("creating inocache for root inode\n"); | ||
901 | memset(f->inocache, 0, sizeof(struct jffs2_inode_cache)); | ||
902 | f->inocache->ino = f->inocache->nlink = 1; | ||
903 | f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache; | ||
904 | f->inocache->state = INO_STATE_READING; | ||
905 | jffs2_add_ino_cache(c, f->inocache); | ||
906 | } | ||
907 | if (!f->inocache) { | ||
908 | JFFS2_ERROR("requestied to read an nonexistent ino %u\n", ino); | ||
909 | return -ENOENT; | ||
910 | } | ||
911 | |||
912 | return jffs2_do_read_inode_internal(c, f, latest_node); | ||
913 | } | ||
914 | |||
915 | int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) | ||
916 | { | ||
917 | struct jffs2_raw_inode n; | ||
918 | struct jffs2_inode_info *f = kmalloc(sizeof(*f), GFP_KERNEL); | ||
919 | int ret; | ||
920 | |||
921 | if (!f) | ||
922 | return -ENOMEM; | ||
923 | |||
924 | memset(f, 0, sizeof(*f)); | ||
925 | init_MUTEX_LOCKED(&f->sem); | ||
926 | f->inocache = ic; | ||
927 | |||
928 | ret = jffs2_do_read_inode_internal(c, f, &n); | ||
929 | if (!ret) { | ||
930 | up(&f->sem); | ||
931 | jffs2_do_clear_inode(c, f); | ||
932 | } | ||
933 | kfree (f); | ||
934 | return ret; | ||
935 | } | ||
936 | |||
724 | void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f) | 937 | void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f) |
725 | { | 938 | { |
726 | struct jffs2_full_dirent *fd, *fds; | 939 | struct jffs2_full_dirent *fd, *fds; |
@@ -740,18 +953,16 @@ void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f) | |||
740 | 953 | ||
741 | jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL); | 954 | jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL); |
742 | 955 | ||
743 | /* For symlink inodes we use f->dents to store the target path name */ | 956 | if (f->target) { |
744 | if (S_ISLNK(OFNI_EDONI_2SFFJ(f)->i_mode)) { | 957 | kfree(f->target); |
745 | kfree(f->dents); | 958 | f->target = NULL; |
746 | f->dents = NULL; | 959 | } |
747 | } else { | ||
748 | fds = f->dents; | ||
749 | 960 | ||
750 | while(fds) { | 961 | fds = f->dents; |
751 | fd = fds; | 962 | while(fds) { |
752 | fds = fd->next; | 963 | fd = fds; |
753 | jffs2_free_full_dirent(fd); | 964 | fds = fd->next; |
754 | } | 965 | jffs2_free_full_dirent(fd); |
755 | } | 966 | } |
756 | 967 | ||
757 | if (f->inocache && f->inocache->state != INO_STATE_CHECKING) { | 968 | if (f->inocache && f->inocache->state != INO_STATE_CHECKING) { |
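Aside: the retry_inocache loop added in jffs2_do_read_inode above waits for an inode cache entry to leave the CHECKING/GC states before claiming it for reading. The following is a minimal user-space sketch of that wait-and-retry idea, assuming pthreads; the struct, state names and helpers are illustrative stand-ins, not the kernel's inocache API.

    /* Sketch only: wait-and-retry claim of a shared cache entry. */
    #include <pthread.h>
    #include <stdio.h>

    enum state { ABSENT, CHECKING, GC, READING, PRESENT };

    struct cache {
        pthread_mutex_t lock;   /* plays the role of c->inocache_lock */
        pthread_cond_t  wq;     /* plays the role of c->inocache_wq   */
        enum state      state;
    };

    /* Claim the entry for reading; sleep and re-check while someone else
     * (checker or GC) still owns it -- the retry_inocache idea above. */
    static void claim_for_reading(struct cache *c)
    {
        pthread_mutex_lock(&c->lock);
        while (c->state == CHECKING || c->state == GC)
            pthread_cond_wait(&c->wq, &c->lock);   /* sleep, then retry */
        c->state = READING;
        pthread_mutex_unlock(&c->lock);
    }

    /* Hand the entry back and wake any waiters, as the checker/GC would. */
    static void release(struct cache *c, enum state next)
    {
        pthread_mutex_lock(&c->lock);
        c->state = next;
        pthread_cond_broadcast(&c->wq);
        pthread_mutex_unlock(&c->lock);
    }

    int main(void)
    {
        struct cache c = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, ABSENT };
        claim_for_reading(&c);
        printf("state=%d (READING)\n", (int)c.state);
        release(&c, PRESENT);
        return 0;
    }

The real code additionally distinguishes "already READING/PRESENT" (a bug condition it fails loudly on) and the special root-inode case; the sketch covers only the wait path.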
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c index b63160f83bab..0e7456ec99fd 100644 --- a/fs/jffs2/scan.c +++ b/fs/jffs2/scan.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: scan.c,v 1.119 2005/02/17 17:51:13 dedekind Exp $ | 10 | * $Id: scan.c,v 1.125 2005/09/30 13:59:13 dedekind Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
@@ -18,22 +18,11 @@ | |||
18 | #include <linux/crc32.h> | 18 | #include <linux/crc32.h> |
19 | #include <linux/compiler.h> | 19 | #include <linux/compiler.h> |
20 | #include "nodelist.h" | 20 | #include "nodelist.h" |
21 | #include "summary.h" | ||
22 | #include "debug.h" | ||
21 | 23 | ||
22 | #define DEFAULT_EMPTY_SCAN_SIZE 1024 | 24 | #define DEFAULT_EMPTY_SCAN_SIZE 1024 |
23 | 25 | ||
24 | #define DIRTY_SPACE(x) do { typeof(x) _x = (x); \ | ||
25 | c->free_size -= _x; c->dirty_size += _x; \ | ||
26 | jeb->free_size -= _x ; jeb->dirty_size += _x; \ | ||
27 | }while(0) | ||
28 | #define USED_SPACE(x) do { typeof(x) _x = (x); \ | ||
29 | c->free_size -= _x; c->used_size += _x; \ | ||
30 | jeb->free_size -= _x ; jeb->used_size += _x; \ | ||
31 | }while(0) | ||
32 | #define UNCHECKED_SPACE(x) do { typeof(x) _x = (x); \ | ||
33 | c->free_size -= _x; c->unchecked_size += _x; \ | ||
34 | jeb->free_size -= _x ; jeb->unchecked_size += _x; \ | ||
35 | }while(0) | ||
36 | |||
37 | #define noisy_printk(noise, args...) do { \ | 26 | #define noisy_printk(noise, args...) do { \ |
38 | if (*(noise)) { \ | 27 | if (*(noise)) { \ |
39 | printk(KERN_NOTICE args); \ | 28 | printk(KERN_NOTICE args); \ |
@@ -47,23 +36,16 @@ | |||
47 | static uint32_t pseudo_random; | 36 | static uint32_t pseudo_random; |
48 | 37 | ||
49 | static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | 38 | static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, |
50 | unsigned char *buf, uint32_t buf_size); | 39 | unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s); |
51 | 40 | ||
52 | /* These helper functions _must_ increase ofs and also do the dirty/used space accounting. | 41 | /* These helper functions _must_ increase ofs and also do the dirty/used space accounting. |
53 | * Returning an error will abort the mount - bad checksums etc. should just mark the space | 42 | * Returning an error will abort the mount - bad checksums etc. should just mark the space |
54 | * as dirty. | 43 | * as dirty. |
55 | */ | 44 | */ |
56 | static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | 45 | static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, |
57 | struct jffs2_raw_inode *ri, uint32_t ofs); | 46 | struct jffs2_raw_inode *ri, uint32_t ofs, struct jffs2_summary *s); |
58 | static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | 47 | static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, |
59 | struct jffs2_raw_dirent *rd, uint32_t ofs); | 48 | struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s); |
60 | |||
61 | #define BLK_STATE_ALLFF 0 | ||
62 | #define BLK_STATE_CLEAN 1 | ||
63 | #define BLK_STATE_PARTDIRTY 2 | ||
64 | #define BLK_STATE_CLEANMARKER 3 | ||
65 | #define BLK_STATE_ALLDIRTY 4 | ||
66 | #define BLK_STATE_BADBLOCK 5 | ||
67 | 49 | ||
68 | static inline int min_free(struct jffs2_sb_info *c) | 50 | static inline int min_free(struct jffs2_sb_info *c) |
69 | { | 51 | { |
@@ -89,6 +71,7 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) | |||
89 | uint32_t empty_blocks = 0, bad_blocks = 0; | 71 | uint32_t empty_blocks = 0, bad_blocks = 0; |
90 | unsigned char *flashbuf = NULL; | 72 | unsigned char *flashbuf = NULL; |
91 | uint32_t buf_size = 0; | 73 | uint32_t buf_size = 0; |
74 | struct jffs2_summary *s = NULL; /* summary info collected by the scan process */ | ||
92 | #ifndef __ECOS | 75 | #ifndef __ECOS |
93 | size_t pointlen; | 76 | size_t pointlen; |
94 | 77 | ||
@@ -122,21 +105,34 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) | |||
122 | return -ENOMEM; | 105 | return -ENOMEM; |
123 | } | 106 | } |
124 | 107 | ||
108 | if (jffs2_sum_active()) { | ||
109 | s = kmalloc(sizeof(struct jffs2_summary), GFP_KERNEL); | ||
110 | if (!s) { | ||
111 | JFFS2_WARNING("Can't allocate memory for summary\n"); | ||
112 | return -ENOMEM; | ||
113 | } | ||
114 | memset(s, 0, sizeof(struct jffs2_summary)); | ||
115 | } | ||
116 | |||
125 | for (i=0; i<c->nr_blocks; i++) { | 117 | for (i=0; i<c->nr_blocks; i++) { |
126 | struct jffs2_eraseblock *jeb = &c->blocks[i]; | 118 | struct jffs2_eraseblock *jeb = &c->blocks[i]; |
127 | 119 | ||
128 | ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset), buf_size); | 120 | /* reset summary info for next eraseblock scan */ |
121 | jffs2_sum_reset_collected(s); | ||
122 | |||
123 | ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset), | ||
124 | buf_size, s); | ||
129 | 125 | ||
130 | if (ret < 0) | 126 | if (ret < 0) |
131 | goto out; | 127 | goto out; |
132 | 128 | ||
133 | ACCT_PARANOIA_CHECK(jeb); | 129 | jffs2_dbg_acct_paranoia_check_nolock(c, jeb); |
134 | 130 | ||
135 | /* Now decide which list to put it on */ | 131 | /* Now decide which list to put it on */ |
136 | switch(ret) { | 132 | switch(ret) { |
137 | case BLK_STATE_ALLFF: | 133 | case BLK_STATE_ALLFF: |
138 | /* | 134 | /* |
139 | * Empty block. Since we can't be sure it | 135 | * Empty block. Since we can't be sure it |
140 | * was entirely erased, we just queue it for erase | 136 | * was entirely erased, we just queue it for erase |
141 | * again. It will be marked as such when the erase | 137 | * again. It will be marked as such when the erase |
142 | * is complete. Meanwhile we still count it as empty | 138 | * is complete. Meanwhile we still count it as empty |
@@ -162,18 +158,18 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) | |||
162 | break; | 158 | break; |
163 | 159 | ||
164 | case BLK_STATE_CLEAN: | 160 | case BLK_STATE_CLEAN: |
165 | /* Full (or almost full) of clean data. Clean list */ | 161 | /* Full (or almost full) of clean data. Clean list */ |
166 | list_add(&jeb->list, &c->clean_list); | 162 | list_add(&jeb->list, &c->clean_list); |
167 | break; | 163 | break; |
168 | 164 | ||
169 | case BLK_STATE_PARTDIRTY: | 165 | case BLK_STATE_PARTDIRTY: |
170 | /* Some data, but not full. Dirty list. */ | 166 | /* Some data, but not full. Dirty list. */ |
171 | /* We want to remember the block with most free space | 167 | /* We want to remember the block with most free space |
172 | and stick it in the 'nextblock' position to start writing to it. */ | 168 | and stick it in the 'nextblock' position to start writing to it. */ |
173 | if (jeb->free_size > min_free(c) && | 169 | if (jeb->free_size > min_free(c) && |
174 | (!c->nextblock || c->nextblock->free_size < jeb->free_size)) { | 170 | (!c->nextblock || c->nextblock->free_size < jeb->free_size)) { |
175 | /* Better candidate for the next writes to go to */ | 171 | /* Better candidate for the next writes to go to */ |
176 | if (c->nextblock) { | 172 | if (c->nextblock) { |
177 | c->nextblock->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size; | 173 | c->nextblock->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size; |
178 | c->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size; | 174 | c->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size; |
179 | c->free_size -= c->nextblock->free_size; | 175 | c->free_size -= c->nextblock->free_size; |
@@ -184,9 +180,14 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) | |||
184 | } else { | 180 | } else { |
185 | list_add(&c->nextblock->list, &c->dirty_list); | 181 | list_add(&c->nextblock->list, &c->dirty_list); |
186 | } | 182 | } |
183 | /* deleting summary information of the old nextblock */ | ||
184 | jffs2_sum_reset_collected(c->summary); | ||
187 | } | 185 | } |
188 | c->nextblock = jeb; | 186 | /* update collected summary information for the current nextblock */ | ||
189 | } else { | 187 | jffs2_sum_move_collected(c, s); |
188 | D1(printk(KERN_DEBUG "jffs2_scan_medium(): new nextblock = 0x%08x\n", jeb->offset)); | ||
189 | c->nextblock = jeb; | ||
190 | } else { | ||
190 | jeb->dirty_size += jeb->free_size + jeb->wasted_size; | 191 | jeb->dirty_size += jeb->free_size + jeb->wasted_size; |
191 | c->dirty_size += jeb->free_size + jeb->wasted_size; | 192 | c->dirty_size += jeb->free_size + jeb->wasted_size; |
192 | c->free_size -= jeb->free_size; | 193 | c->free_size -= jeb->free_size; |
@@ -197,30 +198,33 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) | |||
197 | } else { | 198 | } else { |
198 | list_add(&jeb->list, &c->dirty_list); | 199 | list_add(&jeb->list, &c->dirty_list); |
199 | } | 200 | } |
200 | } | 201 | } |
201 | break; | 202 | break; |
202 | 203 | ||
203 | case BLK_STATE_ALLDIRTY: | 204 | case BLK_STATE_ALLDIRTY: |
204 | /* Nothing valid - not even a clean marker. Needs erasing. */ | 205 | /* Nothing valid - not even a clean marker. Needs erasing. */ |
205 | /* For now we just put it on the erasing list. We'll start the erases later */ | 206 | /* For now we just put it on the erasing list. We'll start the erases later */ |
206 | D1(printk(KERN_NOTICE "JFFS2: Erase block at 0x%08x is not formatted. It will be erased\n", jeb->offset)); | 207 | D1(printk(KERN_NOTICE "JFFS2: Erase block at 0x%08x is not formatted. It will be erased\n", jeb->offset)); |
207 | list_add(&jeb->list, &c->erase_pending_list); | 208 | list_add(&jeb->list, &c->erase_pending_list); |
208 | c->nr_erasing_blocks++; | 209 | c->nr_erasing_blocks++; |
209 | break; | 210 | break; |
210 | 211 | ||
211 | case BLK_STATE_BADBLOCK: | 212 | case BLK_STATE_BADBLOCK: |
212 | D1(printk(KERN_NOTICE "JFFS2: Block at 0x%08x is bad\n", jeb->offset)); | 213 | D1(printk(KERN_NOTICE "JFFS2: Block at 0x%08x is bad\n", jeb->offset)); |
213 | list_add(&jeb->list, &c->bad_list); | 214 | list_add(&jeb->list, &c->bad_list); |
214 | c->bad_size += c->sector_size; | 215 | c->bad_size += c->sector_size; |
215 | c->free_size -= c->sector_size; | 216 | c->free_size -= c->sector_size; |
216 | bad_blocks++; | 217 | bad_blocks++; |
217 | break; | 218 | break; |
218 | default: | 219 | default: |
219 | printk(KERN_WARNING "jffs2_scan_medium(): unknown block state\n"); | 220 | printk(KERN_WARNING "jffs2_scan_medium(): unknown block state\n"); |
220 | BUG(); | 221 | BUG(); |
221 | } | 222 | } |
222 | } | 223 | } |
223 | 224 | ||
225 | if (jffs2_sum_active() && s) | ||
226 | kfree(s); | ||
227 | |||
224 | /* Nextblock dirty is always seen as wasted, because we cannot recycle it now */ | 228 | /* Nextblock dirty is always seen as wasted, because we cannot recycle it now */ |
225 | if (c->nextblock && (c->nextblock->dirty_size)) { | 229 | if (c->nextblock && (c->nextblock->dirty_size)) { |
226 | c->nextblock->wasted_size += c->nextblock->dirty_size; | 230 | c->nextblock->wasted_size += c->nextblock->dirty_size; |
@@ -229,12 +233,12 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) | |||
229 | c->nextblock->dirty_size = 0; | 233 | c->nextblock->dirty_size = 0; |
230 | } | 234 | } |
231 | #ifdef CONFIG_JFFS2_FS_WRITEBUFFER | 235 | #ifdef CONFIG_JFFS2_FS_WRITEBUFFER |
232 | if (!jffs2_can_mark_obsolete(c) && c->nextblock && (c->nextblock->free_size & (c->wbuf_pagesize-1))) { | 236 | if (!jffs2_can_mark_obsolete(c) && c->nextblock && (c->nextblock->free_size % c->wbuf_pagesize)) { |
233 | /* If we're going to start writing into a block which already | 237 | /* If we're going to start writing into a block which already |
234 | contains data, and the end of the data isn't page-aligned, | 238 | contains data, and the end of the data isn't page-aligned, |
235 | skip a little and align it. */ | 239 | skip a little and align it. */ |
236 | 240 | ||
237 | uint32_t skip = c->nextblock->free_size & (c->wbuf_pagesize-1); | 241 | uint32_t skip = c->nextblock->free_size % c->wbuf_pagesize; |
238 | 242 | ||
239 | D1(printk(KERN_DEBUG "jffs2_scan_medium(): Skipping %d bytes in nextblock to ensure page alignment\n", | 243 | D1(printk(KERN_DEBUG "jffs2_scan_medium(): Skipping %d bytes in nextblock to ensure page alignment\n", |
240 | skip)); | 244 | skip)); |
@@ -246,7 +250,7 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) | |||
246 | } | 250 | } |
247 | #endif | 251 | #endif |
248 | if (c->nr_erasing_blocks) { | 252 | if (c->nr_erasing_blocks) { |
249 | if ( !c->used_size && ((c->nr_free_blocks+empty_blocks+bad_blocks)!= c->nr_blocks || bad_blocks == c->nr_blocks) ) { | 253 | if ( !c->used_size && ((c->nr_free_blocks+empty_blocks+bad_blocks)!= c->nr_blocks || bad_blocks == c->nr_blocks) ) { |
250 | printk(KERN_NOTICE "Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n"); | 254 | printk(KERN_NOTICE "Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n"); |
251 | printk(KERN_NOTICE "empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n",empty_blocks,bad_blocks,c->nr_blocks); | 255 | printk(KERN_NOTICE "empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n",empty_blocks,bad_blocks,c->nr_blocks); |
252 | ret = -EIO; | 256 | ret = -EIO; |
@@ -259,13 +263,13 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) | |||
259 | if (buf_size) | 263 | if (buf_size) |
260 | kfree(flashbuf); | 264 | kfree(flashbuf); |
261 | #ifndef __ECOS | 265 | #ifndef __ECOS |
262 | else | 266 | else |
263 | c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size); | 267 | c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size); |
264 | #endif | 268 | #endif |
265 | return ret; | 269 | return ret; |
266 | } | 270 | } |
267 | 271 | ||
268 | static int jffs2_fill_scan_buf (struct jffs2_sb_info *c, unsigned char *buf, | 272 | int jffs2_fill_scan_buf (struct jffs2_sb_info *c, void *buf, |
269 | uint32_t ofs, uint32_t len) | 273 | uint32_t ofs, uint32_t len) |
270 | { | 274 | { |
271 | int ret; | 275 | int ret; |
@@ -286,14 +290,36 @@ static int jffs2_fill_scan_buf (struct jffs2_sb_info *c, unsigned char *buf, | |||
286 | return 0; | 290 | return 0; |
287 | } | 291 | } |
288 | 292 | ||
293 | int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) | ||
294 | { | ||
295 | if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size | ||
296 | && (!jeb->first_node || !jeb->first_node->next_phys) ) | ||
297 | return BLK_STATE_CLEANMARKER; | ||
298 | |||
299 | /* move blocks with max 4 byte dirty space to cleanlist */ | ||
300 | else if (!ISDIRTY(c->sector_size - (jeb->used_size + jeb->unchecked_size))) { | ||
301 | c->dirty_size -= jeb->dirty_size; | ||
302 | c->wasted_size += jeb->dirty_size; | ||
303 | jeb->wasted_size += jeb->dirty_size; | ||
304 | jeb->dirty_size = 0; | ||
305 | return BLK_STATE_CLEAN; | ||
306 | } else if (jeb->used_size || jeb->unchecked_size) | ||
307 | return BLK_STATE_PARTDIRTY; | ||
308 | else | ||
309 | return BLK_STATE_ALLDIRTY; | ||
310 | } | ||
311 | |||
289 | static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | 312 | static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, |
290 | unsigned char *buf, uint32_t buf_size) { | 313 | unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s) { |
291 | struct jffs2_unknown_node *node; | 314 | struct jffs2_unknown_node *node; |
292 | struct jffs2_unknown_node crcnode; | 315 | struct jffs2_unknown_node crcnode; |
316 | struct jffs2_sum_marker *sm; | ||
293 | uint32_t ofs, prevofs; | 317 | uint32_t ofs, prevofs; |
294 | uint32_t hdr_crc, buf_ofs, buf_len; | 318 | uint32_t hdr_crc, buf_ofs, buf_len; |
295 | int err; | 319 | int err; |
296 | int noise = 0; | 320 | int noise = 0; |
321 | |||
322 | |||
297 | #ifdef CONFIG_JFFS2_FS_WRITEBUFFER | 323 | #ifdef CONFIG_JFFS2_FS_WRITEBUFFER |
298 | int cleanmarkerfound = 0; | 324 | int cleanmarkerfound = 0; |
299 | #endif | 325 | #endif |
@@ -319,17 +345,53 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo | |||
319 | } | 345 | } |
320 | } | 346 | } |
321 | #endif | 347 | #endif |
348 | |||
349 | if (jffs2_sum_active()) { | ||
350 | sm = kmalloc(sizeof(struct jffs2_sum_marker), GFP_KERNEL); | ||
351 | if (!sm) { | ||
352 | return -ENOMEM; | ||
353 | } | ||
354 | |||
355 | err = jffs2_fill_scan_buf(c, (unsigned char *) sm, jeb->offset + c->sector_size - | ||
356 | sizeof(struct jffs2_sum_marker), sizeof(struct jffs2_sum_marker)); | ||
357 | if (err) { | ||
358 | kfree(sm); | ||
359 | return err; | ||
360 | } | ||
361 | |||
362 | if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC ) { | ||
363 | err = jffs2_sum_scan_sumnode(c, jeb, je32_to_cpu(sm->offset), &pseudo_random); | ||
364 | if (err) { | ||
365 | kfree(sm); | ||
366 | return err; | ||
367 | } | ||
368 | } | ||
369 | |||
370 | kfree(sm); | ||
371 | |||
372 | ofs = jeb->offset; | ||
373 | prevofs = jeb->offset - 1; | ||
374 | } | ||
375 | |||
322 | buf_ofs = jeb->offset; | 376 | buf_ofs = jeb->offset; |
323 | 377 | ||
324 | if (!buf_size) { | 378 | if (!buf_size) { |
325 | buf_len = c->sector_size; | 379 | buf_len = c->sector_size; |
380 | |||
381 | if (jffs2_sum_active()) { | ||
382 | /* must reread because of summary test */ | ||
383 | err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len); | ||
384 | if (err) | ||
385 | return err; | ||
386 | } | ||
387 | |||
326 | } else { | 388 | } else { |
327 | buf_len = EMPTY_SCAN_SIZE(c->sector_size); | 389 | buf_len = EMPTY_SCAN_SIZE(c->sector_size); |
328 | err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len); | 390 | err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len); |
329 | if (err) | 391 | if (err) |
330 | return err; | 392 | return err; |
331 | } | 393 | } |
332 | 394 | ||
333 | /* We temporarily use 'ofs' as a pointer into the buffer/jeb */ | 395 | /* We temporarily use 'ofs' as a pointer into the buffer/jeb */ |
334 | ofs = 0; | 396 | ofs = 0; |
335 | 397 | ||
@@ -367,10 +429,12 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo | |||
367 | 429 | ||
368 | noise = 10; | 430 | noise = 10; |
369 | 431 | ||
370 | scan_more: | 432 | dbg_summary("no summary found in jeb 0x%08x. Apply original scan.\n",jeb->offset); |
433 | |||
434 | scan_more: | ||
371 | while(ofs < jeb->offset + c->sector_size) { | 435 | while(ofs < jeb->offset + c->sector_size) { |
372 | 436 | ||
373 | D1(ACCT_PARANOIA_CHECK(jeb)); | 437 | jffs2_dbg_acct_paranoia_check_nolock(c, jeb); |
374 | 438 | ||
375 | cond_resched(); | 439 | cond_resched(); |
376 | 440 | ||
@@ -432,7 +496,7 @@ scan_more: | |||
432 | 496 | ||
433 | /* If we're only checking the beginning of a block with a cleanmarker, | 497 | /* If we're only checking the beginning of a block with a cleanmarker, |
434 | bail now */ | 498 | bail now */ |
435 | if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) && | 499 | if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) && |
436 | c->cleanmarker_size && !jeb->dirty_size && !jeb->first_node->next_phys) { | 500 | c->cleanmarker_size && !jeb->dirty_size && !jeb->first_node->next_phys) { |
437 | D1(printk(KERN_DEBUG "%d bytes at start of block seems clean... assuming all clean\n", EMPTY_SCAN_SIZE(c->sector_size))); | 501 | D1(printk(KERN_DEBUG "%d bytes at start of block seems clean... assuming all clean\n", EMPTY_SCAN_SIZE(c->sector_size))); |
438 | return BLK_STATE_CLEANMARKER; | 502 | return BLK_STATE_CLEANMARKER; |
@@ -441,7 +505,7 @@ scan_more: | |||
441 | /* See how much more there is to read in this eraseblock... */ | 505 | /* See how much more there is to read in this eraseblock... */ |
442 | buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); | 506 | buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); |
443 | if (!buf_len) { | 507 | if (!buf_len) { |
444 | /* No more to read. Break out of main loop without marking | 508 | /* No more to read. Break out of main loop without marking |
445 | this range of empty space as dirty (because it's not) */ | 509 | this range of empty space as dirty (because it's not) */ |
446 | D1(printk(KERN_DEBUG "Empty flash at %08x runs to end of block. Treating as free_space\n", | 510 | D1(printk(KERN_DEBUG "Empty flash at %08x runs to end of block. Treating as free_space\n", |
447 | empty_start)); | 511 | empty_start)); |
@@ -476,8 +540,8 @@ scan_more: | |||
476 | } | 540 | } |
477 | if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) { | 541 | if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) { |
478 | /* OK. We're out of possibilities. Whinge and move on */ | 542 | /* OK. We're out of possibilities. Whinge and move on */ |
479 | noisy_printk(&noise, "jffs2_scan_eraseblock(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n", | 543 | noisy_printk(&noise, "jffs2_scan_eraseblock(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n", |
480 | JFFS2_MAGIC_BITMASK, ofs, | 544 | JFFS2_MAGIC_BITMASK, ofs, |
481 | je16_to_cpu(node->magic)); | 545 | je16_to_cpu(node->magic)); |
482 | DIRTY_SPACE(4); | 546 | DIRTY_SPACE(4); |
483 | ofs += 4; | 547 | ofs += 4; |
@@ -492,7 +556,7 @@ scan_more: | |||
492 | if (hdr_crc != je32_to_cpu(node->hdr_crc)) { | 556 | if (hdr_crc != je32_to_cpu(node->hdr_crc)) { |
493 | noisy_printk(&noise, "jffs2_scan_eraseblock(): Node at 0x%08x {0x%04x, 0x%04x, 0x%08x) has invalid CRC 0x%08x (calculated 0x%08x)\n", | 557 | noisy_printk(&noise, "jffs2_scan_eraseblock(): Node at 0x%08x {0x%04x, 0x%04x, 0x%08x) has invalid CRC 0x%08x (calculated 0x%08x)\n", |
494 | ofs, je16_to_cpu(node->magic), | 558 | ofs, je16_to_cpu(node->magic), |
495 | je16_to_cpu(node->nodetype), | 559 | je16_to_cpu(node->nodetype), |
496 | je32_to_cpu(node->totlen), | 560 | je32_to_cpu(node->totlen), |
497 | je32_to_cpu(node->hdr_crc), | 561 | je32_to_cpu(node->hdr_crc), |
498 | hdr_crc); | 562 | hdr_crc); |
@@ -501,7 +565,7 @@ scan_more: | |||
501 | continue; | 565 | continue; |
502 | } | 566 | } |
503 | 567 | ||
504 | if (ofs + je32_to_cpu(node->totlen) > | 568 | if (ofs + je32_to_cpu(node->totlen) > |
505 | jeb->offset + c->sector_size) { | 569 | jeb->offset + c->sector_size) { |
506 | /* Eep. Node goes over the end of the erase block. */ | 570 | /* Eep. Node goes over the end of the erase block. */ |
507 | printk(KERN_WARNING "Node at 0x%08x with length 0x%08x would run over the end of the erase block\n", | 571 | printk(KERN_WARNING "Node at 0x%08x with length 0x%08x would run over the end of the erase block\n", |
@@ -532,11 +596,11 @@ scan_more: | |||
532 | buf_ofs = ofs; | 596 | buf_ofs = ofs; |
533 | node = (void *)buf; | 597 | node = (void *)buf; |
534 | } | 598 | } |
535 | err = jffs2_scan_inode_node(c, jeb, (void *)node, ofs); | 599 | err = jffs2_scan_inode_node(c, jeb, (void *)node, ofs, s); |
536 | if (err) return err; | 600 | if (err) return err; |
537 | ofs += PAD(je32_to_cpu(node->totlen)); | 601 | ofs += PAD(je32_to_cpu(node->totlen)); |
538 | break; | 602 | break; |
539 | 603 | ||
540 | case JFFS2_NODETYPE_DIRENT: | 604 | case JFFS2_NODETYPE_DIRENT: |
541 | if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) { | 605 | if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) { |
542 | buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); | 606 | buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); |
@@ -548,7 +612,7 @@ scan_more: | |||
548 | buf_ofs = ofs; | 612 | buf_ofs = ofs; |
549 | node = (void *)buf; | 613 | node = (void *)buf; |
550 | } | 614 | } |
551 | err = jffs2_scan_dirent_node(c, jeb, (void *)node, ofs); | 615 | err = jffs2_scan_dirent_node(c, jeb, (void *)node, ofs, s); |
552 | if (err) return err; | 616 | if (err) return err; |
553 | ofs += PAD(je32_to_cpu(node->totlen)); | 617 | ofs += PAD(je32_to_cpu(node->totlen)); |
554 | break; | 618 | break; |
@@ -556,7 +620,7 @@ scan_more: | |||
556 | case JFFS2_NODETYPE_CLEANMARKER: | 620 | case JFFS2_NODETYPE_CLEANMARKER: |
557 | D1(printk(KERN_DEBUG "CLEANMARKER node found at 0x%08x\n", ofs)); | 621 | D1(printk(KERN_DEBUG "CLEANMARKER node found at 0x%08x\n", ofs)); |
558 | if (je32_to_cpu(node->totlen) != c->cleanmarker_size) { | 622 | if (je32_to_cpu(node->totlen) != c->cleanmarker_size) { |
559 | printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n", | 623 | printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n", |
560 | ofs, je32_to_cpu(node->totlen), c->cleanmarker_size); | 624 | ofs, je32_to_cpu(node->totlen), c->cleanmarker_size); |
561 | DIRTY_SPACE(PAD(sizeof(struct jffs2_unknown_node))); | 625 | DIRTY_SPACE(PAD(sizeof(struct jffs2_unknown_node))); |
562 | ofs += PAD(sizeof(struct jffs2_unknown_node)); | 626 | ofs += PAD(sizeof(struct jffs2_unknown_node)); |
@@ -575,13 +639,15 @@ scan_more: | |||
575 | marker_ref->flash_offset = ofs | REF_NORMAL; | 639 | marker_ref->flash_offset = ofs | REF_NORMAL; |
576 | marker_ref->__totlen = c->cleanmarker_size; | 640 | marker_ref->__totlen = c->cleanmarker_size; |
577 | jeb->first_node = jeb->last_node = marker_ref; | 641 | jeb->first_node = jeb->last_node = marker_ref; |
578 | 642 | ||
579 | USED_SPACE(PAD(c->cleanmarker_size)); | 643 | USED_SPACE(PAD(c->cleanmarker_size)); |
580 | ofs += PAD(c->cleanmarker_size); | 644 | ofs += PAD(c->cleanmarker_size); |
581 | } | 645 | } |
582 | break; | 646 | break; |
583 | 647 | ||
584 | case JFFS2_NODETYPE_PADDING: | 648 | case JFFS2_NODETYPE_PADDING: |
649 | if (jffs2_sum_active()) | ||
650 | jffs2_sum_add_padding_mem(s, je32_to_cpu(node->totlen)); | ||
585 | DIRTY_SPACE(PAD(je32_to_cpu(node->totlen))); | 651 | DIRTY_SPACE(PAD(je32_to_cpu(node->totlen))); |
586 | ofs += PAD(je32_to_cpu(node->totlen)); | 652 | ofs += PAD(je32_to_cpu(node->totlen)); |
587 | break; | 653 | break; |
@@ -616,8 +682,15 @@ scan_more: | |||
616 | } | 682 | } |
617 | } | 683 | } |
618 | 684 | ||
685 | if (jffs2_sum_active()) { | ||
686 | if (PAD(s->sum_size + JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size) { | ||
687 | dbg_summary("There is not enough space for " | ||
688 | "summary information, disabling for this jeb!\n"); | ||
689 | jffs2_sum_disable_collecting(s); | ||
690 | } | ||
691 | } | ||
619 | 692 | ||
620 | D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x\n", jeb->offset, | 693 | D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x\n", jeb->offset, |
621 | jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size)); | 694 | jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size)); |
622 | 695 | ||
623 | /* mark_node_obsolete can add to wasted !! */ | 696 | /* mark_node_obsolete can add to wasted !! */ |
@@ -628,24 +701,10 @@ scan_more: | |||
628 | jeb->wasted_size = 0; | 701 | jeb->wasted_size = 0; |
629 | } | 702 | } |
630 | 703 | ||
631 | if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size | 704 | return jffs2_scan_classify_jeb(c, jeb); |
632 | && (!jeb->first_node || !jeb->first_node->next_phys) ) | ||
633 | return BLK_STATE_CLEANMARKER; | ||
634 | |||
635 | /* move blocks with max 4 byte dirty space to cleanlist */ | ||
636 | else if (!ISDIRTY(c->sector_size - (jeb->used_size + jeb->unchecked_size))) { | ||
637 | c->dirty_size -= jeb->dirty_size; | ||
638 | c->wasted_size += jeb->dirty_size; | ||
639 | jeb->wasted_size += jeb->dirty_size; | ||
640 | jeb->dirty_size = 0; | ||
641 | return BLK_STATE_CLEAN; | ||
642 | } else if (jeb->used_size || jeb->unchecked_size) | ||
643 | return BLK_STATE_PARTDIRTY; | ||
644 | else | ||
645 | return BLK_STATE_ALLDIRTY; | ||
646 | } | 705 | } |
647 | 706 | ||
648 | static struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino) | 707 | struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino) |
649 | { | 708 | { |
650 | struct jffs2_inode_cache *ic; | 709 | struct jffs2_inode_cache *ic; |
651 | 710 | ||
@@ -671,8 +730,8 @@ static struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info | |||
671 | return ic; | 730 | return ic; |
672 | } | 731 | } |
673 | 732 | ||
674 | static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | 733 | static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, |
675 | struct jffs2_raw_inode *ri, uint32_t ofs) | 734 | struct jffs2_raw_inode *ri, uint32_t ofs, struct jffs2_summary *s) |
676 | { | 735 | { |
677 | struct jffs2_raw_node_ref *raw; | 736 | struct jffs2_raw_node_ref *raw; |
678 | struct jffs2_inode_cache *ic; | 737 | struct jffs2_inode_cache *ic; |
@@ -681,11 +740,11 @@ static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_erasebloc | |||
681 | D1(printk(KERN_DEBUG "jffs2_scan_inode_node(): Node at 0x%08x\n", ofs)); | 740 | D1(printk(KERN_DEBUG "jffs2_scan_inode_node(): Node at 0x%08x\n", ofs)); |
682 | 741 | ||
683 | /* We do very little here now. Just check the ino# to which we should attribute | 742 | /* We do very little here now. Just check the ino# to which we should attribute |
684 | this node; we can do all the CRC checking etc. later. There's a tradeoff here -- | 743 | this node; we can do all the CRC checking etc. later. There's a tradeoff here -- |
685 | we used to scan the flash once only, reading everything we want from it into | 744 | we used to scan the flash once only, reading everything we want from it into |
686 | memory, then building all our in-core data structures and freeing the extra | 745 | memory, then building all our in-core data structures and freeing the extra |
687 | information. Now we allow the first part of the mount to complete a lot quicker, | 746 | information. Now we allow the first part of the mount to complete a lot quicker, |
688 | but we have to go _back_ to the flash in order to finish the CRC checking, etc. | 747 | but we have to go _back_ to the flash in order to finish the CRC checking, etc. |
689 | Which means that the _full_ amount of time to get to proper write mode with GC | 748 | Which means that the _full_ amount of time to get to proper write mode with GC |
690 | operational may actually be _longer_ than before. Sucks to be me. */ | 749 | operational may actually be _longer_ than before. Sucks to be me. */ |
691 | 750 | ||
@@ -731,7 +790,7 @@ static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_erasebloc | |||
731 | jeb->last_node->next_phys = raw; | 790 | jeb->last_node->next_phys = raw; |
732 | jeb->last_node = raw; | 791 | jeb->last_node = raw; |
733 | 792 | ||
734 | D1(printk(KERN_DEBUG "Node is ino #%u, version %d. Range 0x%x-0x%x\n", | 793 | D1(printk(KERN_DEBUG "Node is ino #%u, version %d. Range 0x%x-0x%x\n", |
735 | je32_to_cpu(ri->ino), je32_to_cpu(ri->version), | 794 | je32_to_cpu(ri->ino), je32_to_cpu(ri->version), |
736 | je32_to_cpu(ri->offset), | 795 | je32_to_cpu(ri->offset), |
737 | je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize))); | 796 | je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize))); |
@@ -739,11 +798,16 @@ static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_erasebloc | |||
739 | pseudo_random += je32_to_cpu(ri->version); | 798 | pseudo_random += je32_to_cpu(ri->version); |
740 | 799 | ||
741 | UNCHECKED_SPACE(PAD(je32_to_cpu(ri->totlen))); | 800 | UNCHECKED_SPACE(PAD(je32_to_cpu(ri->totlen))); |
801 | |||
802 | if (jffs2_sum_active()) { | ||
803 | jffs2_sum_add_inode_mem(s, ri, ofs - jeb->offset); | ||
804 | } | ||
805 | |||
742 | return 0; | 806 | return 0; |
743 | } | 807 | } |
744 | 808 | ||
745 | static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | 809 | static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, |
746 | struct jffs2_raw_dirent *rd, uint32_t ofs) | 810 | struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s) |
747 | { | 811 | { |
748 | struct jffs2_raw_node_ref *raw; | 812 | struct jffs2_raw_node_ref *raw; |
749 | struct jffs2_full_dirent *fd; | 813 | struct jffs2_full_dirent *fd; |
@@ -776,7 +840,7 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo | |||
776 | crc = crc32(0, fd->name, rd->nsize); | 840 | crc = crc32(0, fd->name, rd->nsize); |
777 | if (crc != je32_to_cpu(rd->name_crc)) { | 841 | if (crc != je32_to_cpu(rd->name_crc)) { |
778 | printk(KERN_NOTICE "jffs2_scan_dirent_node(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", | 842 | printk(KERN_NOTICE "jffs2_scan_dirent_node(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", |
779 | ofs, je32_to_cpu(rd->name_crc), crc); | 843 | ofs, je32_to_cpu(rd->name_crc), crc); |
780 | D1(printk(KERN_NOTICE "Name for which CRC failed is (now) '%s', ino #%d\n", fd->name, je32_to_cpu(rd->ino))); | 844 | D1(printk(KERN_NOTICE "Name for which CRC failed is (now) '%s', ino #%d\n", fd->name, je32_to_cpu(rd->ino))); |
781 | jffs2_free_full_dirent(fd); | 845 | jffs2_free_full_dirent(fd); |
782 | /* FIXME: Why do we believe totlen? */ | 846 | /* FIXME: Why do we believe totlen? */ |
@@ -796,7 +860,7 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo | |||
796 | jffs2_free_raw_node_ref(raw); | 860 | jffs2_free_raw_node_ref(raw); |
797 | return -ENOMEM; | 861 | return -ENOMEM; |
798 | } | 862 | } |
799 | 863 | ||
800 | raw->__totlen = PAD(je32_to_cpu(rd->totlen)); | 864 | raw->__totlen = PAD(je32_to_cpu(rd->totlen)); |
801 | raw->flash_offset = ofs | REF_PRISTINE; | 865 | raw->flash_offset = ofs | REF_PRISTINE; |
802 | raw->next_phys = NULL; | 866 | raw->next_phys = NULL; |
@@ -817,6 +881,10 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo | |||
817 | USED_SPACE(PAD(je32_to_cpu(rd->totlen))); | 881 | USED_SPACE(PAD(je32_to_cpu(rd->totlen))); |
818 | jffs2_add_fd_to_list(c, fd, &ic->scan_dents); | 882 | jffs2_add_fd_to_list(c, fd, &ic->scan_dents); |
819 | 883 | ||
884 | if (jffs2_sum_active()) { | ||
885 | jffs2_sum_add_dirent_mem(s, rd, ofs - jeb->offset); | ||
886 | } | ||
887 | |||
820 | return 0; | 888 | return 0; |
821 | } | 889 | } |
822 | 890 | ||
@@ -852,76 +920,34 @@ void jffs2_rotate_lists(struct jffs2_sb_info *c) | |||
852 | x = count_list(&c->clean_list); | 920 | x = count_list(&c->clean_list); |
853 | if (x) { | 921 | if (x) { |
854 | rotateby = pseudo_random % x; | 922 | rotateby = pseudo_random % x; |
855 | D1(printk(KERN_DEBUG "Rotating clean_list by %d\n", rotateby)); | ||
856 | |||
857 | rotate_list((&c->clean_list), rotateby); | 923 | rotate_list((&c->clean_list), rotateby); |
858 | |||
859 | D1(printk(KERN_DEBUG "Erase block at front of clean_list is at %08x\n", | ||
860 | list_entry(c->clean_list.next, struct jffs2_eraseblock, list)->offset)); | ||
861 | } else { | ||
862 | D1(printk(KERN_DEBUG "Not rotating empty clean_list\n")); | ||
863 | } | 924 | } |
864 | 925 | ||
865 | x = count_list(&c->very_dirty_list); | 926 | x = count_list(&c->very_dirty_list); |
866 | if (x) { | 927 | if (x) { |
867 | rotateby = pseudo_random % x; | 928 | rotateby = pseudo_random % x; |
868 | D1(printk(KERN_DEBUG "Rotating very_dirty_list by %d\n", rotateby)); | ||
869 | |||
870 | rotate_list((&c->very_dirty_list), rotateby); | 929 | rotate_list((&c->very_dirty_list), rotateby); |
871 | |||
872 | D1(printk(KERN_DEBUG "Erase block at front of very_dirty_list is at %08x\n", | ||
873 | list_entry(c->very_dirty_list.next, struct jffs2_eraseblock, list)->offset)); | ||
874 | } else { | ||
875 | D1(printk(KERN_DEBUG "Not rotating empty very_dirty_list\n")); | ||
876 | } | 930 | } |
877 | 931 | ||
878 | x = count_list(&c->dirty_list); | 932 | x = count_list(&c->dirty_list); |
879 | if (x) { | 933 | if (x) { |
880 | rotateby = pseudo_random % x; | 934 | rotateby = pseudo_random % x; |
881 | D1(printk(KERN_DEBUG "Rotating dirty_list by %d\n", rotateby)); | ||
882 | |||
883 | rotate_list((&c->dirty_list), rotateby); | 935 | rotate_list((&c->dirty_list), rotateby); |
884 | |||
885 | D1(printk(KERN_DEBUG "Erase block at front of dirty_list is at %08x\n", | ||
886 | list_entry(c->dirty_list.next, struct jffs2_eraseblock, list)->offset)); | ||
887 | } else { | ||
888 | D1(printk(KERN_DEBUG "Not rotating empty dirty_list\n")); | ||
889 | } | 936 | } |
890 | 937 | ||
891 | x = count_list(&c->erasable_list); | 938 | x = count_list(&c->erasable_list); |
892 | if (x) { | 939 | if (x) { |
893 | rotateby = pseudo_random % x; | 940 | rotateby = pseudo_random % x; |
894 | D1(printk(KERN_DEBUG "Rotating erasable_list by %d\n", rotateby)); | ||
895 | |||
896 | rotate_list((&c->erasable_list), rotateby); | 941 | rotate_list((&c->erasable_list), rotateby); |
897 | |||
898 | D1(printk(KERN_DEBUG "Erase block at front of erasable_list is at %08x\n", | ||
899 | list_entry(c->erasable_list.next, struct jffs2_eraseblock, list)->offset)); | ||
900 | } else { | ||
901 | D1(printk(KERN_DEBUG "Not rotating empty erasable_list\n")); | ||
902 | } | 942 | } |
903 | 943 | ||
904 | if (c->nr_erasing_blocks) { | 944 | if (c->nr_erasing_blocks) { |
905 | rotateby = pseudo_random % c->nr_erasing_blocks; | 945 | rotateby = pseudo_random % c->nr_erasing_blocks; |
906 | D1(printk(KERN_DEBUG "Rotating erase_pending_list by %d\n", rotateby)); | ||
907 | |||
908 | rotate_list((&c->erase_pending_list), rotateby); | 946 | rotate_list((&c->erase_pending_list), rotateby); |
909 | |||
910 | D1(printk(KERN_DEBUG "Erase block at front of erase_pending_list is at %08x\n", | ||
911 | list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list)->offset)); | ||
912 | } else { | ||
913 | D1(printk(KERN_DEBUG "Not rotating empty erase_pending_list\n")); | ||
914 | } | 947 | } |
915 | 948 | ||
916 | if (c->nr_free_blocks) { | 949 | if (c->nr_free_blocks) { |
917 | rotateby = pseudo_random % c->nr_free_blocks; | 950 | rotateby = pseudo_random % c->nr_free_blocks; |
918 | D1(printk(KERN_DEBUG "Rotating free_list by %d\n", rotateby)); | ||
919 | |||
920 | rotate_list((&c->free_list), rotateby); | 951 | rotate_list((&c->free_list), rotateby); |
921 | |||
922 | D1(printk(KERN_DEBUG "Erase block at front of free_list is at %08x\n", | ||
923 | list_entry(c->free_list.next, struct jffs2_eraseblock, list)->offset)); | ||
924 | } else { | ||
925 | D1(printk(KERN_DEBUG "Not rotating empty free_list\n")); | ||
926 | } | 952 | } |
927 | } | 953 | } |
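Aside: the summary hooks added to scan.c above (and implemented in summary.c below) collect one small in-memory record per scanned node and give up on the summary when it would no longer fit in the eraseblock's free space. The sketch below shows that bookkeeping in isolation; the record layout, names and sizes are assumptions for illustration, not the real jffs2_summary structures.

    /* Sketch only: per-eraseblock summary collection in miniature. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct sum_rec {                 /* one scanned node's essentials */
        uint32_t offset, totlen;
        struct sum_rec *next;
    };

    struct summary {
        struct sum_rec *head, *tail; /* singly linked collection list   */
        uint32_t size;               /* bytes the on-flash record needs */
        int disabled;
    };

    /* Append one record, analogous to jffs2_sum_add_inode_mem()/add_dirent_mem(). */
    static int sum_add(struct summary *s, uint32_t off, uint32_t len)
    {
        struct sum_rec *r;
        if (s->disabled)
            return 0;
        r = malloc(sizeof(*r));
        if (!r)
            return -1;
        r->offset = off;
        r->totlen = len;
        r->next = NULL;
        if (s->tail)
            s->tail->next = r;
        else
            s->head = r;
        s->tail = r;
        s->size += sizeof(*r);       /* stand-in for JFFS2_SUMMARY_*_SIZE */
        return 0;
    }

    /* If the collected summary would not fit in the block's remaining free
     * space, drop it for this block -- like jffs2_sum_disable_collecting(). */
    static void sum_check_fit(struct summary *s, uint32_t free_space)
    {
        struct sum_rec *r, *n;
        if (s->size <= free_space)
            return;
        for (r = s->head; r; r = n) {
            n = r->next;
            free(r);
        }
        s->head = s->tail = NULL;
        s->size = 0;
        s->disabled = 1;
    }

    int main(void)
    {
        struct summary s;
        memset(&s, 0, sizeof(s));
        sum_add(&s, 0x0000, 68);     /* a scanned inode node  */
        sum_add(&s, 0x0044, 40);     /* a scanned dirent node */
        sum_check_fit(&s, 4096);
        printf("kept=%s size=%u\n", s.disabled ? "no" : "yes", (unsigned)s.size);
        return 0;
    }

In the kernel code the per-block list collected during scan is either moved into c->summary (when the block becomes nextblock) or reset, which is what jffs2_sum_move_collected() and jffs2_sum_reset_collected() above do.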
diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c new file mode 100644 index 000000000000..fb9cec61fcf2 --- /dev/null +++ b/fs/jffs2/summary.c | |||
@@ -0,0 +1,730 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>, | ||
5 | * Zoltan Sogor <weth@inf.u-szeged.hu>, | ||
6 | * Patrik Kluba <pajko@halom.u-szeged.hu>, | ||
7 | * University of Szeged, Hungary | ||
8 | * | ||
9 | * For licensing information, see the file 'LICENCE' in this directory. | ||
10 | * | ||
11 | * $Id: summary.c,v 1.4 2005/09/26 11:37:21 havasi Exp $ | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/sched.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/mtd/mtd.h> | ||
19 | #include <linux/pagemap.h> | ||
20 | #include <linux/crc32.h> | ||
21 | #include <linux/compiler.h> | ||
22 | #include <linux/vmalloc.h> | ||
23 | #include "nodelist.h" | ||
24 | #include "debug.h" | ||
25 | |||
26 | int jffs2_sum_init(struct jffs2_sb_info *c) | ||
27 | { | ||
28 | c->summary = kmalloc(sizeof(struct jffs2_summary), GFP_KERNEL); | ||
29 | |||
30 | if (!c->summary) { | ||
31 | JFFS2_WARNING("Can't allocate memory for summary information!\n"); | ||
32 | return -ENOMEM; | ||
33 | } | ||
34 | |||
35 | memset(c->summary, 0, sizeof(struct jffs2_summary)); | ||
36 | |||
37 | c->summary->sum_buf = vmalloc(c->sector_size); | ||
38 | |||
39 | if (!c->summary->sum_buf) { | ||
40 | JFFS2_WARNING("Can't allocate buffer for writing out summary information!\n"); | ||
41 | kfree(c->summary); | ||
42 | return -ENOMEM; | ||
43 | } | ||
44 | |||
45 | dbg_summary("returned succesfully\n"); | ||
46 | |||
47 | return 0; | ||
48 | } | ||
49 | |||
50 | void jffs2_sum_exit(struct jffs2_sb_info *c) | ||
51 | { | ||
52 | dbg_summary("called\n"); | ||
53 | |||
54 | jffs2_sum_disable_collecting(c->summary); | ||
55 | |||
56 | vfree(c->summary->sum_buf); | ||
57 | c->summary->sum_buf = NULL; | ||
58 | |||
59 | kfree(c->summary); | ||
60 | c->summary = NULL; | ||
61 | } | ||
62 | |||
63 | static int jffs2_sum_add_mem(struct jffs2_summary *s, union jffs2_sum_mem *item) | ||
64 | { | ||
65 | if (!s->sum_list_head) | ||
66 | s->sum_list_head = (union jffs2_sum_mem *) item; | ||
67 | if (s->sum_list_tail) | ||
68 | s->sum_list_tail->u.next = (union jffs2_sum_mem *) item; | ||
69 | s->sum_list_tail = (union jffs2_sum_mem *) item; | ||
70 | |||
71 | switch (je16_to_cpu(item->u.nodetype)) { | ||
72 | case JFFS2_NODETYPE_INODE: | ||
73 | s->sum_size += JFFS2_SUMMARY_INODE_SIZE; | ||
74 | s->sum_num++; | ||
75 | dbg_summary("inode (%u) added to summary\n", | ||
76 | je32_to_cpu(item->i.inode)); | ||
77 | break; | ||
78 | case JFFS2_NODETYPE_DIRENT: | ||
79 | s->sum_size += JFFS2_SUMMARY_DIRENT_SIZE(item->d.nsize); | ||
80 | s->sum_num++; | ||
81 | dbg_summary("dirent (%u) added to summary\n", | ||
82 | je32_to_cpu(item->d.ino)); | ||
83 | break; | ||
84 | default: | ||
85 | JFFS2_WARNING("UNKNOWN node type %u\n", | ||
86 | je16_to_cpu(item->u.nodetype)); | ||
87 | return 1; | ||
88 | } | ||
89 | return 0; | ||
90 | } | ||
91 | |||
92 | |||
93 | /* The following 3 functions are called from scan.c to collect summary info for a jeb that is not yet closed */ | ||
94 | |||
95 | int jffs2_sum_add_padding_mem(struct jffs2_summary *s, uint32_t size) | ||
96 | { | ||
97 | dbg_summary("called with %u\n", size); | ||
98 | s->sum_padded += size; | ||
99 | return 0; | ||
100 | } | ||
101 | |||
102 | int jffs2_sum_add_inode_mem(struct jffs2_summary *s, struct jffs2_raw_inode *ri, | ||
103 | uint32_t ofs) | ||
104 | { | ||
105 | struct jffs2_sum_inode_mem *temp = kmalloc(sizeof(struct jffs2_sum_inode_mem), GFP_KERNEL); | ||
106 | |||
107 | if (!temp) | ||
108 | return -ENOMEM; | ||
109 | |||
110 | temp->nodetype = ri->nodetype; | ||
111 | temp->inode = ri->ino; | ||
112 | temp->version = ri->version; | ||
113 | temp->offset = cpu_to_je32(ofs); /* relative offset from the beginning of the jeb */ | ||
114 | temp->totlen = ri->totlen; | ||
115 | temp->next = NULL; | ||
116 | |||
117 | return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp); | ||
118 | } | ||
119 | |||
120 | int jffs2_sum_add_dirent_mem(struct jffs2_summary *s, struct jffs2_raw_dirent *rd, | ||
121 | uint32_t ofs) | ||
122 | { | ||
123 | struct jffs2_sum_dirent_mem *temp = | ||
124 | kmalloc(sizeof(struct jffs2_sum_dirent_mem) + rd->nsize, GFP_KERNEL); | ||
125 | |||
126 | if (!temp) | ||
127 | return -ENOMEM; | ||
128 | |||
129 | temp->nodetype = rd->nodetype; | ||
130 | temp->totlen = rd->totlen; | ||
131 | temp->offset = cpu_to_je32(ofs); /* relative offset from the beginning of the jeb */ | ||
132 | temp->pino = rd->pino; | ||
133 | temp->version = rd->version; | ||
134 | temp->ino = rd->ino; | ||
135 | temp->nsize = rd->nsize; | ||
136 | temp->type = rd->type; | ||
137 | temp->next = NULL; | ||
138 | |||
139 | memcpy(temp->name, rd->name, rd->nsize); | ||
140 | |||
141 | return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp); | ||
142 | } | ||
143 | |||
144 | /* Clean up all collected summary information */ | ||
145 | |||
146 | static void jffs2_sum_clean_collected(struct jffs2_summary *s) | ||
147 | { | ||
148 | union jffs2_sum_mem *temp; | ||
149 | |||
150 | if (!s->sum_list_head) { | ||
151 | dbg_summary("already empty\n"); | ||
152 | } | ||
153 | while (s->sum_list_head) { | ||
154 | temp = s->sum_list_head; | ||
155 | s->sum_list_head = s->sum_list_head->u.next; | ||
156 | kfree(temp); | ||
157 | } | ||
158 | s->sum_list_tail = NULL; | ||
159 | s->sum_padded = 0; | ||
160 | s->sum_num = 0; | ||
161 | } | ||
162 | |||
163 | void jffs2_sum_reset_collected(struct jffs2_summary *s) | ||
164 | { | ||
165 | dbg_summary("called\n"); | ||
166 | jffs2_sum_clean_collected(s); | ||
167 | s->sum_size = 0; | ||
168 | } | ||
169 | |||
170 | void jffs2_sum_disable_collecting(struct jffs2_summary *s) | ||
171 | { | ||
172 | dbg_summary("called\n"); | ||
173 | jffs2_sum_clean_collected(s); | ||
174 | s->sum_size = JFFS2_SUMMARY_NOSUM_SIZE; | ||
175 | } | ||
176 | |||
177 | int jffs2_sum_is_disabled(struct jffs2_summary *s) | ||
178 | { | ||
179 | return (s->sum_size == JFFS2_SUMMARY_NOSUM_SIZE); | ||
180 | } | ||
181 | |||
182 | /* Move the collected summary information into sb (called from scan.c) */ | ||
183 | |||
184 | void jffs2_sum_move_collected(struct jffs2_sb_info *c, struct jffs2_summary *s) | ||
185 | { | ||
186 | dbg_summary("oldsize=0x%x oldnum=%u => newsize=0x%x newnum=%u\n", | ||
187 | c->summary->sum_size, c->summary->sum_num, | ||
188 | s->sum_size, s->sum_num); | ||
189 | |||
190 | c->summary->sum_size = s->sum_size; | ||
191 | c->summary->sum_num = s->sum_num; | ||
192 | c->summary->sum_padded = s->sum_padded; | ||
193 | c->summary->sum_list_head = s->sum_list_head; | ||
194 | c->summary->sum_list_tail = s->sum_list_tail; | ||
195 | |||
196 | s->sum_list_head = s->sum_list_tail = NULL; | ||
197 | } | ||
198 | |||
199 | /* Called from wbuf.c to collect written node info */ | ||
200 | |||
201 | int jffs2_sum_add_kvec(struct jffs2_sb_info *c, const struct kvec *invecs, | ||
202 | unsigned long count, uint32_t ofs) | ||
203 | { | ||
204 | union jffs2_node_union *node; | ||
205 | struct jffs2_eraseblock *jeb; | ||
206 | |||
207 | node = invecs[0].iov_base; | ||
208 | jeb = &c->blocks[ofs / c->sector_size]; | ||
209 | ofs -= jeb->offset; | ||
210 | |||
211 | switch (je16_to_cpu(node->u.nodetype)) { | ||
212 | case JFFS2_NODETYPE_INODE: { | ||
213 | struct jffs2_sum_inode_mem *temp = | ||
214 | kmalloc(sizeof(struct jffs2_sum_inode_mem), GFP_KERNEL); | ||
215 | |||
216 | if (!temp) | ||
217 | goto no_mem; | ||
218 | |||
219 | temp->nodetype = node->i.nodetype; | ||
220 | temp->inode = node->i.ino; | ||
221 | temp->version = node->i.version; | ||
222 | temp->offset = cpu_to_je32(ofs); | ||
223 | temp->totlen = node->i.totlen; | ||
224 | temp->next = NULL; | ||
225 | |||
226 | return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp); | ||
227 | } | ||
228 | |||
229 | case JFFS2_NODETYPE_DIRENT: { | ||
230 | struct jffs2_sum_dirent_mem *temp = | ||
231 | kmalloc(sizeof(struct jffs2_sum_dirent_mem) + node->d.nsize, GFP_KERNEL); | ||
232 | |||
233 | if (!temp) | ||
234 | goto no_mem; | ||
235 | |||
236 | temp->nodetype = node->d.nodetype; | ||
237 | temp->totlen = node->d.totlen; | ||
238 | temp->offset = cpu_to_je32(ofs); | ||
239 | temp->pino = node->d.pino; | ||
240 | temp->version = node->d.version; | ||
241 | temp->ino = node->d.ino; | ||
242 | temp->nsize = node->d.nsize; | ||
243 | temp->type = node->d.type; | ||
244 | temp->next = NULL; | ||
245 | |||
246 | switch (count) { | ||
247 | case 1: | ||
248 | memcpy(temp->name,node->d.name,node->d.nsize); | ||
249 | break; | ||
250 | |||
251 | case 2: | ||
252 | memcpy(temp->name,invecs[1].iov_base,node->d.nsize); | ||
253 | break; | ||
254 | |||
255 | default: | ||
256 | BUG(); /* impossible count value */ | ||
257 | break; | ||
258 | } | ||
259 | |||
260 | return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp); | ||
261 | } | ||
262 | |||
263 | case JFFS2_NODETYPE_PADDING: | ||
264 | dbg_summary("node PADDING\n"); | ||
265 | c->summary->sum_padded += je32_to_cpu(node->u.totlen); | ||
266 | break; | ||
267 | |||
268 | case JFFS2_NODETYPE_CLEANMARKER: | ||
269 | dbg_summary("node CLEANMARKER\n"); | ||
270 | break; | ||
271 | |||
272 | case JFFS2_NODETYPE_SUMMARY: | ||
273 | dbg_summary("node SUMMARY\n"); | ||
274 | break; | ||
275 | |||
276 | default: | ||
277 | /* If you implement a new node type you should also implement | ||
278 | summary support for it or disable summary. | ||
279 | */ | ||
280 | BUG(); | ||
281 | break; | ||
282 | } | ||
283 | |||
284 | return 0; | ||
285 | |||
286 | no_mem: | ||
287 | JFFS2_WARNING("MEMORY ALLOCATION ERROR!"); | ||
288 | return -ENOMEM; | ||
289 | } | ||
290 | |||
291 | |||
292 | /* Process the stored summary information - helper function for jffs2_sum_scan_sumnode() */ | ||
293 | |||
294 | static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | ||
295 | struct jffs2_raw_summary *summary, uint32_t *pseudo_random) | ||
296 | { | ||
297 | struct jffs2_raw_node_ref *raw; | ||
298 | struct jffs2_inode_cache *ic; | ||
299 | struct jffs2_full_dirent *fd; | ||
300 | void *sp; | ||
301 | int i, ino; | ||
302 | |||
303 | sp = summary->sum; | ||
304 | |||
305 | for (i=0; i<je32_to_cpu(summary->sum_num); i++) { | ||
306 | dbg_summary("processing summary index %d\n", i); | ||
307 | |||
308 | switch (je16_to_cpu(((struct jffs2_sum_unknown_flash *)sp)->nodetype)) { | ||
309 | case JFFS2_NODETYPE_INODE: { | ||
310 | struct jffs2_sum_inode_flash *spi; | ||
311 | spi = sp; | ||
312 | |||
313 | ino = je32_to_cpu(spi->inode); | ||
314 | |||
315 | dbg_summary("Inode at 0x%08x\n", | ||
316 | jeb->offset + je32_to_cpu(spi->offset)); | ||
317 | |||
318 | raw = jffs2_alloc_raw_node_ref(); | ||
319 | if (!raw) { | ||
320 | JFFS2_NOTICE("allocation of node reference failed\n"); | ||
321 | kfree(summary); | ||
322 | return -ENOMEM; | ||
323 | } | ||
324 | |||
325 | ic = jffs2_scan_make_ino_cache(c, ino); | ||
326 | if (!ic) { | ||
327 | JFFS2_NOTICE("scan_make_ino_cache failed\n"); | ||
328 | jffs2_free_raw_node_ref(raw); | ||
329 | kfree(summary); | ||
330 | return -ENOMEM; | ||
331 | } | ||
332 | |||
333 | raw->flash_offset = (jeb->offset + je32_to_cpu(spi->offset)) | REF_UNCHECKED; | ||
334 | raw->__totlen = PAD(je32_to_cpu(spi->totlen)); | ||
335 | raw->next_phys = NULL; | ||
336 | raw->next_in_ino = ic->nodes; | ||
337 | |||
338 | ic->nodes = raw; | ||
339 | if (!jeb->first_node) | ||
340 | jeb->first_node = raw; | ||
341 | if (jeb->last_node) | ||
342 | jeb->last_node->next_phys = raw; | ||
343 | jeb->last_node = raw; | ||
344 | *pseudo_random += je32_to_cpu(spi->version); | ||
345 | |||
346 | UNCHECKED_SPACE(PAD(je32_to_cpu(spi->totlen))); | ||
347 | |||
348 | sp += JFFS2_SUMMARY_INODE_SIZE; | ||
349 | |||
350 | break; | ||
351 | } | ||
352 | |||
353 | case JFFS2_NODETYPE_DIRENT: { | ||
354 | struct jffs2_sum_dirent_flash *spd; | ||
355 | spd = sp; | ||
356 | |||
357 | dbg_summary("Dirent at 0x%08x\n", | ||
358 | jeb->offset + je32_to_cpu(spd->offset)); | ||
359 | |||
360 | fd = jffs2_alloc_full_dirent(spd->nsize+1); | ||
361 | if (!fd) { | ||
362 | kfree(summary); | ||
363 | return -ENOMEM; | ||
364 | } | ||
365 | |||
366 | memcpy(&fd->name, spd->name, spd->nsize); | ||
367 | fd->name[spd->nsize] = 0; | ||
368 | |||
369 | raw = jffs2_alloc_raw_node_ref(); | ||
370 | if (!raw) { | ||
371 | jffs2_free_full_dirent(fd); | ||
372 | JFFS2_NOTICE("allocation of node reference failed\n"); | ||
373 | kfree(summary); | ||
374 | return -ENOMEM; | ||
375 | } | ||
376 | |||
377 | ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(spd->pino)); | ||
378 | if (!ic) { | ||
379 | jffs2_free_full_dirent(fd); | ||
380 | jffs2_free_raw_node_ref(raw); | ||
381 | kfree(summary); | ||
382 | return -ENOMEM; | ||
383 | } | ||
384 | |||
385 | raw->__totlen = PAD(je32_to_cpu(spd->totlen)); | ||
386 | raw->flash_offset = (jeb->offset + je32_to_cpu(spd->offset)) | REF_PRISTINE; | ||
387 | raw->next_phys = NULL; | ||
388 | raw->next_in_ino = ic->nodes; | ||
389 | ic->nodes = raw; | ||
390 | if (!jeb->first_node) | ||
391 | jeb->first_node = raw; | ||
392 | if (jeb->last_node) | ||
393 | jeb->last_node->next_phys = raw; | ||
394 | jeb->last_node = raw; | ||
395 | |||
396 | fd->raw = raw; | ||
397 | fd->next = NULL; | ||
398 | fd->version = je32_to_cpu(spd->version); | ||
399 | fd->ino = je32_to_cpu(spd->ino); | ||
400 | fd->nhash = full_name_hash(fd->name, spd->nsize); | ||
401 | fd->type = spd->type; | ||
402 | USED_SPACE(PAD(je32_to_cpu(spd->totlen))); | ||
403 | jffs2_add_fd_to_list(c, fd, &ic->scan_dents); | ||
404 | |||
405 | *pseudo_random += je32_to_cpu(spd->version); | ||
406 | |||
407 | sp += JFFS2_SUMMARY_DIRENT_SIZE(spd->nsize); | ||
408 | |||
409 | break; | ||
410 | } | ||
411 | |||
412 | default : { | ||
413 | JFFS2_WARNING("Unsupported node type found in summary! Exiting..."); | ||
414 | kfree(summary); | ||
415 | return -EIO; | ||
416 | } | ||
417 | } | ||
418 | } | ||
419 | |||
420 | kfree(summary); | ||
421 | return 0; | ||
422 | } | ||
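
The loop above is just a byte cursor over the packed record area, advanced by the per-type record size. As a sketch, that step could be factored into a helper like the one below (hypothetical, not part of the patch); a size of 0 corresponds to the default: branch above, which treats the summary as unusable:

        /* How far to advance the cursor over summary->sum for one record. */
        static uint32_t jffs2_sum_record_size(const void *sp)
        {
                const struct jffs2_sum_unknown_flash *u = sp;

                switch (je16_to_cpu(u->nodetype)) {
                case JFFS2_NODETYPE_INODE:
                        return JFFS2_SUMMARY_INODE_SIZE;
                case JFFS2_NODETYPE_DIRENT:
                        return JFFS2_SUMMARY_DIRENT_SIZE(
                                ((const struct jffs2_sum_dirent_flash *)sp)->nsize);
                default:
                        return 0;       /* unknown record type */
                }
        }
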
423 | |||
424 | /* Process the summary node - called from jffs2_scan_eraseblock() */ | ||
425 | |||
426 | int jffs2_sum_scan_sumnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | ||
427 | uint32_t ofs, uint32_t *pseudo_random) | ||
428 | { | ||
429 | struct jffs2_unknown_node crcnode; | ||
430 | struct jffs2_raw_node_ref *cache_ref; | ||
431 | struct jffs2_raw_summary *summary; | ||
432 | int ret, sumsize; | ||
433 | uint32_t crc; | ||
434 | |||
435 | sumsize = c->sector_size - ofs; | ||
436 | ofs += jeb->offset; | ||
437 | |||
438 | dbg_summary("summary found for 0x%08x at 0x%08x (0x%x bytes)\n", | ||
439 | jeb->offset, ofs, sumsize); | ||
440 | |||
441 | summary = kmalloc(sumsize, GFP_KERNEL); | ||
442 | |||
443 | if (!summary) { | ||
444 | return -ENOMEM; | ||
445 | } | ||
446 | |||
447 | ret = jffs2_fill_scan_buf(c, (unsigned char *)summary, ofs, sumsize); | ||
448 | |||
449 | if (ret) { | ||
450 | kfree(summary); | ||
451 | return ret; | ||
452 | } | ||
453 | |||
454 | /* OK, now check for node validity and CRC */ | ||
455 | crcnode.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); | ||
456 | crcnode.nodetype = cpu_to_je16(JFFS2_NODETYPE_SUMMARY); | ||
457 | crcnode.totlen = summary->totlen; | ||
458 | crc = crc32(0, &crcnode, sizeof(crcnode)-4); | ||
459 | |||
460 | if (je32_to_cpu(summary->hdr_crc) != crc) { | ||
461 | dbg_summary("Summary node header is corrupt (bad CRC or " | ||
462 | "no summary at all)\n"); | ||
463 | goto crc_err; | ||
464 | } | ||
465 | |||
466 | if (je32_to_cpu(summary->totlen) != sumsize) { | ||
467 | dbg_summary("Summary node is corrupt (wrong erasesize?)\n"); | ||
468 | goto crc_err; | ||
469 | } | ||
470 | |||
471 | crc = crc32(0, summary, sizeof(struct jffs2_raw_summary)-8); | ||
472 | |||
473 | if (je32_to_cpu(summary->node_crc) != crc) { | ||
474 | dbg_summary("Summary node is corrupt (bad CRC)\n"); | ||
475 | goto crc_err; | ||
476 | } | ||
477 | |||
478 | crc = crc32(0, summary->sum, sumsize - sizeof(struct jffs2_raw_summary)); | ||
479 | |||
480 | if (je32_to_cpu(summary->sum_crc) != crc) { | ||
481 | dbg_summary("Summary node data is corrupt (bad CRC)\n"); | ||
482 | goto crc_err; | ||
483 | } | ||
484 | |||
485 | if ( je32_to_cpu(summary->cln_mkr) ) { | ||
486 | |||
487 | dbg_summary("Summary : CLEANMARKER node \n"); | ||
488 | |||
489 | if (je32_to_cpu(summary->cln_mkr) != c->cleanmarker_size) { | ||
490 | dbg_summary("CLEANMARKER node has totlen 0x%x != normal 0x%x\n", | ||
491 | je32_to_cpu(summary->cln_mkr), c->cleanmarker_size); | ||
492 | UNCHECKED_SPACE(PAD(je32_to_cpu(summary->cln_mkr))); | ||
493 | } else if (jeb->first_node) { | ||
494 | dbg_summary("CLEANMARKER node not first node in block " | ||
495 | "(0x%08x)\n", jeb->offset); | ||
496 | UNCHECKED_SPACE(PAD(je32_to_cpu(summary->cln_mkr))); | ||
497 | } else { | ||
498 | struct jffs2_raw_node_ref *marker_ref = jffs2_alloc_raw_node_ref(); | ||
499 | |||
500 | if (!marker_ref) { | ||
501 | JFFS2_NOTICE("Failed to allocate node ref for clean marker\n"); | ||
502 | kfree(summary); | ||
503 | return -ENOMEM; | ||
504 | } | ||
505 | |||
506 | marker_ref->next_in_ino = NULL; | ||
507 | marker_ref->next_phys = NULL; | ||
508 | marker_ref->flash_offset = jeb->offset | REF_NORMAL; | ||
509 | marker_ref->__totlen = je32_to_cpu(summary->cln_mkr); | ||
510 | jeb->first_node = jeb->last_node = marker_ref; | ||
511 | |||
512 | USED_SPACE( PAD(je32_to_cpu(summary->cln_mkr)) ); | ||
513 | } | ||
514 | } | ||
515 | |||
516 | if (je32_to_cpu(summary->padded)) { | ||
517 | DIRTY_SPACE(je32_to_cpu(summary->padded)); | ||
518 | } | ||
519 | |||
520 | ret = jffs2_sum_process_sum_data(c, jeb, summary, pseudo_random); | ||
521 | if (ret) | ||
522 | return ret; | ||
523 | |||
524 | /* for PARANOIA_CHECK */ | ||
525 | cache_ref = jffs2_alloc_raw_node_ref(); | ||
526 | |||
527 | if (!cache_ref) { | ||
528 | JFFS2_NOTICE("Failed to allocate node ref for cache\n"); | ||
529 | return -ENOMEM; | ||
530 | } | ||
531 | |||
532 | cache_ref->next_in_ino = NULL; | ||
533 | cache_ref->next_phys = NULL; | ||
534 | cache_ref->flash_offset = ofs | REF_NORMAL; | ||
535 | cache_ref->__totlen = sumsize; | ||
536 | |||
537 | if (!jeb->first_node) | ||
538 | jeb->first_node = cache_ref; | ||
539 | if (jeb->last_node) | ||
540 | jeb->last_node->next_phys = cache_ref; | ||
541 | jeb->last_node = cache_ref; | ||
542 | |||
543 | USED_SPACE(sumsize); | ||
544 | |||
545 | jeb->wasted_size += jeb->free_size; | ||
546 | c->wasted_size += jeb->free_size; | ||
547 | c->free_size -= jeb->free_size; | ||
548 | jeb->free_size = 0; | ||
549 | |||
550 | return jffs2_scan_classify_jeb(c, jeb); | ||
551 | |||
552 | crc_err: | ||
553 | JFFS2_WARNING("Summary node crc error, skipping summary information.\n"); | ||
554 | |||
555 | return 0; | ||
556 | } | ||
557 | |||
558 | /* Write summary data to flash - helper function for jffs2_sum_write_sumnode() */ | ||
559 | |||
560 | static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | ||
561 | uint32_t infosize, uint32_t datasize, int padsize) | ||
562 | { | ||
563 | struct jffs2_raw_summary isum; | ||
564 | union jffs2_sum_mem *temp; | ||
565 | struct jffs2_sum_marker *sm; | ||
566 | struct kvec vecs[2]; | ||
567 | void *wpage; | ||
568 | int ret; | ||
569 | size_t retlen; | ||
570 | |||
571 | memset(c->summary->sum_buf, 0xff, datasize); | ||
572 | memset(&isum, 0, sizeof(isum)); | ||
573 | |||
574 | isum.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); | ||
575 | isum.nodetype = cpu_to_je16(JFFS2_NODETYPE_SUMMARY); | ||
576 | isum.totlen = cpu_to_je32(infosize); | ||
577 | isum.hdr_crc = cpu_to_je32(crc32(0, &isum, sizeof(struct jffs2_unknown_node) - 4)); | ||
578 | isum.padded = cpu_to_je32(c->summary->sum_padded); | ||
579 | isum.cln_mkr = cpu_to_je32(c->cleanmarker_size); | ||
580 | isum.sum_num = cpu_to_je32(c->summary->sum_num); | ||
581 | wpage = c->summary->sum_buf; | ||
582 | |||
583 | while (c->summary->sum_num) { | ||
584 | |||
585 | switch (je16_to_cpu(c->summary->sum_list_head->u.nodetype)) { | ||
586 | case JFFS2_NODETYPE_INODE: { | ||
587 | struct jffs2_sum_inode_flash *sino_ptr = wpage; | ||
588 | |||
589 | sino_ptr->nodetype = c->summary->sum_list_head->i.nodetype; | ||
590 | sino_ptr->inode = c->summary->sum_list_head->i.inode; | ||
591 | sino_ptr->version = c->summary->sum_list_head->i.version; | ||
592 | sino_ptr->offset = c->summary->sum_list_head->i.offset; | ||
593 | sino_ptr->totlen = c->summary->sum_list_head->i.totlen; | ||
594 | |||
595 | wpage += JFFS2_SUMMARY_INODE_SIZE; | ||
596 | |||
597 | break; | ||
598 | } | ||
599 | |||
600 | case JFFS2_NODETYPE_DIRENT: { | ||
601 | struct jffs2_sum_dirent_flash *sdrnt_ptr = wpage; | ||
602 | |||
603 | sdrnt_ptr->nodetype = c->summary->sum_list_head->d.nodetype; | ||
604 | sdrnt_ptr->totlen = c->summary->sum_list_head->d.totlen; | ||
605 | sdrnt_ptr->offset = c->summary->sum_list_head->d.offset; | ||
606 | sdrnt_ptr->pino = c->summary->sum_list_head->d.pino; | ||
607 | sdrnt_ptr->version = c->summary->sum_list_head->d.version; | ||
608 | sdrnt_ptr->ino = c->summary->sum_list_head->d.ino; | ||
609 | sdrnt_ptr->nsize = c->summary->sum_list_head->d.nsize; | ||
610 | sdrnt_ptr->type = c->summary->sum_list_head->d.type; | ||
611 | |||
612 | memcpy(sdrnt_ptr->name, c->summary->sum_list_head->d.name, | ||
613 | c->summary->sum_list_head->d.nsize); | ||
614 | |||
615 | wpage += JFFS2_SUMMARY_DIRENT_SIZE(c->summary->sum_list_head->d.nsize); | ||
616 | |||
617 | break; | ||
618 | } | ||
619 | |||
620 | default : { | ||
621 | BUG(); /* unknown node in summary information */ | ||
622 | } | ||
623 | } | ||
624 | |||
625 | temp = c->summary->sum_list_head; | ||
626 | c->summary->sum_list_head = c->summary->sum_list_head->u.next; | ||
627 | kfree(temp); | ||
628 | |||
629 | c->summary->sum_num--; | ||
630 | } | ||
631 | |||
632 | jffs2_sum_reset_collected(c->summary); | ||
633 | |||
634 | wpage += padsize; | ||
635 | |||
636 | sm = wpage; | ||
637 | sm->offset = cpu_to_je32(c->sector_size - jeb->free_size); | ||
638 | sm->magic = cpu_to_je32(JFFS2_SUM_MAGIC); | ||
639 | |||
640 | isum.sum_crc = cpu_to_je32(crc32(0, c->summary->sum_buf, datasize)); | ||
641 | isum.node_crc = cpu_to_je32(crc32(0, &isum, sizeof(isum) - 8)); | ||
642 | |||
643 | vecs[0].iov_base = &isum; | ||
644 | vecs[0].iov_len = sizeof(isum); | ||
645 | vecs[1].iov_base = c->summary->sum_buf; | ||
646 | vecs[1].iov_len = datasize; | ||
647 | |||
648 | dbg_summary("JFFS2: writing out data to flash to pos : 0x%08x\n", | ||
649 | jeb->offset + c->sector_size - jeb->free_size); | ||
650 | |||
651 | spin_unlock(&c->erase_completion_lock); | ||
652 | ret = jffs2_flash_writev(c, vecs, 2, jeb->offset + c->sector_size - | ||
653 | jeb->free_size, &retlen, 0); | ||
654 | spin_lock(&c->erase_completion_lock); | ||
655 | |||
656 | |||
657 | if (ret || (retlen != infosize)) { | ||
658 | JFFS2_WARNING("Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n", | ||
659 | infosize, jeb->offset + c->sector_size - jeb->free_size, ret, retlen); | ||
660 | |||
661 | c->summary->sum_size = JFFS2_SUMMARY_NOSUM_SIZE; | ||
662 | WASTED_SPACE(infosize); | ||
663 | |||
664 | return 1; | ||
665 | } | ||
666 | |||
667 | return 0; | ||
668 | } | ||
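
The node goes out as two vectors -- the struct jffs2_raw_summary header and the packed record buffer -- at the current write point of the block, with the marker as the last thing before the block boundary. A sketch of the resulting layout; what the mount-time scan does with the marker is inferred from its fields and is not shown in this file:

        /*
         *  jeb->offset                          jeb->offset + c->sector_size
         *  |                                                               |
         *  | ...earlier nodes... | raw_summary | records | pad | marker |
         *                        ^                                      |
         *                        +--- marker.offset (block-relative) ---+
         *
         * marker.offset is c->sector_size - jeb->free_size at write time,
         * i.e. where the summary node starts within the block, and
         * marker.magic is JFFS2_SUM_MAGIC, so a scanner reading the tail
         * of a block can presumably jump straight to the summary node.
         */
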
669 | |||
670 | /* Write out summary information - called from jffs2_do_reserve_space */ | ||
671 | |||
672 | int jffs2_sum_write_sumnode(struct jffs2_sb_info *c) | ||
673 | { | ||
674 | struct jffs2_raw_node_ref *summary_ref; | ||
675 | int datasize, infosize, padsize, ret; | ||
676 | struct jffs2_eraseblock *jeb; | ||
677 | |||
678 | dbg_summary("called\n"); | ||
679 | |||
680 | jeb = c->nextblock; | ||
681 | |||
682 | if (!c->summary->sum_num || !c->summary->sum_list_head) { | ||
683 | JFFS2_WARNING("Empty summary info!!!\n"); | ||
684 | BUG(); | ||
685 | } | ||
686 | |||
687 | datasize = c->summary->sum_size + sizeof(struct jffs2_sum_marker); | ||
688 | infosize = sizeof(struct jffs2_raw_summary) + datasize; | ||
689 | padsize = jeb->free_size - infosize; | ||
690 | infosize += padsize; | ||
691 | datasize += padsize; | ||
692 | |||
693 | /* Is there enough space for summary? */ | ||
694 | if (padsize < 0) { | ||
695 | /* don't try to write out summary for this jeb */ | ||
696 | jffs2_sum_disable_collecting(c->summary); | ||
697 | |||
698 | JFFS2_WARNING("Not enough space for summary, padsize = %d\n", padsize); | ||
699 | return 0; | ||
700 | } | ||
701 | |||
702 | ret = jffs2_sum_write_data(c, jeb, infosize, datasize, padsize); | ||
703 | if (ret) | ||
704 | return 0; /* can't write out summary, block is marked as NOSUM_SIZE */ | ||
705 | |||
706 | /* for ACCT_PARANOIA_CHECK */ | ||
707 | spin_unlock(&c->erase_completion_lock); | ||
708 | summary_ref = jffs2_alloc_raw_node_ref(); | ||
709 | spin_lock(&c->erase_completion_lock); | ||
710 | |||
711 | if (!summary_ref) { | ||
712 | JFFS2_NOTICE("Failed to allocate node ref for summary\n"); | ||
713 | return -ENOMEM; | ||
714 | } | ||
715 | |||
716 | summary_ref->next_in_ino = NULL; | ||
717 | summary_ref->next_phys = NULL; | ||
718 | summary_ref->flash_offset = (jeb->offset + c->sector_size - jeb->free_size) | REF_NORMAL; | ||
719 | summary_ref->__totlen = infosize; | ||
720 | |||
721 | if (!jeb->first_node) | ||
722 | jeb->first_node = summary_ref; | ||
723 | if (jeb->last_node) | ||
724 | jeb->last_node->next_phys = summary_ref; | ||
725 | jeb->last_node = summary_ref; | ||
726 | |||
727 | USED_SPACE(infosize); | ||
728 | |||
729 | return 0; | ||
730 | } | ||
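
The sizing at the top of this function folds whatever free space is left in the block into the summary node as padding, so the node always ends exactly on the block boundary. A worked example with made-up numbers (the two struct sizes are illustrative, not the real ones):

        /*
         *   c->summary->sum_size             = 100   collected records
         *   sizeof(struct jffs2_sum_marker)  =   8   (illustrative)
         *   sizeof(struct jffs2_raw_summary) =  32   (illustrative)
         *   jeb->free_size                   = 200
         *
         *   datasize = 100 + 8       = 108
         *   infosize =  32 + 108     = 140
         *   padsize  = 200 - 140     =  60
         *   infosize = 140 + 60      = 200   == jeb->free_size
         *   datasize = 108 + 60      = 168
         *
         * A negative padsize means the summary no longer fits, and
         * collecting is simply disabled for this block instead.
         */
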
diff --git a/fs/jffs2/summary.h b/fs/jffs2/summary.h new file mode 100644 index 000000000000..b7a678be1709 --- /dev/null +++ b/fs/jffs2/summary.h | |||
@@ -0,0 +1,183 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>, | ||
5 | * Zoltan Sogor <weth@inf.u-szeged.hu>, | ||
6 | * Patrik Kluba <pajko@halom.u-szeged.hu>, | ||
7 | * University of Szeged, Hungary | ||
8 | * | ||
9 | * For licensing information, see the file 'LICENCE' in this directory. | ||
10 | * | ||
11 | * $Id: summary.h,v 1.2 2005/09/26 11:37:21 havasi Exp $ | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #ifndef JFFS2_SUMMARY_H | ||
16 | #define JFFS2_SUMMARY_H | ||
17 | |||
18 | #include <linux/uio.h> | ||
19 | #include <linux/jffs2.h> | ||
20 | |||
21 | #define DIRTY_SPACE(x) do { typeof(x) _x = (x); \ | ||
22 | c->free_size -= _x; c->dirty_size += _x; \ | ||
23 | jeb->free_size -= _x ; jeb->dirty_size += _x; \ | ||
24 | }while(0) | ||
25 | #define USED_SPACE(x) do { typeof(x) _x = (x); \ | ||
26 | c->free_size -= _x; c->used_size += _x; \ | ||
27 | jeb->free_size -= _x ; jeb->used_size += _x; \ | ||
28 | }while(0) | ||
29 | #define WASTED_SPACE(x) do { typeof(x) _x = (x); \ | ||
30 | c->free_size -= _x; c->wasted_size += _x; \ | ||
31 | jeb->free_size -= _x ; jeb->wasted_size += _x; \ | ||
32 | }while(0) | ||
33 | #define UNCHECKED_SPACE(x) do { typeof(x) _x = (x); \ | ||
34 | c->free_size -= _x; c->unchecked_size += _x; \ | ||
35 | jeb->free_size -= _x ; jeb->unchecked_size += _x; \ | ||
36 | }while(0) | ||
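
Each of these macros moves x bytes out of the free pool and into one accounting bucket, on both the filesystem-wide (c) and the per-eraseblock (jeb) counters. A minimal sketch of the invariant they are meant to preserve, assuming the five per-block counters always partition the sector; the real checks of this kind live in the debug code added elsewhere in this merge:

        static void check_jeb_accounting(struct jffs2_sb_info *c,
                                         struct jffs2_eraseblock *jeb)
        {
                uint32_t total = jeb->free_size + jeb->dirty_size +
                                 jeb->used_size + jeb->wasted_size +
                                 jeb->unchecked_size;

                /* Every byte of the erase block is in exactly one bucket. */
                BUG_ON(total != c->sector_size);
        }
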
37 | |||
38 | #define BLK_STATE_ALLFF 0 | ||
39 | #define BLK_STATE_CLEAN 1 | ||
40 | #define BLK_STATE_PARTDIRTY 2 | ||
41 | #define BLK_STATE_CLEANMARKER 3 | ||
42 | #define BLK_STATE_ALLDIRTY 4 | ||
43 | #define BLK_STATE_BADBLOCK 5 | ||
44 | |||
45 | #define JFFS2_SUMMARY_NOSUM_SIZE 0xffffffff | ||
46 | #define JFFS2_SUMMARY_INODE_SIZE (sizeof(struct jffs2_sum_inode_flash)) | ||
47 | #define JFFS2_SUMMARY_DIRENT_SIZE(x) (sizeof(struct jffs2_sum_dirent_flash) + (x)) | ||
48 | |||
49 | /* Summary structures used on flash */ | ||
50 | |||
51 | struct jffs2_sum_unknown_flash | ||
52 | { | ||
53 | jint16_t nodetype; /* node type */ | ||
54 | }; | ||
55 | |||
56 | struct jffs2_sum_inode_flash | ||
57 | { | ||
58 | jint16_t nodetype; /* node type */ | ||
59 | jint32_t inode; /* inode number */ | ||
60 | jint32_t version; /* inode version */ | ||
61 | jint32_t offset; /* offset on jeb */ | ||
62 | jint32_t totlen; /* record length */ | ||
63 | } __attribute__((packed)); | ||
64 | |||
65 | struct jffs2_sum_dirent_flash | ||
66 | { | ||
67 | jint16_t nodetype; /* == JFFS2_NODETYPE_DIRENT */ | ||
68 | jint32_t totlen; /* record length */ | ||
69 | jint32_t offset; /* offset on jeb */ | ||
70 | jint32_t pino; /* parent inode */ | ||
71 | jint32_t version; /* dirent version */ | ||
72 | jint32_t ino; /* == zero for unlink */ | ||
73 | uint8_t nsize; /* dirent name size */ | ||
74 | uint8_t type; /* dirent type */ | ||
75 | uint8_t name[0]; /* dirent name */ | ||
76 | } __attribute__((packed)); | ||
77 | |||
78 | union jffs2_sum_flash | ||
79 | { | ||
80 | struct jffs2_sum_unknown_flash u; | ||
81 | struct jffs2_sum_inode_flash i; | ||
82 | struct jffs2_sum_dirent_flash d; | ||
83 | }; | ||
84 | |||
85 | /* Summary structures used in memory */ | ||
86 | |||
87 | struct jffs2_sum_unknown_mem | ||
88 | { | ||
89 | union jffs2_sum_mem *next; | ||
90 | jint16_t nodetype; /* node type */ | ||
91 | }; | ||
92 | |||
93 | struct jffs2_sum_inode_mem | ||
94 | { | ||
95 | union jffs2_sum_mem *next; | ||
96 | jint16_t nodetype; /* node type */ | ||
97 | jint32_t inode; /* inode number */ | ||
98 | jint32_t version; /* inode version */ | ||
99 | jint32_t offset; /* offset on jeb */ | ||
100 | jint32_t totlen; /* record length */ | ||
101 | } __attribute__((packed)); | ||
102 | |||
103 | struct jffs2_sum_dirent_mem | ||
104 | { | ||
105 | union jffs2_sum_mem *next; | ||
106 | jint16_t nodetype; /* == JFFS2_NODETYPE_DIRENT */ | ||
107 | jint32_t totlen; /* record length */ | ||
108 | jint32_t offset; /* offset on jeb */ | ||
109 | jint32_t pino; /* parent inode */ | ||
110 | jint32_t version; /* dirent version */ | ||
111 | jint32_t ino; /* == zero for unlink */ | ||
112 | uint8_t nsize; /* dirent name size */ | ||
113 | uint8_t type; /* dirent type */ | ||
114 | uint8_t name[0]; /* dirent name */ | ||
115 | } __attribute__((packed)); | ||
116 | |||
117 | union jffs2_sum_mem | ||
118 | { | ||
119 | struct jffs2_sum_unknown_mem u; | ||
120 | struct jffs2_sum_inode_mem i; | ||
121 | struct jffs2_sum_dirent_mem d; | ||
122 | }; | ||
123 | |||
124 | /* Summary related information stored in superblock */ | ||
125 | |||
126 | struct jffs2_summary | ||
127 | { | ||
128 | uint32_t sum_size; /* size of summary information collected for nextblock */ | ||
129 | uint32_t sum_num; | ||
130 | uint32_t sum_padded; | ||
131 | union jffs2_sum_mem *sum_list_head; | ||
132 | union jffs2_sum_mem *sum_list_tail; | ||
133 | |||
134 | jint32_t *sum_buf; /* buffer for writing out summary */ | ||
135 | }; | ||
136 | |||
137 | /* Summary marker is stored at the end of every summarized erase block */ | ||
138 | |||
139 | struct jffs2_sum_marker | ||
140 | { | ||
141 | jint32_t offset; /* offset of the summary node in the jeb */ | ||
142 | jint32_t magic; /* == JFFS2_SUM_MAGIC */ | ||
143 | }; | ||
144 | |||
145 | #define JFFS2_SUMMARY_FRAME_SIZE (sizeof(struct jffs2_raw_summary) + sizeof(struct jffs2_sum_marker)) | ||
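
JFFS2_SUMMARY_FRAME_SIZE is the fixed overhead of a summary: the on-flash header plus the end-of-block marker. Each collected record then adds its flash record size on top, which is what jffs2_sum_write_sumnode() sizes before padding. A hypothetical helper showing that relationship, assuming sum_size is the running total of per-record flash sizes kept by the collector:

        static uint32_t jffs2_sum_space_needed(const struct jffs2_summary *s)
        {
                /* infosize before padding, as computed in jffs2_sum_write_sumnode():
                 * sizeof(raw_summary) + collected records + sizeof(marker). */
                return JFFS2_SUMMARY_FRAME_SIZE + s->sum_size;
        }
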
146 | |||
147 | #ifdef CONFIG_JFFS2_SUMMARY /* SUMMARY SUPPORT ENABLED */ | ||
148 | |||
149 | #define jffs2_sum_active() (1) | ||
150 | int jffs2_sum_init(struct jffs2_sb_info *c); | ||
151 | void jffs2_sum_exit(struct jffs2_sb_info *c); | ||
152 | void jffs2_sum_disable_collecting(struct jffs2_summary *s); | ||
153 | int jffs2_sum_is_disabled(struct jffs2_summary *s); | ||
154 | void jffs2_sum_reset_collected(struct jffs2_summary *s); | ||
155 | void jffs2_sum_move_collected(struct jffs2_sb_info *c, struct jffs2_summary *s); | ||
156 | int jffs2_sum_add_kvec(struct jffs2_sb_info *c, const struct kvec *invecs, | ||
157 | unsigned long count, uint32_t to); | ||
158 | int jffs2_sum_write_sumnode(struct jffs2_sb_info *c); | ||
159 | int jffs2_sum_add_padding_mem(struct jffs2_summary *s, uint32_t size); | ||
160 | int jffs2_sum_add_inode_mem(struct jffs2_summary *s, struct jffs2_raw_inode *ri, uint32_t ofs); | ||
161 | int jffs2_sum_add_dirent_mem(struct jffs2_summary *s, struct jffs2_raw_dirent *rd, uint32_t ofs); | ||
162 | int jffs2_sum_scan_sumnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | ||
163 | uint32_t ofs, uint32_t *pseudo_random); | ||
164 | |||
165 | #else /* SUMMARY DISABLED */ | ||
166 | |||
167 | #define jffs2_sum_active() (0) | ||
168 | #define jffs2_sum_init(a) (0) | ||
169 | #define jffs2_sum_exit(a) | ||
170 | #define jffs2_sum_disable_collecting(a) | ||
171 | #define jffs2_sum_is_disabled(a) (0) | ||
172 | #define jffs2_sum_reset_collected(a) | ||
173 | #define jffs2_sum_add_kvec(a,b,c,d) (0) | ||
174 | #define jffs2_sum_move_collected(a,b) | ||
175 | #define jffs2_sum_write_sumnode(a) (0) | ||
176 | #define jffs2_sum_add_padding_mem(a,b) | ||
177 | #define jffs2_sum_add_inode_mem(a,b,c) | ||
178 | #define jffs2_sum_add_dirent_mem(a,b,c) | ||
179 | #define jffs2_sum_scan_sumnode(a,b,c,d) (0) | ||
180 | |||
181 | #endif /* CONFIG_JFFS2_SUMMARY */ | ||
182 | |||
183 | #endif /* JFFS2_SUMMARY_H */ | ||
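
Because the disabled-build stubs above collapse to constants or empty statements, call sites need no #ifdef of their own; the dead branch is dropped when jffs2_sum_active() is 0. The wbuf.c hunk later in this patch uses exactly this shape:

        if (jffs2_sum_active()) {
                int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to);

                if (res)
                        return res;
        }
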
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c index aaf9475cfb6a..9e0b5458d9c0 100644 --- a/fs/jffs2/super.c +++ b/fs/jffs2/super.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: super.c,v 1.107 2005/07/12 16:37:08 dedekind Exp $ | 10 | * $Id: super.c,v 1.110 2005/11/07 11:14:42 gleixner Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -62,7 +62,7 @@ static int jffs2_sync_fs(struct super_block *sb, int wait) | |||
62 | 62 | ||
63 | down(&c->alloc_sem); | 63 | down(&c->alloc_sem); |
64 | jffs2_flush_wbuf_pad(c); | 64 | jffs2_flush_wbuf_pad(c); |
65 | up(&c->alloc_sem); | 65 | up(&c->alloc_sem); |
66 | return 0; | 66 | return 0; |
67 | } | 67 | } |
68 | 68 | ||
@@ -112,7 +112,7 @@ static int jffs2_sb_set(struct super_block *sb, void *data) | |||
112 | } | 112 | } |
113 | 113 | ||
114 | static struct super_block *jffs2_get_sb_mtd(struct file_system_type *fs_type, | 114 | static struct super_block *jffs2_get_sb_mtd(struct file_system_type *fs_type, |
115 | int flags, const char *dev_name, | 115 | int flags, const char *dev_name, |
116 | void *data, struct mtd_info *mtd) | 116 | void *data, struct mtd_info *mtd) |
117 | { | 117 | { |
118 | struct super_block *sb; | 118 | struct super_block *sb; |
@@ -172,7 +172,7 @@ static struct super_block *jffs2_get_sb_mtd(struct file_system_type *fs_type, | |||
172 | } | 172 | } |
173 | 173 | ||
174 | static struct super_block *jffs2_get_sb_mtdnr(struct file_system_type *fs_type, | 174 | static struct super_block *jffs2_get_sb_mtdnr(struct file_system_type *fs_type, |
175 | int flags, const char *dev_name, | 175 | int flags, const char *dev_name, |
176 | void *data, int mtdnr) | 176 | void *data, int mtdnr) |
177 | { | 177 | { |
178 | struct mtd_info *mtd; | 178 | struct mtd_info *mtd; |
@@ -201,7 +201,7 @@ static struct super_block *jffs2_get_sb(struct file_system_type *fs_type, | |||
201 | 201 | ||
202 | /* The preferred way of mounting in future; especially when | 202 | /* The preferred way of mounting in future; especially when |
203 | CONFIG_BLK_DEV is implemented - we specify the underlying | 203 | CONFIG_BLK_DEV is implemented - we specify the underlying |
204 | MTD device by number or by name, so that we don't require | 204 | MTD device by number or by name, so that we don't require |
205 | block device support to be present in the kernel. */ | 205 | block device support to be present in the kernel. */ |
206 | 206 | ||
207 | /* FIXME: How to do the root fs this way? */ | 207 | /* FIXME: How to do the root fs this way? */ |
@@ -225,7 +225,7 @@ static struct super_block *jffs2_get_sb(struct file_system_type *fs_type, | |||
225 | } else if (isdigit(dev_name[3])) { | 225 | } else if (isdigit(dev_name[3])) { |
226 | /* Mount by MTD device number name */ | 226 | /* Mount by MTD device number name */ |
227 | char *endptr; | 227 | char *endptr; |
228 | 228 | ||
229 | mtdnr = simple_strtoul(dev_name+3, &endptr, 0); | 229 | mtdnr = simple_strtoul(dev_name+3, &endptr, 0); |
230 | if (!*endptr) { | 230 | if (!*endptr) { |
231 | /* It was a valid number */ | 231 | /* It was a valid number */ |
@@ -235,7 +235,7 @@ static struct super_block *jffs2_get_sb(struct file_system_type *fs_type, | |||
235 | } | 235 | } |
236 | } | 236 | } |
237 | 237 | ||
238 | /* Try the old way - the hack where we allowed users to mount | 238 | /* Try the old way - the hack where we allowed users to mount |
239 | /dev/mtdblock$(n) but didn't actually _use_ the blkdev */ | 239 | /dev/mtdblock$(n) but didn't actually _use_ the blkdev */ |
240 | 240 | ||
241 | err = path_lookup(dev_name, LOOKUP_FOLLOW, &nd); | 241 | err = path_lookup(dev_name, LOOKUP_FOLLOW, &nd); |
@@ -282,9 +282,12 @@ static void jffs2_put_super (struct super_block *sb) | |||
282 | down(&c->alloc_sem); | 282 | down(&c->alloc_sem); |
283 | jffs2_flush_wbuf_pad(c); | 283 | jffs2_flush_wbuf_pad(c); |
284 | up(&c->alloc_sem); | 284 | up(&c->alloc_sem); |
285 | |||
286 | jffs2_sum_exit(c); | ||
287 | |||
285 | jffs2_free_ino_caches(c); | 288 | jffs2_free_ino_caches(c); |
286 | jffs2_free_raw_node_refs(c); | 289 | jffs2_free_raw_node_refs(c); |
287 | if (c->mtd->flags & MTD_NO_VIRTBLOCKS) | 290 | if (jffs2_blocks_use_vmalloc(c)) |
288 | vfree(c->blocks); | 291 | vfree(c->blocks); |
289 | else | 292 | else |
290 | kfree(c->blocks); | 293 | kfree(c->blocks); |
@@ -321,6 +324,9 @@ static int __init init_jffs2_fs(void) | |||
321 | #ifdef CONFIG_JFFS2_FS_WRITEBUFFER | 324 | #ifdef CONFIG_JFFS2_FS_WRITEBUFFER |
322 | " (NAND)" | 325 | " (NAND)" |
323 | #endif | 326 | #endif |
327 | #ifdef CONFIG_JFFS2_SUMMARY | ||
328 | " (SUMMARY) " | ||
329 | #endif | ||
324 | " (C) 2001-2003 Red Hat, Inc.\n"); | 330 | " (C) 2001-2003 Red Hat, Inc.\n"); |
325 | 331 | ||
326 | jffs2_inode_cachep = kmem_cache_create("jffs2_i", | 332 | jffs2_inode_cachep = kmem_cache_create("jffs2_i", |
@@ -370,5 +376,5 @@ module_exit(exit_jffs2_fs); | |||
370 | 376 | ||
371 | MODULE_DESCRIPTION("The Journalling Flash File System, v2"); | 377 | MODULE_DESCRIPTION("The Journalling Flash File System, v2"); |
372 | MODULE_AUTHOR("Red Hat, Inc."); | 378 | MODULE_AUTHOR("Red Hat, Inc."); |
373 | MODULE_LICENSE("GPL"); // Actually dual-licensed, but it doesn't matter for | 379 | MODULE_LICENSE("GPL"); // Actually dual-licensed, but it doesn't matter for |
374 | // the sake of this tag. It's Free Software. | 380 | // the sake of this tag. It's Free Software. |
diff --git a/fs/jffs2/symlink.c b/fs/jffs2/symlink.c index 82ef484f5e12..d55754fe8925 100644 --- a/fs/jffs2/symlink.c +++ b/fs/jffs2/symlink.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: symlink.c,v 1.16 2005/03/01 10:50:48 dedekind Exp $ | 10 | * $Id: symlink.c,v 1.19 2005/11/07 11:14:42 gleixner Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -21,7 +21,7 @@ | |||
21 | static void *jffs2_follow_link(struct dentry *dentry, struct nameidata *nd); | 21 | static void *jffs2_follow_link(struct dentry *dentry, struct nameidata *nd); |
22 | 22 | ||
23 | struct inode_operations jffs2_symlink_inode_operations = | 23 | struct inode_operations jffs2_symlink_inode_operations = |
24 | { | 24 | { |
25 | .readlink = generic_readlink, | 25 | .readlink = generic_readlink, |
26 | .follow_link = jffs2_follow_link, | 26 | .follow_link = jffs2_follow_link, |
27 | .setattr = jffs2_setattr | 27 | .setattr = jffs2_setattr |
@@ -30,35 +30,33 @@ struct inode_operations jffs2_symlink_inode_operations = | |||
30 | static void *jffs2_follow_link(struct dentry *dentry, struct nameidata *nd) | 30 | static void *jffs2_follow_link(struct dentry *dentry, struct nameidata *nd) |
31 | { | 31 | { |
32 | struct jffs2_inode_info *f = JFFS2_INODE_INFO(dentry->d_inode); | 32 | struct jffs2_inode_info *f = JFFS2_INODE_INFO(dentry->d_inode); |
33 | char *p = (char *)f->dents; | 33 | char *p = (char *)f->target; |
34 | 34 | ||
35 | /* | 35 | /* |
36 | * We don't acquire the f->sem mutex here since the only data we | 36 | * We don't acquire the f->sem mutex here since the only data we |
37 | * use is f->dents which in case of the symlink inode points to the | 37 | * use is f->target. |
38 | * symlink's target path. | ||
39 | * | 38 | * |
40 | * 1. If we are here the inode has already been built and f->dents has | 39 | * 1. If we are here the inode has already been built and f->target has |
41 | * to point to the target path. | 40 | * to point to the target path. |
42 | * 2. Nobody uses f->dents (if the inode is symlink's inode). The | 41 | * 2. Nobody uses f->target (if the inode is symlink's inode). The |
43 | * exception is inode freeing function which frees f->dents. But | 42 | * exception is inode freeing function which frees f->target. But |
44 | * it can't be called while we are here and before VFS has | 43 | * it can't be called while we are here and before VFS has |
45 | * stopped using our f->dents string which we provide by means of | 44 | * stopped using our f->target string which we provide by means of |
46 | * nd_set_link() call. | 45 | * nd_set_link() call. |
47 | */ | 46 | */ |
48 | 47 | ||
49 | if (!p) { | 48 | if (!p) { |
50 | printk(KERN_ERR "jffs2_follow_link(): can't find symlink target\n"); | 49 | printk(KERN_ERR "jffs2_follow_link(): can't find symlink target\n"); |
51 | p = ERR_PTR(-EIO); | 50 | p = ERR_PTR(-EIO); |
52 | } else { | ||
53 | D1(printk(KERN_DEBUG "jffs2_follow_link(): target path is '%s'\n", (char *) f->dents)); | ||
54 | } | 51 | } |
52 | D1(printk(KERN_DEBUG "jffs2_follow_link(): target path is '%s'\n", (char *) f->target)); | ||
55 | 53 | ||
56 | nd_set_link(nd, p); | 54 | nd_set_link(nd, p); |
57 | 55 | ||
58 | /* | 56 | /* |
59 | * We unlock the f->sem mutex but VFS will use the f->dents string. This is safe | 57 | * We will unlock the f->sem mutex but VFS will use the f->target string. This is safe |
60 | * since the only way that may cause f->dents to be changed is the iput() operation. | 58 | * since the only way that may cause f->target to be changed is the iput() operation. |
61 | * But VFS will not use f->dents after iput() has been called. | 59 | * But VFS will not use f->target after iput() has been called. |
62 | */ | 60 | */ |
63 | return NULL; | 61 | return NULL; |
64 | } | 62 | } |
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c index 7bc7f2d571f6..4cebf0e57c46 100644 --- a/fs/jffs2/wbuf.c +++ b/fs/jffs2/wbuf.c | |||
@@ -9,7 +9,7 @@ | |||
9 | * | 9 | * |
10 | * For licensing information, see the file 'LICENCE' in this directory. | 10 | * For licensing information, see the file 'LICENCE' in this directory. |
11 | * | 11 | * |
12 | * $Id: wbuf.c,v 1.92 2005/04/05 12:51:54 dedekind Exp $ | 12 | * $Id: wbuf.c,v 1.100 2005/09/30 13:59:13 dedekind Exp $ |
13 | * | 13 | * |
14 | */ | 14 | */ |
15 | 15 | ||
@@ -30,12 +30,12 @@ | |||
30 | static unsigned char *brokenbuf; | 30 | static unsigned char *brokenbuf; |
31 | #endif | 31 | #endif |
32 | 32 | ||
33 | #define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) ) | ||
34 | #define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) ) | ||
35 | |||
33 | /* max. erase failures before we mark a block bad */ | 36 | /* max. erase failures before we mark a block bad */ |
34 | #define MAX_ERASE_FAILURES 2 | 37 | #define MAX_ERASE_FAILURES 2 |
35 | 38 | ||
36 | /* two seconds timeout for timed wbuf-flushing */ | ||
37 | #define WBUF_FLUSH_TIMEOUT 2 * HZ | ||
38 | |||
39 | struct jffs2_inodirty { | 39 | struct jffs2_inodirty { |
40 | uint32_t ino; | 40 | uint32_t ino; |
41 | struct jffs2_inodirty *next; | 41 | struct jffs2_inodirty *next; |
@@ -139,7 +139,6 @@ static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock | |||
139 | { | 139 | { |
140 | D1(printk("About to refile bad block at %08x\n", jeb->offset)); | 140 | D1(printk("About to refile bad block at %08x\n", jeb->offset)); |
141 | 141 | ||
142 | D2(jffs2_dump_block_lists(c)); | ||
143 | /* File the existing block on the bad_used_list.... */ | 142 | /* File the existing block on the bad_used_list.... */ |
144 | if (c->nextblock == jeb) | 143 | if (c->nextblock == jeb) |
145 | c->nextblock = NULL; | 144 | c->nextblock = NULL; |
@@ -156,7 +155,6 @@ static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock | |||
156 | c->nr_erasing_blocks++; | 155 | c->nr_erasing_blocks++; |
157 | jffs2_erase_pending_trigger(c); | 156 | jffs2_erase_pending_trigger(c); |
158 | } | 157 | } |
159 | D2(jffs2_dump_block_lists(c)); | ||
160 | 158 | ||
161 | /* Adjust its size counts accordingly */ | 159 | /* Adjust its size counts accordingly */ |
162 | c->wasted_size += jeb->free_size; | 160 | c->wasted_size += jeb->free_size; |
@@ -164,8 +162,9 @@ static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock | |||
164 | jeb->wasted_size += jeb->free_size; | 162 | jeb->wasted_size += jeb->free_size; |
165 | jeb->free_size = 0; | 163 | jeb->free_size = 0; |
166 | 164 | ||
167 | ACCT_SANITY_CHECK(c,jeb); | 165 | jffs2_dbg_dump_block_lists_nolock(c); |
168 | D1(ACCT_PARANOIA_CHECK(jeb)); | 166 | jffs2_dbg_acct_sanity_check_nolock(c,jeb); |
167 | jffs2_dbg_acct_paranoia_check_nolock(c, jeb); | ||
169 | } | 168 | } |
170 | 169 | ||
171 | /* Recover from failure to write wbuf. Recover the nodes up to the | 170 | /* Recover from failure to write wbuf. Recover the nodes up to the |
@@ -189,7 +188,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
189 | /* Find the first node to be recovered, by skipping over every | 188 | /* Find the first node to be recovered, by skipping over every |
190 | node which ends before the wbuf starts, or which is obsolete. */ | 189 | node which ends before the wbuf starts, or which is obsolete. */ |
191 | first_raw = &jeb->first_node; | 190 | first_raw = &jeb->first_node; |
192 | while (*first_raw && | 191 | while (*first_raw && |
193 | (ref_obsolete(*first_raw) || | 192 | (ref_obsolete(*first_raw) || |
194 | (ref_offset(*first_raw)+ref_totlen(c, jeb, *first_raw)) < c->wbuf_ofs)) { | 193 | (ref_offset(*first_raw)+ref_totlen(c, jeb, *first_raw)) < c->wbuf_ofs)) { |
195 | D1(printk(KERN_DEBUG "Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n", | 194 | D1(printk(KERN_DEBUG "Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n", |
@@ -238,7 +237,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
238 | ret = c->mtd->read_ecc(c->mtd, start, c->wbuf_ofs - start, &retlen, buf, NULL, c->oobinfo); | 237 | ret = c->mtd->read_ecc(c->mtd, start, c->wbuf_ofs - start, &retlen, buf, NULL, c->oobinfo); |
239 | else | 238 | else |
240 | ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf); | 239 | ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf); |
241 | 240 | ||
242 | if (ret == -EBADMSG && retlen == c->wbuf_ofs - start) { | 241 | if (ret == -EBADMSG && retlen == c->wbuf_ofs - start) { |
243 | /* ECC recovered */ | 242 | /* ECC recovered */ |
244 | ret = 0; | 243 | ret = 0; |
@@ -266,7 +265,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
266 | 265 | ||
267 | 266 | ||
268 | /* ... and get an allocation of space from a shiny new block instead */ | 267 | /* ... and get an allocation of space from a shiny new block instead */ |
269 | ret = jffs2_reserve_space_gc(c, end-start, &ofs, &len); | 268 | ret = jffs2_reserve_space_gc(c, end-start, &ofs, &len, JFFS2_SUMMARY_NOSUM_SIZE); |
270 | if (ret) { | 269 | if (ret) { |
271 | printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n"); | 270 | printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n"); |
272 | kfree(buf); | 271 | kfree(buf); |
@@ -275,15 +274,15 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
275 | if (end-start >= c->wbuf_pagesize) { | 274 | if (end-start >= c->wbuf_pagesize) { |
276 | /* Need to do another write immediately, but it's possible | 275 | /* Need to do another write immediately, but it's possible |
277 | that this is just because the wbuf itself is completely | 276 | that this is just because the wbuf itself is completely |
278 | full, and there's nothing earlier read back from the | 277 | full, and there's nothing earlier read back from the |
279 | flash. Hence 'buf' isn't necessarily what we're writing | 278 | flash. Hence 'buf' isn't necessarily what we're writing |
280 | from. */ | 279 | from. */ |
281 | unsigned char *rewrite_buf = buf?:c->wbuf; | 280 | unsigned char *rewrite_buf = buf?:c->wbuf; |
282 | uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize); | 281 | uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize); |
283 | 282 | ||
284 | D1(printk(KERN_DEBUG "Write 0x%x bytes at 0x%08x in wbuf recover\n", | 283 | D1(printk(KERN_DEBUG "Write 0x%x bytes at 0x%08x in wbuf recover\n", |
285 | towrite, ofs)); | 284 | towrite, ofs)); |
286 | 285 | ||
287 | #ifdef BREAKMEHEADER | 286 | #ifdef BREAKMEHEADER |
288 | static int breakme; | 287 | static int breakme; |
289 | if (breakme++ == 20) { | 288 | if (breakme++ == 20) { |
@@ -391,11 +390,11 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
391 | else | 390 | else |
392 | jeb->last_node = container_of(first_raw, struct jffs2_raw_node_ref, next_phys); | 391 | jeb->last_node = container_of(first_raw, struct jffs2_raw_node_ref, next_phys); |
393 | 392 | ||
394 | ACCT_SANITY_CHECK(c,jeb); | 393 | jffs2_dbg_acct_sanity_check_nolock(c, jeb); |
395 | D1(ACCT_PARANOIA_CHECK(jeb)); | 394 | jffs2_dbg_acct_paranoia_check_nolock(c, jeb); |
396 | 395 | ||
397 | ACCT_SANITY_CHECK(c,new_jeb); | 396 | jffs2_dbg_acct_sanity_check_nolock(c, new_jeb); |
398 | D1(ACCT_PARANOIA_CHECK(new_jeb)); | 397 | jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb); |
399 | 398 | ||
400 | spin_unlock(&c->erase_completion_lock); | 399 | spin_unlock(&c->erase_completion_lock); |
401 | 400 | ||
@@ -434,15 +433,15 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) | |||
434 | this happens, if we have a change to a new block, | 433 | this happens, if we have a change to a new block, |
435 | or if fsync forces us to flush the writebuffer. | 434 | or if fsync forces us to flush the writebuffer. |
436 | if we have a switch to next page, we will not have | 435 | if we have a switch to next page, we will not have |
437 | enough remaining space for this. | 436 | enough remaining space for this. |
438 | */ | 437 | */ |
439 | if (pad && !jffs2_dataflash(c)) { | 438 | if (pad ) { |
440 | c->wbuf_len = PAD(c->wbuf_len); | 439 | c->wbuf_len = PAD(c->wbuf_len); |
441 | 440 | ||
442 | /* Pad with JFFS2_DIRTY_BITMASK initially. this helps out ECC'd NOR | 441 | /* Pad with JFFS2_DIRTY_BITMASK initially. this helps out ECC'd NOR |
443 | with 8 byte page size */ | 442 | with 8 byte page size */ |
444 | memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len); | 443 | memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len); |
445 | 444 | ||
446 | if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) { | 445 | if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) { |
447 | struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len); | 446 | struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len); |
448 | padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); | 447 | padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); |
@@ -453,7 +452,7 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) | |||
453 | } | 452 | } |
454 | /* else jffs2_flash_writev has actually filled in the rest of the | 453 | /* else jffs2_flash_writev has actually filled in the rest of the |
455 | buffer for us, and will deal with the node refs etc. later. */ | 454 | buffer for us, and will deal with the node refs etc. later. */ |
456 | 455 | ||
457 | #ifdef BREAKME | 456 | #ifdef BREAKME |
458 | static int breakme; | 457 | static int breakme; |
459 | if (breakme++ == 20) { | 458 | if (breakme++ == 20) { |
@@ -462,9 +461,9 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) | |||
462 | c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, | 461 | c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, |
463 | &retlen, brokenbuf, NULL, c->oobinfo); | 462 | &retlen, brokenbuf, NULL, c->oobinfo); |
464 | ret = -EIO; | 463 | ret = -EIO; |
465 | } else | 464 | } else |
466 | #endif | 465 | #endif |
467 | 466 | ||
468 | if (jffs2_cleanmarker_oob(c)) | 467 | if (jffs2_cleanmarker_oob(c)) |
469 | ret = c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf, NULL, c->oobinfo); | 468 | ret = c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf, NULL, c->oobinfo); |
470 | else | 469 | else |
@@ -487,7 +486,7 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) | |||
487 | spin_lock(&c->erase_completion_lock); | 486 | spin_lock(&c->erase_completion_lock); |
488 | 487 | ||
489 | /* Adjust free size of the block if we padded. */ | 488 | /* Adjust free size of the block if we padded. */ |
490 | if (pad && !jffs2_dataflash(c)) { | 489 | if (pad) { |
491 | struct jffs2_eraseblock *jeb; | 490 | struct jffs2_eraseblock *jeb; |
492 | 491 | ||
493 | jeb = &c->blocks[c->wbuf_ofs / c->sector_size]; | 492 | jeb = &c->blocks[c->wbuf_ofs / c->sector_size]; |
@@ -495,7 +494,7 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) | |||
495 | D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n", | 494 | D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n", |
496 | (jeb==c->nextblock)?"next":"", jeb->offset)); | 495 | (jeb==c->nextblock)?"next":"", jeb->offset)); |
497 | 496 | ||
498 | /* wbuf_pagesize - wbuf_len is the amount of space that's to be | 497 | /* wbuf_pagesize - wbuf_len is the amount of space that's to be |
499 | padded. If there is less free space in the block than that, | 498 | padded. If there is less free space in the block than that, |
500 | something screwed up */ | 499 | something screwed up */ |
501 | if (jeb->free_size < (c->wbuf_pagesize - c->wbuf_len)) { | 500 | if (jeb->free_size < (c->wbuf_pagesize - c->wbuf_len)) { |
@@ -523,9 +522,9 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) | |||
523 | return 0; | 522 | return 0; |
524 | } | 523 | } |
525 | 524 | ||
526 | /* Trigger garbage collection to flush the write-buffer. | 525 | /* Trigger garbage collection to flush the write-buffer. |
527 | If ino arg is zero, do it if _any_ real (i.e. not GC) writes are | 526 | If ino arg is zero, do it if _any_ real (i.e. not GC) writes are |
528 | outstanding. If ino arg non-zero, do it only if a write for the | 527 | outstanding. If ino arg non-zero, do it only if a write for the |
529 | given inode is outstanding. */ | 528 | given inode is outstanding. */ |
530 | int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino) | 529 | int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino) |
531 | { | 530 | { |
@@ -604,15 +603,6 @@ int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c) | |||
604 | 603 | ||
605 | return ret; | 604 | return ret; |
606 | } | 605 | } |
607 | |||
608 | #ifdef CONFIG_JFFS2_FS_WRITEBUFFER | ||
609 | #define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) ) | ||
610 | #define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) ) | ||
611 | #else | ||
612 | #define PAGE_DIV(x) ( (x) & (~(c->wbuf_pagesize - 1)) ) | ||
613 | #define PAGE_MOD(x) ( (x) & (c->wbuf_pagesize - 1) ) | ||
614 | #endif | ||
615 | |||
616 | int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsigned long count, loff_t to, size_t *retlen, uint32_t ino) | 606 | int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsigned long count, loff_t to, size_t *retlen, uint32_t ino) |
617 | { | 607 | { |
618 | struct kvec outvecs[3]; | 608 | struct kvec outvecs[3]; |
@@ -629,13 +619,13 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig | |||
629 | /* If not NAND flash, don't bother */ | 619 | /* If not NAND flash, don't bother */ |
630 | if (!jffs2_is_writebuffered(c)) | 620 | if (!jffs2_is_writebuffered(c)) |
631 | return jffs2_flash_direct_writev(c, invecs, count, to, retlen); | 621 | return jffs2_flash_direct_writev(c, invecs, count, to, retlen); |
632 | 622 | ||
633 | down_write(&c->wbuf_sem); | 623 | down_write(&c->wbuf_sem); |
634 | 624 | ||
635 | /* If wbuf_ofs is not initialized, set it to target address */ | 625 | /* If wbuf_ofs is not initialized, set it to target address */ |
636 | if (c->wbuf_ofs == 0xFFFFFFFF) { | 626 | if (c->wbuf_ofs == 0xFFFFFFFF) { |
637 | c->wbuf_ofs = PAGE_DIV(to); | 627 | c->wbuf_ofs = PAGE_DIV(to); |
638 | c->wbuf_len = PAGE_MOD(to); | 628 | c->wbuf_len = PAGE_MOD(to); |
639 | memset(c->wbuf,0xff,c->wbuf_pagesize); | 629 | memset(c->wbuf,0xff,c->wbuf_pagesize); |
640 | } | 630 | } |
641 | 631 | ||
@@ -649,10 +639,10 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig | |||
649 | memset(c->wbuf,0xff,c->wbuf_pagesize); | 639 | memset(c->wbuf,0xff,c->wbuf_pagesize); |
650 | } | 640 | } |
651 | } | 641 | } |
652 | 642 | ||
653 | /* Sanity checks on target address. | 643 | /* Sanity checks on target address. |
654 | It's permitted to write at PAD(c->wbuf_len+c->wbuf_ofs), | 644 | It's permitted to write at PAD(c->wbuf_len+c->wbuf_ofs), |
655 | and it's permitted to write at the beginning of a new | 645 | and it's permitted to write at the beginning of a new |
656 | erase block. Anything else, and you die. | 646 | erase block. Anything else, and you die. |
657 | New block starts at xxx000c (0-b = block header) | 647 | New block starts at xxx000c (0-b = block header) |
658 | */ | 648 | */ |
@@ -670,8 +660,8 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig | |||
670 | } | 660 | } |
671 | /* set pointer to new block */ | 661 | /* set pointer to new block */ |
672 | c->wbuf_ofs = PAGE_DIV(to); | 662 | c->wbuf_ofs = PAGE_DIV(to); |
673 | c->wbuf_len = PAGE_MOD(to); | 663 | c->wbuf_len = PAGE_MOD(to); |
674 | } | 664 | } |
675 | 665 | ||
676 | if (to != PAD(c->wbuf_ofs + c->wbuf_len)) { | 666 | if (to != PAD(c->wbuf_ofs + c->wbuf_len)) { |
677 | /* We're not writing immediately after the writebuffer. Bad. */ | 667 | /* We're not writing immediately after the writebuffer. Bad. */ |
@@ -691,21 +681,21 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig | |||
691 | invec = 0; | 681 | invec = 0; |
692 | outvec = 0; | 682 | outvec = 0; |
693 | 683 | ||
694 | /* Fill writebuffer first, if already in use */ | 684 | /* Fill writebuffer first, if already in use */ |
695 | if (c->wbuf_len) { | 685 | if (c->wbuf_len) { |
696 | uint32_t invec_ofs = 0; | 686 | uint32_t invec_ofs = 0; |
697 | 687 | ||
698 | /* adjust alignment offset */ | 688 | /* adjust alignment offset */ |
699 | if (c->wbuf_len != PAGE_MOD(to)) { | 689 | if (c->wbuf_len != PAGE_MOD(to)) { |
700 | c->wbuf_len = PAGE_MOD(to); | 690 | c->wbuf_len = PAGE_MOD(to); |
701 | /* take care of alignment to next page */ | 691 | /* take care of alignment to next page */ |
702 | if (!c->wbuf_len) | 692 | if (!c->wbuf_len) |
703 | c->wbuf_len = c->wbuf_pagesize; | 693 | c->wbuf_len = c->wbuf_pagesize; |
704 | } | 694 | } |
705 | 695 | ||
706 | while(c->wbuf_len < c->wbuf_pagesize) { | 696 | while(c->wbuf_len < c->wbuf_pagesize) { |
707 | uint32_t thislen; | 697 | uint32_t thislen; |
708 | 698 | ||
709 | if (invec == count) | 699 | if (invec == count) |
710 | goto alldone; | 700 | goto alldone; |
711 | 701 | ||
@@ -713,17 +703,17 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig | |||
713 | 703 | ||
714 | if (thislen >= invecs[invec].iov_len) | 704 | if (thislen >= invecs[invec].iov_len) |
715 | thislen = invecs[invec].iov_len; | 705 | thislen = invecs[invec].iov_len; |
716 | 706 | ||
717 | invec_ofs = thislen; | 707 | invec_ofs = thislen; |
718 | 708 | ||
719 | memcpy(c->wbuf + c->wbuf_len, invecs[invec].iov_base, thislen); | 709 | memcpy(c->wbuf + c->wbuf_len, invecs[invec].iov_base, thislen); |
720 | c->wbuf_len += thislen; | 710 | c->wbuf_len += thislen; |
721 | donelen += thislen; | 711 | donelen += thislen; |
722 | /* Get next invec, if actual did not fill the buffer */ | 712 | /* Get next invec, if actual did not fill the buffer */ |
723 | if (c->wbuf_len < c->wbuf_pagesize) | 713 | if (c->wbuf_len < c->wbuf_pagesize) |
724 | invec++; | 714 | invec++; |
725 | } | 715 | } |
726 | 716 | ||
727 | /* write buffer is full, flush buffer */ | 717 | /* write buffer is full, flush buffer */ |
728 | ret = __jffs2_flush_wbuf(c, NOPAD); | 718 | ret = __jffs2_flush_wbuf(c, NOPAD); |
729 | if (ret) { | 719 | if (ret) { |
@@ -782,10 +772,10 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig | |||
782 | 772 | ||
783 | /* We did cross a page boundary, so we write some now */ | 773 | /* We did cross a page boundary, so we write some now */ |
784 | if (jffs2_cleanmarker_oob(c)) | 774 | if (jffs2_cleanmarker_oob(c)) |
785 | ret = c->mtd->writev_ecc(c->mtd, outvecs, splitvec+1, outvec_to, &wbuf_retlen, NULL, c->oobinfo); | 775 | ret = c->mtd->writev_ecc(c->mtd, outvecs, splitvec+1, outvec_to, &wbuf_retlen, NULL, c->oobinfo); |
786 | else | 776 | else |
787 | ret = jffs2_flash_direct_writev(c, outvecs, splitvec+1, outvec_to, &wbuf_retlen); | 777 | ret = jffs2_flash_direct_writev(c, outvecs, splitvec+1, outvec_to, &wbuf_retlen); |
788 | 778 | ||
789 | if (ret < 0 || wbuf_retlen != PAGE_DIV(totlen)) { | 779 | if (ret < 0 || wbuf_retlen != PAGE_DIV(totlen)) { |
790 | /* At this point we have no problem, | 780 | /* At this point we have no problem, |
791 | c->wbuf is empty. However refile nextblock to avoid | 781 | c->wbuf is empty. However refile nextblock to avoid |
@@ -802,7 +792,7 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig | |||
802 | spin_unlock(&c->erase_completion_lock); | 792 | spin_unlock(&c->erase_completion_lock); |
803 | goto exit; | 793 | goto exit; |
804 | } | 794 | } |
805 | 795 | ||
806 | donelen += wbuf_retlen; | 796 | donelen += wbuf_retlen; |
807 | c->wbuf_ofs = PAGE_DIV(outvec_to) + PAGE_DIV(totlen); | 797 | c->wbuf_ofs = PAGE_DIV(outvec_to) + PAGE_DIV(totlen); |
808 | 798 | ||
@@ -836,11 +826,17 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig | |||
836 | alldone: | 826 | alldone: |
837 | *retlen = donelen; | 827 | *retlen = donelen; |
838 | 828 | ||
829 | if (jffs2_sum_active()) { | ||
830 | int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to); | ||
831 | if (res) | ||
832 | return res; | ||
833 | } | ||
834 | |||
839 | if (c->wbuf_len && ino) | 835 | if (c->wbuf_len && ino) |
840 | jffs2_wbuf_dirties_inode(c, ino); | 836 | jffs2_wbuf_dirties_inode(c, ino); |
841 | 837 | ||
842 | ret = 0; | 838 | ret = 0; |
843 | 839 | ||
844 | exit: | 840 | exit: |
845 | up_write(&c->wbuf_sem); | 841 | up_write(&c->wbuf_sem); |
846 | return ret; | 842 | return ret; |
@@ -855,7 +851,7 @@ int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *r | |||
855 | struct kvec vecs[1]; | 851 | struct kvec vecs[1]; |
856 | 852 | ||
857 | if (!jffs2_is_writebuffered(c)) | 853 | if (!jffs2_is_writebuffered(c)) |
858 | return c->mtd->write(c->mtd, ofs, len, retlen, buf); | 854 | return jffs2_flash_direct_write(c, ofs, len, retlen, buf); |
859 | 855 | ||
860 | vecs[0].iov_base = (unsigned char *) buf; | 856 | vecs[0].iov_base = (unsigned char *) buf; |
861 | vecs[0].iov_len = len; | 857 | vecs[0].iov_len = len; |
@@ -883,18 +879,18 @@ int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *re | |||
883 | if ( (ret == -EBADMSG) && (*retlen == len) ) { | 879 | if ( (ret == -EBADMSG) && (*retlen == len) ) { |
884 | printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n", | 880 | printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n", |
885 | len, ofs); | 881 | len, ofs); |
886 | /* | 882 | /* |
887 | * We have the raw data without ECC correction in the buffer, maybe | 883 | * We have the raw data without ECC correction in the buffer, maybe |
888 | * we are lucky and all data or parts are correct. We check the node. | 884 | * we are lucky and all data or parts are correct. We check the node. |
889 | * If the data is corrupted, the node check will sort it out. | 885 | * If the data is corrupted, the node check will sort it out. |
890 | * We keep this block, it will fail on write or erase and then we | 886 | * We keep this block, it will fail on write or erase and then we |
891 | * mark it bad. Or should we do that now? But we should give it a chance. | 887 | * mark it bad. Or should we do that now? But we should give it a chance. |
892 | * Maybe we had a system crash or power loss before the ecc write or | 888 | * Maybe we had a system crash or power loss before the ecc write or |
893 | * an erase was completed. | 889 | * an erase was completed. |
894 | * So we return success. :) | 890 | * So we return success. :) |
895 | */ | 891 | */ |
896 | ret = 0; | 892 | ret = 0; |
897 | } | 893 | } |
898 | 894 | ||
899 | /* if no writebuffer available or write buffer empty, return */ | 895 | /* if no writebuffer available or write buffer empty, return */ |
900 | if (!c->wbuf_pagesize || !c->wbuf_len) | 896 | if (!c->wbuf_pagesize || !c->wbuf_len) |
@@ -909,16 +905,16 @@ int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *re | |||
909 | if (owbf > c->wbuf_len) /* is read beyond write buffer ? */ | 905 | if (owbf > c->wbuf_len) /* is read beyond write buffer ? */ |
910 | goto exit; | 906 | goto exit; |
911 | lwbf = c->wbuf_len - owbf; /* number of bytes to copy */ | 907 | lwbf = c->wbuf_len - owbf; /* number of bytes to copy */ |
912 | if (lwbf > len) | 908 | if (lwbf > len) |
913 | lwbf = len; | 909 | lwbf = len; |
914 | } else { | 910 | } else { |
915 | orbf = (c->wbuf_ofs - ofs); /* offset in read buffer */ | 911 | orbf = (c->wbuf_ofs - ofs); /* offset in read buffer */ |
916 | if (orbf > len) /* is write beyond write buffer ? */ | 912 | if (orbf > len) /* is write beyond write buffer ? */ |
917 | goto exit; | 913 | goto exit; |
918 | lwbf = len - orbf; /* number of bytes to copy */ | 914 | lwbf = len - orbf; /* number of bytes to copy */ |
919 | if (lwbf > c->wbuf_len) | 915 | if (lwbf > c->wbuf_len) |
920 | lwbf = c->wbuf_len; | 916 | lwbf = c->wbuf_len; |
921 | } | 917 | } |
922 | if (lwbf > 0) | 918 | if (lwbf > 0) |
923 | memcpy(buf+orbf,c->wbuf+owbf,lwbf); | 919 | memcpy(buf+orbf,c->wbuf+owbf,lwbf); |
924 | 920 | ||
@@ -946,7 +942,7 @@ int jffs2_check_oob_empty( struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb | |||
946 | printk(KERN_NOTICE "jffs2_check_oob_empty(): allocation of temporary data buffer for oob check failed\n"); | 942 | printk(KERN_NOTICE "jffs2_check_oob_empty(): allocation of temporary data buffer for oob check failed\n"); |
947 | return -ENOMEM; | 943 | return -ENOMEM; |
948 | } | 944 | } |
949 | /* | 945 | /* |
950 | * if mode = 0, we scan for a total empty oob area, else we have | 946 | * if mode = 0, we scan for a total empty oob area, else we have |
951 | * to take care of the cleanmarker in the first page of the block | 947 | * to take care of the cleanmarker in the first page of the block |
952 | */ | 948 | */ |
@@ -955,41 +951,41 @@ int jffs2_check_oob_empty( struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb | |||
955 | D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB failed %d for block at %08x\n", ret, jeb->offset)); | 951 | D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB failed %d for block at %08x\n", ret, jeb->offset)); |
956 | goto out; | 952 | goto out; |
957 | } | 953 | } |
958 | 954 | ||
959 | if (retlen < len) { | 955 | if (retlen < len) { |
960 | D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB return short read " | 956 | D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB return short read " |
961 | "(%zd bytes not %d) for block at %08x\n", retlen, len, jeb->offset)); | 957 | "(%zd bytes not %d) for block at %08x\n", retlen, len, jeb->offset)); |
962 | ret = -EIO; | 958 | ret = -EIO; |
963 | goto out; | 959 | goto out; |
964 | } | 960 | } |
965 | 961 | ||
966 | /* Special check for first page */ | 962 | /* Special check for first page */ |
967 | for(i = 0; i < oob_size ; i++) { | 963 | for(i = 0; i < oob_size ; i++) { |
968 | /* Yeah, we know about the cleanmarker. */ | 964 | /* Yeah, we know about the cleanmarker. */ |
969 | if (mode && i >= c->fsdata_pos && | 965 | if (mode && i >= c->fsdata_pos && |
970 | i < c->fsdata_pos + c->fsdata_len) | 966 | i < c->fsdata_pos + c->fsdata_len) |
971 | continue; | 967 | continue; |
972 | 968 | ||
973 | if (buf[i] != 0xFF) { | 969 | if (buf[i] != 0xFF) { |
974 | D2(printk(KERN_DEBUG "Found %02x at %x in OOB for %08x\n", | 970 | D2(printk(KERN_DEBUG "Found %02x at %x in OOB for %08x\n", |
975 | buf[page+i], page+i, jeb->offset)); | 971 | buf[i], i, jeb->offset)); |
976 | ret = 1; | 972 | ret = 1; |
977 | goto out; | 973 | goto out; |
978 | } | 974 | } |
979 | } | 975 | } |
980 | 976 | ||
981 | /* we know, we are aligned :) */ | 977 | /* we know, we are aligned :) */ |
982 | for (page = oob_size; page < len; page += sizeof(long)) { | 978 | for (page = oob_size; page < len; page += sizeof(long)) { |
983 | unsigned long dat = *(unsigned long *)(&buf[page]); | 979 | unsigned long dat = *(unsigned long *)(&buf[page]); |
984 | if(dat != -1) { | 980 | if(dat != -1) { |
985 | ret = 1; | 981 | ret = 1; |
986 | goto out; | 982 | goto out; |
987 | } | 983 | } |
988 | } | 984 | } |
989 | 985 | ||
990 | out: | 986 | out: |
991 | kfree(buf); | 987 | kfree(buf); |
992 | 988 | ||
993 | return ret; | 989 | return ret; |
994 | } | 990 | } |
995 | 991 | ||
@@ -1071,7 +1067,7 @@ int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_erasebloc | |||
1071 | n.totlen = cpu_to_je32(8); | 1067 | n.totlen = cpu_to_je32(8); |
1072 | 1068 | ||
1073 | ret = jffs2_flash_write_oob(c, jeb->offset + c->fsdata_pos, c->fsdata_len, &retlen, (unsigned char *)&n); | 1069 | ret = jffs2_flash_write_oob(c, jeb->offset + c->fsdata_pos, c->fsdata_len, &retlen, (unsigned char *)&n); |
1074 | 1070 | ||
1075 | if (ret) { | 1071 | if (ret) { |
1076 | D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Write failed for block at %08x: error %d\n", jeb->offset, ret)); | 1072 | D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Write failed for block at %08x: error %d\n", jeb->offset, ret)); |
1077 | return ret; | 1073 | return ret; |
@@ -1083,7 +1079,7 @@ int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_erasebloc | |||
1083 | return 0; | 1079 | return 0; |
1084 | } | 1080 | } |
1085 | 1081 | ||
1086 | /* | 1082 | /* |
1087 | * On NAND we try to mark this block bad. If the block was erased more | 1083 | * On NAND we try to mark this block bad. If the block was erased more |
1088 | * than MAX_ERASE_FAILURES times, we finally mark it bad. | 1084 | * than MAX_ERASE_FAILURES times, we finally mark it bad. |
1089 | * Don't care about failures. This block remains on the erase-pending | 1085 | * Don't care about failures. This block remains on the erase-pending |
@@ -1104,7 +1100,7 @@ int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock * | |||
1104 | 1100 | ||
1105 | D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Marking bad block at %08x\n", bad_offset)); | 1101 | D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Marking bad block at %08x\n", bad_offset)); |
1106 | ret = c->mtd->block_markbad(c->mtd, bad_offset); | 1102 | ret = c->mtd->block_markbad(c->mtd, bad_offset); |
1107 | 1103 | ||
1108 | if (ret) { | 1104 | if (ret) { |
1109 | D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret)); | 1105 | D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret)); |
1110 | return ret; | 1106 | return ret; |
@@ -1128,7 +1124,7 @@ static int jffs2_nand_set_oobinfo(struct jffs2_sb_info *c) | |||
1128 | /* Do this only, if we have an oob buffer */ | 1124 | /* Do this only, if we have an oob buffer */ |
1129 | if (!c->mtd->oobsize) | 1125 | if (!c->mtd->oobsize) |
1130 | return 0; | 1126 | return 0; |
1131 | 1127 | ||
1132 | /* Cleanmarker is out-of-band, so inline size zero */ | 1128 | /* Cleanmarker is out-of-band, so inline size zero */ |
1133 | c->cleanmarker_size = 0; | 1129 | c->cleanmarker_size = 0; |
1134 | 1130 | ||
@@ -1154,7 +1150,7 @@ static int jffs2_nand_set_oobinfo(struct jffs2_sb_info *c) | |||
1154 | c->fsdata_len = NAND_JFFS2_OOB16_FSDALEN; | 1150 | c->fsdata_len = NAND_JFFS2_OOB16_FSDALEN; |
1155 | c->badblock_pos = 15; | 1151 | c->badblock_pos = 15; |
1156 | break; | 1152 | break; |
1157 | 1153 | ||
1158 | default: | 1154 | default: |
1159 | D1(printk(KERN_DEBUG "JFFS2 on NAND. No autoplacment info found\n")); | 1155 | D1(printk(KERN_DEBUG "JFFS2 on NAND. No autoplacment info found\n")); |
1160 | return -EINVAL; | 1156 | return -EINVAL; |
@@ -1171,7 +1167,7 @@ int jffs2_nand_flash_setup(struct jffs2_sb_info *c) | |||
1171 | init_rwsem(&c->wbuf_sem); | 1167 | init_rwsem(&c->wbuf_sem); |
1172 | c->wbuf_pagesize = c->mtd->oobblock; | 1168 | c->wbuf_pagesize = c->mtd->oobblock; |
1173 | c->wbuf_ofs = 0xFFFFFFFF; | 1169 | c->wbuf_ofs = 0xFFFFFFFF; |
1174 | 1170 | ||
1175 | c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); | 1171 | c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); |
1176 | if (!c->wbuf) | 1172 | if (!c->wbuf) |
1177 | return -ENOMEM; | 1173 | return -ENOMEM; |
@@ -1197,17 +1193,41 @@ void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c) | |||
1197 | 1193 | ||
1198 | int jffs2_dataflash_setup(struct jffs2_sb_info *c) { | 1194 | int jffs2_dataflash_setup(struct jffs2_sb_info *c) { |
1199 | c->cleanmarker_size = 0; /* No cleanmarkers needed */ | 1195 | c->cleanmarker_size = 0; /* No cleanmarkers needed */ |
1200 | 1196 | ||
1201 | /* Initialize write buffer */ | 1197 | /* Initialize write buffer */ |
1202 | init_rwsem(&c->wbuf_sem); | 1198 | init_rwsem(&c->wbuf_sem); |
1203 | c->wbuf_pagesize = c->sector_size; | ||
1204 | c->wbuf_ofs = 0xFFFFFFFF; | ||
1205 | 1199 | ||
1200 | |||
1201 | c->wbuf_pagesize = c->mtd->erasesize; | ||
1202 | |||
1203 | /* Find a suitable c->sector_size | ||
1204 | * - Not too many sectors | ||
1205 | * - Sectors have to be at least 4 K + some bytes | ||
1206 | * - All known dataflashes have erase sizes of 528 or 1056 | ||
1207 | * - we take at least 8 eraseblocks and want to have at least 8K size | ||
1208 | * - The concatenation should be a power of 2 | ||
1209 | */ | ||
1210 | |||
1211 | c->sector_size = 8 * c->mtd->erasesize; | ||
1212 | |||
1213 | while (c->sector_size < 8192) { | ||
1214 | c->sector_size *= 2; | ||
1215 | } | ||
1216 | |||
1217 | /* It may be necessary to adjust the flash size */ | ||
1218 | c->flash_size = c->mtd->size; | ||
1219 | |||
1220 | if ((c->flash_size % c->sector_size) != 0) { | ||
1221 | c->flash_size = (c->flash_size / c->sector_size) * c->sector_size; | ||
1222 | printk(KERN_WARNING "JFFS2 flash size adjusted to %dKiB\n", c->flash_size); | ||
1223 | }; | ||
1224 | |||
1225 | c->wbuf_ofs = 0xFFFFFFFF; | ||
1206 | c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); | 1226 | c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); |
1207 | if (!c->wbuf) | 1227 | if (!c->wbuf) |
1208 | return -ENOMEM; | 1228 | return -ENOMEM; |
1209 | 1229 | ||
1210 | printk(KERN_INFO "JFFS2 write-buffering enabled (%i)\n", c->wbuf_pagesize); | 1230 | printk(KERN_INFO "JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n", c->wbuf_pagesize, c->sector_size); |
1211 | 1231 | ||
1212 | return 0; | 1232 | return 0; |
1213 | } | 1233 | } |
@@ -1235,3 +1255,23 @@ int jffs2_nor_ecc_flash_setup(struct jffs2_sb_info *c) { | |||
1235 | void jffs2_nor_ecc_flash_cleanup(struct jffs2_sb_info *c) { | 1255 | void jffs2_nor_ecc_flash_cleanup(struct jffs2_sb_info *c) { |
1236 | kfree(c->wbuf); | 1256 | kfree(c->wbuf); |
1237 | } | 1257 | } |
1258 | |||
1259 | int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) { | ||
1260 | /* Cleanmarker currently occupies a whole programming region */ | ||
1261 | c->cleanmarker_size = MTD_PROGREGION_SIZE(c->mtd); | ||
1262 | |||
1263 | /* Initialize write buffer */ | ||
1264 | init_rwsem(&c->wbuf_sem); | ||
1265 | c->wbuf_pagesize = MTD_PROGREGION_SIZE(c->mtd); | ||
1266 | c->wbuf_ofs = 0xFFFFFFFF; | ||
1267 | |||
1268 | c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); | ||
1269 | if (!c->wbuf) | ||
1270 | return -ENOMEM; | ||
1271 | |||
1272 | return 0; | ||
1273 | } | ||
1274 | |||
1275 | void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) { | ||
1276 | kfree(c->wbuf); | ||
1277 | } | ||
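The reworked jffs2_dataflash_setup() above no longer uses the physical erase size as the JFFS2 sector size directly: the write buffer stays one eraseblock wide, but c->sector_size becomes a concatenation of at least eight eraseblocks, doubled until it reaches 8 KiB, and the reported flash size is then trimmed to a whole number of such virtual sectors. Below is a minimal userspace sketch of that arithmetic; pick_sector_size() and the sample flash size are assumptions made for the example, only the calculation mirrors the new code.

```c
#include <stdio.h>

/* Mirror of the virtual-sector selection in the new jffs2_dataflash_setup():
 * start from eight eraseblocks, keep doubling until the concatenation is at
 * least 8 KiB, then trim the flash size down to a whole number of such
 * sectors.  Standalone sketch, not kernel code. */
static unsigned int pick_sector_size(unsigned int erasesize)
{
	unsigned int sector_size = 8 * erasesize;

	while (sector_size < 8192)
		sector_size *= 2;

	return sector_size;
}

int main(void)
{
	/* 528 and 1056 are the DataFlash erase sizes named in the comment */
	unsigned int erasesizes[] = { 528, 1056 };
	unsigned int flash_size = 4 * 1024 * 1024 + 100; /* not a multiple on purpose */

	for (int i = 0; i < 2; i++) {
		unsigned int ss = pick_sector_size(erasesizes[i]);
		unsigned int trimmed = (flash_size / ss) * ss;

		printf("erasesize %u -> sector_size %u, flash %u trimmed to %u\n",
		       erasesizes[i], ss, flash_size, trimmed);
	}
	return 0;
}
```

For both erase sizes mentioned in the comment the result is the same 8448-byte virtual sector: 8 * 1056 is already 8448, and 8 * 528 = 4224 doubles once to 8448.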
diff --git a/fs/jffs2/write.c b/fs/jffs2/write.c index 69100615d9ae..1342f0158e9b 100644 --- a/fs/jffs2/write.c +++ b/fs/jffs2/write.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: write.c,v 1.92 2005/04/13 13:22:35 dwmw2 Exp $ | 10 | * $Id: write.c,v 1.97 2005/11/07 11:14:42 gleixner Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -54,35 +54,7 @@ int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint | |||
54 | return 0; | 54 | return 0; |
55 | } | 55 | } |
56 | 56 | ||
57 | #if CONFIG_JFFS2_FS_DEBUG > 0 | 57 | /* jffs2_write_dnode - given a raw_inode, allocate a full_dnode for it, |
58 | static void writecheck(struct jffs2_sb_info *c, uint32_t ofs) | ||
59 | { | ||
60 | unsigned char buf[16]; | ||
61 | size_t retlen; | ||
62 | int ret, i; | ||
63 | |||
64 | ret = jffs2_flash_read(c, ofs, 16, &retlen, buf); | ||
65 | if (ret || (retlen != 16)) { | ||
66 | D1(printk(KERN_DEBUG "read failed or short in writecheck(). ret %d, retlen %zd\n", ret, retlen)); | ||
67 | return; | ||
68 | } | ||
69 | ret = 0; | ||
70 | for (i=0; i<16; i++) { | ||
71 | if (buf[i] != 0xff) | ||
72 | ret = 1; | ||
73 | } | ||
74 | if (ret) { | ||
75 | printk(KERN_WARNING "ARGH. About to write node to 0x%08x on flash, but there are data already there:\n", ofs); | ||
76 | printk(KERN_WARNING "0x%08x: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", | ||
77 | ofs, | ||
78 | buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7], | ||
79 | buf[8], buf[9], buf[10], buf[11], buf[12], buf[13], buf[14], buf[15]); | ||
80 | } | ||
81 | } | ||
82 | #endif | ||
83 | |||
84 | |||
85 | /* jffs2_write_dnode - given a raw_inode, allocate a full_dnode for it, | ||
86 | write it to the flash, link it into the existing inode/fragment list */ | 58 | write it to the flash, link it into the existing inode/fragment list */ |
87 | 59 | ||
88 | struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const unsigned char *data, uint32_t datalen, uint32_t flash_ofs, int alloc_mode) | 60 | struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const unsigned char *data, uint32_t datalen, uint32_t flash_ofs, int alloc_mode) |
@@ -106,7 +78,7 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
106 | vecs[1].iov_base = (unsigned char *)data; | 78 | vecs[1].iov_base = (unsigned char *)data; |
107 | vecs[1].iov_len = datalen; | 79 | vecs[1].iov_len = datalen; |
108 | 80 | ||
109 | D1(writecheck(c, flash_ofs)); | 81 | jffs2_dbg_prewrite_paranoia_check(c, flash_ofs, vecs[0].iov_len + vecs[1].iov_len); |
110 | 82 | ||
111 | if (je32_to_cpu(ri->totlen) != sizeof(*ri) + datalen) { | 83 | if (je32_to_cpu(ri->totlen) != sizeof(*ri) + datalen) { |
112 | printk(KERN_WARNING "jffs2_write_dnode: ri->totlen (0x%08x) != sizeof(*ri) (0x%08zx) + datalen (0x%08x)\n", je32_to_cpu(ri->totlen), sizeof(*ri), datalen); | 84 | printk(KERN_WARNING "jffs2_write_dnode: ri->totlen (0x%08x) != sizeof(*ri) (0x%08zx) + datalen (0x%08x)\n", je32_to_cpu(ri->totlen), sizeof(*ri), datalen); |
@@ -114,7 +86,7 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
114 | raw = jffs2_alloc_raw_node_ref(); | 86 | raw = jffs2_alloc_raw_node_ref(); |
115 | if (!raw) | 87 | if (!raw) |
116 | return ERR_PTR(-ENOMEM); | 88 | return ERR_PTR(-ENOMEM); |
117 | 89 | ||
118 | fn = jffs2_alloc_full_dnode(); | 90 | fn = jffs2_alloc_full_dnode(); |
119 | if (!fn) { | 91 | if (!fn) { |
120 | jffs2_free_raw_node_ref(raw); | 92 | jffs2_free_raw_node_ref(raw); |
@@ -138,7 +110,7 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
138 | if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(ri->version) < f->highest_version)) { | 110 | if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(ri->version) < f->highest_version)) { |
139 | BUG_ON(!retried); | 111 | BUG_ON(!retried); |
140 | D1(printk(KERN_DEBUG "jffs2_write_dnode : dnode_version %d, " | 112 | D1(printk(KERN_DEBUG "jffs2_write_dnode : dnode_version %d, " |
141 | "highest version %d -> updating dnode\n", | 113 | "highest version %d -> updating dnode\n", |
142 | je32_to_cpu(ri->version), f->highest_version)); | 114 | je32_to_cpu(ri->version), f->highest_version)); |
143 | ri->version = cpu_to_je32(++f->highest_version); | 115 | ri->version = cpu_to_je32(++f->highest_version); |
144 | ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); | 116 | ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); |
@@ -148,7 +120,7 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
148 | (alloc_mode==ALLOC_GC)?0:f->inocache->ino); | 120 | (alloc_mode==ALLOC_GC)?0:f->inocache->ino); |
149 | 121 | ||
150 | if (ret || (retlen != sizeof(*ri) + datalen)) { | 122 | if (ret || (retlen != sizeof(*ri) + datalen)) { |
151 | printk(KERN_NOTICE "Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n", | 123 | printk(KERN_NOTICE "Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n", |
152 | sizeof(*ri)+datalen, flash_ofs, ret, retlen); | 124 | sizeof(*ri)+datalen, flash_ofs, ret, retlen); |
153 | 125 | ||
154 | /* Mark the space as dirtied */ | 126 | /* Mark the space as dirtied */ |
@@ -156,10 +128,10 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
156 | /* Doesn't belong to any inode */ | 128 | /* Doesn't belong to any inode */ |
157 | raw->next_in_ino = NULL; | 129 | raw->next_in_ino = NULL; |
158 | 130 | ||
159 | /* Don't change raw->size to match retlen. We may have | 131 | /* Don't change raw->size to match retlen. We may have |
160 | written the node header already, and only the data will | 132 | written the node header already, and only the data will |
161 | seem corrupted, in which case the scan would skip over | 133 | seem corrupted, in which case the scan would skip over |
162 | any node we write before the original intended end of | 134 | any node we write before the original intended end of |
163 | this node */ | 135 | this node */ |
164 | raw->flash_offset |= REF_OBSOLETE; | 136 | raw->flash_offset |= REF_OBSOLETE; |
165 | jffs2_add_physical_node_ref(c, raw); | 137 | jffs2_add_physical_node_ref(c, raw); |
@@ -176,26 +148,28 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
176 | retried = 1; | 148 | retried = 1; |
177 | 149 | ||
178 | D1(printk(KERN_DEBUG "Retrying failed write.\n")); | 150 | D1(printk(KERN_DEBUG "Retrying failed write.\n")); |
179 | 151 | ||
180 | ACCT_SANITY_CHECK(c,jeb); | 152 | jffs2_dbg_acct_sanity_check(c,jeb); |
181 | D1(ACCT_PARANOIA_CHECK(jeb)); | 153 | jffs2_dbg_acct_paranoia_check(c, jeb); |
182 | 154 | ||
183 | if (alloc_mode == ALLOC_GC) { | 155 | if (alloc_mode == ALLOC_GC) { |
184 | ret = jffs2_reserve_space_gc(c, sizeof(*ri) + datalen, &flash_ofs, &dummy); | 156 | ret = jffs2_reserve_space_gc(c, sizeof(*ri) + datalen, &flash_ofs, |
157 | &dummy, JFFS2_SUMMARY_INODE_SIZE); | ||
185 | } else { | 158 | } else { |
186 | /* Locking pain */ | 159 | /* Locking pain */ |
187 | up(&f->sem); | 160 | up(&f->sem); |
188 | jffs2_complete_reservation(c); | 161 | jffs2_complete_reservation(c); |
189 | 162 | ||
190 | ret = jffs2_reserve_space(c, sizeof(*ri) + datalen, &flash_ofs, &dummy, alloc_mode); | 163 | ret = jffs2_reserve_space(c, sizeof(*ri) + datalen, &flash_ofs, |
164 | &dummy, alloc_mode, JFFS2_SUMMARY_INODE_SIZE); | ||
191 | down(&f->sem); | 165 | down(&f->sem); |
192 | } | 166 | } |
193 | 167 | ||
194 | if (!ret) { | 168 | if (!ret) { |
195 | D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs)); | 169 | D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs)); |
196 | 170 | ||
197 | ACCT_SANITY_CHECK(c,jeb); | 171 | jffs2_dbg_acct_sanity_check(c,jeb); |
198 | D1(ACCT_PARANOIA_CHECK(jeb)); | 172 | jffs2_dbg_acct_paranoia_check(c, jeb); |
199 | 173 | ||
200 | goto retry; | 174 | goto retry; |
201 | } | 175 | } |
@@ -207,9 +181,9 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
207 | return ERR_PTR(ret?ret:-EIO); | 181 | return ERR_PTR(ret?ret:-EIO); |
208 | } | 182 | } |
209 | /* Mark the space used */ | 183 | /* Mark the space used */ |
210 | /* If node covers at least a whole page, or if it starts at the | 184 | /* If node covers at least a whole page, or if it starts at the |
211 | beginning of a page and runs to the end of the file, or if | 185 | beginning of a page and runs to the end of the file, or if |
212 | it's a hole node, mark it REF_PRISTINE, else REF_NORMAL. | 186 | it's a hole node, mark it REF_PRISTINE, else REF_NORMAL. |
213 | */ | 187 | */ |
214 | if ((je32_to_cpu(ri->dsize) >= PAGE_CACHE_SIZE) || | 188 | if ((je32_to_cpu(ri->dsize) >= PAGE_CACHE_SIZE) || |
215 | ( ((je32_to_cpu(ri->offset)&(PAGE_CACHE_SIZE-1))==0) && | 189 | ( ((je32_to_cpu(ri->offset)&(PAGE_CACHE_SIZE-1))==0) && |
@@ -227,12 +201,12 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
227 | spin_unlock(&c->erase_completion_lock); | 201 | spin_unlock(&c->erase_completion_lock); |
228 | 202 | ||
229 | D1(printk(KERN_DEBUG "jffs2_write_dnode wrote node at 0x%08x(%d) with dsize 0x%x, csize 0x%x, node_crc 0x%08x, data_crc 0x%08x, totlen 0x%08x\n", | 203 | D1(printk(KERN_DEBUG "jffs2_write_dnode wrote node at 0x%08x(%d) with dsize 0x%x, csize 0x%x, node_crc 0x%08x, data_crc 0x%08x, totlen 0x%08x\n", |
230 | flash_ofs, ref_flags(raw), je32_to_cpu(ri->dsize), | 204 | flash_ofs, ref_flags(raw), je32_to_cpu(ri->dsize), |
231 | je32_to_cpu(ri->csize), je32_to_cpu(ri->node_crc), | 205 | je32_to_cpu(ri->csize), je32_to_cpu(ri->node_crc), |
232 | je32_to_cpu(ri->data_crc), je32_to_cpu(ri->totlen))); | 206 | je32_to_cpu(ri->data_crc), je32_to_cpu(ri->totlen))); |
233 | 207 | ||
234 | if (retried) { | 208 | if (retried) { |
235 | ACCT_SANITY_CHECK(c,NULL); | 209 | jffs2_dbg_acct_sanity_check(c,NULL); |
236 | } | 210 | } |
237 | 211 | ||
238 | return fn; | 212 | return fn; |
@@ -247,10 +221,9 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff | |||
247 | int retried = 0; | 221 | int retried = 0; |
248 | int ret; | 222 | int ret; |
249 | 223 | ||
250 | D1(printk(KERN_DEBUG "jffs2_write_dirent(ino #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x)\n", | 224 | D1(printk(KERN_DEBUG "jffs2_write_dirent(ino #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x)\n", |
251 | je32_to_cpu(rd->pino), name, name, je32_to_cpu(rd->ino), | 225 | je32_to_cpu(rd->pino), name, name, je32_to_cpu(rd->ino), |
252 | je32_to_cpu(rd->name_crc))); | 226 | je32_to_cpu(rd->name_crc))); |
253 | D1(writecheck(c, flash_ofs)); | ||
254 | 227 | ||
255 | D1(if(je32_to_cpu(rd->hdr_crc) != crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)) { | 228 | D1(if(je32_to_cpu(rd->hdr_crc) != crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)) { |
256 | printk(KERN_CRIT "Eep. CRC not correct in jffs2_write_dirent()\n"); | 229 | printk(KERN_CRIT "Eep. CRC not correct in jffs2_write_dirent()\n"); |
@@ -262,7 +235,9 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff | |||
262 | vecs[0].iov_len = sizeof(*rd); | 235 | vecs[0].iov_len = sizeof(*rd); |
263 | vecs[1].iov_base = (unsigned char *)name; | 236 | vecs[1].iov_base = (unsigned char *)name; |
264 | vecs[1].iov_len = namelen; | 237 | vecs[1].iov_len = namelen; |
265 | 238 | ||
239 | jffs2_dbg_prewrite_paranoia_check(c, flash_ofs, vecs[0].iov_len + vecs[1].iov_len); | ||
240 | |||
266 | raw = jffs2_alloc_raw_node_ref(); | 241 | raw = jffs2_alloc_raw_node_ref(); |
267 | 242 | ||
268 | if (!raw) | 243 | if (!raw) |
@@ -301,7 +276,7 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff | |||
301 | ret = jffs2_flash_writev(c, vecs, 2, flash_ofs, &retlen, | 276 | ret = jffs2_flash_writev(c, vecs, 2, flash_ofs, &retlen, |
302 | (alloc_mode==ALLOC_GC)?0:je32_to_cpu(rd->pino)); | 277 | (alloc_mode==ALLOC_GC)?0:je32_to_cpu(rd->pino)); |
303 | if (ret || (retlen != sizeof(*rd) + namelen)) { | 278 | if (ret || (retlen != sizeof(*rd) + namelen)) { |
304 | printk(KERN_NOTICE "Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n", | 279 | printk(KERN_NOTICE "Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n", |
305 | sizeof(*rd)+namelen, flash_ofs, ret, retlen); | 280 | sizeof(*rd)+namelen, flash_ofs, ret, retlen); |
306 | /* Mark the space as dirtied */ | 281 | /* Mark the space as dirtied */ |
307 | if (retlen) { | 282 | if (retlen) { |
@@ -322,24 +297,26 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff | |||
322 | 297 | ||
323 | D1(printk(KERN_DEBUG "Retrying failed write.\n")); | 298 | D1(printk(KERN_DEBUG "Retrying failed write.\n")); |
324 | 299 | ||
325 | ACCT_SANITY_CHECK(c,jeb); | 300 | jffs2_dbg_acct_sanity_check(c,jeb); |
326 | D1(ACCT_PARANOIA_CHECK(jeb)); | 301 | jffs2_dbg_acct_paranoia_check(c, jeb); |
327 | 302 | ||
328 | if (alloc_mode == ALLOC_GC) { | 303 | if (alloc_mode == ALLOC_GC) { |
329 | ret = jffs2_reserve_space_gc(c, sizeof(*rd) + namelen, &flash_ofs, &dummy); | 304 | ret = jffs2_reserve_space_gc(c, sizeof(*rd) + namelen, &flash_ofs, |
305 | &dummy, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); | ||
330 | } else { | 306 | } else { |
331 | /* Locking pain */ | 307 | /* Locking pain */ |
332 | up(&f->sem); | 308 | up(&f->sem); |
333 | jffs2_complete_reservation(c); | 309 | jffs2_complete_reservation(c); |
334 | 310 | ||
335 | ret = jffs2_reserve_space(c, sizeof(*rd) + namelen, &flash_ofs, &dummy, alloc_mode); | 311 | ret = jffs2_reserve_space(c, sizeof(*rd) + namelen, &flash_ofs, |
312 | &dummy, alloc_mode, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); | ||
336 | down(&f->sem); | 313 | down(&f->sem); |
337 | } | 314 | } |
338 | 315 | ||
339 | if (!ret) { | 316 | if (!ret) { |
340 | D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs)); | 317 | D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs)); |
341 | ACCT_SANITY_CHECK(c,jeb); | 318 | jffs2_dbg_acct_sanity_check(c,jeb); |
342 | D1(ACCT_PARANOIA_CHECK(jeb)); | 319 | jffs2_dbg_acct_paranoia_check(c, jeb); |
343 | goto retry; | 320 | goto retry; |
344 | } | 321 | } |
345 | D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret)); | 322 | D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret)); |
@@ -359,7 +336,7 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff | |||
359 | spin_unlock(&c->erase_completion_lock); | 336 | spin_unlock(&c->erase_completion_lock); |
360 | 337 | ||
361 | if (retried) { | 338 | if (retried) { |
362 | ACCT_SANITY_CHECK(c,NULL); | 339 | jffs2_dbg_acct_sanity_check(c,NULL); |
363 | } | 340 | } |
364 | 341 | ||
365 | return fd; | 342 | return fd; |
@@ -369,7 +346,7 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff | |||
369 | we don't have to go digging in struct inode or its equivalent. It should set: | 346 | we don't have to go digging in struct inode or its equivalent. It should set: |
370 | mode, uid, gid, (starting)isize, atime, ctime, mtime */ | 347 | mode, uid, gid, (starting)isize, atime, ctime, mtime */ |
371 | int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | 348 | int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, |
372 | struct jffs2_raw_inode *ri, unsigned char *buf, | 349 | struct jffs2_raw_inode *ri, unsigned char *buf, |
373 | uint32_t offset, uint32_t writelen, uint32_t *retlen) | 350 | uint32_t offset, uint32_t writelen, uint32_t *retlen) |
374 | { | 351 | { |
375 | int ret = 0; | 352 | int ret = 0; |
@@ -377,7 +354,7 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
377 | 354 | ||
378 | D1(printk(KERN_DEBUG "jffs2_write_inode_range(): Ino #%u, ofs 0x%x, len 0x%x\n", | 355 | D1(printk(KERN_DEBUG "jffs2_write_inode_range(): Ino #%u, ofs 0x%x, len 0x%x\n", |
379 | f->inocache->ino, offset, writelen)); | 356 | f->inocache->ino, offset, writelen)); |
380 | 357 | ||
381 | while(writelen) { | 358 | while(writelen) { |
382 | struct jffs2_full_dnode *fn; | 359 | struct jffs2_full_dnode *fn; |
383 | unsigned char *comprbuf = NULL; | 360 | unsigned char *comprbuf = NULL; |
@@ -389,7 +366,8 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
389 | retry: | 366 | retry: |
390 | D2(printk(KERN_DEBUG "jffs2_commit_write() loop: 0x%x to write to 0x%x\n", writelen, offset)); | 367 | D2(printk(KERN_DEBUG "jffs2_commit_write() loop: 0x%x to write to 0x%x\n", writelen, offset)); |
391 | 368 | ||
392 | ret = jffs2_reserve_space(c, sizeof(*ri) + JFFS2_MIN_DATA_LEN, &phys_ofs, &alloclen, ALLOC_NORMAL); | 369 | ret = jffs2_reserve_space(c, sizeof(*ri) + JFFS2_MIN_DATA_LEN, &phys_ofs, |
370 | &alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); | ||
393 | if (ret) { | 371 | if (ret) { |
394 | D1(printk(KERN_DEBUG "jffs2_reserve_space returned %d\n", ret)); | 372 | D1(printk(KERN_DEBUG "jffs2_reserve_space returned %d\n", ret)); |
395 | break; | 373 | break; |
@@ -473,10 +451,11 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str | |||
473 | uint32_t alloclen, phys_ofs; | 451 | uint32_t alloclen, phys_ofs; |
474 | int ret; | 452 | int ret; |
475 | 453 | ||
476 | /* Try to reserve enough space for both node and dirent. | 454 | /* Try to reserve enough space for both node and dirent. |
477 | * Just the node will do for now, though | 455 | * Just the node will do for now, though |
478 | */ | 456 | */ |
479 | ret = jffs2_reserve_space(c, sizeof(*ri), &phys_ofs, &alloclen, ALLOC_NORMAL); | 457 | ret = jffs2_reserve_space(c, sizeof(*ri), &phys_ofs, &alloclen, ALLOC_NORMAL, |
458 | JFFS2_SUMMARY_INODE_SIZE); | ||
480 | D1(printk(KERN_DEBUG "jffs2_do_create(): reserved 0x%x bytes\n", alloclen)); | 459 | D1(printk(KERN_DEBUG "jffs2_do_create(): reserved 0x%x bytes\n", alloclen)); |
481 | if (ret) { | 460 | if (ret) { |
482 | up(&f->sem); | 461 | up(&f->sem); |
@@ -498,15 +477,16 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str | |||
498 | jffs2_complete_reservation(c); | 477 | jffs2_complete_reservation(c); |
499 | return PTR_ERR(fn); | 478 | return PTR_ERR(fn); |
500 | } | 479 | } |
501 | /* No data here. Only a metadata node, which will be | 480 | /* No data here. Only a metadata node, which will be |
502 | obsoleted by the first data write | 481 | obsoleted by the first data write |
503 | */ | 482 | */ |
504 | f->metadata = fn; | 483 | f->metadata = fn; |
505 | 484 | ||
506 | up(&f->sem); | 485 | up(&f->sem); |
507 | jffs2_complete_reservation(c); | 486 | jffs2_complete_reservation(c); |
508 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL); | 487 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, |
509 | 488 | ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); | |
489 | |||
510 | if (ret) { | 490 | if (ret) { |
511 | /* Eep. */ | 491 | /* Eep. */ |
512 | D1(printk(KERN_DEBUG "jffs2_reserve_space() for dirent failed\n")); | 492 | D1(printk(KERN_DEBUG "jffs2_reserve_space() for dirent failed\n")); |
@@ -539,9 +519,9 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str | |||
539 | fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_NORMAL); | 519 | fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_NORMAL); |
540 | 520 | ||
541 | jffs2_free_raw_dirent(rd); | 521 | jffs2_free_raw_dirent(rd); |
542 | 522 | ||
543 | if (IS_ERR(fd)) { | 523 | if (IS_ERR(fd)) { |
544 | /* dirent failed to write. Delete the inode normally | 524 | /* dirent failed to write. Delete the inode normally |
545 | as if it were the final unlink() */ | 525 | as if it were the final unlink() */ |
546 | jffs2_complete_reservation(c); | 526 | jffs2_complete_reservation(c); |
547 | up(&dir_f->sem); | 527 | up(&dir_f->sem); |
@@ -560,14 +540,15 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str | |||
560 | 540 | ||
561 | 541 | ||
562 | int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, | 542 | int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, |
563 | const char *name, int namelen, struct jffs2_inode_info *dead_f) | 543 | const char *name, int namelen, struct jffs2_inode_info *dead_f, |
544 | uint32_t time) | ||
564 | { | 545 | { |
565 | struct jffs2_raw_dirent *rd; | 546 | struct jffs2_raw_dirent *rd; |
566 | struct jffs2_full_dirent *fd; | 547 | struct jffs2_full_dirent *fd; |
567 | uint32_t alloclen, phys_ofs; | 548 | uint32_t alloclen, phys_ofs; |
568 | int ret; | 549 | int ret; |
569 | 550 | ||
570 | if (1 /* alternative branch needs testing */ || | 551 | if (1 /* alternative branch needs testing */ || |
571 | !jffs2_can_mark_obsolete(c)) { | 552 | !jffs2_can_mark_obsolete(c)) { |
572 | /* We can't mark stuff obsolete on the medium. We need to write a deletion dirent */ | 553 | /* We can't mark stuff obsolete on the medium. We need to write a deletion dirent */ |
573 | 554 | ||
@@ -575,7 +556,8 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, | |||
575 | if (!rd) | 556 | if (!rd) |
576 | return -ENOMEM; | 557 | return -ENOMEM; |
577 | 558 | ||
578 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_DELETION); | 559 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, |
560 | ALLOC_DELETION, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); | ||
579 | if (ret) { | 561 | if (ret) { |
580 | jffs2_free_raw_dirent(rd); | 562 | jffs2_free_raw_dirent(rd); |
581 | return ret; | 563 | return ret; |
@@ -588,18 +570,18 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, | |||
588 | rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT); | 570 | rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT); |
589 | rd->totlen = cpu_to_je32(sizeof(*rd) + namelen); | 571 | rd->totlen = cpu_to_je32(sizeof(*rd) + namelen); |
590 | rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)); | 572 | rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)); |
591 | 573 | ||
592 | rd->pino = cpu_to_je32(dir_f->inocache->ino); | 574 | rd->pino = cpu_to_je32(dir_f->inocache->ino); |
593 | rd->version = cpu_to_je32(++dir_f->highest_version); | 575 | rd->version = cpu_to_je32(++dir_f->highest_version); |
594 | rd->ino = cpu_to_je32(0); | 576 | rd->ino = cpu_to_je32(0); |
595 | rd->mctime = cpu_to_je32(get_seconds()); | 577 | rd->mctime = cpu_to_je32(time); |
596 | rd->nsize = namelen; | 578 | rd->nsize = namelen; |
597 | rd->type = DT_UNKNOWN; | 579 | rd->type = DT_UNKNOWN; |
598 | rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); | 580 | rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); |
599 | rd->name_crc = cpu_to_je32(crc32(0, name, namelen)); | 581 | rd->name_crc = cpu_to_je32(crc32(0, name, namelen)); |
600 | 582 | ||
601 | fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_DELETION); | 583 | fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_DELETION); |
602 | 584 | ||
603 | jffs2_free_raw_dirent(rd); | 585 | jffs2_free_raw_dirent(rd); |
604 | 586 | ||
605 | if (IS_ERR(fd)) { | 587 | if (IS_ERR(fd)) { |
@@ -618,7 +600,7 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, | |||
618 | down(&dir_f->sem); | 600 | down(&dir_f->sem); |
619 | 601 | ||
620 | while ((*prev) && (*prev)->nhash <= nhash) { | 602 | while ((*prev) && (*prev)->nhash <= nhash) { |
621 | if ((*prev)->nhash == nhash && | 603 | if ((*prev)->nhash == nhash && |
622 | !memcmp((*prev)->name, name, namelen) && | 604 | !memcmp((*prev)->name, name, namelen) && |
623 | !(*prev)->name[namelen]) { | 605 | !(*prev)->name[namelen]) { |
624 | struct jffs2_full_dirent *this = *prev; | 606 | struct jffs2_full_dirent *this = *prev; |
@@ -639,7 +621,7 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, | |||
639 | /* dead_f is NULL if this was a rename not a real unlink */ | 621 | /* dead_f is NULL if this was a rename not a real unlink */ |
640 | /* Also catch the !f->inocache case, where there was a dirent | 622 | /* Also catch the !f->inocache case, where there was a dirent |
641 | pointing to an inode which didn't exist. */ | 623 | pointing to an inode which didn't exist. */ |
642 | if (dead_f && dead_f->inocache) { | 624 | if (dead_f && dead_f->inocache) { |
643 | 625 | ||
644 | down(&dead_f->sem); | 626 | down(&dead_f->sem); |
645 | 627 | ||
@@ -647,9 +629,9 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, | |||
647 | while (dead_f->dents) { | 629 | while (dead_f->dents) { |
648 | /* There can be only deleted ones */ | 630 | /* There can be only deleted ones */ |
649 | fd = dead_f->dents; | 631 | fd = dead_f->dents; |
650 | 632 | ||
651 | dead_f->dents = fd->next; | 633 | dead_f->dents = fd->next; |
652 | 634 | ||
653 | if (fd->ino) { | 635 | if (fd->ino) { |
654 | printk(KERN_WARNING "Deleting inode #%u with active dentry \"%s\"->ino #%u\n", | 636 | printk(KERN_WARNING "Deleting inode #%u with active dentry \"%s\"->ino #%u\n", |
655 | dead_f->inocache->ino, fd->name, fd->ino); | 637 | dead_f->inocache->ino, fd->name, fd->ino); |
@@ -673,7 +655,7 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, | |||
673 | } | 655 | } |
674 | 656 | ||
675 | 657 | ||
676 | int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino, uint8_t type, const char *name, int namelen) | 658 | int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino, uint8_t type, const char *name, int namelen, uint32_t time) |
677 | { | 659 | { |
678 | struct jffs2_raw_dirent *rd; | 660 | struct jffs2_raw_dirent *rd; |
679 | struct jffs2_full_dirent *fd; | 661 | struct jffs2_full_dirent *fd; |
@@ -684,12 +666,13 @@ int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint | |||
684 | if (!rd) | 666 | if (!rd) |
685 | return -ENOMEM; | 667 | return -ENOMEM; |
686 | 668 | ||
687 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL); | 669 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, |
670 | ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); | ||
688 | if (ret) { | 671 | if (ret) { |
689 | jffs2_free_raw_dirent(rd); | 672 | jffs2_free_raw_dirent(rd); |
690 | return ret; | 673 | return ret; |
691 | } | 674 | } |
692 | 675 | ||
693 | down(&dir_f->sem); | 676 | down(&dir_f->sem); |
694 | 677 | ||
695 | /* Build a deletion node */ | 678 | /* Build a deletion node */ |
@@ -701,7 +684,7 @@ int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint | |||
701 | rd->pino = cpu_to_je32(dir_f->inocache->ino); | 684 | rd->pino = cpu_to_je32(dir_f->inocache->ino); |
702 | rd->version = cpu_to_je32(++dir_f->highest_version); | 685 | rd->version = cpu_to_je32(++dir_f->highest_version); |
703 | rd->ino = cpu_to_je32(ino); | 686 | rd->ino = cpu_to_je32(ino); |
704 | rd->mctime = cpu_to_je32(get_seconds()); | 687 | rd->mctime = cpu_to_je32(time); |
705 | rd->nsize = namelen; | 688 | rd->nsize = namelen; |
706 | 689 | ||
707 | rd->type = type; | 690 | rd->type = type; |
@@ -710,7 +693,7 @@ int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint | |||
710 | rd->name_crc = cpu_to_je32(crc32(0, name, namelen)); | 693 | rd->name_crc = cpu_to_je32(crc32(0, name, namelen)); |
711 | 694 | ||
712 | fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_NORMAL); | 695 | fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_NORMAL); |
713 | 696 | ||
714 | jffs2_free_raw_dirent(rd); | 697 | jffs2_free_raw_dirent(rd); |
715 | 698 | ||
716 | if (IS_ERR(fd)) { | 699 | if (IS_ERR(fd)) { |
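In write.c the old debug-only writecheck() is dropped in favour of jffs2_dbg_prewrite_paranoia_check(), which is handed the full length of the node about to be written rather than a fixed 16 bytes. The underlying test is the same idea: the target region must still read back as all 0xFF, i.e. freshly erased. The standalone sketch below captures that check under invented names; it is illustrative, not the kernel implementation.

```c
#include <stdio.h>
#include <string.h>

/* The removed writecheck() read back 16 bytes at the target offset and
 * warned if anything other than 0xFF was found, i.e. if the region was
 * not freshly erased.  This helper applies the same test over an
 * arbitrary length, which is what the new debug hook is given. */
static int region_is_erased(const unsigned char *buf, size_t len)
{
	for (size_t i = 0; i < len; i++)
		if (buf[i] != 0xff)
			return 0;
	return 1;
}

int main(void)
{
	unsigned char clean[16], dirty[16];

	memset(clean, 0xff, sizeof(clean));
	memset(dirty, 0xff, sizeof(dirty));
	dirty[3] = 0x19;	/* leftover data where a node is about to go */

	printf("clean region erased? %d\n", region_is_erased(clean, sizeof(clean)));
	printf("dirty region erased? %d\n", region_is_erased(dirty, sizeof(dirty)));
	return 0;
}
```

The other recurring change in this file is the extra argument to jffs2_reserve_space() and jffs2_reserve_space_gc(): each caller now also states how much room its node will need in the eraseblock summary (JFFS2_SUMMARY_INODE_SIZE or JFFS2_SUMMARY_DIRENT_SIZE(namelen)), so the allocator can keep that space free for the summary node. In the same spirit, jffs2_do_unlink() and jffs2_do_link() now take the dirent timestamp from the caller instead of calling get_seconds() themselves.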
diff --git a/fs/jffs2/writev.c b/fs/jffs2/writev.c index f079f8388566..c638ae1008de 100644 --- a/fs/jffs2/writev.c +++ b/fs/jffs2/writev.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: writev.c,v 1.6 2004/11/16 20:36:12 dwmw2 Exp $ | 10 | * $Id: writev.c,v 1.8 2005/09/09 15:11:58 havasi Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -42,9 +42,40 @@ static inline int mtd_fake_writev(struct mtd_info *mtd, const struct kvec *vecs, | |||
42 | int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs, | 42 | int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs, |
43 | unsigned long count, loff_t to, size_t *retlen) | 43 | unsigned long count, loff_t to, size_t *retlen) |
44 | { | 44 | { |
45 | if (!jffs2_is_writebuffered(c)) { | ||
46 | if (jffs2_sum_active()) { | ||
47 | int res; | ||
48 | res = jffs2_sum_add_kvec(c, vecs, count, (uint32_t) to); | ||
49 | if (res) { | ||
50 | return res; | ||
51 | } | ||
52 | } | ||
53 | } | ||
54 | |||
45 | if (c->mtd->writev) | 55 | if (c->mtd->writev) |
46 | return c->mtd->writev(c->mtd, vecs, count, to, retlen); | 56 | return c->mtd->writev(c->mtd, vecs, count, to, retlen); |
47 | else | 57 | else { |
48 | return mtd_fake_writev(c->mtd, vecs, count, to, retlen); | 58 | return mtd_fake_writev(c->mtd, vecs, count, to, retlen); |
59 | } | ||
49 | } | 60 | } |
50 | 61 | ||
62 | int jffs2_flash_direct_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, | ||
63 | size_t *retlen, const u_char *buf) | ||
64 | { | ||
65 | int ret; | ||
66 | ret = c->mtd->write(c->mtd, ofs, len, retlen, buf); | ||
67 | |||
68 | if (jffs2_sum_active()) { | ||
69 | struct kvec vecs[1]; | ||
70 | int res; | ||
71 | |||
72 | vecs[0].iov_base = (unsigned char *) buf; | ||
73 | vecs[0].iov_len = len; | ||
74 | |||
75 | res = jffs2_sum_add_kvec(c, vecs, 1, (uint32_t) ofs); | ||
76 | if (res) { | ||
77 | return res; | ||
78 | } | ||
79 | } | ||
80 | return ret; | ||
81 | } | ||
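The new jffs2_flash_direct_write() is what the first hunk in wbuf.c switches jffs2_flash_write() over to: instead of calling c->mtd->write() directly, the non-writebuffered path goes through a wrapper that performs the MTD write and, when summary support is active, also records the buffer via jffs2_sum_add_kvec() so the eraseblock summary can be assembled as nodes are written. The sketch below mirrors only that control flow; the struct, the helper names and the sample node are invented for the example.

```c
#include <stdio.h>
#include <string.h>

/* Illustrative sketch of the pattern added here: the direct write path
 * also hands every buffer it writes to a "summary collector" so a
 * per-eraseblock summary can be written out later. */

struct kvec {
	void *iov_base;
	size_t iov_len;
};

static size_t summary_bytes;	/* stand-in for the kernel's summary state */

static int sum_add_kvec(const struct kvec *vecs, unsigned long count,
			unsigned int ofs)
{
	for (unsigned long i = 0; i < count; i++)
		summary_bytes += vecs[i].iov_len;
	printf("summary: recorded %lu vec(s) at 0x%08x\n", count, ofs);
	return 0;
}

static int mtd_write(unsigned int ofs, const void *buf, size_t len,
		     size_t *retlen)
{
	(void)buf;		/* pretend the flash write succeeded in full */
	*retlen = len;
	printf("flash: wrote %zu bytes at 0x%08x\n", len, ofs);
	return 0;
}

static int direct_write(unsigned int ofs, const void *buf, size_t len,
			size_t *retlen)
{
	int ret = mtd_write(ofs, buf, len, retlen);
	struct kvec vec = { (void *)buf, len };

	/* As in the new code: record the write for the summary and let a
	 * collector failure override the write's return code. */
	int res = sum_add_kvec(&vec, 1, ofs);
	if (res)
		return res;

	return ret;
}

int main(void)
{
	unsigned char node[68];
	size_t retlen;

	memset(node, 0xff, sizeof(node));
	direct_write(0x4000, node, sizeof(node), &retlen);
	printf("summary now covers %zu byte(s)\n", summary_bytes);
	return 0;
}
```

jffs2_flash_direct_writev() gets the same treatment for the vector case: when the filesystem is not write-buffered, the kvecs are passed to jffs2_sum_add_kvec() before the write is issued via c->mtd->writev() or mtd_fake_writev().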