diff options
author | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400 |
commit | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch) | |
tree | 0bba044c4ce775e45a88a51686b5d9f90697ea9d /fs/jffs2 |
Linux-2.6.12-rc2v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'fs/jffs2')
35 files changed, 12564 insertions, 0 deletions
diff --git a/fs/jffs2/LICENCE b/fs/jffs2/LICENCE new file mode 100644 index 000000000000..cd81d83e4ad2 --- /dev/null +++ b/fs/jffs2/LICENCE | |||
@@ -0,0 +1,35 @@ | |||
1 | The files in this directory and elsewhere which refer to this LICENCE | ||
2 | file are part of JFFS2, the Journalling Flash File System v2. | ||
3 | |||
4 | Copyright (C) 2001, 2002 Red Hat, Inc. | ||
5 | |||
6 | JFFS2 is free software; you can redistribute it and/or modify it under | ||
7 | the terms of the GNU General Public License as published by the Free | ||
8 | Software Foundation; either version 2 or (at your option) any later | ||
9 | version. | ||
10 | |||
11 | JFFS2 is distributed in the hope that it will be useful, but WITHOUT | ||
12 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
13 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
14 | for more details. | ||
15 | |||
16 | You should have received a copy of the GNU General Public License along | ||
17 | with JFFS2; if not, write to the Free Software Foundation, Inc., | ||
18 | 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. | ||
19 | |||
20 | As a special exception, if other files instantiate templates or use | ||
21 | macros or inline functions from these files, or you compile these | ||
22 | files and link them with other works to produce a work based on these | ||
23 | files, these files do not by themselves cause the resulting work to be | ||
24 | covered by the GNU General Public License. However the source code for | ||
25 | these files must still be made available in accordance with section (3) | ||
26 | of the GNU General Public License. | ||
27 | |||
28 | This exception does not invalidate any other reasons why a work based on | ||
29 | this file might be covered by the GNU General Public License. | ||
30 | |||
31 | For information on obtaining alternative licences for JFFS2, see | ||
32 | http://sources.redhat.com/jffs2/jffs2-licence.html | ||
33 | |||
34 | |||
35 | $Id: LICENCE,v 1.1 2002/05/20 14:56:37 dwmw2 Exp $ | ||
diff --git a/fs/jffs2/Makefile b/fs/jffs2/Makefile new file mode 100644 index 000000000000..e3c38ccf9c7d --- /dev/null +++ b/fs/jffs2/Makefile | |||
@@ -0,0 +1,18 @@ | |||
1 | # | ||
2 | # Makefile for the Linux Journalling Flash File System v2 (JFFS2) | ||
3 | # | ||
4 | # $Id: Makefile.common,v 1.7 2004/11/03 12:57:38 jwboyer Exp $ | ||
5 | # | ||
6 | |||
7 | obj-$(CONFIG_JFFS2_FS) += jffs2.o | ||
8 | |||
9 | jffs2-y := compr.o dir.o file.o ioctl.o nodelist.o malloc.o | ||
10 | jffs2-y += read.o nodemgmt.o readinode.o write.o scan.o gc.o | ||
11 | jffs2-y += symlink.o build.o erase.o background.o fs.o writev.o | ||
12 | jffs2-y += super.o | ||
13 | |||
14 | jffs2-$(CONFIG_JFFS2_FS_NAND) += wbuf.o | ||
15 | jffs2-$(CONFIG_JFFS2_FS_NOR_ECC) += wbuf.o | ||
16 | jffs2-$(CONFIG_JFFS2_RUBIN) += compr_rubin.o | ||
17 | jffs2-$(CONFIG_JFFS2_RTIME) += compr_rtime.o | ||
18 | jffs2-$(CONFIG_JFFS2_ZLIB) += compr_zlib.o | ||
diff --git a/fs/jffs2/README.Locking b/fs/jffs2/README.Locking new file mode 100644 index 000000000000..49771cf8513a --- /dev/null +++ b/fs/jffs2/README.Locking | |||
@@ -0,0 +1,148 @@ | |||
1 | $Id: README.Locking,v 1.9 2004/11/20 10:35:40 dwmw2 Exp $ | ||
2 | |||
3 | JFFS2 LOCKING DOCUMENTATION | ||
4 | --------------------------- | ||
5 | |||
6 | At least theoretically, JFFS2 does not require the Big Kernel Lock | ||
7 | (BKL), which was always helpfully obtained for it by Linux 2.4 VFS | ||
8 | code. It has its own locking, as described below. | ||
9 | |||
10 | This document attempts to describe the existing locking rules for | ||
11 | JFFS2. It is not expected to remain perfectly up to date, but ought to | ||
12 | be fairly close. | ||
13 | |||
14 | |||
15 | alloc_sem | ||
16 | --------- | ||
17 | |||
18 | The alloc_sem is a per-filesystem semaphore, used primarily to ensure | ||
19 | contiguous allocation of space on the medium. It is automatically | ||
20 | obtained during space allocations (jffs2_reserve_space()) and freed | ||
21 | upon write completion (jffs2_complete_reservation()). Note that | ||
22 | the garbage collector will obtain this right at the beginning of | ||
23 | jffs2_garbage_collect_pass() and release it at the end, thereby | ||
24 | preventing any other write activity on the file system during a | ||
25 | garbage collect pass. | ||
26 | |||
27 | When writing new nodes, the alloc_sem must be held until the new nodes | ||
28 | have been properly linked into the data structures for the inode to | ||
29 | which they belong. This is for the benefit of NAND flash - adding new | ||
30 | nodes to an inode may obsolete old ones, and by holding the alloc_sem | ||
31 | until this happens we ensure that any data in the write-buffer at the | ||
32 | time this happens are part of the new node, not just something that | ||
33 | was written afterwards. Hence, we can ensure the newly-obsoleted nodes | ||
34 | don't actually get erased until the write-buffer has been flushed to | ||
35 | the medium. | ||
36 | |||
37 | With the introduction of NAND flash support and the write-buffer, | ||
38 | the alloc_sem is also used to protect the wbuf-related members of the | ||
39 | jffs2_sb_info structure. Atomically reading the wbuf_len member to see | ||
40 | if the wbuf is currently holding any data is permitted, though. | ||
41 | |||
42 | Ordering constraints: See f->sem. | ||
43 | |||
44 | |||
45 | File Semaphore f->sem | ||
46 | --------------------- | ||
47 | |||
48 | This is the JFFS2-internal equivalent of the inode semaphore i->i_sem. | ||
49 | It protects the contents of the jffs2_inode_info private inode data, | ||
50 | including the linked list of node fragments (but see the notes below on | ||
51 | erase_completion_lock), etc. | ||
52 | |||
53 | The reason that the i_sem itself isn't used for this purpose is to | ||
54 | avoid deadlocks with garbage collection -- the VFS will lock the i_sem | ||
55 | before calling a function which may need to allocate space. The | ||
56 | allocation may trigger garbage-collection, which may need to move a | ||
57 | node belonging to the inode which was locked in the first place by the | ||
58 | VFS. If the garbage collection code were to attempt to lock the i_sem | ||
59 | of the inode from which it's garbage-collecting a physical node, this | ||
60 | would lead to deadlock, unless we played games with unlocking the i_sem | ||
61 | before calling the space allocation functions. | ||
62 | |||
63 | Instead of playing such games, we just have an extra internal | ||
64 | semaphore, which is obtained by the garbage collection code and also | ||
65 | by the normal file system code _after_ allocation of space. | ||
66 | |||
67 | Ordering constraints: | ||
68 | |||
69 | 1. Never attempt to allocate space or lock alloc_sem with | ||
70 | any f->sem held. | ||
71 | 2. Never attempt to lock two file semaphores in one thread. | ||
72 | No ordering rules have been made for doing so. | ||
73 | |||
74 | |||
75 | erase_completion_lock spinlock | ||
76 | ------------------------------ | ||
77 | |||
78 | This is used to serialise access to the eraseblock lists, to the | ||
79 | per-eraseblock lists of physical jffs2_raw_node_ref structures, and | ||
80 | (NB) the per-inode list of physical nodes. The latter is a special | ||
81 | case - see below. | ||
82 | |||
83 | As the MTD API no longer permits erase-completion callback functions | ||
84 | to be called from bottom-half (timer) context (on the basis that nobody | ||
85 | ever actually implemented such a thing), it's now sufficient to use | ||
86 | a simple spin_lock() rather than spin_lock_bh(). | ||
87 | |||
88 | Note that the per-inode list of physical nodes (f->nodes) is a special | ||
89 | case. Any changes to _valid_ nodes (i.e. ->flash_offset & 1 == 0) in | ||
90 | the list are protected by the file semaphore f->sem. But the erase | ||
91 | code may remove _obsolete_ nodes from the list while holding only the | ||
92 | erase_completion_lock. So you can walk the list only while holding the | ||
93 | erase_completion_lock, and can drop the lock temporarily mid-walk as | ||
94 | long as the pointer you're holding is to a _valid_ node, not an | ||
95 | obsolete one. | ||
96 | |||
97 | The erase_completion_lock is also used to protect the c->gc_task | ||
98 | pointer when the garbage collection thread exits. The code to kill the | ||
99 | GC thread locks it, sends the signal, then unlocks it - while the GC | ||
100 | thread itself locks it, zeroes c->gc_task, then unlocks on the exit path. | ||
101 | |||
102 | |||
103 | inocache_lock spinlock | ||
104 | ---------------------- | ||
105 | |||
106 | This spinlock protects the hashed list (c->inocache_list) of the | ||
107 | in-core jffs2_inode_cache objects (each inode in JFFS2 has the | ||
108 | correspondent jffs2_inode_cache object). So, the inocache_lock | ||
109 | has to be locked while walking the c->inocache_list hash buckets. | ||
110 | |||
111 | Note, the f->sem guarantees that the correspondent jffs2_inode_cache | ||
112 | will not be removed. So, it is allowed to access it without locking | ||
113 | the inocache_lock spinlock. | ||
114 | |||
115 | Ordering constraints: | ||
116 | |||
117 | If both erase_completion_lock and inocache_lock are needed, the | ||
118 | c->erase_completion_lock has to be acquired first. | ||
119 | |||
120 | |||
121 | erase_free_sem | ||
122 | -------------- | ||
123 | |||
124 | This semaphore is only used by the erase code which frees obsolete | ||
125 | node references and the jffs2_garbage_collect_deletion_dirent() | ||
126 | function. The latter function on NAND flash must read _obsolete_ nodes | ||
127 | to determine whether the 'deletion dirent' under consideration can be | ||
128 | discarded or whether it is still required to show that an inode has | ||
129 | been unlinked. Because reading from the flash may sleep, the | ||
130 | erase_completion_lock cannot be held, so an alternative, more | ||
131 | heavyweight lock was required to prevent the erase code from freeing | ||
132 | the jffs2_raw_node_ref structures in question while the garbage | ||
133 | collection code is looking at them. | ||
134 | |||
135 | Suggestions for alternative solutions to this problem would be welcomed. | ||
136 | |||
137 | |||
138 | wbuf_sem | ||
139 | -------- | ||
140 | |||
141 | This read/write semaphore protects against concurrent access to the | ||
142 | write-behind buffer ('wbuf') used for flash chips where we must write | ||
143 | in blocks. It protects both the contents of the wbuf and the metadata | ||
144 | which indicates which flash region (if any) is currently covered by | ||
145 | the buffer. | ||
146 | |||
147 | Ordering constraints: | ||
148 | Lock wbuf_sem last, after the alloc_sem and/or f->sem. | ||
diff --git a/fs/jffs2/TODO b/fs/jffs2/TODO new file mode 100644 index 000000000000..2bff82fd221f --- /dev/null +++ b/fs/jffs2/TODO | |||
@@ -0,0 +1,40 @@ | |||
1 | $Id: TODO,v 1.10 2002/09/09 16:31:21 dwmw2 Exp $ | ||
2 | |||
3 | - disable compression in commit_write()? | ||
4 | - fine-tune the allocation / GC thresholds | ||
5 | - chattr support - turning on/off and tuning compression per-inode | ||
6 | - checkpointing (do we need this? scan is quite fast) | ||
7 | - make the scan code populate real inodes so read_inode just after | ||
8 | mount doesn't have to read the flash twice for large files. | ||
9 | Make this a per-inode option, changeable with chattr, so you can | ||
10 | decide which inodes should be in-core immediately after mount. | ||
11 | - test, test, test | ||
12 | |||
13 | - NAND flash support: | ||
14 | - flush_wbuf using GC to fill it, don't just pad. | ||
15 | - Deal with write errors. Data don't get lost - we just have to write | ||
16 | the affected node(s) out again somewhere else. | ||
17 | - make fsync flush only if actually required | ||
18 | - make sys_sync() work. | ||
19 | - reboot notifier | ||
20 | - timed flush of old wbuf | ||
21 | - fix magical second arg of jffs2_flush_wbuf(). Split into two or more functions instead. | ||
22 | |||
23 | |||
24 | - Optimisations: | ||
25 | - Stop GC from decompressing and immediately recompressing nodes which could | ||
26 | just be copied intact. (We now keep track of REF_PRISTINE flag. Easy now.) | ||
27 | - Furthermore, in the case where it could be copied intact we don't even need | ||
28 | to call iget() for it -- if we use (raw_node_raw->flash_offset & 2) as a flag | ||
29 | to show a node can be copied intact and it's _not_ in icache, we could just do | ||
30 | it, fix up the next_in_ino list and move on. We would need a way to find out | ||
31 | _whether_ it's in icache though -- if it's in icache we also need to do the | ||
32 | fragment lists, etc. P'raps a flag or pointer in the jffs2_inode_cache could | ||
33 | help. (We have half of this now.) | ||
34 | - Stop keeping name in-core with struct jffs2_full_dirent. If we keep the hash in | ||
35 | the full dirent, we only need to go to the flash in lookup() when we think we've | ||
36 | got a match, and in readdir(). | ||
37 | - Doubly-linked next_in_ino list to allow us to free obsoleted raw_node_refs immediately? | ||
38 | - Remove totlen from jffs2_raw_node_ref? Need to have totlen passed into | ||
39 | jffs2_mark_node_obsolete(). Can all callers work it out? | ||
40 | - Remove size from jffs2_raw_node_frag. | ||
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c new file mode 100644 index 000000000000..1be6de27dd81 --- /dev/null +++ b/fs/jffs2/background.c | |||
@@ -0,0 +1,140 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2001-2003 Red Hat, Inc. | ||
5 | * | ||
6 | * Created by David Woodhouse <dwmw2@infradead.org> | ||
7 | * | ||
8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
9 | * | ||
10 | * $Id: background.c,v 1.50 2004/11/16 20:36:10 dwmw2 Exp $ | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/jffs2.h> | ||
16 | #include <linux/mtd/mtd.h> | ||
17 | #include <linux/completion.h> | ||
18 | #include "nodelist.h" | ||
19 | |||
20 | |||
21 | static int jffs2_garbage_collect_thread(void *); | ||
22 | |||
23 | void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c) | ||
24 | { | ||
25 | spin_lock(&c->erase_completion_lock); | ||
26 | if (c->gc_task && jffs2_thread_should_wake(c)) | ||
27 | send_sig(SIGHUP, c->gc_task, 1); | ||
28 | spin_unlock(&c->erase_completion_lock); | ||
29 | } | ||
30 | |||
31 | /* This must only ever be called when no GC thread is currently running */ | ||
32 | int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c) | ||
33 | { | ||
34 | pid_t pid; | ||
35 | int ret = 0; | ||
36 | |||
37 | if (c->gc_task) | ||
38 | BUG(); | ||
39 | |||
40 | init_MUTEX_LOCKED(&c->gc_thread_start); | ||
41 | init_completion(&c->gc_thread_exit); | ||
42 | |||
43 | pid = kernel_thread(jffs2_garbage_collect_thread, c, CLONE_FS|CLONE_FILES); | ||
44 | if (pid < 0) { | ||
45 | printk(KERN_WARNING "fork failed for JFFS2 garbage collect thread: %d\n", -pid); | ||
46 | complete(&c->gc_thread_exit); | ||
47 | ret = pid; | ||
48 | } else { | ||
49 | /* Wait for it... */ | ||
50 | D1(printk(KERN_DEBUG "JFFS2: Garbage collect thread is pid %d\n", pid)); | ||
51 | down(&c->gc_thread_start); | ||
52 | } | ||
53 | |||
54 | return ret; | ||
55 | } | ||
56 | |||
57 | void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c) | ||
58 | { | ||
59 | spin_lock(&c->erase_completion_lock); | ||
60 | if (c->gc_task) { | ||
61 | D1(printk(KERN_DEBUG "jffs2: Killing GC task %d\n", c->gc_task->pid)); | ||
62 | send_sig(SIGKILL, c->gc_task, 1); | ||
63 | } | ||
64 | spin_unlock(&c->erase_completion_lock); | ||
65 | wait_for_completion(&c->gc_thread_exit); | ||
66 | } | ||
67 | |||
/*
 * Main loop of the background garbage-collection thread.  Sleeps until
 * woken (SIGHUP from jffs2_garbage_collect_trigger()), then runs GC
 * passes until jffs2_thread_should_wake() is satisfied again.  Exits on
 * SIGKILL (sent by jffs2_stop_garbage_collect_thread()) or when a GC
 * pass reports -ENOSPC.
 */
static int jffs2_garbage_collect_thread(void *_c)
{
	struct jffs2_sb_info *c = _c;

	/* Detach into a kernel daemon, one named thread per MTD device. */
	daemonize("jffs2_gcd_mtd%d", c->mtd->index);
	allow_signal(SIGKILL);
	allow_signal(SIGSTOP);
	allow_signal(SIGCONT);

	/* Publish ourselves, then release the starter blocked in
	   down(&c->gc_thread_start). */
	c->gc_task = current;
	up(&c->gc_thread_start);

	/* GC is background work; run at reduced priority. */
	set_user_nice(current, 10);

	for (;;) {
		/* SIGHUP is the wakeup signal; only accept it around the
		   sleep, it is disallowed again before the GC pass below. */
		allow_signal(SIGHUP);

		if (!jffs2_thread_should_wake(c)) {
			set_current_state (TASK_INTERRUPTIBLE);
			D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread sleeping...\n"));
			/* Yes, there's a race here; we checked jffs2_thread_should_wake()
			   before setting current->state to TASK_INTERRUPTIBLE. But it doesn't
			   matter - We don't care if we miss a wakeup, because the GC thread
			   is only an optimisation anyway. */
			schedule();
		}

		if (try_to_freeze(0))
			continue;

		cond_resched();

		/* Put_super will send a SIGKILL and then wait on the sem.
		 */
		while (signal_pending(current)) {
			siginfo_t info;
			unsigned long signr;

			/* NOTE(review): source was mojibake-damaged here; restored
			   as &current->blocked, the only plausible argument. */
			signr = dequeue_signal_lock(current, &current->blocked, &info);

			switch(signr) {
			case SIGSTOP:
				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGSTOP received.\n"));
				set_current_state(TASK_STOPPED);
				schedule();
				break;

			case SIGKILL:
				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGKILL received.\n"));
				goto die;

			case SIGHUP:
				/* Just a wakeup; fall through to the GC pass. */
				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGHUP received.\n"));
				break;
			default:
				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): signal %ld received\n", signr));
			}
		}
		/* We don't want SIGHUP to interrupt us. STOP and KILL are OK though. */
		disallow_signal(SIGHUP);

		D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): pass\n"));
		if (jffs2_garbage_collect_pass(c) == -ENOSPC) {
			printk(KERN_NOTICE "No space for garbage collection. Aborting GC thread\n");
			goto die;
		}
	}
 die:
	/* Clear c->gc_task under the lock so the trigger/stop paths never
	   see a stale pointer, then wake anyone waiting on gc_thread_exit. */
	spin_lock(&c->erase_completion_lock);
	c->gc_task = NULL;
	spin_unlock(&c->erase_completion_lock);
	complete_and_exit(&c->gc_thread_exit, 0);
}
diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c new file mode 100644 index 000000000000..a01dd5fdbb95 --- /dev/null +++ b/fs/jffs2/build.c | |||
@@ -0,0 +1,371 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2001-2003 Red Hat, Inc. | ||
5 | * | ||
6 | * Created by David Woodhouse <dwmw2@infradead.org> | ||
7 | * | ||
8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
9 | * | ||
10 | * $Id: build.c,v 1.69 2004/12/16 20:22:18 dmarlin Exp $ | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/sched.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include <linux/vmalloc.h> | ||
18 | #include <linux/mtd/mtd.h> | ||
19 | #include "nodelist.h" | ||
20 | |||
21 | static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *, struct jffs2_inode_cache *, struct jffs2_full_dirent **); | ||
22 | |||
23 | static inline struct jffs2_inode_cache * | ||
24 | first_inode_chain(int *i, struct jffs2_sb_info *c) | ||
25 | { | ||
26 | for (; *i < INOCACHE_HASHSIZE; (*i)++) { | ||
27 | if (c->inocache_list[*i]) | ||
28 | return c->inocache_list[*i]; | ||
29 | } | ||
30 | return NULL; | ||
31 | } | ||
32 | |||
33 | static inline struct jffs2_inode_cache * | ||
34 | next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c) | ||
35 | { | ||
36 | /* More in this chain? */ | ||
37 | if (ic->next) | ||
38 | return ic->next; | ||
39 | (*i)++; | ||
40 | return first_inode_chain(i, c); | ||
41 | } | ||
42 | |||
43 | #define for_each_inode(i, c, ic) \ | ||
44 | for (i = 0, ic = first_inode_chain(&i, (c)); \ | ||
45 | ic; \ | ||
46 | ic = next_inode(&i, ic, (c))) | ||
47 | |||
48 | |||
/*
 * Pass 1 of the mount-time build: for one directory inode, walk its
 * cached dirents (ic->scan_dents) and increment the nlink of every
 * child they name.  Dirents referencing missing children are marked
 * obsolete on the medium.  The dirents themselves are NOT freed here;
 * pass 2 may still need them (see the comment at the bottom).
 */
static inline void jffs2_build_inode_pass1(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
{
	struct jffs2_full_dirent *fd;

	D1(printk(KERN_DEBUG "jffs2_build_inode building directory inode #%u\n", ic->ino));

	/* For each child, increase nlink */
	for(fd = ic->scan_dents; fd; fd = fd->next) {
		struct jffs2_inode_cache *child_ic;
		/* ino 0 marks a deletion dirent; it names no live child. */
		if (!fd->ino)
			continue;

		/* XXX: Can get high latency here with huge directories */

		child_ic = jffs2_get_ino_cache(c, fd->ino);
		if (!child_ic) {
			printk(KERN_NOTICE "Eep. Child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n",
				  fd->name, fd->ino, ic->ino);
			/* Dangling dirent: obsolete its on-flash node so GC reclaims it. */
			jffs2_mark_node_obsolete(c, fd->raw);
			continue;
		}

		/* nlink++ while also testing the OLD value: a second link to a
		   directory means an (illegal) hard link, except for the
		   self-referencing root inode (#1). */
		if (child_ic->nlink++ && fd->type == DT_DIR) {
			printk(KERN_NOTICE "Child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n", fd->name, fd->ino, ic->ino);
			if (fd->ino == 1 && ic->ino == 1) {
				printk(KERN_NOTICE "This is mostly harmless, and probably caused by creating a JFFS2 image\n");
				printk(KERN_NOTICE "using a buggy version of mkfs.jffs2. Use at least v1.17.\n");
			}
			/* What do we do about it? */
		}
		D1(printk(KERN_DEBUG "Increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino));
		/* Can't free them. We might need them in pass 2 */
	}
}
83 | |||
/* Scan plan:
 - Scan physical nodes. Build map of inodes/dirents. Allocate inocaches as we go
 - Scan directory tree from top down, setting nlink in inocaches
 - Scan inocaches for inodes with nlink==0
*/
/*
 * Mount-time filesystem build.  Runs the media scan, then three passes
 * over the inocache table: pass 1 sets nlink from dirents, pass 2 (+2a)
 * removes unlinked inodes iteratively via a dead_fds work list, pass 3
 * frees the cached dirents.  Returns 0 on success or the error from
 * jffs2_scan_medium(); on error all cached dirents are freed.
 */
static int jffs2_build_filesystem(struct jffs2_sb_info *c)
{
	int ret;
	int i;
	struct jffs2_inode_cache *ic;
	struct jffs2_full_dirent *fd;
	struct jffs2_full_dirent *dead_fds = NULL;

	/* First, scan the medium and build all the inode caches with
	   lists of physical nodes */

	/* The MOUNTING flag is visible to the rest of JFFS2 for the
	   duration of the scan and pass 1. */
	c->flags |= JFFS2_SB_FLAG_MOUNTING;
	ret = jffs2_scan_medium(c);
	if (ret)
		goto exit;

	D1(printk(KERN_DEBUG "Scanned flash completely\n"));
	D2(jffs2_dump_block_lists(c));

	/* Now scan the directory tree, increasing nlink according to every dirent found. */
	for_each_inode(i, c, ic) {
		D1(printk(KERN_DEBUG "Pass 1: ino #%u\n", ic->ino));

		D1(BUG_ON(ic->ino > c->highest_ino));

		/* Only directories carry scan_dents; everything else is skipped. */
		if (ic->scan_dents) {
			jffs2_build_inode_pass1(c, ic);
			cond_resched();
		}
	}
	c->flags &= ~JFFS2_SB_FLAG_MOUNTING;

	D1(printk(KERN_DEBUG "Pass 1 complete\n"));

	/* Next, scan for inodes with nlink == 0 and remove them. If
	   they were directories, then decrement the nlink of their
	   children too, and repeat the scan. As that's going to be
	   a fairly uncommon occurrence, it's not so evil to do it this
	   way. Recursion bad. */
	D1(printk(KERN_DEBUG "Pass 2 starting\n"));

	for_each_inode(i, c, ic) {
		D1(printk(KERN_DEBUG "Pass 2: ino #%u, nlink %d, ic %p, nodes %p\n", ic->ino, ic->nlink, ic, ic->nodes));
		if (ic->nlink)
			continue;

		/* May append newly-orphaned children to dead_fds. */
		jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
		cond_resched();
	}

	D1(printk(KERN_DEBUG "Pass 2a starting\n"));

	/* Pass 2a: drain the work list.  Each removal may queue further
	   entries, so this loop is the iterative replacement for recursion. */
	while (dead_fds) {
		fd = dead_fds;
		dead_fds = fd->next;

		ic = jffs2_get_ino_cache(c, fd->ino);
		D1(printk(KERN_DEBUG "Removing dead_fd ino #%u (\"%s\"), ic at %p\n", fd->ino, fd->name, ic));

		if (ic)
			jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
		jffs2_free_full_dirent(fd);
	}

	D1(printk(KERN_DEBUG "Pass 2 complete\n"));

	/* Finally, we can scan again and free the dirent structs */
	for_each_inode(i, c, ic) {
		D1(printk(KERN_DEBUG "Pass 3: ino #%u, ic %p, nodes %p\n", ic->ino, ic, ic->nodes));

		while(ic->scan_dents) {
			fd = ic->scan_dents;
			ic->scan_dents = fd->next;
			jffs2_free_full_dirent(fd);
		}
		ic->scan_dents = NULL;
		cond_resched();
	}
	D1(printk(KERN_DEBUG "Pass 3 complete\n"));
	D2(jffs2_dump_block_lists(c));

	/* Rotate the lists by some number to ensure wear levelling */
	jffs2_rotate_lists(c);

	ret = 0;

exit:
	/* Error path: free any dirents the scan managed to cache. */
	if (ret) {
		for_each_inode(i, c, ic) {
			while(ic->scan_dents) {
				fd = ic->scan_dents;
				ic->scan_dents = fd->next;
				jffs2_free_full_dirent(fd);
			}
		}
	}

	return ret;
}
188 | |||
/*
 * Remove an inode whose nlink has reached zero: obsolete every physical
 * node on its ic->nodes ring, and -- if it was a directory with cached
 * dirents -- decrement the nlink of each child.  Children whose nlink
 * drops to zero are pushed onto *dead_fds for the caller (pass 2a) to
 * process iteratively rather than recursing ("Recursion bad").
 * The inocache itself is NOT freed here; see the comment at the bottom.
 */
static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, struct jffs2_full_dirent **dead_fds)
{
	struct jffs2_raw_node_ref *raw;
	struct jffs2_full_dirent *fd;

	D1(printk(KERN_DEBUG "JFFS2: Removing ino #%u with nlink == zero.\n", ic->ino));

	/* The node list is circular and terminates back at the inocache
	   itself, hence the (void *)ic sentinel comparison. */
	raw = ic->nodes;
	while (raw != (void *)ic) {
		struct jffs2_raw_node_ref *next = raw->next_in_ino;
		D1(printk(KERN_DEBUG "obsoleting node at 0x%08x\n", ref_offset(raw)));
		jffs2_mark_node_obsolete(c, raw);
		raw = next;
	}

	if (ic->scan_dents) {
		int whinged = 0;	/* print the NOTICE below only once per dir */
		D1(printk(KERN_DEBUG "Inode #%u was a directory which may have children...\n", ic->ino));

		while(ic->scan_dents) {
			struct jffs2_inode_cache *child_ic;

			fd = ic->scan_dents;
			ic->scan_dents = fd->next;

			if (!fd->ino) {
				/* It's a deletion dirent. Ignore it */
				D1(printk(KERN_DEBUG "Child \"%s\" is a deletion dirent, skipping...\n", fd->name));
				jffs2_free_full_dirent(fd);
				continue;
			}
			if (!whinged) {
				whinged = 1;
				printk(KERN_NOTICE "Inode #%u was a directory with children - removing those too...\n", ic->ino);
			}

			D1(printk(KERN_DEBUG "Removing child \"%s\", ino #%u\n",
				  fd->name, fd->ino));

			child_ic = jffs2_get_ino_cache(c, fd->ino);
			if (!child_ic) {
				printk(KERN_NOTICE "Cannot remove child \"%s\", ino #%u, because it doesn't exist\n", fd->name, fd->ino);
				jffs2_free_full_dirent(fd);
				continue;
			}

			/* Reduce nlink of the child. If it's now zero, stick it on the
			   dead_fds list to be cleaned up later. Else just free the fd */

			child_ic->nlink--;

			if (!child_ic->nlink) {
				D1(printk(KERN_DEBUG "Inode #%u (\"%s\") has now got zero nlink. Adding to dead_fds list.\n",
					  fd->ino, fd->name));
				/* fd keeps ownership of its memory; pass 2a frees it. */
				fd->next = *dead_fds;
				*dead_fds = fd;
			} else {
				D1(printk(KERN_DEBUG "Inode #%u (\"%s\") has now got nlink %d. Ignoring.\n",
					  fd->ino, fd->name, child_ic->nlink));
				jffs2_free_full_dirent(fd);
			}
		}
	}

	/*
	   We don't delete the inocache from the hash list and free it yet.
	   The erase code will do that, when all the nodes are completely gone.
	*/
}
258 | |||
/*
 * Compute the reserved-block thresholds that gate deletion, ordinary
 * writes, background GC, GC merges and bad-block GC, plus the minimum
 * dirty space worth garbage-collecting.  Called from
 * jffs2_do_mount_fs() after flash_size/sector_size/nr_blocks are set.
 */
static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c)
{
	uint32_t size;

	/* Deletion should almost _always_ be allowed. We're fairly
	   buggered once we stop allowing people to delete stuff
	   because there's not enough free space... */
	c->resv_blocks_deletion = 2;

	/* Be conservative about how much space we need before we allow writes.
	   On top of that which is required for deletia, require an extra 2%
	   of the medium to be available, for overhead caused by nodes being
	   split across blocks, etc. */

	size = c->flash_size / 50; /* 2% of flash size */
	size += c->nr_blocks * 100; /* And 100 bytes per eraseblock */
	size += c->sector_size - 1; /* ... and round up */

	c->resv_blocks_write = c->resv_blocks_deletion + (size / c->sector_size);

	/* When do we let the GC thread run in the background */

	c->resv_blocks_gctrigger = c->resv_blocks_write + 1;

	/* When do we allow garbage collection to merge nodes to make
	   long-term progress at the expense of short-term space exhaustion? */
	c->resv_blocks_gcmerge = c->resv_blocks_deletion + 1;

	/* When do we allow garbage collection to eat from bad blocks rather
	   than actually making progress? */
	/* NOTE(review): deliberately disabled -- the commented-out
	   expression is the value this would otherwise take. */
	c->resv_blocks_gcbad = 0;//c->resv_blocks_deletion + 2;

	/* If there's less than this amount of dirty space, don't bother
	   trying to GC to make more space. It'll be a fruitless task */
	c->nospc_dirty_size = c->sector_size + (c->flash_size / 100);

	/* Debug-only dump of every computed threshold. */
	D1(printk(KERN_DEBUG "JFFS2 trigger levels (size %d KiB, block size %d KiB, %d blocks)\n",
		  c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks));
	D1(printk(KERN_DEBUG "Blocks required to allow deletion:    %d (%d KiB)\n",
		  c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024));
	D1(printk(KERN_DEBUG "Blocks required to allow writes:      %d (%d KiB)\n",
		  c->resv_blocks_write, c->resv_blocks_write*c->sector_size/1024));
	D1(printk(KERN_DEBUG "Blocks required to quiesce GC thread: %d (%d KiB)\n",
		  c->resv_blocks_gctrigger, c->resv_blocks_gctrigger*c->sector_size/1024));
	D1(printk(KERN_DEBUG "Blocks required to allow GC merges:   %d (%d KiB)\n",
		  c->resv_blocks_gcmerge, c->resv_blocks_gcmerge*c->sector_size/1024));
	D1(printk(KERN_DEBUG "Blocks required to GC bad blocks:     %d (%d KiB)\n",
		  c->resv_blocks_gcbad, c->resv_blocks_gcbad*c->sector_size/1024));
	D1(printk(KERN_DEBUG "Amount of dirty space required to GC: %d bytes\n",
		  c->nospc_dirty_size));
}
310 | |||
/*
 * Mount-time entry point: allocate and initialise the in-core
 * superblock state (eraseblock array, semaphores, spinlocks, block
 * lists), build the filesystem from the medium, then compute the GC
 * trigger levels.  Returns 0 on success, -ENOMEM if the eraseblock
 * array cannot be allocated, or -EIO if the build fails.
 */
int jffs2_do_mount_fs(struct jffs2_sb_info *c)
{
	int i;

	c->free_size = c->flash_size;
	c->nr_blocks = c->flash_size / c->sector_size;
	/* MTD_NO_VIRTBLOCKS devices get the array from vmalloc, everything
	   else from kmalloc; the error path below must mirror this choice. */
	if (c->mtd->flags & MTD_NO_VIRTBLOCKS)
		c->blocks = vmalloc(sizeof(struct jffs2_eraseblock) * c->nr_blocks);
	else
		c->blocks = kmalloc(sizeof(struct jffs2_eraseblock) * c->nr_blocks, GFP_KERNEL);
	if (!c->blocks)
		return -ENOMEM;
	/* Every eraseblock starts out fully free and unlisted. */
	for (i=0; i<c->nr_blocks; i++) {
		INIT_LIST_HEAD(&c->blocks[i].list);
		c->blocks[i].offset = i * c->sector_size;
		c->blocks[i].free_size = c->sector_size;
		c->blocks[i].dirty_size = 0;
		c->blocks[i].wasted_size = 0;
		c->blocks[i].unchecked_size = 0;
		c->blocks[i].used_size = 0;
		c->blocks[i].first_node = NULL;
		c->blocks[i].last_node = NULL;
		c->blocks[i].bad_count = 0;
	}

	init_MUTEX(&c->alloc_sem);
	init_MUTEX(&c->erase_free_sem);
	init_waitqueue_head(&c->erase_wait);
	init_waitqueue_head(&c->inocache_wq);
	spin_lock_init(&c->erase_completion_lock);
	spin_lock_init(&c->inocache_lock);

	/* The per-state eraseblock lists; blocks migrate between these
	   as their contents age.  See README.Locking for the locking. */
	INIT_LIST_HEAD(&c->clean_list);
	INIT_LIST_HEAD(&c->very_dirty_list);
	INIT_LIST_HEAD(&c->dirty_list);
	INIT_LIST_HEAD(&c->erasable_list);
	INIT_LIST_HEAD(&c->erasing_list);
	INIT_LIST_HEAD(&c->erase_pending_list);
	INIT_LIST_HEAD(&c->erasable_pending_wbuf_list);
	INIT_LIST_HEAD(&c->erase_complete_list);
	INIT_LIST_HEAD(&c->free_list);
	INIT_LIST_HEAD(&c->bad_list);
	INIT_LIST_HEAD(&c->bad_used_list);
	c->highest_ino = 1;

	if (jffs2_build_filesystem(c)) {
		/* Build failed: tear down everything allocated above,
		   freeing c->blocks with the matching free routine. */
		D1(printk(KERN_DEBUG "build_fs failed\n"));
		jffs2_free_ino_caches(c);
		jffs2_free_raw_node_refs(c);
		if (c->mtd->flags & MTD_NO_VIRTBLOCKS) {
			vfree(c->blocks);
		} else {
			kfree(c->blocks);
		}
		return -EIO;
	}

	jffs2_calc_trigger_levels(c);

	return 0;
}
diff --git a/fs/jffs2/compr.c b/fs/jffs2/compr.c new file mode 100644 index 000000000000..af922a9618ac --- /dev/null +++ b/fs/jffs2/compr.c | |||
@@ -0,0 +1,469 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2001-2003 Red Hat, Inc. | ||
5 | * Created by Arjan van de Ven <arjanv@redhat.com> | ||
6 | * | ||
7 | * Copyright (C) 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>, | ||
8 | * University of Szeged, Hungary | ||
9 | * | ||
10 | * For licensing information, see the file 'LICENCE' in this directory. | ||
11 | * | ||
12 | * $Id: compr.c,v 1.42 2004/08/07 21:56:08 dwmw2 Exp $ | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #include "compr.h" | ||
17 | |||
18 | static DEFINE_SPINLOCK(jffs2_compressor_list_lock); | ||
19 | |||
20 | /* Available compressors are on this list */ | ||
21 | static LIST_HEAD(jffs2_compressor_list); | ||
22 | |||
23 | /* Actual compression mode */ | ||
24 | static int jffs2_compression_mode = JFFS2_COMPR_MODE_PRIORITY; | ||
25 | |||
26 | /* Statistics for blocks stored without compression */ | ||
27 | static uint32_t none_stat_compr_blocks=0,none_stat_decompr_blocks=0,none_stat_compr_size=0; | ||
28 | |||
29 | /* jffs2_compress: | ||
30 | * @data: Pointer to uncompressed data | ||
31 | * @cdata: Pointer to returned pointer to buffer for compressed data | ||
32 | * @datalen: On entry, holds the amount of data available for compression. | ||
33 | * On exit, expected to hold the amount of data actually compressed. | ||
34 | * @cdatalen: On entry, holds the amount of space available for compressed | ||
35 | * data. On exit, expected to hold the actual size of the compressed | ||
36 | * data. | ||
37 | * | ||
38 | * Returns: Lower byte to be stored with data indicating compression type used. | ||
39 | * Zero is used to show that the data could not be compressed - the | ||
40 | * compressed version was actually larger than the original. | ||
41 | * Upper byte will be used later. (soon) | ||
42 | * | ||
43 | * If the cdata buffer isn't large enough to hold all the uncompressed data, | ||
44 | * jffs2_compress should compress as much as will fit, and should set | ||
45 | * *datalen accordingly to show the amount of data which were compressed. | ||
46 | */ | ||
47 | uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | ||
48 | unsigned char *data_in, unsigned char **cpage_out, | ||
49 | uint32_t *datalen, uint32_t *cdatalen) | ||
50 | { | ||
51 | int ret = JFFS2_COMPR_NONE; | ||
52 | int compr_ret; | ||
53 | struct jffs2_compressor *this, *best=NULL; | ||
54 | unsigned char *output_buf = NULL, *tmp_buf; | ||
55 | uint32_t orig_slen, orig_dlen; | ||
56 | uint32_t best_slen=0, best_dlen=0; | ||
57 | |||
58 | switch (jffs2_compression_mode) { | ||
59 | case JFFS2_COMPR_MODE_NONE: | ||
60 | break; | ||
61 | case JFFS2_COMPR_MODE_PRIORITY: | ||
62 | output_buf = kmalloc(*cdatalen,GFP_KERNEL); | ||
63 | if (!output_buf) { | ||
64 | printk(KERN_WARNING "JFFS2: No memory for compressor allocation. Compression failed.\n"); | ||
65 | goto out; | ||
66 | } | ||
67 | orig_slen = *datalen; | ||
68 | orig_dlen = *cdatalen; | ||
69 | spin_lock(&jffs2_compressor_list_lock); | ||
70 | list_for_each_entry(this, &jffs2_compressor_list, list) { | ||
71 | /* Skip decompress-only backwards-compatibility and disabled modules */ | ||
72 | if ((!this->compress)||(this->disabled)) | ||
73 | continue; | ||
74 | |||
75 | this->usecount++; | ||
76 | spin_unlock(&jffs2_compressor_list_lock); | ||
77 | *datalen = orig_slen; | ||
78 | *cdatalen = orig_dlen; | ||
79 | compr_ret = this->compress(data_in, output_buf, datalen, cdatalen, NULL); | ||
80 | spin_lock(&jffs2_compressor_list_lock); | ||
81 | this->usecount--; | ||
82 | if (!compr_ret) { | ||
83 | ret = this->compr; | ||
84 | this->stat_compr_blocks++; | ||
85 | this->stat_compr_orig_size += *datalen; | ||
86 | this->stat_compr_new_size += *cdatalen; | ||
87 | break; | ||
88 | } | ||
89 | } | ||
90 | spin_unlock(&jffs2_compressor_list_lock); | ||
91 | if (ret == JFFS2_COMPR_NONE) kfree(output_buf); | ||
92 | break; | ||
93 | case JFFS2_COMPR_MODE_SIZE: | ||
94 | orig_slen = *datalen; | ||
95 | orig_dlen = *cdatalen; | ||
96 | spin_lock(&jffs2_compressor_list_lock); | ||
97 | list_for_each_entry(this, &jffs2_compressor_list, list) { | ||
98 | /* Skip decompress-only backwards-compatibility and disabled modules */ | ||
99 | if ((!this->compress)||(this->disabled)) | ||
100 | continue; | ||
101 | /* Allocating memory for output buffer if necessary */ | ||
102 | if ((this->compr_buf_size<orig_dlen)&&(this->compr_buf)) { | ||
103 | spin_unlock(&jffs2_compressor_list_lock); | ||
104 | kfree(this->compr_buf); | ||
105 | spin_lock(&jffs2_compressor_list_lock); | ||
106 | this->compr_buf_size=0; | ||
107 | this->compr_buf=NULL; | ||
108 | } | ||
109 | if (!this->compr_buf) { | ||
110 | spin_unlock(&jffs2_compressor_list_lock); | ||
111 | tmp_buf = kmalloc(orig_dlen,GFP_KERNEL); | ||
112 | spin_lock(&jffs2_compressor_list_lock); | ||
113 | if (!tmp_buf) { | ||
114 | printk(KERN_WARNING "JFFS2: No memory for compressor allocation. (%d bytes)\n",orig_dlen); | ||
115 | continue; | ||
116 | } | ||
117 | else { | ||
118 | this->compr_buf = tmp_buf; | ||
119 | this->compr_buf_size = orig_dlen; | ||
120 | } | ||
121 | } | ||
122 | this->usecount++; | ||
123 | spin_unlock(&jffs2_compressor_list_lock); | ||
124 | *datalen = orig_slen; | ||
125 | *cdatalen = orig_dlen; | ||
126 | compr_ret = this->compress(data_in, this->compr_buf, datalen, cdatalen, NULL); | ||
127 | spin_lock(&jffs2_compressor_list_lock); | ||
128 | this->usecount--; | ||
129 | if (!compr_ret) { | ||
130 | if ((!best_dlen)||(best_dlen>*cdatalen)) { | ||
131 | best_dlen = *cdatalen; | ||
132 | best_slen = *datalen; | ||
133 | best = this; | ||
134 | } | ||
135 | } | ||
136 | } | ||
137 | if (best_dlen) { | ||
138 | *cdatalen = best_dlen; | ||
139 | *datalen = best_slen; | ||
140 | output_buf = best->compr_buf; | ||
141 | best->compr_buf = NULL; | ||
142 | best->compr_buf_size = 0; | ||
143 | best->stat_compr_blocks++; | ||
144 | best->stat_compr_orig_size += best_slen; | ||
145 | best->stat_compr_new_size += best_dlen; | ||
146 | ret = best->compr; | ||
147 | } | ||
148 | spin_unlock(&jffs2_compressor_list_lock); | ||
149 | break; | ||
150 | default: | ||
151 | printk(KERN_ERR "JFFS2: unknow compression mode.\n"); | ||
152 | } | ||
153 | out: | ||
154 | if (ret == JFFS2_COMPR_NONE) { | ||
155 | *cpage_out = data_in; | ||
156 | *datalen = *cdatalen; | ||
157 | none_stat_compr_blocks++; | ||
158 | none_stat_compr_size += *datalen; | ||
159 | } | ||
160 | else { | ||
161 | *cpage_out = output_buf; | ||
162 | } | ||
163 | return ret; | ||
164 | } | ||
165 | |||
/* jffs2_decompress: inflate one node's data with the compressor whose
 * JFFS2_COMPR_* id matches @comprtype.  NONE and ZERO are handled inline;
 * anything else is looked up on jffs2_compressor_list.  The module's
 * usecount is raised (and the list lock dropped) around the decompress
 * callback so the module cannot be unregistered while it runs.
 * Returns 0 on success, the callback's error code, or -EIO when no
 * matching compressor is registered.
 */
int jffs2_decompress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
		     uint16_t comprtype, unsigned char *cdata_in,
		     unsigned char *data_out, uint32_t cdatalen, uint32_t datalen)
{
	struct jffs2_compressor *this;
	int ret;

	/* Older code had a bug where it would write non-zero 'usercompr'
	   fields. Deal with it. */
	if ((comprtype & 0xff) <= JFFS2_COMPR_ZLIB)
		comprtype &= 0xff;

	switch (comprtype & 0xff) {
	case JFFS2_COMPR_NONE:
		/* This should be special-cased elsewhere, but we might as well deal with it */
		memcpy(data_out, cdata_in, datalen);
		none_stat_decompr_blocks++;
		break;
	case JFFS2_COMPR_ZERO:
		memset(data_out, 0, datalen);
		break;
	default:
		spin_lock(&jffs2_compressor_list_lock);
		list_for_each_entry(this, &jffs2_compressor_list, list) {
			if (comprtype == this->compr) {
				this->usecount++;
				spin_unlock(&jffs2_compressor_list_lock);
				ret = this->decompress(cdata_in, data_out, cdatalen, datalen, NULL);
				spin_lock(&jffs2_compressor_list_lock);
				if (ret) {
					printk(KERN_WARNING "Decompressor \"%s\" returned %d\n", this->name, ret);
				}
				else {
					this->stat_decompr_blocks++;
				}
				this->usecount--;
				spin_unlock(&jffs2_compressor_list_lock);
				return ret;
			}
		}
		printk(KERN_WARNING "JFFS2 compression type 0x%02x not available.\n", comprtype);
		spin_unlock(&jffs2_compressor_list_lock);
		return -EIO;
	}
	return 0;
}
212 | |||
213 | int jffs2_register_compressor(struct jffs2_compressor *comp) | ||
214 | { | ||
215 | struct jffs2_compressor *this; | ||
216 | |||
217 | if (!comp->name) { | ||
218 | printk(KERN_WARNING "NULL compressor name at registering JFFS2 compressor. Failed.\n"); | ||
219 | return -1; | ||
220 | } | ||
221 | comp->compr_buf_size=0; | ||
222 | comp->compr_buf=NULL; | ||
223 | comp->usecount=0; | ||
224 | comp->stat_compr_orig_size=0; | ||
225 | comp->stat_compr_new_size=0; | ||
226 | comp->stat_compr_blocks=0; | ||
227 | comp->stat_decompr_blocks=0; | ||
228 | D1(printk(KERN_DEBUG "Registering JFFS2 compressor \"%s\"\n", comp->name)); | ||
229 | |||
230 | spin_lock(&jffs2_compressor_list_lock); | ||
231 | |||
232 | list_for_each_entry(this, &jffs2_compressor_list, list) { | ||
233 | if (this->priority < comp->priority) { | ||
234 | list_add(&comp->list, this->list.prev); | ||
235 | goto out; | ||
236 | } | ||
237 | } | ||
238 | list_add_tail(&comp->list, &jffs2_compressor_list); | ||
239 | out: | ||
240 | D2(list_for_each_entry(this, &jffs2_compressor_list, list) { | ||
241 | printk(KERN_DEBUG "Compressor \"%s\", prio %d\n", this->name, this->priority); | ||
242 | }) | ||
243 | |||
244 | spin_unlock(&jffs2_compressor_list_lock); | ||
245 | |||
246 | return 0; | ||
247 | } | ||
248 | |||
249 | int jffs2_unregister_compressor(struct jffs2_compressor *comp) | ||
250 | { | ||
251 | D2(struct jffs2_compressor *this;) | ||
252 | |||
253 | D1(printk(KERN_DEBUG "Unregistering JFFS2 compressor \"%s\"\n", comp->name)); | ||
254 | |||
255 | spin_lock(&jffs2_compressor_list_lock); | ||
256 | |||
257 | if (comp->usecount) { | ||
258 | spin_unlock(&jffs2_compressor_list_lock); | ||
259 | printk(KERN_WARNING "JFFS2: Compressor modul is in use. Unregister failed.\n"); | ||
260 | return -1; | ||
261 | } | ||
262 | list_del(&comp->list); | ||
263 | |||
264 | D2(list_for_each_entry(this, &jffs2_compressor_list, list) { | ||
265 | printk(KERN_DEBUG "Compressor \"%s\", prio %d\n", this->name, this->priority); | ||
266 | }) | ||
267 | spin_unlock(&jffs2_compressor_list_lock); | ||
268 | return 0; | ||
269 | } | ||
270 | |||
271 | #ifdef CONFIG_JFFS2_PROC | ||
272 | |||
273 | #define JFFS2_STAT_BUF_SIZE 16000 | ||
274 | |||
275 | char *jffs2_list_compressors(void) | ||
276 | { | ||
277 | struct jffs2_compressor *this; | ||
278 | char *buf, *act_buf; | ||
279 | |||
280 | act_buf = buf = kmalloc(JFFS2_STAT_BUF_SIZE,GFP_KERNEL); | ||
281 | list_for_each_entry(this, &jffs2_compressor_list, list) { | ||
282 | act_buf += sprintf(act_buf, "%10s priority:%d ", this->name, this->priority); | ||
283 | if ((this->disabled)||(!this->compress)) | ||
284 | act_buf += sprintf(act_buf,"disabled"); | ||
285 | else | ||
286 | act_buf += sprintf(act_buf,"enabled"); | ||
287 | act_buf += sprintf(act_buf,"\n"); | ||
288 | } | ||
289 | return buf; | ||
290 | } | ||
291 | |||
292 | char *jffs2_stats(void) | ||
293 | { | ||
294 | struct jffs2_compressor *this; | ||
295 | char *buf, *act_buf; | ||
296 | |||
297 | act_buf = buf = kmalloc(JFFS2_STAT_BUF_SIZE,GFP_KERNEL); | ||
298 | |||
299 | act_buf += sprintf(act_buf,"JFFS2 compressor statistics:\n"); | ||
300 | act_buf += sprintf(act_buf,"%10s ","none"); | ||
301 | act_buf += sprintf(act_buf,"compr: %d blocks (%d) decompr: %d blocks\n", none_stat_compr_blocks, | ||
302 | none_stat_compr_size, none_stat_decompr_blocks); | ||
303 | spin_lock(&jffs2_compressor_list_lock); | ||
304 | list_for_each_entry(this, &jffs2_compressor_list, list) { | ||
305 | act_buf += sprintf(act_buf,"%10s ",this->name); | ||
306 | if ((this->disabled)||(!this->compress)) | ||
307 | act_buf += sprintf(act_buf,"- "); | ||
308 | else | ||
309 | act_buf += sprintf(act_buf,"+ "); | ||
310 | act_buf += sprintf(act_buf,"compr: %d blocks (%d/%d) decompr: %d blocks ", this->stat_compr_blocks, | ||
311 | this->stat_compr_new_size, this->stat_compr_orig_size, | ||
312 | this->stat_decompr_blocks); | ||
313 | act_buf += sprintf(act_buf,"\n"); | ||
314 | } | ||
315 | spin_unlock(&jffs2_compressor_list_lock); | ||
316 | |||
317 | return buf; | ||
318 | } | ||
319 | |||
320 | char *jffs2_get_compression_mode_name(void) | ||
321 | { | ||
322 | switch (jffs2_compression_mode) { | ||
323 | case JFFS2_COMPR_MODE_NONE: | ||
324 | return "none"; | ||
325 | case JFFS2_COMPR_MODE_PRIORITY: | ||
326 | return "priority"; | ||
327 | case JFFS2_COMPR_MODE_SIZE: | ||
328 | return "size"; | ||
329 | } | ||
330 | return "unkown"; | ||
331 | } | ||
332 | |||
333 | int jffs2_set_compression_mode_name(const char *name) | ||
334 | { | ||
335 | if (!strcmp("none",name)) { | ||
336 | jffs2_compression_mode = JFFS2_COMPR_MODE_NONE; | ||
337 | return 0; | ||
338 | } | ||
339 | if (!strcmp("priority",name)) { | ||
340 | jffs2_compression_mode = JFFS2_COMPR_MODE_PRIORITY; | ||
341 | return 0; | ||
342 | } | ||
343 | if (!strcmp("size",name)) { | ||
344 | jffs2_compression_mode = JFFS2_COMPR_MODE_SIZE; | ||
345 | return 0; | ||
346 | } | ||
347 | return 1; | ||
348 | } | ||
349 | |||
350 | static int jffs2_compressor_Xable(const char *name, int disabled) | ||
351 | { | ||
352 | struct jffs2_compressor *this; | ||
353 | spin_lock(&jffs2_compressor_list_lock); | ||
354 | list_for_each_entry(this, &jffs2_compressor_list, list) { | ||
355 | if (!strcmp(this->name, name)) { | ||
356 | this->disabled = disabled; | ||
357 | spin_unlock(&jffs2_compressor_list_lock); | ||
358 | return 0; | ||
359 | } | ||
360 | } | ||
361 | spin_unlock(&jffs2_compressor_list_lock); | ||
362 | printk(KERN_WARNING "JFFS2: compressor %s not found.\n",name); | ||
363 | return 1; | ||
364 | } | ||
365 | |||
/* Re-enable the named compressor (clears its ->disabled flag). */
int jffs2_enable_compressor_name(const char *name)
{
	return jffs2_compressor_Xable(name, 0);
}
370 | |||
/* Disable the named compressor (sets its ->disabled flag); it will
 * still be usable for decompression. */
int jffs2_disable_compressor_name(const char *name)
{
	return jffs2_compressor_Xable(name, 1);
}
375 | |||
/* Change the priority of the compressor called @name and move it to the
 * matching position on the priority-sorted jffs2_compressor_list.
 * Returns 0 on success, 1 if no such compressor is registered.
 * The list lock is held across both the lookup and the re-insertion so
 * the list is never observed unsorted. */
int jffs2_set_compressor_priority(const char *name, int priority)
{
	struct jffs2_compressor *this,*comp;
	spin_lock(&jffs2_compressor_list_lock);
	list_for_each_entry(this, &jffs2_compressor_list, list) {
		if (!strcmp(this->name, name)) {
			this->priority = priority;
			comp = this;
			goto reinsert;
		}
	}
	spin_unlock(&jffs2_compressor_list_lock);
	printk(KERN_WARNING "JFFS2: compressor %s not found.\n",name);
	return 1;
reinsert:
	/* list is sorted in the order of priority, so if
	   we change it we have to reinsert it into the
	   good place */
	list_del(&comp->list);
	list_for_each_entry(this, &jffs2_compressor_list, list) {
		if (this->priority < comp->priority) {
			list_add(&comp->list, this->list.prev);
			spin_unlock(&jffs2_compressor_list_lock);
			return 0;
		}
	}
	/* Lowest priority of all: append at the tail. */
	list_add_tail(&comp->list, &jffs2_compressor_list);
	spin_unlock(&jffs2_compressor_list_lock);
	return 0;
}
406 | |||
407 | #endif | ||
408 | |||
/* Release a buffer handed out by jffs2_compress().  When compression
 * failed, *cpage_out aliased the caller's original data (@orig), which
 * must not be freed here. */
void jffs2_free_comprbuf(unsigned char *comprbuf, unsigned char *orig)
{
	if (comprbuf == orig)
		return;
	kfree(comprbuf);
}
414 | |||
/* Register every compiled-in compressor module and pick the default
 * global compression mode from the kernel configuration.
 * Always returns 0. */
int jffs2_compressors_init(void)
{
	/* Registering compressors */
#ifdef CONFIG_JFFS2_ZLIB
	jffs2_zlib_init();
#endif
#ifdef CONFIG_JFFS2_RTIME
	jffs2_rtime_init();
#endif
#ifdef CONFIG_JFFS2_RUBIN
	jffs2_rubinmips_init();
	jffs2_dynrubin_init();
#endif
#ifdef CONFIG_JFFS2_LZARI
	jffs2_lzari_init();
#endif
#ifdef CONFIG_JFFS2_LZO
	jffs2_lzo_init();
#endif
	/* Setting default compression mode; PRIORITY (the static
	   initialiser of jffs2_compression_mode) is the fallback. */
#ifdef CONFIG_JFFS2_CMODE_NONE
	jffs2_compression_mode = JFFS2_COMPR_MODE_NONE;
	D1(printk(KERN_INFO "JFFS2: default compression mode: none\n");)
#else
#ifdef CONFIG_JFFS2_CMODE_SIZE
	jffs2_compression_mode = JFFS2_COMPR_MODE_SIZE;
	D1(printk(KERN_INFO "JFFS2: default compression mode: size\n");)
#else
	D1(printk(KERN_INFO "JFFS2: default compression mode: priority\n");)
#endif
#endif
	return 0;
}
448 | |||
/* Unregister the compiled-in compressor modules, in reverse order of
 * registration.  Always returns 0. */
int jffs2_compressors_exit(void)
{
	/* Unregistering compressors */
#ifdef CONFIG_JFFS2_LZO
	jffs2_lzo_exit();
#endif
#ifdef CONFIG_JFFS2_LZARI
	jffs2_lzari_exit();
#endif
#ifdef CONFIG_JFFS2_RUBIN
	jffs2_dynrubin_exit();
	jffs2_rubinmips_exit();
#endif
#ifdef CONFIG_JFFS2_RTIME
	jffs2_rtime_exit();
#endif
#ifdef CONFIG_JFFS2_ZLIB
	jffs2_zlib_exit();
#endif
	return 0;
}
diff --git a/fs/jffs2/compr.h b/fs/jffs2/compr.h new file mode 100644 index 000000000000..89ceeed201eb --- /dev/null +++ b/fs/jffs2/compr.h | |||
@@ -0,0 +1,115 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>, | ||
5 | * University of Szeged, Hungary | ||
6 | * | ||
7 | * For licensing information, see the file 'LICENCE' in the | ||
8 | * jffs2 directory. | ||
9 | * | ||
10 | * $Id: compr.h,v 1.6 2004/07/16 15:17:57 dwmw2 Exp $ | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #ifndef __JFFS2_COMPR_H__ | ||
15 | #define __JFFS2_COMPR_H__ | ||
16 | |||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/vmalloc.h> | ||
19 | #include <linux/list.h> | ||
20 | #include <linux/types.h> | ||
21 | #include <linux/string.h> | ||
22 | #include <linux/slab.h> | ||
23 | #include <linux/errno.h> | ||
24 | #include <linux/fs.h> | ||
25 | #include <linux/jffs2.h> | ||
26 | #include <linux/jffs2_fs_i.h> | ||
27 | #include <linux/jffs2_fs_sb.h> | ||
28 | #include "nodelist.h" | ||
29 | |||
30 | #define JFFS2_RUBINMIPS_PRIORITY 10 | ||
31 | #define JFFS2_DYNRUBIN_PRIORITY 20 | ||
32 | #define JFFS2_LZARI_PRIORITY 30 | ||
33 | #define JFFS2_LZO_PRIORITY 40 | ||
34 | #define JFFS2_RTIME_PRIORITY 50 | ||
35 | #define JFFS2_ZLIB_PRIORITY 60 | ||
36 | |||
37 | #define JFFS2_RUBINMIPS_DISABLED /* RUBINs will be used only */ | ||
38 | #define JFFS2_DYNRUBIN_DISABLED /* for decompression */ | ||
39 | |||
40 | #define JFFS2_COMPR_MODE_NONE 0 | ||
41 | #define JFFS2_COMPR_MODE_PRIORITY 1 | ||
42 | #define JFFS2_COMPR_MODE_SIZE 2 | ||
43 | |||
/* One (de)compressor module, kept on compr.c's jffs2_compressor_list,
 * which is sorted by descending priority and protected by
 * jffs2_compressor_list_lock. */
struct jffs2_compressor {
	struct list_head list;
	int priority;             /* used by priority compression mode */
	char *name;
	char compr;               /* JFFS2_COMPR_XXX */
	int (*compress)(unsigned char *data_in, unsigned char *cpage_out,
			uint32_t *srclen, uint32_t *destlen, void *model);
	int (*decompress)(unsigned char *cdata_in, unsigned char *data_out,
			uint32_t cdatalen, uint32_t datalen, void *model);
	int usecount;             /* nonzero while a callback is running */
	int disabled;             /* if set, the compressor won't compress
				     (decompression still works) */
	unsigned char *compr_buf; /* used by size compression mode */
	uint32_t compr_buf_size;  /* used by size compression mode */
	uint32_t stat_compr_orig_size;
	uint32_t stat_compr_new_size;
	uint32_t stat_compr_blocks;
	uint32_t stat_decompr_blocks;
};
62 | |||
63 | int jffs2_register_compressor(struct jffs2_compressor *comp); | ||
64 | int jffs2_unregister_compressor(struct jffs2_compressor *comp); | ||
65 | |||
66 | int jffs2_compressors_init(void); | ||
67 | int jffs2_compressors_exit(void); | ||
68 | |||
69 | uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | ||
70 | unsigned char *data_in, unsigned char **cpage_out, | ||
71 | uint32_t *datalen, uint32_t *cdatalen); | ||
72 | |||
73 | int jffs2_decompress(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | ||
74 | uint16_t comprtype, unsigned char *cdata_in, | ||
75 | unsigned char *data_out, uint32_t cdatalen, uint32_t datalen); | ||
76 | |||
77 | void jffs2_free_comprbuf(unsigned char *comprbuf, unsigned char *orig); | ||
78 | |||
79 | #ifdef CONFIG_JFFS2_PROC | ||
80 | int jffs2_enable_compressor_name(const char *name); | ||
81 | int jffs2_disable_compressor_name(const char *name); | ||
82 | int jffs2_set_compression_mode_name(const char *mode_name); | ||
83 | char *jffs2_get_compression_mode_name(void); | ||
84 | int jffs2_set_compressor_priority(const char *mode_name, int priority); | ||
85 | char *jffs2_list_compressors(void); | ||
86 | char *jffs2_stats(void); | ||
87 | #endif | ||
88 | |||
89 | /* Compressor modules */ | ||
90 | /* These functions will be called by jffs2_compressors_init/exit */ | ||
91 | |||
92 | #ifdef CONFIG_JFFS2_RUBIN | ||
93 | int jffs2_rubinmips_init(void); | ||
94 | void jffs2_rubinmips_exit(void); | ||
95 | int jffs2_dynrubin_init(void); | ||
96 | void jffs2_dynrubin_exit(void); | ||
97 | #endif | ||
98 | #ifdef CONFIG_JFFS2_RTIME | ||
99 | int jffs2_rtime_init(void); | ||
100 | void jffs2_rtime_exit(void); | ||
101 | #endif | ||
102 | #ifdef CONFIG_JFFS2_ZLIB | ||
103 | int jffs2_zlib_init(void); | ||
104 | void jffs2_zlib_exit(void); | ||
105 | #endif | ||
106 | #ifdef CONFIG_JFFS2_LZARI | ||
107 | int jffs2_lzari_init(void); | ||
108 | void jffs2_lzari_exit(void); | ||
109 | #endif | ||
110 | #ifdef CONFIG_JFFS2_LZO | ||
111 | int jffs2_lzo_init(void); | ||
112 | void jffs2_lzo_exit(void); | ||
113 | #endif | ||
114 | |||
115 | #endif /* __JFFS2_COMPR_H__ */ | ||
diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c new file mode 100644 index 000000000000..393129418666 --- /dev/null +++ b/fs/jffs2/compr_rtime.c | |||
@@ -0,0 +1,132 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2001-2003 Red Hat, Inc. | ||
5 | * | ||
6 | * Created by Arjan van de Ven <arjanv@redhat.com> | ||
7 | * | ||
8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
9 | * | ||
10 | * $Id: compr_rtime.c,v 1.14 2004/06/23 16:34:40 havasi Exp $ | ||
11 | * | ||
12 | * | ||
13 | * Very simple lz77-ish encoder. | ||
14 | * | ||
15 | * Theory of operation: Both encoder and decoder have a list of "last | ||
16 | * occurrences" for every possible source-value; after sending the | ||
17 | * first source-byte, the second byte indicated the "run" length of | ||
18 | * matches | ||
19 | * | ||
20 | * The algorithm is intended to only send "whole bytes", no bit-messing. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/types.h> | ||
26 | #include <linux/errno.h> | ||
27 | #include <linux/string.h> | ||
28 | #include <linux/jffs2.h> | ||
29 | #include "compr.h" | ||
30 | |||
/* _compress returns 0 on success (updating *sourcelen/*dstlen), -1 if the output would be bigger than the input */
static int jffs2_rtime_compress(unsigned char *data_in,
				unsigned char *cpage_out,
				uint32_t *sourcelen, uint32_t *dstlen,
				void *model)
{
	/* last position at which each byte value was seen */
	short positions[256];
	int outpos = 0;
	int pos=0;

	/* Each iteration emits a (literal, runlen) pair, i.e. two bytes.
	   Guard against tiny buffers: without this, the unsigned
	   expression (*dstlen)-2 below wraps around for *dstlen < 2 and
	   the loop would overrun cpage_out. */
	if (*dstlen < 2)
		return -1;

	memset(positions,0,sizeof(positions));

	while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
		int backpos, runlen=0;
		unsigned char value;

		value = data_in[pos];

		/* Emit the literal byte... */
		cpage_out[outpos++] = data_in[pos++];

		backpos = positions[value];
		positions[value]=pos;

		/* ...then count how many bytes repeat the sequence that
		   followed the previous occurrence of 'value'. */
		while ((backpos < pos) && (pos < (*sourcelen)) &&
		       (data_in[pos]==data_in[backpos++]) && (runlen<255)) {
			pos++;
			runlen++;
		}
		cpage_out[outpos++] = runlen;
	}

	if (outpos >= pos) {
		/* We failed: output is not smaller than input */
		return -1;
	}

	/* Tell the caller how much we managed to compress, and how much space it took */
	*sourcelen = pos;
	*dstlen = outpos;
	return 0;
}
72 | |||
73 | |||
/* Inverse of jffs2_rtime_compress: the input is a sequence of
 * (literal, runlen) byte pairs; each run repeats the bytes that
 * followed the previous occurrence of the literal.  Fills exactly
 * @destlen output bytes and always returns 0. */
static int jffs2_rtime_decompress(unsigned char *data_in,
				  unsigned char *cpage_out,
				  uint32_t srclen, uint32_t destlen,
				  void *model)
{
	short last_seen[256];	/* last output position of each byte value */
	int inpos = 0;
	int outpos = 0;

	memset(last_seen, 0, sizeof(last_seen));

	while (outpos < destlen) {
		unsigned char literal = data_in[inpos++];
		int runlen = data_in[inpos++];
		int backoffs = last_seen[literal];

		cpage_out[outpos++] = literal;	/* verbatim copied byte */
		last_seen[literal] = outpos;

		if (runlen) {
			if (backoffs + runlen >= outpos) {
				/* Source and destination overlap: copy
				   byte by byte so earlier copies feed
				   later ones. */
				while (runlen--)
					cpage_out[outpos++] = cpage_out[backoffs++];
			} else {
				memcpy(&cpage_out[outpos], &cpage_out[backoffs], runlen);
				outpos += runlen;
			}
		}
	}
	return 0;
}
110 | |||
/* Descriptor registered with compr.c.  When JFFS2_RTIME_DISABLED is
 * defined at build time the module becomes decompress-only. */
static struct jffs2_compressor jffs2_rtime_comp = {
    .priority = JFFS2_RTIME_PRIORITY,
    .name = "rtime",
    .compr = JFFS2_COMPR_RTIME,
    .compress = &jffs2_rtime_compress,
    .decompress = &jffs2_rtime_decompress,
#ifdef JFFS2_RTIME_DISABLED
    .disabled = 1,
#else
    .disabled = 0,
#endif
};
123 | |||
/* Register the rtime compressor; called from jffs2_compressors_init(). */
int jffs2_rtime_init(void)
{
    return jffs2_register_compressor(&jffs2_rtime_comp);
}
128 | |||
/* Unregister the rtime compressor; called from jffs2_compressors_exit(). */
void jffs2_rtime_exit(void)
{
    jffs2_unregister_compressor(&jffs2_rtime_comp);
}
diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c new file mode 100644 index 000000000000..450d6624181f --- /dev/null +++ b/fs/jffs2/compr_rubin.c | |||
@@ -0,0 +1,373 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2001, 2002 Red Hat, Inc. | ||
5 | * | ||
6 | * Created by Arjan van de Ven <arjanv@redhat.com> | ||
7 | * | ||
8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
9 | * | ||
10 | * $Id: compr_rubin.c,v 1.20 2004/06/23 16:34:40 havasi Exp $ | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | |||
15 | #include <linux/string.h> | ||
16 | #include <linux/types.h> | ||
17 | #include <linux/jffs2.h> | ||
18 | #include "compr_rubin.h" | ||
19 | #include "histo_mips.h" | ||
20 | #include "compr.h" | ||
21 | |||
22 | static void init_rubin(struct rubin_state *rs, int div, int *bits) | ||
23 | { | ||
24 | int c; | ||
25 | |||
26 | rs->q = 0; | ||
27 | rs->p = (long) (2 * UPPER_BIT_RUBIN); | ||
28 | rs->bit_number = (long) 0; | ||
29 | rs->bit_divider = div; | ||
30 | for (c=0; c<8; c++) | ||
31 | rs->bits[c] = bits[c]; | ||
32 | } | ||
33 | |||
34 | |||
/* Encode one binary symbol with probability weights A:B into the
 * push/pull bit stream, renormalising the (p,q) interval registers
 * first.  Returns nonzero if pushbit() reports the output is full
 * (i.e. encoding failed); 0 otherwise. */
static int encode(struct rubin_state *rs, long A, long B, int symbol)
{

	long i0, i1;
	int ret;

	/* Renormalise: while the top bit of the interval is settled,
	   emit it and shift it out of both registers. */
	while ((rs->q >= UPPER_BIT_RUBIN) || ((rs->p + rs->q) <= UPPER_BIT_RUBIN)) {
		rs->bit_number++;

		ret = pushbit(&rs->pp, (rs->q & UPPER_BIT_RUBIN) ? 1 : 0, 0);
		if (ret)
			return ret;
		rs->q &= LOWER_BITS_RUBIN;
		rs->q <<= 1;
		rs->p <<= 1;
	}
	/* Split the interval in proportion A:(A+B), clamping so both
	   halves stay non-empty. */
	i0 = A * rs->p / (A + B);
	if (i0 <= 0) {
		i0 = 1;
	}
	if (i0 >= rs->p) {
		i0 = rs->p - 1;
	}
	i1 = rs->p - i0;

	/* symbol 0 takes the lower sub-interval, 1 the upper one. */
	if (symbol == 0)
		rs->p = i0;
	else {
		rs->p = i1;
		rs->q += i0;
	}
	return 0;
}
68 | |||
69 | |||
/* Flush the coder: push the remaining RUBIN_REG_SIZE bits of the q
 * register so the decoder can resynchronise.  The third pushbit()
 * argument is 1 here, unlike encode()'s 0 -- presumably allowing
 * padding at end-of-stream; confirm against pushbit()'s definition. */
static void end_rubin(struct rubin_state *rs)
{

	int i;

	for (i = 0; i < RUBIN_REG_SIZE; i++) {
		pushbit(&rs->pp, (UPPER_BIT_RUBIN & rs->q) ? 1 : 0, 1);
		rs->q &= LOWER_BITS_RUBIN;
		rs->q <<= 1;
	}
}
81 | |||
82 | |||
/* Initialise @rs for decoding: same interval set-up as the encoder,
 * then pre-load the rec_q shift register with the first
 * RUBIN_REG_SIZE bits of the input stream, MSB first. */
static void init_decode(struct rubin_state *rs, int div, int *bits)
{
	init_rubin(rs, div, bits);

	/* "behalve lower" (Dutch: "except lower") */
	rs->rec_q = 0;

	for (rs->bit_number = 0; rs->bit_number++ < RUBIN_REG_SIZE; rs->rec_q = rs->rec_q * 2 + (long) (pullbit(&rs->pp)))
		;
}
93 | |||
/* Renormalise the decoder's (p,q) interval exactly as the encoder's
 * renormalisation loop in encode() does, then pull the same number of
 * bits from the input stream into the rec_q shift register. */
static void __do_decode(struct rubin_state *rs, unsigned long p, unsigned long q)
{
	register unsigned long lower_bits_rubin = LOWER_BITS_RUBIN;
	unsigned long rec_q;
	int c, bits = 0;

	/*
	 * First, work out how many bits we need from the input stream.
	 * Note that we have already done the initial check on this
	 * loop prior to calling this function.
	 */
	do {
		bits++;
		q &= lower_bits_rubin;
		q <<= 1;
		p <<= 1;
	} while ((q >= UPPER_BIT_RUBIN) || ((p + q) <= UPPER_BIT_RUBIN));

	rs->p = p;
	rs->q = q;

	rs->bit_number += bits;

	/*
	 * Now get the bits. We really want this to be "get n bits".
	 */
	rec_q = rs->rec_q;
	do {
		c = pullbit(&rs->pp);
		rec_q &= lower_bits_rubin;
		rec_q <<= 1;
		rec_q += c;
	} while (--bits);
	rs->rec_q = rec_q;
}
129 | |||
/* Decode one binary symbol with probability weights A:B -- the mirror
 * of encode().  Renormalises via __do_decode() when needed, splits the
 * interval the same way the encoder did, and picks the symbol by
 * comparing the received register against the split threshold.
 * Returns the decoded symbol (0 or 1). */
static int decode(struct rubin_state *rs, long A, long B)
{
	unsigned long p = rs->p, q = rs->q;
	long i0, threshold;
	int symbol;

	if (q >= UPPER_BIT_RUBIN || ((p + q) <= UPPER_BIT_RUBIN))
		__do_decode(rs, p, q);

	/* Same clamped A:(A+B) split as encode(), so both sides agree. */
	i0 = A * rs->p / (A + B);
	if (i0 <= 0) {
		i0 = 1;
	}
	if (i0 >= rs->p) {
		i0 = rs->p - 1;
	}

	threshold = rs->q + i0;
	symbol = rs->rec_q >= threshold;
	if (rs->rec_q >= threshold) {
		rs->q += i0;
		i0 = rs->p - i0;
	}

	rs->p = i0;

	return symbol;
}
158 | |||
159 | |||
160 | |||
161 | static int out_byte(struct rubin_state *rs, unsigned char byte) | ||
162 | { | ||
163 | int i, ret; | ||
164 | struct rubin_state rs_copy; | ||
165 | rs_copy = *rs; | ||
166 | |||
167 | for (i=0;i<8;i++) { | ||
168 | ret = encode(rs, rs->bit_divider-rs->bits[i],rs->bits[i],byte&1); | ||
169 | if (ret) { | ||
170 | /* Failed. Restore old state */ | ||
171 | *rs = rs_copy; | ||
172 | return ret; | ||
173 | } | ||
174 | byte=byte>>1; | ||
175 | } | ||
176 | return 0; | ||
177 | } | ||
178 | |||
179 | static int in_byte(struct rubin_state *rs) | ||
180 | { | ||
181 | int i, result = 0, bit_divider = rs->bit_divider; | ||
182 | |||
183 | for (i = 0; i < 8; i++) | ||
184 | result |= decode(rs, bit_divider - rs->bits[i], rs->bits[i]) << i; | ||
185 | |||
186 | return result; | ||
187 | } | ||
188 | |||
189 | |||
190 | |||
191 | static int rubin_do_compress(int bit_divider, int *bits, unsigned char *data_in, | ||
192 | unsigned char *cpage_out, uint32_t *sourcelen, uint32_t *dstlen) | ||
193 | { | ||
194 | int outpos = 0; | ||
195 | int pos=0; | ||
196 | struct rubin_state rs; | ||
197 | |||
198 | init_pushpull(&rs.pp, cpage_out, *dstlen * 8, 0, 32); | ||
199 | |||
200 | init_rubin(&rs, bit_divider, bits); | ||
201 | |||
202 | while (pos < (*sourcelen) && !out_byte(&rs, data_in[pos])) | ||
203 | pos++; | ||
204 | |||
205 | end_rubin(&rs); | ||
206 | |||
207 | if (outpos > pos) { | ||
208 | /* We failed */ | ||
209 | return -1; | ||
210 | } | ||
211 | |||
212 | /* Tell the caller how much we managed to compress, | ||
213 | * and how much space it took */ | ||
214 | |||
215 | outpos = (pushedbits(&rs.pp)+7)/8; | ||
216 | |||
217 | if (outpos >= pos) | ||
218 | return -1; /* We didn't actually compress */ | ||
219 | *sourcelen = pos; | ||
220 | *dstlen = outpos; | ||
221 | return 0; | ||
222 | } | ||
#if 0
/* _compress returns the compressed size, -1 if bigger */
/* Compiled out: only rubinmips *decompression* is supported (the
 * registration below sets .compress = NULL).  Kept for reference. */
int jffs2_rubinmips_compress(unsigned char *data_in, unsigned char *cpage_out,
		   uint32_t *sourcelen, uint32_t *dstlen, void *model)
{
	return rubin_do_compress(BIT_DIVIDER_MIPS, bits_mips, data_in, cpage_out, sourcelen, dstlen);
}
#endif
/*
 * "Dynamic rubin" compressor: derive a per-bit-position probability
 * model from a histogram of the input, store the eight model bytes at
 * the start of the output, then rubin-compress the data after them.
 * Returns 0 on success (updating *sourcelen and *dstlen), -1 if the
 * result would not be smaller than the input.
 */
int jffs2_dynrubin_compress(unsigned char *data_in, unsigned char *cpage_out,
		   uint32_t *sourcelen, uint32_t *dstlen, void *model)
{
	int bits[8];
	unsigned char histo[256];
	int i;
	int ret;
	uint32_t mysrclen, mydstlen;

	mysrclen = *sourcelen;
	mydstlen = *dstlen - 8;	/* reserve 8 bytes for the model */

	/* Need room for the 8 model bytes plus some actual output */
	if (*dstlen <= 12)
		return -1;

	memset(histo, 0, 256);
	/* NOTE(review): histo[] entries are unsigned char and silently wrap
	 * once a byte value occurs more than 255 times; that only skews the
	 * probability model (worse ratio), never corrupts output -- confirm
	 * this is intentional. */
	for (i=0; i<mysrclen; i++) {
		histo[data_in[i]]++;
	}
	memset(bits, 0, sizeof(int)*8);
	/* Count, for each bit position 0..7, how many input bytes have
	 * that bit set. */
	for (i=0; i<256; i++) {
		if (i&128)
			bits[7] += histo[i];
		if (i&64)
			bits[6] += histo[i];
		if (i&32)
			bits[5] += histo[i];
		if (i&16)
			bits[4] += histo[i];
		if (i&8)
			bits[3] += histo[i];
		if (i&4)
			bits[2] += histo[i];
		if (i&2)
			bits[1] += histo[i];
		if (i&1)
			bits[0] += histo[i];
	}

	/* Scale each count to 1..255 (out of a divider of 256) and emit
	 * it as the stored model so the decompressor can rebuild it. */
	for (i=0; i<8; i++) {
		bits[i] = (bits[i] * 256) / mysrclen;
		if (!bits[i]) bits[i] = 1;
		if (bits[i] > 255) bits[i] = 255;
		cpage_out[i] = bits[i];
	}

	ret = rubin_do_compress(256, bits, data_in, cpage_out+8, &mysrclen, &mydstlen);
	if (ret)
		return ret;

	/* Add back the 8 bytes we took for the probabilities */
	mydstlen += 8;

	if (mysrclen <= mydstlen) {
		/* We didn't actually compress once the model overhead
		 * is counted; report failure.  (The old comment here
		 * said the opposite.) */
		return -1;
	}

	*sourcelen = mysrclen;
	*dstlen = mydstlen;
	return 0;
}
293 | |||
294 | static void rubin_do_decompress(int bit_divider, int *bits, unsigned char *cdata_in, | ||
295 | unsigned char *page_out, uint32_t srclen, uint32_t destlen) | ||
296 | { | ||
297 | int outpos = 0; | ||
298 | struct rubin_state rs; | ||
299 | |||
300 | init_pushpull(&rs.pp, cdata_in, srclen, 0, 0); | ||
301 | init_decode(&rs, bit_divider, bits); | ||
302 | |||
303 | while (outpos < destlen) { | ||
304 | page_out[outpos++] = in_byte(&rs); | ||
305 | } | ||
306 | } | ||
307 | |||
308 | |||
/* Decompress a node written with the legacy fixed-model "rubinmips"
 * coder.  Always reports success to the caller. */
int jffs2_rubinmips_decompress(unsigned char *data_in, unsigned char *cpage_out,
		   uint32_t sourcelen, uint32_t dstlen, void *model)
{
	rubin_do_decompress(BIT_DIVIDER_MIPS, bits_mips, data_in, cpage_out, sourcelen, dstlen);
	return 0;
}
315 | |||
/* Decompress a dynrubin node: the first 8 bytes are the stored
 * probability model, the rest is the rubin-coded payload. */
int jffs2_dynrubin_decompress(unsigned char *data_in, unsigned char *cpage_out,
		   uint32_t sourcelen, uint32_t dstlen, void *model)
{
	int bits[8];
	int i;

	/* Rebuild the bit model saved by jffs2_dynrubin_compress() */
	for (i = 0; i < 8; i++)
		bits[i] = data_in[i];

	rubin_do_decompress(256, bits, data_in + 8, cpage_out,
			    sourcelen - 8, dstlen);
	return 0;
}
328 | |||
/* Registration record for the legacy rubinmips coder (decompress only;
 * .compress is deliberately NULL). */
static struct jffs2_compressor jffs2_rubinmips_comp = {
	.priority = JFFS2_RUBINMIPS_PRIORITY,
	.name = "rubinmips",
	/* NOTE(review): this is JFFS2_COMPR_DYNRUBIN while the "dynrubin"
	 * compressor below uses JFFS2_COMPR_RUBINMIPS -- the two constants
	 * look swapped.  Changing them alters which on-flash node types
	 * each handler serves, so verify against compr.h and existing
	 * images before touching. */
	.compr = JFFS2_COMPR_DYNRUBIN,
	.compress = NULL, /*&jffs2_rubinmips_compress,*/
	.decompress = &jffs2_rubinmips_decompress,
#ifdef JFFS2_RUBINMIPS_DISABLED
	.disabled = 1,
#else
	.disabled = 0,
#endif
};

/* Register the rubinmips handler with the JFFS2 compressor core. */
int jffs2_rubinmips_init(void)
{
	return jffs2_register_compressor(&jffs2_rubinmips_comp);
}

/* Unregister the rubinmips handler. */
void jffs2_rubinmips_exit(void)
{
	jffs2_unregister_compressor(&jffs2_rubinmips_comp);
}
351 | |||
/* Registration record for the dynamic-model rubin coder. */
static struct jffs2_compressor jffs2_dynrubin_comp = {
	.priority = JFFS2_DYNRUBIN_PRIORITY,
	.name = "dynrubin",
	/* NOTE(review): JFFS2_COMPR_RUBINMIPS here vs JFFS2_COMPR_DYNRUBIN
	 * in jffs2_rubinmips_comp above looks swapped.  This selects the
	 * on-flash node type, so confirm against compr.h before "fixing". */
	.compr = JFFS2_COMPR_RUBINMIPS,
	.compress = jffs2_dynrubin_compress,
	.decompress = &jffs2_dynrubin_decompress,
#ifdef JFFS2_DYNRUBIN_DISABLED
	.disabled = 1,
#else
	.disabled = 0,
#endif
};

/* Register the dynrubin handler with the JFFS2 compressor core. */
int jffs2_dynrubin_init(void)
{
	return jffs2_register_compressor(&jffs2_dynrubin_comp);
}

/* Unregister the dynrubin handler. */
void jffs2_dynrubin_exit(void)
{
	jffs2_unregister_compressor(&jffs2_dynrubin_comp);
}
diff --git a/fs/jffs2/compr_rubin.h b/fs/jffs2/compr_rubin.h new file mode 100644 index 000000000000..cf51e34f6574 --- /dev/null +++ b/fs/jffs2/compr_rubin.h | |||
@@ -0,0 +1,21 @@ | |||
/* Rubin encoder/decoder header */
/* work started at : aug 3, 1994 */
/* last modification : aug 15, 1994 */
/* $Id: compr_rubin.h,v 1.6 2002/01/25 01:49:26 dwmw2 Exp $ */

#include "pushpull.h"

/* Width, in bits, of the arithmetic coder's working register. */
#define RUBIN_REG_SIZE 16
/* Top bit of the working register. */
#define UPPER_BIT_RUBIN (((long) 1)<<(RUBIN_REG_SIZE-1))
/* Mask of all register bits below the top one. */
#define LOWER_BITS_RUBIN ((((long) 1)<<(RUBIN_REG_SIZE-1))-1)


/* State of one Rubin arithmetic-coding stream (see compr_rubin.c). */
struct rubin_state {
	unsigned long p;	/* coder interval state */
	unsigned long q;	/* coder interval state */
	unsigned long rec_q;	/* decoder: window of recently pulled bits */
	long bit_number;
	struct pushpull pp;	/* bit-level cursor over the in/out buffer */
	int bit_divider;	/* denominator for the bits[] weights */
	int bits[8];		/* per-bit-position probability weights */
};
diff --git a/fs/jffs2/compr_zlib.c b/fs/jffs2/compr_zlib.c new file mode 100644 index 000000000000..9f9932c22adb --- /dev/null +++ b/fs/jffs2/compr_zlib.c | |||
@@ -0,0 +1,218 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2001-2003 Red Hat, Inc. | ||
5 | * | ||
6 | * Created by David Woodhouse <dwmw2@infradead.org> | ||
7 | * | ||
8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
9 | * | ||
10 | * $Id: compr_zlib.c,v 1.29 2004/11/16 20:36:11 dwmw2 Exp $ | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #if !defined(__KERNEL__) && !defined(__ECOS) | ||
15 | #error "The userspace support got too messy and was removed. Update your mkfs.jffs2" | ||
16 | #endif | ||
17 | |||
18 | #include <linux/config.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/slab.h> | ||
21 | #include <linux/zlib.h> | ||
22 | #include <linux/zutil.h> | ||
23 | #include <asm/semaphore.h> | ||
24 | #include "nodelist.h" | ||
25 | #include "compr.h" | ||
26 | |||
27 | /* Plan: call deflate() with avail_in == *sourcelen, | ||
28 | avail_out = *dstlen - 12 and flush == Z_FINISH. | ||
29 | If it doesn't manage to finish, call it again with | ||
30 | avail_in == 0 and avail_out set to the remaining 12 | ||
31 | bytes for it to clean up. | ||
32 | Q: Is 12 bytes sufficient? | ||
33 | */ | ||
/* Output space reserved for zlib's stream trailer (see plan above). */
#define STREAM_END_SPACE 12

/* One shared z_stream per direction, each serialized by its own
 * semaphore; the workspaces are attached in alloc_workspaces(). */
static DECLARE_MUTEX(deflate_sem);
static DECLARE_MUTEX(inflate_sem);
static z_stream inf_strm, def_strm;
39 | |||
40 | #ifdef __KERNEL__ /* Linux-only */ | ||
41 | #include <linux/vmalloc.h> | ||
42 | #include <linux/init.h> | ||
43 | |||
44 | static int __init alloc_workspaces(void) | ||
45 | { | ||
46 | def_strm.workspace = vmalloc(zlib_deflate_workspacesize()); | ||
47 | if (!def_strm.workspace) { | ||
48 | printk(KERN_WARNING "Failed to allocate %d bytes for deflate workspace\n", zlib_deflate_workspacesize()); | ||
49 | return -ENOMEM; | ||
50 | } | ||
51 | D1(printk(KERN_DEBUG "Allocated %d bytes for deflate workspace\n", zlib_deflate_workspacesize())); | ||
52 | inf_strm.workspace = vmalloc(zlib_inflate_workspacesize()); | ||
53 | if (!inf_strm.workspace) { | ||
54 | printk(KERN_WARNING "Failed to allocate %d bytes for inflate workspace\n", zlib_inflate_workspacesize()); | ||
55 | vfree(def_strm.workspace); | ||
56 | return -ENOMEM; | ||
57 | } | ||
58 | D1(printk(KERN_DEBUG "Allocated %d bytes for inflate workspace\n", zlib_inflate_workspacesize())); | ||
59 | return 0; | ||
60 | } | ||
61 | |||
/* Release both zlib workspaces allocated by alloc_workspaces(). */
static void free_workspaces(void)
{
	vfree(def_strm.workspace);
	vfree(inf_strm.workspace);
}
#else
/* Non-kernel (eCos) build: no workspaces to manage. */
#define alloc_workspaces() (0)
#define free_workspaces() do { } while(0)
#endif /* __KERNEL__ */
71 | |||
/*
 * Compress up to *sourcelen bytes from data_in into cpage_out with zlib
 * deflate (level 3).  On success returns 0, updating *sourcelen (bytes
 * consumed) and *dstlen (compressed size); returns -1 on any failure,
 * including "output not smaller than input".  The single shared
 * def_strm is serialized by deflate_sem.
 */
int jffs2_zlib_compress(unsigned char *data_in, unsigned char *cpage_out,
		   uint32_t *sourcelen, uint32_t *dstlen, void *model)
{
	int ret;

	/* Must be able to reserve the Z_FINISH trailer space */
	if (*dstlen <= STREAM_END_SPACE)
		return -1;

	down(&deflate_sem);

	if (Z_OK != zlib_deflateInit(&def_strm, 3)) {
		printk(KERN_WARNING "deflateInit failed\n");
		up(&deflate_sem);
		return -1;
	}

	def_strm.next_in = data_in;
	def_strm.total_in = 0;

	def_strm.next_out = cpage_out;
	def_strm.total_out = 0;

	/* Feed input in chunks, never letting output grow into the
	 * reserved STREAM_END_SPACE trailer area. */
	while (def_strm.total_out < *dstlen - STREAM_END_SPACE && def_strm.total_in < *sourcelen) {
		def_strm.avail_out = *dstlen - (def_strm.total_out + STREAM_END_SPACE);
		/* Offer no more input than there is output space left */
		def_strm.avail_in = min((unsigned)(*sourcelen-def_strm.total_in), def_strm.avail_out);
		D1(printk(KERN_DEBUG "calling deflate with avail_in %d, avail_out %d\n",
			  def_strm.avail_in, def_strm.avail_out));
		ret = zlib_deflate(&def_strm, Z_PARTIAL_FLUSH);
		D1(printk(KERN_DEBUG "deflate returned with avail_in %d, avail_out %d, total_in %ld, total_out %ld\n",
			  def_strm.avail_in, def_strm.avail_out, def_strm.total_in, def_strm.total_out));
		if (ret != Z_OK) {
			D1(printk(KERN_DEBUG "deflate in loop returned %d\n", ret));
			zlib_deflateEnd(&def_strm);
			up(&deflate_sem);
			return -1;
		}
	}
	/* Hand back the reserved trailer space and let deflate finish */
	def_strm.avail_out += STREAM_END_SPACE;
	def_strm.avail_in = 0;
	ret = zlib_deflate(&def_strm, Z_FINISH);
	zlib_deflateEnd(&def_strm);

	if (ret != Z_STREAM_END) {
		D1(printk(KERN_DEBUG "final deflate returned %d\n", ret));
		ret = -1;
		goto out;
	}

	if (def_strm.total_out >= def_strm.total_in) {
		/* Output didn't shrink: report failure so the caller can
		 * store the data uncompressed instead. */
		D1(printk(KERN_DEBUG "zlib compressed %ld bytes into %ld; failing\n",
			  def_strm.total_in, def_strm.total_out));
		ret = -1;
		goto out;
	}

	D1(printk(KERN_DEBUG "zlib compressed %ld bytes into %ld\n",
		  def_strm.total_in, def_strm.total_out));

	*dstlen = def_strm.total_out;
	*sourcelen = def_strm.total_in;
	ret = 0;
 out:
	up(&deflate_sem);
	return ret;
}
137 | |||
/*
 * Inflate srclen bytes from data_in into cpage_out (up to destlen
 * bytes).  The shared inf_strm is serialized by inflate_sem.
 * NOTE(review): returns 1 only when inflateInit fails; a corrupt or
 * short stream is merely logged and still returns 0 -- confirm callers
 * tolerate this before relying on the return value.
 */
int jffs2_zlib_decompress(unsigned char *data_in, unsigned char *cpage_out,
		   uint32_t srclen, uint32_t destlen, void *model)
{
	int ret;
	int wbits = MAX_WBITS;

	down(&inflate_sem);

	inf_strm.next_in = data_in;
	inf_strm.avail_in = srclen;
	inf_strm.total_in = 0;

	inf_strm.next_out = cpage_out;
	inf_strm.avail_out = destlen;
	inf_strm.total_out = 0;

	/* If it's deflate, and it's got no preset dictionary, then
	   we can tell zlib to skip the adler32 check. */
	if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
	    ((data_in[0] & 0x0f) == Z_DEFLATED) &&
	    !(((data_in[0]<<8) + data_in[1]) % 31)) {

		D2(printk(KERN_DEBUG "inflate skipping adler32\n"));
		/* Negative windowBits = raw deflate, so skip the 2-byte
		 * zlib header we just validated above. */
		wbits = -((data_in[0] >> 4) + 8);
		inf_strm.next_in += 2;
		inf_strm.avail_in -= 2;
	} else {
		/* Let this remain D1 for now -- it should never happen */
		D1(printk(KERN_DEBUG "inflate not skipping adler32\n"));
	}


	if (Z_OK != zlib_inflateInit2(&inf_strm, wbits)) {
		printk(KERN_WARNING "inflateInit failed\n");
		up(&inflate_sem);
		return 1;
	}

	/* Drain the whole stream; Z_OK means "call me again" */
	while((ret = zlib_inflate(&inf_strm, Z_FINISH)) == Z_OK)
		;
	if (ret != Z_STREAM_END) {
		printk(KERN_NOTICE "inflate returned %d\n", ret);
	}
	zlib_inflateEnd(&inf_strm);
	up(&inflate_sem);
	return 0;
}
185 | |||
/* Registration record for the zlib compressor. */
static struct jffs2_compressor jffs2_zlib_comp = {
	.priority = JFFS2_ZLIB_PRIORITY,
	.name = "zlib",
	.compr = JFFS2_COMPR_ZLIB,
	.compress = &jffs2_zlib_compress,
	.decompress = &jffs2_zlib_decompress,
#ifdef JFFS2_ZLIB_DISABLED
	.disabled = 1,
#else
	.disabled = 0,
#endif
};
198 | |||
199 | int __init jffs2_zlib_init(void) | ||
200 | { | ||
201 | int ret; | ||
202 | |||
203 | ret = alloc_workspaces(); | ||
204 | if (ret) | ||
205 | return ret; | ||
206 | |||
207 | ret = jffs2_register_compressor(&jffs2_zlib_comp); | ||
208 | if (ret) | ||
209 | free_workspaces(); | ||
210 | |||
211 | return ret; | ||
212 | } | ||
213 | |||
/* Unregister the zlib compressor and release its workspaces. */
void jffs2_zlib_exit(void)
{
	jffs2_unregister_compressor(&jffs2_zlib_comp);
	free_workspaces();
}
diff --git a/fs/jffs2/comprtest.c b/fs/jffs2/comprtest.c new file mode 100644 index 000000000000..cf51f091d0e7 --- /dev/null +++ b/fs/jffs2/comprtest.c | |||
@@ -0,0 +1,307 @@ | |||
1 | /* $Id: comprtest.c,v 1.5 2002/01/03 15:20:44 dwmw2 Exp $ */ | ||
2 | |||
3 | #include <linux/kernel.h> | ||
4 | #include <linux/string.h> | ||
5 | #include <linux/module.h> | ||
6 | #include <asm/types.h> | ||
7 | #if 0 | ||
8 | #define TESTDATA_LEN 512 | ||
9 | static unsigned char testdata[TESTDATA_LEN] = { | ||
10 | 0x7f, 0x45, 0x4c, 0x46, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
11 | 0x02, 0x00, 0x03, 0x00, 0x01, 0x00, 0x00, 0x00, 0x60, 0x83, 0x04, 0x08, 0x34, 0x00, 0x00, 0x00, | ||
12 | 0xb0, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x00, 0x20, 0x00, 0x06, 0x00, 0x28, 0x00, | ||
13 | 0x1e, 0x00, 0x1b, 0x00, 0x06, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x34, 0x80, 0x04, 0x08, | ||
14 | 0x34, 0x80, 0x04, 0x08, 0xc0, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, | ||
15 | 0x04, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xf4, 0x00, 0x00, 0x00, 0xf4, 0x80, 0x04, 0x08, | ||
16 | 0xf4, 0x80, 0x04, 0x08, 0x13, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, | ||
17 | 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x04, 0x08, | ||
18 | 0x00, 0x80, 0x04, 0x08, 0x0d, 0x05, 0x00, 0x00, 0x0d, 0x05, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, | ||
19 | 0x00, 0x10, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x05, 0x00, 0x00, 0x10, 0x95, 0x04, 0x08, | ||
20 | 0x10, 0x95, 0x04, 0x08, 0xe8, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, | ||
21 | 0x00, 0x10, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x58, 0x05, 0x00, 0x00, 0x58, 0x95, 0x04, 0x08, | ||
22 | 0x58, 0x95, 0x04, 0x08, 0xa0, 0x00, 0x00, 0x00, 0xa0, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, | ||
23 | 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x08, 0x01, 0x00, 0x00, 0x08, 0x81, 0x04, 0x08, | ||
24 | 0x08, 0x81, 0x04, 0x08, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, | ||
25 | 0x04, 0x00, 0x00, 0x00, 0x2f, 0x6c, 0x69, 0x62, 0x2f, 0x6c, 0x64, 0x2d, 0x6c, 0x69, 0x6e, 0x75, | ||
26 | 0x78, 0x2e, 0x73, 0x6f, 0x2e, 0x32, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, | ||
27 | 0x01, 0x00, 0x00, 0x00, 0x47, 0x4e, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, | ||
28 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, | ||
29 | 0x07, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
30 | 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
31 | 0x04, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
32 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x69, 0x00, 0x00, 0x00, | ||
33 | 0x0c, 0x83, 0x04, 0x08, 0x81, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, | ||
34 | 0x1c, 0x83, 0x04, 0x08, 0xac, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x57, 0x00, 0x00, 0x00, | ||
35 | 0x2c, 0x83, 0x04, 0x08, 0xdd, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x1a, 0x00, 0x00, 0x00, | ||
36 | 0x3c, 0x83, 0x04, 0x08, 0x2e, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, | ||
37 | 0x4c, 0x83, 0x04, 0x08, 0x7d, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, | ||
38 | 0x00, 0x85, 0x04, 0x08, 0x04, 0x00, 0x00, 0x00, 0x11, 0x00, 0x0e, 0x00, 0x01, 0x00, 0x00, 0x00, | ||
39 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x5f, 0x5f, 0x67, | ||
40 | 0x6d, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x5f, 0x00, 0x6c, 0x69, 0x62, 0x63, | ||
41 | 0x2e, 0x73, 0x6f, 0x2e, 0x36, 0x00, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x00, 0x5f, 0x5f, 0x63}; | ||
42 | #else | ||
43 | #define TESTDATA_LEN 3481 | ||
44 | static unsigned char testdata[TESTDATA_LEN] = { | ||
45 | 0x23, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x20, 0x22, 0x64, 0x62, 0x65, 0x6e, 0x63, 0x68, | ||
46 | 0x2e, 0x68, 0x22, 0x0a, 0x0a, 0x23, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x20, 0x4d, 0x41, 0x58, | ||
47 | 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53, 0x20, 0x31, 0x30, 0x30, 0x30, 0x0a, 0x0a, 0x73, 0x74, 0x61, | ||
48 | 0x74, 0x69, 0x63, 0x20, 0x63, 0x68, 0x61, 0x72, 0x20, 0x62, 0x75, 0x66, 0x5b, 0x37, 0x30, 0x30, | ||
49 | 0x30, 0x30, 0x5d, 0x3b, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x20, 0x69, 0x6e, 0x74, 0x20, | ||
50 | 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x3b, 0x0a, 0x0a, 0x73, 0x74, 0x61, | ||
51 | 0x74, 0x69, 0x63, 0x20, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x20, 0x7b, 0x0a, 0x09, 0x69, 0x6e, | ||
52 | 0x74, 0x20, 0x66, 0x64, 0x3b, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, | ||
53 | 0x65, 0x3b, 0x0a, 0x7d, 0x20, 0x66, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5b, 0x4d, 0x41, 0x58, 0x5f, | ||
54 | 0x46, 0x49, 0x4c, 0x45, 0x53, 0x5d, 0x3b, 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, | ||
55 | 0x5f, 0x75, 0x6e, 0x6c, 0x69, 0x6e, 0x6b, 0x28, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x66, 0x6e, | ||
56 | 0x61, 0x6d, 0x65, 0x29, 0x0a, 0x7b, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x75, 0x70, 0x70, 0x65, 0x72, | ||
57 | 0x28, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x29, 0x3b, 0x0a, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x75, | ||
58 | 0x6e, 0x6c, 0x69, 0x6e, 0x6b, 0x28, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x29, 0x20, 0x21, 0x3d, 0x20, | ||
59 | 0x30, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x28, | ||
60 | 0x25, 0x64, 0x29, 0x20, 0x75, 0x6e, 0x6c, 0x69, 0x6e, 0x6b, 0x20, 0x25, 0x73, 0x20, 0x66, 0x61, | ||
61 | 0x69, 0x6c, 0x65, 0x64, 0x20, 0x28, 0x25, 0x73, 0x29, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x0a, 0x09, | ||
62 | 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x75, | ||
63 | 0x6e, 0x74, 0x2c, 0x20, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x73, 0x74, 0x72, 0x65, 0x72, | ||
64 | 0x72, 0x6f, 0x72, 0x28, 0x65, 0x72, 0x72, 0x6e, 0x6f, 0x29, 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, | ||
65 | 0x7d, 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x64, 0x5f, 0x66, | ||
66 | 0x69, 0x6c, 0x65, 0x28, 0x69, 0x6e, 0x74, 0x20, 0x66, 0x64, 0x2c, 0x20, 0x69, 0x6e, 0x74, 0x20, | ||
67 | 0x73, 0x69, 0x7a, 0x65, 0x29, 0x0a, 0x7b, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x20, 0x73, 0x3b, 0x0a, | ||
68 | 0x09, 0x77, 0x68, 0x69, 0x6c, 0x65, 0x20, 0x28, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x20, 0x7b, 0x0a, | ||
69 | 0x09, 0x09, 0x73, 0x20, 0x3d, 0x20, 0x4d, 0x49, 0x4e, 0x28, 0x73, 0x69, 0x7a, 0x65, 0x6f, 0x66, | ||
70 | 0x28, 0x62, 0x75, 0x66, 0x29, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x3b, 0x0a, 0x09, 0x09, | ||
71 | 0x77, 0x72, 0x69, 0x74, 0x65, 0x28, 0x66, 0x64, 0x2c, 0x20, 0x62, 0x75, 0x66, 0x2c, 0x20, 0x73, | ||
72 | 0x29, 0x3b, 0x0a, 0x09, 0x09, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x2d, 0x3d, 0x20, 0x73, 0x3b, 0x0a, | ||
73 | 0x09, 0x7d, 0x0a, 0x7d, 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, 0x5f, 0x6f, 0x70, | ||
74 | 0x65, 0x6e, 0x28, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, | ||
75 | 0x69, 0x6e, 0x74, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x2c, 0x20, 0x69, 0x6e, 0x74, 0x20, | ||
76 | 0x73, 0x69, 0x7a, 0x65, 0x29, 0x0a, 0x7b, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x20, 0x66, 0x64, 0x2c, | ||
77 | 0x20, 0x69, 0x3b, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x20, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x20, 0x3d, | ||
78 | 0x20, 0x4f, 0x5f, 0x52, 0x44, 0x57, 0x52, 0x7c, 0x4f, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x3b, | ||
79 | 0x0a, 0x09, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x20, 0x73, 0x74, 0x61, 0x74, 0x20, 0x73, 0x74, | ||
80 | 0x3b, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x20, 0x69, 0x6e, 0x74, 0x20, 0x63, 0x6f, | ||
81 | 0x75, 0x6e, 0x74, 0x3b, 0x0a, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x75, 0x70, 0x70, 0x65, 0x72, 0x28, | ||
82 | 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x29, 0x3b, 0x0a, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x73, 0x69, | ||
83 | 0x7a, 0x65, 0x20, 0x3d, 0x3d, 0x20, 0x30, 0x29, 0x20, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x20, 0x7c, | ||
84 | 0x3d, 0x20, 0x4f, 0x5f, 0x54, 0x52, 0x55, 0x4e, 0x43, 0x3b, 0x0a, 0x0a, 0x09, 0x66, 0x64, 0x20, | ||
85 | 0x3d, 0x20, 0x6f, 0x70, 0x65, 0x6e, 0x28, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x66, 0x6c, | ||
86 | 0x61, 0x67, 0x73, 0x2c, 0x20, 0x30, 0x36, 0x30, 0x30, 0x29, 0x3b, 0x0a, 0x09, 0x69, 0x66, 0x20, | ||
87 | 0x28, 0x66, 0x64, 0x20, 0x3d, 0x3d, 0x20, 0x2d, 0x31, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, | ||
88 | 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x28, 0x25, 0x64, 0x29, 0x20, 0x6f, 0x70, 0x65, 0x6e, | ||
89 | 0x20, 0x25, 0x73, 0x20, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x68, | ||
90 | 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x25, 0x64, 0x20, 0x28, 0x25, 0x73, 0x29, 0x5c, 0x6e, 0x22, | ||
91 | 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x69, 0x6e, 0x65, | ||
92 | 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x2c, 0x20, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x68, | ||
93 | 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x2c, 0x20, 0x73, 0x74, 0x72, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x28, | ||
94 | 0x65, 0x72, 0x72, 0x6e, 0x6f, 0x29, 0x29, 0x3b, 0x0a, 0x09, 0x09, 0x72, 0x65, 0x74, 0x75, 0x72, | ||
95 | 0x6e, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x09, 0x66, 0x73, 0x74, 0x61, 0x74, 0x28, 0x66, 0x64, 0x2c, | ||
96 | 0x20, 0x26, 0x73, 0x74, 0x29, 0x3b, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x73, 0x69, 0x7a, 0x65, | ||
97 | 0x20, 0x3e, 0x20, 0x73, 0x74, 0x2e, 0x73, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x20, 0x7b, | ||
98 | 0x0a, 0x23, 0x69, 0x66, 0x20, 0x44, 0x45, 0x42, 0x55, 0x47, 0x0a, 0x09, 0x09, 0x70, 0x72, 0x69, | ||
99 | 0x6e, 0x74, 0x66, 0x28, 0x22, 0x28, 0x25, 0x64, 0x29, 0x20, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x64, | ||
100 | 0x69, 0x6e, 0x67, 0x20, 0x25, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x25, 0x64, 0x20, 0x66, 0x72, 0x6f, | ||
101 | 0x6d, 0x20, 0x25, 0x64, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20, | ||
102 | 0x20, 0x20, 0x20, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x2c, 0x20, 0x66, | ||
103 | 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x2c, 0x20, 0x28, 0x69, 0x6e, 0x74, | ||
104 | 0x29, 0x73, 0x74, 0x2e, 0x73, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x3b, 0x0a, 0x23, 0x65, | ||
105 | 0x6e, 0x64, 0x69, 0x66, 0x0a, 0x09, 0x09, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x64, 0x5f, 0x66, 0x69, | ||
106 | 0x6c, 0x65, 0x28, 0x66, 0x64, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x74, | ||
107 | 0x2e, 0x73, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x20, 0x65, 0x6c, | ||
108 | 0x73, 0x65, 0x20, 0x69, 0x66, 0x20, 0x28, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x3c, 0x20, 0x73, 0x74, | ||
109 | 0x2e, 0x73, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72, | ||
110 | 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x69, 0x6e, 0x67, | ||
111 | 0x20, 0x25, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x25, 0x64, 0x20, 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x25, | ||
112 | 0x64, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, | ||
113 | 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x2c, 0x20, 0x28, 0x69, 0x6e, | ||
114 | 0x74, 0x29, 0x73, 0x74, 0x2e, 0x73, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x3b, 0x0a, 0x09, | ||
115 | 0x09, 0x66, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x28, 0x66, 0x64, 0x2c, 0x20, 0x73, | ||
116 | 0x69, 0x7a, 0x65, 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x09, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x69, | ||
117 | 0x3d, 0x30, 0x3b, 0x69, 0x3c, 0x4d, 0x41, 0x58, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53, 0x3b, 0x69, | ||
118 | 0x2b, 0x2b, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x69, 0x66, 0x20, 0x28, 0x66, 0x74, 0x61, 0x62, | ||
119 | 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x3d, 0x3d, 0x20, | ||
120 | 0x30, 0x29, 0x20, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x09, 0x69, 0x66, | ||
121 | 0x20, 0x28, 0x69, 0x20, 0x3d, 0x3d, 0x20, 0x4d, 0x41, 0x58, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53, | ||
122 | 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x66, 0x69, | ||
123 | 0x6c, 0x65, 0x20, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x66, 0x75, 0x6c, 0x6c, 0x20, 0x66, 0x6f, | ||
124 | 0x72, 0x20, 0x25, 0x73, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x29, 0x3b, | ||
125 | 0x0a, 0x09, 0x09, 0x65, 0x78, 0x69, 0x74, 0x28, 0x31, 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x09, | ||
126 | 0x66, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, | ||
127 | 0x20, 0x3d, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x3b, 0x0a, 0x09, 0x66, 0x74, 0x61, 0x62, | ||
128 | 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x66, 0x64, 0x20, 0x3d, 0x20, 0x66, 0x64, 0x3b, 0x0a, 0x09, | ||
129 | 0x69, 0x66, 0x20, 0x28, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x2b, 0x2b, 0x20, 0x25, 0x20, 0x31, 0x30, | ||
130 | 0x30, 0x20, 0x3d, 0x3d, 0x20, 0x30, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72, 0x69, 0x6e, | ||
131 | 0x74, 0x66, 0x28, 0x22, 0x2e, 0x22, 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x7d, 0x0a, 0x0a, 0x76, | ||
132 | 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x28, 0x69, 0x6e, 0x74, | ||
133 | 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x2c, 0x20, 0x69, 0x6e, 0x74, 0x20, 0x73, 0x69, 0x7a, | ||
134 | 0x65, 0x2c, 0x20, 0x69, 0x6e, 0x74, 0x20, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x29, 0x0a, 0x7b, | ||
135 | 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x20, 0x69, 0x3b, 0x0a, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x62, | ||
136 | 0x75, 0x66, 0x5b, 0x30, 0x5d, 0x20, 0x3d, 0x3d, 0x20, 0x30, 0x29, 0x20, 0x6d, 0x65, 0x6d, 0x73, | ||
137 | 0x65, 0x74, 0x28, 0x62, 0x75, 0x66, 0x2c, 0x20, 0x31, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x6f, | ||
138 | 0x66, 0x28, 0x62, 0x75, 0x66, 0x29, 0x29, 0x3b, 0x0a, 0x0a, 0x09, 0x66, 0x6f, 0x72, 0x20, 0x28, | ||
139 | 0x69, 0x3d, 0x30, 0x3b, 0x69, 0x3c, 0x4d, 0x41, 0x58, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53, 0x3b, | ||
140 | 0x69, 0x2b, 0x2b, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x69, 0x66, 0x20, 0x28, 0x66, 0x74, 0x61, | ||
141 | 0x62, 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x3d, 0x3d, | ||
142 | 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x29, 0x20, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x3b, 0x0a, | ||
143 | 0x09, 0x7d, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x69, 0x20, 0x3d, 0x3d, 0x20, 0x4d, 0x41, 0x58, | ||
144 | 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53, 0x29, 0x20, 0x7b, 0x0a, 0x23, 0x69, 0x66, 0x20, 0x31, 0x0a, | ||
145 | 0x09, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x28, 0x25, 0x64, 0x29, 0x20, 0x64, | ||
146 | 0x6f, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x3a, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, | ||
147 | 0x25, 0x64, 0x20, 0x77, 0x61, 0x73, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x6f, 0x70, 0x65, 0x6e, 0x20, | ||
148 | 0x73, 0x69, 0x7a, 0x65, 0x3d, 0x25, 0x64, 0x20, 0x6f, 0x66, 0x73, 0x3d, 0x25, 0x64, 0x5c, 0x6e, | ||
149 | 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x69, 0x6e, | ||
150 | 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x2c, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x2c, | ||
151 | 0x20, 0x73, 0x69, 0x7a, 0x65, 0x2c, 0x20, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x29, 0x3b, 0x0a, | ||
152 | 0x23, 0x65, 0x6e, 0x64, 0x69, 0x66, 0x0a, 0x09, 0x09, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x3b, | ||
153 | 0x0a, 0x09, 0x7d, 0x0a, 0x09, 0x6c, 0x73, 0x65, 0x65, 0x6b, 0x28, 0x66, 0x74, 0x61, 0x62, 0x6c, | ||
154 | 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x66, 0x64, 0x2c, 0x20, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x2c, | ||
155 | 0x20, 0x53, 0x45, 0x45, 0x4b, 0x5f, 0x53, 0x45, 0x54, 0x29, 0x3b, 0x0a, 0x09, 0x69, 0x66, 0x20, | ||
156 | 0x28, 0x77, 0x72, 0x69, 0x74, 0x65, 0x28, 0x66, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5b, 0x69, 0x5d, | ||
157 | 0x2e, 0x66, 0x64, 0x2c, 0x20, 0x62, 0x75, 0x66, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x20, | ||
158 | 0x21, 0x3d, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72, 0x69, | ||
159 | 0x6e, 0x74, 0x66, 0x28, 0x22, 0x77, 0x72, 0x69, 0x74, 0x65, 0x20, 0x66, 0x61, 0x69, 0x6c, 0x65, | ||
160 | 0x64, 0x20, 0x6f, 0x6e, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x25, 0x64, 0x5c, 0x6e, | ||
161 | 0x22, 0x2c, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x7d, | ||
162 | 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x28, 0x69, | ||
163 | 0x6e, 0x74, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x2c, 0x20, 0x69, 0x6e, 0x74, 0x20, 0x73, | ||
164 | 0x69, 0x7a, 0x65, 0x2c, 0x20, 0x69, 0x6e, 0x74, 0x20, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x29, | ||
165 | 0x0a, 0x7b, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x20, 0x69, 0x3b, 0x0a, 0x09, 0x66, 0x6f, 0x72, 0x20, | ||
166 | 0x28, 0x69, 0x3d, 0x30, 0x3b, 0x69, 0x3c, 0x4d, 0x41, 0x58, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53, | ||
167 | 0x3b, 0x69, 0x2b, 0x2b, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x69, 0x66, 0x20, 0x28, 0x66, 0x74, | ||
168 | 0x61, 0x62, 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x3d, | ||
169 | 0x3d, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x29, 0x20, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x3b, | ||
170 | 0x0a, 0x09, 0x7d, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x69, 0x20, 0x3d, 0x3d, 0x20, 0x4d, 0x41, | ||
171 | 0x58, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72, 0x69, | ||
172 | 0x6e, 0x74, 0x66, 0x28, 0x22, 0x28, 0x25, 0x64, 0x29, 0x20, 0x64, 0x6f, 0x5f, 0x72, 0x65, 0x61, | ||
173 | 0x64, 0x3a, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x25, 0x64, 0x20, 0x77, 0x61, 0x73, | ||
174 | 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x6f, 0x70, 0x65, 0x6e, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x3d, 0x25, | ||
175 | 0x64, 0x20, 0x6f, 0x66, 0x73, 0x3d, 0x25, 0x64, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09, | ||
176 | 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, | ||
177 | 0x74, 0x2c, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x2c, | ||
178 | 0x20, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x29, 0x3b, 0x0a, 0x09, 0x09, 0x72, 0x65, 0x74, 0x75, | ||
179 | 0x72, 0x6e, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x09, 0x6c, 0x73, 0x65, 0x65, 0x6b, 0x28, 0x66, 0x74, | ||
180 | 0x61, 0x62, 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x66, 0x64, 0x2c, 0x20, 0x6f, 0x66, 0x66, 0x73, | ||
181 | 0x65, 0x74, 0x2c, 0x20, 0x53, 0x45, 0x45, 0x4b, 0x5f, 0x53, 0x45, 0x54, 0x29, 0x3b, 0x0a, 0x09, | ||
182 | 0x72, 0x65, 0x61, 0x64, 0x28, 0x66, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x66, | ||
183 | 0x64, 0x2c, 0x20, 0x62, 0x75, 0x66, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x3b, 0x0a, 0x7d, | ||
184 | 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x28, | ||
185 | 0x69, 0x6e, 0x74, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x29, 0x0a, 0x7b, 0x0a, 0x09, 0x69, | ||
186 | 0x6e, 0x74, 0x20, 0x69, 0x3b, 0x0a, 0x09, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x69, 0x3d, 0x30, 0x3b, | ||
187 | 0x69, 0x3c, 0x4d, 0x41, 0x58, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53, 0x3b, 0x69, 0x2b, 0x2b, 0x29, | ||
188 | 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x69, 0x66, 0x20, 0x28, 0x66, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5b, | ||
189 | 0x69, 0x5d, 0x2e, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x3d, 0x3d, 0x20, 0x68, 0x61, 0x6e, | ||
190 | 0x64, 0x6c, 0x65, 0x29, 0x20, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x09, | ||
191 | 0x69, 0x66, 0x20, 0x28, 0x69, 0x20, 0x3d, 0x3d, 0x20, 0x4d, 0x41, 0x58, 0x5f, 0x46, 0x49, 0x4c, | ||
192 | 0x45, 0x53, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, | ||
193 | 0x28, 0x25, 0x64, 0x29, 0x20, 0x64, 0x6f, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x3a, 0x20, 0x68, | ||
194 | 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x25, 0x64, 0x20, 0x77, 0x61, 0x73, 0x20, 0x6e, 0x6f, 0x74, | ||
195 | 0x20, 0x6f, 0x70, 0x65, 0x6e, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, | ||
196 | 0x20, 0x20, 0x20, 0x20, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x2c, 0x20, | ||
197 | 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x29, 0x3b, 0x0a, 0x09, 0x09, 0x72, 0x65, 0x74, 0x75, 0x72, | ||
198 | 0x6e, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x09, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x28, 0x66, 0x74, 0x61, | ||
199 | 0x62, 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x66, 0x64, 0x29, 0x3b, 0x0a, 0x09, 0x66, 0x74, 0x61, | ||
200 | 0x62, 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x3d, 0x20, | ||
201 | 0x30, 0x3b, 0x0a, 0x7d, 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, 0x5f, 0x6d, 0x6b, | ||
202 | 0x64, 0x69, 0x72, 0x28, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x29, | ||
203 | 0x0a, 0x7b, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x75, 0x70, 0x70, 0x65, 0x72, 0x28, 0x66, 0x6e, 0x61, | ||
204 | 0x6d, 0x65, 0x29, 0x3b, 0x0a, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x6d, 0x6b, 0x64, 0x69, 0x72, | ||
205 | 0x28, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x30, 0x37, 0x30, 0x30, 0x29, 0x20, 0x21, 0x3d, | ||
206 | 0x20, 0x30, 0x29, 0x20, 0x7b, 0x0a, 0x23, 0x69, 0x66, 0x20, 0x44, 0x45, 0x42, 0x55, 0x47, 0x0a, | ||
207 | 0x09, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x6d, 0x6b, 0x64, 0x69, 0x72, 0x20, | ||
208 | 0x25, 0x73, 0x20, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x20, 0x28, 0x25, 0x73, 0x29, 0x5c, 0x6e, | ||
209 | 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6e, 0x61, | ||
210 | 0x6d, 0x65, 0x2c, 0x20, 0x73, 0x74, 0x72, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x28, 0x65, 0x72, 0x72, | ||
211 | 0x6e, 0x6f, 0x29, 0x29, 0x3b, 0x0a, 0x23, 0x65, 0x6e, 0x64, 0x69, 0x66, 0x0a, 0x09, 0x7d, 0x0a, | ||
212 | 0x7d, 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, 0x5f, 0x72, 0x6d, 0x64, 0x69, 0x72, | ||
213 | 0x28, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x29, 0x0a, 0x7b, 0x0a, | ||
214 | 0x09, 0x73, 0x74, 0x72, 0x75, 0x70, 0x70, 0x65, 0x72, 0x28, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x29, | ||
215 | 0x3b, 0x0a, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x72, 0x6d, 0x64, 0x69, 0x72, 0x28, 0x66, 0x6e, | ||
216 | 0x61, 0x6d, 0x65, 0x29, 0x20, 0x21, 0x3d, 0x20, 0x30, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, | ||
217 | 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x72, 0x6d, 0x64, 0x69, 0x72, 0x20, 0x25, 0x73, 0x20, | ||
218 | 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x20, 0x28, 0x25, 0x73, 0x29, 0x5c, 0x6e, 0x22, 0x2c, 0x20, | ||
219 | 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, | ||
220 | 0x20, 0x73, 0x74, 0x72, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x28, 0x65, 0x72, 0x72, 0x6e, 0x6f, 0x29, | ||
221 | 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x7d, 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, | ||
222 | 0x5f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x28, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x6f, 0x6c, | ||
223 | 0x64, 0x2c, 0x20, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x6e, 0x65, 0x77, 0x29, 0x0a, 0x7b, 0x0a, | ||
224 | 0x09, 0x73, 0x74, 0x72, 0x75, 0x70, 0x70, 0x65, 0x72, 0x28, 0x6f, 0x6c, 0x64, 0x29, 0x3b, 0x0a, | ||
225 | 0x09, 0x73, 0x74, 0x72, 0x75, 0x70, 0x70, 0x65, 0x72, 0x28, 0x6e, 0x65, 0x77, 0x29, 0x3b, 0x0a, | ||
226 | 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x28, 0x6f, 0x6c, 0x64, | ||
227 | 0x2c, 0x20, 0x6e, 0x65, 0x77, 0x29, 0x20, 0x21, 0x3d, 0x20, 0x30, 0x29, 0x20, 0x7b, 0x0a, 0x09, | ||
228 | 0x09, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x20, | ||
229 | 0x25, 0x73, 0x20, 0x25, 0x73, 0x20, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x20, 0x28, 0x25, 0x73, | ||
230 | 0x29, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, | ||
231 | 0x6f, 0x6c, 0x64, 0x2c, 0x20, 0x6e, 0x65, 0x77, 0x2c, 0x20, 0x73, 0x74, 0x72, 0x65, 0x72, 0x72, | ||
232 | 0x6f, 0x72, 0x28, 0x65, 0x72, 0x72, 0x6e, 0x6f, 0x29, 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x7d, | ||
233 | 0x0a, 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x28, | ||
234 | 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x69, 0x6e, 0x74, | ||
235 | 0x20, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x0a, 0x7b, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, | ||
236 | 0x20, 0x73, 0x74, 0x61, 0x74, 0x20, 0x73, 0x74, 0x3b, 0x0a, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x75, | ||
237 | 0x70, 0x70, 0x65, 0x72, 0x28, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x29, 0x3b, 0x0a, 0x0a, 0x09, 0x69, | ||
238 | 0x66, 0x20, 0x28, 0x73, 0x74, 0x61, 0x74, 0x28, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x26, | ||
239 | 0x73, 0x74, 0x29, 0x20, 0x21, 0x3d, 0x20, 0x30, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72, | ||
240 | 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x28, 0x25, 0x64, 0x29, 0x20, 0x64, 0x6f, 0x5f, 0x73, 0x74, | ||
241 | 0x61, 0x74, 0x3a, 0x20, 0x25, 0x73, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x3d, 0x25, 0x64, 0x20, 0x25, | ||
242 | 0x73, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, | ||
243 | 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x2c, 0x20, 0x66, 0x6e, 0x61, 0x6d, | ||
244 | 0x65, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x2c, 0x20, 0x73, 0x74, 0x72, 0x65, 0x72, 0x72, 0x6f, | ||
245 | 0x72, 0x28, 0x65, 0x72, 0x72, 0x6e, 0x6f, 0x29, 0x29, 0x3b, 0x0a, 0x09, 0x09, 0x72, 0x65, 0x74, | ||
246 | 0x75, 0x72, 0x6e, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x53, 0x5f, 0x49, | ||
247 | 0x53, 0x44, 0x49, 0x52, 0x28, 0x73, 0x74, 0x2e, 0x73, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x29, | ||
248 | 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x3b, 0x0a, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, | ||
249 | 0x73, 0x74, 0x2e, 0x73, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x21, 0x3d, 0x20, 0x73, 0x69, | ||
250 | 0x7a, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, | ||
251 | 0x28, 0x25, 0x64, 0x29, 0x20, 0x64, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x3a, 0x20, 0x25, 0x73, | ||
252 | 0x20, 0x77, 0x72, 0x6f, 0x6e, 0x67, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x25, 0x64, 0x20, 0x25, | ||
253 | 0x64, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, | ||
254 | 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x2c, 0x20, 0x66, 0x6e, 0x61, 0x6d, | ||
255 | 0x65, 0x2c, 0x20, 0x28, 0x69, 0x6e, 0x74, 0x29, 0x73, 0x74, 0x2e, 0x73, 0x74, 0x5f, 0x73, 0x69, | ||
256 | 0x7a, 0x65, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x7d, 0x0a, | ||
257 | 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x28, | ||
258 | 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x69, 0x6e, 0x74, | ||
259 | 0x20, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x0a, 0x7b, 0x0a, 0x09, 0x64, 0x6f, 0x5f, 0x6f, 0x70, 0x65, | ||
260 | 0x6e, 0x28, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x35, 0x30, 0x30, 0x30, 0x2c, 0x20, 0x73, | ||
261 | 0x69, 0x7a, 0x65, 0x29, 0x3b, 0x0a, 0x09, 0x64, 0x6f, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x28, | ||
262 | 0x35, 0x30, 0x30, 0x30, 0x29, 0x3b, 0x0a, 0x7d, 0x0a | ||
263 | }; | ||
264 | #endif | ||
/* Scratch buffers for the compression round-trip self-test below; each is
   large enough to hold the entire testdata block uncompressed. */
static unsigned char comprbuf[TESTDATA_LEN];
static unsigned char decomprbuf[TESTDATA_LEN];

/* Implemented elsewhere in fs/jffs2; declared here so init_module() can
   drive a compress -> decompress round trip over testdata. */
int jffs2_decompress(unsigned char comprtype, unsigned char *cdata_in,
unsigned char *data_out, uint32_t cdatalen, uint32_t datalen);
unsigned char jffs2_compress(unsigned char *data_in, unsigned char *cpage_out,
uint32_t *datalen, uint32_t *cdatalen);
272 | |||
273 | int init_module(void ) { | ||
274 | unsigned char comprtype; | ||
275 | uint32_t c, d; | ||
276 | int ret; | ||
277 | |||
278 | printk("Original data: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", | ||
279 | testdata[0],testdata[1],testdata[2],testdata[3], | ||
280 | testdata[4],testdata[5],testdata[6],testdata[7], | ||
281 | testdata[8],testdata[9],testdata[10],testdata[11], | ||
282 | testdata[12],testdata[13],testdata[14],testdata[15]); | ||
283 | d = TESTDATA_LEN; | ||
284 | c = TESTDATA_LEN; | ||
285 | comprtype = jffs2_compress(testdata, comprbuf, &d, &c); | ||
286 | |||
287 | printk("jffs2_compress used compression type %d. Compressed size %d, uncompressed size %d\n", | ||
288 | comprtype, c, d); | ||
289 | printk("Compressed data: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", | ||
290 | comprbuf[0],comprbuf[1],comprbuf[2],comprbuf[3], | ||
291 | comprbuf[4],comprbuf[5],comprbuf[6],comprbuf[7], | ||
292 | comprbuf[8],comprbuf[9],comprbuf[10],comprbuf[11], | ||
293 | comprbuf[12],comprbuf[13],comprbuf[14],comprbuf[15]); | ||
294 | |||
295 | ret = jffs2_decompress(comprtype, comprbuf, decomprbuf, c, d); | ||
296 | printk("jffs2_decompress returned %d\n", ret); | ||
297 | printk("Decompressed data: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", | ||
298 | decomprbuf[0],decomprbuf[1],decomprbuf[2],decomprbuf[3], | ||
299 | decomprbuf[4],decomprbuf[5],decomprbuf[6],decomprbuf[7], | ||
300 | decomprbuf[8],decomprbuf[9],decomprbuf[10],decomprbuf[11], | ||
301 | decomprbuf[12],decomprbuf[13],decomprbuf[14],decomprbuf[15]); | ||
302 | if (memcmp(decomprbuf, testdata, d)) | ||
303 | printk("Compression and decompression corrupted data\n"); | ||
304 | else | ||
305 | printk("Compression good for %d bytes\n", d); | ||
306 | return 1; | ||
307 | } | ||
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c new file mode 100644 index 000000000000..757306fa3ff4 --- /dev/null +++ b/fs/jffs2/dir.c | |||
@@ -0,0 +1,799 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2001-2003 Red Hat, Inc. | ||
5 | * | ||
6 | * Created by David Woodhouse <dwmw2@infradead.org> | ||
7 | * | ||
8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
9 | * | ||
10 | * $Id: dir.c,v 1.84 2004/11/16 20:36:11 dwmw2 Exp $ | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/sched.h> | ||
17 | #include <linux/fs.h> | ||
18 | #include <linux/crc32.h> | ||
19 | #include <linux/jffs2.h> | ||
20 | #include <linux/jffs2_fs_i.h> | ||
21 | #include <linux/jffs2_fs_sb.h> | ||
22 | #include <linux/time.h> | ||
23 | #include "nodelist.h" | ||
24 | |||
/* Urgh. Please tell me there's a nicer way of doing these. */
#include <linux/version.h>
/* Kernels before 2.5.48 pass mknod's device argument as a plain int and
   use pre-nameidata create/lookup prototypes, hence the cast shim. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,48)
typedef int mknod_arg_t;
#define NAMEI_COMPAT(x) ((void *)x)
#else
typedef dev_t mknod_arg_t;
#define NAMEI_COMPAT(x) (x)
#endif
34 | |||
/* Forward declarations for the directory file/inode operations defined
   further down in this file. */
static int jffs2_readdir (struct file *, void *, filldir_t);

static int jffs2_create (struct inode *,struct dentry *,int,
			 struct nameidata *);
static struct dentry *jffs2_lookup (struct inode *,struct dentry *,
				    struct nameidata *);
static int jffs2_link (struct dentry *,struct inode *,struct dentry *);
static int jffs2_unlink (struct inode *,struct dentry *);
static int jffs2_symlink (struct inode *,struct dentry *,const char *);
static int jffs2_mkdir (struct inode *,struct dentry *,int);
static int jffs2_rmdir (struct inode *,struct dentry *);
static int jffs2_mknod (struct inode *,struct dentry *,int,mknod_arg_t);
static int jffs2_rename (struct inode *, struct dentry *,
			 struct inode *, struct dentry *);
49 | |||
/* File operations for JFFS2 directories: reads go through the generic
   directory read helper; ioctl/fsync are shared with regular files. */
struct file_operations jffs2_dir_operations =
{
	.read =		generic_read_dir,
	.readdir =	jffs2_readdir,
	.ioctl =	jffs2_ioctl,
	.fsync =	jffs2_fsync
};
57 | |||
58 | |||
/* Inode operations for JFFS2 directories. NAMEI_COMPAT() casts create and
   lookup to the pre-2.5.48 prototypes on older kernels (see above). */
struct inode_operations jffs2_dir_inode_operations =
{
	.create =	NAMEI_COMPAT(jffs2_create),
	.lookup =	NAMEI_COMPAT(jffs2_lookup),
	.link =		jffs2_link,
	.unlink =	jffs2_unlink,
	.symlink =	jffs2_symlink,
	.mkdir =	jffs2_mkdir,
	.rmdir =	jffs2_rmdir,
	.mknod =	jffs2_mknod,
	.rename =	jffs2_rename,
	.setattr =	jffs2_setattr,
};
72 | |||
73 | /***********************************************************************/ | ||
74 | |||
75 | |||
76 | /* We keep the dirent list sorted in increasing order of name hash, | ||
77 | and we use the same hash function as the dentries. Makes this | ||
78 | nice and simple | ||
79 | */ | ||
/*
 * Look up a name in a directory.
 *
 * Walks the dirent list (kept sorted by name hash, same hash the dcache
 * uses) under dir_f->sem; among entries whose hash and name both match,
 * the one with the highest version wins.  A winning entry with ino == 0
 * is a deletion marker, so no inode is fetched and a negative dentry is
 * added.  Returns NULL on success per the VFS lookup convention, or an
 * ERR_PTR if iget() fails.
 */
static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
				   struct nameidata *nd)
{
	struct jffs2_inode_info *dir_f;
	struct jffs2_sb_info *c;
	struct jffs2_full_dirent *fd = NULL, *fd_list;
	uint32_t ino = 0;
	struct inode *inode = NULL;

	D1(printk(KERN_DEBUG "jffs2_lookup()\n"));

	dir_f = JFFS2_INODE_INFO(dir_i);
	c = JFFS2_SB_INFO(dir_i->i_sb);

	down(&dir_f->sem);

	/* NB: The 2.2 backport will need to explicitly check for '.' and '..' here */
	/* The list is hash-sorted, so we can stop once nhash exceeds the target's. */
	for (fd_list = dir_f->dents; fd_list && fd_list->nhash <= target->d_name.hash; fd_list = fd_list->next) {
		if (fd_list->nhash == target->d_name.hash &&
		    (!fd || fd_list->version > fd->version) &&
		    strlen(fd_list->name) == target->d_name.len &&
		    !strncmp(fd_list->name, target->d_name.name, target->d_name.len)) {
			fd = fd_list;
		}
	}
	if (fd)
		ino = fd->ino;
	up(&dir_f->sem);
	if (ino) {
		inode = iget(dir_i->i_sb, ino);
		if (!inode) {
			printk(KERN_WARNING "iget() failed for ino #%u\n", ino);
			return (ERR_PTR(-EIO));
		}
	}

	/* inode may still be NULL here: that adds a negative dentry. */
	d_add(target, inode);

	return NULL;
}
120 | |||
121 | /***********************************************************************/ | ||
122 | |||
123 | |||
/*
 * readdir for JFFS2 directories.
 *
 * f_pos layout: 0 is the synthesized ".", 1 is the synthesized "..",
 * and positions >= 2 walk the on-flash dirent list.  Deletion dirents
 * (ino == 0) are skipped but still advance the offset so resumed reads
 * stay aligned with curofs.  Emission stops early (without error) when
 * filldir signals a full buffer.
 */
static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct jffs2_inode_info *f;
	struct jffs2_sb_info *c;
	struct inode *inode = filp->f_dentry->d_inode;
	struct jffs2_full_dirent *fd;
	unsigned long offset, curofs;

	D1(printk(KERN_DEBUG "jffs2_readdir() for dir_i #%lu\n", filp->f_dentry->d_inode->i_ino));

	f = JFFS2_INODE_INFO(inode);
	c = JFFS2_SB_INFO(inode->i_sb);

	offset = filp->f_pos;

	if (offset == 0) {
		D1(printk(KERN_DEBUG "Dirent 0: \".\", ino #%lu\n", inode->i_ino));
		if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0)
			goto out;
		offset++;
	}
	if (offset == 1) {
		unsigned long pino = parent_ino(filp->f_dentry);
		D1(printk(KERN_DEBUG "Dirent 1: \"..\", ino #%lu\n", pino));
		if (filldir(dirent, "..", 2, 1, pino, DT_DIR) < 0)
			goto out;
		offset++;
	}

	/* curofs tracks the list position; real entries start at 2. */
	curofs=1;
	down(&f->sem);
	for (fd = f->dents; fd; fd = fd->next) {

		curofs++;
		/* First loop: curofs = 2; offset = 2 */
		if (curofs < offset) {
			D2(printk(KERN_DEBUG "Skipping dirent: \"%s\", ino #%u, type %d, because curofs %ld < offset %ld\n",
				  fd->name, fd->ino, fd->type, curofs, offset));
			continue;
		}
		if (!fd->ino) {
			/* Deletion marker: skip it, but keep offset in step with curofs. */
			D2(printk(KERN_DEBUG "Skipping deletion dirent \"%s\"\n", fd->name));
			offset++;
			continue;
		}
		D2(printk(KERN_DEBUG "Dirent %ld: \"%s\", ino #%u, type %d\n", offset, fd->name, fd->ino, fd->type));
		if (filldir(dirent, fd->name, strlen(fd->name), offset, fd->ino, fd->type) < 0)
			break;
		offset++;
	}
	up(&f->sem);
 out:
	filp->f_pos = offset;
	return 0;
}
179 | |||
180 | /***********************************************************************/ | ||
181 | |||
182 | |||
/*
 * Create a regular file in dir_i.
 *
 * Allocates a raw inode, builds the in-core inode via jffs2_new_inode(),
 * wires up the regular-file operations, then writes the inode node and
 * dirent through jffs2_do_create().  On failure after inode creation the
 * inode is marked bad and released.  Returns 0 or a negative errno.
 */
static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode,
			struct nameidata *nd)
{
	struct jffs2_raw_inode *ri;
	struct jffs2_inode_info *f, *dir_f;
	struct jffs2_sb_info *c;
	struct inode *inode;
	int ret;

	ri = jffs2_alloc_raw_inode();
	if (!ri)
		return -ENOMEM;

	c = JFFS2_SB_INFO(dir_i->i_sb);

	D1(printk(KERN_DEBUG "jffs2_create()\n"));

	inode = jffs2_new_inode(dir_i, mode, ri);

	if (IS_ERR(inode)) {
		D1(printk(KERN_DEBUG "jffs2_new_inode() failed\n"));
		jffs2_free_raw_inode(ri);
		return PTR_ERR(inode);
	}

	inode->i_op = &jffs2_file_inode_operations;
	inode->i_fop = &jffs2_file_operations;
	inode->i_mapping->a_ops = &jffs2_file_address_operations;
	inode->i_mapping->nrpages = 0;

	f = JFFS2_INODE_INFO(inode);
	dir_f = JFFS2_INODE_INFO(dir_i);

	ret = jffs2_do_create(c, dir_f, f, ri,
			      dentry->d_name.name, dentry->d_name.len);

	if (ret) {
		make_bad_inode(inode);
		iput(inode);
		jffs2_free_raw_inode(ri);
		return ret;
	}

	/* Parent's mtime/ctime follow the new node's ctime. */
	dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(ri->ctime));

	jffs2_free_raw_inode(ri);
	d_instantiate(dentry, inode);

	D1(printk(KERN_DEBUG "jffs2_create: Created ino #%lu with mode %o, nlink %d(%d). nrpages %ld\n",
		  inode->i_ino, inode->i_mode, inode->i_nlink, f->inocache->nlink, inode->i_mapping->nrpages));
	return 0;
}
235 | |||
236 | /***********************************************************************/ | ||
237 | |||
238 | |||
239 | static int jffs2_unlink(struct inode *dir_i, struct dentry *dentry) | ||
240 | { | ||
241 | struct jffs2_sb_info *c = JFFS2_SB_INFO(dir_i->i_sb); | ||
242 | struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i); | ||
243 | struct jffs2_inode_info *dead_f = JFFS2_INODE_INFO(dentry->d_inode); | ||
244 | int ret; | ||
245 | |||
246 | ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name, | ||
247 | dentry->d_name.len, dead_f); | ||
248 | if (dead_f->inocache) | ||
249 | dentry->d_inode->i_nlink = dead_f->inocache->nlink; | ||
250 | return ret; | ||
251 | } | ||
252 | /***********************************************************************/ | ||
253 | |||
254 | |||
/*
 * Create a hard link to old_dentry's inode under dir_i.
 *
 * Refuses links to inodes with no inocache (bad inodes) and to
 * directories.  On success, bumps the cached nlink under f->sem,
 * instantiates the new dentry and takes an extra inode reference.
 */
static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct dentry *dentry)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(old_dentry->d_inode->i_sb);
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(old_dentry->d_inode);
	struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i);
	int ret;
	uint8_t type;

	/* Don't let people make hard links to bad inodes. */
	if (!f->inocache)
		return -EIO;

	if (S_ISDIR(old_dentry->d_inode->i_mode))
		return -EPERM;

	/* XXX: This is ugly */
	/* Derive the DT_* dirent type from the S_IFMT bits of i_mode. */
	type = (old_dentry->d_inode->i_mode & S_IFMT) >> 12;
	if (!type) type = DT_REG;

	ret = jffs2_do_link(c, dir_f, f->inocache->ino, type, dentry->d_name.name, dentry->d_name.len);

	if (!ret) {
		down(&f->sem);
		old_dentry->d_inode->i_nlink = ++f->inocache->nlink;
		up(&f->sem);
		d_instantiate(dentry, old_dentry->d_inode);
		/* d_instantiate consumed no reference; take one for the new link. */
		atomic_inc(&old_dentry->d_inode->i_count);
	}
	return ret;
}
285 | |||
286 | /***********************************************************************/ | ||
287 | |||
/*
 * Create a symbolic link.
 *
 * Two-phase write: first reserve space and write the inode node with the
 * target string as uncompressed payload, then (after completing that
 * reservation) reserve again and write the dirent that names it in the
 * parent.  Any failure after inode creation tears the inode down via
 * jffs2_clear_inode(), which behaves like a final unlink.
 *
 * NOTE(review): f->sem appears to be returned held by jffs2_new_inode()
 * — it is released with up(&f->sem) below without a visible down();
 * confirm against fs/jffs2/fs.c before touching the locking.
 */
static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char *target)
{
	struct jffs2_inode_info *f, *dir_f;
	struct jffs2_sb_info *c;
	struct inode *inode;
	struct jffs2_raw_inode *ri;
	struct jffs2_raw_dirent *rd;
	struct jffs2_full_dnode *fn;
	struct jffs2_full_dirent *fd;
	int namelen;
	uint32_t alloclen, phys_ofs;
	int ret;

	/* FIXME: If you care. We'd need to use frags for the target
	   if it grows much more than this */
	if (strlen(target) > 254)
		return -EINVAL;

	ri = jffs2_alloc_raw_inode();

	if (!ri)
		return -ENOMEM;

	c = JFFS2_SB_INFO(dir_i->i_sb);

	/* Try to reserve enough space for both node and dirent.
	 * Just the node will do for now, though
	 */
	namelen = dentry->d_name.len;
	ret = jffs2_reserve_space(c, sizeof(*ri) + strlen(target), &phys_ofs, &alloclen, ALLOC_NORMAL);

	if (ret) {
		jffs2_free_raw_inode(ri);
		return ret;
	}

	inode = jffs2_new_inode(dir_i, S_IFLNK | S_IRWXUGO, ri);

	if (IS_ERR(inode)) {
		jffs2_free_raw_inode(ri);
		jffs2_complete_reservation(c);
		return PTR_ERR(inode);
	}

	inode->i_op = &jffs2_symlink_inode_operations;

	f = JFFS2_INODE_INFO(inode);

	/* The symlink target is stored uncompressed as the node's data. */
	inode->i_size = strlen(target);
	ri->isize = ri->dsize = ri->csize = cpu_to_je32(inode->i_size);
	ri->totlen = cpu_to_je32(sizeof(*ri) + inode->i_size);
	ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));

	ri->compr = JFFS2_COMPR_NONE;
	ri->data_crc = cpu_to_je32(crc32(0, target, strlen(target)));
	ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));

	fn = jffs2_write_dnode(c, f, ri, target, strlen(target), phys_ofs, ALLOC_NORMAL);

	jffs2_free_raw_inode(ri);

	if (IS_ERR(fn)) {
		/* Eeek. Wave bye bye */
		up(&f->sem);
		jffs2_complete_reservation(c);
		jffs2_clear_inode(inode);
		return PTR_ERR(fn);
	}
	/* No data here. Only a metadata node, which will be
	   obsoleted by the first data write
	 */
	f->metadata = fn;
	up(&f->sem);

	/* Phase two: a fresh reservation for the dirent node. */
	jffs2_complete_reservation(c);
	ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL);
	if (ret) {
		/* Eep. */
		jffs2_clear_inode(inode);
		return ret;
	}

	rd = jffs2_alloc_raw_dirent();
	if (!rd) {
		/* Argh. Now we treat it like a normal delete */
		jffs2_complete_reservation(c);
		jffs2_clear_inode(inode);
		return -ENOMEM;
	}

	dir_f = JFFS2_INODE_INFO(dir_i);
	down(&dir_f->sem);

	rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
	rd->totlen = cpu_to_je32(sizeof(*rd) + namelen);
	rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));

	rd->pino = cpu_to_je32(dir_i->i_ino);
	rd->version = cpu_to_je32(++dir_f->highest_version);
	rd->ino = cpu_to_je32(inode->i_ino);
	rd->mctime = cpu_to_je32(get_seconds());
	rd->nsize = namelen;
	rd->type = DT_LNK;
	rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
	rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen));

	fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL);

	if (IS_ERR(fd)) {
		/* dirent failed to write. Delete the inode normally
		   as if it were the final unlink() */
		jffs2_complete_reservation(c);
		jffs2_free_raw_dirent(rd);
		up(&dir_f->sem);
		jffs2_clear_inode(inode);
		return PTR_ERR(fd);
	}

	dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime));

	jffs2_free_raw_dirent(rd);

	/* Link the fd into the inode's list, obsoleting an old
	   one if necessary. */
	jffs2_add_fd_to_list(c, fd, &dir_f->dents);

	up(&dir_f->sem);
	jffs2_complete_reservation(c);

	d_instantiate(dentry, inode);
	return 0;
}
421 | |||
422 | |||
/*
 * Create a directory.
 *
 * Mirrors jffs2_symlink()'s two-phase layout: reserve and write the
 * (data-less) inode node first, complete that reservation, then reserve
 * again and write the dirent in the parent.  New directories start with
 * nlink 2 ("." and the parent's entry) and the parent's nlink is bumped
 * for the new ".." back-reference.
 *
 * NOTE(review): f->sem appears to be returned held by jffs2_new_inode()
 * — released below with up(&f->sem); confirm against fs/jffs2/fs.c.
 */
static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
{
	struct jffs2_inode_info *f, *dir_f;
	struct jffs2_sb_info *c;
	struct inode *inode;
	struct jffs2_raw_inode *ri;
	struct jffs2_raw_dirent *rd;
	struct jffs2_full_dnode *fn;
	struct jffs2_full_dirent *fd;
	int namelen;
	uint32_t alloclen, phys_ofs;
	int ret;

	mode |= S_IFDIR;

	ri = jffs2_alloc_raw_inode();
	if (!ri)
		return -ENOMEM;

	c = JFFS2_SB_INFO(dir_i->i_sb);

	/* Try to reserve enough space for both node and dirent.
	 * Just the node will do for now, though
	 */
	namelen = dentry->d_name.len;
	ret = jffs2_reserve_space(c, sizeof(*ri), &phys_ofs, &alloclen, ALLOC_NORMAL);

	if (ret) {
		jffs2_free_raw_inode(ri);
		return ret;
	}

	inode = jffs2_new_inode(dir_i, mode, ri);

	if (IS_ERR(inode)) {
		jffs2_free_raw_inode(ri);
		jffs2_complete_reservation(c);
		return PTR_ERR(inode);
	}

	inode->i_op = &jffs2_dir_inode_operations;
	inode->i_fop = &jffs2_dir_operations;
	/* Directories get nlink 2 at start */
	inode->i_nlink = 2;

	f = JFFS2_INODE_INFO(inode);

	ri->data_crc = cpu_to_je32(0);
	ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));

	fn = jffs2_write_dnode(c, f, ri, NULL, 0, phys_ofs, ALLOC_NORMAL);

	jffs2_free_raw_inode(ri);

	if (IS_ERR(fn)) {
		/* Eeek. Wave bye bye */
		up(&f->sem);
		jffs2_complete_reservation(c);
		jffs2_clear_inode(inode);
		return PTR_ERR(fn);
	}
	/* No data here. Only a metadata node, which will be
	   obsoleted by the first data write
	 */
	f->metadata = fn;
	up(&f->sem);

	/* Phase two: a fresh reservation for the dirent node. */
	jffs2_complete_reservation(c);
	ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL);
	if (ret) {
		/* Eep. */
		jffs2_clear_inode(inode);
		return ret;
	}

	rd = jffs2_alloc_raw_dirent();
	if (!rd) {
		/* Argh. Now we treat it like a normal delete */
		jffs2_complete_reservation(c);
		jffs2_clear_inode(inode);
		return -ENOMEM;
	}

	dir_f = JFFS2_INODE_INFO(dir_i);
	down(&dir_f->sem);

	rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
	rd->totlen = cpu_to_je32(sizeof(*rd) + namelen);
	rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));

	rd->pino = cpu_to_je32(dir_i->i_ino);
	rd->version = cpu_to_je32(++dir_f->highest_version);
	rd->ino = cpu_to_je32(inode->i_ino);
	rd->mctime = cpu_to_je32(get_seconds());
	rd->nsize = namelen;
	rd->type = DT_DIR;
	rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
	rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen));

	fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL);

	if (IS_ERR(fd)) {
		/* dirent failed to write. Delete the inode normally
		   as if it were the final unlink() */
		jffs2_complete_reservation(c);
		jffs2_free_raw_dirent(rd);
		up(&dir_f->sem);
		jffs2_clear_inode(inode);
		return PTR_ERR(fd);
	}

	dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime));
	/* New subdirectory's ".." adds a link to the parent. */
	dir_i->i_nlink++;

	jffs2_free_raw_dirent(rd);

	/* Link the fd into the inode's list, obsoleting an old
	   one if necessary. */
	jffs2_add_fd_to_list(c, fd, &dir_f->dents);

	up(&dir_f->sem);
	jffs2_complete_reservation(c);

	d_instantiate(dentry, inode);
	return 0;
}
550 | |||
551 | static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry) | ||
552 | { | ||
553 | struct jffs2_inode_info *f = JFFS2_INODE_INFO(dentry->d_inode); | ||
554 | struct jffs2_full_dirent *fd; | ||
555 | int ret; | ||
556 | |||
557 | for (fd = f->dents ; fd; fd = fd->next) { | ||
558 | if (fd->ino) | ||
559 | return -ENOTEMPTY; | ||
560 | } | ||
561 | ret = jffs2_unlink(dir_i, dentry); | ||
562 | if (!ret) | ||
563 | dir_i->i_nlink--; | ||
564 | return ret; | ||
565 | } | ||
566 | |||
/* mknod(): create a device node, FIFO or socket under dir_i.
 *
 * Two-phase commit like the other creation ops: first write the raw
 * inode node (carrying the 16-bit encoded device number as its data
 * payload for block/char devices), then write the dirent that links it
 * into the parent. A failure after phase one leaves an inode with no
 * dirent — equivalent to an unlinked inode — which is torn down via
 * jffs2_clear_inode(). */
static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, mknod_arg_t rdev)
{
	struct jffs2_inode_info *f, *dir_f;
	struct jffs2_sb_info *c;
	struct inode *inode;
	struct jffs2_raw_inode *ri;
	struct jffs2_raw_dirent *rd;
	struct jffs2_full_dnode *fn;
	struct jffs2_full_dirent *fd;
	int namelen;
	jint16_t dev;		/* old 16-bit dev_t encoding, stored as node data */
	int devlen = 0;		/* stays 0 for FIFOs/sockets: no device payload */
	uint32_t alloclen, phys_ofs;
	int ret;

	/* Only the old 16-bit major:minor encoding fits on flash */
	if (!old_valid_dev(rdev))
		return -EINVAL;

	ri = jffs2_alloc_raw_inode();
	if (!ri)
		return -ENOMEM;

	c = JFFS2_SB_INFO(dir_i->i_sb);

	if (S_ISBLK(mode) || S_ISCHR(mode)) {
		dev = cpu_to_je16(old_encode_dev(rdev));
		devlen = sizeof(dev);
	}

	/* Try to reserve enough space for both node and dirent.
	 * Just the node will do for now, though
	 */
	namelen = dentry->d_name.len;
	ret = jffs2_reserve_space(c, sizeof(*ri) + devlen, &phys_ofs, &alloclen, ALLOC_NORMAL);

	if (ret) {
		jffs2_free_raw_inode(ri);
		return ret;
	}

	/* On success this returns with f->sem held — the up(&f->sem)
	   calls below release it. */
	inode = jffs2_new_inode(dir_i, mode, ri);

	if (IS_ERR(inode)) {
		jffs2_free_raw_inode(ri);
		jffs2_complete_reservation(c);
		return PTR_ERR(inode);
	}
	inode->i_op = &jffs2_file_inode_operations;
	init_special_inode(inode, inode->i_mode, rdev);

	f = JFFS2_INODE_INFO(inode);

	ri->dsize = ri->csize = cpu_to_je32(devlen);
	ri->totlen = cpu_to_je32(sizeof(*ri) + devlen);
	/* hdr_crc covers only the common node header minus its crc field */
	ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));

	ri->compr = JFFS2_COMPR_NONE;
	ri->data_crc = cpu_to_je32(crc32(0, &dev, devlen));
	/* node_crc covers everything up to (not including) the two CRC fields */
	ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));

	fn = jffs2_write_dnode(c, f, ri, (char *)&dev, devlen, phys_ofs, ALLOC_NORMAL);

	jffs2_free_raw_inode(ri);

	if (IS_ERR(fn)) {
		/* Eeek. Wave bye bye */
		up(&f->sem);
		jffs2_complete_reservation(c);
		jffs2_clear_inode(inode);
		return PTR_ERR(fn);
	}
	/* No data here. Only a metadata node, which will be
	   obsoleted by the first data write
	*/
	f->metadata = fn;
	up(&f->sem);

	/* Phase two: reserve space for and write the dirent */
	jffs2_complete_reservation(c);
	ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL);
	if (ret) {
		/* Eep. */
		jffs2_clear_inode(inode);
		return ret;
	}

	rd = jffs2_alloc_raw_dirent();
	if (!rd) {
		/* Argh. Now we treat it like a normal delete */
		jffs2_complete_reservation(c);
		jffs2_clear_inode(inode);
		return -ENOMEM;
	}

	dir_f = JFFS2_INODE_INFO(dir_i);
	down(&dir_f->sem);

	rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
	rd->totlen = cpu_to_je32(sizeof(*rd) + namelen);
	rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));

	rd->pino = cpu_to_je32(dir_i->i_ino);
	rd->version = cpu_to_je32(++dir_f->highest_version);
	rd->ino = cpu_to_je32(inode->i_ino);
	rd->mctime = cpu_to_je32(get_seconds());
	rd->nsize = namelen;

	/* XXX: This is ugly. */
	rd->type = (mode & S_IFMT) >> 12;

	rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
	rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen));

	fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL);

	if (IS_ERR(fd)) {
		/* dirent failed to write. Delete the inode normally
		   as if it were the final unlink() */
		jffs2_complete_reservation(c);
		jffs2_free_raw_dirent(rd);
		up(&dir_f->sem);
		jffs2_clear_inode(inode);
		return PTR_ERR(fd);
	}

	dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime));

	jffs2_free_raw_dirent(rd);

	/* Link the fd into the inode's list, obsoleting an old
	   one if necessary. */
	jffs2_add_fd_to_list(c, fd, &dir_f->dents);

	up(&dir_f->sem);
	jffs2_complete_reservation(c);

	d_instantiate(dentry, inode);

	return 0;
}
707 | |||
/* rename(): implemented as "link under the new name, then unlink the
 * old name". This is NOT atomic: if the unlink step fails, the file is
 * deliberately left with both links and a KERN_NOTICE is printed.
 * Victim handling (an existing target) decrements its link counts; a
 * moved directory instead bumps the new parent's i_nlink. */
static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
                        struct inode *new_dir_i, struct dentry *new_dentry)
{
	int ret;
	struct jffs2_sb_info *c = JFFS2_SB_INFO(old_dir_i->i_sb);
	struct jffs2_inode_info *victim_f = NULL;
	uint8_t type;

	/* The VFS will check for us and prevent trying to rename a
	 * file over a directory and vice versa, but if it's a directory,
	 * the VFS can't check whether the victim is empty. The filesystem
	 * needs to do that for itself.
	 */
	if (new_dentry->d_inode) {
		victim_f = JFFS2_INODE_INFO(new_dentry->d_inode);
		if (S_ISDIR(new_dentry->d_inode->i_mode)) {
			struct jffs2_full_dirent *fd;

			down(&victim_f->sem);
			for (fd = victim_f->dents; fd; fd = fd->next) {
				/* Any dirent with a live ino => not empty */
				if (fd->ino) {
					up(&victim_f->sem);
					return -ENOTEMPTY;
				}
			}
			up(&victim_f->sem);
		}
	}

	/* XXX: We probably ought to alloc enough space for
	   both nodes at the same time. Writing the new link,
	   then getting -ENOSPC, is quite bad :)
	*/

	/* Make a hard link */

	/* XXX: This is ugly */
	type = (old_dentry->d_inode->i_mode & S_IFMT) >> 12;
	if (!type) type = DT_REG;

	ret = jffs2_do_link(c, JFFS2_INODE_INFO(new_dir_i),
			    old_dentry->d_inode->i_ino, type,
			    new_dentry->d_name.name, new_dentry->d_name.len);

	if (ret)
		return ret;

	if (victim_f) {
		/* There was a victim. Kill it off nicely */
		new_dentry->d_inode->i_nlink--;
		/* Don't oops if the victim was a dirent pointing to an
		   inode which didn't exist. */
		if (victim_f->inocache) {
			down(&victim_f->sem);
			victim_f->inocache->nlink--;
			up(&victim_f->sem);
		}
	}

	/* If it was a directory we moved, and there was no victim,
	   increase i_nlink on its new parent */
	if (S_ISDIR(old_dentry->d_inode->i_mode) && !victim_f)
		new_dir_i->i_nlink++;

	/* Unlink the original */
	ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i),
		      old_dentry->d_name.name, old_dentry->d_name.len, NULL);

	/* We don't touch inode->i_nlink */

	if (ret) {
		/* Oh shit. We really ought to make a single node which can do both atomically */
		struct jffs2_inode_info *f = JFFS2_INODE_INFO(old_dentry->d_inode);
		down(&f->sem);
		old_dentry->d_inode->i_nlink++;
		if (f->inocache)
			f->inocache->nlink++;
		up(&f->sem);

		printk(KERN_NOTICE "jffs2_rename(): Link succeeded, unlink failed (err %d). You now have a hard link\n", ret);
		/* Might as well let the VFS know */
		d_instantiate(new_dentry, old_dentry->d_inode);
		atomic_inc(&old_dentry->d_inode->i_count);
		return ret;
	}

	if (S_ISDIR(old_dentry->d_inode->i_mode))
		old_dir_i->i_nlink--;

	return 0;
}
799 | |||
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c new file mode 100644 index 000000000000..41451e8bf361 --- /dev/null +++ b/fs/jffs2/erase.c | |||
@@ -0,0 +1,442 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2001-2003 Red Hat, Inc. | ||
5 | * | ||
6 | * Created by David Woodhouse <dwmw2@infradead.org> | ||
7 | * | ||
8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
9 | * | ||
10 | * $Id: erase.c,v 1.66 2004/11/16 20:36:11 dwmw2 Exp $ | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/mtd/mtd.h> | ||
17 | #include <linux/compiler.h> | ||
18 | #include <linux/crc32.h> | ||
19 | #include <linux/sched.h> | ||
20 | #include <linux/pagemap.h> | ||
21 | #include "nodelist.h" | ||
22 | |||
/* Per-erase context stashed after the erase_info (via instr->priv) so
   that jffs2_erase_callback() can recover the block and filesystem. */
struct erase_priv_struct {
	struct jffs2_eraseblock *jeb;	/* the block being erased */
	struct jffs2_sb_info *c;	/* owning filesystem */
};
27 | |||
28 | #ifndef __ECOS | ||
29 | static void jffs2_erase_callback(struct erase_info *); | ||
30 | #endif | ||
31 | static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset); | ||
32 | static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); | ||
33 | static void jffs2_free_all_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); | ||
34 | static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); | ||
35 | |||
/* Start erasing one eraseblock. On eCos the erase is synchronous; on
 * Linux an asynchronous MTD erase is issued, whose completion arrives
 * via jffs2_erase_callback(). Immediate -ENOMEM/-EAGAIN failures refile
 * the block onto erase_pending_list for a later retry; other immediate
 * failures are handed to jffs2_erase_failed(). */
static void jffs2_erase_block(struct jffs2_sb_info *c,
			      struct jffs2_eraseblock *jeb)
{
	int ret;
	uint32_t bad_offset;
#ifdef __ECOS
	ret = jffs2_flash_erase(c, jeb);
	if (!ret) {
		jffs2_erase_succeeded(c, jeb);
		return;
	}
	bad_offset = jeb->offset;
#else /* Linux */
	struct erase_info *instr;

	/* Allocate erase_info plus our private context in one shot */
	instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL);
	if (!instr) {
		printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
		spin_lock(&c->erase_completion_lock);
		list_del(&jeb->list);
		list_add(&jeb->list, &c->erase_pending_list);
		/* Undo the "erasing" accounting done by our caller */
		c->erasing_size -= c->sector_size;
		c->dirty_size += c->sector_size;
		jeb->dirty_size = c->sector_size;
		spin_unlock(&c->erase_completion_lock);
		return;
	}

	memset(instr, 0, sizeof(*instr));

	instr->mtd = c->mtd;
	instr->addr = jeb->offset;
	instr->len = c->sector_size;
	instr->callback = jffs2_erase_callback;
	/* priv points at the erase_priv_struct tacked on after instr */
	instr->priv = (unsigned long)(&instr[1]);
	instr->fail_addr = 0xffffffff;	/* "no specific failure address" */

	((struct erase_priv_struct *)instr->priv)->jeb = jeb;
	((struct erase_priv_struct *)instr->priv)->c = c;

	ret = c->mtd->erase(c->mtd, instr);
	if (!ret)
		/* Erase accepted; the callback owns (and frees) instr now */
		return;

	bad_offset = instr->fail_addr;
	kfree(instr);
#endif /* __ECOS */

	if (ret == -ENOMEM || ret == -EAGAIN) {
		/* Erase failed immediately. Refile it on the list */
		D1(printk(KERN_DEBUG "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n", jeb->offset, ret));
		spin_lock(&c->erase_completion_lock);
		list_del(&jeb->list);
		list_add(&jeb->list, &c->erase_pending_list);
		c->erasing_size -= c->sector_size;
		c->dirty_size += c->sector_size;
		jeb->dirty_size = c->sector_size;
		spin_unlock(&c->erase_completion_lock);
		return;
	}

	if (ret == -EROFS)
		printk(KERN_WARNING "Erase at 0x%08x failed immediately: -EROFS. Is the sector locked?\n", jeb->offset);
	else
		printk(KERN_WARNING "Erase at 0x%08x failed immediately: errno %d\n", jeb->offset, ret);

	jffs2_erase_failed(c, jeb, bad_offset);
}
104 | |||
/* Service the erase queues: write clean markers for blocks whose erase
 * has completed (stopping after `count` of them), and kick off erases
 * for blocks on the pending list. Serialized by erase_free_sem; the
 * completion spinlock is dropped around the actual flash operations. */
void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
{
	struct jffs2_eraseblock *jeb;

	down(&c->erase_free_sem);

	spin_lock(&c->erase_completion_lock);

	while (!list_empty(&c->erase_complete_list) ||
	       !list_empty(&c->erase_pending_list)) {

		if (!list_empty(&c->erase_complete_list)) {
			jeb = list_entry(c->erase_complete_list.next, struct jffs2_eraseblock, list);
			list_del(&jeb->list);
			/* Drop the lock: marking touches the flash */
			spin_unlock(&c->erase_completion_lock);
			jffs2_mark_erased_block(c, jeb);

			/* Only completed blocks count against `count` */
			if (!--count) {
				D1(printk(KERN_DEBUG "Count reached. jffs2_erase_pending_blocks leaving\n"));
				goto done;
			}

		} else if (!list_empty(&c->erase_pending_list)) {
			jeb = list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list);
			D1(printk(KERN_DEBUG "Starting erase of pending block 0x%08x\n", jeb->offset));
			list_del(&jeb->list);
			/* Move the block's entire size into "erasing" accounting */
			c->erasing_size += c->sector_size;
			c->wasted_size -= jeb->wasted_size;
			c->free_size -= jeb->free_size;
			c->used_size -= jeb->used_size;
			c->dirty_size -= jeb->dirty_size;
			jeb->wasted_size = jeb->used_size = jeb->dirty_size = jeb->free_size = 0;
			jffs2_free_all_node_refs(c, jeb);
			list_add(&jeb->list, &c->erasing_list);
			spin_unlock(&c->erase_completion_lock);

			jffs2_erase_block(c, jeb);

		} else {
			BUG();
		}

		/* Be nice */
		cond_resched();
		spin_lock(&c->erase_completion_lock);
	}

	spin_unlock(&c->erase_completion_lock);
 done:
	D1(printk(KERN_DEBUG "jffs2_erase_pending_blocks completed\n"));

	up(&c->erase_free_sem);
}
158 | |||
/* Device-level erase finished OK. Queue the block on erase_complete_list;
   the clean marker is written later by jffs2_erase_pending_blocks(). */
static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	D1(printk(KERN_DEBUG "Erase completed successfully at 0x%08x\n", jeb->offset));
	spin_lock(&c->erase_completion_lock);
	list_del(&jeb->list);
	list_add_tail(&jeb->list, &c->erase_complete_list);
	spin_unlock(&c->erase_completion_lock);
	/* Ensure that kupdated calls us again to mark them clean */
	jffs2_erase_pending_trigger(c);
}
169 | |||
/* An erase failed. For NAND with a concrete failure address, let
 * jffs2_write_nand_badblock() decide: if it returns 0 the block gets
 * requeued for another attempt; otherwise (or for non-NAND) the block
 * is retired to bad_list and the accounting updated. */
static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
{
	/* For NAND, if the failure did not occur at the device level for a
	   specific physical page, don't bother updating the bad block table. */
	if (jffs2_cleanmarker_oob(c) && (bad_offset != 0xffffffff)) {
		/* We had a device-level failure to erase.  Let's see if we've
		   failed too many times. */
		if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) {
			/* We'd like to give this block another try. */
			spin_lock(&c->erase_completion_lock);
			list_del(&jeb->list);
			list_add(&jeb->list, &c->erase_pending_list);
			c->erasing_size -= c->sector_size;
			c->dirty_size += c->sector_size;
			jeb->dirty_size = c->sector_size;
			spin_unlock(&c->erase_completion_lock);
			return;
		}
	}

	/* Give up: account the whole sector as bad and retire the block */
	spin_lock(&c->erase_completion_lock);
	c->erasing_size -= c->sector_size;
	c->bad_size += c->sector_size;
	list_del(&jeb->list);
	list_add(&jeb->list, &c->bad_list);
	c->nr_erasing_blocks--;
	spin_unlock(&c->erase_completion_lock);
	/* Wake anyone waiting for erases to make progress */
	wake_up(&c->erase_wait);
}
199 | |||
200 | #ifndef __ECOS | ||
201 | static void jffs2_erase_callback(struct erase_info *instr) | ||
202 | { | ||
203 | struct erase_priv_struct *priv = (void *)instr->priv; | ||
204 | |||
205 | if(instr->state != MTD_ERASE_DONE) { | ||
206 | printk(KERN_WARNING "Erase at 0x%08x finished, but state != MTD_ERASE_DONE. State is 0x%x instead.\n", instr->addr, instr->state); | ||
207 | jffs2_erase_failed(priv->c, priv->jeb, instr->fail_addr); | ||
208 | } else { | ||
209 | jffs2_erase_succeeded(priv->c, priv->jeb); | ||
210 | } | ||
211 | kfree(instr); | ||
212 | } | ||
213 | #endif /* !__ECOS */ | ||
214 | |||
/* Hmmm. Maybe we should accept the extra space it takes and make
   this a standard doubly-linked list? */
/* Unlink from ref's inode's node list every raw node ref that lives in
 * eraseblock jeb. The per-inode list is singly linked and terminates at
 * the jffs2_inode_cache itself (the only element whose next_in_ino is
 * NULL), which is how we locate the cache; if the inode ends up with no
 * nodes at all, the cache is freed too. */
static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
							struct jffs2_raw_node_ref *ref, struct jffs2_eraseblock *jeb)
{
	struct jffs2_inode_cache *ic = NULL;
	struct jffs2_raw_node_ref **prev;

	prev = &ref->next_in_ino;

	/* Walk the inode's list once, removing any nodes from this eraseblock */
	while (1) {
		if (!(*prev)->next_in_ino) {
			/* We're looking at the jffs2_inode_cache, which is
			   at the end of the linked list. Stash it and continue
			   from the beginning of the list */
			ic = (struct jffs2_inode_cache *)(*prev);
			prev = &ic->nodes;
			continue;
		}

		if (((*prev)->flash_offset & ~(c->sector_size -1)) == jeb->offset) {
			/* It's in the block we're erasing */
			struct jffs2_raw_node_ref *this;

			this = *prev;
			*prev = this->next_in_ino;
			this->next_in_ino = NULL;

			/* ref is on jeb by definition, so hitting it again
			   means we have made a complete pass: stop. */
			if (this == ref)
				break;

			continue;
		}
		/* Not to be deleted. Skip */
		prev = &((*prev)->next_in_ino);
	}

	/* PARANOIA */
	if (!ic) {
		printk(KERN_WARNING "inode_cache not found in remove_node_refs()!!\n");
		return;
	}

	D1(printk(KERN_DEBUG "Removed nodes in range 0x%08x-0x%08x from ino #%u\n",
		  jeb->offset, jeb->offset + c->sector_size, ic->ino));

	D2({
		int i=0;
		struct jffs2_raw_node_ref *this;
		printk(KERN_DEBUG "After remove_node_refs_from_ino_list: \n" KERN_DEBUG);

		this = ic->nodes;

		while(this) {
			printk( "0x%08x(%d)->", ref_offset(this), ref_flags(this));
			if (++i == 5) {
				printk("\n" KERN_DEBUG);
				i=0;
			}
			this = this->next_in_ino;
		}
		printk("\n");
	});

	/* nodes pointing straight back at the cache == empty list */
	if (ic->nodes == (void *)ic) {
		D1(printk(KERN_DEBUG "inocache for ino #%u is all gone now. Freeing\n", ic->ino));
		jffs2_del_ino_cache(c, ic);
		jffs2_free_inode_cache(ic);
	}
}
286 | |||
287 | static void jffs2_free_all_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) | ||
288 | { | ||
289 | struct jffs2_raw_node_ref *ref; | ||
290 | D1(printk(KERN_DEBUG "Freeing all node refs for eraseblock offset 0x%08x\n", jeb->offset)); | ||
291 | while(jeb->first_node) { | ||
292 | ref = jeb->first_node; | ||
293 | jeb->first_node = ref->next_phys; | ||
294 | |||
295 | /* Remove from the inode-list */ | ||
296 | if (ref->next_in_ino) | ||
297 | jffs2_remove_node_refs_from_ino_list(c, ref, jeb); | ||
298 | /* else it was a non-inode node or already removed, so don't bother */ | ||
299 | |||
300 | jffs2_free_raw_node_ref(ref); | ||
301 | } | ||
302 | jeb->last_node = NULL; | ||
303 | } | ||
304 | |||
/* Finish off a block whose erase completed: verify the flash reads back
 * as all 0xFF (best-effort — skipped if we can't get a buffer), write
 * the clean marker (in OOB for NAND via jffs2_cleanmarker_oob(), as an
 * on-flash node otherwise), then move the block to the free list and
 * update the accounting. Any failure routes the block through
 * jffs2_erase_failed(). Note the goto targets `bad:`/`bad2:` inside the
 * verify loop are shared by the later marker-write error paths. */
static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	struct jffs2_raw_node_ref *marker_ref = NULL;
	unsigned char *ebuf;
	size_t retlen;
	int ret;
	uint32_t bad_offset;

	if (!jffs2_cleanmarker_oob(c)) {
		/* Non-NAND: the clean marker is a real node, so we need a ref
		   for it up front — easier to back out now than later. */
		marker_ref = jffs2_alloc_raw_node_ref();
		if (!marker_ref) {
			printk(KERN_WARNING "Failed to allocate raw node ref for clean marker\n");
			/* Stick it back on the list from whence it came and come back later */
			jffs2_erase_pending_trigger(c);
			spin_lock(&c->erase_completion_lock);
			list_add(&jeb->list, &c->erase_complete_list);
			spin_unlock(&c->erase_completion_lock);
			return;
		}
	}
	ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!ebuf) {
		printk(KERN_WARNING "Failed to allocate page buffer for verifying erase at 0x%08x. Assuming it worked\n", jeb->offset);
	} else {
		uint32_t ofs = jeb->offset;

		/* Read the block back one page at a time and check that
		   every word is 0xFFFFFFFF */
		D1(printk(KERN_DEBUG "Verifying erase at 0x%08x\n", jeb->offset));
		while(ofs < jeb->offset + c->sector_size) {
			uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs);
			int i;

			bad_offset = ofs;

			ret = jffs2_flash_read(c, ofs, readlen, &retlen, ebuf);
			if (ret) {
				printk(KERN_WARNING "Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n", ofs, ret);
				goto bad;
			}
			if (retlen != readlen) {
				printk(KERN_WARNING "Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n", ofs, readlen, retlen);
				goto bad;
			}
			for (i=0; i<readlen; i += sizeof(unsigned long)) {
				/* It's OK. We know it's properly aligned */
				unsigned long datum = *(unsigned long *)(&ebuf[i]);
				/* datum + 1 is nonzero unless datum is all-ones,
				   i.e. properly erased flash */
				if (datum + 1) {
					bad_offset += i;
					printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08x\n", datum, bad_offset);
				bad:
					if (!jffs2_cleanmarker_oob(c))
						jffs2_free_raw_node_ref(marker_ref);
					kfree(ebuf);
				bad2:
					spin_lock(&c->erase_completion_lock);
					/* Stick it on a list (any list) so
					   erase_failed can take it right off
					   again.  Silly, but shouldn't happen
					   often. */
					list_add(&jeb->list, &c->erasing_list);
					spin_unlock(&c->erase_completion_lock);
					jffs2_erase_failed(c, jeb, bad_offset);
					return;
				}
			}
			ofs += readlen;
			cond_resched();
		}
		kfree(ebuf);
	}

	bad_offset = jeb->offset;

	/* Write the erase complete marker */
	D1(printk(KERN_DEBUG "Writing erased marker to block at 0x%08x\n", jeb->offset));
	if (jffs2_cleanmarker_oob(c)) {
		/* NAND: marker lives in OOB, costs no data space */
		if (jffs2_write_nand_cleanmarker(c, jeb))
			goto bad2;

		jeb->first_node = jeb->last_node = NULL;

		jeb->free_size = c->sector_size;
		jeb->used_size = 0;
		jeb->dirty_size = 0;
		jeb->wasted_size = 0;
	} else {
		/* NOR: write a CLEANMARKER node at the start of the block */
		struct kvec vecs[1];
		struct jffs2_unknown_node marker = {
			.magic =	cpu_to_je16(JFFS2_MAGIC_BITMASK),
			.nodetype =	cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
			.totlen =	cpu_to_je32(c->cleanmarker_size)
		};

		marker.hdr_crc = cpu_to_je32(crc32(0, &marker, sizeof(struct jffs2_unknown_node)-4));

		vecs[0].iov_base = (unsigned char *) &marker;
		vecs[0].iov_len = sizeof(marker);
		ret = jffs2_flash_direct_writev(c, vecs, 1, jeb->offset, &retlen);

		if (ret) {
			printk(KERN_WARNING "Write clean marker to block at 0x%08x failed: %d\n",
			       jeb->offset, ret);
			goto bad2;
		}
		if (retlen != sizeof(marker)) {
			printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n",
			       jeb->offset, sizeof(marker), retlen);
			goto bad2;
		}

		/* Account the marker node as the block's first (and only) node */
		marker_ref->next_in_ino = NULL;
		marker_ref->next_phys = NULL;
		marker_ref->flash_offset = jeb->offset | REF_NORMAL;
		marker_ref->__totlen = c->cleanmarker_size;

		jeb->first_node = jeb->last_node = marker_ref;

		jeb->free_size = c->sector_size - c->cleanmarker_size;
		jeb->used_size = c->cleanmarker_size;
		jeb->dirty_size = 0;
		jeb->wasted_size = 0;
	}

	spin_lock(&c->erase_completion_lock);
	c->erasing_size -= c->sector_size;
	c->free_size += jeb->free_size;
	c->used_size += jeb->used_size;

	ACCT_SANITY_CHECK(c,jeb);
	D1(ACCT_PARANOIA_CHECK(jeb));

	list_add_tail(&jeb->list, &c->free_list);
	c->nr_erasing_blocks--;
	c->nr_free_blocks++;
	spin_unlock(&c->erase_completion_lock);
	wake_up(&c->erase_wait);
}
442 | |||
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c new file mode 100644 index 000000000000..0c607c1388f4 --- /dev/null +++ b/fs/jffs2/file.c | |||
@@ -0,0 +1,290 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2001-2003 Red Hat, Inc. | ||
5 | * | ||
6 | * Created by David Woodhouse <dwmw2@infradead.org> | ||
7 | * | ||
8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
9 | * | ||
10 | * $Id: file.c,v 1.99 2004/11/16 20:36:11 dwmw2 Exp $ | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <linux/version.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include <linux/fs.h> | ||
18 | #include <linux/time.h> | ||
19 | #include <linux/pagemap.h> | ||
20 | #include <linux/highmem.h> | ||
21 | #include <linux/crc32.h> | ||
22 | #include <linux/jffs2.h> | ||
23 | #include "nodelist.h" | ||
24 | |||
25 | extern int generic_file_open(struct inode *, struct file *) __attribute__((weak)); | ||
26 | extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin) __attribute__((weak)); | ||
27 | |||
28 | static int jffs2_commit_write (struct file *filp, struct page *pg, | ||
29 | unsigned start, unsigned end); | ||
30 | static int jffs2_prepare_write (struct file *filp, struct page *pg, | ||
31 | unsigned start, unsigned end); | ||
32 | static int jffs2_readpage (struct file *filp, struct page *pg); | ||
33 | |||
34 | int jffs2_fsync(struct file *filp, struct dentry *dentry, int datasync) | ||
35 | { | ||
36 | struct inode *inode = dentry->d_inode; | ||
37 | struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); | ||
38 | |||
39 | /* Trigger GC to flush any pending writes for this inode */ | ||
40 | jffs2_flush_wbuf_gc(c, inode->i_ino); | ||
41 | |||
42 | return 0; | ||
43 | } | ||
44 | |||
/* File operations for regular JFFS2 files. Read/write/seek/sendfile use
   the generic page-cache implementations; mmap is the read-only generic
   variant; ioctl and fsync are JFFS2's own. */
struct file_operations jffs2_file_operations =
{
	.llseek =	generic_file_llseek,
	.open =		generic_file_open,
	.read =		generic_file_read,
	.write =	generic_file_write,
	.ioctl =	jffs2_ioctl,
	.mmap =		generic_file_readonly_mmap,
	.fsync =	jffs2_fsync,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,29)
	.sendfile =	generic_file_sendfile
#endif
};
58 | |||
59 | /* jffs2_file_inode_operations */ | ||
60 | |||
/* Inode operations for regular files: only setattr needs
   filesystem-specific handling. */
struct inode_operations jffs2_file_inode_operations =
{
	.setattr =	jffs2_setattr
};
65 | |||
/* Page-cache hooks: readpage fills pages from the node lists;
   prepare_write/commit_write implement buffered writes. */
struct address_space_operations jffs2_file_address_operations =
{
	.readpage =	jffs2_readpage,
	.prepare_write =jffs2_prepare_write,
	.commit_write =	jffs2_commit_write
};
72 | |||
/* Fill one (already locked) page-cache page from the inode's nodes.
 * Caller must hold f->sem. Always returns 0: a read failure is reported
 * by clearing PageUptodate and setting the page's error flag rather
 * than by the return value. */
static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
{
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	unsigned char *pg_buf;
	int ret;

	D2(printk(KERN_DEBUG "jffs2_do_readpage_nolock(): ino #%lu, page at offset 0x%lx\n", inode->i_ino, pg->index << PAGE_CACHE_SHIFT));

	if (!PageLocked(pg))
                PAGE_BUG(pg);

	pg_buf = kmap(pg);
	/* FIXME: Can kmap fail? */

	ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE);

	if (ret) {
		ClearPageUptodate(pg);
		SetPageError(pg);
	} else {
		SetPageUptodate(pg);
		ClearPageError(pg);
	}

	flush_dcache_page(pg);
	kunmap(pg);

	D2(printk(KERN_DEBUG "readpage finished\n"));
	return 0;
}
104 | |||
/* Read one page (caller holds f->sem), then drop the page lock
   regardless of the outcome. */
int jffs2_do_readpage_unlock(struct inode *inode, struct page *pg)
{
	int err;

	err = jffs2_do_readpage_nolock(inode, pg);
	unlock_page(pg);

	return err;
}
111 | |||
112 | |||
113 | static int jffs2_readpage (struct file *filp, struct page *pg) | ||
114 | { | ||
115 | struct jffs2_inode_info *f = JFFS2_INODE_INFO(pg->mapping->host); | ||
116 | int ret; | ||
117 | |||
118 | down(&f->sem); | ||
119 | ret = jffs2_do_readpage_unlock(pg->mapping->host, pg); | ||
120 | up(&f->sem); | ||
121 | return ret; | ||
122 | } | ||
123 | |||
/* prepare_write(): called before data is copied into a page-cache page.
 * If the page lies beyond the current EOF, first write an uncompressed-
 * zero "hole" node covering the gap from EOF to the page start, so the
 * on-flash file stays contiguous. Then read in the existing page
 * contents unless the entire page is about to be overwritten. */
static int jffs2_prepare_write (struct file *filp, struct page *pg,
				unsigned start, unsigned end)
{
	struct inode *inode = pg->mapping->host;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	uint32_t pageofs = pg->index << PAGE_CACHE_SHIFT;
	int ret = 0;

	D1(printk(KERN_DEBUG "jffs2_prepare_write()\n"));

	if (pageofs > inode->i_size) {
		/* Make new hole frag from old EOF to new page */
		struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
		struct jffs2_raw_inode ri;
		struct jffs2_full_dnode *fn;
		uint32_t phys_ofs, alloc_len;

		D1(printk(KERN_DEBUG "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
			  (unsigned int)inode->i_size, pageofs));

		/* Reserve flash space BEFORE taking f->sem (lock ordering) */
		ret = jffs2_reserve_space(c, sizeof(ri), &phys_ofs, &alloc_len, ALLOC_NORMAL);
		if (ret)
			return ret;

		down(&f->sem);
		memset(&ri, 0, sizeof(ri));

		ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
		ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
		ri.totlen = cpu_to_je32(sizeof(ri));
		ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));

		ri.ino = cpu_to_je32(f->inocache->ino);
		ri.version = cpu_to_je32(++f->highest_version);
		ri.mode = cpu_to_jemode(inode->i_mode);
		ri.uid = cpu_to_je16(inode->i_uid);
		ri.gid = cpu_to_je16(inode->i_gid);
		ri.isize = cpu_to_je32(max((uint32_t)inode->i_size, pageofs));
		ri.atime = ri.ctime = ri.mtime = cpu_to_je32(get_seconds());
		/* The hole spans old EOF to the start of the new page,
		   stored as a zero-filled (COMPR_ZERO) node with no data */
		ri.offset = cpu_to_je32(inode->i_size);
		ri.dsize = cpu_to_je32(pageofs - inode->i_size);
		ri.csize = cpu_to_je32(0);
		ri.compr = JFFS2_COMPR_ZERO;
		ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
		ri.data_crc = cpu_to_je32(0);

		fn = jffs2_write_dnode(c, f, &ri, NULL, 0, phys_ofs, ALLOC_NORMAL);

		if (IS_ERR(fn)) {
			ret = PTR_ERR(fn);
			jffs2_complete_reservation(c);
			up(&f->sem);
			return ret;
		}
		ret = jffs2_add_full_dnode_to_inode(c, f, fn);
		/* The hole node covers real data now, so any standalone
		   metadata node is obsolete */
		if (f->metadata) {
			jffs2_mark_node_obsolete(c, f->metadata->raw);
			jffs2_free_full_dnode(f->metadata);
			f->metadata = NULL;
		}
		if (ret) {
			D1(printk(KERN_DEBUG "Eep. add_full_dnode_to_inode() failed in prepare_write, returned %d\n", ret));
			jffs2_mark_node_obsolete(c, fn->raw);
			jffs2_free_full_dnode(fn);
			jffs2_complete_reservation(c);
			up(&f->sem);
			return ret;
		}
		jffs2_complete_reservation(c);
		inode->i_size = pageofs;
		up(&f->sem);
	}

	/* Read in the page if it wasn't already present, unless it's a whole page */
	if (!PageUptodate(pg) && (start || end < PAGE_CACHE_SIZE)) {
		down(&f->sem);
		ret = jffs2_do_readpage_nolock(inode, pg);
		up(&f->sem);
	}
	D1(printk(KERN_DEBUG "end prepare_write(). pg->flags %lx\n", pg->flags));
	return ret;
}
206 | |||
/* Address-space ->commit_write operation: write the data copied into the
 * page cache by generic_file_write() out to flash.
 *
 * Returns the number of bytes actually committed (which the caller treats
 * as success for that many bytes), or a negative error code if nothing
 * was written. */
static int jffs2_commit_write (struct file *filp, struct page *pg,
			       unsigned start, unsigned end)
{
	/* Actually commit the write from the page cache page we're looking at.
	 * For now, we write the full page out each time. It sucks, but it's simple
	 */
	struct inode *inode = pg->mapping->host;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_raw_inode *ri;
	/* Round the start of the write down to a 4-byte boundary; the padding
	   bytes are subtracted from writtenlen again below */
	unsigned aligned_start = start & ~3;
	int ret = 0;
	uint32_t writtenlen = 0;

	D1(printk(KERN_DEBUG "jffs2_commit_write(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n",
		  inode->i_ino, pg->index << PAGE_CACHE_SHIFT, start, end, pg->flags));

	if (!start && end == PAGE_CACHE_SIZE) {
		/* We need to avoid deadlock with page_cache_read() in
		   jffs2_garbage_collect_pass(). So we have to mark the
		   page up to date, to prevent page_cache_read() from
		   trying to re-lock it. */
		SetPageUptodate(pg);
	}

	ri = jffs2_alloc_raw_inode();

	if (!ri) {
		D1(printk(KERN_DEBUG "jffs2_commit_write(): Allocation of raw inode failed\n"));
		return -ENOMEM;
	}

	/* Set the fields that the generic jffs2_write_inode_range() code can't find */
	ri->ino = cpu_to_je32(inode->i_ino);
	ri->mode = cpu_to_jemode(inode->i_mode);
	ri->uid = cpu_to_je16(inode->i_uid);
	ri->gid = cpu_to_je16(inode->i_gid);
	ri->isize = cpu_to_je32((uint32_t)inode->i_size);
	ri->atime = ri->ctime = ri->mtime = cpu_to_je32(get_seconds());

	/* In 2.4, it was already kmapped by generic_file_write(). Doesn't
	   hurt to do it again. The alternative is ifdefs, which are ugly. */
	kmap(pg);

	ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + aligned_start,
				      (pg->index << PAGE_CACHE_SHIFT) + aligned_start,
				      end - aligned_start, &writtenlen);

	kunmap(pg);

	if (ret) {
		/* There was an error writing. */
		SetPageError(pg);
	}

	/* Adjust writtenlen for the padding we did, so we don't confuse our caller */
	if (writtenlen < (start&3))
		writtenlen = 0;
	else
		writtenlen -= (start&3);

	if (writtenlen) {
		/* Extend i_size (and i_blocks, in 512-byte units) if the write
		   went past the old EOF */
		if (inode->i_size < (pg->index << PAGE_CACHE_SHIFT) + start + writtenlen) {
			inode->i_size = (pg->index << PAGE_CACHE_SHIFT) + start + writtenlen;
			inode->i_blocks = (inode->i_size + 511) >> 9;

			inode->i_ctime = inode->i_mtime = ITIME(je32_to_cpu(ri->ctime));
		}
	}

	jffs2_free_raw_inode(ri);

	if (start+writtenlen < end) {
		/* generic_file_write has written more to the page cache than we've
		   actually written to the medium. Mark the page !Uptodate so that
		   it gets reread */
		D1(printk(KERN_DEBUG "jffs2_commit_write(): Not all bytes written. Marking page !uptodate\n"));
		SetPageError(pg);
		ClearPageUptodate(pg);
	}

	D1(printk(KERN_DEBUG "jffs2_commit_write() returning %d\n",writtenlen?writtenlen:ret));
	return writtenlen?writtenlen:ret;
}
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c new file mode 100644 index 000000000000..30ab233fe423 --- /dev/null +++ b/fs/jffs2/fs.c | |||
@@ -0,0 +1,677 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2001-2003 Red Hat, Inc. | ||
5 | * | ||
6 | * Created by David Woodhouse <dwmw2@infradead.org> | ||
7 | * | ||
8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
9 | * | ||
10 | * $Id: fs.c,v 1.51 2004/11/28 12:19:37 dedekind Exp $ | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <linux/version.h> | ||
15 | #include <linux/config.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/sched.h> | ||
18 | #include <linux/fs.h> | ||
19 | #include <linux/list.h> | ||
20 | #include <linux/mtd/mtd.h> | ||
21 | #include <linux/pagemap.h> | ||
22 | #include <linux/slab.h> | ||
23 | #include <linux/vmalloc.h> | ||
24 | #include <linux/vfs.h> | ||
25 | #include <linux/crc32.h> | ||
26 | #include "nodelist.h" | ||
27 | |||
28 | static int jffs2_flash_setup(struct jffs2_sb_info *c); | ||
29 | |||
30 | static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) | ||
31 | { | ||
32 | struct jffs2_full_dnode *old_metadata, *new_metadata; | ||
33 | struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); | ||
34 | struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); | ||
35 | struct jffs2_raw_inode *ri; | ||
36 | unsigned short dev; | ||
37 | unsigned char *mdata = NULL; | ||
38 | int mdatalen = 0; | ||
39 | unsigned int ivalid; | ||
40 | uint32_t phys_ofs, alloclen; | ||
41 | int ret; | ||
42 | D1(printk(KERN_DEBUG "jffs2_setattr(): ino #%lu\n", inode->i_ino)); | ||
43 | ret = inode_change_ok(inode, iattr); | ||
44 | if (ret) | ||
45 | return ret; | ||
46 | |||
47 | /* Special cases - we don't want more than one data node | ||
48 | for these types on the medium at any time. So setattr | ||
49 | must read the original data associated with the node | ||
50 | (i.e. the device numbers or the target name) and write | ||
51 | it out again with the appropriate data attached */ | ||
52 | if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) { | ||
53 | /* For these, we don't actually need to read the old node */ | ||
54 | dev = old_encode_dev(inode->i_rdev); | ||
55 | mdata = (char *)&dev; | ||
56 | mdatalen = sizeof(dev); | ||
57 | D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of kdev_t\n", mdatalen)); | ||
58 | } else if (S_ISLNK(inode->i_mode)) { | ||
59 | mdatalen = f->metadata->size; | ||
60 | mdata = kmalloc(f->metadata->size, GFP_USER); | ||
61 | if (!mdata) | ||
62 | return -ENOMEM; | ||
63 | ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen); | ||
64 | if (ret) { | ||
65 | kfree(mdata); | ||
66 | return ret; | ||
67 | } | ||
68 | D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of symlink target\n", mdatalen)); | ||
69 | } | ||
70 | |||
71 | ri = jffs2_alloc_raw_inode(); | ||
72 | if (!ri) { | ||
73 | if (S_ISLNK(inode->i_mode)) | ||
74 | kfree(mdata); | ||
75 | return -ENOMEM; | ||
76 | } | ||
77 | |||
78 | ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &phys_ofs, &alloclen, ALLOC_NORMAL); | ||
79 | if (ret) { | ||
80 | jffs2_free_raw_inode(ri); | ||
81 | if (S_ISLNK(inode->i_mode & S_IFMT)) | ||
82 | kfree(mdata); | ||
83 | return ret; | ||
84 | } | ||
85 | down(&f->sem); | ||
86 | ivalid = iattr->ia_valid; | ||
87 | |||
88 | ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); | ||
89 | ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE); | ||
90 | ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen); | ||
91 | ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4)); | ||
92 | |||
93 | ri->ino = cpu_to_je32(inode->i_ino); | ||
94 | ri->version = cpu_to_je32(++f->highest_version); | ||
95 | |||
96 | ri->uid = cpu_to_je16((ivalid & ATTR_UID)?iattr->ia_uid:inode->i_uid); | ||
97 | ri->gid = cpu_to_je16((ivalid & ATTR_GID)?iattr->ia_gid:inode->i_gid); | ||
98 | |||
99 | if (ivalid & ATTR_MODE) | ||
100 | if (iattr->ia_mode & S_ISGID && | ||
101 | !in_group_p(je16_to_cpu(ri->gid)) && !capable(CAP_FSETID)) | ||
102 | ri->mode = cpu_to_jemode(iattr->ia_mode & ~S_ISGID); | ||
103 | else | ||
104 | ri->mode = cpu_to_jemode(iattr->ia_mode); | ||
105 | else | ||
106 | ri->mode = cpu_to_jemode(inode->i_mode); | ||
107 | |||
108 | |||
109 | ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size); | ||
110 | ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime)); | ||
111 | ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime)); | ||
112 | ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode->i_ctime)); | ||
113 | |||
114 | ri->offset = cpu_to_je32(0); | ||
115 | ri->csize = ri->dsize = cpu_to_je32(mdatalen); | ||
116 | ri->compr = JFFS2_COMPR_NONE; | ||
117 | if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) { | ||
118 | /* It's an extension. Make it a hole node */ | ||
119 | ri->compr = JFFS2_COMPR_ZERO; | ||
120 | ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size); | ||
121 | ri->offset = cpu_to_je32(inode->i_size); | ||
122 | } | ||
123 | ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); | ||
124 | if (mdatalen) | ||
125 | ri->data_crc = cpu_to_je32(crc32(0, mdata, mdatalen)); | ||
126 | else | ||
127 | ri->data_crc = cpu_to_je32(0); | ||
128 | |||
129 | new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, phys_ofs, ALLOC_NORMAL); | ||
130 | if (S_ISLNK(inode->i_mode)) | ||
131 | kfree(mdata); | ||
132 | |||
133 | if (IS_ERR(new_metadata)) { | ||
134 | jffs2_complete_reservation(c); | ||
135 | jffs2_free_raw_inode(ri); | ||
136 | up(&f->sem); | ||
137 | return PTR_ERR(new_metadata); | ||
138 | } | ||
139 | /* It worked. Update the inode */ | ||
140 | inode->i_atime = ITIME(je32_to_cpu(ri->atime)); | ||
141 | inode->i_ctime = ITIME(je32_to_cpu(ri->ctime)); | ||
142 | inode->i_mtime = ITIME(je32_to_cpu(ri->mtime)); | ||
143 | inode->i_mode = jemode_to_cpu(ri->mode); | ||
144 | inode->i_uid = je16_to_cpu(ri->uid); | ||
145 | inode->i_gid = je16_to_cpu(ri->gid); | ||
146 | |||
147 | |||
148 | old_metadata = f->metadata; | ||
149 | |||
150 | if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) | ||
151 | jffs2_truncate_fraglist (c, &f->fragtree, iattr->ia_size); | ||
152 | |||
153 | if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) { | ||
154 | jffs2_add_full_dnode_to_inode(c, f, new_metadata); | ||
155 | inode->i_size = iattr->ia_size; | ||
156 | f->metadata = NULL; | ||
157 | } else { | ||
158 | f->metadata = new_metadata; | ||
159 | } | ||
160 | if (old_metadata) { | ||
161 | jffs2_mark_node_obsolete(c, old_metadata->raw); | ||
162 | jffs2_free_full_dnode(old_metadata); | ||
163 | } | ||
164 | jffs2_free_raw_inode(ri); | ||
165 | |||
166 | up(&f->sem); | ||
167 | jffs2_complete_reservation(c); | ||
168 | |||
169 | /* We have to do the vmtruncate() without f->sem held, since | ||
170 | some pages may be locked and waiting for it in readpage(). | ||
171 | We are protected from a simultaneous write() extending i_size | ||
172 | back past iattr->ia_size, because do_truncate() holds the | ||
173 | generic inode semaphore. */ | ||
174 | if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) | ||
175 | vmtruncate(inode, iattr->ia_size); | ||
176 | |||
177 | return 0; | ||
178 | } | ||
179 | |||
/* VFS ->setattr entry point: unwrap the dentry and defer to the worker */
int jffs2_setattr(struct dentry *dentry, struct iattr *iattr)
{
	return jffs2_do_setattr(dentry->d_inode, iattr);
}
184 | |||
185 | int jffs2_statfs(struct super_block *sb, struct kstatfs *buf) | ||
186 | { | ||
187 | struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); | ||
188 | unsigned long avail; | ||
189 | |||
190 | buf->f_type = JFFS2_SUPER_MAGIC; | ||
191 | buf->f_bsize = 1 << PAGE_SHIFT; | ||
192 | buf->f_blocks = c->flash_size >> PAGE_SHIFT; | ||
193 | buf->f_files = 0; | ||
194 | buf->f_ffree = 0; | ||
195 | buf->f_namelen = JFFS2_MAX_NAME_LEN; | ||
196 | |||
197 | spin_lock(&c->erase_completion_lock); | ||
198 | |||
199 | avail = c->dirty_size + c->free_size; | ||
200 | if (avail > c->sector_size * c->resv_blocks_write) | ||
201 | avail -= c->sector_size * c->resv_blocks_write; | ||
202 | else | ||
203 | avail = 0; | ||
204 | |||
205 | buf->f_bavail = buf->f_bfree = avail >> PAGE_SHIFT; | ||
206 | |||
207 | D2(jffs2_dump_block_lists(c)); | ||
208 | |||
209 | spin_unlock(&c->erase_completion_lock); | ||
210 | |||
211 | return 0; | ||
212 | } | ||
213 | |||
214 | |||
/* VFS ->clear_inode: called when the in-core inode is being evicted */
void jffs2_clear_inode (struct inode *inode)
{
	/* We can forget about this inode for now - drop all
	 * the nodelists associated with it, etc.
	 */
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);

	D1(printk(KERN_DEBUG "jffs2_clear_inode(): ino #%lu mode %o\n", inode->i_ino, inode->i_mode));

	jffs2_do_clear_inode(c, f);
}
227 | |||
/* VFS ->read_inode: populate a freshly-allocated in-core inode from the
 * flash medium.  jffs2_do_read_inode() scans/validates the inode's nodes
 * and returns with f->sem held; every exit path below must release it.
 * On failure the inode is marked bad rather than returning an error
 * (read_inode has a void return in this kernel). */
void jffs2_read_inode (struct inode *inode)
{
	struct jffs2_inode_info *f;
	struct jffs2_sb_info *c;
	struct jffs2_raw_inode latest_node;
	int ret;

	D1(printk(KERN_DEBUG "jffs2_read_inode(): inode->i_ino == %lu\n", inode->i_ino));

	f = JFFS2_INODE_INFO(inode);
	c = JFFS2_SB_INFO(inode->i_sb);

	jffs2_init_inode_info(f);

	ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node);

	if (ret) {
		make_bad_inode(inode);
		/* jffs2_do_read_inode() left f->sem held */
		up(&f->sem);
		return;
	}
	inode->i_mode = jemode_to_cpu(latest_node.mode);
	inode->i_uid = je16_to_cpu(latest_node.uid);
	inode->i_gid = je16_to_cpu(latest_node.gid);
	inode->i_size = je32_to_cpu(latest_node.isize);
	inode->i_atime = ITIME(je32_to_cpu(latest_node.atime));
	inode->i_mtime = ITIME(je32_to_cpu(latest_node.mtime));
	inode->i_ctime = ITIME(je32_to_cpu(latest_node.ctime));

	inode->i_nlink = f->inocache->nlink;

	inode->i_blksize = PAGE_SIZE;
	inode->i_blocks = (inode->i_size + 511) >> 9;

	switch (inode->i_mode & S_IFMT) {
		jint16_t rdev;

	case S_IFLNK:
		inode->i_op = &jffs2_symlink_inode_operations;
		break;

	case S_IFDIR:
	{
		struct jffs2_full_dirent *fd;

		/* Directory link count: one per child directory, ... */
		for (fd=f->dents; fd; fd = fd->next) {
			if (fd->type == DT_DIR && fd->ino)
				inode->i_nlink++;
		}
		/* and '..' */
		inode->i_nlink++;
		/* Root dir gets i_nlink 3 for some reason */
		if (inode->i_ino == 1)
			inode->i_nlink++;

		inode->i_op = &jffs2_dir_inode_operations;
		inode->i_fop = &jffs2_dir_operations;
		break;
	}
	case S_IFREG:
		inode->i_op = &jffs2_file_inode_operations;
		inode->i_fop = &jffs2_file_operations;
		inode->i_mapping->a_ops = &jffs2_file_address_operations;
		inode->i_mapping->nrpages = 0;
		break;

	case S_IFBLK:
	case S_IFCHR:
		/* Read the device numbers from the media */
		D1(printk(KERN_DEBUG "Reading device numbers from flash\n"));
		if (jffs2_read_dnode(c, f, f->metadata, (char *)&rdev, 0, sizeof(rdev)) < 0) {
			/* Eep */
			printk(KERN_NOTICE "Read device numbers for inode %lu failed\n", (unsigned long)inode->i_ino);
			up(&f->sem);
			jffs2_do_clear_inode(c, f);
			make_bad_inode(inode);
			return;
		}

		/* deliberate fallthrough: block/char devices share the
		   init_special_inode() call below; for sockets/FIFOs the
		   rdev argument is ignored by init_special_inode() */
	case S_IFSOCK:
	case S_IFIFO:
		inode->i_op = &jffs2_file_inode_operations;
		init_special_inode(inode, inode->i_mode,
				   old_decode_dev((je16_to_cpu(rdev))));
		break;

	default:
		printk(KERN_WARNING "jffs2_read_inode(): Bogus imode %o for ino %lu\n", inode->i_mode, (unsigned long)inode->i_ino);
	}

	up(&f->sem);

	D1(printk(KERN_DEBUG "jffs2_read_inode() returning\n"));
}
322 | |||
323 | void jffs2_dirty_inode(struct inode *inode) | ||
324 | { | ||
325 | struct iattr iattr; | ||
326 | |||
327 | if (!(inode->i_state & I_DIRTY_DATASYNC)) { | ||
328 | D2(printk(KERN_DEBUG "jffs2_dirty_inode() not calling setattr() for ino #%lu\n", inode->i_ino)); | ||
329 | return; | ||
330 | } | ||
331 | |||
332 | D1(printk(KERN_DEBUG "jffs2_dirty_inode() calling setattr() for ino #%lu\n", inode->i_ino)); | ||
333 | |||
334 | iattr.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_MTIME|ATTR_CTIME; | ||
335 | iattr.ia_mode = inode->i_mode; | ||
336 | iattr.ia_uid = inode->i_uid; | ||
337 | iattr.ia_gid = inode->i_gid; | ||
338 | iattr.ia_atime = inode->i_atime; | ||
339 | iattr.ia_mtime = inode->i_mtime; | ||
340 | iattr.ia_ctime = inode->i_ctime; | ||
341 | |||
342 | jffs2_do_setattr(inode, &iattr); | ||
343 | } | ||
344 | |||
/* VFS ->remount_fs: handle ro<->rw transitions.  A medium flagged
 * JFFS2_SB_FLAG_RO can never be remounted writable.  Remounting always
 * forces MS_NOATIME. */
int jffs2_remount_fs (struct super_block *sb, int *flags, char *data)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);

	if (c->flags & JFFS2_SB_FLAG_RO && !(sb->s_flags & MS_RDONLY))
		return -EROFS;

	/* We stop if it was running, then restart if it needs to.
	   This also catches the case where it was stopped and this
	   is just a remount to restart it.
	   Flush the writebuffer, if necessary, else we lose it */
	if (!(sb->s_flags & MS_RDONLY)) {
		jffs2_stop_garbage_collect_thread(c);
		/* alloc_sem serialises against writers while we pad/flush */
		down(&c->alloc_sem);
		jffs2_flush_wbuf_pad(c);
		up(&c->alloc_sem);
	}

	if (!(*flags & MS_RDONLY))
		jffs2_start_garbage_collect_thread(c);

	*flags |= MS_NOATIME;

	return 0;
}
370 | |||
/* VFS ->write_super: periodic sync hook.  For a writable mount, kick the
 * garbage collector, erase any blocks queued for erasure, and flush the
 * write buffer. */
void jffs2_write_super (struct super_block *sb)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
	/* Clear the dirty flag first so a concurrent re-dirty isn't lost */
	sb->s_dirt = 0;

	if (sb->s_flags & MS_RDONLY)
		return;

	D1(printk(KERN_DEBUG "jffs2_write_super()\n"));
	jffs2_garbage_collect_trigger(c);
	jffs2_erase_pending_blocks(c, 0);
	jffs2_flush_wbuf_gc(c, 0);
}
384 | |||
385 | |||
/* jffs2_new_inode: allocate a new inode and inocache, add it to the hash,
   fill in the raw_inode while you're at it.

   Returns the new in-core inode (with f->sem held by jffs2_do_new_inode)
   or an ERR_PTR on failure.  The caller supplies the jffs2_raw_inode to be
   filled in, so it can append its own data before writing it out. */
struct inode *jffs2_new_inode (struct inode *dir_i, int mode, struct jffs2_raw_inode *ri)
{
	struct inode *inode;
	struct super_block *sb = dir_i->i_sb;
	struct jffs2_sb_info *c;
	struct jffs2_inode_info *f;
	int ret;

	D1(printk(KERN_DEBUG "jffs2_new_inode(): dir_i %ld, mode 0x%x\n", dir_i->i_ino, mode));

	c = JFFS2_SB_INFO(sb);

	inode = new_inode(sb);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	f = JFFS2_INODE_INFO(inode);
	jffs2_init_inode_info(f);

	memset(ri, 0, sizeof(*ri));
	/* Set OS-specific defaults for new inodes */
	ri->uid = cpu_to_je16(current->fsuid);

	/* BSD-style setgid directories: new entries inherit the parent's
	   group, and new subdirectories inherit the setgid bit too */
	if (dir_i->i_mode & S_ISGID) {
		ri->gid = cpu_to_je16(dir_i->i_gid);
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else {
		ri->gid = cpu_to_je16(current->fsgid);
	}
	ri->mode = cpu_to_jemode(mode);
	/* Allocate an inode number and inocache entry */
	ret = jffs2_do_new_inode (c, f, mode, ri);
	if (ret) {
		make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(ret);
	}
	inode->i_nlink = 1;
	inode->i_ino = je32_to_cpu(ri->ino);
	inode->i_mode = jemode_to_cpu(ri->mode);
	inode->i_gid = je16_to_cpu(ri->gid);
	inode->i_uid = je16_to_cpu(ri->uid);
	inode->i_atime = inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
	ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode->i_mtime));

	inode->i_blksize = PAGE_SIZE;
	inode->i_blocks = 0;
	inode->i_size = 0;

	insert_inode_hash(inode);

	return inode;
}
442 | |||
443 | |||
/* Core of mount: size/align the flash geometry, set up the flash layer,
 * build the inode cache hash, mount the node lists, and instantiate the
 * root dentry.  Errors unwind via the goto chain at the bottom in strict
 * reverse order of acquisition.  Returns 0 or a negative error code. */
int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
{
	struct jffs2_sb_info *c;
	struct inode *root_i;
	int ret;
	size_t blocks;

	c = JFFS2_SB_INFO(sb);

#ifndef CONFIG_JFFS2_FS_NAND
	if (c->mtd->type == MTD_NANDFLASH) {
		printk(KERN_ERR "jffs2: Cannot operate on NAND flash unless jffs2 NAND support is compiled in.\n");
		return -EINVAL;
	}
#endif

	c->flash_size = c->mtd->size;

	/*
	 * Check, if we have to concatenate physical blocks to larger virtual blocks
	 * to reduce the memorysize for c->blocks. (kmalloc allows max. 128K allocation)
	 */
	c->sector_size = c->mtd->erasesize;
	blocks = c->flash_size / c->sector_size;
	if (!(c->mtd->flags & MTD_NO_VIRTBLOCKS)) {
		/* Halve the block count (doubling virtual block size) until the
		   jffs2_eraseblock array fits in a 128KiB kmalloc */
		while ((blocks * sizeof (struct jffs2_eraseblock)) > (128 * 1024)) {
			blocks >>= 1;
			c->sector_size <<= 1;
		}
	}

	/*
	 * Size alignment check
	 */
	if ((c->sector_size * blocks) != c->flash_size) {
		c->flash_size = c->sector_size * blocks;
		printk(KERN_INFO "jffs2: Flash size not aligned to erasesize, reducing to %dKiB\n",
			c->flash_size / 1024);
	}

	if (c->sector_size != c->mtd->erasesize)
		printk(KERN_INFO "jffs2: Erase block size too small (%dKiB). Using virtual blocks size (%dKiB) instead\n",
			c->mtd->erasesize / 1024, c->sector_size / 1024);

	/* JFFS2 needs at least a few erase blocks to operate (GC headroom) */
	if (c->flash_size < 5*c->sector_size) {
		printk(KERN_ERR "jffs2: Too few erase blocks (%d)\n", c->flash_size / c->sector_size);
		return -EINVAL;
	}

	c->cleanmarker_size = sizeof(struct jffs2_unknown_node);
	/* Joern -- stick alignment for weird 8-byte-page flash here */

	/* NAND (or other bizarre) flash... do setup accordingly */
	ret = jffs2_flash_setup(c);
	if (ret)
		return ret;

	c->inocache_list = kmalloc(INOCACHE_HASHSIZE * sizeof(struct jffs2_inode_cache *), GFP_KERNEL);
	if (!c->inocache_list) {
		ret = -ENOMEM;
		goto out_wbuf;
	}
	memset(c->inocache_list, 0, INOCACHE_HASHSIZE * sizeof(struct jffs2_inode_cache *));

	if ((ret = jffs2_do_mount_fs(c)))
		goto out_inohash;

	ret = -EINVAL;

	D1(printk(KERN_DEBUG "jffs2_do_fill_super(): Getting root inode\n"));
	root_i = iget(sb, 1);
	if (is_bad_inode(root_i)) {
		D1(printk(KERN_WARNING "get root inode failed\n"));
		goto out_nodes;
	}

	D1(printk(KERN_DEBUG "jffs2_do_fill_super(): d_alloc_root()\n"));
	sb->s_root = d_alloc_root(root_i);
	if (!sb->s_root)
		goto out_root_i;

#if LINUX_VERSION_CODE >= 0x20403
	sb->s_maxbytes = 0xFFFFFFFF;
#endif
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = JFFS2_SUPER_MAGIC;
	if (!(sb->s_flags & MS_RDONLY))
		jffs2_start_garbage_collect_thread(c);
	return 0;

 out_root_i:
	iput(root_i);
 out_nodes:
	jffs2_free_ino_caches(c);
	jffs2_free_raw_node_refs(c);
	/* c->blocks allocation method depends on MTD_NO_VIRTBLOCKS; free to match */
	if (c->mtd->flags & MTD_NO_VIRTBLOCKS)
		vfree(c->blocks);
	else
		kfree(c->blocks);
 out_inohash:
	kfree(c->inocache_list);
 out_wbuf:
	jffs2_flash_cleanup(c);

	return ret;
}
551 | |||
/* GC glue: drop the reference taken by jffs2_gc_fetch_inode().
   OFNI_EDONI_2SFFJ maps the jffs2_inode_info back to its VFS inode. */
void jffs2_gc_release_inode(struct jffs2_sb_info *c,
			    struct jffs2_inode_info *f)
{
	iput(OFNI_EDONI_2SFFJ(f));
}
557 | |||
/* GC glue: obtain (and pin) the in-core inode for ino #inum so the garbage
 * collector can move its nodes.  Returns the jffs2_inode_info, NULL when
 * the GC should simply retry later (inode being deleted or in transition),
 * or an ERR_PTR on hard failure.  Caller must hold c->alloc_sem. */
struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
					      int inum, int nlink)
{
	struct inode *inode;
	struct jffs2_inode_cache *ic;
	if (!nlink) {
		/* The inode has zero nlink but its nodes weren't yet marked
		   obsolete. This has to be because we're still waiting for
		   the final (close() and) iput() to happen.

		   There's a possibility that the final iput() could have
		   happened while we were contemplating. In order to ensure
		   that we don't cause a new read_inode() (which would fail)
		   for the inode in question, we use ilookup() in this case
		   instead of iget().

		   The nlink can't _become_ zero at this point because we're
		   holding the alloc_sem, and jffs2_do_unlink() would also
		   need that while decrementing nlink on any inode.
		*/
		inode = ilookup(OFNI_BS_2SFFJ(c), inum);
		if (!inode) {
			D1(printk(KERN_DEBUG "ilookup() failed for ino #%u; inode is probably deleted.\n",
				  inum));

			spin_lock(&c->inocache_lock);
			ic = jffs2_get_ino_cache(c, inum);
			if (!ic) {
				/* Fully gone already; nothing for GC to do */
				D1(printk(KERN_DEBUG "Inode cache for ino #%u is gone.\n", inum));
				spin_unlock(&c->inocache_lock);
				return NULL;
			}
			if (ic->state != INO_STATE_CHECKEDABSENT) {
				/* Wait for progress. Don't just loop */
				D1(printk(KERN_DEBUG "Waiting for ino #%u in state %d\n",
					  ic->ino, ic->state));
				/* Releases inocache_lock before sleeping */
				sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
			} else {
				spin_unlock(&c->inocache_lock);
			}

			return NULL;
		}
	} else {
		/* Inode has links to it still; they're not going away because
		   jffs2_do_unlink() would need the alloc_sem and we have it.
		   Just iget() it, and if read_inode() is necessary that's OK.
		*/
		inode = iget(OFNI_BS_2SFFJ(c), inum);
		if (!inode)
			return ERR_PTR(-ENOMEM);
	}
	if (is_bad_inode(inode)) {
		printk(KERN_NOTICE "Eep. read_inode() failed for ino #%u. nlink %d\n",
		       inum, nlink);
		/* NB. This will happen again. We need to do something appropriate here. */
		iput(inode);
		return ERR_PTR(-EIO);
	}

	return JFFS2_INODE_INFO(inode);
}
620 | |||
/* GC glue: fetch (reading from flash if necessary) and kmap the page
 * containing file offset 'offset'.  The struct page pointer is stashed in
 * *priv for jffs2_gc_release_page().  Returns the mapped address, or an
 * ERR_PTR cast to unsigned char * on failure.
 *
 * NOTE(review): the cast of jffs2_do_readpage_unlock to read_cache_page's
 * filler_t relies on its (inode, page) signature matching (void *, page). */
unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
				   struct jffs2_inode_info *f,
				   unsigned long offset,
				   unsigned long *priv)
{
	struct inode *inode = OFNI_EDONI_2SFFJ(f);
	struct page *pg;

	pg = read_cache_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
			     (void *)jffs2_do_readpage_unlock, inode);
	if (IS_ERR(pg))
		return (void *)pg;

	*priv = (unsigned long)pg;
	return kmap(pg);
}
637 | |||
/* GC glue: undo jffs2_gc_fetch_page() — unmap the page stashed in *priv
 * and drop the page-cache reference taken by read_cache_page(). */
void jffs2_gc_release_page(struct jffs2_sb_info *c,
			   unsigned char *ptr,
			   unsigned long *priv)
{
	struct page *page = (struct page *)*priv;

	kunmap(page);
	page_cache_release(page);
}
647 | |||
/* Per-flash-type initialisation, called during mount.  Returns 0 on
 * success or the first failing setup routine's error code. */
static int jffs2_flash_setup(struct jffs2_sb_info *c) {
	int err;

	/* NAND flash (cleanmarker in OOB area) needs its own setup */
	if (jffs2_cleanmarker_oob(c)) {
		err = jffs2_nand_flash_setup(c);
		if (err)
			return err;
	}

	/* add setups for other bizarre flashes here... */
	if (jffs2_nor_ecc(c)) {
		err = jffs2_nor_ecc_flash_setup(c);
		if (err)
			return err;
	}

	return 0;
}
666 | |||
/* Per-flash-type teardown; the mirror image of jffs2_flash_setup(). */
void jffs2_flash_cleanup(struct jffs2_sb_info *c) {

	/* NAND flash (cleanmarker in OOB area) */
	if (jffs2_cleanmarker_oob(c))
		jffs2_nand_flash_cleanup(c);

	/* add cleanups for other bizarre flashes here... */
	if (jffs2_nor_ecc(c))
		jffs2_nor_ecc_flash_cleanup(c);
}
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c new file mode 100644 index 000000000000..87ec74ff5930 --- /dev/null +++ b/fs/jffs2/gc.c | |||
@@ -0,0 +1,1246 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2001-2003 Red Hat, Inc. | ||
5 | * | ||
6 | * Created by David Woodhouse <dwmw2@infradead.org> | ||
7 | * | ||
8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
9 | * | ||
10 | * $Id: gc.c,v 1.144 2004/12/21 11:18:50 dwmw2 Exp $ | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/mtd/mtd.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include <linux/pagemap.h> | ||
18 | #include <linux/crc32.h> | ||
19 | #include <linux/compiler.h> | ||
20 | #include <linux/stat.h> | ||
21 | #include "nodelist.h" | ||
22 | #include "compr.h" | ||
23 | |||
24 | static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, | ||
25 | struct jffs2_inode_cache *ic, | ||
26 | struct jffs2_raw_node_ref *raw); | ||
27 | static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | ||
28 | struct jffs2_inode_info *f, struct jffs2_full_dnode *fd); | ||
29 | static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | ||
30 | struct jffs2_inode_info *f, struct jffs2_full_dirent *fd); | ||
31 | static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | ||
32 | struct jffs2_inode_info *f, struct jffs2_full_dirent *fd); | ||
33 | static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | ||
34 | struct jffs2_inode_info *f, struct jffs2_full_dnode *fn, | ||
35 | uint32_t start, uint32_t end); | ||
36 | static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | ||
37 | struct jffs2_inode_info *f, struct jffs2_full_dnode *fn, | ||
38 | uint32_t start, uint32_t end); | ||
39 | static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | ||
40 | struct jffs2_raw_node_ref *raw, struct jffs2_inode_info *f); | ||
41 | |||
42 | /* Called with erase_completion_lock held */ | ||
43 | static struct jffs2_eraseblock *jffs2_find_gc_block(struct jffs2_sb_info *c) | ||
44 | { | ||
45 | struct jffs2_eraseblock *ret; | ||
46 | struct list_head *nextlist = NULL; | ||
47 | int n = jiffies % 128; | ||
48 | |||
49 | /* Pick an eraseblock to garbage collect next. This is where we'll | ||
50 | put the clever wear-levelling algorithms. Eventually. */ | ||
51 | /* We possibly want to favour the dirtier blocks more when the | ||
52 | number of free blocks is low. */ | ||
53 | if (!list_empty(&c->bad_used_list) && c->nr_free_blocks > c->resv_blocks_gcbad) { | ||
54 | D1(printk(KERN_DEBUG "Picking block from bad_used_list to GC next\n")); | ||
55 | nextlist = &c->bad_used_list; | ||
56 | } else if (n < 50 && !list_empty(&c->erasable_list)) { | ||
57 | /* Note that most of them will have gone directly to be erased. | ||
58 | So don't favour the erasable_list _too_ much. */ | ||
59 | D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next\n")); | ||
60 | nextlist = &c->erasable_list; | ||
61 | } else if (n < 110 && !list_empty(&c->very_dirty_list)) { | ||
62 | /* Most of the time, pick one off the very_dirty list */ | ||
63 | D1(printk(KERN_DEBUG "Picking block from very_dirty_list to GC next\n")); | ||
64 | nextlist = &c->very_dirty_list; | ||
65 | } else if (n < 126 && !list_empty(&c->dirty_list)) { | ||
66 | D1(printk(KERN_DEBUG "Picking block from dirty_list to GC next\n")); | ||
67 | nextlist = &c->dirty_list; | ||
68 | } else if (!list_empty(&c->clean_list)) { | ||
69 | D1(printk(KERN_DEBUG "Picking block from clean_list to GC next\n")); | ||
70 | nextlist = &c->clean_list; | ||
71 | } else if (!list_empty(&c->dirty_list)) { | ||
72 | D1(printk(KERN_DEBUG "Picking block from dirty_list to GC next (clean_list was empty)\n")); | ||
73 | |||
74 | nextlist = &c->dirty_list; | ||
75 | } else if (!list_empty(&c->very_dirty_list)) { | ||
76 | D1(printk(KERN_DEBUG "Picking block from very_dirty_list to GC next (clean_list and dirty_list were empty)\n")); | ||
77 | nextlist = &c->very_dirty_list; | ||
78 | } else if (!list_empty(&c->erasable_list)) { | ||
79 | D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next (clean_list and {very_,}dirty_list were empty)\n")); | ||
80 | |||
81 | nextlist = &c->erasable_list; | ||
82 | } else { | ||
83 | /* Eep. All were empty */ | ||
84 | D1(printk(KERN_NOTICE "jffs2: No clean, dirty _or_ erasable blocks to GC from! Where are they all?\n")); | ||
85 | return NULL; | ||
86 | } | ||
87 | |||
88 | ret = list_entry(nextlist->next, struct jffs2_eraseblock, list); | ||
89 | list_del(&ret->list); | ||
90 | c->gcblock = ret; | ||
91 | ret->gc_node = ret->first_node; | ||
92 | if (!ret->gc_node) { | ||
93 | printk(KERN_WARNING "Eep. ret->gc_node for block at 0x%08x is NULL\n", ret->offset); | ||
94 | BUG(); | ||
95 | } | ||
96 | |||
97 | /* Have we accidentally picked a clean block with wasted space ? */ | ||
98 | if (ret->wasted_size) { | ||
99 | D1(printk(KERN_DEBUG "Converting wasted_size %08x to dirty_size\n", ret->wasted_size)); | ||
100 | ret->dirty_size += ret->wasted_size; | ||
101 | c->wasted_size -= ret->wasted_size; | ||
102 | c->dirty_size += ret->wasted_size; | ||
103 | ret->wasted_size = 0; | ||
104 | } | ||
105 | |||
106 | D2(jffs2_dump_block_lists(c)); | ||
107 | return ret; | ||
108 | } | ||
109 | |||
/* jffs2_garbage_collect_pass
 * Make a single attempt to progress GC. Move one node, and possibly
 * start erasing one eraseblock.
 *
 * Returns 0 on progress (or on a benign "come back later" condition),
 * -EINTR if interrupted while waiting for alloc_sem, -EIO if no block
 * could be found to GC, or whatever error the GC helpers return.
 */
int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
{
	struct jffs2_inode_info *f;
	struct jffs2_inode_cache *ic;
	struct jffs2_eraseblock *jeb;
	struct jffs2_raw_node_ref *raw;
	int ret = 0, inum, nlink;

	if (down_interruptible(&c->alloc_sem))
		return -EINTR;

	/* Phase 1: while there is still unchecked space, spend each GC pass
	   CRC-checking one inode instead of moving nodes.  The loop exits
	   via 'break' with erase_completion_lock HELD once unchecked_size
	   hits zero, or returns after checking a single inode. */
	for (;;) {
		spin_lock(&c->erase_completion_lock);
		if (!c->unchecked_size)
			break;

		/* We can't start doing GC yet. We haven't finished checking
		   the node CRCs etc. Do it now. */

		/* checked_ino is protected by the alloc_sem */
		if (c->checked_ino > c->highest_ino) {
			printk(KERN_CRIT "Checked all inodes but still 0x%x bytes of unchecked space?\n",
			       c->unchecked_size);
			D2(jffs2_dump_block_lists(c));
			spin_unlock(&c->erase_completion_lock);
			BUG();
		}

		spin_unlock(&c->erase_completion_lock);

		spin_lock(&c->inocache_lock);

		ic = jffs2_get_ino_cache(c, c->checked_ino++);

		/* No inocache for this ino: nothing to check, try the next one. */
		if (!ic) {
			spin_unlock(&c->inocache_lock);
			continue;
		}

		if (!ic->nlink) {
			D1(printk(KERN_DEBUG "Skipping check of ino #%d with nlink zero\n",
				  ic->ino));
			spin_unlock(&c->inocache_lock);
			continue;
		}
		switch(ic->state) {
		case INO_STATE_CHECKEDABSENT:
		case INO_STATE_PRESENT:
			D1(printk(KERN_DEBUG "Skipping ino #%u already checked\n", ic->ino));
			spin_unlock(&c->inocache_lock);
			continue;

		case INO_STATE_GC:
		case INO_STATE_CHECKING:
			printk(KERN_WARNING "Inode #%u is in state %d during CRC check phase!\n", ic->ino, ic->state);
			spin_unlock(&c->inocache_lock);
			BUG();

		case INO_STATE_READING:
			/* We need to wait for it to finish, lest we move on
			   and trigger the BUG() above while we haven't yet
			   finished checking all its nodes */
			D1(printk(KERN_DEBUG "Waiting for ino #%u to finish reading\n", ic->ino));
			/* alloc_sem must be dropped before sleeping */
			up(&c->alloc_sem);
			sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
			return 0;

		default:
			BUG();

		case INO_STATE_UNCHECKED:
			;
		}
		/* Claim this inode for checking; the state blocks read_inode() */
		ic->state = INO_STATE_CHECKING;
		spin_unlock(&c->inocache_lock);

		D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() triggering inode scan of ino#%u\n", ic->ino));

		ret = jffs2_do_crccheck_inode(c, ic);
		if (ret)
			printk(KERN_WARNING "Returned error for crccheck of ino #%u. Expect badness...\n", ic->ino);

		jffs2_set_inocache_state(c, ic, INO_STATE_CHECKEDABSENT);
		up(&c->alloc_sem);
		return ret;
	}

	/* Phase 2: actual garbage collection.  NB: we still hold
	   erase_completion_lock here, from the top of the loop above. */

	/* First, work out which block we're garbage-collecting */
	jeb = c->gcblock;

	if (!jeb)
		jeb = jffs2_find_gc_block(c);

	if (!jeb) {
		D1 (printk(KERN_NOTICE "jffs2: Couldn't find erase block to garbage collect!\n"));
		spin_unlock(&c->erase_completion_lock);
		up(&c->alloc_sem);
		return -EIO;
	}

	D1(printk(KERN_DEBUG "GC from block %08x, used_size %08x, dirty_size %08x, free_size %08x\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->free_size));
	D1(if (c->nextblock)
	   printk(KERN_DEBUG "Nextblock at %08x, used_size %08x, dirty_size %08x, wasted_size %08x, free_size %08x\n", c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->free_size));

	/* Nothing live left in the block: jump straight to erasing it.
	   (goto eraseit, not eraseit_lock — we already hold the lock.) */
	if (!jeb->used_size) {
		up(&c->alloc_sem);
		goto eraseit;
	}

	raw = jeb->gc_node;

	/* Skip over nodes already obsoleted since the last pass. */
	while(ref_obsolete(raw)) {
		D1(printk(KERN_DEBUG "Node at 0x%08x is obsolete... skipping\n", ref_offset(raw)));
		raw = raw->next_phys;
		if (unlikely(!raw)) {
			printk(KERN_WARNING "eep. End of raw list while still supposedly nodes to GC\n");
			printk(KERN_WARNING "erase block at 0x%08x. free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x\n",
			       jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size);
			jeb->gc_node = raw;
			spin_unlock(&c->erase_completion_lock);
			up(&c->alloc_sem);
			BUG();
		}
	}
	jeb->gc_node = raw;

	D1(printk(KERN_DEBUG "Going to garbage collect node at 0x%08x\n", ref_offset(raw)));

	if (!raw->next_in_ino) {
		/* Inode-less node. Clean marker, snapshot or something like that */
		/* FIXME: If it's something that needs to be copied, including something
		   we don't grok that has JFFS2_NODETYPE_RWCOMPAT_COPY, we should do so */
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, raw);
		up(&c->alloc_sem);
		goto eraseit_lock;
	}

	ic = jffs2_raw_ref_to_ic(raw);

	/* We need to hold the inocache. Either the erase_completion_lock or
	   the inocache_lock are sufficient; we trade down since the inocache_lock
	   causes less contention. */
	spin_lock(&c->inocache_lock);

	spin_unlock(&c->erase_completion_lock);

	D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass collecting from block @0x%08x. Node @0x%08x(%d), ino #%u\n", jeb->offset, ref_offset(raw), ref_flags(raw), ic->ino));

	/* Three possibilities:
	   1. Inode is already in-core. We must iget it and do proper
	      updating to its fragtree, etc.
	   2. Inode is not in-core, node is REF_PRISTINE. We lock the
	      inocache to prevent a read_inode(), copy the node intact.
	   3. Inode is not in-core, node is not pristine. We must iget()
	      and take the slow path.
	*/

	switch(ic->state) {
	case INO_STATE_CHECKEDABSENT:
		/* It's been checked, but it's not currently in-core.
		   We can just copy any pristine nodes, but have
		   to prevent anyone else from doing read_inode() while
		   we're at it, so we set the state accordingly */
		if (ref_flags(raw) == REF_PRISTINE)
			ic->state = INO_STATE_GC;
		else {
			D1(printk(KERN_DEBUG "Ino #%u is absent but node not REF_PRISTINE. Reading.\n",
				  ic->ino));
		}
		break;

	case INO_STATE_PRESENT:
		/* It's in-core. GC must iget() it. */
		break;

	case INO_STATE_UNCHECKED:
	case INO_STATE_CHECKING:
	case INO_STATE_GC:
		/* Should never happen. We should have finished checking
		   by the time we actually start doing any GC, and since
		   we're holding the alloc_sem, no other garbage collection
		   can happen.
		*/
		printk(KERN_CRIT "Inode #%u already in state %d in jffs2_garbage_collect_pass()!\n",
		       ic->ino, ic->state);
		up(&c->alloc_sem);
		spin_unlock(&c->inocache_lock);
		BUG();

	case INO_STATE_READING:
		/* Someone's currently trying to read it. We must wait for
		   them to finish and then go through the full iget() route
		   to do the GC. However, sometimes read_inode() needs to get
		   the alloc_sem() (for marking nodes invalid) so we must
		   drop the alloc_sem before sleeping. */

		up(&c->alloc_sem);
		D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() waiting for ino #%u in state %d\n",
			  ic->ino, ic->state));
		sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
		/* And because we dropped the alloc_sem we must start again from the
		   beginning. Ponder chance of livelock here -- we're returning success
		   without actually making any progress.

		   Q: What are the chances that the inode is back in INO_STATE_READING
		   again by the time we next enter this function? And that this happens
		   enough times to cause a real delay?

		   A: Small enough that I don't care :)
		*/
		return 0;
	}

	/* OK. Now if the inode is in state INO_STATE_GC, we are going to copy the
	   node intact, and we don't have to muck about with the fragtree etc.
	   because we know it's not in-core. If it _was_ in-core, we go through
	   all the iget() crap anyway */

	if (ic->state == INO_STATE_GC) {
		spin_unlock(&c->inocache_lock);

		ret = jffs2_garbage_collect_pristine(c, ic, raw);

		spin_lock(&c->inocache_lock);
		ic->state = INO_STATE_CHECKEDABSENT;
		wake_up(&c->inocache_wq);

		/* -EBADFD means "couldn't copy intact"; fall through to the
		   slow iget() path below.  Anything else, we're done. */
		if (ret != -EBADFD) {
			spin_unlock(&c->inocache_lock);
			goto release_sem;
		}

		/* Fall through if it wanted us to, with inocache_lock held */
	}

	/* Prevent the fairly unlikely race where the gcblock is
	   entirely obsoleted by the final close of a file which had
	   the only valid nodes in the block, followed by erasure,
	   followed by freeing of the ic because the erased block(s)
	   held _all_ the nodes of that inode.... never been seen but
	   it's vaguely possible. */

	inum = ic->ino;
	nlink = ic->nlink;
	spin_unlock(&c->inocache_lock);

	f = jffs2_gc_fetch_inode(c, inum, nlink);
	if (IS_ERR(f)) {
		ret = PTR_ERR(f);
		goto release_sem;
	}
	if (!f) {
		/* Inode vanished in the meantime (see race comment above) */
		ret = 0;
		goto release_sem;
	}

	ret = jffs2_garbage_collect_live(c, jeb, raw, f);

	jffs2_gc_release_inode(c, f);

 release_sem:
	up(&c->alloc_sem);

 eraseit_lock:
	/* If we've finished this block, start it erasing */
	spin_lock(&c->erase_completion_lock);

 eraseit:
	/* Reached with erase_completion_lock held on both paths */
	if (c->gcblock && !c->gcblock->used_size) {
		D1(printk(KERN_DEBUG "Block at 0x%08x completely obsoleted by GC. Moving to erase_pending_list\n", c->gcblock->offset));
		/* We're GC'ing an empty block? */
		list_add_tail(&c->gcblock->list, &c->erase_pending_list);
		c->gcblock = NULL;
		c->nr_erasing_blocks++;
		jffs2_erase_pending_trigger(c);
	}
	spin_unlock(&c->erase_completion_lock);

	return ret;
}
395 | |||
/* Garbage collect one live node belonging to inode @f: work out whether
 * @raw is f's metadata node, a data node (possibly covering a hole), or
 * a directory entry, and dispatch to the matching helper.
 * Caller holds c->alloc_sem; we take and release f->sem around the
 * whole operation.  Returns 0 or a negative error code. */
static int jffs2_garbage_collect_live(struct jffs2_sb_info *c,  struct jffs2_eraseblock *jeb,
				      struct jffs2_raw_node_ref *raw, struct jffs2_inode_info *f)
{
	struct jffs2_node_frag *frag;
	struct jffs2_full_dnode *fn = NULL;
	struct jffs2_full_dirent *fd;
	uint32_t start = 0, end = 0, nrfrags = 0;
	int ret = 0;

	down(&f->sem);

	/* Now we have the lock for this inode. Check that it's still the one at the head
	   of the list. */

	spin_lock(&c->erase_completion_lock);

	if (c->gcblock != jeb) {
		spin_unlock(&c->erase_completion_lock);
		D1(printk(KERN_DEBUG "GC block is no longer gcblock. Restart\n"));
		goto upnout;
	}
	if (ref_obsolete(raw)) {
		spin_unlock(&c->erase_completion_lock);
		D1(printk(KERN_DEBUG "node to be GC'd was obsoleted in the meantime.\n"));
		/* They'll call again */
		goto upnout;
	}
	spin_unlock(&c->erase_completion_lock);

	/* OK. Looks safe. And nobody can get us now because we have the semaphore. Move the block */
	if (f->metadata && f->metadata->raw == raw) {
		fn = f->metadata;
		ret = jffs2_garbage_collect_metadata(c, jeb, f, fn);
		goto upnout;
	}

	/* Scan the fragtree for frags backed by this raw node, recording the
	   byte range [start, end) that they cover. */
	/* FIXME. Read node and do lookup? */
	for (frag = frag_first(&f->fragtree); frag; frag = frag_next(frag)) {
		if (frag->node && frag->node->raw == raw) {
			fn = frag->node;
			end = frag->ofs + frag->size;
			if (!nrfrags++)
				start = frag->ofs;
			if (nrfrags == frag->node->frags)
				break; /* We've found them all */
		}
	}
	if (fn) {
		/* Fast path: try to copy a pristine node verbatim.
		   -EBADFD means "can't copy intact, use the normal path". */
		if (ref_flags(raw) == REF_PRISTINE) {
			ret = jffs2_garbage_collect_pristine(c, f->inocache, raw);
			if (!ret) {
				/* Urgh. Return it sensibly. */
				/* (the new ref was linked at the head of ic->nodes) */
				frag->node->raw = f->inocache->nodes;
			}
			if (ret != -EBADFD)
				goto upnout;
		}
		/* We found a datanode. Do the GC */
		if((start >> PAGE_CACHE_SHIFT) < ((end-1) >> PAGE_CACHE_SHIFT)) {
			/* It crosses a page boundary. Therefore, it must be a hole. */
			ret = jffs2_garbage_collect_hole(c, jeb, f, fn, start, end);
		} else {
			/* It could still be a hole. But we GC the page this way anyway */
			ret = jffs2_garbage_collect_dnode(c, jeb, f, fn, start, end);
		}
		goto upnout;
	}

	/* Wasn't a dnode. Try dirent */
	for (fd = f->dents; fd; fd=fd->next) {
		if (fd->raw == raw)
			break;
	}

	if (fd && fd->ino) {
		ret = jffs2_garbage_collect_dirent(c, jeb, f, fd);
	} else if (fd) {
		/* ino == 0: a deletion dirent */
		ret = jffs2_garbage_collect_deletion_dirent(c, jeb, f, fd);
	} else {
		printk(KERN_WARNING "Raw node at 0x%08x wasn't in node lists for ino #%u\n",
		       ref_offset(raw), f->inocache->ino);
		if (ref_obsolete(raw)) {
			printk(KERN_WARNING "But it's obsolete so we don't mind too much\n");
		} else {
			ret = -EIO;
		}
	}
 upnout:
	up(&f->sem);

	return ret;
}
488 | |||
489 | static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, | ||
490 | struct jffs2_inode_cache *ic, | ||
491 | struct jffs2_raw_node_ref *raw) | ||
492 | { | ||
493 | union jffs2_node_union *node; | ||
494 | struct jffs2_raw_node_ref *nraw; | ||
495 | size_t retlen; | ||
496 | int ret; | ||
497 | uint32_t phys_ofs, alloclen; | ||
498 | uint32_t crc, rawlen; | ||
499 | int retried = 0; | ||
500 | |||
501 | D1(printk(KERN_DEBUG "Going to GC REF_PRISTINE node at 0x%08x\n", ref_offset(raw))); | ||
502 | |||
503 | rawlen = ref_totlen(c, c->gcblock, raw); | ||
504 | |||
505 | /* Ask for a small amount of space (or the totlen if smaller) because we | ||
506 | don't want to force wastage of the end of a block if splitting would | ||
507 | work. */ | ||
508 | ret = jffs2_reserve_space_gc(c, min_t(uint32_t, sizeof(struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN, | ||
509 | rawlen), &phys_ofs, &alloclen); | ||
510 | if (ret) | ||
511 | return ret; | ||
512 | |||
513 | if (alloclen < rawlen) { | ||
514 | /* Doesn't fit untouched. We'll go the old route and split it */ | ||
515 | return -EBADFD; | ||
516 | } | ||
517 | |||
518 | node = kmalloc(rawlen, GFP_KERNEL); | ||
519 | if (!node) | ||
520 | return -ENOMEM; | ||
521 | |||
522 | ret = jffs2_flash_read(c, ref_offset(raw), rawlen, &retlen, (char *)node); | ||
523 | if (!ret && retlen != rawlen) | ||
524 | ret = -EIO; | ||
525 | if (ret) | ||
526 | goto out_node; | ||
527 | |||
528 | crc = crc32(0, node, sizeof(struct jffs2_unknown_node)-4); | ||
529 | if (je32_to_cpu(node->u.hdr_crc) != crc) { | ||
530 | printk(KERN_WARNING "Header CRC failed on REF_PRISTINE node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", | ||
531 | ref_offset(raw), je32_to_cpu(node->u.hdr_crc), crc); | ||
532 | goto bail; | ||
533 | } | ||
534 | |||
535 | switch(je16_to_cpu(node->u.nodetype)) { | ||
536 | case JFFS2_NODETYPE_INODE: | ||
537 | crc = crc32(0, node, sizeof(node->i)-8); | ||
538 | if (je32_to_cpu(node->i.node_crc) != crc) { | ||
539 | printk(KERN_WARNING "Node CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", | ||
540 | ref_offset(raw), je32_to_cpu(node->i.node_crc), crc); | ||
541 | goto bail; | ||
542 | } | ||
543 | |||
544 | if (je32_to_cpu(node->i.dsize)) { | ||
545 | crc = crc32(0, node->i.data, je32_to_cpu(node->i.csize)); | ||
546 | if (je32_to_cpu(node->i.data_crc) != crc) { | ||
547 | printk(KERN_WARNING "Data CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", | ||
548 | ref_offset(raw), je32_to_cpu(node->i.data_crc), crc); | ||
549 | goto bail; | ||
550 | } | ||
551 | } | ||
552 | break; | ||
553 | |||
554 | case JFFS2_NODETYPE_DIRENT: | ||
555 | crc = crc32(0, node, sizeof(node->d)-8); | ||
556 | if (je32_to_cpu(node->d.node_crc) != crc) { | ||
557 | printk(KERN_WARNING "Node CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", | ||
558 | ref_offset(raw), je32_to_cpu(node->d.node_crc), crc); | ||
559 | goto bail; | ||
560 | } | ||
561 | |||
562 | if (node->d.nsize) { | ||
563 | crc = crc32(0, node->d.name, node->d.nsize); | ||
564 | if (je32_to_cpu(node->d.name_crc) != crc) { | ||
565 | printk(KERN_WARNING "Name CRC failed on REF_PRISTINE dirent ode at 0x%08x: Read 0x%08x, calculated 0x%08x\n", | ||
566 | ref_offset(raw), je32_to_cpu(node->d.name_crc), crc); | ||
567 | goto bail; | ||
568 | } | ||
569 | } | ||
570 | break; | ||
571 | default: | ||
572 | printk(KERN_WARNING "Unknown node type for REF_PRISTINE node at 0x%08x: 0x%04x\n", | ||
573 | ref_offset(raw), je16_to_cpu(node->u.nodetype)); | ||
574 | goto bail; | ||
575 | } | ||
576 | |||
577 | nraw = jffs2_alloc_raw_node_ref(); | ||
578 | if (!nraw) { | ||
579 | ret = -ENOMEM; | ||
580 | goto out_node; | ||
581 | } | ||
582 | |||
583 | /* OK, all the CRCs are good; this node can just be copied as-is. */ | ||
584 | retry: | ||
585 | nraw->flash_offset = phys_ofs; | ||
586 | nraw->__totlen = rawlen; | ||
587 | nraw->next_phys = NULL; | ||
588 | |||
589 | ret = jffs2_flash_write(c, phys_ofs, rawlen, &retlen, (char *)node); | ||
590 | |||
591 | if (ret || (retlen != rawlen)) { | ||
592 | printk(KERN_NOTICE "Write of %d bytes at 0x%08x failed. returned %d, retlen %zd\n", | ||
593 | rawlen, phys_ofs, ret, retlen); | ||
594 | if (retlen) { | ||
595 | /* Doesn't belong to any inode */ | ||
596 | nraw->next_in_ino = NULL; | ||
597 | |||
598 | nraw->flash_offset |= REF_OBSOLETE; | ||
599 | jffs2_add_physical_node_ref(c, nraw); | ||
600 | jffs2_mark_node_obsolete(c, nraw); | ||
601 | } else { | ||
602 | printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", nraw->flash_offset); | ||
603 | jffs2_free_raw_node_ref(nraw); | ||
604 | } | ||
605 | if (!retried && (nraw = jffs2_alloc_raw_node_ref())) { | ||
606 | /* Try to reallocate space and retry */ | ||
607 | uint32_t dummy; | ||
608 | struct jffs2_eraseblock *jeb = &c->blocks[phys_ofs / c->sector_size]; | ||
609 | |||
610 | retried = 1; | ||
611 | |||
612 | D1(printk(KERN_DEBUG "Retrying failed write of REF_PRISTINE node.\n")); | ||
613 | |||
614 | ACCT_SANITY_CHECK(c,jeb); | ||
615 | D1(ACCT_PARANOIA_CHECK(jeb)); | ||
616 | |||
617 | ret = jffs2_reserve_space_gc(c, rawlen, &phys_ofs, &dummy); | ||
618 | |||
619 | if (!ret) { | ||
620 | D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", phys_ofs)); | ||
621 | |||
622 | ACCT_SANITY_CHECK(c,jeb); | ||
623 | D1(ACCT_PARANOIA_CHECK(jeb)); | ||
624 | |||
625 | goto retry; | ||
626 | } | ||
627 | D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret)); | ||
628 | jffs2_free_raw_node_ref(nraw); | ||
629 | } | ||
630 | |||
631 | jffs2_free_raw_node_ref(nraw); | ||
632 | if (!ret) | ||
633 | ret = -EIO; | ||
634 | goto out_node; | ||
635 | } | ||
636 | nraw->flash_offset |= REF_PRISTINE; | ||
637 | jffs2_add_physical_node_ref(c, nraw); | ||
638 | |||
639 | /* Link into per-inode list. This is safe because of the ic | ||
640 | state being INO_STATE_GC. Note that if we're doing this | ||
641 | for an inode which is in-core, the 'nraw' pointer is then | ||
642 | going to be fetched from ic->nodes by our caller. */ | ||
643 | spin_lock(&c->erase_completion_lock); | ||
644 | nraw->next_in_ino = ic->nodes; | ||
645 | ic->nodes = nraw; | ||
646 | spin_unlock(&c->erase_completion_lock); | ||
647 | |||
648 | jffs2_mark_node_obsolete(c, raw); | ||
649 | D1(printk(KERN_DEBUG "WHEEE! GC REF_PRISTINE node at 0x%08x succeeded\n", ref_offset(raw))); | ||
650 | |||
651 | out_node: | ||
652 | kfree(node); | ||
653 | return ret; | ||
654 | bail: | ||
655 | ret = -EBADFD; | ||
656 | goto out_node; | ||
657 | } | ||
658 | |||
659 | static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | ||
660 | struct jffs2_inode_info *f, struct jffs2_full_dnode *fn) | ||
661 | { | ||
662 | struct jffs2_full_dnode *new_fn; | ||
663 | struct jffs2_raw_inode ri; | ||
664 | jint16_t dev; | ||
665 | char *mdata = NULL, mdatalen = 0; | ||
666 | uint32_t alloclen, phys_ofs; | ||
667 | int ret; | ||
668 | |||
669 | if (S_ISBLK(JFFS2_F_I_MODE(f)) || | ||
670 | S_ISCHR(JFFS2_F_I_MODE(f)) ) { | ||
671 | /* For these, we don't actually need to read the old node */ | ||
672 | /* FIXME: for minor or major > 255. */ | ||
673 | dev = cpu_to_je16(((JFFS2_F_I_RDEV_MAJ(f) << 8) | | ||
674 | JFFS2_F_I_RDEV_MIN(f))); | ||
675 | mdata = (char *)&dev; | ||
676 | mdatalen = sizeof(dev); | ||
677 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bytes of kdev_t\n", mdatalen)); | ||
678 | } else if (S_ISLNK(JFFS2_F_I_MODE(f))) { | ||
679 | mdatalen = fn->size; | ||
680 | mdata = kmalloc(fn->size, GFP_KERNEL); | ||
681 | if (!mdata) { | ||
682 | printk(KERN_WARNING "kmalloc of mdata failed in jffs2_garbage_collect_metadata()\n"); | ||
683 | return -ENOMEM; | ||
684 | } | ||
685 | ret = jffs2_read_dnode(c, f, fn, mdata, 0, mdatalen); | ||
686 | if (ret) { | ||
687 | printk(KERN_WARNING "read of old metadata failed in jffs2_garbage_collect_metadata(): %d\n", ret); | ||
688 | kfree(mdata); | ||
689 | return ret; | ||
690 | } | ||
691 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bites of symlink target\n", mdatalen)); | ||
692 | |||
693 | } | ||
694 | |||
695 | ret = jffs2_reserve_space_gc(c, sizeof(ri) + mdatalen, &phys_ofs, &alloclen); | ||
696 | if (ret) { | ||
697 | printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_metadata failed: %d\n", | ||
698 | sizeof(ri)+ mdatalen, ret); | ||
699 | goto out; | ||
700 | } | ||
701 | |||
702 | memset(&ri, 0, sizeof(ri)); | ||
703 | ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); | ||
704 | ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE); | ||
705 | ri.totlen = cpu_to_je32(sizeof(ri) + mdatalen); | ||
706 | ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4)); | ||
707 | |||
708 | ri.ino = cpu_to_je32(f->inocache->ino); | ||
709 | ri.version = cpu_to_je32(++f->highest_version); | ||
710 | ri.mode = cpu_to_jemode(JFFS2_F_I_MODE(f)); | ||
711 | ri.uid = cpu_to_je16(JFFS2_F_I_UID(f)); | ||
712 | ri.gid = cpu_to_je16(JFFS2_F_I_GID(f)); | ||
713 | ri.isize = cpu_to_je32(JFFS2_F_I_SIZE(f)); | ||
714 | ri.atime = cpu_to_je32(JFFS2_F_I_ATIME(f)); | ||
715 | ri.ctime = cpu_to_je32(JFFS2_F_I_CTIME(f)); | ||
716 | ri.mtime = cpu_to_je32(JFFS2_F_I_MTIME(f)); | ||
717 | ri.offset = cpu_to_je32(0); | ||
718 | ri.csize = cpu_to_je32(mdatalen); | ||
719 | ri.dsize = cpu_to_je32(mdatalen); | ||
720 | ri.compr = JFFS2_COMPR_NONE; | ||
721 | ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); | ||
722 | ri.data_crc = cpu_to_je32(crc32(0, mdata, mdatalen)); | ||
723 | |||
724 | new_fn = jffs2_write_dnode(c, f, &ri, mdata, mdatalen, phys_ofs, ALLOC_GC); | ||
725 | |||
726 | if (IS_ERR(new_fn)) { | ||
727 | printk(KERN_WARNING "Error writing new dnode: %ld\n", PTR_ERR(new_fn)); | ||
728 | ret = PTR_ERR(new_fn); | ||
729 | goto out; | ||
730 | } | ||
731 | jffs2_mark_node_obsolete(c, fn->raw); | ||
732 | jffs2_free_full_dnode(fn); | ||
733 | f->metadata = new_fn; | ||
734 | out: | ||
735 | if (S_ISLNK(JFFS2_F_I_MODE(f))) | ||
736 | kfree(mdata); | ||
737 | return ret; | ||
738 | } | ||
739 | |||
/*
 * Garbage-collect a live directory entry: write a fresh copy of the
 * dirent node for 'fd' (with a new version number) and hook the new
 * full_dirent into f->dents in place of the old one.
 *
 * Returns 0 on success or a negative errno from space reservation /
 * the write path.
 */
static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
					struct jffs2_inode_info *f, struct jffs2_full_dirent *fd)
{
	struct jffs2_full_dirent *new_fd;
	struct jffs2_raw_dirent rd;
	uint32_t alloclen, phys_ofs;
	int ret;

	/* Build the on-flash dirent. NOTE: field order matters below --
	   hdr_crc covers the common header (all of jffs2_unknown_node
	   except its own crc word), and node_crc covers everything up to
	   the last 8 bytes (the node_crc and name_crc words themselves),
	   so both must be computed only after the fields they cover. */
	rd.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	rd.nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
	rd.nsize = strlen(fd->name);
	rd.totlen = cpu_to_je32(sizeof(rd) + rd.nsize);
	rd.hdr_crc = cpu_to_je32(crc32(0, &rd, sizeof(struct jffs2_unknown_node)-4));

	rd.pino = cpu_to_je32(f->inocache->ino);
	/* New version: this copy supersedes the old node on replay */
	rd.version = cpu_to_je32(++f->highest_version);
	rd.ino = cpu_to_je32(fd->ino);
	rd.mctime = cpu_to_je32(max(JFFS2_F_I_MTIME(f), JFFS2_F_I_CTIME(f)));
	rd.type = fd->type;
	rd.node_crc = cpu_to_je32(crc32(0, &rd, sizeof(rd)-8));
	rd.name_crc = cpu_to_je32(crc32(0, fd->name, rd.nsize));

	ret = jffs2_reserve_space_gc(c, sizeof(rd)+rd.nsize, &phys_ofs, &alloclen);
	if (ret) {
		printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dirent failed: %d\n",
		       sizeof(rd)+rd.nsize, ret);
		return ret;
	}
	new_fd = jffs2_write_dirent(c, f, &rd, fd->name, rd.nsize, phys_ofs, ALLOC_GC);

	if (IS_ERR(new_fd)) {
		printk(KERN_WARNING "jffs2_write_dirent in garbage_collect_dirent failed: %ld\n", PTR_ERR(new_fd));
		return PTR_ERR(new_fd);
	}
	/* jffs2_add_fd_to_list() obsoletes whichever of the two
	   same-named dirents has the lower version */
	jffs2_add_fd_to_list(c, new_fd, &f->dents);
	return 0;
}
777 | |||
/*
 * Garbage-collect a deletion dirent (a dirent with ino 0 that exists
 * only to shadow an older 'real' dirent with the same name).
 *
 * On media where nodes can be marked obsolete in place, the deletion
 * dirent is simply dropped. Otherwise we must first scan the inode's
 * obsolete node refs to see whether a matching real dirent still sits
 * elsewhere on the flash; if so, the deletion dirent is still doing
 * useful work and gets rewritten via jffs2_garbage_collect_dirent().
 *
 * Returns 0 on success or a negative errno.
 */
static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
					struct jffs2_inode_info *f, struct jffs2_full_dirent *fd)
{
	struct jffs2_full_dirent **fdp = &f->dents;
	int found = 0;

	/* On a medium where we can't actually mark nodes obsolete
	   permanently, such as NAND flash, we need to work out
	   whether this deletion dirent is still needed to actively
	   delete a 'real' dirent with the same name that's still
	   somewhere else on the flash. */
	if (!jffs2_can_mark_obsolete(c)) {
		struct jffs2_raw_dirent *rd;
		struct jffs2_raw_node_ref *raw;
		int ret;
		size_t retlen;
		int name_len = strlen(fd->name);
		uint32_t name_crc = crc32(0, fd->name, name_len);
		/* A real dirent with the same name has the same on-flash
		   length, so rawlen is used as a cheap first filter */
		uint32_t rawlen = ref_totlen(c, jeb, fd->raw);

		rd = kmalloc(rawlen, GFP_KERNEL);
		if (!rd)
			return -ENOMEM;

		/* Prevent the erase code from nicking the obsolete node refs while
		   we're looking at them. I really don't like this extra lock but
		   can't see any alternative. Suggestions on a postcard to... */
		down(&c->erase_free_sem);

		/* Walk every node ref belonging to this inode; the list is
		   terminated by a pointer back to the inocache itself */
		for (raw = f->inocache->nodes; raw != (void *)f->inocache; raw = raw->next_in_ino) {

			/* We only care about obsolete ones */
			if (!(ref_obsolete(raw)))
				continue;

			/* Any dirent with the same name is going to have the same length... */
			if (ref_totlen(c, NULL, raw) != rawlen)
				continue;

			/* Doesn't matter if there's one in the same erase block. We're going to
			   delete it too at the same time. */
			if ((raw->flash_offset & ~(c->sector_size-1)) ==
			    (fd->raw->flash_offset & ~(c->sector_size-1)))
				continue;

			D1(printk(KERN_DEBUG "Check potential deletion dirent at %08x\n", ref_offset(raw)));

			/* This is an obsolete node belonging to the same directory, and it's of the right
			   length. We need to take a closer look...*/
			ret = jffs2_flash_read(c, ref_offset(raw), rawlen, &retlen, (char *)rd);
			if (ret) {
				printk(KERN_WARNING "jffs2_g_c_deletion_dirent(): Read error (%d) reading obsolete node at %08x\n", ret, ref_offset(raw));
				/* If we can't read it, we don't need to continue to obsolete it. Continue */
				continue;
			}
			if (retlen != rawlen) {
				printk(KERN_WARNING "jffs2_g_c_deletion_dirent(): Short read (%zd not %u) reading header from obsolete node at %08x\n",
				       retlen, rawlen, ref_offset(raw));
				continue;
			}

			if (je16_to_cpu(rd->nodetype) != JFFS2_NODETYPE_DIRENT)
				continue;

			/* If the name CRC doesn't match, skip */
			if (je32_to_cpu(rd->name_crc) != name_crc)
				continue;

			/* If the name length doesn't match, or it's another deletion dirent, skip */
			if (rd->nsize != name_len || !je32_to_cpu(rd->ino))
				continue;

			/* OK, check the actual name now */
			if (memcmp(rd->name, fd->name, name_len))
				continue;

			/* OK. The name really does match. There really is still an older node on
			   the flash which our deletion dirent obsoletes. So we have to write out
			   a new deletion dirent to replace it */
			up(&c->erase_free_sem);

			D1(printk(KERN_DEBUG "Deletion dirent at %08x still obsoletes real dirent \"%s\" at %08x for ino #%u\n",
				  ref_offset(fd->raw), fd->name, ref_offset(raw), je32_to_cpu(rd->ino)));
			kfree(rd);

			return jffs2_garbage_collect_dirent(c, jeb, f, fd);
		}

		up(&c->erase_free_sem);
		kfree(rd);
	}

	/* No need for it any more. Just mark it obsolete and remove it from the list */
	while (*fdp) {
		if ((*fdp) == fd) {
			found = 1;
			*fdp = fd->next;
			break;
		}
		fdp = &(*fdp)->next;
	}
	if (!found) {
		printk(KERN_WARNING "Deletion dirent \"%s\" not found in list for ino #%u\n", fd->name, f->inocache->ino);
	}
	jffs2_mark_node_obsolete(c, fd->raw);
	jffs2_free_full_dirent(fd);
	return 0;
}
886 | |||
/*
 * Garbage-collect a hole node covering [start, end) of inode 'f'.
 *
 * If the old node covers exactly one frag we just synthesise a fresh
 * hole node (JFFS2_COMPR_ZERO, csize 0) with a new version number.
 * If it has been partially obsoleted by later writes (fn->frags > 1)
 * we must re-read the original from flash and rewrite it with the
 * _same_ version number, so the later writes still take precedence on
 * replay; in that case the surviving frags are re-pointed at the new
 * full_dnode by hand.
 *
 * Returns 0 on success or a negative errno.
 */
static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				      struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
				      uint32_t start, uint32_t end)
{
	struct jffs2_raw_inode ri;
	struct jffs2_node_frag *frag;
	struct jffs2_full_dnode *new_fn;
	uint32_t alloclen, phys_ofs;
	int ret;

	D1(printk(KERN_DEBUG "Writing replacement hole node for ino #%u from offset 0x%x to 0x%x\n",
		  f->inocache->ino, start, end));

	memset(&ri, 0, sizeof(ri));

	if(fn->frags > 1) {
		size_t readlen;
		uint32_t crc;
		/* It's partially obsoleted by a later write. So we have to
		   write it out again with the _same_ version as before */
		ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(ri), &readlen, (char *)&ri);
		if (readlen != sizeof(ri) || ret) {
			printk(KERN_WARNING "Node read failed in jffs2_garbage_collect_hole. Ret %d, retlen %zd. Data will be lost by writing new hole node\n", ret, readlen);
			/* Fall back to writing a brand-new hole node; the
			   original's contents (all zeroes) are reproducible */
			goto fill;
		}
		if (je16_to_cpu(ri.nodetype) != JFFS2_NODETYPE_INODE) {
			printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had node type 0x%04x instead of JFFS2_NODETYPE_INODE(0x%04x)\n",
			       ref_offset(fn->raw),
			       je16_to_cpu(ri.nodetype), JFFS2_NODETYPE_INODE);
			return -EIO;
		}
		if (je32_to_cpu(ri.totlen) != sizeof(ri)) {
			printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had totlen 0x%x instead of expected 0x%zx\n",
			       ref_offset(fn->raw),
			       je32_to_cpu(ri.totlen), sizeof(ri));
			return -EIO;
		}
		/* node_crc covers everything except the last 8 bytes
		   (node_crc and data_crc words) */
		crc = crc32(0, &ri, sizeof(ri)-8);
		if (crc != je32_to_cpu(ri.node_crc)) {
			printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had CRC 0x%08x which doesn't match calculated CRC 0x%08x\n",
			       ref_offset(fn->raw),
			       je32_to_cpu(ri.node_crc), crc);
			/* FIXME: We could possibly deal with this by writing new holes for each frag */
			printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n",
			       start, end, f->inocache->ino);
			goto fill;
		}
		if (ri.compr != JFFS2_COMPR_ZERO) {
			printk(KERN_WARNING "jffs2_garbage_collect_hole: Node 0x%08x wasn't a hole node!\n", ref_offset(fn->raw));
			printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n",
			       start, end, f->inocache->ino);
			goto fill;
		}
	} else {
	fill:
		/* Synthesise a brand-new hole node with a fresh version */
		ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
		ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
		ri.totlen = cpu_to_je32(sizeof(ri));
		ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));

		ri.ino = cpu_to_je32(f->inocache->ino);
		ri.version = cpu_to_je32(++f->highest_version);
		ri.offset = cpu_to_je32(start);
		ri.dsize = cpu_to_je32(end - start);
		ri.csize = cpu_to_je32(0);
		ri.compr = JFFS2_COMPR_ZERO;
	}
	/* Refresh the inode metadata fields regardless of which path
	   filled in the node above */
	ri.mode = cpu_to_jemode(JFFS2_F_I_MODE(f));
	ri.uid = cpu_to_je16(JFFS2_F_I_UID(f));
	ri.gid = cpu_to_je16(JFFS2_F_I_GID(f));
	ri.isize = cpu_to_je32(JFFS2_F_I_SIZE(f));
	ri.atime = cpu_to_je32(JFFS2_F_I_ATIME(f));
	ri.ctime = cpu_to_je32(JFFS2_F_I_CTIME(f));
	ri.mtime = cpu_to_je32(JFFS2_F_I_MTIME(f));
	ri.data_crc = cpu_to_je32(0);
	ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));

	ret = jffs2_reserve_space_gc(c, sizeof(ri), &phys_ofs, &alloclen);
	if (ret) {
		printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_hole failed: %d\n",
		       sizeof(ri), ret);
		return ret;
	}
	new_fn = jffs2_write_dnode(c, f, &ri, NULL, 0, phys_ofs, ALLOC_GC);

	if (IS_ERR(new_fn)) {
		printk(KERN_WARNING "Error writing new hole node: %ld\n", PTR_ERR(new_fn));
		return PTR_ERR(new_fn);
	}
	if (je32_to_cpu(ri.version) == f->highest_version) {
		/* Fresh-version path: let the normal insertion logic place
		   the node, and drop any now-superseded metadata node */
		jffs2_add_full_dnode_to_inode(c, f, new_fn);
		if (f->metadata) {
			jffs2_mark_node_obsolete(c, f->metadata->raw);
			jffs2_free_full_dnode(f->metadata);
			f->metadata = NULL;
		}
		return 0;
	}

	/*
	 * We should only get here in the case where the node we are
	 * replacing had more than one frag, so we kept the same version
	 * number as before. (Except in case of error -- see 'goto fill;'
	 * above.)
	 */
	D1(if(unlikely(fn->frags <= 1)) {
		printk(KERN_WARNING "jffs2_garbage_collect_hole: Replacing fn with %d frag(s) but new ver %d != highest_version %d of ino #%d\n",
		       fn->frags, je32_to_cpu(ri.version), f->highest_version,
		       je32_to_cpu(ri.ino));
	});

	/* This is a partially-overlapped hole node. Mark it REF_NORMAL not REF_PRISTINE */
	mark_ref_normal(new_fn->raw);

	/* Re-point every frag that referenced the old node at the new
	   one, transferring the frag counts as we go */
	for (frag = jffs2_lookup_node_frag(&f->fragtree, fn->ofs);
	     frag; frag = frag_next(frag)) {
		if (frag->ofs > fn->size + fn->ofs)
			break;
		if (frag->node == fn) {
			frag->node = new_fn;
			new_fn->frags++;
			fn->frags--;
		}
	}
	if (fn->frags) {
		printk(KERN_WARNING "jffs2_garbage_collect_hole: Old node still has frags!\n");
		BUG();
	}
	if (!new_fn->frags) {
		printk(KERN_WARNING "jffs2_garbage_collect_hole: New node has no frags!\n");
		BUG();
	}

	jffs2_mark_node_obsolete(c, fn->raw);
	jffs2_free_full_dnode(fn);

	return 0;
}
1025 | |||
/*
 * Garbage-collect a data node covering [start, end) of inode 'f'.
 *
 * When enough free/erasing blocks are available the range is first
 * expanded (within the containing page) to merge logically adjacent
 * frags whose blocks are already dirty, reducing fragmentation. The
 * affected page is then fetched through the page cache and written
 * back out as one or more freshly-compressed dnodes.
 *
 * Returns 0 on success or a negative errno.
 */
static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				       struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
				       uint32_t start, uint32_t end)
{
	struct jffs2_full_dnode *new_fn;
	struct jffs2_raw_inode ri;
	uint32_t alloclen, phys_ofs, offset, orig_end, orig_start;
	int ret = 0;
	unsigned char *comprbuf = NULL, *writebuf;
	unsigned long pg;
	unsigned char *pg_ptr;

	memset(&ri, 0, sizeof(ri));

	D1(printk(KERN_DEBUG "Writing replacement dnode for ino #%u from offset 0x%x to 0x%x\n",
		  f->inocache->ino, start, end));

	/* Remember the caller's range; the write loop below must cover at
	   least [start, orig_end) even if merging expands [start, end) */
	orig_end = end;
	orig_start = start;

	if (c->nr_free_blocks + c->nr_erasing_blocks > c->resv_blocks_gcmerge) {
		/* Attempt to do some merging. But only expand to cover logically
		   adjacent frags if the block containing them is already considered
		   to be dirty. Otherwise we end up with GC just going round in
		   circles dirtying the nodes it already wrote out, especially
		   on NAND where we have small eraseblocks and hence a much higher
		   chance of nodes having to be split to cross boundaries. */

		struct jffs2_node_frag *frag;
		uint32_t min, max;

		/* Page-aligned bounds of the page containing 'start' */
		min = start & ~(PAGE_CACHE_SIZE-1);
		max = min + PAGE_CACHE_SIZE;

		frag = jffs2_lookup_node_frag(&f->fragtree, start);

		/* BUG_ON(!frag) but that'll happen anyway... */

		BUG_ON(frag->ofs != start);

		/* First grow down... */
		while((frag = frag_prev(frag)) && frag->ofs >= min) {

			/* If the previous frag doesn't even reach the beginning, there's
			   excessive fragmentation. Just merge. */
			if (frag->ofs > min) {
				D1(printk(KERN_DEBUG "Expanding down to cover partial frag (0x%x-0x%x)\n",
					  frag->ofs, frag->ofs+frag->size));
				start = frag->ofs;
				continue;
			}
			/* OK. This frag holds the first byte of the page. */
			if (!frag->node || !frag->node->raw) {
				D1(printk(KERN_DEBUG "First frag in page is hole (0x%x-0x%x). Not expanding down.\n",
					  frag->ofs, frag->ofs+frag->size));
				break;
			} else {

				/* OK, it's a frag which extends to the beginning of the page. Does it live
				   in a block which is still considered clean? If so, don't obsolete it.
				   If not, cover it anyway. */

				struct jffs2_raw_node_ref *raw = frag->node->raw;
				struct jffs2_eraseblock *jeb;

				jeb = &c->blocks[raw->flash_offset / c->sector_size];

				if (jeb == c->gcblock) {
					D1(printk(KERN_DEBUG "Expanding down to cover frag (0x%x-0x%x) in gcblock at %08x\n",
						  frag->ofs, frag->ofs+frag->size, ref_offset(raw)));
					start = frag->ofs;
					break;
				}
				if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) {
					D1(printk(KERN_DEBUG "Not expanding down to cover frag (0x%x-0x%x) in clean block %08x\n",
						  frag->ofs, frag->ofs+frag->size, jeb->offset));
					break;
				}

				D1(printk(KERN_DEBUG "Expanding down to cover frag (0x%x-0x%x) in dirty block %08x\n",
					  frag->ofs, frag->ofs+frag->size, jeb->offset));
				start = frag->ofs;
				break;
			}
		}

		/* ... then up */

		/* Find last frag which is actually part of the node we're to GC. */
		frag = jffs2_lookup_node_frag(&f->fragtree, end-1);

		while((frag = frag_next(frag)) && frag->ofs+frag->size <= max) {

			/* If the next frag doesn't even reach the end of the page,
			   there's lots of fragmentation. Just merge. */
			if (frag->ofs+frag->size < max) {
				D1(printk(KERN_DEBUG "Expanding up to cover partial frag (0x%x-0x%x)\n",
					  frag->ofs, frag->ofs+frag->size));
				end = frag->ofs + frag->size;
				continue;
			}

			if (!frag->node || !frag->node->raw) {
				D1(printk(KERN_DEBUG "Last frag in page is hole (0x%x-0x%x). Not expanding up.\n",
					  frag->ofs, frag->ofs+frag->size));
				break;
			} else {

				/* OK, it's a frag which extends to the end of the page. Does it live
				   in a block which is still considered clean? If so, don't obsolete it.
				   If not, cover it anyway. */

				struct jffs2_raw_node_ref *raw = frag->node->raw;
				struct jffs2_eraseblock *jeb;

				jeb = &c->blocks[raw->flash_offset / c->sector_size];

				if (jeb == c->gcblock) {
					D1(printk(KERN_DEBUG "Expanding up to cover frag (0x%x-0x%x) in gcblock at %08x\n",
						  frag->ofs, frag->ofs+frag->size, ref_offset(raw)));
					end = frag->ofs + frag->size;
					break;
				}
				if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) {
					D1(printk(KERN_DEBUG "Not expanding up to cover frag (0x%x-0x%x) in clean block %08x\n",
						  frag->ofs, frag->ofs+frag->size, jeb->offset));
					break;
				}

				D1(printk(KERN_DEBUG "Expanding up to cover frag (0x%x-0x%x) in dirty block %08x\n",
					  frag->ofs, frag->ofs+frag->size, jeb->offset));
				end = frag->ofs + frag->size;
				break;
			}
		}
		D1(printk(KERN_DEBUG "Expanded dnode to write from (0x%x-0x%x) to (0x%x-0x%x)\n",
			  orig_start, orig_end, start, end));

		BUG_ON(end > JFFS2_F_I_SIZE(f));
		BUG_ON(end < orig_end);
		BUG_ON(start > orig_start);
	}

	/* First, use readpage() to read the appropriate page into the page cache */
	/* Q: What happens if we actually try to GC the _same_ page for which commit_write()
	 *    triggered garbage collection in the first place?
	 * A: I _think_ it's OK. read_cache_page shouldn't deadlock, we'll write out the
	 *    page OK. We'll actually write it out again in commit_write, which is a little
	 *    suboptimal, but at least we're correct.
	 */
	pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg);

	if (IS_ERR(pg_ptr)) {
		printk(KERN_WARNING "read_cache_page() returned error: %ld\n", PTR_ERR(pg_ptr));
		return PTR_ERR(pg_ptr);
	}

	/* Write the (possibly expanded) range back out, one dnode per
	   iteration; jffs2_compress() trims datalen/cdatalen to whatever
	   actually fits in this node */
	offset = start;
	while(offset < orig_end) {
		uint32_t datalen;
		uint32_t cdatalen;
		uint16_t comprtype = JFFS2_COMPR_NONE;

		ret = jffs2_reserve_space_gc(c, sizeof(ri) + JFFS2_MIN_DATA_LEN, &phys_ofs, &alloclen);

		if (ret) {
			printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dnode failed: %d\n",
			       sizeof(ri)+ JFFS2_MIN_DATA_LEN, ret);
			break;
		}
		cdatalen = min_t(uint32_t, alloclen - sizeof(ri), end - offset);
		datalen = end - offset;

		writebuf = pg_ptr + (offset & (PAGE_CACHE_SIZE -1));

		comprtype = jffs2_compress(c, f, writebuf, &comprbuf, &datalen, &cdatalen);

		/* See jffs2_garbage_collect_dirent(): hdr_crc must be
		   computed after the header fields, node_crc after all
		   fields it covers (everything but the last 8 bytes) */
		ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
		ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
		ri.totlen = cpu_to_je32(sizeof(ri) + cdatalen);
		ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));

		ri.ino = cpu_to_je32(f->inocache->ino);
		ri.version = cpu_to_je32(++f->highest_version);
		ri.mode = cpu_to_jemode(JFFS2_F_I_MODE(f));
		ri.uid = cpu_to_je16(JFFS2_F_I_UID(f));
		ri.gid = cpu_to_je16(JFFS2_F_I_GID(f));
		ri.isize = cpu_to_je32(JFFS2_F_I_SIZE(f));
		ri.atime = cpu_to_je32(JFFS2_F_I_ATIME(f));
		ri.ctime = cpu_to_je32(JFFS2_F_I_CTIME(f));
		ri.mtime = cpu_to_je32(JFFS2_F_I_MTIME(f));
		ri.offset = cpu_to_je32(offset);
		ri.csize = cpu_to_je32(cdatalen);
		ri.dsize = cpu_to_je32(datalen);
		ri.compr = comprtype & 0xff;
		ri.usercompr = (comprtype >> 8) & 0xff;
		ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
		ri.data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen));

		new_fn = jffs2_write_dnode(c, f, &ri, comprbuf, cdatalen, phys_ofs, ALLOC_GC);

		jffs2_free_comprbuf(comprbuf, writebuf);

		if (IS_ERR(new_fn)) {
			printk(KERN_WARNING "Error writing new dnode: %ld\n", PTR_ERR(new_fn));
			ret = PTR_ERR(new_fn);
			break;
		}
		ret = jffs2_add_full_dnode_to_inode(c, f, new_fn);
		offset += datalen;
		if (f->metadata) {
			/* The new data node supersedes any standalone
			   metadata node */
			jffs2_mark_node_obsolete(c, f->metadata->raw);
			jffs2_free_full_dnode(f->metadata);
			f->metadata = NULL;
		}
	}

	jffs2_gc_release_page(c, pg_ptr, &pg);
	return ret;
}
1246 | |||
diff --git a/fs/jffs2/histo.h b/fs/jffs2/histo.h new file mode 100644 index 000000000000..84f184f0836f --- /dev/null +++ b/fs/jffs2/histo.h | |||
@@ -0,0 +1,3 @@ | |||
/* This file provides the bit-probabilities for the input file */
#define BIT_DIVIDER 629
/* NOTE(review): 8 initialisers for a 9-element array, so bits[8] is
   implicitly zero -- confirm whether the ninth slot is intentional. */
static int bits[9] = { 179,167,183,165,159,198,178,119,}; /* ia32 .so files */
diff --git a/fs/jffs2/histo_mips.h b/fs/jffs2/histo_mips.h new file mode 100644 index 000000000000..9a443268d885 --- /dev/null +++ b/fs/jffs2/histo_mips.h | |||
@@ -0,0 +1,2 @@ | |||
/* Bit-probability table for MIPS binaries (cf. histo.h for ia32) */
#define BIT_DIVIDER_MIPS 1043
static int bits_mips[8] = { 277,249,290,267,229,341,212,241}; /* mips32 */
diff --git a/fs/jffs2/ioctl.c b/fs/jffs2/ioctl.c new file mode 100644 index 000000000000..238c7992064c --- /dev/null +++ b/fs/jffs2/ioctl.c | |||
@@ -0,0 +1,23 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2001-2003 Red Hat, Inc. | ||
5 | * | ||
6 | * Created by David Woodhouse <dwmw2@infradead.org> | ||
7 | * | ||
8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
9 | * | ||
10 | * $Id: ioctl.c,v 1.9 2004/11/16 20:36:11 dwmw2 Exp $ | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <linux/fs.h> | ||
15 | |||
/*
 * ioctl handler for JFFS2 files. No ioctls are implemented yet, so
 * every command gets -ENOTTY (the conventional "inappropriate ioctl"
 * return).
 */
int jffs2_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	/* Later, this will provide for lsattr.jffs2 and chattr.jffs2, which
	   will include compression support etc. */
	return -ENOTTY;
}
23 | |||
diff --git a/fs/jffs2/malloc.c b/fs/jffs2/malloc.c new file mode 100644 index 000000000000..5abb431c2a00 --- /dev/null +++ b/fs/jffs2/malloc.c | |||
@@ -0,0 +1,205 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2001-2003 Red Hat, Inc. | ||
5 | * | ||
6 | * Created by David Woodhouse <dwmw2@infradead.org> | ||
7 | * | ||
8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
9 | * | ||
10 | * $Id: malloc.c,v 1.28 2004/11/16 20:36:11 dwmw2 Exp $ | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/jffs2.h> | ||
18 | #include "nodelist.h" | ||
19 | |||
20 | #if 0 | ||
21 | #define JFFS2_SLAB_POISON SLAB_POISON | ||
22 | #else | ||
23 | #define JFFS2_SLAB_POISON 0 | ||
24 | #endif | ||
25 | |||
// Replace this with "#define D3(x) x" to enable cache debugging output
27 | #define D3(x) | ||
28 | |||
29 | /* These are initialised to NULL in the kernel startup code. | ||
30 | If you're porting to other operating systems, beware */ | ||
31 | static kmem_cache_t *full_dnode_slab; | ||
32 | static kmem_cache_t *raw_dirent_slab; | ||
33 | static kmem_cache_t *raw_inode_slab; | ||
34 | static kmem_cache_t *tmp_dnode_info_slab; | ||
35 | static kmem_cache_t *raw_node_ref_slab; | ||
36 | static kmem_cache_t *node_frag_slab; | ||
37 | static kmem_cache_t *inode_cache_slab; | ||
38 | |||
39 | int __init jffs2_create_slab_caches(void) | ||
40 | { | ||
41 | full_dnode_slab = kmem_cache_create("jffs2_full_dnode", | ||
42 | sizeof(struct jffs2_full_dnode), | ||
43 | 0, JFFS2_SLAB_POISON, NULL, NULL); | ||
44 | if (!full_dnode_slab) | ||
45 | goto err; | ||
46 | |||
47 | raw_dirent_slab = kmem_cache_create("jffs2_raw_dirent", | ||
48 | sizeof(struct jffs2_raw_dirent), | ||
49 | 0, JFFS2_SLAB_POISON, NULL, NULL); | ||
50 | if (!raw_dirent_slab) | ||
51 | goto err; | ||
52 | |||
53 | raw_inode_slab = kmem_cache_create("jffs2_raw_inode", | ||
54 | sizeof(struct jffs2_raw_inode), | ||
55 | 0, JFFS2_SLAB_POISON, NULL, NULL); | ||
56 | if (!raw_inode_slab) | ||
57 | goto err; | ||
58 | |||
59 | tmp_dnode_info_slab = kmem_cache_create("jffs2_tmp_dnode", | ||
60 | sizeof(struct jffs2_tmp_dnode_info), | ||
61 | 0, JFFS2_SLAB_POISON, NULL, NULL); | ||
62 | if (!tmp_dnode_info_slab) | ||
63 | goto err; | ||
64 | |||
65 | raw_node_ref_slab = kmem_cache_create("jffs2_raw_node_ref", | ||
66 | sizeof(struct jffs2_raw_node_ref), | ||
67 | 0, JFFS2_SLAB_POISON, NULL, NULL); | ||
68 | if (!raw_node_ref_slab) | ||
69 | goto err; | ||
70 | |||
71 | node_frag_slab = kmem_cache_create("jffs2_node_frag", | ||
72 | sizeof(struct jffs2_node_frag), | ||
73 | 0, JFFS2_SLAB_POISON, NULL, NULL); | ||
74 | if (!node_frag_slab) | ||
75 | goto err; | ||
76 | |||
77 | inode_cache_slab = kmem_cache_create("jffs2_inode_cache", | ||
78 | sizeof(struct jffs2_inode_cache), | ||
79 | 0, JFFS2_SLAB_POISON, NULL, NULL); | ||
80 | if (inode_cache_slab) | ||
81 | return 0; | ||
82 | err: | ||
83 | jffs2_destroy_slab_caches(); | ||
84 | return -ENOMEM; | ||
85 | } | ||
86 | |||
87 | void jffs2_destroy_slab_caches(void) | ||
88 | { | ||
89 | if(full_dnode_slab) | ||
90 | kmem_cache_destroy(full_dnode_slab); | ||
91 | if(raw_dirent_slab) | ||
92 | kmem_cache_destroy(raw_dirent_slab); | ||
93 | if(raw_inode_slab) | ||
94 | kmem_cache_destroy(raw_inode_slab); | ||
95 | if(tmp_dnode_info_slab) | ||
96 | kmem_cache_destroy(tmp_dnode_info_slab); | ||
97 | if(raw_node_ref_slab) | ||
98 | kmem_cache_destroy(raw_node_ref_slab); | ||
99 | if(node_frag_slab) | ||
100 | kmem_cache_destroy(node_frag_slab); | ||
101 | if(inode_cache_slab) | ||
102 | kmem_cache_destroy(inode_cache_slab); | ||
103 | } | ||
104 | |||
105 | struct jffs2_full_dirent *jffs2_alloc_full_dirent(int namesize) | ||
106 | { | ||
107 | return kmalloc(sizeof(struct jffs2_full_dirent) + namesize, GFP_KERNEL); | ||
108 | } | ||
109 | |||
/* Release a dirent obtained from jffs2_alloc_full_dirent(). */
void jffs2_free_full_dirent(struct jffs2_full_dirent *x)
{
	kfree(x);
}
114 | |||
115 | struct jffs2_full_dnode *jffs2_alloc_full_dnode(void) | ||
116 | { | ||
117 | struct jffs2_full_dnode *ret = kmem_cache_alloc(full_dnode_slab, GFP_KERNEL); | ||
118 | D3 (printk (KERN_DEBUG "alloc_full_dnode at %p\n", ret)); | ||
119 | return ret; | ||
120 | } | ||
121 | |||
/* Return a full_dnode to its slab cache. */
void jffs2_free_full_dnode(struct jffs2_full_dnode *x)
{
	D3 (printk (KERN_DEBUG "free full_dnode at %p\n", x));
	kmem_cache_free(full_dnode_slab, x);
}
127 | |||
128 | struct jffs2_raw_dirent *jffs2_alloc_raw_dirent(void) | ||
129 | { | ||
130 | struct jffs2_raw_dirent *ret = kmem_cache_alloc(raw_dirent_slab, GFP_KERNEL); | ||
131 | D3 (printk (KERN_DEBUG "alloc_raw_dirent\n", ret)); | ||
132 | return ret; | ||
133 | } | ||
134 | |||
/* Return a raw_dirent to its slab cache. */
void jffs2_free_raw_dirent(struct jffs2_raw_dirent *x)
{
	D3 (printk (KERN_DEBUG "free_raw_dirent at %p\n", x));
	kmem_cache_free(raw_dirent_slab, x);
}
140 | |||
141 | struct jffs2_raw_inode *jffs2_alloc_raw_inode(void) | ||
142 | { | ||
143 | struct jffs2_raw_inode *ret = kmem_cache_alloc(raw_inode_slab, GFP_KERNEL); | ||
144 | D3 (printk (KERN_DEBUG "alloc_raw_inode at %p\n", ret)); | ||
145 | return ret; | ||
146 | } | ||
147 | |||
/* Return a raw_inode to its slab cache. */
void jffs2_free_raw_inode(struct jffs2_raw_inode *x)
{
	D3 (printk (KERN_DEBUG "free_raw_inode at %p\n", x));
	kmem_cache_free(raw_inode_slab, x);
}
153 | |||
154 | struct jffs2_tmp_dnode_info *jffs2_alloc_tmp_dnode_info(void) | ||
155 | { | ||
156 | struct jffs2_tmp_dnode_info *ret = kmem_cache_alloc(tmp_dnode_info_slab, GFP_KERNEL); | ||
157 | D3 (printk (KERN_DEBUG "alloc_tmp_dnode_info at %p\n", ret)); | ||
158 | return ret; | ||
159 | } | ||
160 | |||
/* Return a tmp_dnode_info to its slab cache. */
void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *x)
{
	D3 (printk (KERN_DEBUG "free_tmp_dnode_info at %p\n", x));
	kmem_cache_free(tmp_dnode_info_slab, x);
}
166 | |||
167 | struct jffs2_raw_node_ref *jffs2_alloc_raw_node_ref(void) | ||
168 | { | ||
169 | struct jffs2_raw_node_ref *ret = kmem_cache_alloc(raw_node_ref_slab, GFP_KERNEL); | ||
170 | D3 (printk (KERN_DEBUG "alloc_raw_node_ref at %p\n", ret)); | ||
171 | return ret; | ||
172 | } | ||
173 | |||
/* Return a raw_node_ref to its slab cache. */
void jffs2_free_raw_node_ref(struct jffs2_raw_node_ref *x)
{
	D3 (printk (KERN_DEBUG "free_raw_node_ref at %p\n", x));
	kmem_cache_free(raw_node_ref_slab, x);
}
179 | |||
180 | struct jffs2_node_frag *jffs2_alloc_node_frag(void) | ||
181 | { | ||
182 | struct jffs2_node_frag *ret = kmem_cache_alloc(node_frag_slab, GFP_KERNEL); | ||
183 | D3 (printk (KERN_DEBUG "alloc_node_frag at %p\n", ret)); | ||
184 | return ret; | ||
185 | } | ||
186 | |||
/* Return a node_frag to its slab cache. */
void jffs2_free_node_frag(struct jffs2_node_frag *x)
{
	D3 (printk (KERN_DEBUG "free_node_frag at %p\n", x));
	kmem_cache_free(node_frag_slab, x);
}
192 | |||
193 | struct jffs2_inode_cache *jffs2_alloc_inode_cache(void) | ||
194 | { | ||
195 | struct jffs2_inode_cache *ret = kmem_cache_alloc(inode_cache_slab, GFP_KERNEL); | ||
196 | D3 (printk(KERN_DEBUG "Allocated inocache at %p\n", ret)); | ||
197 | return ret; | ||
198 | } | ||
199 | |||
/* Return an inode_cache to its slab cache. */
void jffs2_free_inode_cache(struct jffs2_inode_cache *x)
{
	D3 (printk(KERN_DEBUG "Freeing inocache at %p\n", x));
	kmem_cache_free(inode_cache_slab, x);
}
205 | |||
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c new file mode 100644 index 000000000000..cd6a8bd13e0b --- /dev/null +++ b/fs/jffs2/nodelist.c | |||
@@ -0,0 +1,681 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2001-2003 Red Hat, Inc. | ||
5 | * | ||
6 | * Created by David Woodhouse <dwmw2@infradead.org> | ||
7 | * | ||
8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
9 | * | ||
10 | * $Id: nodelist.c,v 1.90 2004/12/08 17:59:20 dwmw2 Exp $ | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/sched.h> | ||
16 | #include <linux/fs.h> | ||
17 | #include <linux/mtd/mtd.h> | ||
18 | #include <linux/rbtree.h> | ||
19 | #include <linux/crc32.h> | ||
20 | #include <linux/slab.h> | ||
21 | #include <linux/pagemap.h> | ||
22 | #include "nodelist.h" | ||
23 | |||
/* Insert 'new' into the singly-linked dirent list at *list, keeping the
   list sorted by name hash (nhash).  If an entry with the same name is
   already present, only the dirent with the higher version survives; the
   loser is marked obsolete on the medium and freed.  Takes ownership of
   'new' in either case. */
void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list)
{
	struct jffs2_full_dirent **prev = list;
	D1(printk(KERN_DEBUG "jffs2_add_fd_to_list( %p, %p (->%p))\n", new, list, *list));

	/* Walk to the insertion point; entries are ordered by nhash, so
	   any duplicate name must be encountered before we pass its hash. */
	while ((*prev) && (*prev)->nhash <= new->nhash) {
		if ((*prev)->nhash == new->nhash && !strcmp((*prev)->name, new->name)) {
			/* Duplicate. Free one */
			if (new->version < (*prev)->version) {
				/* Existing entry is newer: discard 'new'. */
				D1(printk(KERN_DEBUG "Eep! Marking new dirent node obsolete\n"));
				D1(printk(KERN_DEBUG "New dirent is \"%s\"->ino #%u. Old is \"%s\"->ino #%u\n", new->name, new->ino, (*prev)->name, (*prev)->ino));
				jffs2_mark_node_obsolete(c, new->raw);
				jffs2_free_full_dirent(new);
			} else {
				/* 'new' supersedes the existing entry: splice it
				   in place and free the old one. */
				D1(printk(KERN_DEBUG "Marking old dirent node (ino #%u) obsolete\n", (*prev)->ino));
				new->next = (*prev)->next;
				jffs2_mark_node_obsolete(c, ((*prev)->raw));
				jffs2_free_full_dirent(*prev);
				*prev = new;
			}
			goto out;
		}
		prev = &((*prev)->next);
	}
	/* No duplicate: link 'new' in at the sorted position. */
	new->next = *prev;
	*prev = new;

 out:
	/* Debug level 2: dump the resulting list. */
	D2(while(*list) {
		printk(KERN_DEBUG "Dirent \"%s\" (hash 0x%08x, ino #%u\n", (*list)->name, (*list)->nhash, (*list)->ino);
		list = &(*list)->next;
	});
}
57 | |||
58 | /* Put a new tmp_dnode_info into the list, keeping the list in | ||
59 | order of increasing version | ||
60 | */ | ||
61 | static void jffs2_add_tn_to_list(struct jffs2_tmp_dnode_info *tn, struct jffs2_tmp_dnode_info **list) | ||
62 | { | ||
63 | struct jffs2_tmp_dnode_info **prev = list; | ||
64 | |||
65 | while ((*prev) && (*prev)->version < tn->version) { | ||
66 | prev = &((*prev)->next); | ||
67 | } | ||
68 | tn->next = (*prev); | ||
69 | *prev = tn; | ||
70 | } | ||
71 | |||
72 | static void jffs2_free_tmp_dnode_info_list(struct jffs2_tmp_dnode_info *tn) | ||
73 | { | ||
74 | struct jffs2_tmp_dnode_info *next; | ||
75 | |||
76 | while (tn) { | ||
77 | next = tn; | ||
78 | tn = tn->next; | ||
79 | jffs2_free_full_dnode(next->fn); | ||
80 | jffs2_free_tmp_dnode_info(next); | ||
81 | } | ||
82 | } | ||
83 | |||
84 | static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd) | ||
85 | { | ||
86 | struct jffs2_full_dirent *next; | ||
87 | |||
88 | while (fd) { | ||
89 | next = fd->next; | ||
90 | jffs2_free_full_dirent(fd); | ||
91 | fd = next; | ||
92 | } | ||
93 | } | ||
94 | |||
/* Returns first valid node after 'ref'. May return 'ref' */
static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref)
{
	/* NOTE(review): the loop also requires ref->next_in_ino to be
	   non-NULL -- presumably that marks the end of the per-inode
	   chain, so the terminating element is never returned as a
	   valid node.  Confirm against the raw_node_ref list layout. */
	while (ref && ref->next_in_ino) {
		if (!ref_obsolete(ref))
			return ref;
		/* Obsolete nodes can vanish when their block is erased;
		   skip past them. */
		D1(printk(KERN_DEBUG "node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref)));
		ref = ref->next_in_ino;
	}
	return NULL;
}
106 | |||
107 | /* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated | ||
108 | with this ino, returning the former in order of version */ | ||
109 | |||
110 | int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | ||
111 | struct jffs2_tmp_dnode_info **tnp, struct jffs2_full_dirent **fdp, | ||
112 | uint32_t *highest_version, uint32_t *latest_mctime, | ||
113 | uint32_t *mctime_ver) | ||
114 | { | ||
115 | struct jffs2_raw_node_ref *ref, *valid_ref; | ||
116 | struct jffs2_tmp_dnode_info *tn, *ret_tn = NULL; | ||
117 | struct jffs2_full_dirent *fd, *ret_fd = NULL; | ||
118 | union jffs2_node_union node; | ||
119 | size_t retlen; | ||
120 | int err; | ||
121 | |||
122 | *mctime_ver = 0; | ||
123 | |||
124 | D1(printk(KERN_DEBUG "jffs2_get_inode_nodes(): ino #%u\n", f->inocache->ino)); | ||
125 | |||
126 | spin_lock(&c->erase_completion_lock); | ||
127 | |||
128 | valid_ref = jffs2_first_valid_node(f->inocache->nodes); | ||
129 | |||
130 | if (!valid_ref) | ||
131 | printk(KERN_WARNING "Eep. No valid nodes for ino #%u\n", f->inocache->ino); | ||
132 | |||
133 | while (valid_ref) { | ||
134 | /* We can hold a pointer to a non-obsolete node without the spinlock, | ||
135 | but _obsolete_ nodes may disappear at any time, if the block | ||
136 | they're in gets erased. So if we mark 'ref' obsolete while we're | ||
137 | not holding the lock, it can go away immediately. For that reason, | ||
138 | we find the next valid node first, before processing 'ref'. | ||
139 | */ | ||
140 | ref = valid_ref; | ||
141 | valid_ref = jffs2_first_valid_node(ref->next_in_ino); | ||
142 | spin_unlock(&c->erase_completion_lock); | ||
143 | |||
144 | cond_resched(); | ||
145 | |||
146 | /* FIXME: point() */ | ||
147 | err = jffs2_flash_read(c, (ref_offset(ref)), | ||
148 | min_t(uint32_t, ref_totlen(c, NULL, ref), sizeof(node)), | ||
149 | &retlen, (void *)&node); | ||
150 | if (err) { | ||
151 | printk(KERN_WARNING "error %d reading node at 0x%08x in get_inode_nodes()\n", err, ref_offset(ref)); | ||
152 | goto free_out; | ||
153 | } | ||
154 | |||
155 | |||
156 | /* Check we've managed to read at least the common node header */ | ||
157 | if (retlen < min_t(uint32_t, ref_totlen(c, NULL, ref), sizeof(node.u))) { | ||
158 | printk(KERN_WARNING "short read in get_inode_nodes()\n"); | ||
159 | err = -EIO; | ||
160 | goto free_out; | ||
161 | } | ||
162 | |||
163 | switch (je16_to_cpu(node.u.nodetype)) { | ||
164 | case JFFS2_NODETYPE_DIRENT: | ||
165 | D1(printk(KERN_DEBUG "Node at %08x (%d) is a dirent node\n", ref_offset(ref), ref_flags(ref))); | ||
166 | if (ref_flags(ref) == REF_UNCHECKED) { | ||
167 | printk(KERN_WARNING "BUG: Dirent node at 0x%08x never got checked? How?\n", ref_offset(ref)); | ||
168 | BUG(); | ||
169 | } | ||
170 | if (retlen < sizeof(node.d)) { | ||
171 | printk(KERN_WARNING "short read in get_inode_nodes()\n"); | ||
172 | err = -EIO; | ||
173 | goto free_out; | ||
174 | } | ||
175 | /* sanity check */ | ||
176 | if (PAD((node.d.nsize + sizeof (node.d))) != PAD(je32_to_cpu (node.d.totlen))) { | ||
177 | printk(KERN_NOTICE "jffs2_get_inode_nodes(): Illegal nsize in node at 0x%08x: nsize 0x%02x, totlen %04x\n", | ||
178 | ref_offset(ref), node.d.nsize, je32_to_cpu(node.d.totlen)); | ||
179 | jffs2_mark_node_obsolete(c, ref); | ||
180 | spin_lock(&c->erase_completion_lock); | ||
181 | continue; | ||
182 | } | ||
183 | if (je32_to_cpu(node.d.version) > *highest_version) | ||
184 | *highest_version = je32_to_cpu(node.d.version); | ||
185 | if (ref_obsolete(ref)) { | ||
186 | /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */ | ||
187 | printk(KERN_ERR "Dirent node at 0x%08x became obsolete while we weren't looking\n", | ||
188 | ref_offset(ref)); | ||
189 | BUG(); | ||
190 | } | ||
191 | |||
192 | fd = jffs2_alloc_full_dirent(node.d.nsize+1); | ||
193 | if (!fd) { | ||
194 | err = -ENOMEM; | ||
195 | goto free_out; | ||
196 | } | ||
197 | fd->raw = ref; | ||
198 | fd->version = je32_to_cpu(node.d.version); | ||
199 | fd->ino = je32_to_cpu(node.d.ino); | ||
200 | fd->type = node.d.type; | ||
201 | |||
202 | /* Pick out the mctime of the latest dirent */ | ||
203 | if(fd->version > *mctime_ver) { | ||
204 | *mctime_ver = fd->version; | ||
205 | *latest_mctime = je32_to_cpu(node.d.mctime); | ||
206 | } | ||
207 | |||
208 | /* memcpy as much of the name as possible from the raw | ||
209 | dirent we've already read from the flash | ||
210 | */ | ||
211 | if (retlen > sizeof(struct jffs2_raw_dirent)) | ||
212 | memcpy(&fd->name[0], &node.d.name[0], min_t(uint32_t, node.d.nsize, (retlen-sizeof(struct jffs2_raw_dirent)))); | ||
213 | |||
214 | /* Do we need to copy any more of the name directly | ||
215 | from the flash? | ||
216 | */ | ||
217 | if (node.d.nsize + sizeof(struct jffs2_raw_dirent) > retlen) { | ||
218 | /* FIXME: point() */ | ||
219 | int already = retlen - sizeof(struct jffs2_raw_dirent); | ||
220 | |||
221 | err = jffs2_flash_read(c, (ref_offset(ref)) + retlen, | ||
222 | node.d.nsize - already, &retlen, &fd->name[already]); | ||
223 | if (!err && retlen != node.d.nsize - already) | ||
224 | err = -EIO; | ||
225 | |||
226 | if (err) { | ||
227 | printk(KERN_WARNING "Read remainder of name in jffs2_get_inode_nodes(): error %d\n", err); | ||
228 | jffs2_free_full_dirent(fd); | ||
229 | goto free_out; | ||
230 | } | ||
231 | } | ||
232 | fd->nhash = full_name_hash(fd->name, node.d.nsize); | ||
233 | fd->next = NULL; | ||
234 | fd->name[node.d.nsize] = '\0'; | ||
235 | /* Wheee. We now have a complete jffs2_full_dirent structure, with | ||
236 | the name in it and everything. Link it into the list | ||
237 | */ | ||
238 | D1(printk(KERN_DEBUG "Adding fd \"%s\", ino #%u\n", fd->name, fd->ino)); | ||
239 | jffs2_add_fd_to_list(c, fd, &ret_fd); | ||
240 | break; | ||
241 | |||
242 | case JFFS2_NODETYPE_INODE: | ||
243 | D1(printk(KERN_DEBUG "Node at %08x (%d) is a data node\n", ref_offset(ref), ref_flags(ref))); | ||
244 | if (retlen < sizeof(node.i)) { | ||
245 | printk(KERN_WARNING "read too short for dnode\n"); | ||
246 | err = -EIO; | ||
247 | goto free_out; | ||
248 | } | ||
249 | if (je32_to_cpu(node.i.version) > *highest_version) | ||
250 | *highest_version = je32_to_cpu(node.i.version); | ||
251 | D1(printk(KERN_DEBUG "version %d, highest_version now %d\n", je32_to_cpu(node.i.version), *highest_version)); | ||
252 | |||
253 | if (ref_obsolete(ref)) { | ||
254 | /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */ | ||
255 | printk(KERN_ERR "Inode node at 0x%08x became obsolete while we weren't looking\n", | ||
256 | ref_offset(ref)); | ||
257 | BUG(); | ||
258 | } | ||
259 | |||
260 | /* If we've never checked the CRCs on this node, check them now. */ | ||
261 | if (ref_flags(ref) == REF_UNCHECKED) { | ||
262 | uint32_t crc, len; | ||
263 | struct jffs2_eraseblock *jeb; | ||
264 | |||
265 | crc = crc32(0, &node, sizeof(node.i)-8); | ||
266 | if (crc != je32_to_cpu(node.i.node_crc)) { | ||
267 | printk(KERN_NOTICE "jffs2_get_inode_nodes(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", | ||
268 | ref_offset(ref), je32_to_cpu(node.i.node_crc), crc); | ||
269 | jffs2_mark_node_obsolete(c, ref); | ||
270 | spin_lock(&c->erase_completion_lock); | ||
271 | continue; | ||
272 | } | ||
273 | |||
274 | /* sanity checks */ | ||
275 | if ( je32_to_cpu(node.i.offset) > je32_to_cpu(node.i.isize) || | ||
276 | PAD(je32_to_cpu(node.i.csize) + sizeof (node.i)) != PAD(je32_to_cpu(node.i.totlen))) { | ||
277 | printk(KERN_NOTICE "jffs2_get_inode_nodes(): Inode corrupted at 0x%08x, totlen %d, #ino %d, version %d, isize %d, csize %d, dsize %d \n", | ||
278 | ref_offset(ref), je32_to_cpu(node.i.totlen), je32_to_cpu(node.i.ino), | ||
279 | je32_to_cpu(node.i.version), je32_to_cpu(node.i.isize), | ||
280 | je32_to_cpu(node.i.csize), je32_to_cpu(node.i.dsize)); | ||
281 | jffs2_mark_node_obsolete(c, ref); | ||
282 | spin_lock(&c->erase_completion_lock); | ||
283 | continue; | ||
284 | } | ||
285 | |||
286 | if (node.i.compr != JFFS2_COMPR_ZERO && je32_to_cpu(node.i.csize)) { | ||
287 | unsigned char *buf=NULL; | ||
288 | uint32_t pointed = 0; | ||
289 | #ifndef __ECOS | ||
290 | if (c->mtd->point) { | ||
291 | err = c->mtd->point (c->mtd, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize), | ||
292 | &retlen, &buf); | ||
293 | if (!err && retlen < je32_to_cpu(node.i.csize)) { | ||
294 | D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", retlen)); | ||
295 | c->mtd->unpoint(c->mtd, buf, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize)); | ||
296 | } else if (err){ | ||
297 | D1(printk(KERN_DEBUG "MTD point failed %d\n", err)); | ||
298 | } else | ||
299 | pointed = 1; /* succefully pointed to device */ | ||
300 | } | ||
301 | #endif | ||
302 | if(!pointed){ | ||
303 | buf = kmalloc(je32_to_cpu(node.i.csize), GFP_KERNEL); | ||
304 | if (!buf) | ||
305 | return -ENOMEM; | ||
306 | |||
307 | err = jffs2_flash_read(c, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize), | ||
308 | &retlen, buf); | ||
309 | if (!err && retlen != je32_to_cpu(node.i.csize)) | ||
310 | err = -EIO; | ||
311 | if (err) { | ||
312 | kfree(buf); | ||
313 | return err; | ||
314 | } | ||
315 | } | ||
316 | crc = crc32(0, buf, je32_to_cpu(node.i.csize)); | ||
317 | if(!pointed) | ||
318 | kfree(buf); | ||
319 | #ifndef __ECOS | ||
320 | else | ||
321 | c->mtd->unpoint(c->mtd, buf, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize)); | ||
322 | #endif | ||
323 | |||
324 | if (crc != je32_to_cpu(node.i.data_crc)) { | ||
325 | printk(KERN_NOTICE "jffs2_get_inode_nodes(): Data CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", | ||
326 | ref_offset(ref), je32_to_cpu(node.i.data_crc), crc); | ||
327 | jffs2_mark_node_obsolete(c, ref); | ||
328 | spin_lock(&c->erase_completion_lock); | ||
329 | continue; | ||
330 | } | ||
331 | |||
332 | } | ||
333 | |||
334 | /* Mark the node as having been checked and fix the accounting accordingly */ | ||
335 | spin_lock(&c->erase_completion_lock); | ||
336 | jeb = &c->blocks[ref->flash_offset / c->sector_size]; | ||
337 | len = ref_totlen(c, jeb, ref); | ||
338 | |||
339 | jeb->used_size += len; | ||
340 | jeb->unchecked_size -= len; | ||
341 | c->used_size += len; | ||
342 | c->unchecked_size -= len; | ||
343 | |||
344 | /* If node covers at least a whole page, or if it starts at the | ||
345 | beginning of a page and runs to the end of the file, or if | ||
346 | it's a hole node, mark it REF_PRISTINE, else REF_NORMAL. | ||
347 | |||
348 | If it's actually overlapped, it'll get made NORMAL (or OBSOLETE) | ||
349 | when the overlapping node(s) get added to the tree anyway. | ||
350 | */ | ||
351 | if ((je32_to_cpu(node.i.dsize) >= PAGE_CACHE_SIZE) || | ||
352 | ( ((je32_to_cpu(node.i.offset)&(PAGE_CACHE_SIZE-1))==0) && | ||
353 | (je32_to_cpu(node.i.dsize)+je32_to_cpu(node.i.offset) == je32_to_cpu(node.i.isize)))) { | ||
354 | D1(printk(KERN_DEBUG "Marking node at 0x%08x REF_PRISTINE\n", ref_offset(ref))); | ||
355 | ref->flash_offset = ref_offset(ref) | REF_PRISTINE; | ||
356 | } else { | ||
357 | D1(printk(KERN_DEBUG "Marking node at 0x%08x REF_NORMAL\n", ref_offset(ref))); | ||
358 | ref->flash_offset = ref_offset(ref) | REF_NORMAL; | ||
359 | } | ||
360 | spin_unlock(&c->erase_completion_lock); | ||
361 | } | ||
362 | |||
363 | tn = jffs2_alloc_tmp_dnode_info(); | ||
364 | if (!tn) { | ||
365 | D1(printk(KERN_DEBUG "alloc tn failed\n")); | ||
366 | err = -ENOMEM; | ||
367 | goto free_out; | ||
368 | } | ||
369 | |||
370 | tn->fn = jffs2_alloc_full_dnode(); | ||
371 | if (!tn->fn) { | ||
372 | D1(printk(KERN_DEBUG "alloc fn failed\n")); | ||
373 | err = -ENOMEM; | ||
374 | jffs2_free_tmp_dnode_info(tn); | ||
375 | goto free_out; | ||
376 | } | ||
377 | tn->version = je32_to_cpu(node.i.version); | ||
378 | tn->fn->ofs = je32_to_cpu(node.i.offset); | ||
379 | /* There was a bug where we wrote hole nodes out with | ||
380 | csize/dsize swapped. Deal with it */ | ||
381 | if (node.i.compr == JFFS2_COMPR_ZERO && !je32_to_cpu(node.i.dsize) && je32_to_cpu(node.i.csize)) | ||
382 | tn->fn->size = je32_to_cpu(node.i.csize); | ||
383 | else // normal case... | ||
384 | tn->fn->size = je32_to_cpu(node.i.dsize); | ||
385 | tn->fn->raw = ref; | ||
386 | D1(printk(KERN_DEBUG "dnode @%08x: ver %u, offset %04x, dsize %04x\n", | ||
387 | ref_offset(ref), je32_to_cpu(node.i.version), | ||
388 | je32_to_cpu(node.i.offset), je32_to_cpu(node.i.dsize))); | ||
389 | jffs2_add_tn_to_list(tn, &ret_tn); | ||
390 | break; | ||
391 | |||
392 | default: | ||
393 | if (ref_flags(ref) == REF_UNCHECKED) { | ||
394 | struct jffs2_eraseblock *jeb; | ||
395 | uint32_t len; | ||
396 | |||
397 | printk(KERN_ERR "Eep. Unknown node type %04x at %08x was marked REF_UNCHECKED\n", | ||
398 | je16_to_cpu(node.u.nodetype), ref_offset(ref)); | ||
399 | |||
400 | /* Mark the node as having been checked and fix the accounting accordingly */ | ||
401 | spin_lock(&c->erase_completion_lock); | ||
402 | jeb = &c->blocks[ref->flash_offset / c->sector_size]; | ||
403 | len = ref_totlen(c, jeb, ref); | ||
404 | |||
405 | jeb->used_size += len; | ||
406 | jeb->unchecked_size -= len; | ||
407 | c->used_size += len; | ||
408 | c->unchecked_size -= len; | ||
409 | |||
410 | mark_ref_normal(ref); | ||
411 | spin_unlock(&c->erase_completion_lock); | ||
412 | } | ||
413 | node.u.nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(node.u.nodetype)); | ||
414 | if (crc32(0, &node, sizeof(struct jffs2_unknown_node)-4) != je32_to_cpu(node.u.hdr_crc)) { | ||
415 | /* Hmmm. This should have been caught at scan time. */ | ||
416 | printk(KERN_ERR "Node header CRC failed at %08x. But it must have been OK earlier.\n", | ||
417 | ref_offset(ref)); | ||
418 | printk(KERN_ERR "Node was: { %04x, %04x, %08x, %08x }\n", | ||
419 | je16_to_cpu(node.u.magic), je16_to_cpu(node.u.nodetype), je32_to_cpu(node.u.totlen), | ||
420 | je32_to_cpu(node.u.hdr_crc)); | ||
421 | jffs2_mark_node_obsolete(c, ref); | ||
422 | } else switch(je16_to_cpu(node.u.nodetype) & JFFS2_COMPAT_MASK) { | ||
423 | case JFFS2_FEATURE_INCOMPAT: | ||
424 | printk(KERN_NOTICE "Unknown INCOMPAT nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref)); | ||
425 | /* EEP */ | ||
426 | BUG(); | ||
427 | break; | ||
428 | case JFFS2_FEATURE_ROCOMPAT: | ||
429 | printk(KERN_NOTICE "Unknown ROCOMPAT nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref)); | ||
430 | if (!(c->flags & JFFS2_SB_FLAG_RO)) | ||
431 | BUG(); | ||
432 | break; | ||
433 | case JFFS2_FEATURE_RWCOMPAT_COPY: | ||
434 | printk(KERN_NOTICE "Unknown RWCOMPAT_COPY nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref)); | ||
435 | break; | ||
436 | case JFFS2_FEATURE_RWCOMPAT_DELETE: | ||
437 | printk(KERN_NOTICE "Unknown RWCOMPAT_DELETE nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref)); | ||
438 | jffs2_mark_node_obsolete(c, ref); | ||
439 | break; | ||
440 | } | ||
441 | |||
442 | } | ||
443 | spin_lock(&c->erase_completion_lock); | ||
444 | |||
445 | } | ||
446 | spin_unlock(&c->erase_completion_lock); | ||
447 | *tnp = ret_tn; | ||
448 | *fdp = ret_fd; | ||
449 | |||
450 | return 0; | ||
451 | |||
452 | free_out: | ||
453 | jffs2_free_tmp_dnode_info_list(ret_tn); | ||
454 | jffs2_free_full_dirent_list(ret_fd); | ||
455 | return err; | ||
456 | } | ||
457 | |||
/* Set the state of an inode cache entry under inocache_lock, and wake
   anyone sleeping on inocache_wq waiting for the state to change. */
void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state)
{
	spin_lock(&c->inocache_lock);
	ic->state = state;
	wake_up(&c->inocache_wq);
	spin_unlock(&c->inocache_lock);
}
465 | |||
466 | /* During mount, this needs no locking. During normal operation, its | ||
467 | callers want to do other stuff while still holding the inocache_lock. | ||
468 | Rather than introducing special case get_ino_cache functions or | ||
469 | callbacks, we just let the caller do the locking itself. */ | ||
470 | |||
471 | struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, uint32_t ino) | ||
472 | { | ||
473 | struct jffs2_inode_cache *ret; | ||
474 | |||
475 | D2(printk(KERN_DEBUG "jffs2_get_ino_cache(): ino %u\n", ino)); | ||
476 | |||
477 | ret = c->inocache_list[ino % INOCACHE_HASHSIZE]; | ||
478 | while (ret && ret->ino < ino) { | ||
479 | ret = ret->next; | ||
480 | } | ||
481 | |||
482 | if (ret && ret->ino != ino) | ||
483 | ret = NULL; | ||
484 | |||
485 | D2(printk(KERN_DEBUG "jffs2_get_ino_cache found %p for ino %u\n", ret, ino)); | ||
486 | return ret; | ||
487 | } | ||
488 | |||
489 | void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new) | ||
490 | { | ||
491 | struct jffs2_inode_cache **prev; | ||
492 | D2(printk(KERN_DEBUG "jffs2_add_ino_cache: Add %p (ino #%u)\n", new, new->ino)); | ||
493 | spin_lock(&c->inocache_lock); | ||
494 | |||
495 | prev = &c->inocache_list[new->ino % INOCACHE_HASHSIZE]; | ||
496 | |||
497 | while ((*prev) && (*prev)->ino < new->ino) { | ||
498 | prev = &(*prev)->next; | ||
499 | } | ||
500 | new->next = *prev; | ||
501 | *prev = new; | ||
502 | |||
503 | spin_unlock(&c->inocache_lock); | ||
504 | } | ||
505 | |||
506 | void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old) | ||
507 | { | ||
508 | struct jffs2_inode_cache **prev; | ||
509 | D2(printk(KERN_DEBUG "jffs2_del_ino_cache: Del %p (ino #%u)\n", old, old->ino)); | ||
510 | spin_lock(&c->inocache_lock); | ||
511 | |||
512 | prev = &c->inocache_list[old->ino % INOCACHE_HASHSIZE]; | ||
513 | |||
514 | while ((*prev) && (*prev)->ino < old->ino) { | ||
515 | prev = &(*prev)->next; | ||
516 | } | ||
517 | if ((*prev) == old) { | ||
518 | *prev = old->next; | ||
519 | } | ||
520 | |||
521 | spin_unlock(&c->inocache_lock); | ||
522 | } | ||
523 | |||
524 | void jffs2_free_ino_caches(struct jffs2_sb_info *c) | ||
525 | { | ||
526 | int i; | ||
527 | struct jffs2_inode_cache *this, *next; | ||
528 | |||
529 | for (i=0; i<INOCACHE_HASHSIZE; i++) { | ||
530 | this = c->inocache_list[i]; | ||
531 | while (this) { | ||
532 | next = this->next; | ||
533 | D2(printk(KERN_DEBUG "jffs2_free_ino_caches: Freeing ino #%u at %p\n", this->ino, this)); | ||
534 | jffs2_free_inode_cache(this); | ||
535 | this = next; | ||
536 | } | ||
537 | c->inocache_list[i] = NULL; | ||
538 | } | ||
539 | } | ||
540 | |||
541 | void jffs2_free_raw_node_refs(struct jffs2_sb_info *c) | ||
542 | { | ||
543 | int i; | ||
544 | struct jffs2_raw_node_ref *this, *next; | ||
545 | |||
546 | for (i=0; i<c->nr_blocks; i++) { | ||
547 | this = c->blocks[i].first_node; | ||
548 | while(this) { | ||
549 | next = this->next_phys; | ||
550 | jffs2_free_raw_node_ref(this); | ||
551 | this = next; | ||
552 | } | ||
553 | c->blocks[i].first_node = c->blocks[i].last_node = NULL; | ||
554 | } | ||
555 | } | ||
556 | |||
/* Find the fragment in 'fragtree' that covers 'offset' (i.e. with
   frag->ofs <= offset < frag->ofs + frag->size).  If no fragment covers
   it, return the closest fragment starting below 'offset', or NULL if
   the tree is empty or every fragment lies above it. */
struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset)
{
	/* The common case in lookup is that there will be a node
	   which precisely matches. So we go looking for that first */
	struct rb_node *next;
	struct jffs2_node_frag *prev = NULL;
	struct jffs2_node_frag *frag = NULL;

	D2(printk(KERN_DEBUG "jffs2_lookup_node_frag(%p, %d)\n", fragtree, offset));

	next = fragtree->rb_node;

	while(next) {
		frag = rb_entry(next, struct jffs2_node_frag, rb);

		D2(printk(KERN_DEBUG "Considering frag %d-%d (%p). left %p, right %p\n",
			  frag->ofs, frag->ofs+frag->size, frag, frag->rb.rb_left, frag->rb.rb_right));
		if (frag->ofs + frag->size <= offset) {
			/* This frag ends at or before 'offset': search right. */
			D2(printk(KERN_DEBUG "Going right from frag %d-%d, before the region we care about\n",
				  frag->ofs, frag->ofs+frag->size));
			/* Remember the closest smaller match on the way down */
			if (!prev || frag->ofs > prev->ofs)
				prev = frag;
			next = frag->rb.rb_right;
		} else if (frag->ofs > offset) {
			/* This frag starts after 'offset': search left. */
			D2(printk(KERN_DEBUG "Going left from frag %d-%d, after the region we care about\n",
				  frag->ofs, frag->ofs+frag->size));
			next = frag->rb.rb_left;
		} else {
			/* frag->ofs <= offset < frag->ofs + frag->size: hit. */
			D2(printk(KERN_DEBUG "Returning frag %d,%d, matched\n",
				  frag->ofs, frag->ofs+frag->size));
			return frag;
		}
	}

	/* Exact match not found. Go back up looking at each parent,
	   and return the closest smaller one */

	if (prev)
		D2(printk(KERN_DEBUG "No match. Returning frag %d,%d, closest previous\n",
			  prev->ofs, prev->ofs+prev->size));
	else
		D2(printk(KERN_DEBUG "Returning NULL, empty fragtree\n"));

	return prev;
}
603 | |||
/* Pass 'c' argument to indicate that nodes should be marked obsolete as
   they're killed. */
/* Tear down an entire fragment tree: free every frag, and drop each
   frag's reference on its full_dnode, freeing the dnode (and, when 'c'
   is given, obsoleting its on-flash node) once the last referencing
   frag goes away.  Works bottom-up by nulling parent child-pointers
   instead of rebalancing, since the whole tree is being destroyed. */
void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c)
{
	struct jffs2_node_frag *frag;
	struct jffs2_node_frag *parent;

	if (!root->rb_node)
		return;

	frag = (rb_entry(root->rb_node, struct jffs2_node_frag, rb));

	while(frag) {
		/* Descend until we reach a frag with no children. */
		if (frag->rb.rb_left) {
			D2(printk(KERN_DEBUG "Going left from frag (%p) %d-%d\n",
				  frag, frag->ofs, frag->ofs+frag->size));
			frag = frag_left(frag);
			continue;
		}
		if (frag->rb.rb_right) {
			D2(printk(KERN_DEBUG "Going right from frag (%p) %d-%d\n",
				  frag, frag->ofs, frag->ofs+frag->size));
			frag = frag_right(frag);
			continue;
		}

		D2(printk(KERN_DEBUG "jffs2_kill_fragtree: frag at 0x%x-0x%x: node %p, frags %d--\n",
			  frag->ofs, frag->ofs+frag->size, frag->node,
			  frag->node?frag->node->frags:0));

		/* A NULL frag->node denotes a hole. */
		if (frag->node && !(--frag->node->frags)) {
			/* Not a hole, and it's the final remaining frag
			   of this node. Free the node */
			if (c)
				jffs2_mark_node_obsolete(c, frag->node->raw);

			jffs2_free_full_dnode(frag->node);
		}
		/* Detach this leaf from its parent so it is never revisited. */
		parent = frag_parent(frag);
		if (parent) {
			if (frag_left(parent) == frag)
				parent->rb.rb_left = NULL;
			else
				parent->rb.rb_right = NULL;
		}

		jffs2_free_node_frag(frag);
		frag = parent;

		/* This can be a long walk; give the scheduler a chance. */
		cond_resched();
	}
}
656 | |||
/* Link 'newfrag' into the fragtree below 'base'.  The caller is still
   expected to rebalance afterwards (rb_insert_color).  BUGs if a frag
   with the same start offset already exists. */
void jffs2_fragtree_insert(struct jffs2_node_frag *newfrag, struct jffs2_node_frag *base)
{
	struct rb_node *parent = &base->rb;
	/* 'link' initially points at 'parent' itself, so the first loop
	   iteration examines 'base' before descending; thereafter it
	   tracks the child slot where 'newfrag' will be linked. */
	struct rb_node **link = &parent;

	D2(printk(KERN_DEBUG "jffs2_fragtree_insert(%p; %d-%d, %p)\n", newfrag,
		  newfrag->ofs, newfrag->ofs+newfrag->size, base));

	while (*link) {
		parent = *link;
		base = rb_entry(parent, struct jffs2_node_frag, rb);

		D2(printk(KERN_DEBUG "fragtree_insert considering frag at 0x%x\n", base->ofs));
		if (newfrag->ofs > base->ofs)
			link = &base->rb.rb_right;
		else if (newfrag->ofs < base->ofs)
			link = &base->rb.rb_left;
		else {
			/* No two frags may start at the same offset. */
			printk(KERN_CRIT "Duplicate frag at %08x (%p,%p)\n", newfrag->ofs, newfrag, base);
			BUG();
		}
	}

	rb_link_node(&newfrag->rb, &base->rb, link);
}
diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h new file mode 100644 index 000000000000..a4864d05ea92 --- /dev/null +++ b/fs/jffs2/nodelist.h | |||
@@ -0,0 +1,473 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2001-2003 Red Hat, Inc. | ||
5 | * | ||
6 | * Created by David Woodhouse <dwmw2@infradead.org> | ||
7 | * | ||
8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
9 | * | ||
10 | * $Id: nodelist.h,v 1.126 2004/11/19 15:06:29 dedekind Exp $ | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #ifndef __JFFS2_NODELIST_H__ | ||
15 | #define __JFFS2_NODELIST_H__ | ||
16 | |||
17 | #include <linux/config.h> | ||
18 | #include <linux/fs.h> | ||
19 | #include <linux/types.h> | ||
20 | #include <linux/jffs2.h> | ||
21 | #include <linux/jffs2_fs_sb.h> | ||
22 | #include <linux/jffs2_fs_i.h> | ||
23 | |||
24 | #ifdef __ECOS | ||
25 | #include "os-ecos.h" | ||
26 | #else | ||
27 | #include <linux/mtd/compatmac.h> /* For min/max in older kernels */ | ||
28 | #include "os-linux.h" | ||
29 | #endif | ||
30 | |||
/* Default the debug level to 1 (D1 messages compiled in) when the
   kernel config didn't specify one. */
#ifndef CONFIG_JFFS2_FS_DEBUG
#define CONFIG_JFFS2_FS_DEBUG 1
#endif

/* D1(x): statement compiled in at debug level >= 1, compiled out otherwise */
#if CONFIG_JFFS2_FS_DEBUG > 0
#define D1(x) x
#else
#define D1(x)
#endif

/* D2(x): statement compiled in at debug level >= 2 (verbose) only */
#if CONFIG_JFFS2_FS_DEBUG > 1
#define D2(x) x
#else
#define D2(x)
#endif

/* Selects the on-flash byte order: native CPU order by default.
   Define JFFS2_BIG_ENDIAN or JFFS2_LITTLE_ENDIAN instead to force a
   fixed on-flash order regardless of CPU. */
#define JFFS2_NATIVE_ENDIAN

/* Note we handle mode bits conversion from JFFS2 (i.e. Linux) to/from
   whatever OS we're actually running on here too. */

#if defined(JFFS2_NATIVE_ENDIAN)
#define cpu_to_je16(x) ((jint16_t){x})
#define cpu_to_je32(x) ((jint32_t){x})
#define cpu_to_jemode(x) ((jmode_t){os_to_jffs2_mode(x)})

#define je16_to_cpu(x) ((x).v16)
#define je32_to_cpu(x) ((x).v32)
#define jemode_to_cpu(x) (jffs2_to_os_mode((x).m))
#elif defined(JFFS2_BIG_ENDIAN)
#define cpu_to_je16(x) ((jint16_t){cpu_to_be16(x)})
#define cpu_to_je32(x) ((jint32_t){cpu_to_be32(x)})
#define cpu_to_jemode(x) ((jmode_t){cpu_to_be32(os_to_jffs2_mode(x))})

#define je16_to_cpu(x) (be16_to_cpu(x.v16))
#define je32_to_cpu(x) (be32_to_cpu(x.v32))
#define jemode_to_cpu(x) (be32_to_cpu(jffs2_to_os_mode((x).m)))
#elif defined(JFFS2_LITTLE_ENDIAN)
#define cpu_to_je16(x) ((jint16_t){cpu_to_le16(x)})
#define cpu_to_je32(x) ((jint32_t){cpu_to_le32(x)})
#define cpu_to_jemode(x) ((jmode_t){cpu_to_le32(os_to_jffs2_mode(x))})

#define je16_to_cpu(x) (le16_to_cpu(x.v16))
#define je32_to_cpu(x) (le32_to_cpu(x.v32))
#define jemode_to_cpu(x) (le32_to_cpu(jffs2_to_os_mode((x).m)))
#else
#error wibble
#endif
79 | |||
/*
   This is all we need to keep in-core for each raw node during normal
   operation. As and when we do read_inode on a particular inode, we can
   scan the nodes which are listed for it and build up a proper map of
   which nodes are currently valid. JFFSv1 always used to keep that whole
   map in core for each inode.
*/
struct jffs2_raw_node_ref
{
	struct jffs2_raw_node_ref *next_in_ino; /* Points to the next raw_node_ref
		for this inode. If this is the last, it points to the inode_cache
		for this inode instead. The inode_cache will have NULL in the first
		word so you know when you've got there :) */
	struct jffs2_raw_node_ref *next_phys;	/* Next node in physical flash order;
						   NULL for the last node in a block */
	uint32_t flash_offset;	/* Position on flash; low 2 bits hold REF_* state (below) */
	uint32_t __totlen; /* This may die; use ref_totlen(c, jeb, ) below */
};

/* flash_offset & 3 always has to be zero, because nodes are
   always aligned at 4 bytes. So we have a couple of extra bits
   to play with, which indicate the node's status; see below: */
#define REF_UNCHECKED	0	/* We haven't yet checked the CRC or built its inode */
#define REF_OBSOLETE	1	/* Obsolete, can be completely ignored */
#define REF_PRISTINE	2	/* Completely clean. GC without looking */
#define REF_NORMAL	3	/* Possibly overlapped. Read the page and write again on GC */
/* Accessors splitting flash_offset into its state and offset parts */
#define ref_flags(ref)		((ref)->flash_offset & 3)
#define ref_offset(ref)		((ref)->flash_offset & ~3)
#define ref_obsolete(ref)	(((ref)->flash_offset & 3) == REF_OBSOLETE)
#define mark_ref_normal(ref)    do { (ref)->flash_offset = ref_offset(ref) | REF_NORMAL; } while(0)
109 | |||
/* For each inode in the filesystem, we need to keep a record of
   nlink, because it would be a PITA to scan the whole directory tree
   at read_inode() time to calculate it, and to keep sufficient information
   in the raw_node_ref (basically both parent and child inode number for
   dirent nodes) would take more space than this does. We also keep
   a pointer to the first physical node which is part of this inode, too.
*/
struct jffs2_inode_cache {
	struct jffs2_full_dirent *scan_dents; /* Used during scan to hold
		temporary lists of dirents, and later must be set to
		NULL to mark the end of the raw_node_ref->next_in_ino
		chain. */
	struct jffs2_inode_cache *next;		/* Next cache entry in the hash chain */
	struct jffs2_raw_node_ref *nodes;	/* Head of this inode's raw node list */
	uint32_t ino;				/* Inode number */
	int nlink;				/* Link count */
	int state;				/* One of the INO_STATE_* values below */
};

/* Inode states for 'state' above. We need the 'GC' state to prevent
   someone from doing a read_inode() while we're moving a 'REF_PRISTINE'
   node without going through all the iget() nonsense */
#define INO_STATE_UNCHECKED	0	/* CRC checks not yet done */
#define INO_STATE_CHECKING	1	/* CRC checks in progress */
#define INO_STATE_PRESENT	2	/* In core */
#define INO_STATE_CHECKEDABSENT	3	/* Checked, cleared again */
#define INO_STATE_GC		4	/* GCing a 'pristine' node */
#define INO_STATE_READING	5	/* In read_inode() */

/* Number of buckets in the per-filesystem inode cache hash table */
#define INOCACHE_HASHSIZE 128
140 | |||
/*
   Larger representation of a raw node, kept in-core only when the
   struct inode for this particular ino is instantiated.
*/

struct jffs2_full_dnode
{
	struct jffs2_raw_node_ref *raw;	/* The underlying on-flash node */
	uint32_t ofs; /* The offset to which the data of this node belongs */
	uint32_t size;			/* Length of the data */
	uint32_t frags; /* Number of fragments which currently refer
			   to this node. When this reaches zero,
			   the node is obsolete. */
};

/*
   Even larger representation of a raw node, kept in-core only while
   we're actually building up the original map of which nodes go where,
   in read_inode()
*/
struct jffs2_tmp_dnode_info
{
	struct jffs2_tmp_dnode_info *next;	/* Singly-linked list of tmp dnodes */
	struct jffs2_full_dnode *fn;		/* The dnode itself */
	uint32_t version;			/* Node version number */
};

/* In-core representation of a directory entry */
struct jffs2_full_dirent
{
	struct jffs2_raw_node_ref *raw;		/* The underlying on-flash dirent node */
	struct jffs2_full_dirent *next;		/* Next dirent in the same directory */
	uint32_t version;
	uint32_t ino; /* == zero for unlink */
	unsigned int nhash;			/* presumably a hash of 'name' for fast
						   comparison — TODO confirm at use sites */
	unsigned char type;
	unsigned char name[0];			/* Trailing variable-length name */
};
178 | |||
/*
   Fragments - used to build a map of which raw node to obtain
   data from for each part of the ino
*/
struct jffs2_node_frag
{
	struct rb_node rb;		/* Links this frag into the per-inode fragtree,
					   keyed by 'ofs' (see jffs2_fragtree_insert) */
	struct jffs2_full_dnode *node; /* NULL for holes */
	uint32_t size;			/* Length of this fragment */
	uint32_t ofs; /* The offset to which this fragment belongs */
};
190 | |||
/* Per-eraseblock space accounting and node-chain bookkeeping */
struct jffs2_eraseblock
{
	struct list_head list;		/* Links the block into one of the sb's block lists */
	int bad_count;
	uint32_t offset; /* of this block in the MTD */

	uint32_t unchecked_size;	/* Bytes in REF_UNCHECKED nodes */
	uint32_t used_size;		/* Bytes in valid, non-obsolete nodes */
	uint32_t dirty_size;
	uint32_t wasted_size;
	uint32_t free_size;	/* Note that sector_size - free_size
				   is the address of the first free space */
	struct jffs2_raw_node_ref *first_node;	/* Head of the physical node chain */
	struct jffs2_raw_node_ref *last_node;	/* Tail of the physical node chain */

	struct jffs2_raw_node_ref *gc_node;	/* Next node to be garbage collected */
};
208 | |||
/* Sanity-check space accounting: the per-block counters of 'jeb' must
   sum to c->sector_size, and the superblock-wide counters must sum to
   c->flash_size; BUG() on any mismatch. 'jeb' may be NULL, in which
   case only the superblock totals are checked. */
#define ACCT_SANITY_CHECK(c, jeb) do { \
		struct jffs2_eraseblock *___j = jeb; \
		if ((___j) && ___j->used_size + ___j->dirty_size + ___j->free_size + ___j->wasted_size + ___j->unchecked_size != c->sector_size) { \
			printk(KERN_NOTICE "Eeep. Space accounting for block at 0x%08x is screwed\n", ___j->offset); \
			printk(KERN_NOTICE "free 0x%08x + dirty 0x%08x + used %08x + wasted %08x + unchecked %08x != total %08x\n", \
			___j->free_size, ___j->dirty_size, ___j->used_size, ___j->wasted_size, ___j->unchecked_size, c->sector_size); \
			BUG(); \
		} \
		if (c->used_size + c->dirty_size + c->free_size + c->erasing_size + c->bad_size + c->wasted_size + c->unchecked_size != c->flash_size) { \
			printk(KERN_NOTICE "Eeep. Space accounting superblock info is screwed\n"); \
			printk(KERN_NOTICE "free 0x%08x + dirty 0x%08x + used %08x + erasing %08x + bad %08x + wasted %08x + unchecked %08x != total %08x\n", \
			c->free_size, c->dirty_size, c->used_size, c->erasing_size, c->bad_size, c->wasted_size, c->unchecked_size, c->flash_size); \
			BUG(); \
		} \
	} while(0)
224 | |||
225 | static inline void paranoia_failed_dump(struct jffs2_eraseblock *jeb) | ||
226 | { | ||
227 | struct jffs2_raw_node_ref *ref; | ||
228 | int i=0; | ||
229 | |||
230 | printk(KERN_NOTICE); | ||
231 | for (ref = jeb->first_node; ref; ref = ref->next_phys) { | ||
232 | printk("%08x->", ref_offset(ref)); | ||
233 | if (++i == 8) { | ||
234 | i = 0; | ||
235 | printk("\n" KERN_NOTICE); | ||
236 | } | ||
237 | } | ||
238 | printk("\n"); | ||
239 | } | ||
240 | |||
241 | |||
/* Paranoid accounting check: walk the whole physical node chain of
   'jeb', verifying every ref lies inside the block and that the chain's
   last element matches jeb->last_node, while recomputing used and
   unchecked sizes from the refs. BUG() (after dumping the chain) on any
   inconsistency with the stored accounting. Note: relies on 'c' being
   in scope at the expansion site. */
#define ACCT_PARANOIA_CHECK(jeb) do { \
		uint32_t my_used_size = 0; \
		uint32_t my_unchecked_size = 0; \
		struct jffs2_raw_node_ref *ref2 = jeb->first_node; \
		while (ref2) { \
			if (unlikely(ref2->flash_offset < jeb->offset || \
					ref2->flash_offset > jeb->offset + c->sector_size)) { \
				printk(KERN_NOTICE "Node %08x shouldn't be in block at %08x!\n", \
					ref_offset(ref2), jeb->offset); \
				paranoia_failed_dump(jeb); \
				BUG(); \
			} \
			if (ref_flags(ref2) == REF_UNCHECKED) \
				my_unchecked_size += ref_totlen(c, jeb, ref2); \
			else if (!ref_obsolete(ref2)) \
				my_used_size += ref_totlen(c, jeb, ref2); \
			if (unlikely((!ref2->next_phys) != (ref2 == jeb->last_node))) { \
				if (!ref2->next_phys) \
					printk("ref for node at %p (phys %08x) has next_phys->%p (----), last_node->%p (phys %08x)\n", \
						ref2, ref_offset(ref2), ref2->next_phys, \
						jeb->last_node, ref_offset(jeb->last_node)); \
				else \
					printk("ref for node at %p (phys %08x) has next_phys->%p (%08x), last_node->%p (phys %08x)\n", \
						ref2, ref_offset(ref2), ref2->next_phys, ref_offset(ref2->next_phys), \
						jeb->last_node, ref_offset(jeb->last_node)); \
				paranoia_failed_dump(jeb); \
				BUG(); \
			} \
			ref2 = ref2->next_phys; \
		} \
		if (my_used_size != jeb->used_size) { \
			printk(KERN_NOTICE "Calculated used size %08x != stored used size %08x\n", my_used_size, jeb->used_size); \
			BUG(); \
		} \
		if (my_unchecked_size != jeb->unchecked_size) { \
			printk(KERN_NOTICE "Calculated unchecked size %08x != stored unchecked size %08x\n", my_unchecked_size, jeb->unchecked_size); \
			BUG(); \
		} \
	} while(0)
281 | |||
282 | /* Calculate totlen from surrounding nodes or eraseblock */ | ||
283 | static inline uint32_t __ref_totlen(struct jffs2_sb_info *c, | ||
284 | struct jffs2_eraseblock *jeb, | ||
285 | struct jffs2_raw_node_ref *ref) | ||
286 | { | ||
287 | uint32_t ref_end; | ||
288 | |||
289 | if (ref->next_phys) | ||
290 | ref_end = ref_offset(ref->next_phys); | ||
291 | else { | ||
292 | if (!jeb) | ||
293 | jeb = &c->blocks[ref->flash_offset / c->sector_size]; | ||
294 | |||
295 | /* Last node in block. Use free_space */ | ||
296 | BUG_ON(ref != jeb->last_node); | ||
297 | ref_end = jeb->offset + c->sector_size - jeb->free_size; | ||
298 | } | ||
299 | return ref_end - ref_offset(ref); | ||
300 | } | ||
301 | |||
/* Return the total on-flash length of the node behind 'ref'. 'jeb' is
   optional; if supplied it must be the eraseblock containing the node
   (checked in debug builds). Currently just reads the stored __totlen;
   the neighbour-based calculation is disabled (see #else branch). */
static inline uint32_t ref_totlen(struct jffs2_sb_info *c,
				  struct jffs2_eraseblock *jeb,
				  struct jffs2_raw_node_ref *ref)
{
	uint32_t ret;

	/* Debug-only: caller-supplied block must match the block computed
	   from the node's flash offset */
	D1(if (jeb && jeb != &c->blocks[ref->flash_offset / c->sector_size]) {
		printk(KERN_CRIT "ref_totlen called with wrong block -- at 0x%08x instead of 0x%08x; ref 0x%08x\n",
		       jeb->offset, c->blocks[ref->flash_offset / c->sector_size].offset, ref_offset(ref));
		BUG();
	})

#if 1
	ret = ref->__totlen;
#else
	/* This doesn't actually work yet */
	ret = __ref_totlen(c, jeb, ref);
	if (ret != ref->__totlen) {
		printk(KERN_CRIT "Totlen for ref at %p (0x%08x-0x%08x) miscalculated as 0x%x instead of %x\n",
		       ref, ref_offset(ref), ref_offset(ref)+ref->__totlen,
		       ret, ref->__totlen);
		if (!jeb)
			jeb = &c->blocks[ref->flash_offset / c->sector_size];
		paranoia_failed_dump(jeb);
		BUG();
	}
#endif
	return ret;
}
331 | |||
332 | |||
/* Allocation priorities for jffs2_reserve_space()'s 'prio' argument */
#define ALLOC_NORMAL	0	/* Normal allocation */
#define ALLOC_DELETION	1	/* Deletion node. Best to allow it */
#define ALLOC_GC	2	/* Space requested for GC. Give it or die */
#define ALLOC_NORETRY	3	/* For jffs2_write_dnode: On failure, return -EAGAIN instead of retrying */

/* How much dirty space before it goes on the very_dirty_list */
#define VERYDIRTY(c, size) ((size) >= ((c)->sector_size / 2))

/* check if dirty space is more than 255 Byte */
#define ISDIRTY(size) ((size) > sizeof (struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN)

/* Round up to the 4-byte alignment used for on-flash nodes */
#define PAD(x) (((x)+3)&~3)
345 | |||
346 | static inline struct jffs2_inode_cache *jffs2_raw_ref_to_ic(struct jffs2_raw_node_ref *raw) | ||
347 | { | ||
348 | while(raw->next_in_ino) { | ||
349 | raw = raw->next_in_ino; | ||
350 | } | ||
351 | |||
352 | return ((struct jffs2_inode_cache *)raw); | ||
353 | } | ||
354 | |||
355 | static inline struct jffs2_node_frag *frag_first(struct rb_root *root) | ||
356 | { | ||
357 | struct rb_node *node = root->rb_node; | ||
358 | |||
359 | if (!node) | ||
360 | return NULL; | ||
361 | while(node->rb_left) | ||
362 | node = node->rb_left; | ||
363 | return rb_entry(node, struct jffs2_node_frag, rb); | ||
364 | } | ||
/* Fragtree navigation helpers: thin wrappers that map rbtree nodes back
   to their containing jffs2_node_frag. rb_parent reads the rb_parent
   member of struct rb_node directly. */
#define rb_parent(rb) ((rb)->rb_parent)
#define frag_next(frag) rb_entry(rb_next(&(frag)->rb), struct jffs2_node_frag, rb)
#define frag_prev(frag) rb_entry(rb_prev(&(frag)->rb), struct jffs2_node_frag, rb)
#define frag_parent(frag) rb_entry(rb_parent(&(frag)->rb), struct jffs2_node_frag, rb)
#define frag_left(frag) rb_entry((frag)->rb.rb_left, struct jffs2_node_frag, rb)
#define frag_right(frag) rb_entry((frag)->rb.rb_right, struct jffs2_node_frag, rb)
#define frag_erase(frag, list) rb_erase(&frag->rb, list);
372 | |||
373 | /* nodelist.c */ | ||
374 | D2(void jffs2_print_frag_list(struct jffs2_inode_info *f)); | ||
375 | void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list); | ||
376 | int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | ||
377 | struct jffs2_tmp_dnode_info **tnp, struct jffs2_full_dirent **fdp, | ||
378 | uint32_t *highest_version, uint32_t *latest_mctime, | ||
379 | uint32_t *mctime_ver); | ||
380 | void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state); | ||
381 | struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, uint32_t ino); | ||
382 | void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new); | ||
383 | void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old); | ||
384 | void jffs2_free_ino_caches(struct jffs2_sb_info *c); | ||
385 | void jffs2_free_raw_node_refs(struct jffs2_sb_info *c); | ||
386 | struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset); | ||
387 | void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c_delete); | ||
388 | void jffs2_fragtree_insert(struct jffs2_node_frag *newfrag, struct jffs2_node_frag *base); | ||
389 | struct rb_node *rb_next(struct rb_node *); | ||
390 | struct rb_node *rb_prev(struct rb_node *); | ||
391 | void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_root *root); | ||
392 | |||
393 | /* nodemgmt.c */ | ||
394 | int jffs2_thread_should_wake(struct jffs2_sb_info *c); | ||
395 | int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, int prio); | ||
396 | int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len); | ||
397 | int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new); | ||
398 | void jffs2_complete_reservation(struct jffs2_sb_info *c); | ||
399 | void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *raw); | ||
400 | void jffs2_dump_block_lists(struct jffs2_sb_info *c); | ||
401 | |||
402 | /* write.c */ | ||
403 | int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint32_t mode, struct jffs2_raw_inode *ri); | ||
404 | |||
405 | struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const unsigned char *data, uint32_t datalen, uint32_t flash_ofs, int alloc_mode); | ||
406 | struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_dirent *rd, const unsigned char *name, uint32_t namelen, uint32_t flash_ofs, int alloc_mode); | ||
407 | int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | ||
408 | struct jffs2_raw_inode *ri, unsigned char *buf, | ||
409 | uint32_t offset, uint32_t writelen, uint32_t *retlen); | ||
410 | int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const char *name, int namelen); | ||
411 | int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, const char *name, int namelen, struct jffs2_inode_info *dead_f); | ||
412 | int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino, uint8_t type, const char *name, int namelen); | ||
413 | |||
414 | |||
415 | /* readinode.c */ | ||
416 | void jffs2_truncate_fraglist (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size); | ||
417 | int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn); | ||
418 | int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | ||
419 | uint32_t ino, struct jffs2_raw_inode *latest_node); | ||
420 | int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic); | ||
421 | void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f); | ||
422 | |||
423 | /* malloc.c */ | ||
424 | int jffs2_create_slab_caches(void); | ||
425 | void jffs2_destroy_slab_caches(void); | ||
426 | |||
427 | struct jffs2_full_dirent *jffs2_alloc_full_dirent(int namesize); | ||
428 | void jffs2_free_full_dirent(struct jffs2_full_dirent *); | ||
429 | struct jffs2_full_dnode *jffs2_alloc_full_dnode(void); | ||
430 | void jffs2_free_full_dnode(struct jffs2_full_dnode *); | ||
431 | struct jffs2_raw_dirent *jffs2_alloc_raw_dirent(void); | ||
432 | void jffs2_free_raw_dirent(struct jffs2_raw_dirent *); | ||
433 | struct jffs2_raw_inode *jffs2_alloc_raw_inode(void); | ||
434 | void jffs2_free_raw_inode(struct jffs2_raw_inode *); | ||
435 | struct jffs2_tmp_dnode_info *jffs2_alloc_tmp_dnode_info(void); | ||
436 | void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *); | ||
437 | struct jffs2_raw_node_ref *jffs2_alloc_raw_node_ref(void); | ||
438 | void jffs2_free_raw_node_ref(struct jffs2_raw_node_ref *); | ||
439 | struct jffs2_node_frag *jffs2_alloc_node_frag(void); | ||
440 | void jffs2_free_node_frag(struct jffs2_node_frag *); | ||
441 | struct jffs2_inode_cache *jffs2_alloc_inode_cache(void); | ||
442 | void jffs2_free_inode_cache(struct jffs2_inode_cache *); | ||
443 | |||
444 | /* gc.c */ | ||
445 | int jffs2_garbage_collect_pass(struct jffs2_sb_info *c); | ||
446 | |||
447 | /* read.c */ | ||
448 | int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | ||
449 | struct jffs2_full_dnode *fd, unsigned char *buf, | ||
450 | int ofs, int len); | ||
451 | int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | ||
452 | unsigned char *buf, uint32_t offset, uint32_t len); | ||
453 | char *jffs2_getlink(struct jffs2_sb_info *c, struct jffs2_inode_info *f); | ||
454 | |||
455 | /* scan.c */ | ||
456 | int jffs2_scan_medium(struct jffs2_sb_info *c); | ||
457 | void jffs2_rotate_lists(struct jffs2_sb_info *c); | ||
458 | |||
459 | /* build.c */ | ||
460 | int jffs2_do_mount_fs(struct jffs2_sb_info *c); | ||
461 | |||
462 | /* erase.c */ | ||
463 | void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count); | ||
464 | |||
465 | #ifdef CONFIG_JFFS2_FS_NAND | ||
466 | /* wbuf.c */ | ||
467 | int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino); | ||
468 | int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c); | ||
469 | int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); | ||
470 | int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); | ||
471 | #endif | ||
472 | |||
473 | #endif /* __JFFS2_NODELIST_H__ */ | ||
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c new file mode 100644 index 000000000000..2651135bdf42 --- /dev/null +++ b/fs/jffs2/nodemgmt.c | |||
@@ -0,0 +1,838 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2001-2003 Red Hat, Inc. | ||
5 | * | ||
6 | * Created by David Woodhouse <dwmw2@infradead.org> | ||
7 | * | ||
8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
9 | * | ||
10 | * $Id: nodemgmt.c,v 1.115 2004/11/22 11:07:21 dwmw2 Exp $ | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/mtd/mtd.h> | ||
17 | #include <linux/compiler.h> | ||
18 | #include <linux/sched.h> /* For cond_resched() */ | ||
19 | #include "nodelist.h" | ||
20 | |||
21 | /** | ||
22 | * jffs2_reserve_space - request physical space to write nodes to flash | ||
23 | * @c: superblock info | ||
24 | * @minsize: Minimum acceptable size of allocation | ||
25 | * @ofs: Returned value of node offset | ||
26 | * @len: Returned value of allocation length | ||
27 | * @prio: Allocation type - ALLOC_{NORMAL,DELETION} | ||
28 | * | ||
29 | * Requests a block of physical space on the flash. Returns zero for success | ||
 * and puts 'ofs' and 'len' into the appropriate place, or returns -ENOSPC
31 | * or other error if appropriate. | ||
32 | * | ||
33 | * If it returns zero, jffs2_reserve_space() also downs the per-filesystem | ||
34 | * allocation semaphore, to prevent more than one allocation from being | ||
 * active at any time. The semaphore is later released by jffs2_complete_reservation()
36 | * | ||
37 | * jffs2_reserve_space() may trigger garbage collection in order to make room | ||
38 | * for the requested allocation. | ||
39 | */ | ||
40 | |||
41 | static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len); | ||
42 | |||
43 | int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, int prio) | ||
44 | { | ||
45 | int ret = -EAGAIN; | ||
46 | int blocksneeded = c->resv_blocks_write; | ||
47 | /* align it */ | ||
48 | minsize = PAD(minsize); | ||
49 | |||
50 | D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize)); | ||
51 | down(&c->alloc_sem); | ||
52 | |||
53 | D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n")); | ||
54 | |||
55 | spin_lock(&c->erase_completion_lock); | ||
56 | |||
57 | /* this needs a little more thought (true <tglx> :)) */ | ||
58 | while(ret == -EAGAIN) { | ||
59 | while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) { | ||
60 | int ret; | ||
61 | uint32_t dirty, avail; | ||
62 | |||
63 | /* calculate real dirty size | ||
64 | * dirty_size contains blocks on erase_pending_list | ||
65 | * those blocks are counted in c->nr_erasing_blocks. | ||
66 | * If one block is actually erased, it is not longer counted as dirty_space | ||
67 | * but it is counted in c->nr_erasing_blocks, so we add it and subtract it | ||
68 | * with c->nr_erasing_blocks * c->sector_size again. | ||
69 | * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks | ||
70 | * This helps us to force gc and pick eventually a clean block to spread the load. | ||
71 | * We add unchecked_size here, as we hopefully will find some space to use. | ||
72 | * This will affect the sum only once, as gc first finishes checking | ||
73 | * of nodes. | ||
74 | */ | ||
75 | dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size; | ||
76 | if (dirty < c->nospc_dirty_size) { | ||
77 | if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) { | ||
78 | printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n"); | ||
79 | break; | ||
80 | } | ||
81 | D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n", | ||
82 | dirty, c->unchecked_size, c->sector_size)); | ||
83 | |||
84 | spin_unlock(&c->erase_completion_lock); | ||
85 | up(&c->alloc_sem); | ||
86 | return -ENOSPC; | ||
87 | } | ||
88 | |||
89 | /* Calc possibly available space. Possibly available means that we | ||
90 | * don't know, if unchecked size contains obsoleted nodes, which could give us some | ||
91 | * more usable space. This will affect the sum only once, as gc first finishes checking | ||
92 | * of nodes. | ||
93 | + Return -ENOSPC, if the maximum possibly available space is less or equal than | ||
94 | * blocksneeded * sector_size. | ||
95 | * This blocks endless gc looping on a filesystem, which is nearly full, even if | ||
96 | * the check above passes. | ||
97 | */ | ||
98 | avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size; | ||
99 | if ( (avail / c->sector_size) <= blocksneeded) { | ||
100 | if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) { | ||
101 | printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n"); | ||
102 | break; | ||
103 | } | ||
104 | |||
105 | D1(printk(KERN_DEBUG "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n", | ||
106 | avail, blocksneeded * c->sector_size)); | ||
107 | spin_unlock(&c->erase_completion_lock); | ||
108 | up(&c->alloc_sem); | ||
109 | return -ENOSPC; | ||
110 | } | ||
111 | |||
112 | up(&c->alloc_sem); | ||
113 | |||
114 | D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n", | ||
115 | c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size, | ||
116 | c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size)); | ||
117 | spin_unlock(&c->erase_completion_lock); | ||
118 | |||
119 | ret = jffs2_garbage_collect_pass(c); | ||
120 | if (ret) | ||
121 | return ret; | ||
122 | |||
123 | cond_resched(); | ||
124 | |||
125 | if (signal_pending(current)) | ||
126 | return -EINTR; | ||
127 | |||
128 | down(&c->alloc_sem); | ||
129 | spin_lock(&c->erase_completion_lock); | ||
130 | } | ||
131 | |||
132 | ret = jffs2_do_reserve_space(c, minsize, ofs, len); | ||
133 | if (ret) { | ||
134 | D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret)); | ||
135 | } | ||
136 | } | ||
137 | spin_unlock(&c->erase_completion_lock); | ||
138 | if (ret) | ||
139 | up(&c->alloc_sem); | ||
140 | return ret; | ||
141 | } | ||
142 | |||
143 | int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len) | ||
144 | { | ||
145 | int ret = -EAGAIN; | ||
146 | minsize = PAD(minsize); | ||
147 | |||
148 | D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize)); | ||
149 | |||
150 | spin_lock(&c->erase_completion_lock); | ||
151 | while(ret == -EAGAIN) { | ||
152 | ret = jffs2_do_reserve_space(c, minsize, ofs, len); | ||
153 | if (ret) { | ||
154 | D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret)); | ||
155 | } | ||
156 | } | ||
157 | spin_unlock(&c->erase_completion_lock); | ||
158 | return ret; | ||
159 | } | ||
160 | |||
/* Called with alloc sem _and_ erase_completion_lock.
 *
 * Core allocator: arrange for c->nextblock to have at least 'minsize'
 * bytes of contiguous free space, and report the resulting write window
 * through *ofs (flash address) and *len (bytes available there).
 *
 * Returns 0 on success; -EAGAIN when the erase_completion_lock had to be
 * dropped (to flush the write buffer or to erase a block) and the caller
 * must retry; -ENOSPC when no free space can be produced at all.  The
 * erase_completion_lock may be dropped and retaken internally, but is
 * always held again on return. */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len)
{
	struct jffs2_eraseblock *jeb = c->nextblock;

 restart:
	if (jeb && minsize > jeb->free_size) {
		/* Skip the end of this block and file it as having some dirty space */
		/* If there's a pending write to it, flush now */
		if (jffs2_wbuf_dirty(c)) {
			spin_unlock(&c->erase_completion_lock);
			D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			/* The flush dropped the lock; nextblock may have changed.
			   Reload it and re-run the size check. */
			jeb = c->nextblock;
			goto restart;
		}
		/* Write off the unusable tail of the block as wasted space */
		c->wasted_size += jeb->free_size;
		c->free_size -= jeb->free_size;
		jeb->wasted_size += jeb->free_size;
		jeb->free_size = 0;

		/* Check, if we have a dirty block now, or if it was dirty already */
		if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
			/* Worth reclaiming: fold the wasted space into dirty space */
			c->dirty_size += jeb->wasted_size;
			c->wasted_size -= jeb->wasted_size;
			jeb->dirty_size += jeb->wasted_size;
			jeb->wasted_size = 0;
			if (VERYDIRTY(c, jeb->dirty_size)) {
				D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
				list_add_tail(&jeb->list, &c->very_dirty_list);
			} else {
				D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
				list_add_tail(&jeb->list, &c->dirty_list);
			}
		} else {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->clean_list);
		}
		/* This block is filed away; a fresh nextblock is chosen below */
		c->nextblock = jeb = NULL;
	}

	if (!jeb) {
		struct list_head *next;
		/* Take the next block off the 'free' list */

		if (list_empty(&c->free_list)) {

			if (!c->nr_erasing_blocks &&
			    !list_empty(&c->erasable_list)) {
				struct jffs2_eraseblock *ejeb;

				/* Nothing free and nothing erasing: queue an
				   erasable block for erasure to make space */
				ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
				list_del(&ejeb->list);
				list_add_tail(&ejeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_erase_pending_trigger(c);
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Triggering erase of erasable block at 0x%08x\n",
					  ejeb->offset));
			}

			if (!c->nr_erasing_blocks &&
			    !list_empty(&c->erasable_pending_wbuf_list)) {
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
				/* c->nextblock is NULL, no update to c->nextblock allowed */
				spin_unlock(&c->erase_completion_lock);
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				/* Have another go. It'll be on the erasable_list now */
				return -EAGAIN;
			}

			if (!c->nr_erasing_blocks) {
				/* Ouch. We're in GC, or we wouldn't have got here.
				   And there's no space left. At all. */
				printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
				       c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
				       list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
				return -ENOSPC;
			}

			spin_unlock(&c->erase_completion_lock);
			/* Don't wait for it; just erase one right now */
			jffs2_erase_pending_blocks(c, 1);
			spin_lock(&c->erase_completion_lock);

			/* An erase may have failed, decreasing the
			   amount of free space available. So we must
			   restart from the beginning */
			return -EAGAIN;
		}

		next = c->free_list.next;
		list_del(next);
		c->nextblock = jeb = list_entry(next, struct jffs2_eraseblock, list);
		c->nr_free_blocks--;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			/* Accounting for this block looks wrong: complain, then
			   loop back so the top-of-function check re-validates it
			   against its real free_size */
			printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*ofs = jeb->offset + (c->sector_size - jeb->free_size);
	*len = jeb->free_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* Only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero but We've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n", *len, *ofs));
	return 0;
}
287 | |||
288 | /** | ||
289 | * jffs2_add_physical_node_ref - add a physical node reference to the list | ||
290 | * @c: superblock info | ||
291 | * @new: new node reference to add | ||
292 | * @len: length of this physical node | ||
293 | * @dirty: dirty flag for new node | ||
294 | * | ||
295 | * Should only be used to report nodes for which space has been allocated | ||
296 | * by jffs2_reserve_space. | ||
297 | * | ||
298 | * Must be called with the alloc_sem held. | ||
299 | */ | ||
300 | |||
int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new)
{
	struct jffs2_eraseblock *jeb;
	uint32_t len;

	/* Locate the eraseblock the new node lives in, and its total length */
	jeb = &c->blocks[new->flash_offset / c->sector_size];
	len = ref_totlen(c, jeb, new);

	D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", ref_offset(new), ref_flags(new), len));
#if 1
	/* Sanity check: the node must land in c->nextblock, exactly at the
	   current write point (offset + sector_size - free_size). */
	if (jeb != c->nextblock || (ref_offset(new)) != jeb->offset + (c->sector_size - jeb->free_size)) {
		printk(KERN_WARNING "argh. node added in wrong place\n");
		jffs2_free_raw_node_ref(new);
		return -EINVAL;
	}
#endif
	spin_lock(&c->erase_completion_lock);

	/* Link the new ref onto the tail of the block's physical node chain */
	if (!jeb->first_node)
		jeb->first_node = new;
	if (jeb->last_node)
		jeb->last_node->next_phys = new;
	jeb->last_node = new;

	/* Account the node's space: free -> used (or straight to dirty if the
	   ref was already obsolete when it was written). */
	jeb->free_size -= len;
	c->free_size -= len;
	if (ref_obsolete(new)) {
		jeb->dirty_size += len;
		c->dirty_size += len;
	} else {
		jeb->used_size += len;
		c->used_size += len;
	}

	if (!jeb->free_size && !jeb->dirty_size) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		if (jffs2_wbuf_dirty(c)) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	ACCT_SANITY_CHECK(c,jeb);
	D1(ACCT_PARANOIA_CHECK(jeb));

	spin_unlock(&c->erase_completion_lock);

	return 0;
}
356 | |||
357 | |||
/*
 * Finish a space reservation: release c->alloc_sem (paired with the
 * down() in the reserve path) after poking the garbage-collection
 * trigger, in case the space accounting now warrants a GC pass.
 */
void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
	jffs2_garbage_collect_trigger(c);
	up(&c->alloc_sem);
}
364 | |||
365 | static inline int on_list(struct list_head *obj, struct list_head *head) | ||
366 | { | ||
367 | struct list_head *this; | ||
368 | |||
369 | list_for_each(this, head) { | ||
370 | if (this == obj) { | ||
371 | D1(printk("%p is on list at %p\n", obj, head)); | ||
372 | return 1; | ||
373 | |||
374 | } | ||
375 | } | ||
376 | return 0; | ||
377 | } | ||
378 | |||
/*
 * Mark 'ref' obsolete: move its length from used (or unchecked) space to
 * dirty/wasted space, refile its eraseblock onto the appropriate list,
 * and — where the flash allows in-place overwrites — clear the ACCURATE
 * bit in the node header on the medium, unlink the ref from its inode's
 * node list, and coalesce it with adjacent obsolete refs.
 */
void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize;
	size_t retlen;

	if(!ref) {
		printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
		return;
	}
	/* Locate the eraseblock the node lives in */
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];

	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & JFFS2_SB_FLAG_MOUNTING)) {
		/* Hm. This may confuse static lock analysis. If any of the above
		   three conditions is false, we're going to return from this
		   function without actually obliterating any nodes or freeing
		   any jffs2_raw_node_refs. So we don't need to stop erases from
		   happening, or protect against people holding an obsolete
		   jffs2_raw_node_ref without the erase_completion_lock. */
		down(&c->erase_free_sem);
	}

	spin_lock(&c->erase_completion_lock);

	/* Deduct the node's length from the right per-block/global counter:
	   unchecked nodes were never counted as used space. */
	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < ref_totlen(c, jeb, ref))) {
			printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
			       ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
		jeb->unchecked_size -= ref_totlen(c, jeb, ref);
		c->unchecked_size -= ref_totlen(c, jeb, ref);
	} else {
		D1(if (unlikely(jeb->used_size < ref_totlen(c, jeb, ref))) {
			printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
			       ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
		jeb->used_size -= ref_totlen(c, jeb, ref);
		c->used_size -= ref_totlen(c, jeb, ref);
	}

	// Take care, that wasted size is taken into concern
	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + ref_totlen(c, jeb, ref))) && jeb != c->nextblock) {
		D1(printk("Dirtying\n"));
		/* addedsize is the amount of newly-dirty space; the refiling
		   checks below use it to detect threshold crossings. */
		addedsize = ref_totlen(c, jeb, ref);
		jeb->dirty_size += ref_totlen(c, jeb, ref);
		c->dirty_size += ref_totlen(c, jeb, ref);

		/* Convert wasted space to dirty, if not a bad block */
		if (jeb->wasted_size) {
			if (on_list(&jeb->list, &c->bad_used_list)) {
				D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
					  jeb->offset));
				addedsize = 0; /* To fool the refiling code later */
			} else {
				D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
					  jeb->wasted_size, jeb->offset));
				addedsize += jeb->wasted_size;
				jeb->dirty_size += jeb->wasted_size;
				c->dirty_size += jeb->wasted_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->wasted_size = 0;
			}
		}
	} else {
		D1(printk("Wasting\n"));
		addedsize = 0;
		jeb->wasted_size += ref_totlen(c, jeb, ref);
		c->wasted_size += ref_totlen(c, jeb, ref);
	}
	/* Flag the in-core reference itself as obsolete */
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

	ACCT_SANITY_CHECK(c, jeb);

	D1(ACCT_PARANOIA_CHECK(jeb));

	if (c->flags & JFFS2_SB_FLAG_MOUNTING) {
		/* Mount in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* Refile the eraseblock now its accounting has changed */
	if (jeb == c->nextblock) {
		D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		/* Block is now entirely dirty: it can be queued for erasure */
		if (jeb == c->gcblock) {
			D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
			c->gcblock = NULL;
		} else {
			D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
			if (jiffies & 127) {
				/* Most of the time, we just erase it immediately. Otherwise we
				   spend ages scanning it on mount, etc. */
				D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_erase_pending_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		D1(printk(KERN_DEBUG "Done OK\n"));
	} else if (jeb == c->gcblock) {
		D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
		/* This obsoletion pushed the block across the 'dirty' threshold */
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		/* ...or across the 'very dirty' threshold */
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
	}

	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c)) {
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
	   by jffs2_free_all_node_refs() in erase.c. Which is nice. */

	/* Read the node header back, sanity-check it, then rewrite it with
	   the ACCURATE bit cleared so a later scan sees it as obsolete. */
	D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(ref_totlen(c, jeb, ref))) {
		printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), ref_totlen(c, jeb, ref));
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
		goto out_erase_sem;
	}
	/* XXX FIXME: This is ugly now */
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}

	/* Nodes which have been marked obsolete no longer need to be
	   associated with any inode. Remove them from the per-inode list.

	   Note we can't do this for NAND at the moment because we need
	   obsolete dirent nodes to stay on the lists, because of the
	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
	   because we delete the inocache, and on NAND we need that to
	   stay around until all the nodes are actually erased, in order
	   to stop us from giving the same inode number to another newly
	   created inode. */
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		/* Walk the inode's node list to find the pointer to 'ref',
		   then splice it out. */
		ic = jffs2_raw_ref_to_ic(ref);
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		if (ic->nodes == (void *)ic) {
			/* The list is empty (the sentinel points back at the
			   inocache itself): the inocache can go too. */
			D1(printk(KERN_DEBUG "inocache for ino #%u is all gone now. Freeing\n", ic->ino));
			jffs2_del_ino_cache(c, ic);
			jffs2_free_inode_cache(ic);
		}

		spin_unlock(&c->erase_completion_lock);
	}


	/* Merge with the next node in the physical list, if there is one
	   and if it's also obsolete and if it doesn't belong to any inode */
	if (ref->next_phys && ref_obsolete(ref->next_phys) &&
	    !ref->next_phys->next_in_ino) {
		struct jffs2_raw_node_ref *n = ref->next_phys;

		spin_lock(&c->erase_completion_lock);

		ref->__totlen += n->__totlen;
		ref->next_phys = n->next_phys;
		if (jeb->last_node == n) jeb->last_node = ref;
		if (jeb->gc_node == n) {
			/* gc will be happy continuing gc on this node */
			jeb->gc_node=ref;
		}
		spin_unlock(&c->erase_completion_lock);

		jffs2_free_raw_node_ref(n);
	}

	/* Also merge with the previous node in the list, if there is one
	   and that one is obsolete */
	if (ref != jeb->first_node ) {
		struct jffs2_raw_node_ref *p = jeb->first_node;

		spin_lock(&c->erase_completion_lock);

		/* Linear scan: the physical list is singly linked */
		while (p->next_phys != ref)
			p = p->next_phys;

		if (ref_obsolete(p) && !ref->next_in_ino) {
			p->__totlen += ref->__totlen;
			if (jeb->last_node == ref) {
				jeb->last_node = p;
			}
			if (jeb->gc_node == ref) {
				/* gc will be happy continuing gc on this node */
				jeb->gc_node=p;
			}
			p->next_phys = ref->next_phys;
			jffs2_free_raw_node_ref(ref);
		}
		spin_unlock(&c->erase_completion_lock);
	}
 out_erase_sem:
	up(&c->erase_free_sem);
}
650 | |||
#if CONFIG_JFFS2_FS_DEBUG >= 2

/* What summary line, if any, jffs2_dump_block_list() should print after
   dumping a block list. */
enum jffs2_dump_sum {
	DUMP_SUM_NONE,		/* no summary line */
	DUMP_SUM_WASTED,	/* sum jeb->wasted_size, report wasted total/average */
	DUMP_SUM_DIRTY		/* sum jeb->dirty_size, report dirty total/average */
};

/* Dump every eraseblock on @head, prefixing each line with @name, and
   optionally print a total/average summary line.  Output format is
   identical to the previous hand-unrolled per-list loops. */
static void jffs2_dump_block_list(const char *name, struct list_head *head,
				  enum jffs2_dump_sum sum)
{
	struct list_head *this;
	int numblocks = 0;
	uint32_t total = 0;

	if (list_empty(head)) {
		printk(KERN_DEBUG "%s: empty\n", name);
		return;
	}

	list_for_each(this, head) {
		struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);

		numblocks++;
		if (sum == DUMP_SUM_WASTED)
			total += jeb->wasted_size;
		else if (sum == DUMP_SUM_DIRTY)
			total += jeb->dirty_size;

		printk(KERN_DEBUG "%s: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
		       name, jeb->offset, jeb->used_size, jeb->dirty_size,
		       jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
	}

	/* numblocks != 0 here: the empty case returned above, so the
	   division is safe */
	if (sum == DUMP_SUM_WASTED)
		printk(KERN_DEBUG "Contains %d blocks with total wasted size %u, average wasted size: %u\n",
		       numblocks, total, total / numblocks);
	else if (sum == DUMP_SUM_DIRTY)
		printk(KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n",
		       numblocks, total, total / numblocks);
}

/* Dump the global space-accounting counters and every block list of @c.
   Debug-only helper (compiled when CONFIG_JFFS2_FS_DEBUG >= 2). */
void jffs2_dump_block_lists(struct jffs2_sb_info *c)
{
	printk(KERN_DEBUG "jffs2_dump_block_lists:\n");
	printk(KERN_DEBUG "flash_size: %08x\n", c->flash_size);
	printk(KERN_DEBUG "used_size: %08x\n", c->used_size);
	printk(KERN_DEBUG "dirty_size: %08x\n", c->dirty_size);
	printk(KERN_DEBUG "wasted_size: %08x\n", c->wasted_size);
	printk(KERN_DEBUG "unchecked_size: %08x\n", c->unchecked_size);
	printk(KERN_DEBUG "free_size: %08x\n", c->free_size);
	printk(KERN_DEBUG "erasing_size: %08x\n", c->erasing_size);
	printk(KERN_DEBUG "bad_size: %08x\n", c->bad_size);
	printk(KERN_DEBUG "sector_size: %08x\n", c->sector_size);
	printk(KERN_DEBUG "jffs2_reserved_blocks size: %08x\n", c->sector_size * c->resv_blocks_write);

	if (c->nextblock) {
		printk(KERN_DEBUG "nextblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
		       c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size,
		       c->nextblock->wasted_size, c->nextblock->unchecked_size, c->nextblock->free_size);
	} else {
		printk(KERN_DEBUG "nextblock: NULL\n");
	}
	if (c->gcblock) {
		printk(KERN_DEBUG "gcblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
		       c->gcblock->offset, c->gcblock->used_size, c->gcblock->dirty_size,
		       c->gcblock->wasted_size, c->gcblock->unchecked_size, c->gcblock->free_size);
	} else {
		printk(KERN_DEBUG "gcblock: NULL\n");
	}

	jffs2_dump_block_list("clean_list", &c->clean_list, DUMP_SUM_WASTED);
	jffs2_dump_block_list("very_dirty_list", &c->very_dirty_list, DUMP_SUM_DIRTY);
	jffs2_dump_block_list("dirty_list", &c->dirty_list, DUMP_SUM_DIRTY);
	jffs2_dump_block_list("erasable_list", &c->erasable_list, DUMP_SUM_NONE);
	jffs2_dump_block_list("erasing_list", &c->erasing_list, DUMP_SUM_NONE);
	jffs2_dump_block_list("erase_pending_list", &c->erase_pending_list, DUMP_SUM_NONE);
	jffs2_dump_block_list("erasable_pending_wbuf_list", &c->erasable_pending_wbuf_list, DUMP_SUM_NONE);
	jffs2_dump_block_list("free_list", &c->free_list, DUMP_SUM_NONE);
	jffs2_dump_block_list("bad_list", &c->bad_list, DUMP_SUM_NONE);
	jffs2_dump_block_list("bad_used_list", &c->bad_used_list, DUMP_SUM_NONE);
}
#endif /* CONFIG_JFFS2_FS_DEBUG */
808 | |||
809 | int jffs2_thread_should_wake(struct jffs2_sb_info *c) | ||
810 | { | ||
811 | int ret = 0; | ||
812 | uint32_t dirty; | ||
813 | |||
814 | if (c->unchecked_size) { | ||
815 | D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n", | ||
816 | c->unchecked_size, c->checked_ino)); | ||
817 | return 1; | ||
818 | } | ||
819 | |||
820 | /* dirty_size contains blocks on erase_pending_list | ||
821 | * those blocks are counted in c->nr_erasing_blocks. | ||
822 | * If one block is actually erased, it is not longer counted as dirty_space | ||
823 | * but it is counted in c->nr_erasing_blocks, so we add it and subtract it | ||
824 | * with c->nr_erasing_blocks * c->sector_size again. | ||
825 | * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks | ||
826 | * This helps us to force gc and pick eventually a clean block to spread the load. | ||
827 | */ | ||
828 | dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size; | ||
829 | |||
830 | if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger && | ||
831 | (dirty > c->nospc_dirty_size)) | ||
832 | ret = 1; | ||
833 | |||
834 | D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n", | ||
835 | c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no")); | ||
836 | |||
837 | return ret; | ||
838 | } | ||
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h new file mode 100644 index 000000000000..03b0acc37b73 --- /dev/null +++ b/fs/jffs2/os-linux.h | |||
@@ -0,0 +1,217 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2002-2003 Red Hat, Inc. | ||
5 | * | ||
6 | * Created by David Woodhouse <dwmw2@infradead.org> | ||
7 | * | ||
8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
9 | * | ||
10 | * $Id: os-linux.h,v 1.51 2004/11/16 20:36:11 dwmw2 Exp $ | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #ifndef __JFFS2_OS_LINUX_H__ | ||
15 | #define __JFFS2_OS_LINUX_H__ | ||
16 | #include <linux/version.h> | ||
17 | |||
18 | /* JFFS2 uses Linux mode bits natively -- no need for conversion */ | ||
19 | #define os_to_jffs2_mode(x) (x) | ||
20 | #define jffs2_to_os_mode(x) (x) | ||
21 | |||
22 | #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,73) | ||
23 | #define kstatfs statfs | ||
24 | #endif | ||
25 | |||
26 | struct kstatfs; | ||
27 | struct kvec; | ||
28 | |||
29 | #if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,2) | ||
30 | #define JFFS2_INODE_INFO(i) (list_entry(i, struct jffs2_inode_info, vfs_inode)) | ||
31 | #define OFNI_EDONI_2SFFJ(f) (&(f)->vfs_inode) | ||
32 | #define JFFS2_SB_INFO(sb) (sb->s_fs_info) | ||
33 | #define OFNI_BS_2SFFJ(c) ((struct super_block *)c->os_priv) | ||
34 | #elif defined(JFFS2_OUT_OF_KERNEL) | ||
35 | #define JFFS2_INODE_INFO(i) ((struct jffs2_inode_info *) &(i)->u) | ||
36 | #define OFNI_EDONI_2SFFJ(f) ((struct inode *) ( ((char *)f) - ((char *)(&((struct inode *)NULL)->u)) ) ) | ||
37 | #define JFFS2_SB_INFO(sb) ((struct jffs2_sb_info *) &(sb)->u) | ||
38 | #define OFNI_BS_2SFFJ(c) ((struct super_block *) ( ((char *)c) - ((char *)(&((struct super_block *)NULL)->u)) ) ) | ||
39 | #else | ||
40 | #define JFFS2_INODE_INFO(i) (&i->u.jffs2_i) | ||
41 | #define OFNI_EDONI_2SFFJ(f) ((struct inode *) ( ((char *)f) - ((char *)(&((struct inode *)NULL)->u)) ) ) | ||
42 | #define JFFS2_SB_INFO(sb) (&sb->u.jffs2_sb) | ||
43 | #define OFNI_BS_2SFFJ(c) ((struct super_block *) ( ((char *)c) - ((char *)(&((struct super_block *)NULL)->u)) ) ) | ||
44 | #endif | ||
45 | |||
46 | |||
47 | #define JFFS2_F_I_SIZE(f) (OFNI_EDONI_2SFFJ(f)->i_size) | ||
48 | #define JFFS2_F_I_MODE(f) (OFNI_EDONI_2SFFJ(f)->i_mode) | ||
49 | #define JFFS2_F_I_UID(f) (OFNI_EDONI_2SFFJ(f)->i_uid) | ||
50 | #define JFFS2_F_I_GID(f) (OFNI_EDONI_2SFFJ(f)->i_gid) | ||
51 | |||
52 | #if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,1) | ||
53 | #define JFFS2_F_I_RDEV_MIN(f) (iminor(OFNI_EDONI_2SFFJ(f))) | ||
54 | #define JFFS2_F_I_RDEV_MAJ(f) (imajor(OFNI_EDONI_2SFFJ(f))) | ||
55 | #else | ||
56 | #define JFFS2_F_I_RDEV_MIN(f) (MINOR(to_kdev_t(OFNI_EDONI_2SFFJ(f)->i_rdev))) | ||
57 | #define JFFS2_F_I_RDEV_MAJ(f) (MAJOR(to_kdev_t(OFNI_EDONI_2SFFJ(f)->i_rdev))) | ||
58 | #endif | ||
59 | |||
60 | /* Urgh. The things we do to keep the 2.4 build working */ | ||
61 | #if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,47) | ||
62 | #define ITIME(sec) ((struct timespec){sec, 0}) | ||
63 | #define I_SEC(tv) ((tv).tv_sec) | ||
64 | #define JFFS2_F_I_CTIME(f) (OFNI_EDONI_2SFFJ(f)->i_ctime.tv_sec) | ||
65 | #define JFFS2_F_I_MTIME(f) (OFNI_EDONI_2SFFJ(f)->i_mtime.tv_sec) | ||
66 | #define JFFS2_F_I_ATIME(f) (OFNI_EDONI_2SFFJ(f)->i_atime.tv_sec) | ||
67 | #else | ||
68 | #define ITIME(x) (x) | ||
69 | #define I_SEC(x) (x) | ||
70 | #define JFFS2_F_I_CTIME(f) (OFNI_EDONI_2SFFJ(f)->i_ctime) | ||
71 | #define JFFS2_F_I_MTIME(f) (OFNI_EDONI_2SFFJ(f)->i_mtime) | ||
72 | #define JFFS2_F_I_ATIME(f) (OFNI_EDONI_2SFFJ(f)->i_atime) | ||
73 | #endif | ||
74 | |||
/* Sleep uninterruptibly on wait queue @wq, dropping spinlock @s only
 * AFTER we are already on the queue, so a wakeup arriving between the
 * unlock and schedule() cannot be lost. The caller must re-take the
 * lock afterwards if it needs it. */
#define sleep_on_spinunlock(wq, s) \
	do { \
		DECLARE_WAITQUEUE(__wait, current); \
		add_wait_queue((wq), &__wait); \
		set_current_state(TASK_UNINTERRUPTIBLE); \
		spin_unlock(s); \
		schedule(); \
		remove_wait_queue((wq), &__wait); \
	} while(0)
84 | |||
/* Reset the in-core JFFS2 inode state before (re)reading an inode.
 * On 2.5.3+ kernels jffs2_inode_info is embedded in the VFS inode and
 * slab-constructed, so only the JFFS2-specific fields are cleared; on
 * older kernels the structure lives in inode->u, so the whole thing is
 * wiped and its semaphore initialised locked from scratch. */
static inline void jffs2_init_inode_info(struct jffs2_inode_info *f)
{
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,2)
	f->highest_version = 0;
	f->fragtree = RB_ROOT;	/* empty red-black tree of data frags */
	f->metadata = NULL;
	f->dents = NULL;
	f->flags = 0;
	f->usercompr = 0;
#else
	memset(f, 0, sizeof(*f));
	init_MUTEX_LOCKED(&f->sem);
#endif
}
99 | |||
100 | #define jffs2_is_readonly(c) (OFNI_BS_2SFFJ(c)->s_flags & MS_RDONLY) | ||
101 | |||
102 | #if (!defined CONFIG_JFFS2_FS_NAND && !defined CONFIG_JFFS2_FS_NOR_ECC) | ||
103 | #define jffs2_can_mark_obsolete(c) (1) | ||
104 | #define jffs2_cleanmarker_oob(c) (0) | ||
105 | #define jffs2_write_nand_cleanmarker(c,jeb) (-EIO) | ||
106 | |||
107 | #define jffs2_flash_write(c, ofs, len, retlen, buf) ((c)->mtd->write((c)->mtd, ofs, len, retlen, buf)) | ||
108 | #define jffs2_flash_read(c, ofs, len, retlen, buf) ((c)->mtd->read((c)->mtd, ofs, len, retlen, buf)) | ||
109 | #define jffs2_flush_wbuf_pad(c) ({ (void)(c), 0; }) | ||
110 | #define jffs2_flush_wbuf_gc(c, i) ({ (void)(c), (void) i, 0; }) | ||
111 | #define jffs2_write_nand_badblock(c,jeb,bad_offset) (1) | ||
112 | #define jffs2_nand_flash_setup(c) (0) | ||
113 | #define jffs2_nand_flash_cleanup(c) do {} while(0) | ||
114 | #define jffs2_wbuf_dirty(c) (0) | ||
115 | #define jffs2_flash_writev(a,b,c,d,e,f) jffs2_flash_direct_writev(a,b,c,d,e) | ||
116 | #define jffs2_wbuf_timeout NULL | ||
117 | #define jffs2_wbuf_process NULL | ||
118 | #define jffs2_nor_ecc(c) (0) | ||
119 | #define jffs2_nor_ecc_flash_setup(c) (0) | ||
120 | #define jffs2_nor_ecc_flash_cleanup(c) do {} while (0) | ||
121 | |||
122 | #else /* NAND and/or ECC'd NOR support present */ | ||
123 | |||
124 | #define jffs2_can_mark_obsolete(c) ((c->mtd->type == MTD_NORFLASH && !(c->mtd->flags & MTD_ECC)) || c->mtd->type == MTD_RAM) | ||
125 | #define jffs2_cleanmarker_oob(c) (c->mtd->type == MTD_NANDFLASH) | ||
126 | |||
127 | #define jffs2_flash_write_oob(c, ofs, len, retlen, buf) ((c)->mtd->write_oob((c)->mtd, ofs, len, retlen, buf)) | ||
128 | #define jffs2_flash_read_oob(c, ofs, len, retlen, buf) ((c)->mtd->read_oob((c)->mtd, ofs, len, retlen, buf)) | ||
129 | #define jffs2_wbuf_dirty(c) (!!(c)->wbuf_len) | ||
130 | |||
131 | /* wbuf.c */ | ||
132 | int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *vecs, unsigned long count, loff_t to, size_t *retlen, uint32_t ino); | ||
133 | int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, const u_char *buf); | ||
134 | int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf); | ||
135 | int jffs2_check_oob_empty(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,int mode); | ||
136 | int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); | ||
137 | int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); | ||
138 | int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset); | ||
139 | void jffs2_wbuf_timeout(unsigned long data); | ||
140 | void jffs2_wbuf_process(void *data); | ||
141 | int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino); | ||
142 | int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c); | ||
143 | int jffs2_nand_flash_setup(struct jffs2_sb_info *c); | ||
144 | void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c); | ||
145 | #ifdef CONFIG_JFFS2_FS_NOR_ECC | ||
146 | #define jffs2_nor_ecc(c) (c->mtd->type == MTD_NORFLASH && (c->mtd->flags & MTD_ECC)) | ||
147 | int jffs2_nor_ecc_flash_setup(struct jffs2_sb_info *c); | ||
148 | void jffs2_nor_ecc_flash_cleanup(struct jffs2_sb_info *c); | ||
149 | #else | ||
150 | #define jffs2_nor_ecc(c) (0) | ||
151 | #define jffs2_nor_ecc_flash_setup(c) (0) | ||
152 | #define jffs2_nor_ecc_flash_cleanup(c) do {} while (0) | ||
153 | #endif /* NOR ECC */ | ||
154 | #endif /* NAND */ | ||
155 | |||
156 | /* erase.c */ | ||
/* Kick off pending block erases: marking the superblock dirty makes the
 * VFS call jffs2_write_super() soon, which processes the erase list. */
static inline void jffs2_erase_pending_trigger(struct jffs2_sb_info *c)
{
	OFNI_BS_2SFFJ(c)->s_dirt = 1;
}
161 | |||
162 | /* background.c */ | ||
163 | int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c); | ||
164 | void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c); | ||
165 | void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c); | ||
166 | |||
167 | /* dir.c */ | ||
168 | extern struct file_operations jffs2_dir_operations; | ||
169 | extern struct inode_operations jffs2_dir_inode_operations; | ||
170 | |||
171 | /* file.c */ | ||
172 | extern struct file_operations jffs2_file_operations; | ||
173 | extern struct inode_operations jffs2_file_inode_operations; | ||
174 | extern struct address_space_operations jffs2_file_address_operations; | ||
175 | int jffs2_fsync(struct file *, struct dentry *, int); | ||
176 | int jffs2_do_readpage_unlock (struct inode *inode, struct page *pg); | ||
177 | |||
178 | /* ioctl.c */ | ||
179 | int jffs2_ioctl(struct inode *, struct file *, unsigned int, unsigned long); | ||
180 | |||
181 | /* symlink.c */ | ||
182 | extern struct inode_operations jffs2_symlink_inode_operations; | ||
183 | |||
184 | /* fs.c */ | ||
185 | int jffs2_setattr (struct dentry *, struct iattr *); | ||
186 | void jffs2_read_inode (struct inode *); | ||
187 | void jffs2_clear_inode (struct inode *); | ||
188 | void jffs2_dirty_inode(struct inode *inode); | ||
189 | struct inode *jffs2_new_inode (struct inode *dir_i, int mode, | ||
190 | struct jffs2_raw_inode *ri); | ||
191 | int jffs2_statfs (struct super_block *, struct kstatfs *); | ||
192 | void jffs2_write_super (struct super_block *); | ||
193 | int jffs2_remount_fs (struct super_block *, int *, char *); | ||
194 | int jffs2_do_fill_super(struct super_block *sb, void *data, int silent); | ||
195 | void jffs2_gc_release_inode(struct jffs2_sb_info *c, | ||
196 | struct jffs2_inode_info *f); | ||
197 | struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c, | ||
198 | int inum, int nlink); | ||
199 | |||
200 | unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c, | ||
201 | struct jffs2_inode_info *f, | ||
202 | unsigned long offset, | ||
203 | unsigned long *priv); | ||
204 | void jffs2_gc_release_page(struct jffs2_sb_info *c, | ||
205 | unsigned char *pg, | ||
206 | unsigned long *priv); | ||
207 | void jffs2_flash_cleanup(struct jffs2_sb_info *c); | ||
208 | |||
209 | |||
210 | /* writev.c */ | ||
211 | int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs, | ||
212 | unsigned long count, loff_t to, size_t *retlen); | ||
213 | |||
214 | |||
215 | #endif /* __JFFS2_OS_LINUX_H__ */ | ||
216 | |||
217 | |||
diff --git a/fs/jffs2/pushpull.h b/fs/jffs2/pushpull.h new file mode 100644 index 000000000000..c0c2a9158dff --- /dev/null +++ b/fs/jffs2/pushpull.h | |||
@@ -0,0 +1,72 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2001, 2002 Red Hat, Inc. | ||
5 | * | ||
6 | * Created by David Woodhouse <dwmw2@infradead.org> | ||
7 | * | ||
8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
9 | * | ||
10 | * $Id: pushpull.h,v 1.10 2004/11/16 20:36:11 dwmw2 Exp $ | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #ifndef __PUSHPULL_H__ | ||
15 | #define __PUSHPULL_H__ | ||
16 | |||
17 | #include <linux/errno.h> | ||
18 | |||
/* A simple bit-oriented push/pull buffer, used by the JFFS2 compressors.
 * All offsets and lengths are measured in *bits*; bit 0 is the most
 * significant bit of buf[0]. */
struct pushpull {
	unsigned char *buf;	/* backing storage */
	unsigned int buflen;	/* total capacity, in bits */
	unsigned int ofs;	/* current bit position */
	unsigned int reserve;	/* trailing bits held back unless explicitly used */
};


/* Initialise @pp over @buf. @buflen, @ofs and @reserve are in bits. */
static inline void init_pushpull(struct pushpull *pp, char *buf, unsigned buflen, unsigned ofs, unsigned reserve)
{
	/* Explicit cast: callers pass a char buffer but we address it as
	   unsigned bytes. Without the cast this assignment is an
	   incompatible-pointer-types constraint violation. */
	pp->buf = (unsigned char *)buf;
	pp->buflen = buflen;
	pp->ofs = ofs;
	pp->reserve = reserve;
}

/* Append one bit. Returns 0 on success, or -ENOSPC when the buffer
 * (minus the reserve, unless @use_reserved is set) is exhausted. */
static inline int pushbit(struct pushpull *pp, int bit, int use_reserved)
{
	if (pp->ofs >= pp->buflen - (use_reserved?0:pp->reserve)) {
		return -ENOSPC;
	}

	if (bit) {
		pp->buf[pp->ofs >> 3] |= (1<<(7-(pp->ofs &7)));
	}
	else {
		pp->buf[pp->ofs >> 3] &= ~(1<<(7-(pp->ofs &7)));
	}
	pp->ofs++;

	return 0;
}

/* Number of bits pushed so far. */
static inline int pushedbits(struct pushpull *pp)
{
	return pp->ofs;
}

/* Read the next bit (0 or 1) and advance. No bounds check: the caller
 * must not pull past the bits previously pushed. */
static inline int pullbit(struct pushpull *pp)
{
	int bit;

	bit = (pp->buf[pp->ofs >> 3] >> (7-(pp->ofs & 7))) & 1;

	pp->ofs++;
	return bit;
}

/* Number of bits pulled so far. */
static inline int pulledbits(struct pushpull *pp)
{
	return pp->ofs;
}
71 | |||
72 | #endif /* __PUSHPULL_H__ */ | ||
diff --git a/fs/jffs2/read.c b/fs/jffs2/read.c new file mode 100644 index 000000000000..eb493dc06db7 --- /dev/null +++ b/fs/jffs2/read.c | |||
@@ -0,0 +1,246 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2001-2003 Red Hat, Inc. | ||
5 | * | ||
6 | * Created by David Woodhouse <dwmw2@infradead.org> | ||
7 | * | ||
8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
9 | * | ||
10 | * $Id: read.c,v 1.38 2004/11/16 20:36:12 dwmw2 Exp $ | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/crc32.h> | ||
17 | #include <linux/pagemap.h> | ||
18 | #include <linux/mtd/mtd.h> | ||
19 | #include <linux/compiler.h> | ||
20 | #include "nodelist.h" | ||
21 | #include "compr.h" | ||
22 | |||
/* Read part or all of one data node's payload from flash into @buf.
 *
 * @fd:  the full_dnode to read
 * @buf: destination, receives @len uncompressed bytes
 * @ofs: offset within the node's *uncompressed* data to start from
 * @len: number of uncompressed bytes wanted
 *
 * Re-reads the raw inode header from flash, verifies the header CRC,
 * then reads the data area, verifies the data CRC and decompresses if
 * necessary. Returns 0 or a negative errno.
 */
int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
		     struct jffs2_full_dnode *fd, unsigned char *buf,
		     int ofs, int len)
{
	struct jffs2_raw_inode *ri;
	size_t readlen;
	uint32_t crc;
	unsigned char *decomprbuf = NULL;
	unsigned char *readbuf = NULL;
	int ret = 0;

	ri = jffs2_alloc_raw_inode();
	if (!ri)
		return -ENOMEM;

	/* Re-read the node header from the medium */
	ret = jffs2_flash_read(c, ref_offset(fd->raw), sizeof(*ri), &readlen, (char *)ri);
	if (ret) {
		jffs2_free_raw_inode(ri);
		printk(KERN_WARNING "Error reading node from 0x%08x: %d\n", ref_offset(fd->raw), ret);
		return ret;
	}
	if (readlen != sizeof(*ri)) {
		jffs2_free_raw_inode(ri);
		printk(KERN_WARNING "Short read from 0x%08x: wanted 0x%zx bytes, got 0x%zx\n",
		       ref_offset(fd->raw), sizeof(*ri), readlen);
		return -EIO;
	}
	/* Header CRC covers everything but the last 8 bytes (the two CRC words) */
	crc = crc32(0, ri, sizeof(*ri)-8);

	D1(printk(KERN_DEBUG "Node read from %08x: node_crc %08x, calculated CRC %08x. dsize %x, csize %x, offset %x, buf %p\n",
		  ref_offset(fd->raw), je32_to_cpu(ri->node_crc),
		  crc, je32_to_cpu(ri->dsize), je32_to_cpu(ri->csize),
		  je32_to_cpu(ri->offset), buf));
	if (crc != je32_to_cpu(ri->node_crc)) {
		printk(KERN_WARNING "Node CRC %08x != calculated CRC %08x for node at %08x\n",
		       je32_to_cpu(ri->node_crc), crc, ref_offset(fd->raw));
		ret = -EIO;
		goto out_ri;
	}
	/* There was a bug where we wrote hole nodes out with csize/dsize
	   swapped. Deal with it */
	if (ri->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(ri->dsize) &&
	    je32_to_cpu(ri->csize)) {
		ri->dsize = ri->csize;
		ri->csize = cpu_to_je32(0);
	}

	/* Debug-only sanity check: the caller must stay inside the node */
	D1(if(ofs + len > je32_to_cpu(ri->dsize)) {
		printk(KERN_WARNING "jffs2_read_dnode() asked for %d bytes at %d from %d-byte node\n",
		       len, ofs, je32_to_cpu(ri->dsize));
		ret = -EINVAL;
		goto out_ri;
	});


	/* Hole node: there is no data on the medium, just zero-fill */
	if (ri->compr == JFFS2_COMPR_ZERO) {
		memset(buf, 0, len);
		goto out_ri;
	}

	/* Cases:
	   Reading whole node and it's uncompressed - read directly to buffer provided, check CRC.
	   Reading whole node and it's compressed - read into comprbuf, check CRC and decompress to buffer provided
	   Reading partial node and it's uncompressed - read into readbuf, check CRC, and copy
	   Reading partial node and it's compressed - read into readbuf, check checksum, decompress to decomprbuf and copy
	*/
	if (ri->compr == JFFS2_COMPR_NONE && len == je32_to_cpu(ri->dsize)) {
		readbuf = buf;
	} else {
		readbuf = kmalloc(je32_to_cpu(ri->csize), GFP_KERNEL);
		if (!readbuf) {
			ret = -ENOMEM;
			goto out_ri;
		}
	}
	if (ri->compr != JFFS2_COMPR_NONE) {
		if (len < je32_to_cpu(ri->dsize)) {
			decomprbuf = kmalloc(je32_to_cpu(ri->dsize), GFP_KERNEL);
			if (!decomprbuf) {
				ret = -ENOMEM;
				goto out_readbuf;
			}
		} else {
			decomprbuf = buf;
		}
	} else {
		/* Uncompressed: decompression output aliases the raw read */
		decomprbuf = readbuf;
	}

	D2(printk(KERN_DEBUG "Read %d bytes to %p\n", je32_to_cpu(ri->csize),
		  readbuf));
	/* Data area starts immediately after the raw inode header */
	ret = jffs2_flash_read(c, (ref_offset(fd->raw)) + sizeof(*ri),
			       je32_to_cpu(ri->csize), &readlen, readbuf);

	if (!ret && readlen != je32_to_cpu(ri->csize))
		ret = -EIO;
	if (ret)
		goto out_decomprbuf;

	crc = crc32(0, readbuf, je32_to_cpu(ri->csize));
	if (crc != je32_to_cpu(ri->data_crc)) {
		printk(KERN_WARNING "Data CRC %08x != calculated CRC %08x for node at %08x\n",
		       je32_to_cpu(ri->data_crc), crc, ref_offset(fd->raw));
		ret = -EIO;
		goto out_decomprbuf;
	}
	D2(printk(KERN_DEBUG "Data CRC matches calculated CRC %08x\n", crc));
	if (ri->compr != JFFS2_COMPR_NONE) {
		D2(printk(KERN_DEBUG "Decompress %d bytes from %p to %d bytes at %p\n",
			  je32_to_cpu(ri->csize), readbuf, je32_to_cpu(ri->dsize), decomprbuf));
		ret = jffs2_decompress(c, f, ri->compr | (ri->usercompr << 8), readbuf, decomprbuf, je32_to_cpu(ri->csize), je32_to_cpu(ri->dsize));
		if (ret) {
			printk(KERN_WARNING "Error: jffs2_decompress returned %d\n", ret);
			goto out_decomprbuf;
		}
	}

	/* Partial read: copy only the requested window to the caller */
	if (len < je32_to_cpu(ri->dsize)) {
		memcpy(buf, decomprbuf+ofs, len);
	}
	/* Free only the buffers we allocated; either may alias @buf or
	   each other depending on the case taken above. */
 out_decomprbuf:
	if(decomprbuf != buf && decomprbuf != readbuf)
		kfree(decomprbuf);
 out_readbuf:
	if(readbuf != buf)
		kfree(readbuf);
 out_ri:
	jffs2_free_raw_inode(ri);

	return ret;
}
154 | |||
/* Fill @buf with @len bytes of inode @f's data starting at file offset
 * @offset, by walking the fragment tree. Regions covered by hole frags
 * (or, abnormally, by no frag at all) are zero-filled. Returns 0, or
 * the first node read error — in which case the failed region is zeroed
 * but earlier data already copied into @buf remains valid. */
int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
			   unsigned char *buf, uint32_t offset, uint32_t len)
{
	uint32_t end = offset + len;
	struct jffs2_node_frag *frag;
	int ret;

	D1(printk(KERN_DEBUG "jffs2_read_inode_range: ino #%u, range 0x%08x-0x%08x\n",
		  f->inocache->ino, offset, offset+len));

	frag = jffs2_lookup_node_frag(&f->fragtree, offset);

	/* XXX FIXME: Where a single physical node actually shows up in two
	   frags, we read it twice. Don't do that. */
	/* Now we're pointing at the first frag which overlaps our page */
	while(offset < end) {
		D2(printk(KERN_DEBUG "jffs2_read_inode_range: offset %d, end %d\n", offset, end));
		if (unlikely(!frag || frag->ofs > offset)) {
			/* No frag covers this offset: a gap in the tree itself
			   (shouldn't normally happen). Zero-fill up to the next
			   frag, or to the end of the request. */
			uint32_t holesize = end - offset;
			if (frag) {
				D1(printk(KERN_NOTICE "Eep. Hole in ino #%u fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", f->inocache->ino, frag->ofs, offset));
				holesize = min(holesize, frag->ofs - offset);
				D2(jffs2_print_frag_list(f));
			}
			D1(printk(KERN_DEBUG "Filling non-frag hole from %d-%d\n", offset, offset+holesize));
			memset(buf, 0, holesize);
			buf += holesize;
			offset += holesize;
			continue;
		} else if (unlikely(!frag->node)) {
			/* A hole frag (no backing node): zero-fill its extent */
			uint32_t holeend = min(end, frag->ofs + frag->size);
			D1(printk(KERN_DEBUG "Filling frag hole from %d-%d (frag 0x%x 0x%x)\n", offset, holeend, frag->ofs, frag->ofs + frag->size));
			memset(buf, 0, holeend - offset);
			buf += holeend - offset;
			offset = holeend;
			frag = frag_next(frag);
			continue;
		} else {
			/* Real data: read the overlapping part of this frag's node */
			uint32_t readlen;
			uint32_t fragofs; /* offset within the frag to start reading */

			fragofs = offset - frag->ofs;
			readlen = min(frag->size - fragofs, end - offset);
			D1(printk(KERN_DEBUG "Reading %d-%d from node at 0x%08x (%d)\n",
				  frag->ofs+fragofs, frag->ofs+fragofs+readlen,
				  ref_offset(frag->node->raw), ref_flags(frag->node->raw)));
			/* The frag may map only part of its node, hence the
			   frag->ofs - frag->node->ofs adjustment */
			ret = jffs2_read_dnode(c, f, frag->node, buf, fragofs + frag->ofs - frag->node->ofs, readlen);
			D2(printk(KERN_DEBUG "node read done\n"));
			if (ret) {
				D1(printk(KERN_DEBUG"jffs2_read_inode_range error %d\n",ret));
				memset(buf, 0, readlen);
				return ret;
			}
			buf += readlen;
			offset += readlen;
			frag = frag_next(frag);
			D2(printk(KERN_DEBUG "node read was OK. Looping\n"));
		}
	}
	return 0;
}
216 | |||
217 | /* Core function to read symlink target. */ | ||
218 | char *jffs2_getlink(struct jffs2_sb_info *c, struct jffs2_inode_info *f) | ||
219 | { | ||
220 | char *buf; | ||
221 | int ret; | ||
222 | |||
223 | down(&f->sem); | ||
224 | |||
225 | if (!f->metadata) { | ||
226 | printk(KERN_NOTICE "No metadata for symlink inode #%u\n", f->inocache->ino); | ||
227 | up(&f->sem); | ||
228 | return ERR_PTR(-EINVAL); | ||
229 | } | ||
230 | buf = kmalloc(f->metadata->size+1, GFP_USER); | ||
231 | if (!buf) { | ||
232 | up(&f->sem); | ||
233 | return ERR_PTR(-ENOMEM); | ||
234 | } | ||
235 | buf[f->metadata->size]=0; | ||
236 | |||
237 | ret = jffs2_read_dnode(c, f, f->metadata, buf, 0, f->metadata->size); | ||
238 | |||
239 | up(&f->sem); | ||
240 | |||
241 | if (ret) { | ||
242 | kfree(buf); | ||
243 | return ERR_PTR(ret); | ||
244 | } | ||
245 | return buf; | ||
246 | } | ||
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c new file mode 100644 index 000000000000..aca4a0b17bcd --- /dev/null +++ b/fs/jffs2/readinode.c | |||
@@ -0,0 +1,695 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2001-2003 Red Hat, Inc. | ||
5 | * | ||
6 | * Created by David Woodhouse <dwmw2@infradead.org> | ||
7 | * | ||
8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
9 | * | ||
10 | * $Id: readinode.c,v 1.117 2004/11/20 18:06:54 dwmw2 Exp $ | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/fs.h> | ||
17 | #include <linux/crc32.h> | ||
18 | #include <linux/pagemap.h> | ||
19 | #include <linux/mtd/mtd.h> | ||
20 | #include <linux/compiler.h> | ||
21 | #include "nodelist.h" | ||
22 | |||
23 | static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *list, struct jffs2_node_frag *newfrag); | ||
24 | |||
25 | #if CONFIG_JFFS2_FS_DEBUG >= 2 | ||
/* Debug (level 2) dump of every fragment in @list, in ascending offset
 * order. Also checks that the frags tile the file contiguously; if they
 * do not (a gap in the tree itself, distinct from an explicit hole
 * frag) and @permitbug is not set, BUG out. */
static void jffs2_print_fragtree(struct rb_root *list, int permitbug)
{
	struct jffs2_node_frag *this = frag_first(list);
	uint32_t lastofs = 0;
	int buggy = 0;

	while(this) {
		if (this->node)
			printk(KERN_DEBUG "frag %04x-%04x: 0x%08x(%d) on flash (*%p). left (%p), right (%p), parent (%p)\n",
			       this->ofs, this->ofs+this->size, ref_offset(this->node->raw), ref_flags(this->node->raw),
			       this, frag_left(this), frag_right(this), frag_parent(this));
		else
			printk(KERN_DEBUG "frag %04x-%04x: hole (*%p). left (%p} right (%p), parent (%p)\n", this->ofs,
			       this->ofs+this->size, this, frag_left(this), frag_right(this), frag_parent(this));
		/* Each frag must begin exactly where the previous one ended */
		if (this->ofs != lastofs)
			buggy = 1;
		lastofs = this->ofs+this->size;
		this = frag_next(this);
	}
	if (buggy && !permitbug) {
		printk(KERN_CRIT "Frag tree got a hole in it\n");
		BUG();
	}
}
50 | |||
/* Debug dump of inode @f's entire fragment tree, plus the flash address
 * of its metadata node if one exists. */
void jffs2_print_frag_list(struct jffs2_inode_info *f)
{
	jffs2_print_fragtree(&f->fragtree, 0);

	if (f->metadata) {
		printk(KERN_DEBUG "metadata at 0x%08x\n", ref_offset(f->metadata->raw));
	}
}
59 | #endif | ||
60 | |||
61 | #if CONFIG_JFFS2_FS_DEBUG >= 1 | ||
/* Debug (level 1) sanity check of @f's fragment tree, focused on the
 * invariants of REF_PRISTINE nodes: a pristine node must be covered by
 * exactly one frag and must not share a page with a neighbouring
 * non-hole frag (in either case GC should already have demoted it to
 * REF_NORMAL). On any violation the whole frag list is dumped.
 * Returns nonzero if something looked wrong. */
static int jffs2_sanitycheck_fragtree(struct jffs2_inode_info *f)
{
	struct jffs2_node_frag *frag;
	int bitched = 0;

	for (frag = frag_first(&f->fragtree); frag; frag = frag_next(frag)) {

		struct jffs2_full_dnode *fn = frag->node;
		/* Hole frags and node-less entries have nothing to check */
		if (!fn || !fn->raw)
			continue;

		if (ref_flags(fn->raw) == REF_PRISTINE) {

			/* A pristine node must map to exactly one frag */
			if (fn->frags > 1) {
				printk(KERN_WARNING "REF_PRISTINE node at 0x%08x had %d frags. Tell dwmw2\n", ref_offset(fn->raw), fn->frags);
				bitched = 1;
			}
			/* A hole node which isn't multi-page should be garbage-collected
			   and merged anyway, so we just check for the frag size here,
			   rather than mucking around with actually reading the node
			   and checking the compression type, which is the real way
			   to tell a hole node. */
			if (frag->ofs & (PAGE_CACHE_SIZE-1) && frag_prev(frag) && frag_prev(frag)->size < PAGE_CACHE_SIZE && frag_prev(frag)->node) {
				printk(KERN_WARNING "REF_PRISTINE node at 0x%08x had a previous non-hole frag in the same page. Tell dwmw2\n",
				       ref_offset(fn->raw));
				bitched = 1;
			}

			if ((frag->ofs+frag->size) & (PAGE_CACHE_SIZE-1) && frag_next(frag) && frag_next(frag)->size < PAGE_CACHE_SIZE && frag_next(frag)->node) {
				printk(KERN_WARNING "REF_PRISTINE node at 0x%08x (%08x-%08x) had a following non-hole frag in the same page. Tell dwmw2\n",
				       ref_offset(fn->raw), frag->ofs, frag->ofs+frag->size);
				bitched = 1;
			}
		}
	}

	/* Something failed: dump every frag so the breakage can be diagnosed */
	if (bitched) {
		struct jffs2_node_frag *thisfrag;

		printk(KERN_WARNING "Inode is #%u\n", f->inocache->ino);
		thisfrag = frag_first(&f->fragtree);
		while (thisfrag) {
			if (!thisfrag->node) {
				printk("Frag @0x%x-0x%x; node-less hole\n",
				       thisfrag->ofs, thisfrag->size + thisfrag->ofs);
			} else if (!thisfrag->node->raw) {
				printk("Frag @0x%x-0x%x; raw-less hole\n",
				       thisfrag->ofs, thisfrag->size + thisfrag->ofs);
			} else {
				printk("Frag @0x%x-0x%x; raw at 0x%08x(%d) (0x%x-0x%x)\n",
				       thisfrag->ofs, thisfrag->size + thisfrag->ofs,
				       ref_offset(thisfrag->node->raw), ref_flags(thisfrag->node->raw),
				       thisfrag->node->ofs, thisfrag->node->ofs+thisfrag->node->size);
			}
			thisfrag = frag_next(thisfrag);
		}
	}
	return bitched;
}
121 | #endif /* D1 */ | ||
122 | |||
123 | static void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *this) | ||
124 | { | ||
125 | if (this->node) { | ||
126 | this->node->frags--; | ||
127 | if (!this->node->frags) { | ||
128 | /* The node has no valid frags left. It's totally obsoleted */ | ||
129 | D2(printk(KERN_DEBUG "Marking old node @0x%08x (0x%04x-0x%04x) obsolete\n", | ||
130 | ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size)); | ||
131 | jffs2_mark_node_obsolete(c, this->node->raw); | ||
132 | jffs2_free_full_dnode(this->node); | ||
133 | } else { | ||
134 | D2(printk(KERN_DEBUG "Marking old node @0x%08x (0x%04x-0x%04x) REF_NORMAL. frags is %d\n", | ||
135 | ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size, | ||
136 | this->node->frags)); | ||
137 | mark_ref_normal(this->node->raw); | ||
138 | } | ||
139 | |||
140 | } | ||
141 | jffs2_free_node_frag(this); | ||
142 | } | ||
143 | |||
/* Given an inode, probably with existing list of fragments, add the new node
 * to the fragment list. On success the frag tree takes ownership of @fn
 * (its frag refcount is set to 1); zero-length nodes are discarded with
 * success. Nodes that end up sharing a page with a neighbour are demoted
 * to REF_NORMAL so GC will consider merging them. Returns 0 or -errno.
 */
int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn)
{
	int ret;
	struct jffs2_node_frag *newfrag;

	D1(printk(KERN_DEBUG "jffs2_add_full_dnode_to_inode(ino #%u, f %p, fn %p)\n", f->inocache->ino, f, fn));

	newfrag = jffs2_alloc_node_frag();
	if (unlikely(!newfrag))
		return -ENOMEM;

	D2(printk(KERN_DEBUG "adding node %04x-%04x @0x%08x on flash, newfrag *%p\n",
		  fn->ofs, fn->ofs+fn->size, ref_offset(fn->raw), newfrag));

	/* Zero-sized node: nothing to map into the tree */
	if (unlikely(!fn->size)) {
		jffs2_free_node_frag(newfrag);
		return 0;
	}

	newfrag->ofs = fn->ofs;
	newfrag->size = fn->size;
	newfrag->node = fn;
	newfrag->node->frags = 1;

	ret = jffs2_add_frag_to_fragtree(c, &f->fragtree, newfrag);
	if (ret)
		return ret;

	/* If we now share a page with other nodes, mark either previous
	   or next node REF_NORMAL, as appropriate. */
	if (newfrag->ofs & (PAGE_CACHE_SIZE-1)) {
		struct jffs2_node_frag *prev = frag_prev(newfrag);

		mark_ref_normal(fn->raw);
		/* If we don't start at zero there's _always_ a previous */
		if (prev->node)
			mark_ref_normal(prev->node->raw);
	}

	if ((newfrag->ofs+newfrag->size) & (PAGE_CACHE_SIZE-1)) {
		struct jffs2_node_frag *next = frag_next(newfrag);

		if (next) {
			mark_ref_normal(fn->raw);
			if (next->node)
				mark_ref_normal(next->node->raw);
		}
	}
	/* Debug-only consistency check; dump context if it complains */
	D2(if (jffs2_sanitycheck_fragtree(f)) {
		   printk(KERN_WARNING "Just added node %04x-%04x @0x%08x on flash, newfrag *%p\n",
			  fn->ofs, fn->ofs+fn->size, ref_offset(fn->raw), newfrag);
		   return 0;
	   })
	D2(jffs2_print_frag_list(f));
	return 0;
}
203 | |||
204 | /* Doesn't set inode->i_size */ | ||
205 | static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *list, struct jffs2_node_frag *newfrag) | ||
206 | { | ||
207 | struct jffs2_node_frag *this; | ||
208 | uint32_t lastend; | ||
209 | |||
210 | /* Skip all the nodes which are completed before this one starts */ | ||
211 | this = jffs2_lookup_node_frag(list, newfrag->node->ofs); | ||
212 | |||
213 | if (this) { | ||
214 | D2(printk(KERN_DEBUG "j_a_f_d_t_f: Lookup gave frag 0x%04x-0x%04x; phys 0x%08x (*%p)\n", | ||
215 | this->ofs, this->ofs+this->size, this->node?(ref_offset(this->node->raw)):0xffffffff, this)); | ||
216 | lastend = this->ofs + this->size; | ||
217 | } else { | ||
218 | D2(printk(KERN_DEBUG "j_a_f_d_t_f: Lookup gave no frag\n")); | ||
219 | lastend = 0; | ||
220 | } | ||
221 | |||
222 | /* See if we ran off the end of the list */ | ||
223 | if (lastend <= newfrag->ofs) { | ||
224 | /* We did */ | ||
225 | |||
226 | /* Check if 'this' node was on the same page as the new node. | ||
227 | If so, both 'this' and the new node get marked REF_NORMAL so | ||
228 | the GC can take a look. | ||
229 | */ | ||
230 | if (lastend && (lastend-1) >> PAGE_CACHE_SHIFT == newfrag->ofs >> PAGE_CACHE_SHIFT) { | ||
231 | if (this->node) | ||
232 | mark_ref_normal(this->node->raw); | ||
233 | mark_ref_normal(newfrag->node->raw); | ||
234 | } | ||
235 | |||
236 | if (lastend < newfrag->node->ofs) { | ||
237 | /* ... and we need to put a hole in before the new node */ | ||
238 | struct jffs2_node_frag *holefrag = jffs2_alloc_node_frag(); | ||
239 | if (!holefrag) { | ||
240 | jffs2_free_node_frag(newfrag); | ||
241 | return -ENOMEM; | ||
242 | } | ||
243 | holefrag->ofs = lastend; | ||
244 | holefrag->size = newfrag->node->ofs - lastend; | ||
245 | holefrag->node = NULL; | ||
246 | if (this) { | ||
247 | /* By definition, the 'this' node has no right-hand child, | ||
248 | because there are no frags with offset greater than it. | ||
249 | So that's where we want to put the hole */ | ||
250 | D2(printk(KERN_DEBUG "Adding hole frag (%p) on right of node at (%p)\n", holefrag, this)); | ||
251 | rb_link_node(&holefrag->rb, &this->rb, &this->rb.rb_right); | ||
252 | } else { | ||
253 | D2(printk(KERN_DEBUG "Adding hole frag (%p) at root of tree\n", holefrag)); | ||
254 | rb_link_node(&holefrag->rb, NULL, &list->rb_node); | ||
255 | } | ||
256 | rb_insert_color(&holefrag->rb, list); | ||
257 | this = holefrag; | ||
258 | } | ||
259 | if (this) { | ||
260 | /* By definition, the 'this' node has no right-hand child, | ||
261 | because there are no frags with offset greater than it. | ||
262 | So that's where we want to put the hole */ | ||
263 | D2(printk(KERN_DEBUG "Adding new frag (%p) on right of node at (%p)\n", newfrag, this)); | ||
264 | rb_link_node(&newfrag->rb, &this->rb, &this->rb.rb_right); | ||
265 | } else { | ||
266 | D2(printk(KERN_DEBUG "Adding new frag (%p) at root of tree\n", newfrag)); | ||
267 | rb_link_node(&newfrag->rb, NULL, &list->rb_node); | ||
268 | } | ||
269 | rb_insert_color(&newfrag->rb, list); | ||
270 | return 0; | ||
271 | } | ||
272 | |||
273 | D2(printk(KERN_DEBUG "j_a_f_d_t_f: dealing with frag 0x%04x-0x%04x; phys 0x%08x (*%p)\n", | ||
274 | this->ofs, this->ofs+this->size, this->node?(ref_offset(this->node->raw)):0xffffffff, this)); | ||
275 | |||
276 | /* OK. 'this' is pointing at the first frag that newfrag->ofs at least partially obsoletes, | ||
277 | * - i.e. newfrag->ofs < this->ofs+this->size && newfrag->ofs >= this->ofs | ||
278 | */ | ||
279 | if (newfrag->ofs > this->ofs) { | ||
280 | /* This node isn't completely obsoleted. The start of it remains valid */ | ||
281 | |||
282 | /* Mark the new node and the partially covered node REF_NORMAL -- let | ||
283 | the GC take a look at them */ | ||
284 | mark_ref_normal(newfrag->node->raw); | ||
285 | if (this->node) | ||
286 | mark_ref_normal(this->node->raw); | ||
287 | |||
288 | if (this->ofs + this->size > newfrag->ofs + newfrag->size) { | ||
289 | /* The new node splits 'this' frag into two */ | ||
290 | struct jffs2_node_frag *newfrag2 = jffs2_alloc_node_frag(); | ||
291 | if (!newfrag2) { | ||
292 | jffs2_free_node_frag(newfrag); | ||
293 | return -ENOMEM; | ||
294 | } | ||
295 | D2(printk(KERN_DEBUG "split old frag 0x%04x-0x%04x -->", this->ofs, this->ofs+this->size); | ||
296 | if (this->node) | ||
297 | printk("phys 0x%08x\n", ref_offset(this->node->raw)); | ||
298 | else | ||
299 | printk("hole\n"); | ||
300 | ) | ||
301 | |||
302 | /* New second frag pointing to this's node */ | ||
303 | newfrag2->ofs = newfrag->ofs + newfrag->size; | ||
304 | newfrag2->size = (this->ofs+this->size) - newfrag2->ofs; | ||
305 | newfrag2->node = this->node; | ||
306 | if (this->node) | ||
307 | this->node->frags++; | ||
308 | |||
309 | /* Adjust size of original 'this' */ | ||
310 | this->size = newfrag->ofs - this->ofs; | ||
311 | |||
312 | /* Now, we know there's no node with offset | ||
313 | greater than this->ofs but smaller than | ||
314 | newfrag2->ofs or newfrag->ofs, for obvious | ||
315 | reasons. So we can do a tree insert from | ||
316 | 'this' to insert newfrag, and a tree insert | ||
317 | from newfrag to insert newfrag2. */ | ||
318 | jffs2_fragtree_insert(newfrag, this); | ||
319 | rb_insert_color(&newfrag->rb, list); | ||
320 | |||
321 | jffs2_fragtree_insert(newfrag2, newfrag); | ||
322 | rb_insert_color(&newfrag2->rb, list); | ||
323 | |||
324 | return 0; | ||
325 | } | ||
326 | /* New node just reduces 'this' frag in size, doesn't split it */ | ||
327 | this->size = newfrag->ofs - this->ofs; | ||
328 | |||
329 | /* Again, we know it lives down here in the tree */ | ||
330 | jffs2_fragtree_insert(newfrag, this); | ||
331 | rb_insert_color(&newfrag->rb, list); | ||
332 | } else { | ||
333 | /* New frag starts at the same point as 'this' used to. Replace | ||
334 | it in the tree without doing a delete and insertion */ | ||
335 | D2(printk(KERN_DEBUG "Inserting newfrag (*%p),%d-%d in before 'this' (*%p),%d-%d\n", | ||
336 | newfrag, newfrag->ofs, newfrag->ofs+newfrag->size, | ||
337 | this, this->ofs, this->ofs+this->size)); | ||
338 | |||
339 | rb_replace_node(&this->rb, &newfrag->rb, list); | ||
340 | |||
341 | if (newfrag->ofs + newfrag->size >= this->ofs+this->size) { | ||
342 | D2(printk(KERN_DEBUG "Obsoleting node frag %p (%x-%x)\n", this, this->ofs, this->ofs+this->size)); | ||
343 | jffs2_obsolete_node_frag(c, this); | ||
344 | } else { | ||
345 | this->ofs += newfrag->size; | ||
346 | this->size -= newfrag->size; | ||
347 | |||
348 | jffs2_fragtree_insert(this, newfrag); | ||
349 | rb_insert_color(&this->rb, list); | ||
350 | return 0; | ||
351 | } | ||
352 | } | ||
353 | /* OK, now we have newfrag added in the correct place in the tree, but | ||
354 | frag_next(newfrag) may be a fragment which is overlapped by it | ||
355 | */ | ||
356 | while ((this = frag_next(newfrag)) && newfrag->ofs + newfrag->size >= this->ofs + this->size) { | ||
357 | /* 'this' frag is obsoleted completely. */ | ||
358 | D2(printk(KERN_DEBUG "Obsoleting node frag %p (%x-%x) and removing from tree\n", this, this->ofs, this->ofs+this->size)); | ||
359 | rb_erase(&this->rb, list); | ||
360 | jffs2_obsolete_node_frag(c, this); | ||
361 | } | ||
362 | /* Now we're pointing at the first frag which isn't totally obsoleted by | ||
363 | the new frag */ | ||
364 | |||
365 | if (!this || newfrag->ofs + newfrag->size == this->ofs) { | ||
366 | return 0; | ||
367 | } | ||
368 | /* Still some overlap but we don't need to move it in the tree */ | ||
369 | this->size = (this->ofs + this->size) - (newfrag->ofs + newfrag->size); | ||
370 | this->ofs = newfrag->ofs + newfrag->size; | ||
371 | |||
372 | /* And mark them REF_NORMAL so the GC takes a look at them */ | ||
373 | if (this->node) | ||
374 | mark_ref_normal(this->node->raw); | ||
375 | mark_ref_normal(newfrag->node->raw); | ||
376 | |||
377 | return 0; | ||
378 | } | ||
379 | |||
380 | void jffs2_truncate_fraglist (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size) | ||
381 | { | ||
382 | struct jffs2_node_frag *frag = jffs2_lookup_node_frag(list, size); | ||
383 | |||
384 | D1(printk(KERN_DEBUG "Truncating fraglist to 0x%08x bytes\n", size)); | ||
385 | |||
386 | /* We know frag->ofs <= size. That's what lookup does for us */ | ||
387 | if (frag && frag->ofs != size) { | ||
388 | if (frag->ofs+frag->size >= size) { | ||
389 | D1(printk(KERN_DEBUG "Truncating frag 0x%08x-0x%08x\n", frag->ofs, frag->ofs+frag->size)); | ||
390 | frag->size = size - frag->ofs; | ||
391 | } | ||
392 | frag = frag_next(frag); | ||
393 | } | ||
394 | while (frag && frag->ofs >= size) { | ||
395 | struct jffs2_node_frag *next = frag_next(frag); | ||
396 | |||
397 | D1(printk(KERN_DEBUG "Removing frag 0x%08x-0x%08x\n", frag->ofs, frag->ofs+frag->size)); | ||
398 | frag_erase(frag, list); | ||
399 | jffs2_obsolete_node_frag(c, frag); | ||
400 | frag = next; | ||
401 | } | ||
402 | } | ||
403 | |||
404 | /* Scan the list of all nodes present for this ino, build map of versions, etc. */ | ||
405 | |||
406 | static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, | ||
407 | struct jffs2_inode_info *f, | ||
408 | struct jffs2_raw_inode *latest_node); | ||
409 | |||
/*
 * Look up (or, for the root inode, create) the inocache entry for 'ino',
 * move it to INO_STATE_READING, and read the inode's nodes from flash via
 * jffs2_do_read_inode_internal().
 *
 * Sleeps and retries while another thread holds the inocache in
 * INO_STATE_CHECKING or INO_STATE_GC.  Returns -ENOENT if no such inode
 * exists (and it is not ino 1), -ENOMEM on allocation failure, or the
 * result of jffs2_do_read_inode_internal().
 */
int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
			uint32_t ino, struct jffs2_raw_inode *latest_node)
{
	D2(printk(KERN_DEBUG "jffs2_do_read_inode(): getting inocache\n"));

 retry_inocache:
	spin_lock(&c->inocache_lock);
	f->inocache = jffs2_get_ino_cache(c, ino);

	D2(printk(KERN_DEBUG "jffs2_do_read_inode(): Got inocache at %p\n", f->inocache));

	if (f->inocache) {
		/* Check its state. We may need to wait before we can use it */
		switch(f->inocache->state) {
		case INO_STATE_UNCHECKED:
		case INO_STATE_CHECKEDABSENT:
			/* Free for us to claim: mark it as being read */
			f->inocache->state = INO_STATE_READING;
			break;

		case INO_STATE_CHECKING:
		case INO_STATE_GC:
			/* If it's in either of these states, we need
			   to wait for whoever's got it to finish and
			   put it back. */
			D1(printk(KERN_DEBUG "jffs2_get_ino_cache_read waiting for ino #%u in state %d\n",
				  ino, f->inocache->state));
			/* Drops inocache_lock while sleeping; retry from scratch */
			sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
			goto retry_inocache;

		case INO_STATE_READING:
		case INO_STATE_PRESENT:
			/* Eep. This should never happen. It can
			   happen if Linux calls read_inode() again
			   before clear_inode() has finished though. */
			printk(KERN_WARNING "Eep. Trying to read_inode #%u when it's already in state %d!\n", ino, f->inocache->state);
			/* Fail. That's probably better than allowing it to succeed */
			f->inocache = NULL;
			break;

		default:
			BUG();
		}
	}
	spin_unlock(&c->inocache_lock);

	if (!f->inocache && ino == 1) {
		/* Special case - no root inode on medium */
		f->inocache = jffs2_alloc_inode_cache();
		if (!f->inocache) {
			printk(KERN_CRIT "jffs2_do_read_inode(): Cannot allocate inocache for root inode\n");
			return -ENOMEM;
		}
		D1(printk(KERN_DEBUG "jffs2_do_read_inode(): Creating inocache for root inode\n"));
		memset(f->inocache, 0, sizeof(struct jffs2_inode_cache));
		f->inocache->ino = f->inocache->nlink = 1;
		/* Empty nodes list: points back at the inocache itself as sentinel */
		f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
		f->inocache->state = INO_STATE_READING;
		jffs2_add_ino_cache(c, f->inocache);
	}
	if (!f->inocache) {
		printk(KERN_WARNING "jffs2_do_read_inode() on nonexistent ino %u\n", ino);
		return -ENOENT;
	}

	return jffs2_do_read_inode_internal(c, f, latest_node);
}
476 | |||
477 | int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) | ||
478 | { | ||
479 | struct jffs2_raw_inode n; | ||
480 | struct jffs2_inode_info *f = kmalloc(sizeof(*f), GFP_KERNEL); | ||
481 | int ret; | ||
482 | |||
483 | if (!f) | ||
484 | return -ENOMEM; | ||
485 | |||
486 | memset(f, 0, sizeof(*f)); | ||
487 | init_MUTEX_LOCKED(&f->sem); | ||
488 | f->inocache = ic; | ||
489 | |||
490 | ret = jffs2_do_read_inode_internal(c, f, &n); | ||
491 | if (!ret) { | ||
492 | up(&f->sem); | ||
493 | jffs2_do_clear_inode(c, f); | ||
494 | } | ||
495 | kfree (f); | ||
496 | return ret; | ||
497 | } | ||
498 | |||
/*
 * Core of inode reading: gather every node for this inode from flash,
 * replay the temporary dnode list (in version order) into the fragment
 * tree and metadata slot, then read and CRC-check the latest raw inode
 * into 'latest_node' and post-process it per file type.
 *
 * Called with f->sem held.  On failure paths that have already built
 * state, releases f->sem and clears the inode before returning.
 * Returns 0 on success or a negative errno.
 */
static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
					struct jffs2_inode_info *f,
					struct jffs2_raw_inode *latest_node)
{
	struct jffs2_tmp_dnode_info *tn_list, *tn;
	struct jffs2_full_dirent *fd_list;
	struct jffs2_full_dnode *fn = NULL;
	uint32_t crc;
	uint32_t latest_mctime, mctime_ver;
	uint32_t mdata_ver = 0;	/* version of the node currently in f->metadata */
	size_t retlen;
	int ret;

	D1(printk(KERN_DEBUG "jffs2_do_read_inode_internal(): ino #%u nlink is %d\n", f->inocache->ino, f->inocache->nlink));

	/* Grab all nodes relevant to this ino */
	ret = jffs2_get_inode_nodes(c, f, &tn_list, &fd_list, &f->highest_version, &latest_mctime, &mctime_ver);

	if (ret) {
		printk(KERN_CRIT "jffs2_get_inode_nodes() for ino %u returned %d\n", f->inocache->ino, ret);
		if (f->inocache->state == INO_STATE_READING)
			jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
		return ret;
	}
	f->dents = fd_list;

	/* Replay each temporary dnode; after the loop, 'fn' is the
	   most recently processed (i.e. latest) full dnode */
	while (tn_list) {
		tn = tn_list;

		fn = tn->fn;

		if (f->metadata) {
			if (likely(tn->version >= mdata_ver)) {
				D1(printk(KERN_DEBUG "Obsoleting old metadata at 0x%08x\n", ref_offset(f->metadata->raw)));
				jffs2_mark_node_obsolete(c, f->metadata->raw);
				jffs2_free_full_dnode(f->metadata);
				f->metadata = NULL;

				mdata_ver = 0;
			} else {
				/* This should never happen. */
				printk(KERN_WARNING "Er. New metadata at 0x%08x with ver %d is actually older than previous ver %d at 0x%08x\n",
					  ref_offset(fn->raw), tn->version, mdata_ver, ref_offset(f->metadata->raw));
				jffs2_mark_node_obsolete(c, fn->raw);
				jffs2_free_full_dnode(fn);
				/* Fill in latest_node from the metadata, not this one we're about to free... */
				fn = f->metadata;
				goto next_tn;
			}
		}

		if (fn->size) {
			jffs2_add_full_dnode_to_inode(c, f, fn);
		} else {
			/* Zero-sized node at end of version list. Just a metadata update */
			D1(printk(KERN_DEBUG "metadata @%08x: ver %d\n", ref_offset(fn->raw), tn->version));
			f->metadata = fn;
			mdata_ver = tn->version;
		}
	next_tn:
		tn_list = tn->next;
		jffs2_free_tmp_dnode_info(tn);
	}
	D1(jffs2_sanitycheck_fragtree(f));

	if (!fn) {
		/* No data nodes for this inode. */
		if (f->inocache->ino != 1) {
			printk(KERN_WARNING "jffs2_do_read_inode(): No data nodes found for ino #%u\n", f->inocache->ino);
			if (!fd_list) {
				if (f->inocache->state == INO_STATE_READING)
					jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
				return -EIO;
			}
			printk(KERN_WARNING "jffs2_do_read_inode(): But it has children so we fake some modes for it\n");
		}
		/* Fabricate a plausible directory inode so the children stay reachable */
		latest_node->mode = cpu_to_jemode(S_IFDIR|S_IRUGO|S_IWUSR|S_IXUGO);
		latest_node->version = cpu_to_je32(0);
		latest_node->atime = latest_node->ctime = latest_node->mtime = cpu_to_je32(0);
		latest_node->isize = cpu_to_je32(0);
		latest_node->gid = cpu_to_je16(0);
		latest_node->uid = cpu_to_je16(0);
		if (f->inocache->state == INO_STATE_READING)
			jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);
		return 0;
	}

	/* Re-read the latest node's raw inode header from flash */
	ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(*latest_node), &retlen, (void *)latest_node);
	if (ret || retlen != sizeof(*latest_node)) {
		printk(KERN_NOTICE "MTD read in jffs2_do_read_inode() failed: Returned %d, %zd of %zd bytes read\n",
		       ret, retlen, sizeof(*latest_node));
		/* FIXME: If this fails, there seems to be a memory leak. Find it. */
		up(&f->sem);
		jffs2_do_clear_inode(c, f);
		return ret?ret:-EIO;
	}

	/* CRC covers the whole raw inode except the trailing 8 bytes
	   (node_crc + data_crc fields themselves) */
	crc = crc32(0, latest_node, sizeof(*latest_node)-8);
	if (crc != je32_to_cpu(latest_node->node_crc)) {
		printk(KERN_NOTICE "CRC failed for read_inode of inode %u at physical location 0x%x\n", f->inocache->ino, ref_offset(fn->raw));
		up(&f->sem);
		jffs2_do_clear_inode(c, f);
		return -EIO;
	}

	/* Per-filetype fixups on the freshly read latest node */
	switch(jemode_to_cpu(latest_node->mode) & S_IFMT) {
	case S_IFDIR:
		if (mctime_ver > je32_to_cpu(latest_node->version)) {
			/* The times in the latest_node are actually older than
			   mctime in the latest dirent. Cheat. */
			latest_node->ctime = latest_node->mtime = cpu_to_je32(latest_mctime);
		}
		break;


	case S_IFREG:
		/* If it was a regular file, truncate it to the latest node's isize */
		jffs2_truncate_fraglist(c, &f->fragtree, je32_to_cpu(latest_node->isize));
		break;

	case S_IFLNK:
		/* Hack to work around broken isize in old symlink code.
		   Remove this when dwmw2 comes to his senses and stops
		   symlinks from being an entirely gratuitous special
		   case. */
		if (!je32_to_cpu(latest_node->isize))
			latest_node->isize = latest_node->dsize;
		/* fall through... */

	case S_IFBLK:
	case S_IFCHR:
		/* Certain inode types should have only one data node, and it's
		   kept as the metadata node */
		if (f->metadata) {
			printk(KERN_WARNING "Argh. Special inode #%u with mode 0%o had metadata node\n",
			       f->inocache->ino, jemode_to_cpu(latest_node->mode));
			up(&f->sem);
			jffs2_do_clear_inode(c, f);
			return -EIO;
		}
		if (!frag_first(&f->fragtree)) {
			printk(KERN_WARNING "Argh. Special inode #%u with mode 0%o has no fragments\n",
			       f->inocache->ino, jemode_to_cpu(latest_node->mode));
			up(&f->sem);
			jffs2_do_clear_inode(c, f);
			return -EIO;
		}
		/* ASSERT: f->fraglist != NULL */
		if (frag_next(frag_first(&f->fragtree))) {
			printk(KERN_WARNING "Argh. Special inode #%u with mode 0x%x had more than one node\n",
			       f->inocache->ino, jemode_to_cpu(latest_node->mode));
			/* FIXME: Deal with it - check crc32, check for duplicate node, check times and discard the older one */
			up(&f->sem);
			jffs2_do_clear_inode(c, f);
			return -EIO;
		}
		/* OK. We're happy */
		/* Move the single data node into the metadata slot and drop the fragtree */
		f->metadata = frag_first(&f->fragtree)->node;
		jffs2_free_node_frag(frag_first(&f->fragtree));
		f->fragtree = RB_ROOT;
		break;
	}
	if (f->inocache->state == INO_STATE_READING)
		jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);

	return 0;
}
666 | |||
667 | void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f) | ||
668 | { | ||
669 | struct jffs2_full_dirent *fd, *fds; | ||
670 | int deleted; | ||
671 | |||
672 | down(&f->sem); | ||
673 | deleted = f->inocache && !f->inocache->nlink; | ||
674 | |||
675 | if (f->metadata) { | ||
676 | if (deleted) | ||
677 | jffs2_mark_node_obsolete(c, f->metadata->raw); | ||
678 | jffs2_free_full_dnode(f->metadata); | ||
679 | } | ||
680 | |||
681 | jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL); | ||
682 | |||
683 | fds = f->dents; | ||
684 | |||
685 | while(fds) { | ||
686 | fd = fds; | ||
687 | fds = fd->next; | ||
688 | jffs2_free_full_dirent(fd); | ||
689 | } | ||
690 | |||
691 | if (f->inocache && f->inocache->state != INO_STATE_CHECKING) | ||
692 | jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT); | ||
693 | |||
694 | up(&f->sem); | ||
695 | } | ||
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c new file mode 100644 index 000000000000..ded53584a897 --- /dev/null +++ b/fs/jffs2/scan.c | |||
@@ -0,0 +1,916 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2001-2003 Red Hat, Inc. | ||
5 | * | ||
6 | * Created by David Woodhouse <dwmw2@infradead.org> | ||
7 | * | ||
8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
9 | * | ||
10 | * $Id: scan.c,v 1.115 2004/11/17 12:59:08 dedekind Exp $ | ||
11 | * | ||
12 | */ | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/mtd/mtd.h> | ||
17 | #include <linux/pagemap.h> | ||
18 | #include <linux/crc32.h> | ||
19 | #include <linux/compiler.h> | ||
20 | #include "nodelist.h" | ||
21 | |||
/* Only the first EMPTY_SCAN_SIZE bytes of a block are scanned for 0xFF
   before it is declared erased */
#define EMPTY_SCAN_SIZE 1024

/* Space-accounting helpers: move 'x' bytes out of the free pool of both
   the whole filesystem ('c') and the current eraseblock ('jeb', assumed
   in scope at the expansion site) into the dirty / used / unchecked
   pools respectively.  The typeof temporary evaluates 'x' only once. */
#define DIRTY_SPACE(x) do { typeof(x) _x = (x); \
		c->free_size -= _x; c->dirty_size += _x; \
		jeb->free_size -= _x ; jeb->dirty_size += _x; \
		}while(0)
#define USED_SPACE(x) do { typeof(x) _x = (x); \
		c->free_size -= _x; c->used_size += _x; \
		jeb->free_size -= _x ; jeb->used_size += _x; \
		}while(0)
#define UNCHECKED_SPACE(x) do { typeof(x) _x = (x); \
		c->free_size -= _x; c->unchecked_size += _x; \
		jeb->free_size -= _x ; jeb->unchecked_size += _x; \
		}while(0)

/* Rate-limited printk: 'noise' points to a per-block countdown; once it
   hits zero, further messages for this erase block are suppressed */
#define noisy_printk(noise, args...) do { \
	if (*(noise)) { \
		printk(KERN_NOTICE args); \
		 (*(noise))--; \
		 if (!(*(noise))) { \
			 printk(KERN_NOTICE "Further such events for this erase block will not be printed\n"); \
		 } \
	} \
} while(0)
46 | |||
47 | static uint32_t pseudo_random; | ||
48 | |||
49 | static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | ||
50 | unsigned char *buf, uint32_t buf_size); | ||
51 | |||
52 | /* These helper functions _must_ increase ofs and also do the dirty/used space accounting. | ||
53 | * Returning an error will abort the mount - bad checksums etc. should just mark the space | ||
54 | * as dirty. | ||
55 | */ | ||
56 | static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | ||
57 | struct jffs2_raw_inode *ri, uint32_t ofs); | ||
58 | static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | ||
59 | struct jffs2_raw_dirent *rd, uint32_t ofs); | ||
60 | |||
/* Return codes of jffs2_scan_eraseblock(), consumed by the switch in
   jffs2_scan_medium() to decide which block list each eraseblock joins */
#define BLK_STATE_ALLFF		0	/* entirely 0xFF: looks erased, re-erase to be sure */
#define BLK_STATE_CLEAN		1	/* full (or nearly full) of clean data */
#define BLK_STATE_PARTDIRTY	2	/* some data, some free/dirty space */
#define BLK_STATE_CLEANMARKER	3	/* only a CLEANMARKER node present */
#define BLK_STATE_ALLDIRTY	4	/* nothing valid: needs erasing */
#define BLK_STATE_BADBLOCK	5	/* marked bad (e.g. failed NAND block) */
67 | |||
68 | static inline int min_free(struct jffs2_sb_info *c) | ||
69 | { | ||
70 | uint32_t min = 2 * sizeof(struct jffs2_raw_inode); | ||
71 | #if defined CONFIG_JFFS2_FS_NAND || defined CONFIG_JFFS2_FS_NOR_ECC | ||
72 | if (!jffs2_can_mark_obsolete(c) && min < c->wbuf_pagesize) | ||
73 | return c->wbuf_pagesize; | ||
74 | #endif | ||
75 | return min; | ||
76 | |||
77 | } | ||
/*
 * Scan the whole flash at mount time: examine every eraseblock, build the
 * per-block space accounting, and sort each block onto the appropriate
 * list (free, clean, dirty, very_dirty, erase_pending, bad).  Also picks
 * the block with the most free space as c->nextblock for new writes.
 *
 * Uses MTD point/unpoint to map the flash directly when possible;
 * otherwise falls back to a kmalloc'd read buffer.
 *
 * Returns 0 on success, or a negative errno (including -EIO when no
 * valid JFFS2 nodes are found at all).
 */
int jffs2_scan_medium(struct jffs2_sb_info *c)
{
	int i, ret;
	uint32_t empty_blocks = 0, bad_blocks = 0;
	unsigned char *flashbuf = NULL;
	uint32_t buf_size = 0;	/* 0 means flash is pointed (mapped) directly */
#ifndef __ECOS
	size_t pointlen;

	if (c->mtd->point) {
		ret = c->mtd->point (c->mtd, 0, c->mtd->size, &pointlen, &flashbuf);
		if (!ret && pointlen < c->mtd->size) {
			/* Don't muck about if it won't let us point to the whole flash */
			D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", pointlen));
			c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size);
			flashbuf = NULL;
		}
		if (ret)
			D1(printk(KERN_DEBUG "MTD point failed %d\n", ret));
	}
#endif
	if (!flashbuf) {
		/* For NAND it's quicker to read a whole eraseblock at a time,
		   apparently */
		if (jffs2_cleanmarker_oob(c))
			buf_size = c->sector_size;
		else
			buf_size = PAGE_SIZE;

		/* Respect kmalloc limitations */
		if (buf_size > 128*1024)
			buf_size = 128*1024;

		D1(printk(KERN_DEBUG "Allocating readbuf of %d bytes\n", buf_size));
		flashbuf = kmalloc(buf_size, GFP_KERNEL);
		if (!flashbuf)
			return -ENOMEM;
	}

	for (i=0; i<c->nr_blocks; i++) {
		struct jffs2_eraseblock *jeb = &c->blocks[i];

		/* When pointed, pass the block's own mapped address; when
		   buffered, pass the shared buffer and its size */
		ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset), buf_size);

		if (ret < 0)
			goto out;

		ACCT_PARANOIA_CHECK(jeb);

		/* Now decide which list to put it on */
		switch(ret) {
		case BLK_STATE_ALLFF:
			/*
			 * Empty block.   Since we can't be sure it
			 * was entirely erased, we just queue it for erase
			 * again.  It will be marked as such when the erase
			 * is complete.  Meanwhile we still count it as empty
			 * for later checks.
			 */
			empty_blocks++;
			list_add(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			break;

		case BLK_STATE_CLEANMARKER:
			/* Only a CLEANMARKER node is valid */
			if (!jeb->dirty_size) {
				/* It's actually free */
				list_add(&jeb->list, &c->free_list);
				c->nr_free_blocks++;
			} else {
				/* Dirt */
				D1(printk(KERN_DEBUG "Adding all-dirty block at 0x%08x to erase_pending_list\n", jeb->offset));
				list_add(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
			}
			break;

		case BLK_STATE_CLEAN:
                        /* Full (or almost full) of clean data. Clean list */
                        list_add(&jeb->list, &c->clean_list);
			break;

		case BLK_STATE_PARTDIRTY:
                        /* Some data, but not full. Dirty list. */
                        /* We want to remember the block with most free space
                           and stick it in the 'nextblock' position to start writing to it. */
                        if (jeb->free_size > min_free(c) &&
			    (!c->nextblock || c->nextblock->free_size < jeb->free_size)) {
                                /* Better candidate for the next writes to go to */
                                if (c->nextblock) {
					/* Demote the old nextblock: its remaining free
					   space becomes dirty (unusable for writes now) */
					c->nextblock->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size;
					c->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size;
					c->free_size -= c->nextblock->free_size;
					c->wasted_size -= c->nextblock->wasted_size;
					c->nextblock->free_size = c->nextblock->wasted_size = 0;
					if (VERYDIRTY(c, c->nextblock->dirty_size)) {
						list_add(&c->nextblock->list, &c->very_dirty_list);
					} else {
						list_add(&c->nextblock->list, &c->dirty_list);
					}
				}
                                c->nextblock = jeb;
                        } else {
				/* Not a candidate: write off its free space as dirty */
				jeb->dirty_size += jeb->free_size + jeb->wasted_size;
				c->dirty_size += jeb->free_size + jeb->wasted_size;
				c->free_size -= jeb->free_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->free_size = jeb->wasted_size = 0;
				if (VERYDIRTY(c, jeb->dirty_size)) {
					list_add(&jeb->list, &c->very_dirty_list);
				} else {
					list_add(&jeb->list, &c->dirty_list);
				}
                        }
			break;

		case BLK_STATE_ALLDIRTY:
			/* Nothing valid - not even a clean marker. Needs erasing. */
                        /* For now we just put it on the erasing list. We'll start the erases later */
			D1(printk(KERN_NOTICE "JFFS2: Erase block at 0x%08x is not formatted. It will be erased\n", jeb->offset));
                        list_add(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			break;

		case BLK_STATE_BADBLOCK:
			D1(printk(KERN_NOTICE "JFFS2: Block at 0x%08x is bad\n", jeb->offset));
                        list_add(&jeb->list, &c->bad_list);
			c->bad_size += c->sector_size;
			c->free_size -= c->sector_size;
			bad_blocks++;
			break;
		default:
			printk(KERN_WARNING "jffs2_scan_medium(): unknown block state\n");
			BUG();
		}
	}

	/* Nextblock dirty is always seen as wasted, because we cannot recycle it now */
	if (c->nextblock && (c->nextblock->dirty_size)) {
		c->nextblock->wasted_size += c->nextblock->dirty_size;
		c->wasted_size += c->nextblock->dirty_size;
		c->dirty_size -= c->nextblock->dirty_size;
		c->nextblock->dirty_size = 0;
	}
#if defined CONFIG_JFFS2_FS_NAND || defined CONFIG_JFFS2_FS_NOR_ECC
	if (!jffs2_can_mark_obsolete(c) && c->nextblock && (c->nextblock->free_size & (c->wbuf_pagesize-1))) {
		/* If we're going to start writing into a block which already
		   contains data, and the end of the data isn't page-aligned,
		   skip a little and align it. */

		uint32_t skip = c->nextblock->free_size & (c->wbuf_pagesize-1);

		D1(printk(KERN_DEBUG "jffs2_scan_medium(): Skipping %d bytes in nextblock to ensure page alignment\n",
			  skip));
		c->nextblock->wasted_size += skip;
		c->wasted_size += skip;

		c->nextblock->free_size -= skip;
		c->free_size -= skip;
	}
#endif
	if (c->nr_erasing_blocks) {
		if ( !c->used_size && ((c->nr_free_blocks+empty_blocks+bad_blocks)!= c->nr_blocks || bad_blocks == c->nr_blocks) ) {
			printk(KERN_NOTICE "Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n");
			printk(KERN_NOTICE "empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n",empty_blocks,bad_blocks,c->nr_blocks);
			ret = -EIO;
			goto out;
		}
		jffs2_erase_pending_trigger(c);
	}
	ret = 0;
 out:
	/* buf_size != 0 means we kmalloc'd; otherwise the flash was pointed */
	if (buf_size)
		kfree(flashbuf);
#ifndef __ECOS
	else
		c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size);
#endif
	return ret;
}
259 | |||
260 | static int jffs2_fill_scan_buf (struct jffs2_sb_info *c, unsigned char *buf, | ||
261 | uint32_t ofs, uint32_t len) | ||
262 | { | ||
263 | int ret; | ||
264 | size_t retlen; | ||
265 | |||
266 | ret = jffs2_flash_read(c, ofs, len, &retlen, buf); | ||
267 | if (ret) { | ||
268 | D1(printk(KERN_WARNING "mtd->read(0x%x bytes from 0x%x) returned %d\n", len, ofs, ret)); | ||
269 | return ret; | ||
270 | } | ||
271 | if (retlen < len) { | ||
272 | D1(printk(KERN_WARNING "Read at 0x%x gave only 0x%zx bytes\n", ofs, retlen)); | ||
273 | return -EIO; | ||
274 | } | ||
275 | D2(printk(KERN_DEBUG "Read 0x%x bytes from 0x%08x into buf\n", len, ofs)); | ||
276 | D2(printk(KERN_DEBUG "000: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", | ||
277 | buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7], buf[8], buf[9], buf[10], buf[11], buf[12], buf[13], buf[14], buf[15])); | ||
278 | return 0; | ||
279 | } | ||
280 | |||
/*
 * Scan one eraseblock: walk every node in it, attribute each node's
 * space to the used/dirty/unchecked totals, and build the raw node
 * reference chains via the per-nodetype helpers below.
 *
 * Returns a BLK_STATE_* classification for the block as a whole
 * (all-0xFF, cleanmarker-only, clean, partly dirty, all dirty, bad)
 * or a negative errno on a fatal error.
 *
 * If 'buf_size' is zero the flash is directly mapped and 'buf'
 * already covers the block, so no reads into the buffer are done;
 * otherwise 'buf' is a bounce buffer of 'buf_size' bytes which is
 * refilled from flash as the scan advances.
 */
static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  unsigned char *buf, uint32_t buf_size) {
	struct jffs2_unknown_node *node;
	struct jffs2_unknown_node crcnode;
	uint32_t ofs, prevofs;
	uint32_t hdr_crc, buf_ofs, buf_len;
	int err;
	int noise = 0;
#ifdef CONFIG_JFFS2_FS_NAND
	int cleanmarkerfound = 0;
#endif

	ofs = jeb->offset;
	/* 'prevofs' trails the scan position; starting it one byte back
	   guarantees the first "already seen" check cannot trigger */
	prevofs = jeb->offset - 1;

	D1(printk(KERN_DEBUG "jffs2_scan_eraseblock(): Scanning block at 0x%x\n", ofs));

#ifdef CONFIG_JFFS2_FS_NAND
	if (jffs2_cleanmarker_oob(c)) {
		int ret = jffs2_check_nand_cleanmarker(c, jeb);
		D2(printk(KERN_NOTICE "jffs_check_nand_cleanmarker returned %d\n",ret));
		/* Even if it's not found, we still scan to see
		   if the block is empty. We use this information
		   to decide whether to erase it or not. */
		switch (ret) {
		case 0:		cleanmarkerfound = 1; break;
		case 1: 	break;
		case 2: 	return BLK_STATE_BADBLOCK;
		case 3:		return BLK_STATE_ALLDIRTY; /* Block has failed to erase min. once */
		default: 	return ret;
		}
	}
#endif
	buf_ofs = jeb->offset;

	if (!buf_size) {
		/* Directly mapped flash: 'buf' spans the whole block */
		buf_len = c->sector_size;
	} else {
		/* Read just enough to run the empty-block check below */
		buf_len = EMPTY_SCAN_SIZE;
		err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len);
		if (err)
			return err;
	}

	/* We temporarily use 'ofs' as a pointer into the buffer/jeb */
	ofs = 0;

	/* Scan only 4KiB of 0xFF before declaring it's empty */
	while(ofs < EMPTY_SCAN_SIZE && *(uint32_t *)(&buf[ofs]) == 0xFFFFFFFF)
		ofs += 4;

	if (ofs == EMPTY_SCAN_SIZE) {
#ifdef CONFIG_JFFS2_FS_NAND
		if (jffs2_cleanmarker_oob(c)) {
			/* scan oob, take care of cleanmarker */
			int ret = jffs2_check_oob_empty(c, jeb, cleanmarkerfound);
			D2(printk(KERN_NOTICE "jffs2_check_oob_empty returned %d\n",ret));
			switch (ret) {
			case 0:		return cleanmarkerfound ? BLK_STATE_CLEANMARKER : BLK_STATE_ALLFF;
			case 1: 	return BLK_STATE_ALLDIRTY;
			default: 	return ret;
			}
		}
#endif
		D1(printk(KERN_DEBUG "Block at 0x%08x is empty (erased)\n", jeb->offset));
		return BLK_STATE_ALLFF;	/* OK to erase if all blocks are like this */
	}
	if (ofs) {
		/* Leading run of 0xFF shorter than EMPTY_SCAN_SIZE: count
		   it as dirty and continue scanning after it */
		D1(printk(KERN_DEBUG "Free space at %08x ends at %08x\n", jeb->offset,
			  jeb->offset + ofs));
		DIRTY_SPACE(ofs);
	}

	/* Now ofs is a complete physical flash offset as it always was... */
	ofs += jeb->offset;

	/* Rate limit for the bad-magic / bad-CRC warnings below */
	noise = 10;

scan_more:
	while(ofs < jeb->offset + c->sector_size) {

		D1(ACCT_PARANOIA_CHECK(jeb));

		cond_resched();

		if (ofs & 3) {
			printk(KERN_WARNING "Eep. ofs 0x%08x not word-aligned!\n", ofs);
			ofs = PAD(ofs);
			continue;
		}
		if (ofs == prevofs) {
			printk(KERN_WARNING "ofs 0x%08x has already been seen. Skipping\n", ofs);
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}
		prevofs = ofs;

		/* Not even room for a node header before the block ends? */
		if (jeb->offset + c->sector_size < ofs + sizeof(*node)) {
			D1(printk(KERN_DEBUG "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n", sizeof(struct jffs2_unknown_node),
				  jeb->offset, c->sector_size, ofs, sizeof(*node)));
			DIRTY_SPACE((jeb->offset + c->sector_size)-ofs);
			break;
		}

		/* Refill the bounce buffer if the node header would run
		   off its end */
		if (buf_ofs + buf_len < ofs + sizeof(*node)) {
			buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
			D1(printk(KERN_DEBUG "Fewer than %zd bytes (node header) left to end of buf. Reading 0x%x at 0x%08x\n",
				  sizeof(struct jffs2_unknown_node), buf_len, ofs));
			err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
			if (err)
				return err;
			buf_ofs = ofs;
		}

		node = (struct jffs2_unknown_node *)&buf[ofs-buf_ofs];

		if (*(uint32_t *)(&buf[ofs-buf_ofs]) == 0xffffffff) {
			uint32_t inbuf_ofs;
			uint32_t empty_start;

			/* Run of erased (0xFF) flash: measure how far it goes */
			empty_start = ofs;
			ofs += 4;

			D1(printk(KERN_DEBUG "Found empty flash at 0x%08x\n", ofs));
		more_empty:
			inbuf_ofs = ofs - buf_ofs;
			while (inbuf_ofs < buf_len) {
				if (*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff) {
					printk(KERN_WARNING "Empty flash at 0x%08x ends at 0x%08x\n",
					       empty_start, ofs);
					DIRTY_SPACE(ofs-empty_start);
					goto scan_more;
				}

				inbuf_ofs+=4;
				ofs += 4;
			}
			/* Ran off end. */
			D1(printk(KERN_DEBUG "Empty flash to end of buffer at 0x%08x\n", ofs));

			/* If we're only checking the beginning of a block with a cleanmarker,
			   bail now */
			if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) &&
			    c->cleanmarker_size && !jeb->dirty_size && !jeb->first_node->next_in_ino) {
				D1(printk(KERN_DEBUG "%d bytes at start of block seems clean... assuming all clean\n", EMPTY_SCAN_SIZE));
				return BLK_STATE_CLEANMARKER;
			}

			/* See how much more there is to read in this eraseblock... */
			buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
			if (!buf_len) {
				/* No more to read. Break out of main loop without marking
				   this range of empty space as dirty (because it's not) */
				D1(printk(KERN_DEBUG "Empty flash at %08x runs to end of block. Treating as free_space\n",
					  empty_start));
				break;
			}
			D1(printk(KERN_DEBUG "Reading another 0x%x at 0x%08x\n", buf_len, ofs));
			err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
			if (err)
				return err;
			buf_ofs = ofs;
			goto more_empty;
		}

		if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) {
			printk(KERN_WARNING "Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n", ofs);
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) {
			D1(printk(KERN_DEBUG "Dirty bitmask at 0x%08x\n", ofs));
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) {
			printk(KERN_WARNING "Old JFFS2 bitmask found at 0x%08x\n", ofs);
			printk(KERN_WARNING "You cannot use older JFFS2 filesystems with newer kernels\n");
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) {
			/* OK. We're out of possibilities. Whinge and move on */
			noisy_printk(&noise, "jffs2_scan_eraseblock(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n",
				     JFFS2_MAGIC_BITMASK, ofs,
				     je16_to_cpu(node->magic));
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}
		/* We seem to have a node of sorts. Check the CRC */
		/* The header CRC is computed with the ACCURATE bit set, so
		   force it on in the copy we checksum */
		crcnode.magic = node->magic;
		crcnode.nodetype = cpu_to_je16( je16_to_cpu(node->nodetype) | JFFS2_NODE_ACCURATE);
		crcnode.totlen = node->totlen;
		hdr_crc = crc32(0, &crcnode, sizeof(crcnode)-4);

		if (hdr_crc != je32_to_cpu(node->hdr_crc)) {
			noisy_printk(&noise, "jffs2_scan_eraseblock(): Node at 0x%08x {0x%04x, 0x%04x, 0x%08x) has invalid CRC 0x%08x (calculated 0x%08x)\n",
				     ofs, je16_to_cpu(node->magic),
				     je16_to_cpu(node->nodetype),
				     je32_to_cpu(node->totlen),
				     je32_to_cpu(node->hdr_crc),
				     hdr_crc);
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}

		if (ofs + je32_to_cpu(node->totlen) >
		    jeb->offset + c->sector_size) {
			/* Eep. Node goes over the end of the erase block. */
			printk(KERN_WARNING "Node at 0x%08x with length 0x%08x would run over the end of the erase block\n",
			       ofs, je32_to_cpu(node->totlen));
			printk(KERN_WARNING "Perhaps the file system was created with the wrong erase size?\n");
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}

		if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) {
			/* Wheee. This is an obsoleted node */
			D2(printk(KERN_DEBUG "Node at 0x%08x is obsolete. Skipping\n", ofs));
			DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
			ofs += PAD(je32_to_cpu(node->totlen));
			continue;
		}

		switch(je16_to_cpu(node->nodetype)) {
		case JFFS2_NODETYPE_INODE:
			/* Ensure the full raw inode header is in the buffer */
			if (buf_ofs + buf_len < ofs + sizeof(struct jffs2_raw_inode)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				D1(printk(KERN_DEBUG "Fewer than %zd bytes (inode node) left to end of buf. Reading 0x%x at 0x%08x\n",
					  sizeof(struct jffs2_raw_inode), buf_len, ofs));
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_inode_node(c, jeb, (void *)node, ofs);
			if (err) return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

		case JFFS2_NODETYPE_DIRENT:
			/* Dirents are checked in full (name CRC), so the
			   whole node must be in the buffer */
			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				D1(printk(KERN_DEBUG "Fewer than %d bytes (dirent node) left to end of buf. Reading 0x%x at 0x%08x\n",
					  je32_to_cpu(node->totlen), buf_len, ofs));
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_dirent_node(c, jeb, (void *)node, ofs);
			if (err) return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

		case JFFS2_NODETYPE_CLEANMARKER:
			D1(printk(KERN_DEBUG "CLEANMARKER node found at 0x%08x\n", ofs));
			if (je32_to_cpu(node->totlen) != c->cleanmarker_size) {
				printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n",
				       ofs, je32_to_cpu(node->totlen), c->cleanmarker_size);
				DIRTY_SPACE(PAD(sizeof(struct jffs2_unknown_node)));
				ofs += PAD(sizeof(struct jffs2_unknown_node));
			} else if (jeb->first_node) {
				printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n", ofs, jeb->offset);
				DIRTY_SPACE(PAD(sizeof(struct jffs2_unknown_node)));
				ofs += PAD(sizeof(struct jffs2_unknown_node));
			} else {
				/* Valid cleanmarker at the start of the block:
				   give it its own node ref */
				struct jffs2_raw_node_ref *marker_ref = jffs2_alloc_raw_node_ref();
				if (!marker_ref) {
					printk(KERN_NOTICE "Failed to allocate node ref for clean marker\n");
					return -ENOMEM;
				}
				marker_ref->next_in_ino = NULL;
				marker_ref->next_phys = NULL;
				marker_ref->flash_offset = ofs | REF_NORMAL;
				marker_ref->__totlen = c->cleanmarker_size;
				jeb->first_node = jeb->last_node = marker_ref;

				USED_SPACE(PAD(c->cleanmarker_size));
				ofs += PAD(c->cleanmarker_size);
			}
			break;

		case JFFS2_NODETYPE_PADDING:
			DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

		default:
			/* Unknown node type: handling depends on its
			   declared compatibility class */
			switch (je16_to_cpu(node->nodetype) & JFFS2_COMPAT_MASK) {
			case JFFS2_FEATURE_ROCOMPAT:
				printk(KERN_NOTICE "Read-only compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs);
			        c->flags |= JFFS2_SB_FLAG_RO;
				if (!(jffs2_is_readonly(c)))
					return -EROFS;
				DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
				ofs += PAD(je32_to_cpu(node->totlen));
				break;

			case JFFS2_FEATURE_INCOMPAT:
				printk(KERN_NOTICE "Incompatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs);
				return -EINVAL;

			case JFFS2_FEATURE_RWCOMPAT_DELETE:
				D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));
				DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
				ofs += PAD(je32_to_cpu(node->totlen));
				break;

			case JFFS2_FEATURE_RWCOMPAT_COPY:
				D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));
				USED_SPACE(PAD(je32_to_cpu(node->totlen)));
				ofs += PAD(je32_to_cpu(node->totlen));
				break;
			}
		}
	}


	D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x\n", jeb->offset,
		  jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size));

	/* mark_node_obsolete can add to wasted !! */
	if (jeb->wasted_size) {
		jeb->dirty_size += jeb->wasted_size;
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->wasted_size = 0;
	}

	/* Only the (padded) cleanmarker is accounted and nothing is
	   dirty: the block is pristine */
	if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size
		&& (!jeb->first_node || !jeb->first_node->next_in_ino) )
		return BLK_STATE_CLEANMARKER;

	/* move blocks with max 4 byte dirty space to cleanlist */
	else if (!ISDIRTY(c->sector_size - (jeb->used_size + jeb->unchecked_size))) {
		c->dirty_size -= jeb->dirty_size;
		c->wasted_size += jeb->dirty_size;
		jeb->wasted_size += jeb->dirty_size;
		jeb->dirty_size = 0;
		return BLK_STATE_CLEAN;
	} else if (jeb->used_size || jeb->unchecked_size)
		return BLK_STATE_PARTDIRTY;
	else
		return BLK_STATE_ALLDIRTY;
}
636 | |||
637 | static struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino) | ||
638 | { | ||
639 | struct jffs2_inode_cache *ic; | ||
640 | |||
641 | ic = jffs2_get_ino_cache(c, ino); | ||
642 | if (ic) | ||
643 | return ic; | ||
644 | |||
645 | if (ino > c->highest_ino) | ||
646 | c->highest_ino = ino; | ||
647 | |||
648 | ic = jffs2_alloc_inode_cache(); | ||
649 | if (!ic) { | ||
650 | printk(KERN_NOTICE "jffs2_scan_make_inode_cache(): allocation of inode cache failed\n"); | ||
651 | return NULL; | ||
652 | } | ||
653 | memset(ic, 0, sizeof(*ic)); | ||
654 | |||
655 | ic->ino = ino; | ||
656 | ic->nodes = (void *)ic; | ||
657 | jffs2_add_ino_cache(c, ic); | ||
658 | if (ino == 1) | ||
659 | ic->nlink = 1; | ||
660 | return ic; | ||
661 | } | ||
662 | |||
/*
 * Record an inode data node found during the scan.
 *
 * Only the bare minimum is done here: a raw node ref is allocated,
 * attached as REF_UNCHECKED to the inode cache for ri->ino, and the
 * node's space is accounted as unchecked (full validation happens
 * later).  The node CRC is only verified here when this is the first
 * node seen for the inode, to guard against a corrupted ino# creating
 * a bogus inode cache entry.
 *
 * Returns 0 on success (including "CRC failed, space marked dirty")
 * or -ENOMEM on allocation failure.
 */
static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				 struct jffs2_raw_inode *ri, uint32_t ofs)
{
	struct jffs2_raw_node_ref *raw;
	struct jffs2_inode_cache *ic;
	uint32_t ino = je32_to_cpu(ri->ino);

	D1(printk(KERN_DEBUG "jffs2_scan_inode_node(): Node at 0x%08x\n", ofs));

	/* We do very little here now. Just check the ino# to which we should attribute
	   this node; we can do all the CRC checking etc. later. There's a tradeoff here --
	   we used to scan the flash once only, reading everything we want from it into
	   memory, then building all our in-core data structures and freeing the extra
	   information. Now we allow the first part of the mount to complete a lot quicker,
	   but we have to go _back_ to the flash in order to finish the CRC checking, etc.
	   Which means that the _full_ amount of time to get to proper write mode with GC
	   operational may actually be _longer_ than before. Sucks to be me. */

	raw = jffs2_alloc_raw_node_ref();
	if (!raw) {
		printk(KERN_NOTICE "jffs2_scan_inode_node(): allocation of node reference failed\n");
		return -ENOMEM;
	}

	ic = jffs2_get_ino_cache(c, ino);
	if (!ic) {
		/* Inocache get failed. Either we read a bogus ino# or it's just genuinely the
		   first node we found for this inode. Do a CRC check to protect against the former
		   case */
		uint32_t crc = crc32(0, ri, sizeof(*ri)-8);

		if (crc != je32_to_cpu(ri->node_crc)) {
			printk(KERN_NOTICE "jffs2_scan_inode_node(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
			       ofs, je32_to_cpu(ri->node_crc), crc);
			/* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */
			DIRTY_SPACE(PAD(je32_to_cpu(ri->totlen)));
			jffs2_free_raw_node_ref(raw);
			return 0;
		}
		ic = jffs2_scan_make_ino_cache(c, ino);
		if (!ic) {
			jffs2_free_raw_node_ref(raw);
			return -ENOMEM;
		}
	}

	/* Wheee. It worked */

	/* Link the ref into the inode's node chain and the block's
	   physical node chain */
	raw->flash_offset = ofs | REF_UNCHECKED;
	raw->__totlen = PAD(je32_to_cpu(ri->totlen));
	raw->next_phys = NULL;
	raw->next_in_ino = ic->nodes;

	ic->nodes = raw;
	if (!jeb->first_node)
		jeb->first_node = raw;
	if (jeb->last_node)
		jeb->last_node->next_phys = raw;
	jeb->last_node = raw;

	D1(printk(KERN_DEBUG "Node is ino #%u, version %d. Range 0x%x-0x%x\n",
		  je32_to_cpu(ri->ino), je32_to_cpu(ri->version),
		  je32_to_cpu(ri->offset),
		  je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize)));

	/* Feed the version number into the seed used to rotate the
	   eraseblock lists after the scan */
	pseudo_random += je32_to_cpu(ri->version);

	UNCHECKED_SPACE(PAD(je32_to_cpu(ri->totlen)));
	return 0;
}
733 | |||
/*
 * Record a directory entry node found during the scan.
 *
 * Unlike inode nodes, dirents are fully validated here (node CRC and
 * name CRC), because the name is needed immediately to build the
 * parent directory's scan_dents list.  A failed CRC marks the node's
 * space dirty and returns 0; allocation failures return -ENOMEM.
 */
static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  struct jffs2_raw_dirent *rd, uint32_t ofs)
{
	struct jffs2_raw_node_ref *raw;
	struct jffs2_full_dirent *fd;
	struct jffs2_inode_cache *ic;
	uint32_t crc;

	D1(printk(KERN_DEBUG "jffs2_scan_dirent_node(): Node at 0x%08x\n", ofs));

	/* We don't get here unless the node is still valid, so we don't have to
	   mask in the ACCURATE bit any more. */
	crc = crc32(0, rd, sizeof(*rd)-8);

	if (crc != je32_to_cpu(rd->node_crc)) {
		printk(KERN_NOTICE "jffs2_scan_dirent_node(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
		       ofs, je32_to_cpu(rd->node_crc), crc);
		/* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */
		DIRTY_SPACE(PAD(je32_to_cpu(rd->totlen)));
		return 0;
	}

	pseudo_random += je32_to_cpu(rd->version);

	/* +1 for the NUL terminator appended below */
	fd = jffs2_alloc_full_dirent(rd->nsize+1);
	if (!fd) {
		return -ENOMEM;
	}
	memcpy(&fd->name, rd->name, rd->nsize);
	fd->name[rd->nsize] = 0;

	crc = crc32(0, fd->name, rd->nsize);
	if (crc != je32_to_cpu(rd->name_crc)) {
		printk(KERN_NOTICE "jffs2_scan_dirent_node(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
		       ofs, je32_to_cpu(rd->name_crc), crc);
		D1(printk(KERN_NOTICE "Name for which CRC failed is (now) '%s', ino #%d\n", fd->name, je32_to_cpu(rd->ino)));
		jffs2_free_full_dirent(fd);
		/* FIXME: Why do we believe totlen? */
		/* We believe totlen because the CRC on the node _header_ was OK, just the name failed. */
		DIRTY_SPACE(PAD(je32_to_cpu(rd->totlen)));
		return 0;
	}
	raw = jffs2_alloc_raw_node_ref();
	if (!raw) {
		jffs2_free_full_dirent(fd);
		printk(KERN_NOTICE "jffs2_scan_dirent_node(): allocation of node reference failed\n");
		return -ENOMEM;
	}
	/* The dirent belongs to its *parent* directory's inode cache */
	ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(rd->pino));
	if (!ic) {
		jffs2_free_full_dirent(fd);
		jffs2_free_raw_node_ref(raw);
		return -ENOMEM;
	}

	/* Link the ref into the parent's node chain and the block's
	   physical node chain; fully validated, so REF_PRISTINE */
	raw->__totlen = PAD(je32_to_cpu(rd->totlen));
	raw->flash_offset = ofs | REF_PRISTINE;
	raw->next_phys = NULL;
	raw->next_in_ino = ic->nodes;
	ic->nodes = raw;
	if (!jeb->first_node)
		jeb->first_node = raw;
	if (jeb->last_node)
		jeb->last_node->next_phys = raw;
	jeb->last_node = raw;

	fd->raw = raw;
	fd->next = NULL;
	fd->version = je32_to_cpu(rd->version);
	fd->ino = je32_to_cpu(rd->ino);
	fd->nhash = full_name_hash(fd->name, rd->nsize);
	fd->type = rd->type;
	USED_SPACE(PAD(je32_to_cpu(rd->totlen)));
	jffs2_add_fd_to_list(c, fd, &ic->scan_dents);

	return 0;
}
811 | |||
812 | static int count_list(struct list_head *l) | ||
813 | { | ||
814 | uint32_t count = 0; | ||
815 | struct list_head *tmp; | ||
816 | |||
817 | list_for_each(tmp, l) { | ||
818 | count++; | ||
819 | } | ||
820 | return count; | ||
821 | } | ||
822 | |||
823 | /* Note: This breaks if list_empty(head). I don't care. You | ||
824 | might, if you copy this code and use it elsewhere :) */ | ||
825 | static void rotate_list(struct list_head *head, uint32_t count) | ||
826 | { | ||
827 | struct list_head *n = head->next; | ||
828 | |||
829 | list_del(head); | ||
830 | while(count--) { | ||
831 | n = n->next; | ||
832 | } | ||
833 | list_add(head, n); | ||
834 | } | ||
835 | |||
836 | void jffs2_rotate_lists(struct jffs2_sb_info *c) | ||
837 | { | ||
838 | uint32_t x; | ||
839 | uint32_t rotateby; | ||
840 | |||
841 | x = count_list(&c->clean_list); | ||
842 | if (x) { | ||
843 | rotateby = pseudo_random % x; | ||
844 | D1(printk(KERN_DEBUG "Rotating clean_list by %d\n", rotateby)); | ||
845 | |||
846 | rotate_list((&c->clean_list), rotateby); | ||
847 | |||
848 | D1(printk(KERN_DEBUG "Erase block at front of clean_list is at %08x\n", | ||
849 | list_entry(c->clean_list.next, struct jffs2_eraseblock, list)->offset)); | ||
850 | } else { | ||
851 | D1(printk(KERN_DEBUG "Not rotating empty clean_list\n")); | ||
852 | } | ||
853 | |||
854 | x = count_list(&c->very_dirty_list); | ||
855 | if (x) { | ||
856 | rotateby = pseudo_random % x; | ||
857 | D1(printk(KERN_DEBUG "Rotating very_dirty_list by %d\n", rotateby)); | ||
858 | |||
859 | rotate_list((&c->very_dirty_list), rotateby); | ||
860 | |||
861 | D1(printk(KERN_DEBUG "Erase block at front of very_dirty_list is at %08x\n", | ||
862 | list_entry(c->very_dirty_list.next, struct jffs2_eraseblock, list)->offset)); | ||
863 | } else { | ||
864 | D1(printk(KERN_DEBUG "Not rotating empty very_dirty_list\n")); | ||
865 | } | ||
866 | |||
867 | x = count_list(&c->dirty_list); | ||
868 | if (x) { | ||
869 | rotateby = pseudo_random % x; | ||
870 | D1(printk(KERN_DEBUG "Rotating dirty_list by %d\n", rotateby)); | ||
871 | |||
872 | rotate_list((&c->dirty_list), rotateby); | ||
873 | |||
874 | D1(printk(KERN_DEBUG "Erase block at front of dirty_list is at %08x\n", | ||
875 | list_entry(c->dirty_list.next, struct jffs2_eraseblock, list)->offset)); | ||
876 | } else { | ||
877 | D1(printk(KERN_DEBUG "Not rotating empty dirty_list\n")); | ||
878 | } | ||
879 | |||
880 | x = count_list(&c->erasable_list); | ||
881 | if (x) { | ||
882 | rotateby = pseudo_random % x; | ||
883 | D1(printk(KERN_DEBUG "Rotating erasable_list by %d\n", rotateby)); | ||
884 | |||
885 | rotate_list((&c->erasable_list), rotateby); | ||
886 | |||
887 | D1(printk(KERN_DEBUG "Erase block at front of erasable_list is at %08x\n", | ||
888 | list_entry(c->erasable_list.next, struct jffs2_eraseblock, list)->offset)); | ||
889 | } else { | ||
890 | D1(printk(KERN_DEBUG "Not rotating empty erasable_list\n")); | ||
891 | } | ||
892 | |||
893 | if (c->nr_erasing_blocks) { | ||
894 | rotateby = pseudo_random % c->nr_erasing_blocks; | ||
895 | D1(printk(KERN_DEBUG "Rotating erase_pending_list by %d\n", rotateby)); | ||
896 | |||
897 | rotate_list((&c->erase_pending_list), rotateby); | ||
898 | |||
899 | D1(printk(KERN_DEBUG "Erase block at front of erase_pending_list is at %08x\n", | ||
900 | list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list)->offset)); | ||
901 | } else { | ||
902 | D1(printk(KERN_DEBUG "Not rotating empty erase_pending_list\n")); | ||
903 | } | ||
904 | |||
905 | if (c->nr_free_blocks) { | ||
906 | rotateby = pseudo_random % c->nr_free_blocks; | ||
907 | D1(printk(KERN_DEBUG "Rotating free_list by %d\n", rotateby)); | ||
908 | |||
909 | rotate_list((&c->free_list), rotateby); | ||
910 | |||
911 | D1(printk(KERN_DEBUG "Erase block at front of free_list is at %08x\n", | ||
912 | list_entry(c->free_list.next, struct jffs2_eraseblock, list)->offset)); | ||
913 | } else { | ||
914 | D1(printk(KERN_DEBUG "Not rotating empty free_list\n")); | ||
915 | } | ||
916 | } | ||
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c new file mode 100644 index 000000000000..6b2a441d2766 --- /dev/null +++ b/fs/jffs2/super.c | |||
@@ -0,0 +1,365 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2001-2003 Red Hat, Inc. | ||
5 | * | ||
6 | * Created by David Woodhouse <dwmw2@infradead.org> | ||
7 | * | ||
8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
9 | * | ||
10 | * $Id: super.c,v 1.104 2004/11/23 15:37:31 gleixner Exp $ | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <linux/config.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/list.h> | ||
20 | #include <linux/fs.h> | ||
21 | #include <linux/mount.h> | ||
22 | #include <linux/jffs2.h> | ||
23 | #include <linux/pagemap.h> | ||
24 | #include <linux/mtd/mtd.h> | ||
25 | #include <linux/ctype.h> | ||
26 | #include <linux/namei.h> | ||
27 | #include "compr.h" | ||
28 | #include "nodelist.h" | ||
29 | |||
30 | static void jffs2_put_super(struct super_block *); | ||
31 | |||
32 | static kmem_cache_t *jffs2_inode_cachep; | ||
33 | |||
34 | static struct inode *jffs2_alloc_inode(struct super_block *sb) | ||
35 | { | ||
36 | struct jffs2_inode_info *ei; | ||
37 | ei = (struct jffs2_inode_info *)kmem_cache_alloc(jffs2_inode_cachep, SLAB_KERNEL); | ||
38 | if (!ei) | ||
39 | return NULL; | ||
40 | return &ei->vfs_inode; | ||
41 | } | ||
42 | |||
43 | static void jffs2_destroy_inode(struct inode *inode) | ||
44 | { | ||
45 | kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode)); | ||
46 | } | ||
47 | |||
48 | static void jffs2_i_init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) | ||
49 | { | ||
50 | struct jffs2_inode_info *ei = (struct jffs2_inode_info *) foo; | ||
51 | |||
52 | if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == | ||
53 | SLAB_CTOR_CONSTRUCTOR) { | ||
54 | init_MUTEX_LOCKED(&ei->sem); | ||
55 | inode_init_once(&ei->vfs_inode); | ||
56 | } | ||
57 | } | ||
58 | |||
/*
 * Sync the filesystem: flush (and pad) the write-behind buffer so
 * previously written data reaches the flash.  The 'wait' argument is
 * unused here.
 */
static int jffs2_sync_fs(struct super_block *sb, int wait)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);

	/* alloc_sem serialises us against writers using the wbuf */
	down(&c->alloc_sem);
	jffs2_flush_wbuf_pad(c);
	up(&c->alloc_sem);
	return 0;
}
68 | |||
69 | static struct super_operations jffs2_super_operations = | ||
70 | { | ||
71 | .alloc_inode = jffs2_alloc_inode, | ||
72 | .destroy_inode =jffs2_destroy_inode, | ||
73 | .read_inode = jffs2_read_inode, | ||
74 | .put_super = jffs2_put_super, | ||
75 | .write_super = jffs2_write_super, | ||
76 | .statfs = jffs2_statfs, | ||
77 | .remount_fs = jffs2_remount_fs, | ||
78 | .clear_inode = jffs2_clear_inode, | ||
79 | .dirty_inode = jffs2_dirty_inode, | ||
80 | .sync_fs = jffs2_sync_fs, | ||
81 | }; | ||
82 | |||
83 | static int jffs2_sb_compare(struct super_block *sb, void *data) | ||
84 | { | ||
85 | struct jffs2_sb_info *p = data; | ||
86 | struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); | ||
87 | |||
88 | /* The superblocks are considered to be equivalent if the underlying MTD | ||
89 | device is the same one */ | ||
90 | if (c->mtd == p->mtd) { | ||
91 | D1(printk(KERN_DEBUG "jffs2_sb_compare: match on device %d (\"%s\")\n", p->mtd->index, p->mtd->name)); | ||
92 | return 1; | ||
93 | } else { | ||
94 | D1(printk(KERN_DEBUG "jffs2_sb_compare: No match, device %d (\"%s\"), device %d (\"%s\")\n", | ||
95 | c->mtd->index, c->mtd->name, p->mtd->index, p->mtd->name)); | ||
96 | return 0; | ||
97 | } | ||
98 | } | ||
99 | |||
/*
 * sget() set callback: install the jffs2_sb_info passed in 'data' on
 * a freshly created superblock.  Always succeeds.
 */
static int jffs2_sb_set(struct super_block *sb, void *data)
{
	struct jffs2_sb_info *p = data;

	/* For persistence of NFS exports etc. we use the same s_dev
	   each time we mount the device, don't just use an anonymous
	   device */
	sb->s_fs_info = p;
	p->os_priv = sb;
	sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, p->mtd->index);

	return 0;
}
113 | |||
/*
 * Obtain (or reuse) the superblock for the given MTD device and fill
 * it.  On the paths where no new superblock is set up, the freshly
 * allocated sb_info and the caller's MTD reference are released via
 * the out_put label.
 */
static struct super_block *jffs2_get_sb_mtd(struct file_system_type *fs_type,
				       int flags, const char *dev_name,
				       void *data, struct mtd_info *mtd)
{
	struct super_block *sb;
	struct jffs2_sb_info *c;
	int ret;

	c = kmalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);
	memset(c, 0, sizeof(*c));
	c->mtd = mtd;

	/* sget() either finds an existing superblock on the same MTD
	   device (jffs2_sb_compare) or installs 'c' on a new one
	   (jffs2_sb_set) */
	sb = sget(fs_type, jffs2_sb_compare, jffs2_sb_set, c);

	if (IS_ERR(sb))
		goto out_put;

	if (sb->s_root) {
		/* New mountpoint for JFFS2 which is already mounted */
		/* The existing mount has its own sb_info and MTD
		   reference, so ours are surplus */
		D1(printk(KERN_DEBUG "jffs2_get_sb_mtd(): Device %d (\"%s\") is already mounted\n",
			  mtd->index, mtd->name));
		goto out_put;
	}

	D1(printk(KERN_DEBUG "jffs2_get_sb_mtd(): New superblock for device %d (\"%s\")\n",
		  mtd->index, mtd->name));

	sb->s_op = &jffs2_super_operations;
	sb->s_flags = flags | MS_NOATIME;

	ret = jffs2_do_fill_super(sb, data, (flags&MS_VERBOSE)?1:0);

	if (ret) {
		/* Failure case... */
		up_write(&sb->s_umount);
		deactivate_super(sb);
		return ERR_PTR(ret);
	}

	sb->s_flags |= MS_ACTIVE;
	return sb;

out_put:
	kfree(c);
	put_mtd_device(mtd);

	return sb;
}
164 | |||
165 | static struct super_block *jffs2_get_sb_mtdnr(struct file_system_type *fs_type, | ||
166 | int flags, const char *dev_name, | ||
167 | void *data, int mtdnr) | ||
168 | { | ||
169 | struct mtd_info *mtd; | ||
170 | |||
171 | mtd = get_mtd_device(NULL, mtdnr); | ||
172 | if (!mtd) { | ||
173 | D1(printk(KERN_DEBUG "jffs2: MTD device #%u doesn't appear to exist\n", mtdnr)); | ||
174 | return ERR_PTR(-EINVAL); | ||
175 | } | ||
176 | |||
177 | return jffs2_get_sb_mtd(fs_type, flags, dev_name, data, mtd); | ||
178 | } | ||
179 | |||
180 | static struct super_block *jffs2_get_sb(struct file_system_type *fs_type, | ||
181 | int flags, const char *dev_name, | ||
182 | void *data) | ||
183 | { | ||
184 | int err; | ||
185 | struct nameidata nd; | ||
186 | int mtdnr; | ||
187 | |||
188 | if (!dev_name) | ||
189 | return ERR_PTR(-EINVAL); | ||
190 | |||
191 | D1(printk(KERN_DEBUG "jffs2_get_sb(): dev_name \"%s\"\n", dev_name)); | ||
192 | |||
193 | /* The preferred way of mounting in future; especially when | ||
194 | CONFIG_BLK_DEV is implemented - we specify the underlying | ||
195 | MTD device by number or by name, so that we don't require | ||
196 | block device support to be present in the kernel. */ | ||
197 | |||
198 | /* FIXME: How to do the root fs this way? */ | ||
199 | |||
200 | if (dev_name[0] == 'm' && dev_name[1] == 't' && dev_name[2] == 'd') { | ||
201 | /* Probably mounting without the blkdev crap */ | ||
202 | if (dev_name[3] == ':') { | ||
203 | struct mtd_info *mtd; | ||
204 | |||
205 | /* Mount by MTD device name */ | ||
206 | D1(printk(KERN_DEBUG "jffs2_get_sb(): mtd:%%s, name \"%s\"\n", dev_name+4)); | ||
207 | for (mtdnr = 0; mtdnr < MAX_MTD_DEVICES; mtdnr++) { | ||
208 | mtd = get_mtd_device(NULL, mtdnr); | ||
209 | if (mtd) { | ||
210 | if (!strcmp(mtd->name, dev_name+4)) | ||
211 | return jffs2_get_sb_mtd(fs_type, flags, dev_name, data, mtd); | ||
212 | put_mtd_device(mtd); | ||
213 | } | ||
214 | } | ||
215 | printk(KERN_NOTICE "jffs2_get_sb(): MTD device with name \"%s\" not found.\n", dev_name+4); | ||
216 | } else if (isdigit(dev_name[3])) { | ||
217 | /* Mount by MTD device number name */ | ||
218 | char *endptr; | ||
219 | |||
220 | mtdnr = simple_strtoul(dev_name+3, &endptr, 0); | ||
221 | if (!*endptr) { | ||
222 | /* It was a valid number */ | ||
223 | D1(printk(KERN_DEBUG "jffs2_get_sb(): mtd%%d, mtdnr %d\n", mtdnr)); | ||
224 | return jffs2_get_sb_mtdnr(fs_type, flags, dev_name, data, mtdnr); | ||
225 | } | ||
226 | } | ||
227 | } | ||
228 | |||
229 | /* Try the old way - the hack where we allowed users to mount | ||
230 | /dev/mtdblock$(n) but didn't actually _use_ the blkdev */ | ||
231 | |||
232 | err = path_lookup(dev_name, LOOKUP_FOLLOW, &nd); | ||
233 | |||
234 | D1(printk(KERN_DEBUG "jffs2_get_sb(): path_lookup() returned %d, inode %p\n", | ||
235 | err, nd.dentry->d_inode)); | ||
236 | |||
237 | if (err) | ||
238 | return ERR_PTR(err); | ||
239 | |||
240 | err = -EINVAL; | ||
241 | |||
242 | if (!S_ISBLK(nd.dentry->d_inode->i_mode)) | ||
243 | goto out; | ||
244 | |||
245 | if (nd.mnt->mnt_flags & MNT_NODEV) { | ||
246 | err = -EACCES; | ||
247 | goto out; | ||
248 | } | ||
249 | |||
250 | if (imajor(nd.dentry->d_inode) != MTD_BLOCK_MAJOR) { | ||
251 | if (!(flags & MS_VERBOSE)) /* Yes I mean this. Strangely */ | ||
252 | printk(KERN_NOTICE "Attempt to mount non-MTD device \"%s\" as JFFS2\n", | ||
253 | dev_name); | ||
254 | goto out; | ||
255 | } | ||
256 | |||
257 | mtdnr = iminor(nd.dentry->d_inode); | ||
258 | path_release(&nd); | ||
259 | |||
260 | return jffs2_get_sb_mtdnr(fs_type, flags, dev_name, data, mtdnr); | ||
261 | |||
262 | out: | ||
263 | path_release(&nd); | ||
264 | return ERR_PTR(err); | ||
265 | } | ||
266 | |||
/* Release the filesystem's in-core state at unmount time.
 *
 * Teardown order matters: the GC thread is stopped first (it was only
 * started for read-write mounts), the write-buffer is flushed under
 * alloc_sem before any node structures are freed, and the MTD device
 * is synced last. The mtd reference and the jffs2_sb_info itself are
 * released later, in jffs2_kill_sb(). */
static void jffs2_put_super (struct super_block *sb)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);

	D2(printk(KERN_DEBUG "jffs2: jffs2_put_super()\n"));

	/* GC thread only exists on read-write mounts. */
	if (!(sb->s_flags & MS_RDONLY))
		jffs2_stop_garbage_collect_thread(c);
	/* Flush and pad any pending write-buffer contents before the
	   node lists that describe them are torn down. */
	down(&c->alloc_sem);
	jffs2_flush_wbuf_pad(c);
	up(&c->alloc_sem);
	jffs2_free_ino_caches(c);
	jffs2_free_raw_node_refs(c);
	/* c->blocks was allocated with vmalloc or kmalloc depending on
	   this flag; free with the matching routine. */
	if (c->mtd->flags & MTD_NO_VIRTBLOCKS)
		vfree(c->blocks);
	else
		kfree(c->blocks);
	jffs2_flash_cleanup(c);
	kfree(c->inocache_list);
	/* sync is optional in the mtd_info ops; only call if present. */
	if (c->mtd->sync)
		c->mtd->sync(c->mtd);

	D1(printk(KERN_DEBUG "jffs2_put_super returning\n"));
}
291 | |||
/* Final superblock destruction: run the generic VFS shutdown (which
 * calls jffs2_put_super() for mounted superblocks), then drop the MTD
 * device reference taken at mount time and free the jffs2_sb_info that
 * jffs2_get_sb_mtd() allocated. 'c' must be read before shutdown only
 * because sb's fs-private pointer is still valid here. */
static void jffs2_kill_sb(struct super_block *sb)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
	generic_shutdown_super(sb);
	put_mtd_device(c->mtd);
	kfree(c);
}
299 | |||
/* Filesystem type registration record: mount via jffs2_get_sb(),
 * teardown via jffs2_kill_sb(). Registered in init_jffs2_fs(). */
static struct file_system_type jffs2_fs_type = {
	.owner =	THIS_MODULE,
	.name =		"jffs2",
	.get_sb =	jffs2_get_sb,
	.kill_sb =	jffs2_kill_sb,
};
306 | |||
/* Module initialisation: print the version banner, create the inode
 * slab cache, initialise the compressors and node slab caches, then
 * register the filesystem type. On any failure, the goto labels undo
 * the prior steps in reverse order. Returns 0 or a negative errno. */
static int __init init_jffs2_fs(void)
{
	int ret;

	printk(KERN_INFO "JFFS2 version 2.2."
#ifdef CONFIG_JFFS2_FS_NAND
	       " (NAND)"
#endif
	       " (C) 2001-2003 Red Hat, Inc.\n");

	/* SLAB_RECLAIM_ACCOUNT: inode objects are reclaimable, so count
	   them against the reclaimable-slab total. jffs2_i_init_once is
	   the constructor run on newly allocated objects. */
	jffs2_inode_cachep = kmem_cache_create("jffs2_i",
					     sizeof(struct jffs2_inode_info),
					     0, SLAB_RECLAIM_ACCOUNT,
					     jffs2_i_init_once, NULL);
	if (!jffs2_inode_cachep) {
		printk(KERN_ERR "JFFS2 error: Failed to initialise inode cache\n");
		return -ENOMEM;
	}
	ret = jffs2_compressors_init();
	if (ret) {
		printk(KERN_ERR "JFFS2 error: Failed to initialise compressors\n");
		goto out;
	}
	ret = jffs2_create_slab_caches();
	if (ret) {
		printk(KERN_ERR "JFFS2 error: Failed to initialise slab caches\n");
		goto out_compressors;
	}
	ret = register_filesystem(&jffs2_fs_type);
	if (ret) {
		printk(KERN_ERR "JFFS2 error: Failed to register filesystem\n");
		goto out_slab;
	}
	return 0;

	/* Error unwinding, in reverse order of the setup above. */
 out_slab:
	jffs2_destroy_slab_caches();
 out_compressors:
	jffs2_compressors_exit();
 out:
	kmem_cache_destroy(jffs2_inode_cachep);
	return ret;
}
350 | |||
/* Module exit: tear everything down in the reverse of the order it was
 * set up in init_jffs2_fs(). The filesystem must be unregistered first
 * so no new mounts can race with the cache destruction. */
static void __exit exit_jffs2_fs(void)
{
	unregister_filesystem(&jffs2_fs_type);
	jffs2_destroy_slab_caches();
	jffs2_compressors_exit();
	kmem_cache_destroy(jffs2_inode_cachep);
}
358 | |||
359 | module_init(init_jffs2_fs); | ||
360 | module_exit(exit_jffs2_fs); | ||
361 | |||
362 | MODULE_DESCRIPTION("The Journalling Flash File System, v2"); | ||
363 | MODULE_AUTHOR("Red Hat, Inc."); | ||
364 | MODULE_LICENSE("GPL"); // Actually dual-licensed, but it doesn't matter for | ||
365 | // the sake of this tag. It's Free Software. | ||
diff --git a/fs/jffs2/symlink.c b/fs/jffs2/symlink.c new file mode 100644 index 000000000000..7b1820d13712 --- /dev/null +++ b/fs/jffs2/symlink.c | |||
@@ -0,0 +1,45 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2001, 2002 Red Hat, Inc. | ||
5 | * | ||
6 | * Created by David Woodhouse <dwmw2@infradead.org> | ||
7 | * | ||
8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
9 | * | ||
10 | * $Id: symlink.c,v 1.14 2004/11/16 20:36:12 dwmw2 Exp $ | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | |||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include <linux/fs.h> | ||
18 | #include <linux/namei.h> | ||
19 | #include "nodelist.h" | ||
20 | |||
21 | static int jffs2_follow_link(struct dentry *dentry, struct nameidata *nd); | ||
22 | static void jffs2_put_link(struct dentry *dentry, struct nameidata *nd); | ||
23 | |||
/* Inode operations for JFFS2 symlinks. readlink is handled by the
 * generic VFS helper, which relies on the follow_link/put_link pair
 * below; put_link frees the kmalloc'd target that follow_link stashed
 * in the nameidata. */
struct inode_operations jffs2_symlink_inode_operations =
{
	.readlink =	generic_readlink,
	.follow_link =	jffs2_follow_link,
	.put_link =	jffs2_put_link,
	.setattr =	jffs2_setattr
};
31 | |||
32 | static int jffs2_follow_link(struct dentry *dentry, struct nameidata *nd) | ||
33 | { | ||
34 | unsigned char *buf; | ||
35 | buf = jffs2_getlink(JFFS2_SB_INFO(dentry->d_inode->i_sb), JFFS2_INODE_INFO(dentry->d_inode)); | ||
36 | nd_set_link(nd, buf); | ||
37 | return 0; | ||
38 | } | ||
39 | |||
/* put_link: release the target string stored by jffs2_follow_link().
 * An ERR_PTR may have been stored instead of a real allocation, in
 * which case there is nothing to free. */
static void jffs2_put_link(struct dentry *dentry, struct nameidata *nd)
{
	char *target = nd_get_link(nd);

	if (IS_ERR(target))
		return;
	kfree(target);
}
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c new file mode 100644 index 000000000000..c8128069ecf0 --- /dev/null +++ b/fs/jffs2/wbuf.c | |||
@@ -0,0 +1,1184 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2001-2003 Red Hat, Inc. | ||
5 | * Copyright (C) 2004 Thomas Gleixner <tglx@linutronix.de> | ||
6 | * | ||
7 | * Created by David Woodhouse <dwmw2@infradead.org> | ||
8 | * Modified debugged and enhanced by Thomas Gleixner <tglx@linutronix.de> | ||
9 | * | ||
10 | * For licensing information, see the file 'LICENCE' in this directory. | ||
11 | * | ||
12 | * $Id: wbuf.c,v 1.82 2004/11/20 22:08:31 dwmw2 Exp $ | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/mtd/mtd.h> | ||
19 | #include <linux/crc32.h> | ||
20 | #include <linux/mtd/nand.h> | ||
21 | #include "nodelist.h" | ||
22 | |||
23 | /* For testing write failures */ | ||
24 | #undef BREAKME | ||
25 | #undef BREAKMEHEADER | ||
26 | |||
27 | #ifdef BREAKME | ||
28 | static unsigned char *brokenbuf; | ||
29 | #endif | ||
30 | |||
/* max. erase failures before we mark a block bad */
#define MAX_ERASE_FAILURES 	2

/* two seconds timeout for timed wbuf-flushing.
   Parenthesized so the macro expands safely inside any surrounding
   expression (the bare `2 * HZ` would misbind next to `/` or `%`). */
#define WBUF_FLUSH_TIMEOUT	(2 * HZ)

/* One entry per inode with data pending in the write-buffer; kept as a
   singly-linked list hanging off c->wbuf_inodes. */
struct jffs2_inodirty {
	uint32_t ino;
	struct jffs2_inodirty *next;
};

/* Sentinel: c->wbuf_inodes is pointed here after an allocation failure
   to mean "treat every inode as dirty". Never kfree'd. */
static struct jffs2_inodirty inodirty_nomem;
43 | |||
44 | static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino) | ||
45 | { | ||
46 | struct jffs2_inodirty *this = c->wbuf_inodes; | ||
47 | |||
48 | /* If a malloc failed, consider _everything_ dirty */ | ||
49 | if (this == &inodirty_nomem) | ||
50 | return 1; | ||
51 | |||
52 | /* If ino == 0, _any_ non-GC writes mean 'yes' */ | ||
53 | if (this && !ino) | ||
54 | return 1; | ||
55 | |||
56 | /* Look to see if the inode in question is pending in the wbuf */ | ||
57 | while (this) { | ||
58 | if (this->ino == ino) | ||
59 | return 1; | ||
60 | this = this->next; | ||
61 | } | ||
62 | return 0; | ||
63 | } | ||
64 | |||
65 | static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c) | ||
66 | { | ||
67 | struct jffs2_inodirty *this; | ||
68 | |||
69 | this = c->wbuf_inodes; | ||
70 | |||
71 | if (this != &inodirty_nomem) { | ||
72 | while (this) { | ||
73 | struct jffs2_inodirty *next = this->next; | ||
74 | kfree(this); | ||
75 | this = next; | ||
76 | } | ||
77 | } | ||
78 | c->wbuf_inodes = NULL; | ||
79 | } | ||
80 | |||
81 | static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino) | ||
82 | { | ||
83 | struct jffs2_inodirty *new; | ||
84 | |||
85 | /* Mark the superblock dirty so that kupdated will flush... */ | ||
86 | OFNI_BS_2SFFJ(c)->s_dirt = 1; | ||
87 | |||
88 | if (jffs2_wbuf_pending_for_ino(c, ino)) | ||
89 | return; | ||
90 | |||
91 | new = kmalloc(sizeof(*new), GFP_KERNEL); | ||
92 | if (!new) { | ||
93 | D1(printk(KERN_DEBUG "No memory to allocate inodirty. Fallback to all considered dirty\n")); | ||
94 | jffs2_clear_wbuf_ino_list(c); | ||
95 | c->wbuf_inodes = &inodirty_nomem; | ||
96 | return; | ||
97 | } | ||
98 | new->ino = ino; | ||
99 | new->next = c->wbuf_inodes; | ||
100 | c->wbuf_inodes = new; | ||
101 | return; | ||
102 | } | ||
103 | |||
104 | static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c) | ||
105 | { | ||
106 | struct list_head *this, *next; | ||
107 | static int n; | ||
108 | |||
109 | if (list_empty(&c->erasable_pending_wbuf_list)) | ||
110 | return; | ||
111 | |||
112 | list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) { | ||
113 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
114 | |||
115 | D1(printk(KERN_DEBUG "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n", jeb->offset)); | ||
116 | list_del(this); | ||
117 | if ((jiffies + (n++)) & 127) { | ||
118 | /* Most of the time, we just erase it immediately. Otherwise we | ||
119 | spend ages scanning it on mount, etc. */ | ||
120 | D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n")); | ||
121 | list_add_tail(&jeb->list, &c->erase_pending_list); | ||
122 | c->nr_erasing_blocks++; | ||
123 | jffs2_erase_pending_trigger(c); | ||
124 | } else { | ||
125 | /* Sometimes, however, we leave it elsewhere so it doesn't get | ||
126 | immediately reused, and we spread the load a bit. */ | ||
127 | D1(printk(KERN_DEBUG "...and adding to erasable_list\n")); | ||
128 | list_add_tail(&jeb->list, &c->erasable_list); | ||
129 | } | ||
130 | } | ||
131 | } | ||
132 | |||
/* A write into 'jeb' failed: take the block out of normal circulation,
 * file it on the bad_used_list, and account its remaining free space
 * as wasted. Caller holds c->erase_completion_lock. */
static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	D1(printk("About to refile bad block at %08x\n", jeb->offset));

	D2(jffs2_dump_block_lists(c));
	/* File the existing block on the bad_used_list.... */
	if (c->nextblock == jeb)
		c->nextblock = NULL;
	else /* Not sure this should ever happen... need more coffee */
		list_del(&jeb->list);
	if (jeb->first_node) {
		D1(printk("Refiling block at %08x to bad_used_list\n", jeb->offset));
		list_add(&jeb->list, &c->bad_used_list);
	} else {
		/* NOTE(review): everything after this BUG() is unreachable -
		   we were writing to the block, so it must contain nodes.
		   The dead code below is kept as documentation of intent. */
		BUG();
		/* It has to have had some nodes or we couldn't be here */
		D1(printk("Refiling block at %08x to erase_pending_list\n", jeb->offset));
		list_add(&jeb->list, &c->erase_pending_list);
		c->nr_erasing_blocks++;
		jffs2_erase_pending_trigger(c);
	}
	D2(jffs2_dump_block_lists(c));

	/* Adjust its size counts accordingly: whatever was still free in
	   the failing block can never be used now. */
	c->wasted_size += jeb->free_size;
	c->free_size -= jeb->free_size;
	jeb->wasted_size += jeb->free_size;
	jeb->free_size = 0;

	ACCT_SANITY_CHECK(c,jeb);
	D1(ACCT_PARANOIA_CHECK(jeb));
}
165 | |||
/* Recover from failure to write wbuf. Recover the nodes up to the
 * wbuf, not the one which we were starting to try to write. */

/* Strategy: refile the failing block as bad, identify the span of
 * non-obsolete nodes [start, end) that must survive, re-read any data
 * that was already on flash (the rest still sits in the wbuf), reserve
 * space in a fresh block, rewrite the data there, and finally re-home
 * the affected jffs2_raw_node_ref entries onto the new block.
 * NOTE: control flow includes a goto INTO the if-body below
 * (read_failed) - the malloc-failure path shares the read-failure
 * handling; on both paths 'buf' is NULL there, so no leak. */
static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
{
	struct jffs2_eraseblock *jeb, *new_jeb;
	struct jffs2_raw_node_ref **first_raw, **raw;
	size_t retlen;
	int ret;
	unsigned char *buf;
	uint32_t start, end, ofs, len;

	spin_lock(&c->erase_completion_lock);

	/* The block the write-buffer was targeting is now suspect. */
	jeb = &c->blocks[c->wbuf_ofs / c->sector_size];

	jffs2_block_refile(c, jeb);

	/* Find the first node to be recovered, by skipping over every
	   node which ends before the wbuf starts, or which is obsolete. */
	first_raw = &jeb->first_node;
	while (*first_raw && 
	       (ref_obsolete(*first_raw) ||
		(ref_offset(*first_raw)+ref_totlen(c, jeb, *first_raw)) < c->wbuf_ofs)) {
		D1(printk(KERN_DEBUG "Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
			  ref_offset(*first_raw), ref_flags(*first_raw),
			  (ref_offset(*first_raw) + ref_totlen(c, jeb, *first_raw)),
			  c->wbuf_ofs));
		first_raw = &(*first_raw)->next_phys;
	}

	if (!*first_raw) {
		/* All nodes were obsolete. Nothing to recover. */
		D1(printk(KERN_DEBUG "No non-obsolete nodes to be recovered. Just filing block bad\n"));
		spin_unlock(&c->erase_completion_lock);
		return;
	}

	start = ref_offset(*first_raw);
	end = ref_offset(*first_raw) + ref_totlen(c, jeb, *first_raw);

	/* Find the last node to be recovered */
	raw = first_raw;
	while ((*raw)) {
		if (!ref_obsolete(*raw))
			end = ref_offset(*raw) + ref_totlen(c, jeb, *raw);

		raw = &(*raw)->next_phys;
	}
	spin_unlock(&c->erase_completion_lock);

	D1(printk(KERN_DEBUG "wbuf recover %08x-%08x\n", start, end));

	buf = NULL;
	if (start < c->wbuf_ofs) {
		/* First affected node was already partially written.
		 * Attempt to reread the old data into our buffer. */

		buf = kmalloc(end - start, GFP_KERNEL);
		if (!buf) {
			printk(KERN_CRIT "Malloc failure in wbuf recovery. Data loss ensues.\n");

			goto read_failed;
		}

		/* Do the read... */
		if (jffs2_cleanmarker_oob(c))
			ret = c->mtd->read_ecc(c->mtd, start, c->wbuf_ofs - start, &retlen, buf, NULL, c->oobinfo);
		else
			ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf);

		if (ret == -EBADMSG && retlen == c->wbuf_ofs - start) {
			/* ECC recovered */
			ret = 0;
		}
		if (ret || retlen != c->wbuf_ofs - start) {
			printk(KERN_CRIT "Old data are already lost in wbuf recovery. Data loss ensues.\n");

			kfree(buf);
			buf = NULL;
			/* Fall through: drop the partially-written first node
			   and try to salvage the rest from the wbuf. */
		read_failed:
			first_raw = &(*first_raw)->next_phys;
			/* If this was the only node to be recovered, give up */
			if (!(*first_raw))
				return;

			/* It wasn't. Go on and try to recover nodes complete in the wbuf */
			start = ref_offset(*first_raw);
		} else {
			/* Read succeeded. Copy the remaining data from the wbuf */
			memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
		}
	}
	/* OK... we're to rewrite (end-start) bytes of data from first_raw onwards. 
	   Either 'buf' contains the data, or we find it in the wbuf */


	/* ... and get an allocation of space from a shiny new block instead */
	ret = jffs2_reserve_space_gc(c, end-start, &ofs, &len);
	if (ret) {
		printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n");
		if (buf)
			kfree(buf);
		return;
	}
	if (end-start >= c->wbuf_pagesize) {
		/* Need to do another write immediately. This, btw,
		   means that we'll be writing from 'buf' and not from
		   the wbuf. Since if we're writing from the wbuf there
		   won't be more than a wbuf full of data, now will
		   there? :) */

		/* Whole pages go straight to flash; the tail below page
		   size is kept back in the wbuf. */
		uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);

		D1(printk(KERN_DEBUG "Write 0x%x bytes at 0x%08x in wbuf recover\n",
			  towrite, ofs));

#ifdef BREAKMEHEADER
		static int breakme;
		if (breakme++ == 20) {
			printk(KERN_NOTICE "Faking write error at 0x%08x\n", ofs);
			breakme = 0;
			c->mtd->write_ecc(c->mtd, ofs, towrite, &retlen,
					  brokenbuf, NULL, c->oobinfo);
			ret = -EIO;
		} else
#endif
		if (jffs2_cleanmarker_oob(c))
			ret = c->mtd->write_ecc(c->mtd, ofs, towrite, &retlen,
						buf, NULL, c->oobinfo);
		else
			ret = c->mtd->write(c->mtd, ofs, towrite, &retlen, buf);

		if (ret || retlen != towrite) {
			/* Argh. We tried. Really we did. */
			printk(KERN_CRIT "Recovery of wbuf failed due to a second write error\n");
			kfree(buf);

			/* If anything was actually written, mark that range
			   obsolete so the scanner never trusts it. */
			if (retlen) {
				struct jffs2_raw_node_ref *raw2;

				raw2 = jffs2_alloc_raw_node_ref();
				if (!raw2)
					return;

				raw2->flash_offset = ofs | REF_OBSOLETE;
				raw2->__totlen = ref_totlen(c, jeb, *first_raw);
				raw2->next_phys = NULL;
				raw2->next_in_ino = NULL;

				jffs2_add_physical_node_ref(c, raw2);
			}
			return;
		}
		printk(KERN_NOTICE "Recovery of wbuf succeeded to %08x\n", ofs);

		/* The sub-page tail of the recovered data becomes the new
		   write-buffer contents. */
		c->wbuf_len = (end - start) - towrite;
		c->wbuf_ofs = ofs + towrite;
		memcpy(c->wbuf, buf + towrite, c->wbuf_len);
		/* Don't muck about with c->wbuf_inodes. False positives are harmless. */

		kfree(buf);
	} else {
		/* OK, now we're left with the dregs in whichever buffer we're using */
		if (buf) {
			memcpy(c->wbuf, buf, end-start);
			kfree(buf);
		} else {
			memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
		}
		c->wbuf_ofs = ofs;
		c->wbuf_len = end - start;
	}

	/* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */
	new_jeb = &c->blocks[ofs / c->sector_size];

	spin_lock(&c->erase_completion_lock);
	if (new_jeb->first_node) {
		/* Odd, but possible with ST flash later maybe */
		new_jeb->last_node->next_phys = *first_raw;
	} else {
		new_jeb->first_node = *first_raw;
	}

	/* Walk the recovered refs, updating their offsets and the
	   per-block and filesystem-wide size accounting. */
	raw = first_raw;
	while (*raw) {
		uint32_t rawlen = ref_totlen(c, jeb, *raw);

		D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n",
			  rawlen, ref_offset(*raw), ref_flags(*raw), ofs));

		if (ref_obsolete(*raw)) {
			/* Shouldn't really happen much */
			new_jeb->dirty_size += rawlen;
			new_jeb->free_size -= rawlen;
			c->dirty_size += rawlen;
		} else {
			new_jeb->used_size += rawlen;
			new_jeb->free_size -= rawlen;
			jeb->dirty_size += rawlen;
			jeb->used_size -= rawlen;
			c->dirty_size += rawlen;
		}
		c->free_size -= rawlen;
		(*raw)->flash_offset = ofs | ref_flags(*raw);
		ofs += rawlen;
		new_jeb->last_node = *raw;

		raw = &(*raw)->next_phys;
	}

	/* Fix up the original jeb now it's on the bad_list */
	*first_raw = NULL;
	if (first_raw == &jeb->first_node) {
		jeb->last_node = NULL;
		D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset));
		list_del(&jeb->list);
		list_add(&jeb->list, &c->erase_pending_list);
		c->nr_erasing_blocks++;
		jffs2_erase_pending_trigger(c);
	}
	else
		jeb->last_node = container_of(first_raw, struct jffs2_raw_node_ref, next_phys);

	ACCT_SANITY_CHECK(c,jeb);
        D1(ACCT_PARANOIA_CHECK(jeb));

	ACCT_SANITY_CHECK(c,new_jeb);
        D1(ACCT_PARANOIA_CHECK(new_jeb));

	spin_unlock(&c->erase_completion_lock);

	D1(printk(KERN_DEBUG "wbuf recovery completed OK\n"));
}
401 | |||
/* Meaning of pad argument to __jffs2_flush_wbuf():
   0: Do not pad. Probably pointless - we only ever use this when we can't pad anyway.
   1: Pad, do not adjust nextblock free_size
   2: Pad, adjust nextblock free_size
*/
#define NOPAD		0
#define PAD_NOACCOUNT	1
#define PAD_ACCOUNTING	2
410 | |||
411 | static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) | ||
412 | { | ||
413 | int ret; | ||
414 | size_t retlen; | ||
415 | |||
416 | /* Nothing to do if not NAND flash. In particular, we shouldn't | ||
417 | del_timer() the timer we never initialised. */ | ||
418 | if (jffs2_can_mark_obsolete(c)) | ||
419 | return 0; | ||
420 | |||
421 | if (!down_trylock(&c->alloc_sem)) { | ||
422 | up(&c->alloc_sem); | ||
423 | printk(KERN_CRIT "jffs2_flush_wbuf() called with alloc_sem not locked!\n"); | ||
424 | BUG(); | ||
425 | } | ||
426 | |||
427 | if(!c->wbuf || !c->wbuf_len) | ||
428 | return 0; | ||
429 | |||
430 | /* claim remaining space on the page | ||
431 | this happens, if we have a change to a new block, | ||
432 | or if fsync forces us to flush the writebuffer. | ||
433 | if we have a switch to next page, we will not have | ||
434 | enough remaining space for this. | ||
435 | */ | ||
436 | if (pad) { | ||
437 | c->wbuf_len = PAD(c->wbuf_len); | ||
438 | |||
439 | /* Pad with JFFS2_DIRTY_BITMASK initially. this helps out ECC'd NOR | ||
440 | with 8 byte page size */ | ||
441 | memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len); | ||
442 | |||
443 | if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) { | ||
444 | struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len); | ||
445 | padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); | ||
446 | padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING); | ||
447 | padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len); | ||
448 | padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode)-4)); | ||
449 | } | ||
450 | } | ||
451 | /* else jffs2_flash_writev has actually filled in the rest of the | ||
452 | buffer for us, and will deal with the node refs etc. later. */ | ||
453 | |||
454 | #ifdef BREAKME | ||
455 | static int breakme; | ||
456 | if (breakme++ == 20) { | ||
457 | printk(KERN_NOTICE "Faking write error at 0x%08x\n", c->wbuf_ofs); | ||
458 | breakme = 0; | ||
459 | c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, | ||
460 | &retlen, brokenbuf, NULL, c->oobinfo); | ||
461 | ret = -EIO; | ||
462 | } else | ||
463 | #endif | ||
464 | |||
465 | if (jffs2_cleanmarker_oob(c)) | ||
466 | ret = c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf, NULL, c->oobinfo); | ||
467 | else | ||
468 | ret = c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf); | ||
469 | |||
470 | if (ret || retlen != c->wbuf_pagesize) { | ||
471 | if (ret) | ||
472 | printk(KERN_WARNING "jffs2_flush_wbuf(): Write failed with %d\n",ret); | ||
473 | else { | ||
474 | printk(KERN_WARNING "jffs2_flush_wbuf(): Write was short: %zd instead of %d\n", | ||
475 | retlen, c->wbuf_pagesize); | ||
476 | ret = -EIO; | ||
477 | } | ||
478 | |||
479 | jffs2_wbuf_recover(c); | ||
480 | |||
481 | return ret; | ||
482 | } | ||
483 | |||
484 | spin_lock(&c->erase_completion_lock); | ||
485 | |||
486 | /* Adjust free size of the block if we padded. */ | ||
487 | if (pad) { | ||
488 | struct jffs2_eraseblock *jeb; | ||
489 | |||
490 | jeb = &c->blocks[c->wbuf_ofs / c->sector_size]; | ||
491 | |||
492 | D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n", | ||
493 | (jeb==c->nextblock)?"next":"", jeb->offset)); | ||
494 | |||
495 | /* wbuf_pagesize - wbuf_len is the amount of space that's to be | ||
496 | padded. If there is less free space in the block than that, | ||
497 | something screwed up */ | ||
498 | if (jeb->free_size < (c->wbuf_pagesize - c->wbuf_len)) { | ||
499 | printk(KERN_CRIT "jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n", | ||
500 | c->wbuf_ofs, c->wbuf_len, c->wbuf_pagesize-c->wbuf_len); | ||
501 | printk(KERN_CRIT "jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n", | ||
502 | jeb->offset, jeb->free_size); | ||
503 | BUG(); | ||
504 | } | ||
505 | jeb->free_size -= (c->wbuf_pagesize - c->wbuf_len); | ||
506 | c->free_size -= (c->wbuf_pagesize - c->wbuf_len); | ||
507 | jeb->wasted_size += (c->wbuf_pagesize - c->wbuf_len); | ||
508 | c->wasted_size += (c->wbuf_pagesize - c->wbuf_len); | ||
509 | } | ||
510 | |||
511 | /* Stick any now-obsoleted blocks on the erase_pending_list */ | ||
512 | jffs2_refile_wbuf_blocks(c); | ||
513 | jffs2_clear_wbuf_ino_list(c); | ||
514 | spin_unlock(&c->erase_completion_lock); | ||
515 | |||
516 | memset(c->wbuf,0xff,c->wbuf_pagesize); | ||
517 | /* adjust write buffer offset, else we get a non contiguous write bug */ | ||
518 | c->wbuf_ofs += c->wbuf_pagesize; | ||
519 | c->wbuf_len = 0; | ||
520 | return 0; | ||
521 | } | ||
522 | |||
/* Trigger garbage collection to flush the write-buffer. 
   If ino arg is zero, do it if _any_ real (i.e. not GC) writes are 
   outstanding. If ino arg non-zero, do it only if a write for the
   given inode is outstanding. */
/* Returns 0 on success or the error from the flush/GC pass.
   Takes and releases c->alloc_sem; note that it is dropped and
   re-acquired around each GC pass, so other writers may interleave. */
int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
{
	uint32_t old_wbuf_ofs;
	uint32_t old_wbuf_len;
	int ret = 0;

	D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino));

	down(&c->alloc_sem);
	if (!jffs2_wbuf_pending_for_ino(c, ino)) {
		D1(printk(KERN_DEBUG "Ino #%d not pending in wbuf. Returning\n", ino));
		up(&c->alloc_sem);
		return 0;
	}

	/* Snapshot the buffer position: if GC moves wbuf_ofs, the old
	   contents have been written out and we are done. */
	old_wbuf_ofs = c->wbuf_ofs;
	old_wbuf_len = c->wbuf_len;

	if (c->unchecked_size) {
		/* GC won't make any progress for a while */
		D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() padding. Not finished checking\n"));
		down_write(&c->wbuf_sem);
		ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		up_write(&c->wbuf_sem);
	} else while (old_wbuf_len &&
		      old_wbuf_ofs == c->wbuf_ofs) {

		/* GC needs alloc_sem itself; drop it for the pass. */
		up(&c->alloc_sem);

		D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() calls gc pass\n"));

		ret = jffs2_garbage_collect_pass(c);
		if (ret) {
			/* GC failed. Flush it with padding instead */
			down(&c->alloc_sem);
			down_write(&c->wbuf_sem);
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			up_write(&c->wbuf_sem);
			break;
		}
		down(&c->alloc_sem);
	}

	D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() ends...\n"));

	up(&c->alloc_sem);
	return ret;
}
575 | |||
576 | /* Pad write-buffer to end and write it, wasting space. */ | ||
577 | int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c) | ||
578 | { | ||
579 | int ret; | ||
580 | |||
581 | down_write(&c->wbuf_sem); | ||
582 | ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT); | ||
583 | up_write(&c->wbuf_sem); | ||
584 | |||
585 | return ret; | ||
586 | } | ||
587 | |||
/* Round an address down to / take its offset within a wbuf page.
   NOTE: these expand a reference to a local variable 'c' (the
   jffs2_sb_info pointer), so they are only usable inside functions
   that declare one - they are not hygienic macros. */
#define PAGE_DIV(x) ( (x) & (~(c->wbuf_pagesize - 1)) )
#define PAGE_MOD(x) ( (x) & (c->wbuf_pagesize - 1) )
590 | int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsigned long count, loff_t to, size_t *retlen, uint32_t ino) | ||
591 | { | ||
592 | struct kvec outvecs[3]; | ||
593 | uint32_t totlen = 0; | ||
594 | uint32_t split_ofs = 0; | ||
595 | uint32_t old_totlen; | ||
596 | int ret, splitvec = -1; | ||
597 | int invec, outvec; | ||
598 | size_t wbuf_retlen; | ||
599 | unsigned char *wbuf_ptr; | ||
600 | size_t donelen = 0; | ||
601 | uint32_t outvec_to = to; | ||
602 | |||
603 | /* If not NAND flash, don't bother */ | ||
604 | if (!c->wbuf) | ||
605 | return jffs2_flash_direct_writev(c, invecs, count, to, retlen); | ||
606 | |||
607 | down_write(&c->wbuf_sem); | ||
608 | |||
609 | /* If wbuf_ofs is not initialized, set it to target address */ | ||
610 | if (c->wbuf_ofs == 0xFFFFFFFF) { | ||
611 | c->wbuf_ofs = PAGE_DIV(to); | ||
612 | c->wbuf_len = PAGE_MOD(to); | ||
613 | memset(c->wbuf,0xff,c->wbuf_pagesize); | ||
614 | } | ||
615 | |||
616 | /* Fixup the wbuf if we are moving to a new eraseblock. The checks below | ||
617 | fail for ECC'd NOR because cleanmarker == 16, so a block starts at | ||
618 | xxx0010. */ | ||
619 | if (jffs2_nor_ecc(c)) { | ||
620 | if (((c->wbuf_ofs % c->sector_size) == 0) && !c->wbuf_len) { | ||
621 | c->wbuf_ofs = PAGE_DIV(to); | ||
622 | c->wbuf_len = PAGE_MOD(to); | ||
623 | memset(c->wbuf,0xff,c->wbuf_pagesize); | ||
624 | } | ||
625 | } | ||
626 | |||
627 | /* Sanity checks on target address. | ||
628 | It's permitted to write at PAD(c->wbuf_len+c->wbuf_ofs), | ||
629 | and it's permitted to write at the beginning of a new | ||
630 | erase block. Anything else, and you die. | ||
631 | New block starts at xxx000c (0-b = block header) | ||
632 | */ | ||
633 | if ( (to & ~(c->sector_size-1)) != (c->wbuf_ofs & ~(c->sector_size-1)) ) { | ||
634 | /* It's a write to a new block */ | ||
635 | if (c->wbuf_len) { | ||
636 | D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx causes flush of wbuf at 0x%08x\n", (unsigned long)to, c->wbuf_ofs)); | ||
637 | ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT); | ||
638 | if (ret) { | ||
639 | /* the underlying layer has to check wbuf_len to do the cleanup */ | ||
640 | D1(printk(KERN_WARNING "jffs2_flush_wbuf() called from jffs2_flash_writev() failed %d\n", ret)); | ||
641 | *retlen = 0; | ||
642 | goto exit; | ||
643 | } | ||
644 | } | ||
645 | /* set pointer to new block */ | ||
646 | c->wbuf_ofs = PAGE_DIV(to); | ||
647 | c->wbuf_len = PAGE_MOD(to); | ||
648 | } | ||
649 | |||
650 | if (to != PAD(c->wbuf_ofs + c->wbuf_len)) { | ||
651 | /* We're not writing immediately after the writebuffer. Bad. */ | ||
652 | printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write to %08lx\n", (unsigned long)to); | ||
653 | if (c->wbuf_len) | ||
654 | printk(KERN_CRIT "wbuf was previously %08x-%08x\n", | ||
655 | c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len); | ||
656 | BUG(); | ||
657 | } | ||
658 | |||
659 | /* Note outvecs[3] above. We know count is never greater than 2 */ | ||
660 | if (count > 2) { | ||
661 | printk(KERN_CRIT "jffs2_flash_writev(): count is %ld\n", count); | ||
662 | BUG(); | ||
663 | } | ||
664 | |||
665 | invec = 0; | ||
666 | outvec = 0; | ||
667 | |||
668 | /* Fill writebuffer first, if already in use */ | ||
669 | if (c->wbuf_len) { | ||
670 | uint32_t invec_ofs = 0; | ||
671 | |||
672 | /* adjust alignment offset */ | ||
673 | if (c->wbuf_len != PAGE_MOD(to)) { | ||
674 | c->wbuf_len = PAGE_MOD(to); | ||
675 | /* take care of alignment to next page */ | ||
676 | if (!c->wbuf_len) | ||
677 | c->wbuf_len = c->wbuf_pagesize; | ||
678 | } | ||
679 | |||
680 | while(c->wbuf_len < c->wbuf_pagesize) { | ||
681 | uint32_t thislen; | ||
682 | |||
683 | if (invec == count) | ||
684 | goto alldone; | ||
685 | |||
686 | thislen = c->wbuf_pagesize - c->wbuf_len; | ||
687 | |||
688 | if (thislen >= invecs[invec].iov_len) | ||
689 | thislen = invecs[invec].iov_len; | ||
690 | |||
691 | invec_ofs = thislen; | ||
692 | |||
693 | memcpy(c->wbuf + c->wbuf_len, invecs[invec].iov_base, thislen); | ||
694 | c->wbuf_len += thislen; | ||
695 | donelen += thislen; | ||
696 | /* Get next invec, if actual did not fill the buffer */ | ||
697 | if (c->wbuf_len < c->wbuf_pagesize) | ||
698 | invec++; | ||
699 | } | ||
700 | |||
701 | /* write buffer is full, flush buffer */ | ||
702 | ret = __jffs2_flush_wbuf(c, NOPAD); | ||
703 | if (ret) { | ||
704 | /* the underlying layer has to check wbuf_len to do the cleanup */ | ||
705 | D1(printk(KERN_WARNING "jffs2_flush_wbuf() called from jffs2_flash_writev() failed %d\n", ret)); | ||
706 | /* Retlen zero to make sure our caller doesn't mark the space dirty. | ||
707 | We've already done everything that's necessary */ | ||
708 | *retlen = 0; | ||
709 | goto exit; | ||
710 | } | ||
711 | outvec_to += donelen; | ||
712 | c->wbuf_ofs = outvec_to; | ||
713 | |||
714 | /* All invecs done ? */ | ||
715 | if (invec == count) | ||
716 | goto alldone; | ||
717 | |||
718 | /* Set up the first outvec, containing the remainder of the | ||
719 | invec we partially used */ | ||
720 | if (invecs[invec].iov_len > invec_ofs) { | ||
721 | outvecs[0].iov_base = invecs[invec].iov_base+invec_ofs; | ||
722 | totlen = outvecs[0].iov_len = invecs[invec].iov_len-invec_ofs; | ||
723 | if (totlen > c->wbuf_pagesize) { | ||
724 | splitvec = outvec; | ||
725 | split_ofs = outvecs[0].iov_len - PAGE_MOD(totlen); | ||
726 | } | ||
727 | outvec++; | ||
728 | } | ||
729 | invec++; | ||
730 | } | ||
731 | |||
732 | /* OK, now we've flushed the wbuf and the start of the bits | ||
733 | we have been asked to write, now to write the rest.... */ | ||
734 | |||
735 | /* totlen holds the amount of data still to be written */ | ||
736 | old_totlen = totlen; | ||
737 | for ( ; invec < count; invec++,outvec++ ) { | ||
738 | outvecs[outvec].iov_base = invecs[invec].iov_base; | ||
739 | totlen += outvecs[outvec].iov_len = invecs[invec].iov_len; | ||
740 | if (PAGE_DIV(totlen) != PAGE_DIV(old_totlen)) { | ||
741 | splitvec = outvec; | ||
742 | split_ofs = outvecs[outvec].iov_len - PAGE_MOD(totlen); | ||
743 | old_totlen = totlen; | ||
744 | } | ||
745 | } | ||
746 | |||
747 | /* Now the outvecs array holds all the remaining data to write */ | ||
748 | /* Up to splitvec,split_ofs is to be written immediately. The rest | ||
749 | goes into the (now-empty) wbuf */ | ||
750 | |||
751 | if (splitvec != -1) { | ||
752 | uint32_t remainder; | ||
753 | |||
754 | remainder = outvecs[splitvec].iov_len - split_ofs; | ||
755 | outvecs[splitvec].iov_len = split_ofs; | ||
756 | |||
757 | /* We did cross a page boundary, so we write some now */ | ||
758 | if (jffs2_cleanmarker_oob(c)) | ||
759 | ret = c->mtd->writev_ecc(c->mtd, outvecs, splitvec+1, outvec_to, &wbuf_retlen, NULL, c->oobinfo); | ||
760 | else | ||
761 | ret = jffs2_flash_direct_writev(c, outvecs, splitvec+1, outvec_to, &wbuf_retlen); | ||
762 | |||
763 | if (ret < 0 || wbuf_retlen != PAGE_DIV(totlen)) { | ||
764 | /* At this point we have no problem, | ||
765 | c->wbuf is empty. | ||
766 | */ | ||
767 | *retlen = donelen; | ||
768 | goto exit; | ||
769 | } | ||
770 | |||
771 | donelen += wbuf_retlen; | ||
772 | c->wbuf_ofs = PAGE_DIV(outvec_to) + PAGE_DIV(totlen); | ||
773 | |||
774 | if (remainder) { | ||
775 | outvecs[splitvec].iov_base += split_ofs; | ||
776 | outvecs[splitvec].iov_len = remainder; | ||
777 | } else { | ||
778 | splitvec++; | ||
779 | } | ||
780 | |||
781 | } else { | ||
782 | splitvec = 0; | ||
783 | } | ||
784 | |||
785 | /* Now splitvec points to the start of the bits we have to copy | ||
786 | into the wbuf */ | ||
787 | wbuf_ptr = c->wbuf; | ||
788 | |||
789 | for ( ; splitvec < outvec; splitvec++) { | ||
790 | /* Don't copy the wbuf into itself */ | ||
791 | if (outvecs[splitvec].iov_base == c->wbuf) | ||
792 | continue; | ||
793 | memcpy(wbuf_ptr, outvecs[splitvec].iov_base, outvecs[splitvec].iov_len); | ||
794 | wbuf_ptr += outvecs[splitvec].iov_len; | ||
795 | donelen += outvecs[splitvec].iov_len; | ||
796 | } | ||
797 | c->wbuf_len = wbuf_ptr - c->wbuf; | ||
798 | |||
799 | /* If there's a remainder in the wbuf and it's a non-GC write, | ||
800 | remember that the wbuf affects this ino */ | ||
801 | alldone: | ||
802 | *retlen = donelen; | ||
803 | |||
804 | if (c->wbuf_len && ino) | ||
805 | jffs2_wbuf_dirties_inode(c, ino); | ||
806 | |||
807 | ret = 0; | ||
808 | |||
809 | exit: | ||
810 | up_write(&c->wbuf_sem); | ||
811 | return ret; | ||
812 | } | ||
813 | |||
814 | /* | ||
815 | * This is the entry for flash write. | ||
816 | * Check, if we work on NAND FLASH, if so build an kvec and write it via vritev | ||
817 | */ | ||
818 | int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, const u_char *buf) | ||
819 | { | ||
820 | struct kvec vecs[1]; | ||
821 | |||
822 | if (jffs2_can_mark_obsolete(c)) | ||
823 | return c->mtd->write(c->mtd, ofs, len, retlen, buf); | ||
824 | |||
825 | vecs[0].iov_base = (unsigned char *) buf; | ||
826 | vecs[0].iov_len = len; | ||
827 | return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0); | ||
828 | } | ||
829 | |||
830 | /* | ||
831 | Handle readback from writebuffer and ECC failure return | ||
832 | */ | ||
833 | int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf) | ||
834 | { | ||
835 | loff_t orbf = 0, owbf = 0, lwbf = 0; | ||
836 | int ret; | ||
837 | |||
838 | /* Read flash */ | ||
839 | if (!jffs2_can_mark_obsolete(c)) { | ||
840 | down_read(&c->wbuf_sem); | ||
841 | |||
842 | if (jffs2_cleanmarker_oob(c)) | ||
843 | ret = c->mtd->read_ecc(c->mtd, ofs, len, retlen, buf, NULL, c->oobinfo); | ||
844 | else | ||
845 | ret = c->mtd->read(c->mtd, ofs, len, retlen, buf); | ||
846 | |||
847 | if ( (ret == -EBADMSG) && (*retlen == len) ) { | ||
848 | printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n", | ||
849 | len, ofs); | ||
850 | /* | ||
851 | * We have the raw data without ECC correction in the buffer, maybe | ||
852 | * we are lucky and all data or parts are correct. We check the node. | ||
853 | * If data are corrupted node check will sort it out. | ||
854 | * We keep this block, it will fail on write or erase and the we | ||
855 | * mark it bad. Or should we do that now? But we should give him a chance. | ||
856 | * Maybe we had a system crash or power loss before the ecc write or | ||
857 | * a erase was completed. | ||
858 | * So we return success. :) | ||
859 | */ | ||
860 | ret = 0; | ||
861 | } | ||
862 | } else | ||
863 | return c->mtd->read(c->mtd, ofs, len, retlen, buf); | ||
864 | |||
865 | /* if no writebuffer available or write buffer empty, return */ | ||
866 | if (!c->wbuf_pagesize || !c->wbuf_len) | ||
867 | goto exit; | ||
868 | |||
869 | /* if we read in a different block, return */ | ||
870 | if ( (ofs & ~(c->sector_size-1)) != (c->wbuf_ofs & ~(c->sector_size-1)) ) | ||
871 | goto exit; | ||
872 | |||
873 | if (ofs >= c->wbuf_ofs) { | ||
874 | owbf = (ofs - c->wbuf_ofs); /* offset in write buffer */ | ||
875 | if (owbf > c->wbuf_len) /* is read beyond write buffer ? */ | ||
876 | goto exit; | ||
877 | lwbf = c->wbuf_len - owbf; /* number of bytes to copy */ | ||
878 | if (lwbf > len) | ||
879 | lwbf = len; | ||
880 | } else { | ||
881 | orbf = (c->wbuf_ofs - ofs); /* offset in read buffer */ | ||
882 | if (orbf > len) /* is write beyond write buffer ? */ | ||
883 | goto exit; | ||
884 | lwbf = len - orbf; /* number of bytes to copy */ | ||
885 | if (lwbf > c->wbuf_len) | ||
886 | lwbf = c->wbuf_len; | ||
887 | } | ||
888 | if (lwbf > 0) | ||
889 | memcpy(buf+orbf,c->wbuf+owbf,lwbf); | ||
890 | |||
891 | exit: | ||
892 | up_read(&c->wbuf_sem); | ||
893 | return ret; | ||
894 | } | ||
895 | |||
896 | /* | ||
897 | * Check, if the out of band area is empty | ||
898 | */ | ||
899 | int jffs2_check_oob_empty( struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int mode) | ||
900 | { | ||
901 | unsigned char *buf; | ||
902 | int ret = 0; | ||
903 | int i,len,page; | ||
904 | size_t retlen; | ||
905 | int oob_size; | ||
906 | |||
907 | /* allocate a buffer for all oob data in this sector */ | ||
908 | oob_size = c->mtd->oobsize; | ||
909 | len = 4 * oob_size; | ||
910 | buf = kmalloc(len, GFP_KERNEL); | ||
911 | if (!buf) { | ||
912 | printk(KERN_NOTICE "jffs2_check_oob_empty(): allocation of temporary data buffer for oob check failed\n"); | ||
913 | return -ENOMEM; | ||
914 | } | ||
915 | /* | ||
916 | * if mode = 0, we scan for a total empty oob area, else we have | ||
917 | * to take care of the cleanmarker in the first page of the block | ||
918 | */ | ||
919 | ret = jffs2_flash_read_oob(c, jeb->offset, len , &retlen, buf); | ||
920 | if (ret) { | ||
921 | D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB failed %d for block at %08x\n", ret, jeb->offset)); | ||
922 | goto out; | ||
923 | } | ||
924 | |||
925 | if (retlen < len) { | ||
926 | D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB return short read " | ||
927 | "(%zd bytes not %d) for block at %08x\n", retlen, len, jeb->offset)); | ||
928 | ret = -EIO; | ||
929 | goto out; | ||
930 | } | ||
931 | |||
932 | /* Special check for first page */ | ||
933 | for(i = 0; i < oob_size ; i++) { | ||
934 | /* Yeah, we know about the cleanmarker. */ | ||
935 | if (mode && i >= c->fsdata_pos && | ||
936 | i < c->fsdata_pos + c->fsdata_len) | ||
937 | continue; | ||
938 | |||
939 | if (buf[i] != 0xFF) { | ||
940 | D2(printk(KERN_DEBUG "Found %02x at %x in OOB for %08x\n", | ||
941 | buf[page+i], page+i, jeb->offset)); | ||
942 | ret = 1; | ||
943 | goto out; | ||
944 | } | ||
945 | } | ||
946 | |||
947 | /* we know, we are aligned :) */ | ||
948 | for (page = oob_size; page < len; page += sizeof(long)) { | ||
949 | unsigned long dat = *(unsigned long *)(&buf[page]); | ||
950 | if(dat != -1) { | ||
951 | ret = 1; | ||
952 | goto out; | ||
953 | } | ||
954 | } | ||
955 | |||
956 | out: | ||
957 | kfree(buf); | ||
958 | |||
959 | return ret; | ||
960 | } | ||
961 | |||
962 | /* | ||
963 | * Scan for a valid cleanmarker and for bad blocks | ||
964 | * For virtual blocks (concatenated physical blocks) check the cleanmarker | ||
965 | * only in the first page of the first physical block, but scan for bad blocks in all | ||
966 | * physical blocks | ||
967 | */ | ||
968 | int jffs2_check_nand_cleanmarker (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) | ||
969 | { | ||
970 | struct jffs2_unknown_node n; | ||
971 | unsigned char buf[2 * NAND_MAX_OOBSIZE]; | ||
972 | unsigned char *p; | ||
973 | int ret, i, cnt, retval = 0; | ||
974 | size_t retlen, offset; | ||
975 | int oob_size; | ||
976 | |||
977 | offset = jeb->offset; | ||
978 | oob_size = c->mtd->oobsize; | ||
979 | |||
980 | /* Loop through the physical blocks */ | ||
981 | for (cnt = 0; cnt < (c->sector_size / c->mtd->erasesize); cnt++) { | ||
982 | /* Check first if the block is bad. */ | ||
983 | if (c->mtd->block_isbad (c->mtd, offset)) { | ||
984 | D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Bad block at %08x\n", jeb->offset)); | ||
985 | return 2; | ||
986 | } | ||
987 | /* | ||
988 | * We read oob data from page 0 and 1 of the block. | ||
989 | * page 0 contains cleanmarker and badblock info | ||
990 | * page 1 contains failure count of this block | ||
991 | */ | ||
992 | ret = c->mtd->read_oob (c->mtd, offset, oob_size << 1, &retlen, buf); | ||
993 | |||
994 | if (ret) { | ||
995 | D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Read OOB failed %d for block at %08x\n", ret, jeb->offset)); | ||
996 | return ret; | ||
997 | } | ||
998 | if (retlen < (oob_size << 1)) { | ||
999 | D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Read OOB return short read (%zd bytes not %d) for block at %08x\n", retlen, oob_size << 1, jeb->offset)); | ||
1000 | return -EIO; | ||
1001 | } | ||
1002 | |||
1003 | /* Check cleanmarker only on the first physical block */ | ||
1004 | if (!cnt) { | ||
1005 | n.magic = cpu_to_je16 (JFFS2_MAGIC_BITMASK); | ||
1006 | n.nodetype = cpu_to_je16 (JFFS2_NODETYPE_CLEANMARKER); | ||
1007 | n.totlen = cpu_to_je32 (8); | ||
1008 | p = (unsigned char *) &n; | ||
1009 | |||
1010 | for (i = 0; i < c->fsdata_len; i++) { | ||
1011 | if (buf[c->fsdata_pos + i] != p[i]) { | ||
1012 | retval = 1; | ||
1013 | } | ||
1014 | } | ||
1015 | D1(if (retval == 1) { | ||
1016 | printk(KERN_WARNING "jffs2_check_nand_cleanmarker(): Cleanmarker node not detected in block at %08x\n", jeb->offset); | ||
1017 | printk(KERN_WARNING "OOB at %08x was ", offset); | ||
1018 | for (i=0; i < oob_size; i++) { | ||
1019 | printk("%02x ", buf[i]); | ||
1020 | } | ||
1021 | printk("\n"); | ||
1022 | }) | ||
1023 | } | ||
1024 | offset += c->mtd->erasesize; | ||
1025 | } | ||
1026 | return retval; | ||
1027 | } | ||
1028 | |||
1029 | int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) | ||
1030 | { | ||
1031 | struct jffs2_unknown_node n; | ||
1032 | int ret; | ||
1033 | size_t retlen; | ||
1034 | |||
1035 | n.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); | ||
1036 | n.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER); | ||
1037 | n.totlen = cpu_to_je32(8); | ||
1038 | |||
1039 | ret = jffs2_flash_write_oob(c, jeb->offset + c->fsdata_pos, c->fsdata_len, &retlen, (unsigned char *)&n); | ||
1040 | |||
1041 | if (ret) { | ||
1042 | D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Write failed for block at %08x: error %d\n", jeb->offset, ret)); | ||
1043 | return ret; | ||
1044 | } | ||
1045 | if (retlen != c->fsdata_len) { | ||
1046 | D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Short write for block at %08x: %zd not %d\n", jeb->offset, retlen, c->fsdata_len)); | ||
1047 | return ret; | ||
1048 | } | ||
1049 | return 0; | ||
1050 | } | ||
1051 | |||
1052 | /* | ||
1053 | * On NAND we try to mark this block bad. If the block was erased more | ||
1054 | * than MAX_ERASE_FAILURES we mark it finaly bad. | ||
1055 | * Don't care about failures. This block remains on the erase-pending | ||
1056 | * or badblock list as long as nobody manipulates the flash with | ||
1057 | * a bootloader or something like that. | ||
1058 | */ | ||
1059 | |||
1060 | int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset) | ||
1061 | { | ||
1062 | int ret; | ||
1063 | |||
1064 | /* if the count is < max, we try to write the counter to the 2nd page oob area */ | ||
1065 | if( ++jeb->bad_count < MAX_ERASE_FAILURES) | ||
1066 | return 0; | ||
1067 | |||
1068 | if (!c->mtd->block_markbad) | ||
1069 | return 1; // What else can we do? | ||
1070 | |||
1071 | D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Marking bad block at %08x\n", bad_offset)); | ||
1072 | ret = c->mtd->block_markbad(c->mtd, bad_offset); | ||
1073 | |||
1074 | if (ret) { | ||
1075 | D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret)); | ||
1076 | return ret; | ||
1077 | } | ||
1078 | return 1; | ||
1079 | } | ||
1080 | |||
/* Bytes of JFFS2 filesystem data kept in a 16-byte OOB area */
#define NAND_JFFS2_OOB16_FSDALEN	8

/* Legacy manual ECC placement for DiskOnChip devices: six ECC bytes
   at the start of the OOB area (used when autoplacement is absent). */
static struct nand_oobinfo jffs2_oobinfo_docecc = {
	.useecc = MTD_NANDECC_PLACE,
	.eccbytes = 6,
	.eccpos = {0,1,2,3,4,5}
};
1088 | |||

/* Work out where in the NAND OOB area JFFS2 may put its cleanmarker
   data (c->fsdata_pos / c->fsdata_len), preferring the driver's
   autoplacement info and falling back to hard-coded legacy layouts.
   Returns 0 on success or a negative errno. */
static int jffs2_nand_set_oobinfo(struct jffs2_sb_info *c)
{
	struct nand_oobinfo *oinfo = &c->mtd->oobinfo;

	/* Do this only, if we have an oob buffer */
	if (!c->mtd->oobsize)
		return 0;

	/* Cleanmarker is out-of-band, so inline size zero */
	c->cleanmarker_size = 0;

	/* Should we use autoplacement ? */
	if (oinfo && oinfo->useecc == MTD_NANDECC_AUTOPLACE) {
		D1(printk(KERN_DEBUG "JFFS2 using autoplace on NAND\n"));
		/* Get the position of the free bytes */
		if (!oinfo->oobfree[0][1]) {
			printk (KERN_WARNING "jffs2_nand_set_oobinfo(): Eeep. Autoplacement selected and no empty space in oob\n");
			return -ENOSPC;
		}
		/* oobfree[0] = { offset, length } of the first free region */
		c->fsdata_pos = oinfo->oobfree[0][0];
		c->fsdata_len = oinfo->oobfree[0][1];
		/* Cap at 8: the cleanmarker node is only 8 bytes */
		if (c->fsdata_len > 8)
			c->fsdata_len = 8;
	} else {
		/* This is just a legacy fallback and should go away soon */
		switch(c->mtd->ecctype) {
		case MTD_ECC_RS_DiskOnChip:
			printk(KERN_WARNING "JFFS2 using DiskOnChip hardware ECC without autoplacement. Fix it!\n");
			c->oobinfo = &jffs2_oobinfo_docecc;
			c->fsdata_pos = 6;
			c->fsdata_len = NAND_JFFS2_OOB16_FSDALEN;
			c->badblock_pos = 15;
			break;

		default:
			D1(printk(KERN_DEBUG "JFFS2 on NAND. No autoplacment info found\n"));
			return -EINVAL;
		}
	}
	return 0;
}
1131 | |||
1132 | int jffs2_nand_flash_setup(struct jffs2_sb_info *c) | ||
1133 | { | ||
1134 | int res; | ||
1135 | |||
1136 | /* Initialise write buffer */ | ||
1137 | init_rwsem(&c->wbuf_sem); | ||
1138 | c->wbuf_pagesize = c->mtd->oobblock; | ||
1139 | c->wbuf_ofs = 0xFFFFFFFF; | ||
1140 | |||
1141 | c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); | ||
1142 | if (!c->wbuf) | ||
1143 | return -ENOMEM; | ||
1144 | |||
1145 | res = jffs2_nand_set_oobinfo(c); | ||
1146 | |||
1147 | #ifdef BREAKME | ||
1148 | if (!brokenbuf) | ||
1149 | brokenbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); | ||
1150 | if (!brokenbuf) { | ||
1151 | kfree(c->wbuf); | ||
1152 | return -ENOMEM; | ||
1153 | } | ||
1154 | memset(brokenbuf, 0xdb, c->wbuf_pagesize); | ||
1155 | #endif | ||
1156 | return res; | ||
1157 | } | ||
1158 | |||
/* Release the NAND write buffer allocated by jffs2_nand_flash_setup() */
void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
{
	kfree(c->wbuf);
}
1163 | |||
1164 | #ifdef CONFIG_JFFS2_FS_NOR_ECC | ||
1165 | int jffs2_nor_ecc_flash_setup(struct jffs2_sb_info *c) { | ||
1166 | /* Cleanmarker is actually larger on the flashes */ | ||
1167 | c->cleanmarker_size = 16; | ||
1168 | |||
1169 | /* Initialize write buffer */ | ||
1170 | init_rwsem(&c->wbuf_sem); | ||
1171 | c->wbuf_pagesize = c->mtd->eccsize; | ||
1172 | c->wbuf_ofs = 0xFFFFFFFF; | ||
1173 | |||
1174 | c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); | ||
1175 | if (!c->wbuf) | ||
1176 | return -ENOMEM; | ||
1177 | |||
1178 | return 0; | ||
1179 | } | ||
1180 | |||
/* Release the write buffer allocated by jffs2_nor_ecc_flash_setup() */
void jffs2_nor_ecc_flash_cleanup(struct jffs2_sb_info *c) {
	kfree(c->wbuf);
}
1184 | #endif | ||
diff --git a/fs/jffs2/write.c b/fs/jffs2/write.c new file mode 100644 index 000000000000..80a5db542629 --- /dev/null +++ b/fs/jffs2/write.c | |||
@@ -0,0 +1,708 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2001-2003 Red Hat, Inc. | ||
5 | * | ||
6 | * Created by David Woodhouse <dwmw2@infradead.org> | ||
7 | * | ||
8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
9 | * | ||
10 | * $Id: write.c,v 1.87 2004/11/16 20:36:12 dwmw2 Exp $ | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/fs.h> | ||
16 | #include <linux/crc32.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/pagemap.h> | ||
19 | #include <linux/mtd/mtd.h> | ||
20 | #include "nodelist.h" | ||
21 | #include "compr.h" | ||
22 | |||
23 | |||
24 | int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint32_t mode, struct jffs2_raw_inode *ri) | ||
25 | { | ||
26 | struct jffs2_inode_cache *ic; | ||
27 | |||
28 | ic = jffs2_alloc_inode_cache(); | ||
29 | if (!ic) { | ||
30 | return -ENOMEM; | ||
31 | } | ||
32 | |||
33 | memset(ic, 0, sizeof(*ic)); | ||
34 | |||
35 | f->inocache = ic; | ||
36 | f->inocache->nlink = 1; | ||
37 | f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache; | ||
38 | f->inocache->ino = ++c->highest_ino; | ||
39 | f->inocache->state = INO_STATE_PRESENT; | ||
40 | |||
41 | ri->ino = cpu_to_je32(f->inocache->ino); | ||
42 | |||
43 | D1(printk(KERN_DEBUG "jffs2_do_new_inode(): Assigned ino# %d\n", f->inocache->ino)); | ||
44 | jffs2_add_ino_cache(c, f->inocache); | ||
45 | |||
46 | ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); | ||
47 | ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE); | ||
48 | ri->totlen = cpu_to_je32(PAD(sizeof(*ri))); | ||
49 | ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4)); | ||
50 | ri->mode = cpu_to_jemode(mode); | ||
51 | |||
52 | f->highest_version = 1; | ||
53 | ri->version = cpu_to_je32(f->highest_version); | ||
54 | |||
55 | return 0; | ||
56 | } | ||
57 | |||
58 | #if CONFIG_JFFS2_FS_DEBUG > 0 | ||
59 | static void writecheck(struct jffs2_sb_info *c, uint32_t ofs) | ||
60 | { | ||
61 | unsigned char buf[16]; | ||
62 | size_t retlen; | ||
63 | int ret, i; | ||
64 | |||
65 | ret = jffs2_flash_read(c, ofs, 16, &retlen, buf); | ||
66 | if (ret || (retlen != 16)) { | ||
67 | D1(printk(KERN_DEBUG "read failed or short in writecheck(). ret %d, retlen %zd\n", ret, retlen)); | ||
68 | return; | ||
69 | } | ||
70 | ret = 0; | ||
71 | for (i=0; i<16; i++) { | ||
72 | if (buf[i] != 0xff) | ||
73 | ret = 1; | ||
74 | } | ||
75 | if (ret) { | ||
76 | printk(KERN_WARNING "ARGH. About to write node to 0x%08x on flash, but there are data already there:\n", ofs); | ||
77 | printk(KERN_WARNING "0x%08x: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", | ||
78 | ofs, | ||
79 | buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7], | ||
80 | buf[8], buf[9], buf[10], buf[11], buf[12], buf[13], buf[14], buf[15]); | ||
81 | } | ||
82 | } | ||
83 | #endif | ||
84 | |||
85 | |||
/* jffs2_write_dnode - given a raw_inode, allocate a full_dnode for it,
   write it to the flash, link it into the existing inode/fragment list.
   'data'/'datalen' may be NULL/0 for a data-less node (e.g. a hole).
   'alloc_mode' (ALLOC_GC/ALLOC_NORETRY/others) controls whether a
   failed write is retried at a freshly reserved location.
   Returns the new full_dnode, or an ERR_PTR on failure. */

struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const unsigned char *data, uint32_t datalen, uint32_t flash_ofs, int alloc_mode)

{
	struct jffs2_raw_node_ref *raw;
	struct jffs2_full_dnode *fn;
	size_t retlen;
	struct kvec vecs[2];
	int ret;
	int retried = 0;
	unsigned long cnt = 2;

	/* Debug: the caller must hand us a node with a valid header CRC */
	D1(if(je32_to_cpu(ri->hdr_crc) != crc32(0, ri, sizeof(struct jffs2_unknown_node)-4)) {
		printk(KERN_CRIT "Eep. CRC not correct in jffs2_write_dnode()\n");
		BUG();
	}
	);
	/* Gather list: node header first, payload second */
	vecs[0].iov_base = ri;
	vecs[0].iov_len = sizeof(*ri);
	vecs[1].iov_base = (unsigned char *)data;
	vecs[1].iov_len = datalen;

	D1(writecheck(c, flash_ofs));

	if (je32_to_cpu(ri->totlen) != sizeof(*ri) + datalen) {
		printk(KERN_WARNING "jffs2_write_dnode: ri->totlen (0x%08x) != sizeof(*ri) (0x%08zx) + datalen (0x%08x)\n", je32_to_cpu(ri->totlen), sizeof(*ri), datalen);
	}
	raw = jffs2_alloc_raw_node_ref();
	if (!raw)
		return ERR_PTR(-ENOMEM);

	fn = jffs2_alloc_full_dnode();
	if (!fn) {
		jffs2_free_raw_node_ref(raw);
		return ERR_PTR(-ENOMEM);
	}

	fn->ofs = je32_to_cpu(ri->offset);
	fn->size = je32_to_cpu(ri->dsize);
	fn->frags = 0;

	/* check number of valid vecs: skip the data vec if there's no data */
	if (!datalen || !data)
		cnt = 1;
retry:
	fn->raw = raw;

	raw->flash_offset = flash_ofs;
	raw->__totlen = PAD(sizeof(*ri)+datalen);
	raw->next_phys = NULL;

	/* GC writes pass ino 0 so the wbuf isn't marked as dirtying us */
	ret = jffs2_flash_writev(c, vecs, cnt, flash_ofs, &retlen,
				 (alloc_mode==ALLOC_GC)?0:f->inocache->ino);

	if (ret || (retlen != sizeof(*ri) + datalen)) {
		printk(KERN_NOTICE "Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n",
		       sizeof(*ri)+datalen, flash_ofs, ret, retlen);

		/* Mark the space as dirtied */
		if (retlen) {
			/* Doesn't belong to any inode */
			raw->next_in_ino = NULL;

			/* Don't change raw->size to match retlen. We may have
			   written the node header already, and only the data will
			   seem corrupted, in which case the scan would skip over
			   any node we write before the original intended end of
			   this node */
			raw->flash_offset |= REF_OBSOLETE;
			jffs2_add_physical_node_ref(c, raw);
			jffs2_mark_node_obsolete(c, raw);
		} else {
			printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", raw->flash_offset);
			jffs2_free_raw_node_ref(raw);
		}
		/* One retry at a freshly reserved location, unless the caller
		   asked for no retries or we can't get a new node ref */
		if (!retried && alloc_mode != ALLOC_NORETRY && (raw = jffs2_alloc_raw_node_ref())) {
			/* Try to reallocate space and retry */
			uint32_t dummy;
			struct jffs2_eraseblock *jeb = &c->blocks[flash_ofs / c->sector_size];

			retried = 1;

			D1(printk(KERN_DEBUG "Retrying failed write.\n"));

			ACCT_SANITY_CHECK(c,jeb);
			D1(ACCT_PARANOIA_CHECK(jeb));

			if (alloc_mode == ALLOC_GC) {
				ret = jffs2_reserve_space_gc(c, sizeof(*ri) + datalen, &flash_ofs, &dummy);
			} else {
				/* Locking pain: drop f->sem while reserving, retake after */
				up(&f->sem);
				jffs2_complete_reservation(c);

				ret = jffs2_reserve_space(c, sizeof(*ri) + datalen, &flash_ofs, &dummy, alloc_mode);
				down(&f->sem);
			}

			if (!ret) {
				D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs));

				ACCT_SANITY_CHECK(c,jeb);
				D1(ACCT_PARANOIA_CHECK(jeb));

				goto retry;
			}
			D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret));
			jffs2_free_raw_node_ref(raw);
		}
		/* Release the full_dnode which is now useless, and return */
		jffs2_free_full_dnode(fn);
		return ERR_PTR(ret?ret:-EIO);
	}
	/* Mark the space used */
	/* If node covers at least a whole page, or if it starts at the
	   beginning of a page and runs to the end of the file, or if
	   it's a hole node, mark it REF_PRISTINE, else REF_NORMAL.
	*/
	if ((je32_to_cpu(ri->dsize) >= PAGE_CACHE_SIZE) ||
	    ( ((je32_to_cpu(ri->offset)&(PAGE_CACHE_SIZE-1))==0) &&
	      (je32_to_cpu(ri->dsize)+je32_to_cpu(ri->offset) ==  je32_to_cpu(ri->isize)))) {
		raw->flash_offset |= REF_PRISTINE;
	} else {
		raw->flash_offset |= REF_NORMAL;
	}
	jffs2_add_physical_node_ref(c, raw);

	/* Link into per-inode list */
	spin_lock(&c->erase_completion_lock);
	raw->next_in_ino = f->inocache->nodes;
	f->inocache->nodes = raw;
	spin_unlock(&c->erase_completion_lock);

	D1(printk(KERN_DEBUG "jffs2_write_dnode wrote node at 0x%08x(%d) with dsize 0x%x, csize 0x%x, node_crc 0x%08x, data_crc 0x%08x, totlen 0x%08x\n",
		  flash_ofs, ref_flags(raw), je32_to_cpu(ri->dsize),
		  je32_to_cpu(ri->csize), je32_to_cpu(ri->node_crc),
		  je32_to_cpu(ri->data_crc), je32_to_cpu(ri->totlen)));

	if (retried) {
		ACCT_SANITY_CHECK(c,NULL);
	}

	return fn;
}
232 | |||
233 | struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_dirent *rd, const unsigned char *name, uint32_t namelen, uint32_t flash_ofs, int alloc_mode) | ||
234 | { | ||
235 | struct jffs2_raw_node_ref *raw; | ||
236 | struct jffs2_full_dirent *fd; | ||
237 | size_t retlen; | ||
238 | struct kvec vecs[2]; | ||
239 | int retried = 0; | ||
240 | int ret; | ||
241 | |||
242 | D1(printk(KERN_DEBUG "jffs2_write_dirent(ino #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x)\n", | ||
243 | je32_to_cpu(rd->pino), name, name, je32_to_cpu(rd->ino), | ||
244 | je32_to_cpu(rd->name_crc))); | ||
245 | D1(writecheck(c, flash_ofs)); | ||
246 | |||
247 | D1(if(je32_to_cpu(rd->hdr_crc) != crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)) { | ||
248 | printk(KERN_CRIT "Eep. CRC not correct in jffs2_write_dirent()\n"); | ||
249 | BUG(); | ||
250 | } | ||
251 | ); | ||
252 | |||
253 | vecs[0].iov_base = rd; | ||
254 | vecs[0].iov_len = sizeof(*rd); | ||
255 | vecs[1].iov_base = (unsigned char *)name; | ||
256 | vecs[1].iov_len = namelen; | ||
257 | |||
258 | raw = jffs2_alloc_raw_node_ref(); | ||
259 | |||
260 | if (!raw) | ||
261 | return ERR_PTR(-ENOMEM); | ||
262 | |||
263 | fd = jffs2_alloc_full_dirent(namelen+1); | ||
264 | if (!fd) { | ||
265 | jffs2_free_raw_node_ref(raw); | ||
266 | return ERR_PTR(-ENOMEM); | ||
267 | } | ||
268 | |||
269 | fd->version = je32_to_cpu(rd->version); | ||
270 | fd->ino = je32_to_cpu(rd->ino); | ||
271 | fd->nhash = full_name_hash(name, strlen(name)); | ||
272 | fd->type = rd->type; | ||
273 | memcpy(fd->name, name, namelen); | ||
274 | fd->name[namelen]=0; | ||
275 | |||
276 | retry: | ||
277 | fd->raw = raw; | ||
278 | |||
279 | raw->flash_offset = flash_ofs; | ||
280 | raw->__totlen = PAD(sizeof(*rd)+namelen); | ||
281 | raw->next_phys = NULL; | ||
282 | |||
283 | ret = jffs2_flash_writev(c, vecs, 2, flash_ofs, &retlen, | ||
284 | (alloc_mode==ALLOC_GC)?0:je32_to_cpu(rd->pino)); | ||
285 | if (ret || (retlen != sizeof(*rd) + namelen)) { | ||
286 | printk(KERN_NOTICE "Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n", | ||
287 | sizeof(*rd)+namelen, flash_ofs, ret, retlen); | ||
288 | /* Mark the space as dirtied */ | ||
289 | if (retlen) { | ||
290 | raw->next_in_ino = NULL; | ||
291 | raw->flash_offset |= REF_OBSOLETE; | ||
292 | jffs2_add_physical_node_ref(c, raw); | ||
293 | jffs2_mark_node_obsolete(c, raw); | ||
294 | } else { | ||
295 | printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", raw->flash_offset); | ||
296 | jffs2_free_raw_node_ref(raw); | ||
297 | } | ||
298 | if (!retried && (raw = jffs2_alloc_raw_node_ref())) { | ||
299 | /* Try to reallocate space and retry */ | ||
300 | uint32_t dummy; | ||
301 | struct jffs2_eraseblock *jeb = &c->blocks[flash_ofs / c->sector_size]; | ||
302 | |||
303 | retried = 1; | ||
304 | |||
305 | D1(printk(KERN_DEBUG "Retrying failed write.\n")); | ||
306 | |||
307 | ACCT_SANITY_CHECK(c,jeb); | ||
308 | D1(ACCT_PARANOIA_CHECK(jeb)); | ||
309 | |||
310 | if (alloc_mode == ALLOC_GC) { | ||
311 | ret = jffs2_reserve_space_gc(c, sizeof(*rd) + namelen, &flash_ofs, &dummy); | ||
312 | } else { | ||
313 | /* Locking pain */ | ||
314 | up(&f->sem); | ||
315 | jffs2_complete_reservation(c); | ||
316 | |||
317 | ret = jffs2_reserve_space(c, sizeof(*rd) + namelen, &flash_ofs, &dummy, alloc_mode); | ||
318 | down(&f->sem); | ||
319 | } | ||
320 | |||
321 | if (!ret) { | ||
322 | D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs)); | ||
323 | ACCT_SANITY_CHECK(c,jeb); | ||
324 | D1(ACCT_PARANOIA_CHECK(jeb)); | ||
325 | goto retry; | ||
326 | } | ||
327 | D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret)); | ||
328 | jffs2_free_raw_node_ref(raw); | ||
329 | } | ||
330 | /* Release the full_dnode which is now useless, and return */ | ||
331 | jffs2_free_full_dirent(fd); | ||
332 | return ERR_PTR(ret?ret:-EIO); | ||
333 | } | ||
334 | /* Mark the space used */ | ||
335 | raw->flash_offset |= REF_PRISTINE; | ||
336 | jffs2_add_physical_node_ref(c, raw); | ||
337 | |||
338 | spin_lock(&c->erase_completion_lock); | ||
339 | raw->next_in_ino = f->inocache->nodes; | ||
340 | f->inocache->nodes = raw; | ||
341 | spin_unlock(&c->erase_completion_lock); | ||
342 | |||
343 | if (retried) { | ||
344 | ACCT_SANITY_CHECK(c,NULL); | ||
345 | } | ||
346 | |||
347 | return fd; | ||
348 | } | ||
349 | |||
/* The OS-specific code fills in the metadata in the jffs2_raw_inode for us, so that
   we don't have to go digging in struct inode or its equivalent. It should set:
   mode, uid, gid, (starting)isize, atime, ctime, mtime

   Writes [offset, offset+writelen) of @buf as a series of data nodes,
   each covering at most one page-cache page.  On return *retlen holds
   the number of bytes actually written, even on error. */
int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
			    struct jffs2_raw_inode *ri, unsigned char *buf,
			    uint32_t offset, uint32_t writelen, uint32_t *retlen)
{
	int ret = 0;
	uint32_t writtenlen = 0;

	D1(printk(KERN_DEBUG "jffs2_write_inode_range(): Ino #%u, ofs 0x%x, len 0x%x\n",
		  f->inocache->ino, offset, writelen));

	/* One iteration per data node: reserve flash space, compress one
	   chunk, fill in @ri and write it via jffs2_write_dnode(). */
	while(writelen) {
		struct jffs2_full_dnode *fn;
		unsigned char *comprbuf = NULL;
		uint16_t comprtype = JFFS2_COMPR_NONE;
		uint32_t phys_ofs, alloclen;
		uint32_t datalen, cdatalen;
		int retried = 0;	/* at most one retry per chunk */

	retry:
		D2(printk(KERN_DEBUG "jffs2_commit_write() loop: 0x%x to write to 0x%x\n", writelen, offset));

		ret = jffs2_reserve_space(c, sizeof(*ri) + JFFS2_MIN_DATA_LEN, &phys_ofs, &alloclen, ALLOC_NORMAL);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space returned %d\n", ret));
			break;
		}
		/* Lock ordering: reserve space first, then take f->sem. */
		down(&f->sem);
		/* Don't let a data node cross a page-cache page boundary. */
		datalen = min_t(uint32_t, writelen, PAGE_CACHE_SIZE - (offset & (PAGE_CACHE_SIZE-1)));
		/* Clamp the on-flash payload to what was actually reserved. */
		cdatalen = min_t(uint32_t, alloclen - sizeof(*ri), datalen);

		/* May reduce datalen/cdatalen; comprbuf is the data to write. */
		comprtype = jffs2_compress(c, f, buf, &comprbuf, &datalen, &cdatalen);

		ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
		ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
		ri->totlen = cpu_to_je32(sizeof(*ri) + cdatalen);
		/* hdr_crc covers the common node header minus the CRC itself. */
		ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));

		ri->ino = cpu_to_je32(f->inocache->ino);
		ri->version = cpu_to_je32(++f->highest_version);
		/* isize grows monotonically across the chunks of this write. */
		ri->isize = cpu_to_je32(max(je32_to_cpu(ri->isize), offset + datalen));
		ri->offset = cpu_to_je32(offset);
		ri->csize = cpu_to_je32(cdatalen);
		ri->dsize = cpu_to_je32(datalen);
		ri->compr = comprtype & 0xff;
		ri->usercompr = (comprtype >> 8 ) & 0xff;
		/* node_crc covers everything except the two trailing CRC fields. */
		ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
		ri->data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen));

		/* NORETRY: retries are driven from this loop, not from
		   inside jffs2_write_dnode(). */
		fn = jffs2_write_dnode(c, f, ri, comprbuf, cdatalen, phys_ofs, ALLOC_NORETRY);

		jffs2_free_comprbuf(comprbuf, buf);

		if (IS_ERR(fn)) {
			ret = PTR_ERR(fn);
			/* Drop the lock and reservation before retrying. */
			up(&f->sem);
			jffs2_complete_reservation(c);
			if (!retried) {
				/* Write error to be retried */
				retried = 1;
				D1(printk(KERN_DEBUG "Retrying node write in jffs2_write_inode_range()\n"));
				goto retry;
			}
			break;
		}
		ret = jffs2_add_full_dnode_to_inode(c, f, fn);
		/* A real data node obsoletes any bare metadata node. */
		if (f->metadata) {
			jffs2_mark_node_obsolete(c, f->metadata->raw);
			jffs2_free_full_dnode(f->metadata);
			f->metadata = NULL;
		}
		if (ret) {
			/* Eep */
			D1(printk(KERN_DEBUG "Eep. add_full_dnode_to_inode() failed in commit_write, returned %d\n", ret));
			jffs2_mark_node_obsolete(c, fn->raw);
			jffs2_free_full_dnode(fn);

			up(&f->sem);
			jffs2_complete_reservation(c);
			break;
		}
		up(&f->sem);
		jffs2_complete_reservation(c);
		if (!datalen) {
			printk(KERN_WARNING "Eep. We didn't actually write any data in jffs2_write_inode_range()\n");
			ret = -EIO;
			break;
		}
		D1(printk(KERN_DEBUG "increasing writtenlen by %d\n", datalen));
		writtenlen += datalen;
		offset += datalen;
		writelen -= datalen;
		buf += datalen;
	}
	/* Report progress even when bailing out with an error. */
	*retlen = writtenlen;
	return ret;
}
449 | |||
/* Create the on-flash representation of a new regular file: first a
   zero-length metadata inode node for @f, then a dirent for @name in
   @dir_f.  NOTE(review): f->sem is only ever released here, never taken
   — the caller apparently enters with it held; confirm against callers.
   rd->type is hard-coded DT_REG, so this path is for regular files. */
int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const char *name, int namelen)
{
	struct jffs2_raw_dirent *rd;
	struct jffs2_full_dnode *fn;
	struct jffs2_full_dirent *fd;
	uint32_t alloclen, phys_ofs;
	int ret;

	/* Try to reserve enough space for both node and dirent.
	 * Just the node will do for now, though
	 */
	ret = jffs2_reserve_space(c, sizeof(*ri), &phys_ofs, &alloclen, ALLOC_NORMAL);
	D1(printk(KERN_DEBUG "jffs2_do_create(): reserved 0x%x bytes\n", alloclen));
	if (ret) {
		up(&f->sem);
		return ret;
	}

	/* Zero-length node: data CRC of nothing; node_crc covers all but
	   the two trailing CRC fields. */
	ri->data_crc = cpu_to_je32(0);
	ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));

	fn = jffs2_write_dnode(c, f, ri, NULL, 0, phys_ofs, ALLOC_NORMAL);

	D1(printk(KERN_DEBUG "jffs2_do_create created file with mode 0x%x\n",
		  jemode_to_cpu(ri->mode)));

	if (IS_ERR(fn)) {
		D1(printk(KERN_DEBUG "jffs2_write_dnode() failed\n"));
		/* Eeek. Wave bye bye */
		up(&f->sem);
		jffs2_complete_reservation(c);
		return PTR_ERR(fn);
	}
	/* No data here. Only a metadata node, which will be
	   obsoleted by the first data write
	*/
	f->metadata = fn;

	up(&f->sem);
	jffs2_complete_reservation(c);
	/* Second reservation: now for the dirent node plus the name. */
	ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL);

	if (ret) {
		/* Eep. */
		D1(printk(KERN_DEBUG "jffs2_reserve_space() for dirent failed\n"));
		return ret;
	}

	rd = jffs2_alloc_raw_dirent();
	if (!rd) {
		/* Argh. Now we treat it like a normal delete */
		jffs2_complete_reservation(c);
		return -ENOMEM;
	}

	down(&dir_f->sem);

	rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
	rd->totlen = cpu_to_je32(sizeof(*rd) + namelen);
	/* hdr_crc covers the common node header minus the CRC itself. */
	rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));

	rd->pino = cpu_to_je32(dir_f->inocache->ino);
	rd->version = cpu_to_je32(++dir_f->highest_version);
	/* ino/ctime are already in on-media (je32) format: copy directly. */
	rd->ino = ri->ino;
	rd->mctime = ri->ctime;
	rd->nsize = namelen;
	rd->type = DT_REG;
	rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
	rd->name_crc = cpu_to_je32(crc32(0, name, namelen));

	fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_NORMAL);

	jffs2_free_raw_dirent(rd);

	if (IS_ERR(fd)) {
		/* dirent failed to write. Delete the inode normally
		   as if it were the final unlink() */
		jffs2_complete_reservation(c);
		up(&dir_f->sem);
		return PTR_ERR(fd);
	}

	/* Link the fd into the inode's list, obsoleting an old
	   one if necessary. */
	jffs2_add_fd_to_list(c, fd, &dir_f->dents);

	jffs2_complete_reservation(c);
	up(&dir_f->sem);

	return 0;
}
542 | |||
543 | |||
/* Remove the dirent for @name from @dir_f by writing a deletion dirent
   (ino 0).  If @dead_f is non-NULL (a real unlink rather than a rename),
   also drop its remaining deletion dirents and decrement its inocache
   nlink; the caller must update the VFS inode's nlink itself. */
int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
		    const char *name, int namelen, struct jffs2_inode_info *dead_f)
{
	struct jffs2_raw_dirent *rd;
	struct jffs2_full_dirent *fd;
	uint32_t alloclen, phys_ofs;
	int ret;

	/* The first operand is deliberately always true: the in-core-only
	   removal branch below is disabled until it has been tested. */
	if (1 /* alternative branch needs testing */ ||
	    !jffs2_can_mark_obsolete(c)) {
		/* We can't mark stuff obsolete on the medium. We need to write a deletion dirent */

		rd = jffs2_alloc_raw_dirent();
		if (!rd)
			return -ENOMEM;

		ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_DELETION);
		if (ret) {
			jffs2_free_raw_dirent(rd);
			return ret;
		}

		down(&dir_f->sem);

		/* Build a deletion node */
		rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
		rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
		rd->totlen = cpu_to_je32(sizeof(*rd) + namelen);
		rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));

		rd->pino = cpu_to_je32(dir_f->inocache->ino);
		rd->version = cpu_to_je32(++dir_f->highest_version);
		rd->ino = cpu_to_je32(0);	/* ino 0 marks this dirent as a deletion */
		rd->mctime = cpu_to_je32(get_seconds());
		rd->nsize = namelen;
		rd->type = DT_UNKNOWN;
		rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
		rd->name_crc = cpu_to_je32(crc32(0, name, namelen));

		fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_DELETION);

		jffs2_free_raw_dirent(rd);

		if (IS_ERR(fd)) {
			jffs2_complete_reservation(c);
			up(&dir_f->sem);
			return PTR_ERR(fd);
		}

		/* File it. This will mark the old one obsolete. */
		jffs2_add_fd_to_list(c, fd, &dir_f->dents);
		up(&dir_f->sem);
	} else {
		/* Medium supports obsoletion marks: just unhook the in-core
		   dirent and obsolete its node in place. */
		struct jffs2_full_dirent **prev = &dir_f->dents;
		uint32_t nhash = full_name_hash(name, namelen);

		down(&dir_f->sem);

		/* The dents list appears to be kept sorted by nhash, so we
		   can stop once past it. */
		while ((*prev) && (*prev)->nhash <= nhash) {
			if ((*prev)->nhash == nhash &&
			    !memcmp((*prev)->name, name, namelen) &&
			    !(*prev)->name[namelen]) {
				struct jffs2_full_dirent *this = *prev;

				D1(printk(KERN_DEBUG "Marking old dirent node (ino #%u) @%08x obsolete\n",
					  this->ino, ref_offset(this->raw)));

				*prev = this->next;
				jffs2_mark_node_obsolete(c, (this->raw));
				jffs2_free_full_dirent(this);
				break;
			}
			prev = &((*prev)->next);
		}
		up(&dir_f->sem);
		/* NOTE(review): this branch never reserves space, yet the
		   common exit below completes a reservation — would be
		   unbalanced if the branch were ever enabled; verify. */
	}

	/* dead_f is NULL if this was a rename not a real unlink */
	/* Also catch the !f->inocache case, where there was a dirent
	   pointing to an inode which didn't exist. */
	if (dead_f && dead_f->inocache) {

		down(&dead_f->sem);

		while (dead_f->dents) {
			/* There can be only deleted ones */
			fd = dead_f->dents;

			dead_f->dents = fd->next;

			/* A live dirent (fd->ino != 0) here means we are
			   deleting an inode that is still linked — warn. */
			if (fd->ino) {
				printk(KERN_WARNING "Deleting inode #%u with active dentry \"%s\"->ino #%u\n",
				       dead_f->inocache->ino, fd->name, fd->ino);
			} else {
				D1(printk(KERN_DEBUG "Removing deletion dirent for \"%s\" from dir ino #%u\n", fd->name, dead_f->inocache->ino));
			}
			jffs2_mark_node_obsolete(c, fd->raw);
			jffs2_free_full_dirent(fd);
		}

		dead_f->inocache->nlink--;
		/* NB: Caller must set inode nlink if appropriate */
		up(&dead_f->sem);
	}

	jffs2_complete_reservation(c);

	return 0;
}
653 | |||
654 | |||
655 | int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino, uint8_t type, const char *name, int namelen) | ||
656 | { | ||
657 | struct jffs2_raw_dirent *rd; | ||
658 | struct jffs2_full_dirent *fd; | ||
659 | uint32_t alloclen, phys_ofs; | ||
660 | int ret; | ||
661 | |||
662 | rd = jffs2_alloc_raw_dirent(); | ||
663 | if (!rd) | ||
664 | return -ENOMEM; | ||
665 | |||
666 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL); | ||
667 | if (ret) { | ||
668 | jffs2_free_raw_dirent(rd); | ||
669 | return ret; | ||
670 | } | ||
671 | |||
672 | down(&dir_f->sem); | ||
673 | |||
674 | /* Build a deletion node */ | ||
675 | rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); | ||
676 | rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT); | ||
677 | rd->totlen = cpu_to_je32(sizeof(*rd) + namelen); | ||
678 | rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)); | ||
679 | |||
680 | rd->pino = cpu_to_je32(dir_f->inocache->ino); | ||
681 | rd->version = cpu_to_je32(++dir_f->highest_version); | ||
682 | rd->ino = cpu_to_je32(ino); | ||
683 | rd->mctime = cpu_to_je32(get_seconds()); | ||
684 | rd->nsize = namelen; | ||
685 | |||
686 | rd->type = type; | ||
687 | |||
688 | rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); | ||
689 | rd->name_crc = cpu_to_je32(crc32(0, name, namelen)); | ||
690 | |||
691 | fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_NORMAL); | ||
692 | |||
693 | jffs2_free_raw_dirent(rd); | ||
694 | |||
695 | if (IS_ERR(fd)) { | ||
696 | jffs2_complete_reservation(c); | ||
697 | up(&dir_f->sem); | ||
698 | return PTR_ERR(fd); | ||
699 | } | ||
700 | |||
701 | /* File it. This will mark the old one obsolete. */ | ||
702 | jffs2_add_fd_to_list(c, fd, &dir_f->dents); | ||
703 | |||
704 | jffs2_complete_reservation(c); | ||
705 | up(&dir_f->sem); | ||
706 | |||
707 | return 0; | ||
708 | } | ||
diff --git a/fs/jffs2/writev.c b/fs/jffs2/writev.c new file mode 100644 index 000000000000..f079f8388566 --- /dev/null +++ b/fs/jffs2/writev.c | |||
@@ -0,0 +1,50 @@ | |||
1 | /* | ||
2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
3 | * | ||
4 | * Copyright (C) 2001, 2002 Red Hat, Inc. | ||
5 | * | ||
6 | * Created by David Woodhouse <dwmw2@infradead.org> | ||
7 | * | ||
8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
9 | * | ||
10 | * $Id: writev.c,v 1.6 2004/11/16 20:36:12 dwmw2 Exp $ | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/mtd/mtd.h> | ||
16 | #include "nodelist.h" | ||
17 | |||
18 | /* This ought to be in core MTD code. All registered MTD devices | ||
19 | without writev should have this put in place. Bug the MTD | ||
20 | maintainer */ | ||
21 | static inline int mtd_fake_writev(struct mtd_info *mtd, const struct kvec *vecs, | ||
22 | unsigned long count, loff_t to, size_t *retlen) | ||
23 | { | ||
24 | unsigned long i; | ||
25 | size_t totlen = 0, thislen; | ||
26 | int ret = 0; | ||
27 | |||
28 | for (i=0; i<count; i++) { | ||
29 | if (!vecs[i].iov_len) | ||
30 | continue; | ||
31 | ret = mtd->write(mtd, to, vecs[i].iov_len, &thislen, vecs[i].iov_base); | ||
32 | totlen += thislen; | ||
33 | if (ret || thislen != vecs[i].iov_len) | ||
34 | break; | ||
35 | to += vecs[i].iov_len; | ||
36 | } | ||
37 | if (retlen) | ||
38 | *retlen = totlen; | ||
39 | return ret; | ||
40 | } | ||
41 | |||
42 | int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs, | ||
43 | unsigned long count, loff_t to, size_t *retlen) | ||
44 | { | ||
45 | if (c->mtd->writev) | ||
46 | return c->mtd->writev(c->mtd, vecs, count, to, retlen); | ||
47 | else | ||
48 | return mtd_fake_writev(c->mtd, vecs, count, to, retlen); | ||
49 | } | ||
50 | |||