Diffstat (limited to 'fs/jffs2/erase.c')
-rw-r--r--  fs/jffs2/erase.c  442
1 files changed, 442 insertions, 0 deletions
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
new file mode 100644
index 000000000000..41451e8bf361
--- /dev/null
+++ b/fs/jffs2/erase.c
@@ -0,0 +1,442 @@
/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: erase.c,v 1.66 2004/11/16 20:36:11 dwmw2 Exp $
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/crc32.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include "nodelist.h"

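/* Context carried through an asynchronous MTD erase. It is allocated
   immediately after the struct erase_info itself, so the completion
   callback can recover both the superblock info and the eraseblock. */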
struct erase_priv_struct {
	struct jffs2_eraseblock *jeb;
	struct jffs2_sb_info *c;
};

#ifndef __ECOS
static void jffs2_erase_callback(struct erase_info *);
#endif
static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset);
static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
static void jffs2_free_all_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);

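/* Issue the erase for one eraseblock. On eCos the flash layer is
   synchronous; on Linux the erase is submitted to the MTD driver and
   completion is reported through jffs2_erase_callback(). If the erase
   cannot even be started, the block is either refiled on
   erase_pending_list (for transient errors) or handed to
   jffs2_erase_failed(). */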
static void jffs2_erase_block(struct jffs2_sb_info *c,
			      struct jffs2_eraseblock *jeb)
{
	int ret;
	uint32_t bad_offset;
#ifdef __ECOS
	ret = jffs2_flash_erase(c, jeb);
	if (!ret) {
		jffs2_erase_succeeded(c, jeb);
		return;
	}
	bad_offset = jeb->offset;
#else /* Linux */
	struct erase_info *instr;

	instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL);
	if (!instr) {
		printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
		spin_lock(&c->erase_completion_lock);
		list_del(&jeb->list);
		list_add(&jeb->list, &c->erase_pending_list);
		c->erasing_size -= c->sector_size;
		c->dirty_size += c->sector_size;
		jeb->dirty_size = c->sector_size;
		spin_unlock(&c->erase_completion_lock);
		return;
	}

	memset(instr, 0, sizeof(*instr));

	instr->mtd = c->mtd;
	instr->addr = jeb->offset;
	instr->len = c->sector_size;
	instr->callback = jffs2_erase_callback;
	instr->priv = (unsigned long)(&instr[1]);
	instr->fail_addr = 0xffffffff;

	((struct erase_priv_struct *)instr->priv)->jeb = jeb;
	((struct erase_priv_struct *)instr->priv)->c = c;

	ret = c->mtd->erase(c->mtd, instr);
	if (!ret)
		return;

	bad_offset = instr->fail_addr;
	kfree(instr);
#endif /* __ECOS */

	if (ret == -ENOMEM || ret == -EAGAIN) {
		/* Erase failed immediately. Refile it on the list */
		D1(printk(KERN_DEBUG "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n", jeb->offset, ret));
		spin_lock(&c->erase_completion_lock);
		list_del(&jeb->list);
		list_add(&jeb->list, &c->erase_pending_list);
		c->erasing_size -= c->sector_size;
		c->dirty_size += c->sector_size;
		jeb->dirty_size = c->sector_size;
		spin_unlock(&c->erase_completion_lock);
		return;
	}

	if (ret == -EROFS)
		printk(KERN_WARNING "Erase at 0x%08x failed immediately: -EROFS. Is the sector locked?\n", jeb->offset);
	else
		printk(KERN_WARNING "Erase at 0x%08x failed immediately: errno %d\n", jeb->offset, ret);

	jffs2_erase_failed(c, jeb, bad_offset);
}

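/* Process up to 'count' blocks: blocks on erase_complete_list are verified
   and given a cleanmarker, while blocks on erase_pending_list have their
   node refs freed and are submitted for erasing. Holds erase_free_sem for
   the duration and drops erase_completion_lock around the slow parts. */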
void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
{
	struct jffs2_eraseblock *jeb;

	down(&c->erase_free_sem);

	spin_lock(&c->erase_completion_lock);

	while (!list_empty(&c->erase_complete_list) ||
	       !list_empty(&c->erase_pending_list)) {

		if (!list_empty(&c->erase_complete_list)) {
			jeb = list_entry(c->erase_complete_list.next, struct jffs2_eraseblock, list);
			list_del(&jeb->list);
			spin_unlock(&c->erase_completion_lock);
			jffs2_mark_erased_block(c, jeb);

			if (!--count) {
				D1(printk(KERN_DEBUG "Count reached. jffs2_erase_pending_blocks leaving\n"));
				goto done;
			}

		} else if (!list_empty(&c->erase_pending_list)) {
			jeb = list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list);
			D1(printk(KERN_DEBUG "Starting erase of pending block 0x%08x\n", jeb->offset));
			list_del(&jeb->list);
			c->erasing_size += c->sector_size;
			c->wasted_size -= jeb->wasted_size;
			c->free_size -= jeb->free_size;
			c->used_size -= jeb->used_size;
			c->dirty_size -= jeb->dirty_size;
			jeb->wasted_size = jeb->used_size = jeb->dirty_size = jeb->free_size = 0;
			jffs2_free_all_node_refs(c, jeb);
			list_add(&jeb->list, &c->erasing_list);
			spin_unlock(&c->erase_completion_lock);

			jffs2_erase_block(c, jeb);

		} else {
			BUG();
		}

		/* Be nice */
		cond_resched();
		spin_lock(&c->erase_completion_lock);
	}

	spin_unlock(&c->erase_completion_lock);
 done:
	D1(printk(KERN_DEBUG "jffs2_erase_pending_blocks completed\n"));

	up(&c->erase_free_sem);
}

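/* The MTD layer reported a successful erase: queue the block on
   erase_complete_list so jffs2_erase_pending_blocks() can verify it and
   write the cleanmarker later. */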
static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	D1(printk(KERN_DEBUG "Erase completed successfully at 0x%08x\n", jeb->offset));
	spin_lock(&c->erase_completion_lock);
	list_del(&jeb->list);
	list_add_tail(&jeb->list, &c->erase_complete_list);
	spin_unlock(&c->erase_completion_lock);
	/* Ensure that kupdated calls us again to mark them clean */
	jffs2_erase_pending_trigger(c);
}

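/* Handle a failed erase. For NAND with a device-reported failure address,
   jffs2_write_nand_badblock() decides whether the block deserves another
   attempt; if not (or on NOR), the block is moved to bad_list and its
   space is accounted as bad. */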
static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
{
	/* For NAND, if the failure did not occur at the device level for a
	   specific physical page, don't bother updating the bad block table. */
	if (jffs2_cleanmarker_oob(c) && (bad_offset != 0xffffffff)) {
		/* We had a device-level failure to erase. Let's see if we've
		   failed too many times. */
		if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) {
			/* We'd like to give this block another try. */
			spin_lock(&c->erase_completion_lock);
			list_del(&jeb->list);
			list_add(&jeb->list, &c->erase_pending_list);
			c->erasing_size -= c->sector_size;
			c->dirty_size += c->sector_size;
			jeb->dirty_size = c->sector_size;
			spin_unlock(&c->erase_completion_lock);
			return;
		}
	}

	spin_lock(&c->erase_completion_lock);
	c->erasing_size -= c->sector_size;
	c->bad_size += c->sector_size;
	list_del(&jeb->list);
	list_add(&jeb->list, &c->bad_list);
	c->nr_erasing_blocks--;
	spin_unlock(&c->erase_completion_lock);
	wake_up(&c->erase_wait);
}

#ifndef __ECOS
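/* Completion callback invoked by the MTD layer when an asynchronous erase
   finishes: dispatch to the success or failure path and free the
   erase_info allocated in jffs2_erase_block(). */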
static void jffs2_erase_callback(struct erase_info *instr)
{
	struct erase_priv_struct *priv = (void *)instr->priv;

	if(instr->state != MTD_ERASE_DONE) {
		printk(KERN_WARNING "Erase at 0x%08x finished, but state != MTD_ERASE_DONE. State is 0x%x instead.\n", instr->addr, instr->state);
		jffs2_erase_failed(priv->c, priv->jeb, instr->fail_addr);
	} else {
		jffs2_erase_succeeded(priv->c, priv->jeb);
	}
	kfree(instr);
}
#endif /* !__ECOS */

/* Hmmm. Maybe we should accept the extra space it takes and make
   this a standard doubly-linked list? */
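/* Unlink every raw node ref that lives in the block being erased from the
   owning inode's next_in_ino chain; if that leaves the inode cache with no
   nodes at all, the cache itself is deleted and freed. */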
static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
					struct jffs2_raw_node_ref *ref, struct jffs2_eraseblock *jeb)
{
	struct jffs2_inode_cache *ic = NULL;
	struct jffs2_raw_node_ref **prev;

	prev = &ref->next_in_ino;

	/* Walk the inode's list once, removing any nodes from this eraseblock */
	while (1) {
		if (!(*prev)->next_in_ino) {
			/* We're looking at the jffs2_inode_cache, which is
			   at the end of the linked list. Stash it and continue
			   from the beginning of the list */
			ic = (struct jffs2_inode_cache *)(*prev);
			prev = &ic->nodes;
			continue;
		}

		if (((*prev)->flash_offset & ~(c->sector_size -1)) == jeb->offset) {
			/* It's in the block we're erasing */
			struct jffs2_raw_node_ref *this;

			this = *prev;
			*prev = this->next_in_ino;
			this->next_in_ino = NULL;

			if (this == ref)
				break;

			continue;
		}
		/* Not to be deleted. Skip */
		prev = &((*prev)->next_in_ino);
	}

	/* PARANOIA */
	if (!ic) {
		printk(KERN_WARNING "inode_cache not found in remove_node_refs()!!\n");
		return;
	}

	D1(printk(KERN_DEBUG "Removed nodes in range 0x%08x-0x%08x from ino #%u\n",
		  jeb->offset, jeb->offset + c->sector_size, ic->ino));

	D2({
		int i=0;
		struct jffs2_raw_node_ref *this;
		printk(KERN_DEBUG "After remove_node_refs_from_ino_list: \n" KERN_DEBUG);

		this = ic->nodes;

		while(this) {
			printk( "0x%08x(%d)->", ref_offset(this), ref_flags(this));
			if (++i == 5) {
				printk("\n" KERN_DEBUG);
				i=0;
			}
			this = this->next_in_ino;
		}
		printk("\n");
	});

	if (ic->nodes == (void *)ic) {
		D1(printk(KERN_DEBUG "inocache for ino #%u is all gone now. Freeing\n", ic->ino));
		jffs2_del_ino_cache(c, ic);
		jffs2_free_inode_cache(ic);
	}
}

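/* Free every raw node ref recorded for this eraseblock, detaching each one
   from its owning inode's list first. Called just before the block is
   queued for erasing. */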
static void jffs2_free_all_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	struct jffs2_raw_node_ref *ref;
	D1(printk(KERN_DEBUG "Freeing all node refs for eraseblock offset 0x%08x\n", jeb->offset));
	while(jeb->first_node) {
		ref = jeb->first_node;
		jeb->first_node = ref->next_phys;

		/* Remove from the inode-list */
		if (ref->next_in_ino)
			jffs2_remove_node_refs_from_ino_list(c, ref, jeb);
		/* else it was a non-inode node or already removed, so don't bother */

		jffs2_free_raw_node_ref(ref);
	}
	jeb->last_node = NULL;
}

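/* Final step after a successful erase: optionally read the block back to
   verify it really is all 0xFF, write a cleanmarker (to flash on NOR, to
   OOB on NAND), fix up the accounting and move the block to free_list. */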
static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	struct jffs2_raw_node_ref *marker_ref = NULL;
	unsigned char *ebuf;
	size_t retlen;
	int ret;
	uint32_t bad_offset;

	if (!jffs2_cleanmarker_oob(c)) {
		marker_ref = jffs2_alloc_raw_node_ref();
		if (!marker_ref) {
			printk(KERN_WARNING "Failed to allocate raw node ref for clean marker\n");
			/* Stick it back on the list from whence it came and come back later */
			jffs2_erase_pending_trigger(c);
			spin_lock(&c->erase_completion_lock);
			list_add(&jeb->list, &c->erase_complete_list);
			spin_unlock(&c->erase_completion_lock);
			return;
		}
	}
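	/* Read the freshly-erased block back in PAGE_SIZE chunks and check
	   that every word is 0xFFFFFFFF; anything else sends the block down
	   the failure path. */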
	ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!ebuf) {
		printk(KERN_WARNING "Failed to allocate page buffer for verifying erase at 0x%08x. Assuming it worked\n", jeb->offset);
	} else {
		uint32_t ofs = jeb->offset;

		D1(printk(KERN_DEBUG "Verifying erase at 0x%08x\n", jeb->offset));
		while(ofs < jeb->offset + c->sector_size) {
			uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs);
			int i;

			bad_offset = ofs;

			ret = jffs2_flash_read(c, ofs, readlen, &retlen, ebuf);
			if (ret) {
				printk(KERN_WARNING "Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n", ofs, ret);
				goto bad;
			}
			if (retlen != readlen) {
				printk(KERN_WARNING "Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n", ofs, readlen, retlen);
				goto bad;
			}
			for (i=0; i<readlen; i += sizeof(unsigned long)) {
				/* It's OK. We know it's properly aligned */
				unsigned long datum = *(unsigned long *)(&ebuf[i]);
				if (datum + 1) {
					bad_offset += i;
					printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08x\n", datum, bad_offset);
				bad:
					if (!jffs2_cleanmarker_oob(c))
						jffs2_free_raw_node_ref(marker_ref);
					kfree(ebuf);
				bad2:
					spin_lock(&c->erase_completion_lock);
					/* Stick it on a list (any list) so
					   erase_failed can take it right off
					   again. Silly, but shouldn't happen
					   often. */
					list_add(&jeb->list, &c->erasing_list);
					spin_unlock(&c->erase_completion_lock);
					jffs2_erase_failed(c, jeb, bad_offset);
					return;
				}
			}
			ofs += readlen;
			cond_resched();
		}
		kfree(ebuf);
	}

	bad_offset = jeb->offset;

	/* Write the erase complete marker */
	D1(printk(KERN_DEBUG "Writing erased marker to block at 0x%08x\n", jeb->offset));
	if (jffs2_cleanmarker_oob(c)) {

		if (jffs2_write_nand_cleanmarker(c, jeb))
			goto bad2;

		jeb->first_node = jeb->last_node = NULL;

		jeb->free_size = c->sector_size;
		jeb->used_size = 0;
		jeb->dirty_size = 0;
		jeb->wasted_size = 0;
	} else {
		struct kvec vecs[1];
		struct jffs2_unknown_node marker = {
			.magic =	cpu_to_je16(JFFS2_MAGIC_BITMASK),
			.nodetype =	cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
			.totlen =	cpu_to_je32(c->cleanmarker_size)
		};

		marker.hdr_crc = cpu_to_je32(crc32(0, &marker, sizeof(struct jffs2_unknown_node)-4));

		vecs[0].iov_base = (unsigned char *) &marker;
		vecs[0].iov_len = sizeof(marker);
		ret = jffs2_flash_direct_writev(c, vecs, 1, jeb->offset, &retlen);

		if (ret) {
			printk(KERN_WARNING "Write clean marker to block at 0x%08x failed: %d\n",
			       jeb->offset, ret);
			goto bad2;
		}
		if (retlen != sizeof(marker)) {
			printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n",
			       jeb->offset, sizeof(marker), retlen);
			goto bad2;
		}

		marker_ref->next_in_ino = NULL;
		marker_ref->next_phys = NULL;
		marker_ref->flash_offset = jeb->offset | REF_NORMAL;
		marker_ref->__totlen = c->cleanmarker_size;

		jeb->first_node = jeb->last_node = marker_ref;

		jeb->free_size = c->sector_size - c->cleanmarker_size;
		jeb->used_size = c->cleanmarker_size;
		jeb->dirty_size = 0;
		jeb->wasted_size = 0;
	}

	spin_lock(&c->erase_completion_lock);
	c->erasing_size -= c->sector_size;
	c->free_size += jeb->free_size;
	c->used_size += jeb->used_size;

	ACCT_SANITY_CHECK(c,jeb);
	D1(ACCT_PARANOIA_CHECK(jeb));

	list_add_tail(&jeb->list, &c->free_list);
	c->nr_erasing_blocks--;
	c->nr_free_blocks++;
	spin_unlock(&c->erase_completion_lock);
	wake_up(&c->erase_wait);
}