Diffstat (limited to 'fs/jffs2')

 fs/jffs2/erase.c | 22 ++++++++++++++++++++--
 1 file changed, 20 insertions(+), 2 deletions(-)
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index e9e3c2c330cc..f81148ae3101 100644
--- a/fs/jffs2/erase.c
+++ b/fs/jffs2/erase.c
@@ -50,12 +50,14 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
 	instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL);
 	if (!instr) {
 		printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
+		down(&c->erase_free_sem);
 		spin_lock(&c->erase_completion_lock);
 		list_move(&jeb->list, &c->erase_pending_list);
 		c->erasing_size -= c->sector_size;
 		c->dirty_size += c->sector_size;
 		jeb->dirty_size = c->sector_size;
 		spin_unlock(&c->erase_completion_lock);
+		up(&c->erase_free_sem);
 		return;
 	}
 
@@ -82,12 +84,14 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
 	if (ret == -ENOMEM || ret == -EAGAIN) {
 		/* Erase failed immediately. Refile it on the list */
 		D1(printk(KERN_DEBUG "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n", jeb->offset, ret));
+		down(&c->erase_free_sem);
 		spin_lock(&c->erase_completion_lock);
 		list_move(&jeb->list, &c->erase_pending_list);
 		c->erasing_size -= c->sector_size;
 		c->dirty_size += c->sector_size;
 		jeb->dirty_size = c->sector_size;
 		spin_unlock(&c->erase_completion_lock);
+		up(&c->erase_free_sem);
 		return;
 	}
 
@@ -114,6 +118,7 @@ void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
 			jeb = list_entry(c->erase_complete_list.next, struct jffs2_eraseblock, list);
 			list_del(&jeb->list);
 			spin_unlock(&c->erase_completion_lock);
+			up(&c->erase_free_sem);
 			jffs2_mark_erased_block(c, jeb);
 
 			if (!--count) {
@@ -134,6 +139,7 @@ void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
 			jffs2_free_jeb_node_refs(c, jeb);
 			list_add(&jeb->list, &c->erasing_list);
 			spin_unlock(&c->erase_completion_lock);
+			up(&c->erase_free_sem);
 
 			jffs2_erase_block(c, jeb);
 
@@ -143,22 +149,24 @@ void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
 
 		/* Be nice */
 		yield();
+		down(&c->erase_free_sem);
 		spin_lock(&c->erase_completion_lock);
 	}
 
 	spin_unlock(&c->erase_completion_lock);
+	up(&c->erase_free_sem);
 done:
 	D1(printk(KERN_DEBUG "jffs2_erase_pending_blocks completed\n"));
-
-	up(&c->erase_free_sem);
 }
 
 static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
 {
 	D1(printk(KERN_DEBUG "Erase completed successfully at 0x%08x\n", jeb->offset));
+	down(&c->erase_free_sem);
 	spin_lock(&c->erase_completion_lock);
 	list_move_tail(&jeb->list, &c->erase_complete_list);
 	spin_unlock(&c->erase_completion_lock);
+	up(&c->erase_free_sem);
 	/* Ensure that kupdated calls us again to mark them clean */
 	jffs2_erase_pending_trigger(c);
 }
@@ -172,22 +180,26 @@ static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock
 		   failed too many times. */
 		if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) {
 			/* We'd like to give this block another try. */
+			down(&c->erase_free_sem);
 			spin_lock(&c->erase_completion_lock);
 			list_move(&jeb->list, &c->erase_pending_list);
 			c->erasing_size -= c->sector_size;
 			c->dirty_size += c->sector_size;
 			jeb->dirty_size = c->sector_size;
 			spin_unlock(&c->erase_completion_lock);
+			up(&c->erase_free_sem);
 			return;
 		}
 	}
 
+	down(&c->erase_free_sem);
 	spin_lock(&c->erase_completion_lock);
 	c->erasing_size -= c->sector_size;
 	c->bad_size += c->sector_size;
 	list_move(&jeb->list, &c->bad_list);
 	c->nr_erasing_blocks--;
 	spin_unlock(&c->erase_completion_lock);
+	up(&c->erase_free_sem);
 	wake_up(&c->erase_wait);
 }
 
@@ -444,6 +456,7 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
 		jffs2_link_node_ref(c, jeb, jeb->offset | REF_NORMAL, c->cleanmarker_size, NULL);
 	}
 
+	down(&c->erase_free_sem);
 	spin_lock(&c->erase_completion_lock);
 	c->erasing_size -= c->sector_size;
 	c->free_size += jeb->free_size;
@@ -456,23 +469,28 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
 	c->nr_erasing_blocks--;
 	c->nr_free_blocks++;
 	spin_unlock(&c->erase_completion_lock);
+	up(&c->erase_free_sem);
 	wake_up(&c->erase_wait);
 	return;
 
 filebad:
+	down(&c->erase_free_sem);
 	spin_lock(&c->erase_completion_lock);
 	/* Stick it on a list (any list) so erase_failed can take it
 	   right off again. Silly, but shouldn't happen often. */
 	list_add(&jeb->list, &c->erasing_list);
 	spin_unlock(&c->erase_completion_lock);
+	up(&c->erase_free_sem);
 	jffs2_erase_failed(c, jeb, bad_offset);
 	return;
 
 refile:
 	/* Stick it back on the list from whence it came and come back later */
 	jffs2_erase_pending_trigger(c);
+	down(&c->erase_free_sem);
 	spin_lock(&c->erase_completion_lock);
 	list_add(&jeb->list, &c->erase_complete_list);
 	spin_unlock(&c->erase_completion_lock);
+	up(&c->erase_free_sem);
 	return;
 }
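The pattern applied at every site above is the same: take the sleeping erase_free_sem before the erase_completion_lock spinlock, and release them in reverse order, so the semaphore is never acquired while the spinlock is held. Below is a minimal sketch of that ordering, not code from the change itself; the helper name is hypothetical, while the fields are the ones visible in the hunks (pre-mutex-conversion jffs2_sb_info, where erase_free_sem is a struct semaphore).

	/* Sketch only: illustrates the lock ordering this patch establishes
	 * around block refiling.  jffs2_refile_sketch() is a hypothetical
	 * helper, not a function added by the patch. */
	static void jffs2_refile_sketch(struct jffs2_sb_info *c,
					struct jffs2_eraseblock *jeb)
	{
		down(&c->erase_free_sem);		/* sleeping lock first */
		spin_lock(&c->erase_completion_lock);	/* then the spinlock */

		list_move(&jeb->list, &c->erase_pending_list);
		c->erasing_size -= c->sector_size;
		c->dirty_size += c->sector_size;
		jeb->dirty_size = c->sector_size;

		spin_unlock(&c->erase_completion_lock);	/* release in reverse order */
		up(&c->erase_free_sem);
	}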