Diffstat (limited to 'fs/jffs2/erase.c')
-rw-r--r--  fs/jffs2/erase.c  42
1 files changed, 21 insertions, 21 deletions
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index a1db9180633f..96d9ad56e573 100644
--- a/fs/jffs2/erase.c
+++ b/fs/jffs2/erase.c
@@ -50,14 +50,14 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
 	instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL);
 	if (!instr) {
 		printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
-		down(&c->erase_free_sem);
+		mutex_lock(&c->erase_free_sem);
 		spin_lock(&c->erase_completion_lock);
 		list_move(&jeb->list, &c->erase_pending_list);
 		c->erasing_size -= c->sector_size;
 		c->dirty_size += c->sector_size;
 		jeb->dirty_size = c->sector_size;
 		spin_unlock(&c->erase_completion_lock);
-		up(&c->erase_free_sem);
+		mutex_unlock(&c->erase_free_sem);
 		return;
 	}
 
@@ -84,14 +84,14 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
 	if (ret == -ENOMEM || ret == -EAGAIN) {
 		/* Erase failed immediately. Refile it on the list */
 		D1(printk(KERN_DEBUG "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n", jeb->offset, ret));
-		down(&c->erase_free_sem);
+		mutex_lock(&c->erase_free_sem);
 		spin_lock(&c->erase_completion_lock);
 		list_move(&jeb->list, &c->erase_pending_list);
 		c->erasing_size -= c->sector_size;
 		c->dirty_size += c->sector_size;
 		jeb->dirty_size = c->sector_size;
 		spin_unlock(&c->erase_completion_lock);
-		up(&c->erase_free_sem);
+		mutex_unlock(&c->erase_free_sem);
 		return;
 	}
 
@@ -107,7 +107,7 @@ void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
 {
 	struct jffs2_eraseblock *jeb;
 
-	down(&c->erase_free_sem);
+	mutex_lock(&c->erase_free_sem);
 
 	spin_lock(&c->erase_completion_lock);
 
@@ -118,7 +118,7 @@ void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
 			jeb = list_entry(c->erase_complete_list.next, struct jffs2_eraseblock, list);
 			list_del(&jeb->list);
 			spin_unlock(&c->erase_completion_lock);
-			up(&c->erase_free_sem);
+			mutex_unlock(&c->erase_free_sem);
 			jffs2_mark_erased_block(c, jeb);
 
 			if (!--count) {
@@ -139,7 +139,7 @@ void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
 			jffs2_free_jeb_node_refs(c, jeb);
 			list_add(&jeb->list, &c->erasing_list);
 			spin_unlock(&c->erase_completion_lock);
-			up(&c->erase_free_sem);
+			mutex_unlock(&c->erase_free_sem);
 
 			jffs2_erase_block(c, jeb);
 
@@ -149,12 +149,12 @@ void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
 
 		/* Be nice */
 		yield();
-		down(&c->erase_free_sem);
+		mutex_lock(&c->erase_free_sem);
 		spin_lock(&c->erase_completion_lock);
 	}
 
 	spin_unlock(&c->erase_completion_lock);
-	up(&c->erase_free_sem);
+	mutex_unlock(&c->erase_free_sem);
  done:
 	D1(printk(KERN_DEBUG "jffs2_erase_pending_blocks completed\n"));
 }
@@ -162,11 +162,11 @@ void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
 static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
 {
 	D1(printk(KERN_DEBUG "Erase completed successfully at 0x%08x\n", jeb->offset));
-	down(&c->erase_free_sem);
+	mutex_lock(&c->erase_free_sem);
 	spin_lock(&c->erase_completion_lock);
 	list_move_tail(&jeb->list, &c->erase_complete_list);
 	spin_unlock(&c->erase_completion_lock);
-	up(&c->erase_free_sem);
+	mutex_unlock(&c->erase_free_sem);
 	/* Ensure that kupdated calls us again to mark them clean */
 	jffs2_erase_pending_trigger(c);
 }
@@ -180,26 +180,26 @@ static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock
 		   failed too many times. */
 		if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) {
 			/* We'd like to give this block another try. */
-			down(&c->erase_free_sem);
+			mutex_lock(&c->erase_free_sem);
 			spin_lock(&c->erase_completion_lock);
 			list_move(&jeb->list, &c->erase_pending_list);
 			c->erasing_size -= c->sector_size;
 			c->dirty_size += c->sector_size;
 			jeb->dirty_size = c->sector_size;
 			spin_unlock(&c->erase_completion_lock);
-			up(&c->erase_free_sem);
+			mutex_unlock(&c->erase_free_sem);
 			return;
 		}
 	}
 
-	down(&c->erase_free_sem);
+	mutex_lock(&c->erase_free_sem);
 	spin_lock(&c->erase_completion_lock);
 	c->erasing_size -= c->sector_size;
 	c->bad_size += c->sector_size;
 	list_move(&jeb->list, &c->bad_list);
 	c->nr_erasing_blocks--;
 	spin_unlock(&c->erase_completion_lock);
-	up(&c->erase_free_sem);
+	mutex_unlock(&c->erase_free_sem);
 	wake_up(&c->erase_wait);
 }
 
@@ -456,7 +456,7 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
 		jffs2_link_node_ref(c, jeb, jeb->offset | REF_NORMAL, c->cleanmarker_size, NULL);
 	}
 
-	down(&c->erase_free_sem);
+	mutex_lock(&c->erase_free_sem);
 	spin_lock(&c->erase_completion_lock);
 	c->erasing_size -= c->sector_size;
 	c->free_size += jeb->free_size;
@@ -469,28 +469,28 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
 	c->nr_erasing_blocks--;
 	c->nr_free_blocks++;
 	spin_unlock(&c->erase_completion_lock);
-	up(&c->erase_free_sem);
+	mutex_unlock(&c->erase_free_sem);
 	wake_up(&c->erase_wait);
 	return;
 
 filebad:
-	down(&c->erase_free_sem);
+	mutex_lock(&c->erase_free_sem);
 	spin_lock(&c->erase_completion_lock);
 	/* Stick it on a list (any list) so erase_failed can take it
 	   right off again. Silly, but shouldn't happen often. */
 	list_add(&jeb->list, &c->erasing_list);
 	spin_unlock(&c->erase_completion_lock);
-	up(&c->erase_free_sem);
+	mutex_unlock(&c->erase_free_sem);
 	jffs2_erase_failed(c, jeb, bad_offset);
 	return;
 
 refile:
 	/* Stick it back on the list from whence it came and come back later */
 	jffs2_erase_pending_trigger(c);
-	down(&c->erase_free_sem);
+	mutex_lock(&c->erase_free_sem);
 	spin_lock(&c->erase_completion_lock);
 	list_add(&jeb->list, &c->erase_complete_list);
 	spin_unlock(&c->erase_completion_lock);
-	up(&c->erase_free_sem);
+	mutex_unlock(&c->erase_free_sem);
 	return;
 }
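
Note (not part of the commit): the pattern repeated throughout this diff is an outer sleeping lock (erase_free_sem, converted here from a semaphore to a struct mutex) taken before the erase_completion_lock spinlock, with the block accounting updated only while the spinlock is held and the locks released in reverse order. A minimal kernel-style sketch of that nesting, using hypothetical names (erase_ctx, refile_block) rather than the real JFFS2 structures:

#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/types.h>

/* Hypothetical illustration of the locking pattern -- not JFFS2 code. */
struct erase_ctx {
	struct mutex erase_free_sem;       /* outer sleeping lock */
	spinlock_t erase_completion_lock;  /* inner lock protecting counters and lists */
	struct list_head erase_pending_list;
	uint32_t erasing_size, dirty_size, sector_size;
};

static void refile_block(struct erase_ctx *c, struct list_head *block)
{
	mutex_lock(&c->erase_free_sem);        /* may sleep, so taken first */
	spin_lock(&c->erase_completion_lock);  /* no sleeping while this is held */
	list_move(block, &c->erase_pending_list);
	c->erasing_size -= c->sector_size;
	c->dirty_size += c->sector_size;
	spin_unlock(&c->erase_completion_lock);
	mutex_unlock(&c->erase_free_sem);      /* release in reverse order */
}

Because mutex_lock() can sleep, it has to be acquired before the spinlock; sleeping with a spinlock held would risk deadlock or scheduling-while-atomic warnings, which is why the conversion keeps the same lock ordering as the old down()/up() calls at every site.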