Diffstat (limited to 'fs/jffs2/nodemgmt.c')
-rw-r--r--	fs/jffs2/nodemgmt.c	18	+++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
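
The hunks below do a mechanical down()/up() to mutex_lock()/mutex_unlock() conversion on two JFFS2 locks, c->alloc_sem and c->erase_free_sem. For context, here is a minimal sketch of the declaration/initialization side that such a conversion implies; the two field names are taken from the diff, while the struct layout shown, the helper name jffs2_sb_locks_init(), and the init site are illustrative assumptions, not part of this patch.

/* Sketch only: the real fields live in the kernel's struct jffs2_sb_info;
 * everything here except the two field names is assumed for illustration. */
#include <linux/mutex.h>

struct jffs2_sb_info {
	/* ... other members elided ... */
	struct mutex alloc_sem;		/* was: struct semaphore alloc_sem; */
	struct mutex erase_free_sem;	/* was: struct semaphore erase_free_sem; */
};

/* Hypothetical mount-time helper showing the init half of the conversion. */
static void jffs2_sb_locks_init(struct jffs2_sb_info *c)
{
	mutex_init(&c->alloc_sem);	/* replaces sema_init(&c->alloc_sem, 1) */
	mutex_init(&c->erase_free_sem);
}
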
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
index a3e67a4ed675..747a73f0aa4d 100644
--- a/fs/jffs2/nodemgmt.c
+++ b/fs/jffs2/nodemgmt.c
@@ -48,7 +48,7 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
 	minsize = PAD(minsize);
 
 	D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
-	down(&c->alloc_sem);
+	mutex_lock(&c->alloc_sem);
 
 	D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));
 
@@ -81,7 +81,7 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
 				  dirty, c->unchecked_size, c->sector_size));
 
 				spin_unlock(&c->erase_completion_lock);
-				up(&c->alloc_sem);
+				mutex_unlock(&c->alloc_sem);
 				return -ENOSPC;
 			}
 
@@ -104,11 +104,11 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
 				D1(printk(KERN_DEBUG "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
 					  avail, blocksneeded * c->sector_size));
 				spin_unlock(&c->erase_completion_lock);
-				up(&c->alloc_sem);
+				mutex_unlock(&c->alloc_sem);
 				return -ENOSPC;
 			}
 
-			up(&c->alloc_sem);
+			mutex_unlock(&c->alloc_sem);
 
 			D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
 				  c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
@@ -124,7 +124,7 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
 			if (signal_pending(current))
 				return -EINTR;
 
-			down(&c->alloc_sem);
+			mutex_lock(&c->alloc_sem);
 			spin_lock(&c->erase_completion_lock);
 		}
 
@@ -137,7 +137,7 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
 	if (!ret)
 		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
 	if (ret)
-		up(&c->alloc_sem);
+		mutex_unlock(&c->alloc_sem);
 	return ret;
 }
 
@@ -462,7 +462,7 @@ void jffs2_complete_reservation(struct jffs2_sb_info *c)
 {
 	D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
 	jffs2_garbage_collect_trigger(c);
-	up(&c->alloc_sem);
+	mutex_unlock(&c->alloc_sem);
 }
 
 static inline int on_list(struct list_head *obj, struct list_head *head)
@@ -511,7 +511,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
 		   any jffs2_raw_node_refs. So we don't need to stop erases from
 		   happening, or protect against people holding an obsolete
 		   jffs2_raw_node_ref without the erase_completion_lock. */
-		down(&c->erase_free_sem);
+		mutex_lock(&c->erase_free_sem);
 	}
 
 	spin_lock(&c->erase_completion_lock);
@@ -714,7 +714,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
 	}
 
  out_erase_sem:
-	up(&c->erase_free_sem);
+	mutex_unlock(&c->erase_free_sem);
 }
 
 int jffs2_thread_should_wake(struct jffs2_sb_info *c)
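
Taken together, the hunks change only the locking primitive, not the locking discipline: jffs2_reserve_space() still returns with alloc_sem held on success (dropping it only on the error paths shown), jffs2_complete_reservation() still releases it, and jffs2_mark_node_obsolete() still brackets its erase handling with erase_free_sem up to the out_erase_sem label.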